Changes in / [783c1b5:d24e41d] in sasview
Files:
- 5 added
- 7 deleted
- 29 edited
sasview/setup_exe.py
ra1b8fee → ra1b8fee

 # Copy the settings file for the sas.dataloader file extension associations
 import sas.sascalc.dataloader.readers
-f = os.path.join(sas.sascalc.dataloader.readers.get_data_path())
+f = os.path.join(sas.sascalc.dataloader.readers.get_data_path(), 'defaults.json')
 if os.path.isfile(f):
     data_files.append(('.', [f]))
…
 if os.path.isfile(f):
     data_files.append(('.', [f]))
+
+#f = 'default_categories.json'
+#if os.path.isfile(f):
+#    data_files.append(('.', [f]))
 
 # numerical libraries
sasview/setup_mac.py
ra1b8fee → ra1b8fee

 
 #CANSAxml reader data files
-RESOURCES_FILES.append(os.path.join(sas.sascalc.dataloader.readers.get_data_path()))
+RESOURCES_FILES.append(os.path.join(sas.sascalc.dataloader.readers.get_data_path(), 'defaults.json'))
 
 DATA_FILES.append('logging.ini')
setup.py
r7a5d066 → r14bb7a4

 package_dir["sas.sascalc.dataloader"] = os.path.join(
     "src", "sas", "sascalc", "dataloader")
-package_data["sas.sascalc.dataloader.readers"] = ['schema/*.xsd']
+package_data["sas.sascalc.dataloader.readers"] = [
+    'defaults.json', 'schema/*.xsd']
 packages.extend(["sas.sascalc.dataloader", "sas.sascalc.dataloader.readers",
                  "sas.sascalc.dataloader.readers.schema"])
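All three build scripts above point at the same packaged settings file. A minimal sketch of the lookup they rely on, assuming (as the diffs suggest) that get_data_path() resolves to the directory of the readers package; the helper below is a stand-in, not SasView code:

# Stand-in for sas.sascalc.dataloader.readers.get_data_path(): defaults.json
# is assumed to ship next to the module inside the installed package.
import os

def get_data_path():
    return os.path.dirname(os.path.abspath(__file__))

settings = os.path.join(get_data_path(), 'defaults.json')
if os.path.isfile(settings):
    print("found extension associations in %s" % settings)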
src/sas/sascalc/data_util/registry.py
r3ece5dd → ra1b8fee

+# This program is public domain
 """
 File extension registry.
…
 from __future__ import print_function
 
-from sas.sascalc.dataloader.loader_exceptions import NoKnownLoaderException
+import os.path
 
 class ExtensionRegistry(object):
…
         # Add an association by setting an element
         registry['.zip'] = unzip
 
         # Multiple extensions for one loader
         registry['.tgz'] = untar
         registry['.tar.gz'] = untar
 
         # Generic extensions to use after trying more specific extensions;
         # these will be checked after the more specific extensions fail.
         registry['.gz'] = gunzip
…
         # Show registered extensions
         print registry.extensions()
 
         # Can also register a format name for explicit control from caller
         registry['cx3'] = cx3
…
     def __init__(self, **kw):
         self.loaders = {}
-
     def __setitem__(self, ext, loader):
         if ext not in self.loaders:
             self.loaders[ext] = []
         self.loaders[ext].insert(0,loader)
-
     def __getitem__(self, ext):
         return self.loaders[ext]
-
     def __contains__(self, ext):
         return ext in self.loaders
-
     def formats(self):
         """
…
         names.sort()
         return names
-
     def extensions(self):
         """
…
         exts.sort()
         return exts
-
     def lookup(self, path):
         """
         Return the loader associated with the file type of path.
 
-        :param path: Data file path
-        :raises ValueError: When no loaders are found for the file.
-        :return: List of available readers for the file extension
+        Raises ValueError if file type is not known.
         """
         # Find matching extensions
         extlist = [ext for ext in self.extensions() if path.endswith(ext)]
…
         # Raise an error if there are no matching extensions
         if len(loaders) == 0:
-            raise ValueError("Unknown file type for "+path)
+            raise ValueError, "Unknown file type for "+path
+        # All done
         return loaders
-
     def load(self, path, format=None):
         """
         Call the loader for the file type of path.
 
-        :raise ValueError: if no loader is available.
-        :raise KeyError: if format is not available.
+        Raises ValueError if no loader is available.
+        Raises KeyError if format is not available.
         May raise a loader-defined exception if loader fails.
         """
-        loaders = []
         if format is None:
-            try:
-                loaders = self.lookup(path)
-            except ValueError as e:
-                pass
+            loaders = self.lookup(path)
         else:
-            try:
-                loaders = self.loaders[format]
-            except KeyError as e:
-                pass
-        last_exc = None
+            loaders = self.loaders[format]
         for fn in loaders:
             try:
                 return fn(path)
-            except Exception as e:
-                last_exc = e
-                pass # give other loaders a chance to succeed
+            except:
+                pass # give other loaders a chance to succeed
         # If we get here it is because all loaders failed
-        if last_exc is not None and len(loaders) != 0:
-            # If file has associated loader(s) and they;ve failed
-            raise last_exc
-        raise NoKnownLoaderException(e.message) # raise generic exception
+        raise # reraises last exception
+
+def test():
+    reg = ExtensionRegistry()
+    class CxError(Exception): pass
+    def cx(file): return 'cx'
+    def new_cx(file): return 'new_cx'
+    def fail_cx(file): raise CxError
+    def cat(file): return 'cat'
+    def gunzip(file): return 'gunzip'
+    reg['.cx'] = cx
+    reg['.cx1'] = cx
+    reg['.cx'] = new_cx
+    reg['.gz'] = gunzip
+    reg['.cx.gz'] = new_cx
+    reg['.cx1.gz'] = fail_cx
+    reg['.cx1'] = fail_cx
+    reg['.cx2'] = fail_cx
+    reg['new_cx'] = new_cx
+
+    # Two loaders associated with .cx
+    assert reg.lookup('hello.cx') == [new_cx,cx]
+    # Make sure the last loader applies first
+    assert reg.load('hello.cx') == 'new_cx'
+    # Make sure the next loader applies if the first fails
+    assert reg.load('hello.cx1') == 'cx'
+    # Make sure the format override works
+    assert reg.load('hello.cx1',format='.cx.gz') == 'new_cx'
+    # Make sure the format override works
+    assert reg.load('hello.cx1',format='new_cx') == 'new_cx'
+    # Make sure the case of all loaders failing is correct
+    try: reg.load('hello.cx2')
+    except CxError: pass # correct failure
+    else: raise AssertError,"Incorrect error on load failure"
+    # Make sure the case of no loaders fails correctly
+    try: reg.load('hello.missing')
+    except ValueError,msg:
+        assert str(msg)=="Unknown file type for hello.missing",'Message: <%s>'%(msg)
+    else: raise AssertError,"No error raised for missing extension"
+    assert reg.formats() == ['new_cx']
+    assert reg.extensions() == ['.cx','.cx.gz','.cx1','.cx1.gz','.cx2','.gz']
+    # make sure that it supports multiple '.' in filename
+    assert reg.load('hello.extra.cx1') == 'cx'
+    assert reg.load('hello.gz') == 'gunzip'
+    assert reg.load('hello.cx1.gz') == 'gunzip' # Since .cx1.gz fails
+
+if __name__ == "__main__": test()
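The test() function restored above documents the registry's contract. A self-contained sketch of that contract, which both revisions share (MiniRegistry is illustrative, not part of the changeset):

# The most recently registered loader for an extension is tried first, and
# the next one gets a chance if it raises.
class MiniRegistry(object):
    def __init__(self):
        self.loaders = {}

    def __setitem__(self, ext, loader):
        self.loaders.setdefault(ext, []).insert(0, loader)

    def load(self, path):
        # Collect loaders for every registered extension that matches.
        matches = [fn for ext, fns in self.loaders.items()
                   if path.endswith(ext) for fn in fns]
        if not matches:
            raise ValueError("Unknown file type for " + path)
        error = None
        for fn in matches:
            try:
                return fn(path)
            except Exception as exc:
                error = exc  # give the next loader a chance to succeed
        raise error

reg = MiniRegistry()
reg['.cx'] = lambda path: 'cx'
reg['.cx'] = lambda path: 'new_cx'
assert reg.load('hello.cx') == 'new_cx'  # last registration wins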
src/sas/sascalc/dataloader/data_info.py
ra1b8fee → ra1b8fee

         return return_string
 
-    if hasattr(data, "xmax"):
-        final_dataset.xmax = data.xmax
-    if hasattr(data, "ymax"):
-        final_dataset.ymax = data.ymax
-    if hasattr(data, "xmin"):
-        final_dataset.xmin = data.xmin
-    if hasattr(data, "ymin"):
-        final_dataset.ymin = data.ymin
+    final_dataset.xmax = data.xmax
+    final_dataset.ymax = data.ymax
+    final_dataset.xmin = data.xmin
+    final_dataset.ymin = data.ymin
     final_dataset.isSesans = datainfo.isSesans
     final_dataset.title = datainfo.title
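The left-hand side only copies axis limits that the source object actually defines. The same guard, pulled out as a standalone helper for clarity (copy_limits is a hypothetical name, not SasView code):

def copy_limits(src, dst):
    # Copy only the axis limits the source object defines, mirroring the
    # hasattr() guards on the left-hand side of the hunk above.
    for attr in ("xmin", "xmax", "ymin", "ymax"):
        if hasattr(src, attr):
            setattr(dst, attr, getattr(src, attr))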
src/sas/sascalc/dataloader/loader.py
rdcb91cf → r463e7ffc

 """
 File handler to support different file extensions.
-Uses reflectometer registry utility.
+Uses reflectometry's registry utility.
 
 The default readers are found in the 'readers' sub-module
…
 """
 #####################################################################
-# This software was developed by the University of Tennessee as part of the
-# Distributed Data Analysis of Neutron Scattering Experiments (DANSE)
-# project funded by the US National Science Foundation.
-# See the license text in license.txt
-# copyright 2008, University of Tennessee
+#This software was developed by the University of Tennessee as part of the
+#Distributed Data Analysis of Neutron Scattering Experiments (DANSE)
+#project funded by the US National Science Foundation.
+#See the license text in license.txt
+#copyright 2008, University of Tennessee
 ######################################################################
…
 # Default readers are defined in the readers sub-module
 import readers
-from loader_exceptions import NoKnownLoaderException, FileContentsException,\
-    DefaultReaderException
 from readers import ascii_reader
 from readers import cansas_reader
-from readers import cansas_reader_HDF5
 
 logger = logging.getLogger(__name__)
-
 
 class Registry(ExtensionRegistry):
…
     Readers and writers are supported.
     """
+
     def __init__(self):
         super(Registry, self).__init__()
 
-        # Writers
+        ## Writers
         self.writers = {}
 
-        # List of wildcards
+        ## List of wildcards
         self.wildcards = ['All (*.*)|*.*']
 
-        # Creation time, for testing
+        ## Creation time, for testing
         self._created = time.time()
…
         of a particular reader
 
-        Defaults to the ascii (multi-column), cansas XML, and cansas NeXuS
-        readers if no reader was registered for the file's extension.
-        """
-        # Gets set to a string if the file has an associated reader that fails
-        msg_from_reader = None
+        Defaults to the ascii (multi-column) reader
+        if no reader was registered for the file's
+        extension.
+        """
         try:
             return super(Registry, self).load(path, format=format)
-        except NoKnownLoaderException as nkl_e:
-            pass  # Try the ASCII reader
-        except FileContentsException as fc_exc:
-            # File has an associated reader but it failed.
-            # Save the error message to display later, but try the 3 default loaders
-            msg_from_reader = fc_exc.message
-        except Exception:
-            pass
-
-        # File has no associated reader, or the associated reader failed.
-        # Try the ASCII reader
-        try:
-            ascii_loader = ascii_reader.Reader()
-            return ascii_loader.read(path)
-        except DefaultReaderException:
-            pass  # Loader specific error to try the cansas XML reader
-        except FileContentsException as e:
-            if msg_from_reader is None:
-                raise RuntimeError(e.message)
-
-        # ASCII reader failed - try CanSAS XML reader
-        try:
-            cansas_loader = cansas_reader.Reader()
-            return cansas_loader.read(path)
-        except DefaultReaderException:
-            pass  # Loader specific error to try the NXcanSAS reader
-        except FileContentsException as e:
-            if msg_from_reader is None:
-                raise RuntimeError(e.message)
-        except Exception:
-            pass
-
-        # CanSAS XML reader failed - try NXcanSAS reader
-        try:
-            cansas_nexus_loader = cansas_reader_HDF5.Reader()
-            return cansas_nexus_loader.read(path)
-        except DefaultReaderException as e:
-            logging.error("No default loader can load the data")
-            # No known reader available. Give up and throw an error
-            if msg_from_reader is None:
-                msg = "\nUnknown data format: {}.\nThe file is not a ".format(path)
-                msg += "known format that can be loaded by SasView.\n"
-                raise NoKnownLoaderException(msg)
-            else:
-                # Associated reader and default readers all failed.
-                # Show error message from associated reader
-                raise RuntimeError(msg_from_reader)
-        except FileContentsException as e:
-            err_msg = msg_from_reader if msg_from_reader is not None else e.message
-            raise RuntimeError(err_msg)
+        except:
+            try:
+                # No reader was found. Default to the ascii reader.
+                ascii_loader = ascii_reader.Reader()
+                return ascii_loader.read(path)
+            except:
+                cansas_loader = cansas_reader.Reader()
+                return cansas_loader.read(path)
 
     def find_plugins(self, dir):
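Both revisions fall back through default readers when the registered reader fails; the left-hand side adds NXcanSAS to the cascade and distinguishes "wrong format" from "right format, bad contents". A sketch of that distinction under the exception classes named in the diff (the function name and bodies here are illustrative, not SasView code):

class FileContentsException(Exception): pass
class DefaultReaderException(Exception): pass

def load_with_defaults(path, readers):
    # readers: callables tried in order, e.g. ascii, canSAS XML, NXcanSAS.
    saved_msg = None
    for reader in readers:
        try:
            return reader(path)
        except FileContentsException as exc:
            # A reader recognised the format but the contents were bad;
            # remember the first such message for the final error.
            saved_msg = saved_msg or str(exc)
        except DefaultReaderException:
            pass  # not this reader's format; try the next one
    raise RuntimeError(saved_msg or "Unknown data format: %s" % path)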
src/sas/sascalc/dataloader/readers/__init__.py
r7a5d066 → r959eb01

-# Method to associate extensions to default readers
+# Backward compatibility with the previous implementation of the default readers
+from associations import register_readers
+
+# Method to associate extensions to default readers
 from associations import read_associations
-
 
 # Method to return the location of the XML settings file
src/sas/sascalc/dataloader/readers/abs_reader.py
rad92c5a → r959eb01

 """
-IGOR 1D data reader
 """
 #####################################################################
-# This software was developed by the University of Tennessee as part of the
-# Distributed Data Analysis of Neutron Scattering Experiments (DANSE)
-# project funded by the US National Science Foundation.
-# See the license text in license.txt
-# copyright 2008, University of Tennessee
+#This software was developed by the University of Tennessee as part of the
+#Distributed Data Analysis of Neutron Scattering Experiments (DANSE)
+#project funded by the US National Science Foundation.
+#See the license text in license.txt
+#copyright 2008, University of Tennessee
 ######################################################################
 
-import logging
 import numpy as np
-from sas.sascalc.dataloader.file_reader_base_class import FileReader
-from sas.sascalc.dataloader.data_info import DataInfo, plottable_1D, Data1D,\
-    Detector
-from sas.sascalc.dataloader.loader_exceptions import FileContentsException,\
-    DefaultReaderException
-
-logger = logging.getLogger(__name__)
-
-
-class Reader(FileReader):
+import os
+from sas.sascalc.dataloader.data_info import Data1D
+from sas.sascalc.dataloader.data_info import Detector
+
+has_converter = True
+try:
+    from sas.sascalc.data_util.nxsunit import Converter
+except:
+    has_converter = False
+
+
+class Reader:
     """
     Class to load IGOR reduced .ABS files
     """
-    # File type
+    ## File type
     type_name = "IGOR 1D"
-    # Wildcards
+    ## Wildcards
     type = ["IGOR 1D files (*.abs)|*.abs"]
-    # List of allowed extensions
-    ext = ['.abs']
-
-    def get_file_contents(self):
-        """
-        Get the contents of the file
+    ## List of allowed extensions
+    ext = ['.abs', '.ABS']
+
+    def read(self, path):
+        """
+        Load data file.
+
+        :param path: file path
+
+        :return: Data1D object, or None
 
         :raise RuntimeError: when the file can't be opened
         :raise ValueError: when the length of the data vectors are inconsistent
         """
-        buff = self.f_open.read()
-        filepath = self.f_open.name
-        lines = buff.splitlines()
-        self.has_converter = True
-        try:
-            from sas.sascalc.data_util.nxsunit import Converter
-        except:
-            self.has_converter = False
-        self.output = []
-        self.current_datainfo = DataInfo()
-        self.current_datainfo.filename = filepath
-        self.reset_data_list(len(lines))
-        detector = Detector()
-        data_line = 0
-        self.reset_data_list(len(lines))
-        self.current_datainfo.detector.append(detector)
-        self.current_datainfo.filename = filepath
-
-        is_info = False
-        is_center = False
-        is_data_started = False
-
-        base_q_unit = '1/A'
-        base_i_unit = '1/cm'
-        data_conv_q = Converter(base_q_unit)
-        data_conv_i = Converter(base_i_unit)
+        if os.path.isfile(path):
+            basename = os.path.basename(path)
+            root, extension = os.path.splitext(basename)
+            if extension.lower() in self.ext:
+                try:
+                    input_f = open(path,'r')
+                except:
+                    raise RuntimeError, "abs_reader: cannot open %s" % path
+                buff = input_f.read()
+                lines = buff.split('\n')
+                x = np.zeros(0)
+                y = np.zeros(0)
+                dy = np.zeros(0)
+                dx = np.zeros(0)
+                output = Data1D(x, y, dy=dy, dx=dx)
+                detector = Detector()
+                output.detector.append(detector)
+                output.filename = basename
+
+                is_info = False
+                is_center = False
+                is_data_started = False
+
+                data_conv_q = None
+                data_conv_i = None
+
+                if has_converter == True and output.x_unit != '1/A':
+                    data_conv_q = Converter('1/A')
+                    # Test it
+                    data_conv_q(1.0, output.x_unit)
+
+                if has_converter == True and output.y_unit != '1/cm':
+                    data_conv_i = Converter('1/cm')
+                    # Test it
+                    data_conv_i(1.0, output.y_unit)
…
         for line in lines:
             # Information line 1
             if is_info:
                 is_info = False
                 line_toks = line.split()
 
                 # Wavelength in Angstrom
                 try:
                     value = float(line_toks[1])
-                    if self.has_converter and \
-                            self.current_datainfo.source.wavelength_unit != 'A':
+                    if has_converter == True and \
+                            output.source.wavelength_unit != 'A':
                         conv = Converter('A')
-                        self.current_datainfo.source.wavelength = conv(value,
-                            units=self.current_datainfo.source.wavelength_unit)
+                        output.source.wavelength = conv(value,
+                            units=output.source.wavelength_unit)
                     else:
-                        self.current_datainfo.source.wavelength = value
-                except KeyError:
-                    msg = "ABSReader cannot read wavelength from %s" % filepath
-                    self.current_datainfo.errors.append(msg)
+                        output.source.wavelength = value
+                except:
+                    #goes to ASC reader
+                    msg = "abs_reader: cannot open %s" % path
+                    raise RuntimeError, msg
…
                 # Bin size
-                if self.has_converter and detector.pixel_size_unit != 'mm':
+                if has_converter == True and \
+                        detector.pixel_size_unit != 'mm':
                     conv = Converter('mm')
-                    detector.pixel_size.x = conv(5.08,
+                    detector.pixel_size.x = conv(5.0,
                                                  units=detector.pixel_size_unit)
-                    detector.pixel_size.y = conv(5.08,
+                    detector.pixel_size.y = conv(5.0,
                                                  units=detector.pixel_size_unit)
                 else:
-                    detector.pixel_size.x = 5.08
-                    detector.pixel_size.y = 5.08
+                    detector.pixel_size.x = 5.0
+                    detector.pixel_size.y = 5.0
 
                 # Store beam center in distance units
                 # Det 640 x 640 mm
-                if self.has_converter and detector.beam_center_unit != 'mm':
+                if has_converter == True and \
+                        detector.beam_center_unit != 'mm':
                     conv = Converter('mm')
-                    detector.beam_center.x = conv(center_x * 5.08,
+                    detector.beam_center.x = conv(center_x * 5.0,
                                                   units=detector.beam_center_unit)
-                    detector.beam_center.y = conv(center_y * 5.08,
+                    detector.beam_center.y = conv(center_y * 5.0,
                                                   units=detector.beam_center_unit)
                 else:
-                    detector.beam_center.x = center_x * 5.08
-                    detector.beam_center.y = center_y * 5.08
+                    detector.beam_center.x = center_x * 5.0
+                    detector.beam_center.y = center_y * 5.0
…
             # Parse the data
             if is_data_started:
                 toks = line.split()
                 try:
                     _x = float(toks[0])
                     _y = float(toks[1])
                     _dy = float(toks[2])
                     _dx = float(toks[3])
 
                     if data_conv_q is not None:
-                        _x = data_conv_q(_x, units=base_q_unit)
-                        _dx = data_conv_q(_dx, units=base_q_unit)
+                        _x = data_conv_q(_x, units=output.x_unit)
+                        _dx = data_conv_i(_dx, units=output.x_unit)
 
                     if data_conv_i is not None:
-                        _y = data_conv_i(_y, units=base_i_unit)
-                        _dy = data_conv_i(_dy, units=base_i_unit)
-
-                    self.current_dataset.x[data_line] = _x
-                    self.current_dataset.y[data_line] = _y
-                    self.current_dataset.dy[data_line] = _dy
-                    self.current_dataset.dx[data_line] = _dx
-                    data_line += 1
-
-                except ValueError:
+                        _y = data_conv_i(_y, units=output.y_unit)
+                        _dy = data_conv_i(_dy, units=output.y_unit)
+
+                    x = np.append(x, _x)
+                    y = np.append(y, _y)
+                    dy = np.append(dy, _dy)
+                    dx = np.append(dx, _dx)
+
+                except:
                     # Could not read this data line. If we are here
                     # it is because we are in the data section. Just
                     # skip it.
                     pass
 
             # The 6 columns are | Q (1/A) | I(Q) (1/cm) | std. dev.
             # I(Q) (1/cm) | sigmaQ | meanQ | ShadowFactor|
             if line.count("The 6 columns") > 0:
                 is_data_started = True
 
-        self.remove_empty_q_values(True, True)
-
         # Sanity check
-        if not len(self.current_dataset.y) == len(self.current_dataset.dy):
-            self.set_all_to_none()
+        if not len(y) == len(dy):
             msg = "abs_reader: y and dy have different length"
-            raise ValueError(msg)
+            raise ValueError, msg
         # If the data length is zero, consider this as
         # though we were not able to read the file.
-        if len(self.current_dataset.x) == 0:
-            self.set_all_to_none()
-            raise ValueError("ascii_reader: could not load file")
-
+        if len(x) == 0:
+            raise ValueError, "ascii_reader: could not load file"
+
+        output.x = x[x != 0]
+        output.y = y[x != 0]
+        output.dy = dy[x != 0]
+        output.dx = dx[x != 0]
         if data_conv_q is not None:
-            self.current_dataset.xaxis("\\rm{Q}", base_q_unit)
+            output.xaxis("\\rm{Q}", output.x_unit)
         else:
-            self.current_dataset.xaxis("\\rm{Q}", 'A^{-1}')
+            output.xaxis("\\rm{Q}", 'A^{-1}')
         if data_conv_i is not None:
-            self.current_dataset.yaxis("\\rm{Intensity}", base_i_unit)
+            output.yaxis("\\rm{Intensity}", output.y_unit)
         else:
-            self.current_dataset.yaxis("\\rm{Intensity}", "cm^{-1}")
+            output.yaxis("\\rm{Intensity}", "cm^{-1}")
 
         # Store loading process information
-        self.current_datainfo.meta_data['loader'] = self.type_name
-        self.send_to_output()
+        output.meta_data['loader'] = self.type_name
+        return output
+        else:
+            raise RuntimeError, "%s is not a file" % path
+        return None
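Both revisions of the ABS reader share the same line-oriented state machine: a keyword line ("LAMBDA", "BCENT", "The 6 columns") arms a flag, and the following lines are parsed accordingly. They differ mainly in storage (preallocated arrays vs. np.append) and in the assumed pixel size (5.08 mm on the left, 5.0 mm on the right). A compact sketch of the state machine (parse_abs is illustrative, not SasView code):

def parse_abs(lines):
    header = {}
    is_info = is_center = in_data = False
    points = []
    for line in lines:
        toks = line.split()
        if is_info:
            # Line after the "LAMBDA" header: wavelength and detector distance.
            is_info = False
            header['wavelength'] = float(toks[1])
            header['distance'] = float(toks[3])
        elif is_center:
            # Line after the "BCENT" header: beam center in bin numbers.
            is_center = False
            header['center'] = (float(toks[0]), float(toks[1]))
        elif in_data and len(toks) >= 4:
            try:
                # Q, I(Q), dI, dQ -- the first four of the six columns.
                points.append(tuple(float(t) for t in toks[:4]))
            except ValueError:
                pass  # non-numeric line inside the data section; skip it
        if "LAMBDA" in line:
            is_info = True
        elif "BCENT" in line:
            is_center = True
        elif "The 6 columns" in line:
            in_data = True
    return header, points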
src/sas/sascalc/dataloader/readers/anton_paar_saxs_reader.py
rfafe52a → ra235f715

 from sas.sascalc.dataloader.readers.xml_reader import XMLreader
-from sas.sascalc.dataloader.data_info import plottable_1D, Data1D, DataInfo, Sample, Source
+from sas.sascalc.dataloader.data_info import plottable_1D, Data1D, Sample, Source
 from sas.sascalc.dataloader.data_info import Process, Aperture, Collimation, TransmissionSpectrum, Detector
-from sas.sascalc.dataloader.loader_exceptions import FileContentsException, DataReaderException
 
 
 class Reader(XMLreader):
     """
-    A class for reading in Anton Paar .pdh files
+    A class for reading in CanSAS v2.0 data files. The existing iteration opens Mantid generated HDF5 formatted files
+    with file extension .h5/.H5. Any number of data sets may be present within the file and any dimensionality of data
+    may be used. Currently 1D and 2D SAS data sets are supported, but future implementations will include 1D and 2D
+    SESANS data. This class assumes a single data set for each sasentry.
+
+    :Dependencies:
+        The CanSAS HDF5 reader requires h5py v2.5.0 or later.
     """
…
     ## Raw file contents to be processed
     raw_data = None
+    ## Data set being modified
+    current_dataset = None
     ## For recursion and saving purposes, remember parent objects
     parent_list = None
…
     ## Flag to bypass extension check
     allow_all = False
+    ## List of files to return
+    output = None
 
     def reset_state(self):
-        self.current_dataset = plottable_1D(np.empty(0), np.empty(0), np.empty(0), np.empty(0))
-        self.current_datainfo = DataInfo()
+        self.current_dataset = Data1D(np.empty(0), np.empty(0),
+                                      np.empty(0), np.empty(0))
         self.datasets = []
         self.raw_data = None
…
         self.lower = 5
 
-    def get_file_contents(self):
+    def read(self, filename):
         """
         This is the general read method that all SasView data_loaders must have.
…
         ## Reinitialize the class when loading a new data file to reset all class variables
         self.reset_state()
-        buff = self.f_open.read()
-        self.raw_data = buff.splitlines()
-        self.read_data()
+        ## Check that the file exists
+        if os.path.isfile(filename):
+            basename = os.path.basename(filename)
+            _, extension = os.path.splitext(basename)
+            # If the file type is not allowed, return empty list
+            if extension in self.ext or self.allow_all:
+                ## Load the data file
+                input_f = open(filename, 'r')
+                buff = input_f.read()
+                self.raw_data = buff.splitlines()
+                self.read_data()
+        return self.output
 
     def read_data(self):
-        correctly_loaded = True
-        error_message = ""
-
         q_unit = "1/nm"
         i_unit = "1/um^2"
-        try:
-            self.current_datainfo.title = self.raw_data[0]
-            self.current_datainfo.meta_data["Keywords"] = self.raw_data[1]
-            line3 = self.raw_data[2].split()
-            line4 = self.raw_data[3].split()
-            line5 = self.raw_data[4].split()
-            self.data_points = int(line3[0])
-            self.lower = 5
-            self.upper = self.lower + self.data_points
-            self.source.radiation = 'x-ray'
-            normal = float(line4[3])
-            self.current_datainfo.source.radiation = "x-ray"
-            self.current_datainfo.source.name = "Anton Paar SAXSess Instrument"
-            self.current_datainfo.source.wavelength = float(line4[4])
-            xvals = []
-            yvals = []
-            dyvals = []
-            for i in range(self.lower, self.upper):
-                index = i - self.lower
-                data = self.raw_data[i].split()
-                xvals.insert(index, normal * float(data[0]))
-                yvals.insert(index, normal * float(data[1]))
-                dyvals.insert(index, normal * float(data[2]))
-        except Exception as e:
-            error_message = "Couldn't load {}.\n".format(self.f_open.name)
-            error_message += e.message
-            raise FileContentsException(error_message)
+        self.current_dataset.title = self.raw_data[0]
+        self.current_dataset.meta_data["Keywords"] = self.raw_data[1]
+        line3 = self.raw_data[2].split()
+        line4 = self.raw_data[3].split()
+        line5 = self.raw_data[4].split()
+        self.data_points = int(line3[0])
+        self.lower = 5
+        self.upper = self.lower + self.data_points
+        self.source.radiation = 'x-ray'
+        normal = float(line4[3])
+        self.current_dataset.source.radiation = "x-ray"
+        self.current_dataset.source.name = "Anton Paar SAXSess Instrument"
+        self.current_dataset.source.wavelength = float(line4[4])
+        xvals = []
+        yvals = []
+        dyvals = []
+        for i in range(self.lower, self.upper):
+            index = i - self.lower
+            data = self.raw_data[i].split()
+            xvals.insert(index, normal * float(data[0]))
+            yvals.insert(index, normal * float(data[1]))
+            dyvals.insert(index, normal * float(data[2]))
         self.current_dataset.x = np.append(self.current_dataset.x, xvals)
         self.current_dataset.y = np.append(self.current_dataset.y, yvals)
         self.current_dataset.dy = np.append(self.current_dataset.dy, dyvals)
         if self.data_points != self.current_dataset.x.size:
-            error_message += "Not all data points could be loaded.\n"
-            correctly_loaded = False
+            self.errors.add("Not all data was loaded properly.")
+        if self.current_dataset.dx.size != self.current_dataset.x.size:
+            dxvals = np.zeros(self.current_dataset.x.size)
+            self.current_dataset.dx = dxvals
         if self.current_dataset.x.size != self.current_dataset.y.size:
-            error_message += "The x and y data sets are not the same size.\n"
-            correctly_loaded = False
+            self.errors.add("The x and y data sets are not the same size.")
         if self.current_dataset.y.size != self.current_dataset.dy.size:
-            error_message += "The y and dy datasets are not the same size.\n"
-            correctly_loaded = False
-
+            self.errors.add("The y and dy datasets are not the same size.")
+        self.current_dataset.errors = self.errors
         self.current_dataset.xaxis("Q", q_unit)
         self.current_dataset.yaxis("Intensity", i_unit)
         xml_intermediate = self.raw_data[self.upper:]
         xml = ''.join(xml_intermediate)
-        try:
-            self.set_xml_string(xml)
-            dom = self.xmlroot.xpath('/fileinfo')
-            self._parse_child(dom)
-        except Exception as e:
-            # Data loaded but XML metadata has an error
-            error_message += "Data points have been loaded but there was an "
-            error_message += "error reading XML metadata: " + e.message
-            correctly_loaded = False
-        self.send_to_output()
-        if not correctly_loaded:
-            raise DataReaderException(error_message)
+        self.set_xml_string(xml)
+        dom = self.xmlroot.xpath('/fileinfo')
+        self._parse_child(dom)
+        self.output.append(self.current_dataset)
 
     def _parse_child(self, dom, parent=''):
…
             if key == "SampleDetector":
-                self.current_datainfo.detector.append(self.detector)
+                self.current_dataset.detector.append(self.detector)
                 self.detector = Detector()
             else:
                 if key == "value":
                     if parent == "Wavelength":
-                        self.current_datainfo.source.wavelength = value
+                        self.current_dataset.source.wavelength = value
                     elif parent == "SampleDetector":
                         self.detector.distance = value
                     elif parent == "Temperature":
-                        self.current_datainfo.sample.temperature = value
+                        self.current_dataset.sample.temperature = value
                     elif parent == "CounterSlitLength":
                         self.detector.slit_length = value
…
                     value = value.replace("_", "")
                     if parent == "Wavelength":
-                        self.current_datainfo.source.wavelength_unit = value
+                        self.current_dataset.source.wavelength_unit = value
                     elif parent == "SampleDetector":
                         self.detector.distance_unit = value
…
                         self.current_dataset.yaxis(self.current_dataset._yaxis, value)
                     elif parent == "Temperature":
-                        self.current_datainfo.sample.temperature_unit = value
+                        self.current_dataset.sample.temperature_unit = value
                     elif parent == "CounterSlitLength":
                         self.detector.slit_length_unit = value
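The .pdh layout both revisions parse is visible in read_data(): a title line, a keyword line, a counts line whose first token is the number of points, a fourth line carrying a normalisation factor and wavelength, then the data block starting at line 5. A standalone sketch (parse_pdh is illustrative, not SasView code):

def parse_pdh(lines):
    # lines: the file split into lines, as self.raw_data is above.
    npts = int(lines[2].split()[0])
    line4 = lines[3].split()
    normal, wavelength = float(line4[3]), float(line4[4])
    data = []
    for raw in lines[5:5 + npts]:
        q, i, di = (float(t) for t in raw.split()[:3])
        # Every column is scaled by the normalisation factor.
        data.append((normal * q, normal * i, normal * di))
    return wavelength, data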
src/sas/sascalc/dataloader/readers/ascii_reader.py
rf994e8b1 → r235f514

 """
-Generic multi-column ASCII data reader
+ASCII reader
 """
 ############################################################################
-# This software was developed by the University of Tennessee as part of the
-# Distributed Data Analysis of Neutron Scattering Experiments (DANSE)
-# project funded by the US National Science Foundation.
-# If you use DANSE applications to do scientific research that leads to
-# publication, we ask that you acknowledge the use of the software with the
-# following sentence:
-# This work benefited from DANSE software developed under NSF award DMR-0520547.
-# copyright 2008, University of Tennessee
+#This software was developed by the University of Tennessee as part of the
+#Distributed Data Analysis of Neutron Scattering Experiments (DANSE)
+#project funded by the US National Science Foundation.
+#If you use DANSE applications to do scientific research that leads to
+#publication, we ask that you acknowledge the use of the software with the
+#following sentence:
+#This work benefited from DANSE software developed under NSF award DMR-0520547.
+#copyright 2008, University of Tennessee
 #############################################################################
 
-import logging
-from sas.sascalc.dataloader.file_reader_base_class import FileReader
-from sas.sascalc.dataloader.data_info import DataInfo, plottable_1D
-from sas.sascalc.dataloader.loader_exceptions import FileContentsException,\
-    DefaultReaderException
-
-logger = logging.getLogger(__name__)
-
-
-class Reader(FileReader):
+
+import numpy as np
+import os
+from sas.sascalc.dataloader.data_info import Data1D
+
+# Check whether we have a converter available
+has_converter = True
+try:
+    from sas.sascalc.data_util.nxsunit import Converter
+except:
+    has_converter = False
+_ZERO = 1e-16
+
+
+class Reader:
     """
     Class to load ascii files (2, 3 or 4 columns).
     """
-    # File type
+    ## File type
     type_name = "ASCII"
-    # Wildcards
+
+    ## Wildcards
     type = ["ASCII files (*.txt)|*.txt",
             "ASCII files (*.dat)|*.dat",
             "ASCII files (*.abs)|*.abs",
             "CSV files (*.csv)|*.csv"]
-    # List of allowed extensions
-    ext = ['.txt', '.dat', '.abs', '.csv']
-    # Flag to bypass extension check
+    ## List of allowed extensions
+    ext = ['.txt', '.TXT', '.dat', '.DAT', '.abs', '.ABS', 'csv', 'CSV']
+
+    ## Flag to bypass extension check
     allow_all = True
-    # More than "5" lines of data is considered as actual
-    # data unless that is the only data
-    min_data_pts = 5
-
-    def get_file_contents(self):
-        """
-        Get the contents of the file
-        """
-
-        buff = self.f_open.read()
-        filepath = self.f_open.name
-        lines = buff.splitlines()
-        self.output = []
-        self.current_datainfo = DataInfo()
-        self.current_datainfo.filename = filepath
-        self.reset_data_list(len(lines))
-
-        # The first good line of data will define whether
-        # we have 2-column or 3-column ascii
-        has_error_dx = None
-        has_error_dy = None
-
-        # Initialize counters for data lines and header lines.
-        is_data = False
-        # To count # of current data candidate lines
-        candidate_lines = 0
-        # To count total # of previous data candidate lines
-        candidate_lines_previous = 0
-        # Current line number
-        line_no = 0
-        # minimum required number of columns of data
-        lentoks = 2
-        for line in lines:
-            toks = self.splitline(line.strip())
-            # To remember the number of columns in the current line of data
-            new_lentoks = len(toks)
-            try:
-                if new_lentoks == 0:
-                    # If the line is blank, skip and continue on
-                    # In case of breaks within data sets.
-                    continue
-                elif new_lentoks != lentoks and is_data:
-                    # If a footer is found, break the loop and save the data
-                    break
-                elif new_lentoks != lentoks and not is_data:
-                    # If header lines are numerical
-                    candidate_lines = 0
-                    self.reset_data_list(len(lines) - line_no)
-
-                self.current_dataset.x[candidate_lines] = float(toks[0])
-
-                if new_lentoks > 1:
-                    self.current_dataset.y[candidate_lines] = float(toks[1])
-
-                # If a 3rd row is present, consider it dy
-                if new_lentoks > 2:
-                    self.current_dataset.dy[candidate_lines] = \
-                        float(toks[2])
-                    has_error_dy = True
-
-                # If a 4th row is present, consider it dx
-                if new_lentoks > 3:
-                    self.current_dataset.dx[candidate_lines] = \
-                        float(toks[3])
-                    has_error_dx = True
-
-                candidate_lines += 1
-                # If 5 or more lines, this is considering the set data
-                if candidate_lines >= self.min_data_pts:
-                    is_data = True
-
-                if is_data and new_lentoks >= 8:
-                    msg = "This data looks like 2D ASCII data. Use the file "
-                    msg += "converter tool to convert it to NXcanSAS."
-                    raise FileContentsException(msg)
-
-                # To remember the # of columns on the current line
-                # for the next line of data
-                lentoks = new_lentoks
-                line_no += 1
-            except ValueError:
-                # ValueError is raised when non numeric strings conv. to float
-                # It is data and meet non - number, then stop reading
-                if is_data:
-                    break
-                # Delete the previously stored lines of data candidates if
-                # the list is not data
-                self.reset_data_list(len(lines) - line_no)
-                lentoks = 2
-                has_error_dx = None
-                has_error_dy = None
-                # Reset # of lines of data candidates
-                candidate_lines = 0
-
-        if not is_data:
-            self.set_all_to_none()
-            if self.extension in self.ext:
-                msg = "ASCII Reader error: Fewer than five Q data points found "
-                msg += "in {}.".format(filepath)
-                raise FileContentsException(msg)
-            else:
-                msg = "ASCII Reader could not load the file {}".format(filepath)
-                raise DefaultReaderException(msg)
-        # Sanity check
-        if has_error_dy and not len(self.current_dataset.y) == \
-                len(self.current_dataset.dy):
-            msg = "ASCII Reader error: Number of I and dI data points are"
-            msg += " different in {}.".format(filepath)
-            # TODO: Add error to self.current_datainfo.errors instead?
-            self.set_all_to_none()
-            raise FileContentsException(msg)
-        if has_error_dx and not len(self.current_dataset.x) == \
-                len(self.current_dataset.dx):
-            msg = "ASCII Reader error: Number of Q and dQ data points are"
-            msg += " different in {}.".format(filepath)
-            # TODO: Add error to self.current_datainfo.errors instead?
-            self.set_all_to_none()
-            raise FileContentsException(msg)
-
-        self.remove_empty_q_values(has_error_dx, has_error_dy)
-        self.current_dataset.xaxis("\\rm{Q}", 'A^{-1}')
-        self.current_dataset.yaxis("\\rm{Intensity}", "cm^{-1}")
-
-        # Store loading process information
-        self.current_datainfo.meta_data['loader'] = self.type_name
-        self.send_to_output()
+
+    def read(self, path):
+        """
+        Load data file
+
+        :param path: file path
+        :return: Data1D object, or None
+
+        :raise RuntimeError: when the file can't be opened
+        :raise ValueError: when the length of the data vectors are inconsistent
+        """
+        if os.path.isfile(path):
+            basename = os.path.basename(path)
+            _, extension = os.path.splitext(basename)
+            if self.allow_all or extension.lower() in self.ext:
+                try:
+                    # Read in binary mode since GRASP frequently has no-ascii
+                    # characters that breaks the open operation
+                    input_f = open(path,'rb')
+                except:
+                    raise RuntimeError, "ascii_reader: cannot open %s" % path
+                buff = input_f.read()
+                lines = buff.splitlines()
+
+                # Arrays for data storage
+                tx = np.zeros(0)
+                ty = np.zeros(0)
+                tdy = np.zeros(0)
+                tdx = np.zeros(0)
+
+                # The first good line of data will define whether
+                # we have 2-column or 3-column ascii
+                has_error_dx = None
+                has_error_dy = None
+
+                #Initialize counters for data lines and header lines.
+                is_data = False
+                # More than "5" lines of data is considered as actual
+                # data unless that is the only data
+                min_data_pts = 5
+                # To count # of current data candidate lines
+                candidate_lines = 0
+                # To count total # of previous data candidate lines
+                candidate_lines_previous = 0
+                #minimum required number of columns of data
+                lentoks = 2
+                for line in lines:
+                    toks = self.splitline(line)
+                    # To remember the # of columns in the current line of data
+                    new_lentoks = len(toks)
+                    try:
+                        if new_lentoks == 1 and not is_data:
+                            ## If only one item in list, no longer data
+                            raise ValueError
+                        elif new_lentoks == 0:
+                            ## If the line is blank, skip and continue on
+                            ## In case of breaks within data sets.
+                            continue
+                        elif new_lentoks != lentoks and is_data:
+                            ## If a footer is found, break the loop and save the data
+                            break
+                        elif new_lentoks != lentoks and not is_data:
+                            ## If header lines are numerical
+                            candidate_lines = 0
+                            candidate_lines_previous = 0
+
+                        #Make sure that all columns are numbers.
+                        for colnum in range(len(toks)):
+                            # Any non-floating point values throw ValueError
+                            float(toks[colnum])
+
+                        candidate_lines += 1
+                        _x = float(toks[0])
+                        _y = float(toks[1])
+                        _dx = None
+                        _dy = None
+
+                        #If 5 or more lines, this is considering the set data
+                        if candidate_lines >= min_data_pts:
+                            is_data = True
+
+                        # If a 3rd row is present, consider it dy
+                        if new_lentoks > 2:
+                            _dy = float(toks[2])
+                        has_error_dy = False if _dy is None else True
+
+                        # If a 4th row is present, consider it dx
+                        if new_lentoks > 3:
+                            _dx = float(toks[3])
+                        has_error_dx = False if _dx is None else True
+
+                        # Delete the previously stored lines of data candidates if
+                        # the list is not data
+                        if candidate_lines == 1 and -1 < candidate_lines_previous < min_data_pts and \
+                            is_data == False:
+                            try:
+                                tx = np.zeros(0)
+                                ty = np.zeros(0)
+                                tdy = np.zeros(0)
+                                tdx = np.zeros(0)
+                            except:
+                                pass
+
+                        if has_error_dy == True:
+                            tdy = np.append(tdy, _dy)
+                        if has_error_dx == True:
+                            tdx = np.append(tdx, _dx)
+                        tx = np.append(tx, _x)
+                        ty = np.append(ty, _y)
+
+                        #To remember the # of columns on the current line
+                        # for the next line of data
+                        lentoks = new_lentoks
+                        candidate_lines_previous = candidate_lines
+                    except ValueError:
+                        # It is data and meet non - number, then stop reading
+                        if is_data == True:
+                            break
+                        lentoks = 2
+                        has_error_dx = None
+                        has_error_dy = None
+                        #Reset # of lines of data candidates
+                        candidate_lines = 0
+                    except:
+                        pass
+
+                input_f.close()
+                if not is_data:
+                    msg = "ascii_reader: x has no data"
+                    raise RuntimeError, msg
+                # Sanity check
+                if has_error_dy == True and not len(ty) == len(tdy):
+                    msg = "ascii_reader: y and dy have different length"
+                    raise RuntimeError, msg
+                if has_error_dx == True and not len(tx) == len(tdx):
+                    msg = "ascii_reader: y and dy have different length"
+                    raise RuntimeError, msg
+                # If the data length is zero, consider this as
+                # though we were not able to read the file.
+                if len(tx) == 0:
+                    raise RuntimeError, "ascii_reader: could not load file"
+
+                #Let's re-order the data to make cal.
+                # curve look better some cases
+                ind = np.lexsort((ty, tx))
+                x = np.zeros(len(tx))
+                y = np.zeros(len(ty))
+                dy = np.zeros(len(tdy))
+                dx = np.zeros(len(tdx))
+                output = Data1D(x, y, dy=dy, dx=dx)
+                self.filename = output.filename = basename
+
+                for i in ind:
+                    x[i] = tx[ind[i]]
+                    y[i] = ty[ind[i]]
+                    if has_error_dy == True:
+                        dy[i] = tdy[ind[i]]
+                    if has_error_dx == True:
+                        dx[i] = tdx[ind[i]]
+                # Zeros in dx, dy
+                if has_error_dx:
+                    dx[dx == 0] = _ZERO
+                if has_error_dy:
+                    dy[dy == 0] = _ZERO
+                #Data
+                output.x = x[x != 0]
+                output.y = y[x != 0]
+                output.dy = dy[x != 0] if has_error_dy == True\
+                    else np.zeros(len(output.y))
+                output.dx = dx[x != 0] if has_error_dx == True\
+                    else np.zeros(len(output.x))
+
+                output.xaxis("\\rm{Q}", 'A^{-1}')
+                output.yaxis("\\rm{Intensity}", "cm^{-1}")
+
+                # Store loading process information
+                output.meta_data['loader'] = self.type_name
+                if len(output.x) < 1:
+                    raise RuntimeError, "%s is empty" % path
+                return output
+
+        else:
+            raise RuntimeError, "%s is not a file" % path
+        return None
+
+    def splitline(self, line):
+        """
+        Splits a line into pieces based on common delimeters
+        :param line: A single line of text
+        :return: list of values
+        """
+        # Initial try for CSV (split on ,)
+        toks = line.split(',')
+        # Now try SCSV (split on ;)
+        if len(toks) < 2:
+            toks = line.split(';')
+        # Now go for whitespace
+        if len(toks) < 2:
+            toks = line.split()
+        return toks
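The splitline() helper restored above tries delimiters from most to least specific. Shown standalone with a quick check:

# Try commas first (CSV), then semicolons (SCSV), then any whitespace.
def splitline(line):
    toks = line.split(',')
    if len(toks) < 2:
        toks = line.split(';')
    if len(toks) < 2:
        toks = line.split()
    return toks

assert splitline("0.01, 12.5, 0.3") == ['0.01', ' 12.5', ' 0.3']
assert splitline("0.01 12.5 0.3") == ['0.01', '12.5', '0.3']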
src/sas/sascalc/dataloader/readers/associations.py
rce8c7bd → ra1b8fee

 #copyright 2009, University of Tennessee
 #############################################################################
+from __future__ import print_function
+
+import os
 import sys
 import logging
+import json
 
 logger = logging.getLogger(__name__)
 
-FILE_ASSOCIATIONS = {
-    ".xml": "cansas_reader",
-    ".ses": "sesans_reader",
-    ".h5": "cansas_reader_HDF5",
-    ".txt": "ascii_reader",
-    ".dat": "red2d_reader",
-    ".abs": "abs_reader",
-    ".sans": "danse_reader",
-    ".pdh": "anton_paar_saxs_reader"
-}
-
-
-def read_associations(loader, settings=FILE_ASSOCIATIONS):
+FILE_NAME = 'defaults.json'
+
+def read_associations(loader, settings=FILE_NAME):
     """
     Read the specified settings file to associate
     default readers to file extension.
 
     :param loader: Loader object
     :param settings: path to the json settings file [string]
     """
-    # For each FileType entry, get the associated reader and extension
-    for ext, reader in settings.iteritems():
-        if reader is not None and ext is not None:
-            # Associate the extension with a particular reader
-            # TODO: Modify the Register code to be case-insensitive
-            # FIXME: Remove exec statements
-            # and remove the extra line below.
-            try:
-                exec "import %s" % reader
-                exec "loader.associate_file_type('%s', %s)" % (ext.lower(),
-                                                               reader)
-                exec "loader.associate_file_type('%s', %s)" % (ext.upper(),
-                                                               reader)
-            except:
-                msg = "read_associations: skipping association"
-                msg += " for %s\n %s" % (ext.lower(), sys.exc_value)
-                logger.error(msg)
+    reader_dir = os.path.dirname(__file__)
+    path = os.path.join(reader_dir, settings)
+
+    # If we can't find the file in the installation
+    # directory, look into the execution directory.
+    if not os.path.isfile(path):
+        path = os.path.join(os.getcwd(), settings)
+    if not os.path.isfile(path):
+        path = os.path.join(sys.path[0], settings)
+    if not os.path.isfile(path):
+        path = settings
+    if not os.path.isfile(path):
+        path = "./%s" % settings
+    if os.path.isfile(path):
+        with open(path) as fh:
+            json_tree = json.load(fh)
+
+        # Read in the file extension associations
+        entry_list = json_tree['SasLoader']['FileType']
+
+        # For each FileType entry, get the associated reader and extension
+        for entry in entry_list:
+            reader = entry['-reader']
+            ext = entry['-extension']
+
+            if reader is not None and ext is not None:
+                # Associate the extension with a particular reader
+                # TODO: Modify the Register code to be case-insensitive
+                # and remove the extra line below.
+                try:
+                    exec "import %s" % reader
+                    exec "loader.associate_file_type('%s', %s)" % (ext.lower(),
+                                                                   reader)
+                    exec "loader.associate_file_type('%s', %s)" % (ext.upper(),
+                                                                   reader)
+                except:
+                    msg = "read_associations: skipping association"
+                    msg += " for %s\n %s" % (ext.lower(), sys.exc_value)
+                    logger.error(msg)
+    else:
+        print("Could not find reader association settings\n %s [%s]" % (__file__, os.getcwd()))
+
+
+def register_readers(registry_function):
+    """
+    Function called by the registry/loader object to register
+    all default readers using a call back function.
+
+    :WARNING: this method is now obsolete
+
+    :param registry_function: function to be called to register each reader
+    """
+    logger.info("register_readers is now obsolete: use read_associations()")
+    import abs_reader
+    import ascii_reader
+    import cansas_reader
+    import danse_reader
+    import hfir1d_reader
+    import IgorReader
+    import red2d_reader
+    #import tiff_reader
+    import nexus_reader
+    import sesans_reader
+    import cansas_reader_HDF5
+    import anton_paar_saxs_reader
+    registry_function(sesans_reader)
+    registry_function(abs_reader)
+    registry_function(ascii_reader)
+    registry_function(cansas_reader)
+    registry_function(danse_reader)
+    registry_function(hfir1d_reader)
+    registry_function(IgorReader)
+    registry_function(red2d_reader)
+    #registry_function(tiff_reader)
+    registry_function(nexus_reader)
+    registry_function(cansas_reader_HDF5)
+    registry_function(anton_paar_saxs_reader)
+    return True
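The json-reading branch walks json_tree['SasLoader']['FileType'] and pulls '-reader' and '-extension' from each entry, which implies a defaults.json shaped roughly like the sketch below. The actual file is not shown in this changeset, so the entries are illustrative:

import json

DEFAULTS = """
{
  "SasLoader": {
    "FileType": [
      {"-extension": ".xml", "-reader": "cansas_reader"},
      {"-extension": ".txt", "-reader": "ascii_reader"}
    ]
  }
}
"""

for entry in json.loads(DEFAULTS)['SasLoader']['FileType']:
    print(entry['-extension'], '->', entry['-reader'])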
src/sas/sascalc/dataloader/readers/cansas_reader.py
rdcb91cf → r7432acb

+"""
+    CanSAS data reader - new recursive cansas_version.
+"""
+############################################################################
+#This software was developed by the University of Tennessee as part of the
+#Distributed Data Analysis of Neutron Scattering Experiments (DANSE)
+#project funded by the US National Science Foundation.
+#If you use DANSE applications to do scientific research that leads to
+#publication, we ask that you acknowledge the use of the software with the
+#following sentence:
+#This work benefited from DANSE software developed under NSF award DMR-0520547.
+#copyright 2008,2009 University of Tennessee
+#############################################################################
+
 import logging
 import numpy as np
…
 from sas.sascalc.dataloader.readers.xml_reader import XMLreader
 from sas.sascalc.dataloader.readers.cansas_constants import CansasConstants, CurrentLevel
-from sas.sascalc.dataloader.loader_exceptions import FileContentsException, \
-    DefaultReaderException, DataReaderException
 
 # The following 2 imports *ARE* used. Do not remove either.
 import xml.dom.minidom
 from xml.dom.minidom import parseString
-
-from lxml import etree
 
 logger = logging.getLogger(__name__)
…
 class Reader(XMLreader):
+    """
+    Class to load cansas 1D XML files
+
+    :Dependencies:
+        The CanSAS reader requires PyXML 0.8.4 or later.
+    """
+    # CanSAS version - defaults to version 1.0
     cansas_version = "1.0"
     base_ns = "{cansas1d/1.0}"
…
     ns_list = None
     # Temporary storage location for loading multiple data sets in a single file
+    current_datainfo = None
+    current_dataset = None
     current_data1d = None
     data = None
+    # List of data1D objects to be sent back to SasView
+    output = None
     # Wildcards
     type = ["XML files (*.xml)|*.xml", "SasView Save Files (*.svs)|*.svs"]
…
     def read(self, xml_file, schema_path="", invalid=True):
-        if schema_path != "" or invalid != True:
-            # read has been called from self.get_file_contents because xml file doens't conform to schema
-            _, self.extension = os.path.splitext(os.path.basename(xml_file))
-            return self.get_file_contents(xml_file=xml_file, schema_path=schema_path, invalid=invalid)
-
-        # Otherwise, read has been called by the data loader - file_reader_base_class handles this
-        return super(XMLreader, self).read(xml_file)
-
-    def get_file_contents(self, xml_file=None, schema_path="", invalid=True):
-        # Reset everything since we're loading a new file
+        """
+        Validate and read in an xml_file file in the canSAS format.
+
+        :param xml_file: A canSAS file path in proper XML format
+        :param schema_path: A file path to an XML schema to validate the xml_file against
+        """
+        # For every file loaded, reset everything to a base state
         self.reset_state()
         self.invalid = invalid
-        if xml_file is None:
-            xml_file = self.f_open.name
-        # We don't sure f_open since lxml handles opnening/closing files
-        if not self.f_open.closed:
-            self.f_open.close()
-
-        basename, _ = os.path.splitext(os.path.basename(xml_file))
-
-        try:
-            # Raises FileContentsException
-            self.load_file_and_schema(xml_file, schema_path)
-            self.current_datainfo = DataInfo()
-            # Raises FileContentsException if file doesn't meet CanSAS schema
-            self.is_cansas(self.extension)
-            self.invalid = False # If we reach this point then file must be valid CanSAS
-
-            # Parse each SASentry
-            entry_list = self.xmlroot.xpath('/ns:SASroot/ns:SASentry', namespaces={
-                'ns': self.cansas_defaults.get("ns")
-            })
-            # Look for a SASentry
-            self.names.append("SASentry")
-            self.set_processing_instructions()
-
-            for entry in entry_list:
-                self.current_datainfo.filename = basename + self.extension
-                self.current_datainfo.meta_data["loader"] = "CanSAS XML 1D"
-                self.current_datainfo.meta_data[PREPROCESS] = self.processing_instructions
-                self._parse_entry(entry)
-                has_error_dx = self.current_dataset.dx is not None
-                has_error_dy = self.current_dataset.dy is not None
-                self.remove_empty_q_values(has_error_dx=has_error_dx,
-                                           has_error_dy=has_error_dy)
-                self.send_to_output() # Combine datasets with DataInfo
-                self.current_datainfo = DataInfo() # Reset DataInfo
-        except FileContentsException as fc_exc:
-            # File doesn't meet schema - try loading with a less strict schema
-            base_name = xml_reader.__file__
-            base_name = base_name.replace("\\", "/")
-            base = base_name.split("/sas/")[0]
-            if self.cansas_version == "1.1":
-                invalid_schema = INVALID_SCHEMA_PATH_1_1.format(base, self.cansas_defaults.get("schema"))
-            else:
-                invalid_schema = INVALID_SCHEMA_PATH_1_0.format(base, self.cansas_defaults.get("schema"))
-            self.set_schema(invalid_schema)
-            if self.invalid:
-                try:
-                    # Load data with less strict schema
-                    self.read(xml_file, invalid_schema, False)
-
-                    # File can still be read but doesn't match schema, so raise exception
-                    self.load_file_and_schema(xml_file) # Reload strict schema so we can find where error are in file
-                    invalid_xml = self.find_invalid_xml()
-                    invalid_xml = INVALID_XML.format(basename + self.extension) + invalid_xml
-                    raise DataReaderException(invalid_xml) # Handled by base class
-                except FileContentsException as fc_exc:
-                    msg = "CanSAS Reader could not load the file {}".format(xml_file)
-                    if fc_exc.message is not None: # Propagate error messages from earlier
-                        msg = fc_exc.message
-                    if not self.extension in self.ext: # If the file has no associated loader
-                        raise DefaultReaderException(msg)
-                    raise FileContentsException(msg)
-            else:
-                raise fc_exc
-        except Exception as e: # Convert all other exceptions to FileContentsExceptions
-            raise FileContentsException(e.message)
+        # Check that the file exists
+        if os.path.isfile(xml_file):
+            basename, extension = os.path.splitext(os.path.basename(xml_file))
+            # If the file type is not allowed, return nothing
+            if extension in self.ext or self.allow_all:
+                # Get the file location of
+                self.load_file_and_schema(xml_file, schema_path)
+                self.add_data_set()
+                # Try to load the file, but raise an error if unable to.
+                # Check the file matches the XML schema
+                try:
+                    self.is_cansas(extension)
+                    self.invalid = False
+                    # Get each SASentry from XML file and add it to a list.
+                    entry_list = self.xmlroot.xpath(
+                        '/ns:SASroot/ns:SASentry',
+                        namespaces={'ns': self.cansas_defaults.get("ns")})
+                    self.names.append("SASentry")
+
+                    # Get all preprocessing events and encoding
+                    self.set_processing_instructions()
+
+                    # Parse each <SASentry> item
+                    for entry in entry_list:
+                        # Create a new DataInfo object for every <SASentry>
+
+                        # Set the file name and then parse the entry.
+                        self.current_datainfo.filename = basename + extension
+                        self.current_datainfo.meta_data["loader"] = "CanSAS XML 1D"
+                        self.current_datainfo.meta_data[PREPROCESS] = \
+                            self.processing_instructions
+
+                        # Parse the XML SASentry
+                        self._parse_entry(entry)
+                        # Combine datasets with datainfo
+                        self.add_data_set()
+                except RuntimeError:
+                    # If the file does not match the schema, raise this error
+                    invalid_xml = self.find_invalid_xml()
+                    invalid_xml = INVALID_XML.format(basename + extension) + invalid_xml
+                    self.errors.add(invalid_xml)
+                    # Try again with an invalid CanSAS schema, that requires only a data set in each
+                    base_name = xml_reader.__file__
+                    base_name = base_name.replace("\\", "/")
+                    base = base_name.split("/sas/")[0]
+                    if self.cansas_version == "1.1":
+                        invalid_schema = INVALID_SCHEMA_PATH_1_1.format(base, self.cansas_defaults.get("schema"))
+                    else:
+                        invalid_schema = INVALID_SCHEMA_PATH_1_0.format(base, self.cansas_defaults.get("schema"))
+                    self.set_schema(invalid_schema)
+                    try:
+                        if self.invalid:
+                            if self.is_cansas():
+                                self.output = self.read(xml_file, invalid_schema, False)
+                            else:
+                                raise RuntimeError
+                        else:
+                            raise RuntimeError
+                    except RuntimeError:
+                        x = np.zeros(1)
+                        y = np.zeros(1)
+                        self.current_data1d = Data1D(x,y)
+                        self.current_data1d.errors = self.errors
+                        return [self.current_data1d]
+        else:
+            self.output.append("Not a valid file path.")
+        # Return a list of parsed entries that dataloader can manage
+        return self.output
 
     def _parse_entry(self, dom, recurse=False):
+        """
+        Parse a SASEntry - new recursive method for parsing the dom of
+        the CanSAS data format. This will allow multiple data files
+        and extra nodes to be read in simultaneously.
+
+        :param dom: dom object with a namespace base of names
+        """
+
         if not self._is_call_local() and not recurse:
             self.reset_state()
-            self.data = []
-            self.current_datainfo = DataInfo()
+            self.add_data_set()
             self.names.append("SASentry")
             self.parent_class = "SASentry"
-        # Create an empty dataset if no data has been passed to the reader
-        if self.current_dataset is None:
-            self.current_dataset = plottable_1D(np.empty(0), np.empty(0),
-                                                np.empty(0), np.empty(0))
-        self.base_ns = "{" + CANSAS_NS.get(self.cansas_version).get("ns") + "}"
-
-        # Loop through each child in the parent element
+        self._check_for_empty_data()
+        self.base_ns = "{0}{1}{2}".format("{", \
+                        CANSAS_NS.get(self.cansas_version).get("ns"), "}")
+
+        # Go through each child in the parent element
         for node in dom:
             attr = node.attrib
…
             if tagname == "fitting_plug_in" or tagname == "pr_inversion" or tagname == "invariant":
                 continue
+
             # Get where to store content
             self.names.append(tagname_original)
…
                 else:
                     self.current_dataset.shape = ()
-                # Recurse to access data within the group
-                self._parse_entry(node, recurse=True)
+                # Recursion step to access data within the group
+                self._parse_entry(node, True)
                 if tagname == "SASsample":
                     self.current_datainfo.sample.name = name
…
                     self.aperture.name = name
                     self.aperture.type = type
-                self._add_intermediate()
+                self.add_intermediate()
             else:
                 if isinstance(self.current_dataset, plottable_2D):
…
                     self.current_datainfo.notes.append(data_point)
 
-                # I and Q points
+                # I and Q - 1D data
                 elif tagname == 'I' and isinstance(self.current_dataset, plottable_1D):
                     unit_list = unit.split("|")
…
                     self.current_dataset.dx = np.append(self.current_dataset.dx, data_point)
                 elif tagname == 'dQw':
-                    if self.current_dataset.dqw is None: self.current_dataset.dqw = np.empty(0)
                     self.current_dataset.dxw = np.append(self.current_dataset.dxw, data_point)
                 elif tagname == 'dQl':
-                    if self.current_dataset.dxl is None: self.current_dataset.dxl = np.empty(0)
                     self.current_dataset.dxl = np.append(self.current_dataset.dxl, data_point)
                 elif tagname == 'Qmean':
…
                 elif tagname == 'name' and self.parent_class == 'SASinstrument':
                     self.current_datainfo.instrument = data_point
-
                 # Detector Information
                 elif tagname == 'name' and self.parent_class == 'SASdetector':
…
                     self.detector.orientation.z = data_point
                     self.detector.orientation_unit = unit
-
                 # Collimation and Aperture
                 elif tagname == 'length' and self.parent_class == 'SAScollimation':
…
                 elif tagname == 'term' and self.parent_class == 'SASprocess':
                     unit = attr.get("unit", "")
-                    dic = { "name": name, "value": data_point, "unit": unit }
+                    dic = {}
+                    dic["name"] = name
+                    dic["value"] = data_point
+                    dic["unit"] = unit
                     self.process.term.append(dic)
…
         if not self._is_call_local() and not recurse:
             self.frm = ""
-            self.current_datainfo.errors = set()
-            for error in self.errors:
-                self.current_datainfo.errors.add(error)
-            self.errors.clear()
-            self.send_to_output()
+            self.add_data_set()
             empty = None
             return self.output[0], empty
 
+
     def _is_call_local(self):
+        """
+
+        """
         if self.frm == "":
             inter = inspect.stack()
…
             return True
 
-    def _add_intermediate(self):
-
-    def load_file_and_schema(self, xml_file, schema_path=""):
-        base_name = xml_reader.__file__
-        base_name = base_name.replace("\\", "/")
-        base = base_name.split("/sas/")[0]
-
-        # Try and parse the XML file
-        try:
-            self.set_xml_file(xml_file)
-        except etree.XMLSyntaxError: # File isn't valid XML so can't be loaded
-            msg = "SasView cannot load {}.\nInvalid XML syntax".format(xml_file)
-            raise FileContentsException(msg)
-
-        self.cansas_version = self.xmlroot.get("version", "1.0")
-        self.cansas_defaults = CANSAS_NS.get(self.cansas_version, "1.0")
-
-        if schema_path == "":
-            schema_path = "{}/sas/sascalc/dataloader/readers/schema/{}".format(
-                base, self.cansas_defaults.get("schema").replace("\\", "/")
-            )
-        self.set_schema(schema_path)
-
-    def is_cansas(self, ext="xml"):
-        """
-        Checks to see if the XML file is a CanSAS file
-
-        :param ext: The file extension of the data file
-        :raises FileContentsException: Raised if XML file isn't valid CanSAS
-        """
-        if self.validate_xml(): # Check file is valid XML
-            name = "{http://www.w3.org/2001/XMLSchema-instance}schemaLocation"
-            value = self.xmlroot.get(name)
-            # Check schema CanSAS version matches file CanSAS version
-            if CANSAS_NS.get(self.cansas_version).get("ns") == value.rsplit(" ")[0]:
-                return True
-            if ext == "svs":
-                return True # Why is this required?
-        # If we get to this point then file isn't valid CanSAS
-        logger.warning("File doesn't meet CanSAS schema. Trying to load anyway.")
-        raise FileContentsException("The file is not valid CanSAS")
+    def is_cansas(self, ext="xml"):
+        """
+        Checks to see if the xml file is a CanSAS file
+
+        :param ext: The file extension of the data file
+        """
+        if self.validate_xml():
+            name = "{http://www.w3.org/2001/XMLSchema-instance}schemaLocation"
+            value = self.xmlroot.get(name)
+            if CANSAS_NS.get(self.cansas_version).get("ns") == \
+                    value.rsplit(" ")[0]:
+                return True
+        if ext == "svs":
+            return True
+        raise RuntimeError
+
+    def load_file_and_schema(self, xml_file, schema_path=""):
+        """
+        Loads the file and associates a schema, if a schema is passed in or if one already exists
+
+        :param xml_file: The xml file path sent to Reader.read
+        :param schema_path: The path to a schema associated with the xml_file, or find one based on the file
+        """
+        base_name = xml_reader.__file__
+        base_name = base_name.replace("\\", "/")
+        base = base_name.split("/sas/")[0]
+
+        # Load in xml file and get the cansas version from the header
+        self.set_xml_file(xml_file)
+        self.cansas_version = self.xmlroot.get("version", "1.0")
+
+        # Generic values for the cansas file based on the version
+        self.cansas_defaults = CANSAS_NS.get(self.cansas_version, "1.0")
+        if schema_path == "":
+            schema_path = "{0}/sas/sascalc/dataloader/readers/schema/{1}".format \
+                (base, self.cansas_defaults.get("schema")).replace("\\", "/")
+
+        # Link a schema to the XML file.
550 self.set_schema(schema_path) 551 552 def add_data_set(self): 553 """ 554 Adds the current_dataset to the list of outputs after preforming final processing on the data and then calls a 555 private method to generate a new data set. 556 557 :param key: NeXus group name for current tree level 558 """ 559 560 if self.current_datainfo and self.current_dataset: 561 self._final_cleanup() 562 self.data = [] 563 self.current_datainfo = DataInfo() 564 565 def _initialize_new_data_set(self, node=None): 566 """ 567 A private class method to generate a new 1D data object. 568 Outside methods should call add_data_set() to be sure any existing data is stored properly. 569 570 :param node: XML node to determine if 1D or 2D data 571 """ 572 x = np.array(0) 573 y = np.array(0) 574 for child in node: 575 if child.tag.replace(self.base_ns, "") == "Idata": 576 for i_child in child: 577 if i_child.tag.replace(self.base_ns, "") == "Qx": 578 self.current_dataset = plottable_2D() 579 return 580 self.current_dataset = plottable_1D(x, y) 581 582 def add_intermediate(self): 530 583 """ 531 584 This method stores any intermediate objects within the final data set after fully reading the set. 532 """ 585 586 :param parent: The NXclass name for the h5py Group object that just finished being processed 587 """ 588 533 589 if self.parent_class == 'SASprocess': 534 590 self.current_datainfo.process.append(self.process) … … 549 605 self._check_for_empty_resolution() 550 606 self.data.append(self.current_dataset) 607 608 def _final_cleanup(self): 609 """ 610 Final cleanup of the Data1D object to be sure it has all the 611 appropriate information needed for perspectives 612 """ 613 614 # Append errors to dataset and reset class errors 615 self.current_datainfo.errors = set() 616 for error in self.errors: 617 self.current_datainfo.errors.add(error) 618 self.errors.clear() 619 620 # Combine all plottables with datainfo and append each to output 621 # Type cast data arrays to float64 and find min/max as appropriate 622 for dataset in self.data: 623 if isinstance(dataset, plottable_1D): 624 if dataset.x is not None: 625 dataset.x = np.delete(dataset.x, [0]) 626 dataset.x = dataset.x.astype(np.float64) 627 dataset.xmin = np.min(dataset.x) 628 dataset.xmax = np.max(dataset.x) 629 if dataset.y is not None: 630 dataset.y = np.delete(dataset.y, [0]) 631 dataset.y = dataset.y.astype(np.float64) 632 dataset.ymin = np.min(dataset.y) 633 dataset.ymax = np.max(dataset.y) 634 if dataset.dx is not None: 635 dataset.dx = np.delete(dataset.dx, [0]) 636 dataset.dx = dataset.dx.astype(np.float64) 637 if dataset.dxl is not None: 638 dataset.dxl = np.delete(dataset.dxl, [0]) 639 dataset.dxl = dataset.dxl.astype(np.float64) 640 if dataset.dxw is not None: 641 dataset.dxw = np.delete(dataset.dxw, [0]) 642 dataset.dxw = dataset.dxw.astype(np.float64) 643 if dataset.dy is not None: 644 dataset.dy = np.delete(dataset.dy, [0]) 645 dataset.dy = dataset.dy.astype(np.float64) 646 np.trim_zeros(dataset.x) 647 np.trim_zeros(dataset.y) 648 np.trim_zeros(dataset.dy) 649 elif isinstance(dataset, plottable_2D): 650 dataset.data = dataset.data.astype(np.float64) 651 dataset.qx_data = dataset.qx_data.astype(np.float64) 652 dataset.xmin = np.min(dataset.qx_data) 653 dataset.xmax = np.max(dataset.qx_data) 654 dataset.qy_data = dataset.qy_data.astype(np.float64) 655 dataset.ymin = np.min(dataset.qy_data) 656 dataset.ymax = np.max(dataset.qy_data) 657 dataset.q_data = np.sqrt(dataset.qx_data * dataset.qx_data 658 + dataset.qy_data * dataset.qy_data) 659 if 
dataset.err_data is not None: 660 dataset.err_data = dataset.err_data.astype(np.float64) 661 if dataset.dqx_data is not None: 662 dataset.dqx_data = dataset.dqx_data.astype(np.float64) 663 if dataset.dqy_data is not None: 664 dataset.dqy_data = dataset.dqy_data.astype(np.float64) 665 if dataset.mask is not None: 666 dataset.mask = dataset.mask.astype(dtype=bool) 667 668 if len(dataset.shape) == 2: 669 n_rows, n_cols = dataset.shape 670 dataset.y_bins = dataset.qy_data[0::int(n_cols)] 671 dataset.x_bins = dataset.qx_data[:int(n_cols)] 672 dataset.data = dataset.data.flatten() 673 else: 674 dataset.y_bins = [] 675 dataset.x_bins = [] 676 dataset.data = dataset.data.flatten() 677 678 final_dataset = combine_data(dataset, self.current_datainfo) 679 self.output.append(final_dataset) 680 681 def _create_unique_key(self, dictionary, name, numb=0): 682 """ 683 Create a unique key value for any dictionary to prevent overwriting 684 Recurse until a unique key value is found. 685 686 :param dictionary: A dictionary with any number of entries 687 :param name: The index of the item to be added to dictionary 688 :param numb: The number to be appended to the name, starts at 0 689 """ 690 if dictionary.get(name) is not None: 691 numb += 1 692 name = name.split("_")[0] 693 name += "_{0}".format(numb) 694 name = self._create_unique_key(dictionary, name, numb) 695 return name 551 696 552 697 def _get_node_value(self, node, tagname): … … 656 801 return node_value, value_unit 657 802 803 def _check_for_empty_data(self): 804 """ 805 Creates an empty data set if no data is passed to the reader 806 807 :param data1d: presumably a Data1D object 808 """ 809 if self.current_dataset is None: 810 x_vals = np.empty(0) 811 y_vals = np.empty(0) 812 dx_vals = np.empty(0) 813 dy_vals = np.empty(0) 814 dxl = np.empty(0) 815 dxw = np.empty(0) 816 self.current_dataset = plottable_1D(x_vals, y_vals, dx_vals, dy_vals) 817 self.current_dataset.dxl = dxl 818 self.current_dataset.dxw = dxw 819 658 820 def _check_for_empty_resolution(self): 659 821 """ 660 a method to check all resolution data sets are the same size as I and q 661 """ 662 dql_exists = False 663 dqw_exists = False 664 dq_exists = False 665 di_exists = False 666 if self.current_dataset.dxl is not None: 667 dql_exists = True 668 if self.current_dataset.dxw is not None: 669 dqw_exists = True 670 if self.current_dataset.dx is not None: 671 dq_exists = True 672 if self.current_dataset.dy is not None: 673 di_exists = True 674 if dqw_exists and not dql_exists: 675 array_size = self.current_dataset.dxw.size - 1 676 self.current_dataset.dxl = np.append(self.current_dataset.dxl, 677 np.zeros([array_size])) 678 elif dql_exists and not dqw_exists: 679 array_size = self.current_dataset.dxl.size - 1 680 self.current_dataset.dxw = np.append(self.current_dataset.dxw, 681 np.zeros([array_size])) 682 elif not dql_exists and not dqw_exists and not dq_exists: 683 array_size = self.current_dataset.x.size - 1 684 self.current_dataset.dx = np.append(self.current_dataset.dx, 685 np.zeros([array_size])) 686 if not di_exists: 687 array_size = self.current_dataset.y.size - 1 688 self.current_dataset.dy = np.append(self.current_dataset.dy, 689 np.zeros([array_size])) 690 691 def _initialize_new_data_set(self, node=None): 692 if node is not None: 693 for child in node: 694 if child.tag.replace(self.base_ns, "") == "Idata": 695 for i_child in child: 696 if i_child.tag.replace(self.base_ns, "") == "Qx": 697 self.current_dataset = plottable_2D() 698 return 699 self.current_dataset = 
plottable_1D(np.array(0), np.array(0)) 700 701 ## Writing Methods 822 A method to check all resolution data sets are the same size as I and Q 823 """ 824 if isinstance(self.current_dataset, plottable_1D): 825 dql_exists = False 826 dqw_exists = False 827 dq_exists = False 828 di_exists = False 829 if self.current_dataset.dxl is not None: 830 dql_exists = True 831 if self.current_dataset.dxw is not None: 832 dqw_exists = True 833 if self.current_dataset.dx is not None: 834 dq_exists = True 835 if self.current_dataset.dy is not None: 836 di_exists = True 837 if dqw_exists and not dql_exists: 838 array_size = self.current_dataset.dxw.size - 1 839 self.current_dataset.dxl = np.append(self.current_dataset.dxl, 840 np.zeros([array_size])) 841 elif dql_exists and not dqw_exists: 842 array_size = self.current_dataset.dxl.size - 1 843 self.current_dataset.dxw = np.append(self.current_dataset.dxw, 844 np.zeros([array_size])) 845 elif not dql_exists and not dqw_exists and not dq_exists: 846 array_size = self.current_dataset.x.size - 1 847 self.current_dataset.dx = np.append(self.current_dataset.dx, 848 np.zeros([array_size])) 849 if not di_exists: 850 array_size = self.current_dataset.y.size - 1 851 self.current_dataset.dy = np.append(self.current_dataset.dy, 852 np.zeros([array_size])) 853 elif isinstance(self.current_dataset, plottable_2D): 854 dqx_exists = False 855 dqy_exists = False 856 di_exists = False 857 mask_exists = False 858 if self.current_dataset.dqx_data is not None: 859 dqx_exists = True 860 if self.current_dataset.dqy_data is not None: 861 dqy_exists = True 862 if self.current_dataset.err_data is not None: 863 di_exists = True 864 if self.current_dataset.mask is not None: 865 mask_exists = True 866 if not dqy_exists: 867 array_size = self.current_dataset.qy_data.size - 1 868 self.current_dataset.dqy_data = np.append( 869 self.current_dataset.dqy_data, np.zeros([array_size])) 870 if not dqx_exists: 871 array_size = self.current_dataset.qx_data.size - 1 872 self.current_dataset.dqx_data = np.append( 873 self.current_dataset.dqx_data, np.zeros([array_size])) 874 if not di_exists: 875 array_size = self.current_dataset.data.size - 1 876 self.current_dataset.err_data = np.append( 877 self.current_dataset.err_data, np.zeros([array_size])) 878 if not mask_exists: 879 array_size = self.current_dataset.data.size - 1 880 self.current_dataset.mask = np.append( 881 self.current_dataset.mask, 882 np.ones([array_size] ,dtype=bool)) 883 884 ####### All methods below are for writing CanSAS XML files ####### 885 702 886 def write(self, filename, datainfo): 703 887 """ … … 1330 1514 exec "storage.%s = entry.text.strip()" % variable 1331 1515 1516 1332 1517 # DO NOT REMOVE Called by outside packages: 1333 1518 # sas.sasgui.perspectives.invariant.invariant_state -
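Note: the strict/loose schema handling in cansas_reader.py above reduces to standard lxml XSD validation. The sketch below shows the core of what set_schema() and validate_xml() do; the file names are placeholders and all of the reader's error bookkeeping is omitted:

    from lxml import etree

    def validate_against_schema(xml_path, schema_path):
        # Parse the XSD and the document, then test conformance.
        # cansas_reader retries with a looser schema when this
        # returns False, recording which elements failed.
        schema = etree.XMLSchema(etree.parse(schema_path))
        doc = etree.parse(xml_path)
        return schema.validate(doc)

After a failed validate(), the per-element failures that find_invalid_xml() reports are available on schema.error_log.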
src/sas/sascalc/dataloader/readers/cansas_reader_HDF5.py
rdcb91cf rc94280c 13 13 TransmissionSpectrum, Detector 14 14 from sas.sascalc.dataloader.data_info import combine_data_info_with_plottable 15 from sas.sascalc.dataloader.loader_exceptions import FileContentsException, DefaultReaderException 16 from sas.sascalc.dataloader.file_reader_base_class import FileReader 17 18 19 class Reader(FileReader): 15 16 17 class Reader(): 20 18 """ 21 19 A class for reading in CanSAS v2.0 data files. The existing iteration opens … … 42 40 # Raw file contents to be processed 43 41 raw_data = None 42 # Data info currently being read in 43 current_datainfo = None 44 # SASdata set currently being read in 45 current_dataset = None 44 46 # List of plottable1D objects that should be linked to the current_datainfo 45 47 data1d = None … … 54 56 # Flag to bypass extension check 55 57 allow_all = True 56 57 def get_file_contents(self): 58 # List of files to return 59 output = None 60 61 def read(self, filename): 58 62 """ 59 63 This is the general read method that all SasView data_loaders must have. … … 64 68 # Reinitialize when loading a new data file to reset all class variables 65 69 self.reset_class_variables() 66 67 filename = self.f_open.name68 self.f_open.close() # IO handled by h5py69 70 70 # Check that the file exists 71 71 if os.path.isfile(filename): … … 75 75 if extension in self.ext or self.allow_all: 76 76 # Load the data file 77 try: 78 self.raw_data = h5py.File(filename, 'r') 79 except Exception as e: 80 if extension not in self.ext: 81 msg = "CanSAS2.0 HDF5 Reader could not load file {}".format(basename + extension) 82 raise DefaultReaderException(msg) 83 raise FileContentsException(e.message) 84 try: 85 # Read in all child elements of top level SASroot 86 self.read_children(self.raw_data, []) 87 # Add the last data set to the list of outputs 88 self.add_data_set() 89 except Exception as exc: 90 raise FileContentsException(exc.message) 91 finally: 92 # Close the data file 93 self.raw_data.close() 94 95 for dataset in self.output: 96 if isinstance(dataset, Data1D): 97 if dataset.x.size < 5: 98 self.output = [] 99 raise FileContentsException("Fewer than 5 data points found.") 77 self.raw_data = h5py.File(filename, 'r') 78 # Read in all child elements of top level SASroot 79 self.read_children(self.raw_data, []) 80 # Add the last data set to the list of outputs 81 self.add_data_set() 82 # Close the data file 83 self.raw_data.close() 84 # Return data set(s) 85 return self.output 100 86 101 87 def reset_class_variables(self): … … 441 427 Data1D and Data2D objects 442 428 """ 429 443 430 # Type cast data arrays to float64 444 431 if len(self.current_datainfo.trans_spectrum) > 0: … … 464 451 # Type cast data arrays to float64 and find min/max as appropriate 465 452 for dataset in self.data2d: 453 dataset.data = dataset.data.astype(np.float64) 454 dataset.err_data = dataset.err_data.astype(np.float64) 455 if dataset.qx_data is not None: 456 dataset.xmin = np.min(dataset.qx_data) 457 dataset.xmax = np.max(dataset.qx_data) 458 dataset.qx_data = dataset.qx_data.astype(np.float64) 459 if dataset.dqx_data is not None: 460 dataset.dqx_data = dataset.dqx_data.astype(np.float64) 461 if dataset.qy_data is not None: 462 dataset.ymin = np.min(dataset.qy_data) 463 dataset.ymax = np.max(dataset.qy_data) 464 dataset.qy_data = dataset.qy_data.astype(np.float64) 465 if dataset.dqy_data is not None: 466 dataset.dqy_data = dataset.dqy_data.astype(np.float64) 467 if dataset.q_data is not None: 468 dataset.q_data = dataset.q_data.astype(np.float64) 466 469 zeros = 
np.ones(dataset.data.size, dtype=bool) 467 470 try: … … 486 489 dataset.x_bins = dataset.qx_data[:n_cols] 487 490 dataset.data = dataset.data.flatten() 488 self.current_dataset = dataset 489 self.send_to_output() 491 492 final_dataset = combine_data_info_with_plottable( 493 dataset, self.current_datainfo) 494 self.output.append(final_dataset) 490 495 491 496 for dataset in self.data1d: 492 self.current_dataset = dataset 493 self.send_to_output() 497 if dataset.x is not None: 498 dataset.x = dataset.x.astype(np.float64) 499 dataset.xmin = np.min(dataset.x) 500 dataset.xmax = np.max(dataset.x) 501 if dataset.y is not None: 502 dataset.y = dataset.y.astype(np.float64) 503 dataset.ymin = np.min(dataset.y) 504 dataset.ymax = np.max(dataset.y) 505 if dataset.dx is not None: 506 dataset.dx = dataset.dx.astype(np.float64) 507 if dataset.dxl is not None: 508 dataset.dxl = dataset.dxl.astype(np.float64) 509 if dataset.dxw is not None: 510 dataset.dxw = dataset.dxw.astype(np.float64) 511 if dataset.dy is not None: 512 dataset.dy = dataset.dy.astype(np.float64) 513 final_dataset = combine_data_info_with_plottable( 514 dataset, self.current_datainfo) 515 self.output.append(final_dataset) 494 516 495 517 def add_data_set(self, key=""): -
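Note: read_children() in the HDF5 reader is a depth-first walk over h5py groups. A minimal sketch of that traversal pattern, with a hypothetical file name and no CanSAS class mapping:

    from __future__ import print_function
    import h5py

    def walk(node, depth=0):
        # Visit every group and dataset below `node`, the way
        # read_children() descends from SASroot.
        for name, item in node.items():
            if isinstance(item, h5py.Group):
                print("  " * depth + name + "/")
                walk(item, depth + 1)
            elif isinstance(item, h5py.Dataset):
                print("  " * depth + name, item.shape)

    with h5py.File("example.h5", "r") as f:
        walk(f)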
src/sas/sascalc/dataloader/readers/danse_reader.py
r713a047 r235f514 5 5 #This software was developed by the University of Tennessee as part of the 6 6 #Distributed Data Analysis of Neutron Scattering Experiments (DANSE) 7 #project funded by the US National Science Foundation. 7 #project funded by the US National Science Foundation. 8 8 #If you use DANSE applications to do scientific research that leads to 9 9 #publication, we ask that you acknowledge the use of the software with the … … 14 14 import math 15 15 import os 16 import sys 16 17 import numpy as np 17 18 import logging 18 from sas.sascalc.dataloader.data_info import plottable_2D, DataInfo, Detector19 from sas.sascalc.dataloader.data_info import Data2D, Detector 19 20 from sas.sascalc.dataloader.manipulations import reader2D_converter 20 from sas.sascalc.dataloader.file_reader_base_class import FileReader21 from sas.sascalc.dataloader.loader_exceptions import FileContentsException, DataReaderException22 21 23 22 logger = logging.getLogger(__name__) … … 31 30 32 31 33 class Reader (FileReader):32 class Reader: 34 33 """ 35 34 Example data manipulation … … 41 40 ## Extension 42 41 ext = ['.sans', '.SANS'] 43 44 def get_file_contents(self): 45 self.current_datainfo = DataInfo() 46 self.current_dataset = plottable_2D() 47 self.output = [] 48 49 loaded_correctly = True 50 error_message = "" 51 52 # defaults 53 # wavelength in Angstrom 54 wavelength = 10.0 55 # Distance in meter 56 distance = 11.0 57 # Pixel number of center in x 58 center_x = 65 59 # Pixel number of center in y 60 center_y = 65 61 # Pixel size [mm] 62 pixel = 5.0 63 # Size in x, in pixels 64 size_x = 128 65 # Size in y, in pixels 66 size_y = 128 67 # Format version 68 fversion = 1.0 69 70 self.current_datainfo.filename = os.path.basename(self.f_open.name) 71 detector = Detector() 72 self.current_datainfo.detector.append(detector) 73 74 self.current_dataset.data = np.zeros([size_x, size_y]) 75 self.current_dataset.err_data = np.zeros([size_x, size_y]) 76 77 read_on = True 78 data_start_line = 1 79 while read_on: 80 line = self.f_open.readline() 81 data_start_line += 1 82 if line.find("DATA:") >= 0: 83 read_on = False 84 break 85 toks = line.split(':') 42 43 def read(self, filename=None): 44 """ 45 Open and read the data in a file 46 @param file: path of the file 47 """ 48 49 read_it = False 50 for item in self.ext: 51 if filename.lower().find(item) >= 0: 52 read_it = True 53 54 if read_it: 86 55 try: 56 datafile = open(filename, 'r') 57 except: 58 raise RuntimeError,"danse_reader cannot open %s" % (filename) 59 60 # defaults 61 # wavelength in Angstrom 62 wavelength = 10.0 63 # Distance in meter 64 distance = 11.0 65 # Pixel number of center in x 66 center_x = 65 67 # Pixel number of center in y 68 center_y = 65 69 # Pixel size [mm] 70 pixel = 5.0 71 # Size in x, in pixels 72 size_x = 128 73 # Size in y, in pixels 74 size_y = 128 75 # Format version 76 fversion = 1.0 77 78 output = Data2D() 79 output.filename = os.path.basename(filename) 80 detector = Detector() 81 output.detector.append(detector) 82 83 output.data = np.zeros([size_x,size_y]) 84 output.err_data = np.zeros([size_x, size_y]) 85 86 data_conv_q = None 87 data_conv_i = None 88 89 if has_converter == True and output.Q_unit != '1/A': 90 data_conv_q = Converter('1/A') 91 # Test it 92 data_conv_q(1.0, output.Q_unit) 93 94 if has_converter == True and output.I_unit != '1/cm': 95 data_conv_i = Converter('1/cm') 96 # Test it 97 data_conv_i(1.0, output.I_unit) 98 99 read_on = True 100 while read_on: 101 line = datafile.readline() 102 if line.find("DATA:") >= 0: 103 
read_on = False 104 break 105 toks = line.split(':') 87 106 if toks[0] == "FORMATVERSION": 88 107 fversion = float(toks[1]) 89 elif toks[0] == "WAVELENGTH":108 if toks[0] == "WAVELENGTH": 90 109 wavelength = float(toks[1]) 91 110 elif toks[0] == "DISTANCE": … … 101 120 elif toks[0] == "SIZE_Y": 102 121 size_y = int(toks[1]) 103 except ValueError as e: 104 error_message += "Unable to parse {}. Default value used.\n".format(toks[0]) 105 loaded_correctly = False 106 107 # Read the data 108 data = [] 109 error = [] 110 if not fversion >= 1.0: 111 msg = "danse_reader can't read this file {}".format(self.f_open.name) 112 raise FileContentsException(msg) 113 114 for line_num, data_str in enumerate(self.f_open.readlines()): 115 toks = data_str.split() 116 try: 117 val = float(toks[0]) 118 err = float(toks[1]) 119 data.append(val) 120 error.append(err) 121 except ValueError as exc: 122 msg = "Unable to parse line {}: {}".format(line_num + data_start_line, data_str.strip()) 123 raise FileContentsException(msg) 124 125 num_pts = size_x * size_y 126 if len(data) < num_pts: 127 msg = "Not enough data points provided. Expected {} but got {}".format( 128 size_x * size_y, len(data)) 129 raise FileContentsException(msg) 130 elif len(data) > num_pts: 131 error_message += ("Too many data points provided. Expected {0} but" 132 " got {1}. Only the first {0} will be used.\n").format(num_pts, len(data)) 133 loaded_correctly = False 134 data = data[:num_pts] 135 error = error[:num_pts] 136 137 # Qx and Qy vectors 138 theta = pixel / distance / 100.0 139 i_x = np.arange(size_x) 140 theta = (i_x - center_x + 1) * pixel / distance / 100.0 141 x_vals = 4.0 * np.pi / wavelength * np.sin(theta / 2.0) 142 xmin = x_vals.min() 143 xmax = x_vals.max() 144 145 i_y = np.arange(size_y) 146 theta = (i_y - center_y + 1) * pixel / distance / 100.0 147 y_vals = 4.0 * np.pi / wavelength * np.sin(theta / 2.0) 148 ymin = y_vals.min() 149 ymax = y_vals.max() 150 151 self.current_dataset.data = np.array(data, dtype=np.float64).reshape((size_y, size_x)) 152 if fversion > 1.0: 153 self.current_dataset.err_data = np.array(error, dtype=np.float64).reshape((size_y, size_x)) 154 155 # Store all data 156 # Store wavelength 157 if has_converter == True and self.current_datainfo.source.wavelength_unit != 'A': 158 conv = Converter('A') 159 wavelength = conv(wavelength, 160 units=self.current_datainfo.source.wavelength_unit) 161 self.current_datainfo.source.wavelength = wavelength 162 163 # Store distance 164 if has_converter == True and detector.distance_unit != 'm': 165 conv = Converter('m') 166 distance = conv(distance, units=detector.distance_unit) 167 detector.distance = distance 168 169 # Store pixel size 170 if has_converter == True and detector.pixel_size_unit != 'mm': 171 conv = Converter('mm') 172 pixel = conv(pixel, units=detector.pixel_size_unit) 173 detector.pixel_size.x = pixel 174 detector.pixel_size.y = pixel 175 176 # Store beam center in distance units 177 detector.beam_center.x = center_x * pixel 178 detector.beam_center.y = center_y * pixel 179 180 181 self.current_dataset.xaxis("\\rm{Q_{x}}", 'A^{-1}') 182 self.current_dataset.yaxis("\\rm{Q_{y}}", 'A^{-1}') 183 self.current_dataset.zaxis("\\rm{Intensity}", "cm^{-1}") 184 185 self.current_dataset.x_bins = x_vals 186 self.current_dataset.y_bins = y_vals 187 188 # Reshape data 189 x_vals = np.tile(x_vals, (size_y, 1)).flatten() 190 y_vals = np.tile(y_vals, (size_x, 1)).T.flatten() 191 if self.current_dataset.err_data == np.all(np.array(None)) or 
np.any(self.current_dataset.err_data <= 0): 192 new_err_data = np.sqrt(np.abs(self.current_dataset.data)) 193 else: 194 new_err_data = self.current_dataset.err_data.flatten() 195 196 self.current_dataset.err_data = new_err_data 197 self.current_dataset.qx_data = x_vals 198 self.current_dataset.qy_data = y_vals 199 self.current_dataset.q_data = np.sqrt(x_vals**2 + y_vals**2) 200 self.current_dataset.mask = np.ones(len(x_vals), dtype=bool) 201 202 # Store loading process information 203 self.current_datainfo.meta_data['loader'] = self.type_name 204 205 self.send_to_output() 206 207 if not loaded_correctly: 208 raise DataReaderException(error_message) 122 123 # Read the data 124 data = [] 125 error = [] 126 if fversion == 1.0: 127 data_str = datafile.readline() 128 data = data_str.split(' ') 129 else: 130 read_on = True 131 while read_on: 132 data_str = datafile.readline() 133 if len(data_str) == 0: 134 read_on = False 135 else: 136 toks = data_str.split() 137 try: 138 val = float(toks[0]) 139 err = float(toks[1]) 140 if data_conv_i is not None: 141 val = data_conv_i(val, units=output._yunit) 142 err = data_conv_i(err, units=output._yunit) 143 data.append(val) 144 error.append(err) 145 except: 146 logger.info("Skipping line:%s,%s" %(data_str, 147 sys.exc_value)) 148 149 # Initialize 150 x_vals = [] 151 y_vals = [] 152 ymin = None 153 ymax = None 154 xmin = None 155 xmax = None 156 157 # Qx and Qy vectors 158 theta = pixel / distance / 100.0 159 stepq = 4.0 * math.pi / wavelength * math.sin(theta / 2.0) 160 for i_x in range(size_x): 161 theta = (i_x - center_x + 1) * pixel / distance / 100.0 162 qx = 4.0 * math.pi / wavelength * math.sin(theta / 2.0) 163 164 if has_converter == True and output.Q_unit != '1/A': 165 qx = data_conv_q(qx, units=output.Q_unit) 166 167 x_vals.append(qx) 168 if xmin is None or qx < xmin: 169 xmin = qx 170 if xmax is None or qx > xmax: 171 xmax = qx 172 173 ymin = None 174 ymax = None 175 for i_y in range(size_y): 176 theta = (i_y - center_y + 1) * pixel / distance / 100.0 177 qy = 4.0 * math.pi / wavelength * math.sin(theta/2.0) 178 179 if has_converter == True and output.Q_unit != '1/A': 180 qy = data_conv_q(qy, units=output.Q_unit) 181 182 y_vals.append(qy) 183 if ymin is None or qy < ymin: 184 ymin = qy 185 if ymax is None or qy > ymax: 186 ymax = qy 187 188 # Store the data in the 2D array 189 i_x = 0 190 i_y = -1 191 192 for i_pt in range(len(data)): 193 try: 194 value = float(data[i_pt]) 195 except: 196 # For version 1.0, the data were still 197 # stored as strings at this point. 
198 msg = "Skipping entry (v1.0):%s,%s" % (str(data[i_pt]), 199 sys.exc_value) 200 logger.info(msg) 201 202 # Get bin number 203 if math.fmod(i_pt, size_x) == 0: 204 i_x = 0 205 i_y += 1 206 else: 207 i_x += 1 208 209 output.data[i_y][i_x] = value 210 if fversion>1.0: 211 output.err_data[i_y][i_x] = error[i_pt] 212 213 # Store all data 214 # Store wavelength 215 if has_converter == True and output.source.wavelength_unit != 'A': 216 conv = Converter('A') 217 wavelength = conv(wavelength, 218 units=output.source.wavelength_unit) 219 output.source.wavelength = wavelength 220 221 # Store distance 222 if has_converter == True and detector.distance_unit != 'm': 223 conv = Converter('m') 224 distance = conv(distance, units=detector.distance_unit) 225 detector.distance = distance 226 227 # Store pixel size 228 if has_converter == True and detector.pixel_size_unit != 'mm': 229 conv = Converter('mm') 230 pixel = conv(pixel, units=detector.pixel_size_unit) 231 detector.pixel_size.x = pixel 232 detector.pixel_size.y = pixel 233 234 # Store beam center in distance units 235 detector.beam_center.x = center_x * pixel 236 detector.beam_center.y = center_y * pixel 237 238 # Store limits of the image (2D array) 239 xmin = xmin - stepq / 2.0 240 xmax = xmax + stepq / 2.0 241 ymin = ymin - stepq /2.0 242 ymax = ymax + stepq / 2.0 243 244 if has_converter == True and output.Q_unit != '1/A': 245 xmin = data_conv_q(xmin, units=output.Q_unit) 246 xmax = data_conv_q(xmax, units=output.Q_unit) 247 ymin = data_conv_q(ymin, units=output.Q_unit) 248 ymax = data_conv_q(ymax, units=output.Q_unit) 249 output.xmin = xmin 250 output.xmax = xmax 251 output.ymin = ymin 252 output.ymax = ymax 253 254 # Store x and y axis bin centers 255 output.x_bins = x_vals 256 output.y_bins = y_vals 257 258 # Units 259 if data_conv_q is not None: 260 output.xaxis("\\rm{Q_{x}}", output.Q_unit) 261 output.yaxis("\\rm{Q_{y}}", output.Q_unit) 262 else: 263 output.xaxis("\\rm{Q_{x}}", 'A^{-1}') 264 output.yaxis("\\rm{Q_{y}}", 'A^{-1}') 265 266 if data_conv_i is not None: 267 output.zaxis("\\rm{Intensity}", output.I_unit) 268 else: 269 output.zaxis("\\rm{Intensity}", "cm^{-1}") 270 271 if not fversion >= 1.0: 272 msg = "Danse_reader can't read this file %s" % filename 273 raise ValueError, msg 274 else: 275 logger.info("Danse_reader Reading %s \n" % filename) 276 277 # Store loading process information 278 output.meta_data['loader'] = self.type_name 279 output = reader2D_converter(output) 280 return output 281 282 return None -
src/sas/sascalc/dataloader/readers/red2d_reader.py
r2f85af7 ra1b8fee 5 5 #This software was developed by the University of Tennessee as part of the 6 6 #Distributed Data Analysis of Neutron Scattering Experiments (DANSE) 7 #project funded by the US National Science Foundation. 7 #project funded by the US National Science Foundation. 8 8 #See the license text in license.txt 9 9 #copyright 2008, University of Tennessee 10 10 ###################################################################### 11 from __future__ import print_function 12 11 13 import os 12 14 import numpy as np 13 15 import math 14 from sas.sascalc.dataloader.data_info import plottable_2D, DataInfo, Detector 15 from sas.sascalc.dataloader.file_reader_base_class import FileReader 16 from sas.sascalc.dataloader.loader_exceptions import FileContentsException 16 from sas.sascalc.dataloader.data_info import Data2D, Detector 17 17 18 18 # Look for unit converter … … 22 22 except: 23 23 has_converter = False 24 25 24 25 26 26 def check_point(x_point): 27 27 """ … … 33 33 except: 34 34 return 0 35 36 37 class Reader (FileReader):35 36 37 class Reader: 38 38 """ Simple data reader for Igor data files """ 39 39 ## File type … … 43 43 ## Extension 44 44 ext = ['.DAT', '.dat'] 45 45 46 46 def write(self, filename, data): 47 47 """ 48 48 Write to .dat 49 49 50 50 :param filename: file name to write 51 51 :param data: data2D … … 53 53 import time 54 54 # Write the file 55 try: 56 fd = open(filename, 'w') 57 t = time.localtime() 58 time_str = time.strftime("%H:%M on %b %d %y", t) 59 60 header_str = "Data columns are Qx - Qy - I(Qx,Qy)\n\nASCII data" 61 header_str += " created at %s \n\n" % time_str 62 # simple 2D header 63 fd.write(header_str) 64 # write qx qy I values 65 for i in range(len(data.data)): 66 fd.write("%g %g %g\n" % (data.qx_data[i], 67 data.qy_data[i], 68 data.data[i])) 69 finally: 70 fd.close() 71 72 def get_file_contents(self): 55 fd = open(filename, 'w') 56 t = time.localtime() 57 time_str = time.strftime("%H:%M on %b %d %y", t) 58 59 header_str = "Data columns are Qx - Qy - I(Qx,Qy)\n\nASCII data" 60 header_str += " created at %s \n\n" % time_str 61 # simple 2D header 62 fd.write(header_str) 63 # write qx qy I values 64 for i in range(len(data.data)): 65 fd.write("%g %g %g\n" % (data.qx_data[i], 66 data.qy_data[i], 67 data.data[i])) 68 # close 69 fd.close() 70 71 def read(self, filename=None): 72 """ Read file """ 73 if not os.path.isfile(filename): 74 raise ValueError, \ 75 "Specified file %s is not a regular file" % filename 76 73 77 # Read file 74 buf = self.f_open.read() 75 self.f_open.close() 78 f = open(filename, 'r') 79 buf = f.read() 80 f.close() 76 81 # Instantiate data object 77 self.current_dataset = plottable_2D() 78 self.current_datainfo = DataInfo() 79 self.current_datainfo.filename = os.path.basename(self.f_open.name) 80 self.current_datainfo.detector.append(Detector()) 81 82 output = Data2D() 83 output.filename = os.path.basename(filename) 84 detector = Detector() 85 if len(output.detector) > 0: 86 print(str(output.detector[0])) 87 output.detector.append(detector) 88 82 89 # Get content 83 data _started = False84 90 dataStarted = False 91 85 92 ## Defaults 86 93 lines = buf.split('\n') 87 94 x = [] 88 95 y = [] 89 96 90 97 wavelength = None 91 98 distance = None 92 99 transmission = None 93 100 94 101 pixel_x = None 95 102 pixel_y = None 96 97 is_info = False 98 is_center = False 99 103 104 isInfo = False 105 isCenter = False 106 107 data_conv_q = None 108 data_conv_i = None 109 110 # Set units: This is the unit assumed for Q and I in the data file. 
111 if has_converter == True and output.Q_unit != '1/A': 112 data_conv_q = Converter('1/A') 113 # Test it 114 data_conv_q(1.0, output.Q_unit) 115 116 if has_converter == True and output.I_unit != '1/cm': 117 data_conv_i = Converter('1/cm') 118 # Test it 119 data_conv_i(1.0, output.I_unit) 120 121 100 122 # Remove the last lines before the for loop if the lines are empty 101 123 # to calculate the exact number of data points … … 113 135 ## Reading the header applies only to IGOR/NIST 2D q_map data files 114 136 # Find setup info line 115 if is _info:116 is _info = False137 if isInfo: 138 isInfo = False 117 139 line_toks = line.split() 118 140 # Wavelength in Angstrom … … 121 143 # Units 122 144 if has_converter == True and \ 123 self.current_datainfo.source.wavelength_unit != 'A':145 output.source.wavelength_unit != 'A': 124 146 conv = Converter('A') 125 147 wavelength = conv(wavelength, 126 units= self.current_datainfo.source.wavelength_unit)148 units=output.source.wavelength_unit) 127 149 except: 128 150 #Not required … … 132 154 distance = float(line_toks[3]) 133 155 # Units 134 if has_converter == True and self.current_datainfo.detector[0].distance_unit != 'm':156 if has_converter == True and detector.distance_unit != 'm': 135 157 conv = Converter('m') 136 distance = conv(distance, 137 units=self.current_datainfo.detector[0].distance_unit) 158 distance = conv(distance, units=detector.distance_unit) 138 159 except: 139 160 #Not required 140 161 pass 141 162 142 163 # Distance in meters 143 164 try: … … 146 167 #Not required 147 168 pass 148 169 149 170 if line.count("LAMBDA") > 0: 150 is _info = True151 171 isInfo = True 172 152 173 # Find center info line 153 if is _center:154 is _center = False174 if isCenter: 175 isCenter = False 155 176 line_toks = line.split() 156 177 # Center in bin number … … 159 180 160 181 if line.count("BCENT") > 0: 161 is _center = True182 isCenter = True 162 183 # Check version 163 184 if line.count("Data columns") > 0: … … 166 187 # Find data start 167 188 if line.count("ASCII data") > 0: 168 data _started = True189 dataStarted = True 169 190 continue 170 191 171 192 ## Read and get data. 
172 if data _started == True:193 if dataStarted == True: 173 194 line_toks = line.split() 174 195 if len(line_toks) == 0: 175 196 #empty line 176 197 continue 177 # the number of columns must be stayed same 198 # the number of columns must be stayed same 178 199 col_num = len(line_toks) 179 200 break … … 183 204 # index for lines_array 184 205 lines_index = np.arange(len(lines)) 185 206 186 207 # get the data lines 187 208 data_lines = lines_array[lines_index >= (line_num - 1)] … … 192 213 # split all data to one big list w/" "separator 193 214 data_list = data_list.split() 194 215 195 216 # Check if the size is consistent with data, otherwise 196 217 #try the tab(\t) separator … … 212 233 data_point = data_array.reshape(row_num, col_num).transpose() 213 234 except: 214 msg = "red2d_reader can't read this file: Incorrect number of data points provided."215 raise FileContentsException(msg)235 msg = "red2d_reader: Can't read this file: Not a proper file format" 236 raise ValueError, msg 216 237 ## Get the all data: Let's HARDcoding; Todo find better way 217 238 # Defaults … … 236 257 #if col_num > (6 + ver): mask[data_point[(6 + ver)] < 1] = False 237 258 q_data = np.sqrt(qx_data*qx_data+qy_data*qy_data+qz_data*qz_data) 238 239 # Extra protection(it is needed for some data files): 259 260 # Extra protection(it is needed for some data files): 240 261 # If all mask elements are False, put all True 241 262 if not mask.any(): 242 263 mask[mask == False] = True 243 264 244 265 # Store limits of the image in q space 245 266 xmin = np.min(qx_data) … … 248 269 ymax = np.max(qy_data) 249 270 271 # units 272 if has_converter == True and output.Q_unit != '1/A': 273 xmin = data_conv_q(xmin, units=output.Q_unit) 274 xmax = data_conv_q(xmax, units=output.Q_unit) 275 ymin = data_conv_q(ymin, units=output.Q_unit) 276 ymax = data_conv_q(ymax, units=output.Q_unit) 277 250 278 ## calculate the range of the qx and qy_data 251 279 x_size = math.fabs(xmax - xmin) 252 280 y_size = math.fabs(ymax - ymin) 253 281 254 282 # calculate the number of pixels in the each axes 255 283 npix_y = math.floor(math.sqrt(len(data))) 256 284 npix_x = math.floor(len(data) / npix_y) 257 285 258 286 # calculate the size of bins 259 287 xstep = x_size / (npix_x - 1) 260 288 ystep = y_size / (npix_y - 1) 261 289 262 290 # store x and y axis bin centers in q space 263 291 x_bins = np.arange(xmin, xmax + xstep, xstep) 264 292 y_bins = np.arange(ymin, ymax + ystep, ystep) 265 293 266 294 # get the limits of q values 267 295 xmin = xmin - xstep / 2 … … 269 297 ymin = ymin - ystep / 2 270 298 ymax = ymax + ystep / 2 271 299 272 300 #Store data in outputs 273 301 #TODO: Check the lengths 274 self.current_dataset.data = data302 output.data = data 275 303 if (err_data == 1).all(): 276 self.current_dataset.err_data = np.sqrt(np.abs(data))277 self.current_dataset.err_data[self.current_dataset.err_data == 0.0] = 1.0304 output.err_data = np.sqrt(np.abs(data)) 305 output.err_data[output.err_data == 0.0] = 1.0 278 306 else: 279 self.current_dataset.err_data = err_data280 281 self.current_dataset.qx_data = qx_data282 self.current_dataset.qy_data = qy_data283 self.current_dataset.q_data = q_data284 self.current_dataset.mask = mask285 286 self.current_dataset.x_bins = x_bins287 self.current_dataset.y_bins = y_bins288 289 self.current_dataset.xmin = xmin290 self.current_dataset.xmax = xmax291 self.current_dataset.ymin = ymin292 self.current_dataset.ymax = ymax293 294 self.current_datainfo.source.wavelength = wavelength295 307 output.err_data = err_data 
308 309 output.qx_data = qx_data 310 output.qy_data = qy_data 311 output.q_data = q_data 312 output.mask = mask 313 314 output.x_bins = x_bins 315 output.y_bins = y_bins 316 317 output.xmin = xmin 318 output.xmax = xmax 319 output.ymin = ymin 320 output.ymax = ymax 321 322 output.source.wavelength = wavelength 323 296 324 # Store pixel size in mm 297 self.current_datainfo.detector[0].pixel_size.x = pixel_x298 self.current_datainfo.detector[0].pixel_size.y = pixel_y299 325 detector.pixel_size.x = pixel_x 326 detector.pixel_size.y = pixel_y 327 300 328 # Store the sample to detector distance 301 self.current_datainfo.detector[0].distance = distance302 329 detector.distance = distance 330 303 331 # optional data: if all of dq data == 0, do not pass to output 304 332 if len(dqx_data) == len(qx_data) and dqx_data.any() != 0: … … 312 340 cos_th = qx_data / diag 313 341 sin_th = qy_data / diag 314 self.current_dataset.dqx_data = np.sqrt((dqx_data * cos_th) * \342 output.dqx_data = np.sqrt((dqx_data * cos_th) * \ 315 343 (dqx_data * cos_th) \ 316 344 + (dqy_data * sin_th) * \ 317 345 (dqy_data * sin_th)) 318 self.current_dataset.dqy_data = np.sqrt((dqx_data * sin_th) * \346 output.dqy_data = np.sqrt((dqx_data * sin_th) * \ 319 347 (dqx_data * sin_th) \ 320 348 + (dqy_data * cos_th) * \ 321 349 (dqy_data * cos_th)) 322 350 else: 323 self.current_dataset.dqx_data = dqx_data324 self.current_dataset.dqy_data = dqy_data351 output.dqx_data = dqx_data 352 output.dqy_data = dqy_data 325 353 326 354 # Units of axes 327 self.current_dataset.xaxis("\\rm{Q_{x}}", 'A^{-1}') 328 self.current_dataset.yaxis("\\rm{Q_{y}}", 'A^{-1}') 329 self.current_dataset.zaxis("\\rm{Intensity}", "cm^{-1}") 330 355 if data_conv_q is not None: 356 output.xaxis("\\rm{Q_{x}}", output.Q_unit) 357 output.yaxis("\\rm{Q_{y}}", output.Q_unit) 358 else: 359 output.xaxis("\\rm{Q_{x}}", 'A^{-1}') 360 output.yaxis("\\rm{Q_{y}}", 'A^{-1}') 361 if data_conv_i is not None: 362 output.zaxis("\\rm{Intensity}", output.I_unit) 363 else: 364 output.zaxis("\\rm{Intensity}", "cm^{-1}") 365 331 366 # Store loading process information 332 self.current_datainfo.meta_data['loader'] = self.type_name333 334 self.send_to_output()367 output.meta_data['loader'] = self.type_name 368 369 return output -
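Note: the dq block at the end of red2d_reader projects detector-frame resolutions onto each point's Q direction. A standalone version of that rotation; the epsilon guard against qx = qy = 0 is an addition for safety, not part of the reader:

    import numpy as np

    def rotate_dq(qx, qy, dqx, dqy):
        # cos/sin of each point's azimuthal angle in the Qx-Qy plane.
        diag = np.sqrt(qx * qx + qy * qy) + np.finfo(float).tiny
        cos_th, sin_th = qx / diag, qy / diag
        dqx_q = np.sqrt((dqx * cos_th) ** 2 + (dqy * sin_th) ** 2)
        dqy_q = np.sqrt((dqx * sin_th) ** 2 + (dqy * cos_th) ** 2)
        return dqx_q, dqy_q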
src/sas/sascalc/dataloader/readers/sesans_reader.py
rbe43448 r149b8f6 8 8 import numpy as np 9 9 import os 10 from sas.sascalc.dataloader.file_reader_base_class import FileReader 11 from sas.sascalc.dataloader.data_info import plottable_1D, DataInfo 12 from sas.sascalc.dataloader.loader_exceptions import FileContentsException, DataReaderException 10 from sas.sascalc.dataloader.data_info import Data1D 13 11 14 12 # Check whether we have a converter available … … 20 18 _ZERO = 1e-16 21 19 22 class Reader(FileReader): 20 21 class Reader: 23 22 """ 24 23 Class to load sesans files (6 columns). … … 27 26 type_name = "SESANS" 28 27 29 # #Wildcards28 # Wildcards 30 29 type = ["SESANS files (*.ses)|*.ses", 31 30 "SESANS files (*..sesans)|*.sesans"] … … 36 35 allow_all = True 37 36 38 def get_file_contents(self): 39 self.current_datainfo = DataInfo() 40 self.current_dataset = plottable_1D(np.array([]), np.array([])) 41 self.current_datainfo.isSesans = True 42 self.output = [] 37 def read(self, path): 38 """ 39 Load data file 43 40 44 line = self.f_open.readline() 45 params = {} 46 while not line.startswith("BEGIN_DATA"): 47 terms = line.split() 48 if len(terms) >= 2: 49 params[terms[0]] = " ".join(terms[1:]) 50 line = self.f_open.readline() 51 self.params = params 41 :param path: file path 52 42 53 if "FileFormatVersion" not in self.params: 54 raise FileContentsException("SES file missing FileFormatVersion") 55 if float(self.params["FileFormatVersion"]) >= 2.0: 56 raise FileContentsException("SASView only supports SES version 1") 43 :return: SESANSData1D object, or None 57 44 58 if "SpinEchoLength_unit" not in self.params: 59 raise FileContentsException("SpinEchoLength has no units") 60 if "Wavelength_unit" not in self.params: 61 raise FileContentsException("Wavelength has no units") 62 if params["SpinEchoLength_unit"] != params["Wavelength_unit"]: 63 raise FileContentsException("The spin echo data has rudely used " 64 "different units for the spin echo length " 65 "and the wavelength. 
While sasview could " 66 "handle this instance, it is a violation " 67 "of the file format and will not be " 68 "handled by other software.") 45 :raise RuntimeError: when the file can't be opened 46 :raise ValueError: when the length of the data vectors are inconsistent 47 """ 48 if os.path.isfile(path): 49 basename = os.path.basename(path) 50 _, extension = os.path.splitext(basename) 51 if not (self.allow_all or extension.lower() in self.ext): 52 raise RuntimeError( 53 "{} has an unrecognized file extension".format(path)) 54 else: 55 raise RuntimeError("{} is not a file".format(path)) 56 with open(path, 'r') as input_f: 57 line = input_f.readline() 58 params = {} 59 while not line.startswith("BEGIN_DATA"): 60 terms = line.split() 61 if len(terms) >= 2: 62 params[terms[0]] = " ".join(terms[1:]) 63 line = input_f.readline() 64 self.params = params 69 65 70 headers = self.f_open.readline().split() 66 if "FileFormatVersion" not in self.params: 67 raise RuntimeError("SES file missing FileFormatVersion") 68 if float(self.params["FileFormatVersion"]) >= 2.0: 69 raise RuntimeError("SASView only supports SES version 1") 71 70 72 self._insist_header(headers, "SpinEchoLength") 73 self._insist_header(headers, "Depolarisation") 74 self._insist_header(headers, "Depolarisation_error") 75 self._insist_header(headers, "Wavelength") 71 if "SpinEchoLength_unit" not in self.params: 72 raise RuntimeError("SpinEchoLength has no units") 73 if "Wavelength_unit" not in self.params: 74 raise RuntimeError("Wavelength has no units") 75 if params["SpinEchoLength_unit"] != params["Wavelength_unit"]: 76 raise RuntimeError("The spin echo data has rudely used " 77 "different units for the spin echo length " 78 "and the wavelength. While sasview could " 79 "handle this instance, it is a violation " 80 "of the file format and will not be " 81 "handled by other software.") 76 82 77 data = np.loadtxt(self.f_open)83 headers = input_f.readline().split() 78 84 79 if data.shape[1] != len(headers): 80 raise FileContentsException( 81 "File has {} headers, but {} columns".format( 82 len(headers), 83 data.shape[1])) 85 self._insist_header(headers, "SpinEchoLength") 86 self._insist_header(headers, "Depolarisation") 87 self._insist_header(headers, "Depolarisation_error") 88 self._insist_header(headers, "Wavelength") 84 89 85 if not data.size: 86 raise FileContentsException("{} is empty".format(path)) 87 x = data[:, headers.index("SpinEchoLength")] 88 if "SpinEchoLength_error" in headers: 89 dx = data[:, headers.index("SpinEchoLength_error")] 90 else: 91 dx = x * 0.05 92 lam = data[:, headers.index("Wavelength")] 93 if "Wavelength_error" in headers: 94 dlam = data[:, headers.index("Wavelength_error")] 95 else: 96 dlam = lam * 0.05 97 y = data[:, headers.index("Depolarisation")] 98 dy = data[:, headers.index("Depolarisation_error")] 90 data = np.loadtxt(input_f) 99 91 100 lam_unit = self._unit_fetch("Wavelength") 101 x, x_unit = self._unit_conversion(x, "A", 102 self._unit_fetch( 103 "SpinEchoLength")) 104 dx, dx_unit = self._unit_conversion( 105 dx, lam_unit, 106 self._unit_fetch("SpinEchoLength")) 107 dlam, dlam_unit = self._unit_conversion( 108 dlam, lam_unit, 109 self._unit_fetch("Wavelength")) 110 y_unit = self._unit_fetch("Depolarisation") 92 if data.shape[1] != len(headers): 93 raise RuntimeError( 94 "File has {} headers, but {} columns".format( 95 len(headers), 96 data.shape[1])) 111 97 112 self.current_dataset.x = x 113 self.current_dataset.y = y 114 self.current_dataset.lam = lam 115 self.current_dataset.dy = dy 116 
self.current_dataset.dx = dx 117 self.current_dataset.dlam = dlam 118 self.current_datainfo.isSesans = True 98 if not data.size: 99 raise RuntimeError("{} is empty".format(path)) 100 x = data[:, headers.index("SpinEchoLength")] 101 if "SpinEchoLength_error" in headers: 102 dx = data[:, headers.index("SpinEchoLength_error")] 103 else: 104 dx = x * 0.05 105 lam = data[:, headers.index("Wavelength")] 106 if "Wavelength_error" in headers: 107 dlam = data[:, headers.index("Wavelength_error")] 108 else: 109 dlam = lam * 0.05 110 y = data[:, headers.index("Depolarisation")] 111 dy = data[:, headers.index("Depolarisation_error")] 119 112 120 self.current_datainfo._yunit = y_unit 121 self.current_datainfo._xunit = x_unit 122 self.current_datainfo.source.wavelength_unit = lam_unit 123 self.current_datainfo.source.wavelength = lam 124 self.current_datainfo.filename = os.path.basename(self.f_open.name) 125 self.current_dataset.xaxis(r"\rm{z}", x_unit) 126 # Adjust label to ln P/(lam^2 t), remove lam column refs 127 self.current_dataset.yaxis(r"\rm{ln(P)/(t \lambda^2)}", y_unit) 128 # Store loading process information 129 self.current_datainfo.meta_data['loader'] = self.type_name 130 self.current_datainfo.sample.name = params["Sample"] 131 self.current_datainfo.sample.ID = params["DataFileTitle"] 132 self.current_datainfo.sample.thickness = self._unit_conversion( 133 float(params["Thickness"]), "cm", 134 self._unit_fetch("Thickness"))[0] 113 lam_unit = self._unit_fetch("Wavelength") 114 x, x_unit = self._unit_conversion(x, "A", 115 self._unit_fetch( 116 "SpinEchoLength")) 117 dx, dx_unit = self._unit_conversion( 118 dx, lam_unit, 119 self._unit_fetch("SpinEchoLength")) 120 dlam, dlam_unit = self._unit_conversion( 121 dlam, lam_unit, 122 self._unit_fetch("Wavelength")) 123 y_unit = self._unit_fetch("Depolarisation") 135 124 136 self.current_datainfo.sample.zacceptance = ( 137 float(params["Theta_zmax"]), 138 self._unit_fetch("Theta_zmax")) 125 output = Data1D(x=x, y=y, lam=lam, dy=dy, dx=dx, dlam=dlam, 126 isSesans=True) 139 127 140 self.current_datainfo.sample.yacceptance = ( 141 float(params["Theta_ymax"]), 142 self._unit_fetch("Theta_ymax")) 128 output.y_unit = y_unit 129 output.x_unit = x_unit 130 output.source.wavelength_unit = lam_unit 131 output.source.wavelength = lam 132 self.filename = output.filename = basename 133 output.xaxis(r"\rm{z}", x_unit) 134 # Adjust label to ln P/(lam^2 t), remove lam column refs 135 output.yaxis(r"\rm{ln(P)/(t \lambda^2)}", y_unit) 136 # Store loading process information 137 output.meta_data['loader'] = self.type_name 138 output.sample.name = params["Sample"] 139 output.sample.ID = params["DataFileTitle"] 140 output.sample.thickness = self._unit_conversion( 141 float(params["Thickness"]), "cm", 142 self._unit_fetch("Thickness"))[0] 143 143 144 self.send_to_output() 144 output.sample.zacceptance = ( 145 float(params["Theta_zmax"]), 146 self._unit_fetch("Theta_zmax")) 147 148 output.sample.yacceptance = ( 149 float(params["Theta_ymax"]), 150 self._unit_fetch("Theta_ymax")) 151 return output 145 152 146 153 @staticmethod 147 154 def _insist_header(headers, name): 148 155 if name not in headers: 149 raise FileContentsException(156 raise RuntimeError( 150 157 "Missing {} column in spin echo data".format(name)) 151 158 -
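Note: the .ses layout handled above is a flat key/value header terminated by a BEGIN_DATA marker, one line of column names, then a numeric table, so the whole parse reduces to a loop plus numpy.loadtxt. A sketch with the unit handling and validation stripped out:

    import numpy as np

    def read_ses(path):
        with open(path) as fh:
            params = {}
            line = fh.readline()
            while not line.startswith("BEGIN_DATA"):
                terms = line.split()
                if len(terms) >= 2:
                    params[terms[0]] = " ".join(terms[1:])
                line = fh.readline()
            headers = fh.readline().split()
            data = np.loadtxt(fh)    # loadtxt accepts an open handle
        spin_echo = data[:, headers.index("SpinEchoLength")]
        depol = data[:, headers.index("Depolarisation")]
        return params, spin_echo, depol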
src/sas/sascalc/dataloader/readers/xml_reader.py
rfafe52a r235f514 18 18 from lxml import etree 19 19 from lxml.builder import E 20 from sas.sascalc.dataloader.file_reader_base_class import FileReader21 20 22 21 logger = logging.getLogger(__name__) … … 24 23 PARSER = etree.ETCompatXMLParser(remove_comments=True, remove_pis=False) 25 24 26 class XMLreader( FileReader):25 class XMLreader(): 27 26 """ 28 27 Generic XML read and write class. Mostly helper functions. … … 75 74 except etree.XMLSyntaxError as xml_error: 76 75 logger.info(xml_error) 77 raise xml_error78 76 except Exception: 79 77 self.xml = None … … 93 91 except etree.XMLSyntaxError as xml_error: 94 92 logger.info(xml_error) 95 raise xml_error 96 except Exception as exc: 93 except Exception: 97 94 self.xml = None 98 95 self.xmldoc = None 99 96 self.xmlroot = None 100 raise exc101 97 102 98 def set_schema(self, schema): … … 210 206 Create a unique key value for any dictionary to prevent overwriting 211 207 Recurses until a unique key value is found. 212 208 213 209 :param dictionary: A dictionary with any number of entries 214 210 :param name: The index of the item to be added to dictionary … … 226 222 Create an element tree for processing from an etree element 227 223 228 :param root: etree Element(s) 224 :param root: etree Element(s) 229 225 """ 230 226 return etree.ElementTree(root) -
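Note: the module-level PARSER above is what lets processing instructions survive into the CanSAS metadata: comments are stripped at parse time while PIs are kept. Reproducing just that configuration, with a hypothetical file name:

    from lxml import etree

    parser = etree.ETCompatXMLParser(remove_comments=True,
                                     remove_pis=False)
    tree = etree.parse("example.xml", parser)
    root = tree.getroot()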
src/sas/sasgui/guiframe/local_perspectives/data_loader/data_loader.py
rdcb91cf r235f514 11 11 12 12 from sas.sascalc.dataloader.loader import Loader 13 from sas.sascalc.dataloader.loader_exceptions import NoKnownLoaderException14 13 from sas.sasgui.guiframe.plugin_base import PluginBase 15 14 from sas.sasgui.guiframe.events import StatusEvent … … 42 41 APPLICATION_WLIST = config.APPLICATION_WLIST 43 42 44 45 43 class Plugin(PluginBase): 46 44 … … 58 56 """ 59 57 # menu for data files 58 menu_list = [] 60 59 data_file_hint = "load one or more data in the application" 61 60 menu_list = [('&Load Data File(s)', data_file_hint, self.load_data)] 62 61 gui_style = self.parent.get_style() 63 62 style = gui_style & GUIFRAME.MULTIPLE_APPLICATIONS 63 style1 = gui_style & GUIFRAME.DATALOADER_ON 64 64 if style == GUIFRAME.MULTIPLE_APPLICATIONS: 65 65 # menu for data from folder … … 102 102 self.get_data(file_list) 103 103 104 104 105 def can_load_data(self): 105 106 """ … … 107 108 """ 108 109 return True 110 109 111 110 112 def _load_folder(self, event): … … 138 140 """ 139 141 if error is not None or str(error).strip() != "": 140 dial = wx.MessageDialog(self.parent, str(error), 141 'Error Loading File', 142 dial = wx.MessageDialog(self.parent, str(error), 'Error Loading File', 142 143 wx.OK | wx.ICON_EXCLAMATION) 143 144 dial.ShowModal() … … 148 149 """ 149 150 if os.path.isdir(path): 150 return [os.path.join(os.path.abspath(path), filename) for filename 151 in os.listdir(path)] 151 return [os.path.join(os.path.abspath(path), filename) for filename in os.listdir(path)] 152 152 153 153 def _process_data_and_errors(self, item, p_file, output, message): … … 178 178 for p_file in path: 179 179 basename = os.path.basename(p_file) 180 # Skip files that start with a period181 if basename.startswith("."):182 msg = "The folder included a potential hidden file - %s." 
\183 % basename184 msg += " Do you wish to load this file as data?"185 msg_box = wx.MessageDialog(None, msg, 'Warning',186 wx.OK | wx.CANCEL)187 if msg_box.ShowModal() == wx.ID_CANCEL:188 continue189 180 _, extension = os.path.splitext(basename) 190 181 if extension.lower() in EXTENSIONS: … … 222 213 info="info") 223 214 224 except NoKnownLoaderException as e: 225 exception_occurred = True 226 logger.error(e.message) 227 228 error_message = "Loading data failed!\n" + e.message 229 self.load_update(output=None, message=e.message, info="warning") 230 231 except Exception as e: 232 exception_occurred = True 233 logger.error(e.message) 234 235 file_err = "The Data file you selected could not be " 236 file_err += "loaded.\nMake sure the content of your file" 237 file_err += " is properly formatted.\n" 238 file_err += "When contacting the SasView team, mention the" 239 file_err += " following:\n" 240 file_err += e.message 241 file_errors[basename] = [file_err] 215 except: 216 logger.error(sys.exc_value) 217 218 error_message = "The Data file you selected could not be loaded.\n" 219 error_message += "Make sure the content of your file" 220 error_message += " is properly formatted.\n" 221 error_message += "When contacting the SasView team, mention the" 222 error_message += " following:\n" 223 error_message += "Error: " + str(sys.exc_info()[1]) 224 file_errors[basename] = [error_message] 225 self.load_update(output=output, message=error_message, info="warning") 242 226 243 227 if len(file_errors) > 0: … … 249 233 error_message += message + "\n" 250 234 error_message += "\n" 251 if not exception_occurred: # Some data loaded but with errors 252 self.load_update(output=output, message=error_message, info="error") 253 254 if not exception_occurred: # Everything loaded as expected 255 self.load_complete(output=output, message="Loading data complete!", 256 info="info") 257 else: 258 self.load_complete(output=None, message=error_message, info="error") 259 235 self.load_update(output=output, message=error_message, info="error") 236 237 self.load_complete(output=output, message="Loading data complete!", 238 info="info") 260 239 261 240 def load_update(self, output=None, message="", info="warning"): … … 266 245 wx.PostEvent(self.parent, StatusEvent(status=message, info=info, 267 246 type="progress")) 268 269 def load_complete(self, output, message="", info="warning"): 270 """ 271 post message to status bar and return list of data 272 """ 273 wx.PostEvent(self.parent, StatusEvent(status=message, info=info, 247 def load_complete(self, output, message="", error_message="", path=None, 248 info="warning"): 249 """ 250 post message to status bar and return list of data 251 """ 252 wx.PostEvent(self.parent, StatusEvent(status=message, 253 info=info, 274 254 type="stop")) 275 if output is not None: 276 self.parent.add_data(data_list=output) 255 # if error_message != "": 256 # self.load_error(error_message) 257 self.parent.add_data(data_list=output) -
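Stripped of the wx plumbing, both versions of get_data() above follow the same pattern: load each file, collect per-file error messages, and report them in one batch at the end. A sketch of that pattern, assuming only that the loader exposes load(path) like sas.sascalc.dataloader.loader.Loader (load_batch itself is hypothetical):

    import os

    def load_batch(loader, paths):
        # Accumulate successfully loaded data and per-file errors so a
        # single summary can be shown after the whole batch.
        output, file_errors = [], {}
        for p_file in paths:
            basename = os.path.basename(p_file)
            try:
                item = loader.load(p_file)
                output.extend(item if isinstance(item, list) else [item])
            except Exception as exc:
                file_errors[basename] = str(exc)
        return output, file_errors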
src/sas/sasgui/perspectives/file_converter/converter_panel.py
r19296dc red9f872 24 24 from sas.sascalc.file_converter.otoko_loader import OTOKOLoader 25 25 from sas.sascalc.file_converter.bsl_loader import BSLLoader 26 from sas.sascalc.file_converter.ascii2d_loader import ASCII2DLoader27 26 from sas.sascalc.file_converter.nxcansas_writer import NXcanSASWriter 28 27 from sas.sascalc.dataloader.data_info import Detector … … 36 35 _STATICBOX_WIDTH = 410 37 36 _BOX_WIDTH = 200 38 PANEL_SIZE = 52037 PANEL_SIZE = 480 39 38 FONT_VARIANT = 0 40 39 else: … … 42 41 _STATICBOX_WIDTH = 430 43 42 _BOX_WIDTH = 200 44 PANEL_SIZE = 5 4043 PANEL_SIZE = 500 45 44 FONT_VARIANT = 1 46 45 … … 353 352 w.write(frame_data, output_path) 354 353 355 def convert_2d_data(self, dataset):356 metadata = self.get_metadata()357 for key, value in metadata.iteritems():358 setattr(dataset[0], key, value)359 360 w = NXcanSASWriter()361 w.write(dataset, self.output.GetPath())362 363 354 def on_convert(self, event): 364 355 """Called when the Convert button is clicked""" … … 376 367 qdata, iqdata = self.extract_otoko_data(self.q_input.GetPath()) 377 368 self.convert_1d_data(qdata, iqdata) 378 elif self.data_type == 'ascii2d':379 loader = ASCII2DLoader(self.iq_input.GetPath())380 data = loader.load()381 dataset = [data] # ASCII 2D only ever contains 1 frame382 self.convert_2d_data(dataset)383 369 else: # self.data_type == 'bsl' 384 370 dataset = self.extract_bsl_data(self.iq_input.GetPath()) … … 386 372 # Cancelled by user 387 373 return 388 self.convert_2d_data(dataset) 389 374 375 metadata = self.get_metadata() 376 for key, value in metadata.iteritems(): 377 setattr(dataset[0], key, value) 378 379 w = NXcanSASWriter() 380 w.write(dataset, self.output.GetPath()) 390 381 except Exception as ex: 391 382 msg = str(ex) … … 408 399 def validate_inputs(self): 409 400 msg = "You must select a" 410 if self.q_input.GetPath() == '' and self.data_type != 'bsl' \ 411 and self.data_type != 'ascii2d': 401 if self.q_input.GetPath() == '' and self.data_type != 'bsl': 412 402 msg += " Q Axis input file." 413 403 elif self.iq_input.GetPath() == '': … … 482 472 dtype = event.GetEventObject().GetName() 483 473 self.data_type = dtype 484 if dtype == 'bsl' or dtype == 'ascii2d':474 if dtype == 'bsl': 485 475 self.q_input.SetPath("") 486 476 self.q_input.Disable() … … 510 500 511 501 instructions = ( 512 "If converting a 1D dataset, select linked single-column ASCII files " 513 "containing the Q-axis and intensity-axis data, or a 1D BSL/OTOKO file." 514 " If converting 2D data, select an ASCII file in the ISIS 2D file " 515 "format, or a 2D BSL/OTOKO file. Choose where to save the converted " 516 "file and click convert.\n" 517 "One dimensional ASCII and BSL/OTOKO files can be converted to CanSAS " 518 "(XML) or NXcanSAS (HDF5) formats. Two dimensional datasets can only be" 519 " converted to the NXcanSAS format.\n" 520 "Metadata can also be optionally added to the output file." 502 "Select linked single column 1D ASCII files containing the Q-axis and " 503 "Intensity-axis data, or 1D BSL/OTOKO files, or a 2D BSL/OTOKO file, " 504 "then choose where to save the converted file, and click Convert.\n" 505 "1D ASCII and BSL/OTOKO files can be converted to CanSAS (XML) or " 506 "NXcanSAS (HDF5) formats. 2D BSL/OTOKO files can only be converted to " 507 "the NXcanSAS format.\n" 508 "Metadata can be optionally added for the CanSAS XML format." 
521 509 ) 522 510 … … 538 526 wx.ALIGN_CENTER_VERTICAL, 5) 539 527 radio_sizer = wx.BoxSizer(wx.HORIZONTAL) 540 ascii_btn = wx.RadioButton(self, -1, "ASCII 1D", name="ascii",528 ascii_btn = wx.RadioButton(self, -1, "ASCII", name="ascii", 541 529 style=wx.RB_GROUP) 542 530 ascii_btn.Bind(wx.EVT_RADIOBUTTON, self.datatype_changed) 543 531 radio_sizer.Add(ascii_btn) 544 ascii2d_btn = wx.RadioButton(self, -1, "ASCII 2D", name="ascii2d")545 ascii2d_btn.Bind(wx.EVT_RADIOBUTTON, self.datatype_changed)546 radio_sizer.Add(ascii2d_btn)547 532 otoko_btn = wx.RadioButton(self, -1, "BSL 1D", name="otoko") 548 533 otoko_btn.Bind(wx.EVT_RADIOBUTTON, self.datatype_changed) 549 534 radio_sizer.Add(otoko_btn) 535 input_grid.Add(radio_sizer, (y,1), (1,1), wx.ALL, 5) 550 536 bsl_btn = wx.RadioButton(self, -1, "BSL 2D", name="bsl") 551 537 bsl_btn.Bind(wx.EVT_RADIOBUTTON, self.datatype_changed) 552 538 radio_sizer.Add(bsl_btn) 553 input_grid.Add(radio_sizer, (y,1), (1,1), wx.ALL, 5)554 539 y += 1 555 540 … … 564 549 y += 1 565 550 566 iq_label = wx.StaticText(self, -1, "Intensity Data: ")551 iq_label = wx.StaticText(self, -1, "Intensity-Axis Data: ") 567 552 input_grid.Add(iq_label, (y,0), (1,1), wx.ALIGN_CENTER_VERTICAL, 5) 568 553 … … 662 647 663 648 def __init__(self, parent=None, title='File Converter', base=None, 664 manager=None, size=(PANEL_SIZE * 0.96, PANEL_SIZE * 0.9),649 manager=None, size=(PANEL_SIZE * 1.05, PANEL_SIZE / 1.1), 665 650 *args, **kwargs): 666 651 kwargs['title'] = title -
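The left-hand side of the converter_panel hunk factors the 2D write path into convert_2d_data(): copy the user-entered metadata onto the first frame, then hand the dataset to the NXcanSAS writer. The same idea in isolation (iteritems() in the diff is Python 2; items() is used below, the writer is assumed to expose write(dataset, path) as NXcanSASWriter does, and the function name is illustrative):

    def apply_metadata_and_write(dataset, metadata, output_path, writer):
        # Attach each metadata field to the first frame, then write the
        # whole dataset with an NXcanSAS-style writer.
        for key, value in metadata.items():
            setattr(dataset[0], key, value)
        writer.write(dataset, output_path)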
src/sas/sasgui/perspectives/file_converter/file_converter.py
r94e3572 r463e7ffc 25 25 Returns a set of menu entries 26 26 """ 27 help_txt = "Convert ASCII or BSL/OTOKO data to CanSAS or NXcanSAS formats"27 help_txt = "Convert single column ASCII data to CanSAS format" 28 28 return [("File Converter", help_txt, self.on_file_converter)] 29 29 -
src/sas/sasgui/perspectives/file_converter/media/file_converter_help.rst
r59decb81 rd73998c 18 18 * Single-column ASCII data, with lines that end without any delimiter, 19 19 or with a comma or semi-colon delimiter 20 * 2D `ISIS ASCII formatted21 <http://www.isis.stfc.ac.uk/instruments/loq/software/22 colette-ascii-file-format-descriptions9808.pdf>`_ data23 20 * `1D BSL/OTOKO format 24 21 <http://www.diamond.ac.uk/Beamlines/Soft-Condensed-Matter/small-angle/ … … 39 36 40 37 1) Select the files containing your Q-axis and Intensity-axis data 41 2) Choose whether the files are in ASCII 1D, ASCII 2D, 1D BSL/OTOKO or 2D BSL/OTOKO format38 2) Choose whether the files are in ASCII, 1D BSL/OTOKO or 2D BSL/OTOKO format 42 39 3) Choose where you would like to save the converted file 43 40 4) Optionally, input some metadata such as sample size, detector name, etc … … 50 47 file, a dialog will appear asking which frames you would like converted. You 51 48 may enter a start frame, end frame & increment, and all frames in that subset 52 will be converted. For example, entering 0, 50 and 10 will convert frames 0, 49 will be converted. For example, entering 0, 50 and 10 will convert frames 0, 53 50 10, 20, 30, 40 & 50. 54 51 … … 59 56 single file, so there is an option in the *Select Frame* dialog to output each 60 57 frame to its own file. The single file option will produce one file with 61 multiple `<SASdata>` elements. The multiple file option will output a separate 62 file with one `<SASdata>` element for each frame. The frame number will also be 58 multiple `<SASdata>` elements. The multiple file option will output a separate 59 file with one `<SASdata>` element for each frame. The frame number will also be 63 60 appended to the file name. 64 61 65 The multiple file option is not available when exporting to NXcanSAS because 62 The multiple file option is not available when exporting to NXcanSAS because 66 63 the HDF5 format is more efficient at handling large amounts of data. 67 64 -
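The frame-selection rule described in the help text (start frame, end frame, increment, inclusive of the end frame) is simply an inclusive range; for the worked example in the text:

    >>> first, last, increment = 0, 50, 10
    >>> list(range(first, last + 1, increment))
    [0, 10, 20, 30, 40, 50]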
test/fileconverter/test/cansas1d.xml
rdcb91cf rc476457 26 26 <Shadowfactor><!-- Shadowfactor is optional --></Shadowfactor> 27 27 </Idata> 28 <Idata>29 <Q unit="1/A">0.04</Q>30 <I unit="1/cm">1002</I>31 <Idev unit="1/cm">4</Idev>32 <Qdev unit="1/A">0.02</Qdev>33 <Qmean unit="1/A"><!-- Qmean is optional --></Qmean>34 <Shadowfactor><!-- Shadowfactor is optional --></Shadowfactor>35 </Idata>36 <Idata>37 <Q unit="1/A">0.05</Q>38 <I unit="1/cm">1003</I>39 <Idev unit="1/cm">4</Idev>40 <Qdev unit="1/A">0.02</Qdev>41 <Qmean unit="1/A"><!-- Qmean is optional --></Qmean>42 <Shadowfactor><!-- Shadowfactor is optional --></Shadowfactor>43 </Idata>44 <Idata>45 <Q unit="1/A">0.06</Q>46 <I unit="1/cm">1004</I>47 <Idev unit="1/cm">4</Idev>48 <Qdev unit="1/A">0.02</Qdev>49 <Qmean unit="1/A"><!-- Qmean is optional --></Qmean>50 <Shadowfactor><!-- Shadowfactor is optional --></Shadowfactor>51 </Idata>52 28 </SASdata> 53 29 <SASsample name='my sample'> … … 71 47 Some text here 72 48 </details> 73 49 74 50 </SASsample> 75 51 <SASinstrument> -
test/fileconverter/test/utest_nxcansas_writer.py
r248ff73 r17c9436 1 1 from sas.sascalc.file_converter.nxcansas_writer import NXcanSASWriter 2 from sas.sascalc.dataloader.loader import Loader 2 from sas.sascalc.dataloader.readers.cansas_reader import Reader as XMLReader 3 from sas.sascalc.dataloader.readers.red2d_reader import Reader as DATReader 4 from sas.sascalc.dataloader.readers.cansas_reader_HDF5 import Reader as HDF5Reader 3 5 4 6 import os … … 12 14 13 15 def setUp(self): 14 self.loader = Loader()15 16 self.writer = NXcanSASWriter() 16 17 self.read_file_1d = "cansas1d.xml" … … 18 19 self.read_file_2d = "exp18_14_igor_2dqxqy.dat" 19 20 self.write_file_2d = "export2d.h5" 21 self.hdf5_reader = HDF5Reader() 20 22 21 self.data_1d = self.loader.load(self.read_file_1d)[0] 23 xml_reader = XMLReader() 24 self.data_1d = xml_reader.read(self.read_file_1d)[0] 22 25 23 self.data_2d = self.loader.load(self.read_file_2d)[0] 26 dat_reader = DATReader() 27 self.data_2d = dat_reader.read(self.read_file_2d) 24 28 self.data_2d.detector[0].name = '' 25 29 self.data_2d.source.radiation = 'neutron' … … 27 31 def test_write_1d(self): 28 32 self.writer.write([self.data_1d], self.write_file_1d) 29 data = self. loader.load(self.write_file_1d)33 data = self.hdf5_reader.read(self.write_file_1d) 30 34 self.assertTrue(len(data) == 1) 31 35 data = data[0] … … 37 41 def test_write_2d(self): 38 42 self.writer.write([self.data_2d], self.write_file_2d) 39 data = self. loader.load(self.write_file_2d)43 data = self.hdf5_reader.read(self.write_file_2d) 40 44 self.assertTrue(len(data) == 1) 41 45 data = data[0] -
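Whichever loading API is used, the tests above are round-trip checks: write a dataset, read it back, compare. The skeleton of that check, with writer and reader standing in for NXcanSASWriter and the relevant reader, and assuming a list-returning read() (assert_round_trip is illustrative):

    import numpy as np

    def assert_round_trip(writer, reader, data, path):
        # Write one dataset, read it back, and confirm the x/y arrays
        # survive the trip.
        writer.write([data], path)
        loaded = reader.read(path)
        assert len(loaded) == 1
        np.testing.assert_allclose(loaded[0].x, data.x)
        np.testing.assert_allclose(loaded[0].y, data.y)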
test/sascalculator/test/utest_slit_length_calculator.py
rb09095a r959eb01 5 5 import unittest 6 6 from sas.sascalc.dataloader.readers.ascii_reader import Reader 7 from sas.sascalc.calculator.slit_length_calculator import SlitlengthCalculator \ 8 as calculator 7 from sas.sascalc.calculator.slit_length_calculator import SlitlengthCalculator as calculator 9 8 9 import os.path 10 10 11 class SlitCalculator(unittest.TestCase):11 class slit_calculator(unittest.TestCase): 12 12 13 13 def setUp(self): … … 15 15 self.reader = Reader() 16 16 17 def test_slit _length_calculation(self):17 def test_slitlength_calculation(self): 18 18 """ 19 19 Test slit_length_calculator" 20 20 """ 21 list = self.reader.read("beam profile.DAT") 22 self.assertTrue(len(list) == 1) 23 f = list[0] 21 f = self.reader.read("beam profile.DAT") 24 22 cal = calculator() 25 23 cal.set_data(f.x,f.y) 26 slit _length = cal.calculate_slit_length()24 slitlength = cal.calculate_slit_length() 27 25 28 26 # The value "5.5858" was obtained by manual calculation. 29 27 # It turns out our slit length is FWHM/2 30 self.assertAlmostEqual(slit _length,5.5858/2, 3)28 self.assertAlmostEqual(slitlength,5.5858/2, 3) 31 29 32 30 -
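The test documents the relationship it asserts: the computed slit length is half the beam profile's FWHM. A crude plain-numpy estimate of that quantity (a linear scan for the half-maximum crossings, not the SlitlengthCalculator algorithm):

    import numpy as np

    def slit_length_estimate(x, y):
        # Slit length ~ FWHM / 2: find the first and last points at or
        # above half of the profile maximum and halve their separation.
        x = np.asarray(x, dtype=float)
        y = np.asarray(y, dtype=float)
        above = np.where(y >= y.max() / 2.0)[0]
        fwhm = x[above[-1]] - x[above[0]]
        return fwhm / 2.0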
test/sasdataloader/test/utest_abs_reader.py
rce8c7bd rc551bb3 5 5 6 6 import unittest 7 import math 7 8 import numpy as np 8 9 from sas.sascalc.dataloader.loader import Loader 10 from sas.sascalc.dataloader.readers.IgorReader import Reader as IgorReader 9 11 from sas.sascalc.dataloader.readers.abs_reader import Reader as AbsReader 12 from sas.sascalc.dataloader.readers.hfir1d_reader import Reader as HFIRReader 10 13 from sas.sascalc.dataloader.readers.danse_reader import Reader as DANSEReader 11 14 from sas.sascalc.dataloader.readers.cansas_reader import Reader as CANSASReader 12 15 13 16 from sas.sascalc.dataloader.data_info import Data1D 14 17 15 18 import os.path 16 19 17 20 18 21 class abs_reader(unittest.TestCase): 19 22 20 23 def setUp(self): 21 24 reader = AbsReader() … … 24 27 def test_abs_checkdata(self): 25 28 """ 26 Check the data content to see whether 29 Check the data content to see whether 27 30 it matches the specific file we loaded. 28 31 Check the units too to see whether the … … 32 35 self.assertEqual(self.data.filename, "jan08002.ABS") 33 36 self.assertEqual(self.data.meta_data['loader'], "IGOR 1D") 34 37 35 38 self.assertEqual(self.data.source.wavelength_unit, 'A') 36 39 self.assertEqual(self.data.source.wavelength, 6.0) 37 40 38 41 self.assertEqual(self.data.detector[0].distance_unit, 'mm') 39 42 self.assertEqual(self.data.detector[0].distance, 1000.0) 40 43 41 44 self.assertEqual(self.data.sample.transmission, 0.5667) 42 45 43 46 self.assertEqual(self.data.detector[0].beam_center_unit, 'mm') 44 center_x = 114.58*5.0 845 center_y = 64.22*5.0 847 center_x = 114.58*5.0 48 center_y = 64.22*5.0 46 49 self.assertEqual(self.data.detector[0].beam_center.x, center_x) 47 50 self.assertEqual(self.data.detector[0].beam_center.y, center_y) 48 51 49 52 self.assertEqual(self.data.y_unit, '1/cm') 50 53 self.assertEqual(self.data.x[0], 0.002618) … … 52 55 self.assertEqual(self.data.x[2], 0.01309) 53 56 self.assertEqual(self.data.x[126], 0.5828) 54 57 55 58 self.assertEqual(self.data.y[0], 0.02198) 56 59 self.assertEqual(self.data.y[1], 0.02201) 57 60 self.assertEqual(self.data.y[2], 0.02695) 58 61 self.assertEqual(self.data.y[126], 0.2958) 59 62 60 63 self.assertEqual(self.data.dy[0], 0.002704) 61 64 self.assertEqual(self.data.dy[1], 0.001643) 62 65 self.assertEqual(self.data.dy[2], 0.002452) 63 66 self.assertEqual(self.data.dy[126], 1) 64 67 65 68 def test_checkdata2(self): 66 69 self.assertEqual(self.data.dy[126], 1) … … 71 74 self.assertEqual(data.meta_data['loader'], "IGOR 1D") 72 75 73 class DanseReaderTests(unittest.TestCase): 76 77 class hfir_reader(unittest.TestCase): 74 78 75 79 def setUp(self): 76 reader = DANSEReader() 77 self.data = reader.read("MP_New.sans") 78 79 def test_checkdata(self): 80 """ 81 Check the data content to see whether 80 reader = HFIRReader() 81 self.data = reader.read("S2-30dq.d1d") 82 83 def test_hfir_checkdata(self): 84 """ 85 Check the data content to see whether 86 it matches the specific file we loaded. 
87 """ 88 self.assertEqual(self.data.filename, "S2-30dq.d1d") 89 # THIS FILE FORMAT IS CURRENTLY READ BY THE ASCII READER 90 self.assertEqual(self.data.meta_data['loader'], "HFIR 1D") 91 self.assertEqual(len(self.data.x), 134) 92 self.assertEqual(len(self.data.y), 134) 93 # Q I dI dQ 94 # Point 1: 0.003014 0.003010 0.000315 0.008249 95 self.assertEqual(self.data.x[1], 0.003014) 96 self.assertEqual(self.data.y[1], 0.003010) 97 self.assertEqual(self.data.dy[1], 0.000315) 98 self.assertEqual(self.data.dx[1], 0.008249) 99 100 def test_generic_loader(self): 101 # the generic loader should work as well 102 data = Loader().load("S2-30dq.d1d") 103 self.assertEqual(data.meta_data['loader'], "HFIR 1D") 104 105 106 class igor_reader(unittest.TestCase): 107 108 def setUp(self): 109 # the IgorReader should be able to read this filetype 110 # if it can't, stop here. 111 reader = IgorReader() 112 self.data = reader.read("MAR07232_rest.ASC") 113 114 def test_igor_checkdata(self): 115 """ 116 Check the data content to see whether 82 117 it matches the specific file we loaded. 83 118 Check the units too to see whether the … … 85 120 tests won't pass 86 121 """ 122 self.assertEqual(self.data.filename, "MAR07232_rest.ASC") 123 self.assertEqual(self.data.meta_data['loader'], "IGOR 2D") 124 125 self.assertEqual(self.data.source.wavelength_unit, 'A') 126 self.assertEqual(self.data.source.wavelength, 8.4) 127 128 self.assertEqual(self.data.detector[0].distance_unit, 'mm') 129 self.assertEqual(self.data.detector[0].distance, 13705) 130 131 self.assertEqual(self.data.sample.transmission, 0.84357) 132 133 self.assertEqual(self.data.detector[0].beam_center_unit, 'mm') 134 center_x = (68.76 - 1)*5.0 135 center_y = (62.47 - 1)*5.0 136 self.assertEqual(self.data.detector[0].beam_center.x, center_x) 137 self.assertEqual(self.data.detector[0].beam_center.y, center_y) 138 139 self.assertEqual(self.data.I_unit, '1/cm') 140 # 3 points should be suffcient to check that the data is in column 141 # major order. 142 np.testing.assert_almost_equal(self.data.data[0:3], 143 [0.279783, 0.28951, 0.167634]) 144 np.testing.assert_almost_equal(self.data.qx_data[0:3], 145 [-0.01849072, -0.01821785, -0.01794498]) 146 np.testing.assert_almost_equal(self.data.qy_data[0:3], 147 [-0.01677435, -0.01677435, -0.01677435]) 148 149 def test_generic_loader(self): 150 # the generic loader should direct the file to IgorReader as well 151 data = Loader().load("MAR07232_rest.ASC") 152 self.assertEqual(data.meta_data['loader'], "IGOR 2D") 153 154 155 class danse_reader(unittest.TestCase): 156 157 def setUp(self): 158 reader = DANSEReader() 159 self.data = reader.read("MP_New.sans") 160 161 def test_checkdata(self): 162 """ 163 Check the data content to see whether 164 it matches the specific file we loaded. 165 Check the units too to see whether the 166 Data1D defaults changed. 
Otherwise the 167 tests won't pass 168 """ 87 169 self.assertEqual(self.data.filename, "MP_New.sans") 88 170 self.assertEqual(self.data.meta_data['loader'], "DANSE") 89 171 90 172 self.assertEqual(self.data.source.wavelength_unit, 'A') 91 173 self.assertEqual(self.data.source.wavelength, 7.5) 92 174 93 175 self.assertEqual(self.data.detector[0].distance_unit, 'mm') 94 176 self.assertAlmostEqual(self.data.detector[0].distance, 5414.99, 3) 95 177 96 178 self.assertEqual(self.data.detector[0].beam_center_unit, 'mm') 97 179 center_x = 68.74*5.0 … … 99 181 self.assertEqual(self.data.detector[0].beam_center.x, center_x) 100 182 self.assertEqual(self.data.detector[0].beam_center.y, center_y) 101 183 102 184 self.assertEqual(self.data.I_unit, '1/cm') 103 185 self.assertEqual(self.data.data[0], 1.57831) … … 114 196 self.assertEqual(data.meta_data['loader'], "DANSE") 115 197 116 198 117 199 class cansas_reader(unittest.TestCase): 118 200 119 201 def setUp(self): 120 202 reader = CANSASReader() … … 130 212 self.assertEqual(self.data.filename, "cansas1d.xml") 131 213 self._checkdata() 132 214 133 215 def _checkdata(self): 134 216 """ 135 Check the data content to see whether 217 Check the data content to see whether 136 218 it matches the specific file we loaded. 137 219 Check the units too to see whether the … … 141 223 self.assertEqual(self.data.run[0], "1234") 142 224 self.assertEqual(self.data.meta_data['loader'], "CanSAS XML 1D") 143 225 144 226 # Data 145 227 self.assertEqual(len(self.data.x), 2) … … 156 238 self.assertEqual(self.data.run_name['1234'], 'run name') 157 239 self.assertEqual(self.data.title, "Test title") 158 240 159 241 # Sample info 160 242 self.assertEqual(self.data.sample.ID, "SI600-new-long") … … 162 244 self.assertEqual(self.data.sample.thickness_unit, 'mm') 163 245 self.assertAlmostEqual(self.data.sample.thickness, 1.03) 164 246 165 247 self.assertAlmostEqual(self.data.sample.transmission, 0.327) 166 248 167 249 self.assertEqual(self.data.sample.temperature_unit, 'C') 168 250 self.assertEqual(self.data.sample.temperature, 0) … … 176 258 self.assertAlmostEqual(self.data.sample.orientation.y, 0.02, 6) 177 259 178 self.assertEqual(self.data.sample.details[0], "http://chemtools.chem.soton.ac.uk/projects/blog/blogs.php/bit_id/2720") 179 self.assertEqual(self.data.sample.details[1], "Some text here") 180 260 self.assertEqual(self.data.sample.details[0], "http://chemtools.chem.soton.ac.uk/projects/blog/blogs.php/bit_id/2720") 261 self.assertEqual(self.data.sample.details[1], "Some text here") 262 181 263 # Instrument info 182 264 self.assertEqual(self.data.instrument, "canSAS instrument") 183 265 184 266 # Source 185 267 self.assertEqual(self.data.source.radiation, "neutron") 186 268 187 269 self.assertEqual(self.data.source.beam_size_unit, "mm") 188 270 self.assertEqual(self.data.source.beam_size_name, "bm") 189 271 self.assertEqual(self.data.source.beam_size.x, 12) 190 272 self.assertEqual(self.data.source.beam_size.y, 13) 191 273 192 274 self.assertEqual(self.data.source.beam_shape, "disc") 193 275 194 276 self.assertEqual(self.data.source.wavelength_unit, "A") 195 277 self.assertEqual(self.data.source.wavelength, 6) 196 278 197 279 self.assertEqual(self.data.source.wavelength_max_unit, "nm") 198 280 self.assertAlmostEqual(self.data.source.wavelength_max, 1.0) … … 201 283 self.assertEqual(self.data.source.wavelength_spread_unit, "percent") 202 284 self.assertEqual(self.data.source.wavelength_spread, 14.3) 203 285 204 286 # Collimation 205 287 _found1 = False … … 207 289 
self.assertEqual(self.data.collimation[0].length, 123.) 208 290 self.assertEqual(self.data.collimation[0].name, 'test coll name') 209 291 210 292 for item in self.data.collimation[0].aperture: 211 293 self.assertEqual(item.size_unit,'mm') … … 221 303 and item.type == 'radius': 222 304 _found2 = True 223 305 224 306 if _found1 == False or _found2 == False: 225 raise RuntimeError, "Could not find all data %s %s" % (_found1, _found2) 226 307 raise RuntimeError, "Could not find all data %s %s" % (_found1, _found2) 308 227 309 # Detector 228 310 self.assertEqual(self.data.detector[0].name, "fictional hybrid") 229 311 self.assertEqual(self.data.detector[0].distance_unit, "mm") 230 312 self.assertEqual(self.data.detector[0].distance, 4150) 231 313 232 314 self.assertEqual(self.data.detector[0].orientation_unit, "degree") 233 315 self.assertAlmostEqual(self.data.detector[0].orientation.x, 1.0, 6) 234 316 self.assertEqual(self.data.detector[0].orientation.y, 0.0) 235 317 self.assertEqual(self.data.detector[0].orientation.z, 0.0) 236 318 237 319 self.assertEqual(self.data.detector[0].offset_unit, "m") 238 320 self.assertEqual(self.data.detector[0].offset.x, .001) 239 321 self.assertEqual(self.data.detector[0].offset.y, .002) 240 322 self.assertEqual(self.data.detector[0].offset.z, None) 241 323 242 324 self.assertEqual(self.data.detector[0].beam_center_unit, "mm") 243 325 self.assertEqual(self.data.detector[0].beam_center.x, 322.64) 244 326 self.assertEqual(self.data.detector[0].beam_center.y, 327.68) 245 327 self.assertEqual(self.data.detector[0].beam_center.z, None) 246 328 247 329 self.assertEqual(self.data.detector[0].pixel_size_unit, "mm") 248 330 self.assertEqual(self.data.detector[0].pixel_size.x, 5) 249 331 self.assertEqual(self.data.detector[0].pixel_size.y, 5) 250 332 self.assertEqual(self.data.detector[0].pixel_size.z, None) 251 333 252 334 # Process 253 335 _found_term1 = False … … 267 349 and float(t['value']) == 10.0): 268 350 _found_term1 = True 269 351 270 352 if _found_term1 == False or _found_term2 == False: 271 353 raise RuntimeError, "Could not find all process terms %s %s" % (_found_term1, _found_term2) 272 354 273 355 def test_writer(self): 274 356 r = CANSASReader() … … 282 364 if os.path.isfile(filename): 283 365 os.remove(filename) 284 366 285 367 def test_units(self): 286 368 """ … … 293 375 self.assertEqual(self.data.filename, filename) 294 376 self._checkdata() 295 377 296 378 def test_badunits(self): 297 379 """ … … 307 389 # This one should 308 390 self.assertAlmostEqual(self.data.sample.transmission, 0.327) 309 391 310 392 self.assertEqual(self.data.meta_data['loader'], "CanSAS XML 1D") 311 393 print(self.data.errors) … … 321 403 self.assertEqual(self.data.filename, filename) 322 404 self.assertEqual(self.data.run[0], "1234") 323 405 324 406 # Data 325 407 self.assertEqual(len(self.data.x), 2) -
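Several of the expected beam-centre values in these reader tests are built the same way: pixel coordinates times a 5 mm pixel size, with a 1-pixel origin offset subtracted for the IGOR 2D file (compare 114.58*5.0 for the ABS data with (68.76 - 1)*5.0 for the IGOR data). Written out as a helper, that convention would look like the following (the function and its name are purely illustrative):

    PIXEL_SIZE_MM = 5.0

    def beam_center_mm(px, py, one_based=False):
        # Convert a beam centre from detector pixels to mm; one_based
        # subtracts the 1-pixel origin offset used by the IGOR 2D data.
        offset = 1.0 if one_based else 0.0
        return (px - offset) * PIXEL_SIZE_MM, (py - offset) * PIXEL_SIZE_MM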
test/sasdataloader/test/utest_ascii.py
rad92c5a r959eb01 6 6 7 7 import unittest 8 from sas.sascalc.dataloader.loader import Loader 8 from sas.sascalc.dataloader.loader import Loader 9 10 import os.path 9 11 10 11 class ABSReaderTests(unittest.TestCase): 12 class abs_reader(unittest.TestCase): 12 13 13 14 def setUp(self): 14 15 self.loader = Loader() 15 self.f1_list = self.loader.load("ascii_test_1.txt") 16 self.f1 = self.f1_list[0] 17 self.f2_list = self.loader.load("ascii_test_2.txt") 18 self.f2 = self.f2_list[0] 19 self.f3_list = self.loader.load("ascii_test_3.txt") 20 self.f3 = self.f3_list[0] 21 self.f4_list = self.loader.load("ascii_test_4.abs") 22 self.f4 = self.f4_list[0] 23 self.f5_list = self.loader.load("ascii_test_5.txt") 24 self.f5 = self.f5_list[0] 25 16 26 17 def test_checkdata(self): 27 18 """ 28 19 Test .ABS file loaded as ascii 29 20 """ 21 f = self.loader.load("ascii_test_1.txt") 30 22 # The length of the data is 10 31 self.assertEqual(len( self.f1.x), 10)32 self.assertEqual( self.f1.x[0],0.002618)33 self.assertEqual( self.f1.x[9],0.0497)34 self.assertEqual( self.f1.x_unit, '1/A')35 self.assertEqual( self.f1.y_unit, '1/cm')23 self.assertEqual(len(f.x), 10) 24 self.assertEqual(f.x[0],0.002618) 25 self.assertEqual(f.x[9],0.0497) 26 self.assertEqual(f.x_unit, '1/A') 27 self.assertEqual(f.y_unit, '1/cm') 36 28 37 self.assertEqual( self.f1.meta_data['loader'],"ASCII")38 29 self.assertEqual(f.meta_data['loader'],"ASCII") 30 39 31 def test_truncated_1(self): 40 32 """ … … 46 38 as though it were the start of a footer). 47 39 """ 48 # The length of the data is 5 49 self.assertEqual(len(self.f2.x), 5) 50 self.assertEqual(self.f2.x[0],0.002618) 51 self.assertEqual(self.f2.x[4],0.02356) 52 40 # Test .ABS file loaded as ascii 41 f = self.loader.load("ascii_test_2.txt") 42 # The length of the data is 10 43 self.assertEqual(len(f.x), 5) 44 self.assertEqual(f.x[0],0.002618) 45 self.assertEqual(f.x[4],0.02356) 46 53 47 def test_truncated_2(self): 54 48 """ … … 58 52 reading at the first inconsitent line. 59 53 """ 54 # Test .ABS file loaded as ascii 55 f = self.loader.load("ascii_test_3.txt") 60 56 # The length of the data is 5 61 self.assertEqual(len( self.f3.x), 5)62 self.assertEqual( self.f3.x[0],0.002618)63 self.assertEqual( self.f3.x[4],0.02356)64 57 self.assertEqual(len(f.x), 5) 58 self.assertEqual(f.x[0],0.002618) 59 self.assertEqual(f.x[4],0.02356) 60 65 61 def test_truncated_3(self): 66 62 """ … … 70 66 reading at the last line of header. 71 67 """ 68 # Test .ABS file loaded as ascii 69 f = self.loader.load("ascii_test_4.abs") 72 70 # The length of the data is 5 73 self.assertEqual(len( self.f4.x), 5)74 self.assertEqual( self.f4.x[0],0.012654)75 self.assertEqual( self.f4.x[4],0.02654)76 71 self.assertEqual(len(f.x), 5) 72 self.assertEqual(f.x[0],0.012654) 73 self.assertEqual(f.x[4],0.02654) 74 77 75 def test_truncated_4(self): 78 76 """ … … 80 78 Only the last 5 2-col lines should be read. 81 79 """ 80 # Test .ABS file loaded as ascii 81 f = self.loader.load("ascii_test_5.txt") 82 82 # The length of the data is 5 83 self.assertEqual(len( self.f5.x), 5)84 self.assertEqual( self.f5.x[0],0.02879)85 self.assertEqual( self.f5.x[4],0.0497)86 83 self.assertEqual(len(f.x), 5) 84 self.assertEqual(f.x[0],0.02879) 85 self.assertEqual(f.x[4],0.0497) 86 87 87 def test_truncated_5(self): 88 88 """ 89 Test a 6-col ascii file with complex header where one of them has a 90 letter and many lines with 2 or 2 columns in the middle of the data91 section. 
Will be rejected because fewer than 5 lines.89 Test a 6-col ascii file with complex header where one of them has a letter and 90 many lines with 2 or 2 columns in the middle of the data section. 91 Only last four lines should be read. 92 92 """ 93 93 # Test .ABS file loaded as ascii … … 98 98 except: 99 99 self.assertEqual(f, None) 100 100 101 101 if __name__ == '__main__': 102 102 unittest.main() -
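The docstrings above pin down the ASCII reader's tolerance rules: skip header lines until numeric data starts, stop at the first inconsistent line, and reject files with fewer than five usable points. A behaviour sketch of those rules in plain Python (this models the documented behaviour, not the ascii_reader implementation):

    import numpy as np

    def parse_ascii_2col(lines, min_points=5):
        # Collect (x, y) pairs, treating comma/semicolon as delimiters;
        # non-numeric lines before the data are header, after it footer.
        x, y = [], []
        started = False
        for line in lines:
            toks = line.replace(',', ' ').replace(';', ' ').split()
            try:
                vals = [float(t) for t in toks[:2]]
                if len(vals) < 2:
                    raise ValueError
            except ValueError:
                if started:
                    break      # first inconsistent line ends the data
                continue       # still in the header
            started = True
            x.append(vals[0])
            y.append(vals[1])
        if len(x) < min_points:
            raise RuntimeError("fewer than %d data lines" % min_points)
        return np.array(x), np.array(y)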
test/sasdataloader/test/utest_averaging.py
r2a52b0e re123eb9 46 46 # respectively. 47 47 self.qmin = get_q(1.0, 1.0, detector.distance, source.wavelength) 48 48 49 self.qmax = get_q(49.5, 49.5, detector.distance, source.wavelength) 49 50 … … 103 104 def setUp(self): 104 105 filepath = os.path.join(os.path.dirname( 105 os.path.realpath(__file__)), 'MAR07232_rest. h5')106 self.data = Loader().load(filepath) [0]106 os.path.realpath(__file__)), 'MAR07232_rest.ASC') 107 self.data = Loader().load(filepath) 107 108 108 109 def test_ring(self): … … 119 120 filepath = os.path.join(os.path.dirname( 120 121 os.path.realpath(__file__)), 'ring_testdata.txt') 121 answer = Loader().load(filepath) [0]122 answer = Loader().load(filepath) 122 123 123 124 for i in range(r.nbins_phi - 1): … … 139 140 filepath = os.path.join(os.path.dirname( 140 141 os.path.realpath(__file__)), 'avg_testdata.txt') 141 answer = Loader().load(filepath) [0]142 answer = Loader().load(filepath) 142 143 for i in range(r.nbins_phi): 143 144 self.assertAlmostEqual(o.x[i], answer.x[i], 4) … … 175 176 filepath = os.path.join(os.path.dirname( 176 177 os.path.realpath(__file__)), 'slabx_testdata.txt') 177 answer = Loader().load(filepath) [0]178 answer = Loader().load(filepath) 178 179 for i in range(len(o.x)): 179 180 self.assertAlmostEqual(o.x[i], answer.x[i], 4) … … 194 195 filepath = os.path.join(os.path.dirname( 195 196 os.path.realpath(__file__)), 'slaby_testdata.txt') 196 answer = Loader().load(filepath) [0]197 answer = Loader().load(filepath) 197 198 for i in range(len(o.x)): 198 199 self.assertAlmostEqual(o.x[i], answer.x[i], 4) … … 203 204 """ 204 205 Test sector averaging I(phi) 205 When considering the whole azimuthal range (2pi), 206 When considering the whole azimuthal range (2pi), 206 207 the answer should be the same as ring averaging. 207 208 The test data was not generated by IGOR. … … 221 222 filepath = os.path.join(os.path.dirname( 222 223 os.path.realpath(__file__)), 'ring_testdata.txt') 223 answer = Loader().load(filepath) [0]224 answer = Loader().load(filepath) 224 225 for i in range(len(o.x)): 225 226 self.assertAlmostEqual(o.x[i], answer.x[i], 4) … … 239 240 filepath = os.path.join(os.path.dirname( 240 241 os.path.realpath(__file__)), 'sectorphi_testdata.txt') 241 answer = Loader().load(filepath) [0]242 answer = Loader().load(filepath) 242 243 for i in range(len(o.x)): 243 244 self.assertAlmostEqual(o.x[i], answer.x[i], 4) … … 257 258 filepath = os.path.join(os.path.dirname( 258 259 os.path.realpath(__file__)), 'sectorq_testdata.txt') 259 answer = Loader().load(filepath) [0]260 answer = Loader().load(filepath) 260 261 for i in range(len(o.x)): 261 262 self.assertAlmostEqual(o.x[i], answer.x[i], 4) … … 276 277 for i in range(len(o.x)): 277 278 self.assertAlmostEqual(o.x[i], expected_binning[i], 3) 278 279 279 280 # TODO: Test for Y values (o.y) 280 281 # print len(self.data.x_bins) … … 287 288 # xedges_width = (xedges[1] - xedges[0]) 288 289 # xedges_center = xedges[1:] - xedges_width / 2 289 290 290 291 # yedges_width = (yedges[1] - yedges[0]) 291 292 # yedges_center = yedges[1:] - yedges_width / 2 292 293 293 294 # print H.flatten().shape 294 295 # print o.y.shape 295 296 296 297 297 298 if __name__ == '__main__': -
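The averaging tests compare manipulation output against stored reference curves; the simplest of these, the ring average I(phi), amounts to binning intensities by azimuthal angle. A plain-numpy sketch of that idea (not the SasView Ring manipulation itself):

    import numpy as np

    def ring_average(qx, qy, intensity, nbins=36):
        # Mean intensity per azimuthal bin over the full 2*pi range.
        phi = np.arctan2(qy, qx) % (2 * np.pi)
        edges = np.linspace(0.0, 2 * np.pi, nbins + 1)
        idx = np.digitize(phi, edges) - 1
        counts = np.bincount(idx, minlength=nbins).astype(float)
        sums = np.bincount(idx, weights=intensity, minlength=nbins)
        centers = edges[:-1] + np.diff(edges) / 2
        with np.errstate(invalid='ignore'):
            return centers, sums / counts   # NaN for empty bins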
test/sasdataloader/test/utest_red2d_reader.py
r248ff73 r959eb01 7 7 import unittest 8 8 from sas.sascalc.dataloader.loader import Loader 9 9 10 10 import os.path 11 11 12 12 class abs_reader(unittest.TestCase): 13 13 14 14 def setUp(self): 15 15 self.loader = Loader() 16 16 17 17 def test_checkdata(self): 18 18 """ 19 19 Test .DAT file loaded as IGOR/DAT 2D Q_map 20 20 """ 21 f = self.loader.load("exp18_14_igor_2dqxqy.dat") [0]21 f = self.loader.load("exp18_14_igor_2dqxqy.dat") 22 22 # The length of the data is 10 23 23 self.assertEqual(len(f.qx_data), 36864) … … 26 26 self.assertEqual(f.Q_unit, '1/A') 27 27 self.assertEqual(f.I_unit, '1/cm') 28 28 29 29 self.assertEqual(f.meta_data['loader'],"IGOR/DAT 2D Q_map") 30 31 30 31 32 32 if __name__ == '__main__': 33 33 unittest.main() 34 -
test/sasinvariant/test/utest_use_cases.py
rb09095a r959eb01 6 6 import unittest 7 7 from sas.sascalc.dataloader.loader import Loader 8 8 9 from sas.sascalc.invariant import invariant 9 10 10 11 11 class Data1D: 12 12 pass 13 14 13 15 14 class TestLineFit(unittest.TestCase): 16 15 """ … … 18 17 """ 19 18 def setUp(self): 20 self.data_list = Loader().load("linefittest.txt") 21 self.data = self.data_list[0] 22 19 self.data = Loader().load("linefittest.txt") 20 23 21 def test_fit_line_data(self): 24 22 """ 25 23 Fit_Test_1: test linear fit, ax +b, without fixed 26 24 """ 27 25 28 26 # Create invariant object. Background and scale left as defaults. 29 27 fit = invariant.Extrapolator(data=self.data) 30 28 31 # 29 ##Without holding 32 30 p, dp = fit.fit(power=None) 33 31 … … 35 33 self.assertAlmostEquals(p[0], 2.3983,3) 36 34 self.assertAlmostEquals(p[1], 0.87833,3) 35 37 36 38 37 def test_fit_line_data_fixed(self): … … 40 39 Fit_Test_2: test linear fit, ax +b, with 'a' fixed 41 40 """ 42 41 43 42 # Create invariant object. Background and scale left as defaults. 44 43 fit = invariant.Extrapolator(data=self.data) 45 46 # 44 45 #With holding a = -power =4 47 46 p, dp = fit.fit(power=-4) 48 47 … … 50 49 self.assertAlmostEquals(p[0], 4) 51 50 self.assertAlmostEquals(p[1], -4.0676,3) 52 53 51 54 52 class TestLineFitNoweight(unittest.TestCase): 55 53 """ … … 57 55 """ 58 56 def setUp(self): 59 self.data_list = Loader().load("linefittest_no_weight.txt") 60 self.data = self.data_list[0] 61 57 self.data = Loader().load("linefittest_no_weight.txt") 58 62 59 def skip_test_fit_line_data_no_weight(self): 63 60 """ 64 61 Fit_Test_1: test linear fit, ax +b, without fixed 65 62 """ 66 63 67 64 # Create invariant object. Background and scale left as defaults. 68 65 fit = invariant.Extrapolator(data=self.data) 69 70 # 66 67 ##Without holding 71 68 p, dp = fit.fit(power=None) 72 69 … … 74 71 self.assertAlmostEquals(p[0], 2.4727,3) 75 72 self.assertAlmostEquals(p[1], 0.6,3) 73 76 74 77 75 def test_fit_line_data_fixed_no_weight(self): … … 79 77 Fit_Test_2: test linear fit, ax +b, with 'a' fixed 80 78 """ 81 79 82 80 # Create invariant object. Background and scale left as defaults. 83 81 fit = invariant.Extrapolator(data=self.data) 84 82 85 83 #With holding a = -power =4 86 84 p, dp = fit.fit(power=-4) … … 89 87 self.assertAlmostEquals(p[0], 4) 90 88 self.assertAlmostEquals(p[1], -7.8,3) 91 92 89 93 90 class TestInvPolySphere(unittest.TestCase): 94 91 """ … … 96 93 """ 97 94 def setUp(self): 98 self.data_list = Loader().load("PolySpheres.txt") 99 self.data = self.data_list[0] 100 95 self.data = Loader().load("PolySpheres.txt") 96 101 97 def test_wrong_data(self): 102 98 """ test receiving Data1D not of type loader""" 99 100 103 101 self.assertRaises(ValueError,invariant.InvariantCalculator, Data1D()) 104 102 105 103 def test_use_case_1(self): 106 104 """ … … 109 107 # Create invariant object. Background and scale left as defaults. 110 108 inv = invariant.InvariantCalculator(data=self.data) 111 109 112 110 # We have to be able to tell the InvariantCalculator whether we want the 113 111 # extrapolation or not. By default, when the user doesn't specify, we 114 # should compute Q* without extrapolation. That's what should be done 115 # in __init__. 116 112 # should compute Q* without extrapolation. That's what should be done in __init__. 113 117 114 # We call get_qstar() with no argument, which signifies that we do NOT 118 115 # want extrapolation. 119 116 qstar = inv.get_qstar() 120 117 121 118 # The volume fraction and surface use Q*. 
That means that the following 122 119 # methods should check that Q* has been computed. If not, it should … … 124 121 v, dv = inv.get_volume_fraction_with_error(contrast=2.6e-6) 125 122 s, ds = inv.get_surface_with_error(contrast=2.6e-6, porod_const=2) 126 123 127 124 # Test results 128 125 self.assertAlmostEquals(qstar, 7.48959e-5,2) 129 126 self.assertAlmostEquals(v, 0.005644689, 4) 130 127 self.assertAlmostEquals(s , 941.7452, 3) 131 128 132 129 def test_use_case_2(self): 133 130 """ 134 Invariant without extrapolation. Invariant, volume fraction and surface135 are given with errors.136 """ 137 # Create invariant object. Background and scale left as defaults. 138 inv = invariant.InvariantCalculator(data=self.data) 139 131 Invariant without extrapolation. Invariant, volume fraction and surface 132 are given with errors. 133 """ 134 # Create invariant object. Background and scale left as defaults. 135 inv = invariant.InvariantCalculator(data=self.data) 136 140 137 # Get the invariant with errors 141 138 qstar, qstar_err = inv.get_qstar_with_error() 142 139 143 140 # The volume fraction and surface use Q*. That means that the following 144 141 # methods should check that Q* has been computed. If not, it should … … 150 147 self.assertAlmostEquals(v, 0.005644689, 1) 151 148 self.assertAlmostEquals(s , 941.7452, 3) 152 149 150 153 151 def test_use_case_3(self): 154 152 """ … … 157 155 # Create invariant object. Background and scale left as defaults. 158 156 inv = invariant.InvariantCalculator(data=self.data) 159 157 160 158 # Set the extrapolation parameters for the low-Q range 161 159 162 160 # The npts parameter should have a good default. 163 161 # The range parameter should be 'high' or 'low' 164 162 # The function parameter should default to None. If it is None, 165 # the method should pick a good default 166 # (Guinier at low-Q and 1/q^4 at high-Q). 167 # The method should also check for consistency of the extrapolation 168 # and function parameters. For instance, you might not want to allow 169 # 'high' and 'guinier'. 163 # the method should pick a good default (Guinier at low-Q and 1/q^4 at high-Q). 164 # The method should also check for consistency of the extrapolation and function 165 # parameters. For instance, you might not want to allow 'high' and 'guinier'. 170 166 # The power parameter (not shown below) should default to 4. 171 167 inv.set_extrapolation(range='low', npts=10, function='guinier') 172 173 # The version of the call without error 174 # At this point, we could still compute Q* without extrapolation by 175 # callingget_qstar with arguments, or with extrapolation=None.168 169 # The version of the call without error 170 # At this point, we could still compute Q* without extrapolation by calling 171 # get_qstar with arguments, or with extrapolation=None. 176 172 qstar = inv.get_qstar(extrapolation='low') 177 173 178 174 # The version of the call with error 179 175 qstar, qstar_err = inv.get_qstar_with_error(extrapolation='low') … … 182 178 v, dv = inv.get_volume_fraction_with_error(contrast=2.6e-6) 183 179 s, ds = inv.get_surface_with_error(contrast=2.6e-6, porod_const=2) 184 180 185 181 # Test results 186 182 self.assertAlmostEquals(qstar, 7.49e-5, 1) 187 183 self.assertAlmostEquals(v, 0.005648401, 4) 188 184 self.assertAlmostEquals(s , 941.7452, 3) 189 185 190 186 def test_use_case_4(self): 191 187 """ … … 194 190 # Create invariant object. Background and scale left as defaults. 
195 191 inv = invariant.InvariantCalculator(data=self.data) 196 192 197 193 # Set the extrapolation parameters for the high-Q range 198 inv.set_extrapolation(range='high', npts=10, function='power_law', 199 power=4) 200 201 # The version of the call without error 202 # The function parameter defaults to None, then is picked to be 203 # 'power_law' for extrapolation='high' 194 inv.set_extrapolation(range='high', npts=10, function='power_law', power=4) 195 196 # The version of the call without error 197 # The function parameter defaults to None, then is picked to be 'power_law' for extrapolation='high' 204 198 qstar = inv.get_qstar(extrapolation='high') 205 199 206 200 # The version of the call with error 207 201 qstar, qstar_err = inv.get_qstar_with_error(extrapolation='high') … … 210 204 v, dv = inv.get_volume_fraction_with_error(contrast=2.6e-6) 211 205 s, ds = inv.get_surface_with_error(contrast=2.6e-6, porod_const=2) 212 206 213 207 # Test results 214 208 self.assertAlmostEquals(qstar, 7.49e-5,2) 215 209 self.assertAlmostEquals(v, 0.005952674, 3) 216 210 self.assertAlmostEquals(s , 941.7452, 3) 217 211 218 212 def test_use_case_5(self): 219 213 """ … … 222 216 # Create invariant object. Background and scale left as defaults. 223 217 inv = invariant.InvariantCalculator(data=self.data) 224 218 225 219 # Set the extrapolation parameters for the low- and high-Q ranges 226 220 inv.set_extrapolation(range='low', npts=10, function='guinier') 227 inv.set_extrapolation(range='high', npts=10, function='power_law', 228 power=4) 229 230 # The version of the call without error 231 # The function parameter defaults to None, then is picked to be 232 # 'power_law' for extrapolation='high' 221 inv.set_extrapolation(range='high', npts=10, function='power_law', power=4) 222 223 # The version of the call without error 224 # The function parameter defaults to None, then is picked to be 'power_law' for extrapolation='high' 233 225 qstar = inv.get_qstar(extrapolation='both') 234 226 … … 239 231 v, dv = inv.get_volume_fraction_with_error(contrast=2.6e-6) 240 232 s, ds = inv.get_surface_with_error(contrast=2.6e-6, porod_const=2) 241 233 242 234 # Test results 243 235 self.assertAlmostEquals(qstar, 7.88981e-5,2) 244 236 self.assertAlmostEquals(v, 0.005952674, 3) 245 237 self.assertAlmostEquals(s , 941.7452, 3) 246 238 247 239 def test_use_case_6(self): 248 240 """ … … 251 243 # Create invariant object. Background and scale left as defaults. 
252 244 inv = invariant.InvariantCalculator(data=self.data) 253 245 254 246 # Set the extrapolation parameters for the high-Q range 255 247 inv.set_extrapolation(range='low', npts=10, function='power_law', power=4) 256 248 257 249 # The version of the call without error 258 250 # The function parameter defaults to None, then is picked to be 'power_law' for extrapolation='high' 259 251 qstar = inv.get_qstar(extrapolation='low') 260 252 261 253 # The version of the call with error 262 254 qstar, qstar_err = inv.get_qstar_with_error(extrapolation='low') … … 265 257 v, dv = inv.get_volume_fraction_with_error(contrast=2.6e-6) 266 258 s, ds = inv.get_surface_with_error(contrast=2.6e-6, porod_const=2) 267 259 268 260 # Test results 269 261 self.assertAlmostEquals(qstar, 7.49e-5,2) 270 262 self.assertAlmostEquals(v, 0.005952674, 3) 271 263 self.assertAlmostEquals(s , 941.7452, 3) 272 273 264 274 265 class TestInvPinholeSmear(unittest.TestCase): 275 266 """ … … 280 271 list = Loader().load("latex_smeared.xml") 281 272 self.data_q_smear = list[0] 282 273 283 274 def test_use_case_1(self): 284 275 """ … … 287 278 inv = invariant.InvariantCalculator(data=self.data_q_smear) 288 279 qstar = inv.get_qstar() 289 280 290 281 v = inv.get_volume_fraction(contrast=2.6e-6) 291 282 s = inv.get_surface(contrast=2.6e-6, porod_const=2) … … 294 285 self.assertAlmostEquals(v, 0.115352622, 2) 295 286 self.assertAlmostEquals(s , 941.7452, 3 ) 296 287 297 288 def test_use_case_2(self): 298 289 """ … … 302 293 # Create invariant object. Background and scale left as defaults. 303 294 inv = invariant.InvariantCalculator(data=self.data_q_smear) 304 295 305 296 # Get the invariant with errors 306 297 qstar, qstar_err = inv.get_qstar_with_error() … … 312 303 self.assertAlmostEquals(v, 0.115352622, 2) 313 304 self.assertAlmostEquals(s , 941.7452, 3 ) 314 305 315 306 def test_use_case_3(self): 316 307 """ … … 328 319 v, dv = inv.get_volume_fraction_with_error(contrast=2.6e-6) 329 320 s, ds = inv.get_surface_with_error(contrast=2.6e-6, porod_const=2) 330 321 331 322 # Test results 332 323 self.assertAlmostEquals(qstar, 0.00138756,2) 333 324 self.assertAlmostEquals(v, 0.117226896,2) 334 325 self.assertAlmostEquals(s ,941.7452, 3) 335 326 336 327 def test_use_case_4(self): 337 328 """ … … 346 337 # The version of the call with error 347 338 qstar, qstar_err = inv.get_qstar_with_error(extrapolation='high') 339 340 # Get the volume fraction and surface 341 # WHY SHOULD THIS FAIL? 342 #self.assertRaises(RuntimeError, inv.get_volume_fraction_with_error, 2.6e-6) 343 344 # Check that an exception is raised when the 'surface' is not defined 345 # WHY SHOULD THIS FAIL? 
346 #self.assertRaises(RuntimeError, inv.get_surface_with_error, 2.6e-6, 2) 348 347 349 348 # Test results 350 349 self.assertAlmostEquals(qstar, 0.0045773,2) 351 350 352 351 def test_use_case_5(self): 353 352 """ … … 358 357 # Set the extrapolation parameters for the low- and high-Q ranges 359 358 inv.set_extrapolation(range='low', npts=10, function='guinier') 360 inv.set_extrapolation(range='high', npts=10, function='power_law', 361 power=4) 362 # The version of the call without error 363 # The function parameter defaults to None, then is picked to be 364 # 'power_law' for extrapolation='high' 359 inv.set_extrapolation(range='high', npts=10, function='power_law', power=4) 360 # The version of the call without error 361 # The function parameter defaults to None, then is picked to be 'power_law' for extrapolation='high' 365 362 qstar = inv.get_qstar(extrapolation='both') 366 363 # The version of the call with error 367 364 qstar, qstar_err = inv.get_qstar_with_error(extrapolation='both') 368 365 366 # Get the volume fraction and surface 367 # WHY SHOULD THIS FAIL? 368 #self.assertRaises(RuntimeError, inv.get_volume_fraction_with_error, 2.6e-6) 369 #self.assertRaises(RuntimeError, inv.get_surface_with_error, 2.6e-6, 2) 370 369 371 # Test results 370 372 self.assertAlmostEquals(qstar, 0.00460319,3)
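Condensed, the invariant use cases above all follow one workflow. Note that elsewhere in this changeset Loader().load() switches between returning a list and a single Data1D, so the [0] below assumes the list-returning side; run from the test data directory so PolySpheres.txt resolves:

    from sas.sascalc.dataloader.loader import Loader
    from sas.sascalc.invariant import invariant

    data = Loader().load("PolySpheres.txt")[0]
    inv = invariant.InvariantCalculator(data=data)
    # Guinier extrapolation at low Q, 1/q^4 power law at high Q
    inv.set_extrapolation(range='low', npts=10, function='guinier')
    inv.set_extrapolation(range='high', npts=10, function='power_law', power=4)
    qstar, qstar_err = inv.get_qstar_with_error(extrapolation='both')
    v, dv = inv.get_volume_fraction_with_error(contrast=2.6e-6)
    s, ds = inv.get_surface_with_error(contrast=2.6e-6, porod_const=2)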