Changeset cd57c7d4 in sasview for src/sas/sascalc/dataloader/readers
- Timestamp:
- Sep 11, 2017 10:12:16 AM
- Branches:
- master, ESS_GUI, ESS_GUI_Docs, ESS_GUI_batch_fitting, ESS_GUI_bumps_abstraction, ESS_GUI_iss1116, ESS_GUI_iss879, ESS_GUI_iss959, ESS_GUI_opencl, ESS_GUI_ordering, ESS_GUI_sync_sascalc, magnetic_scatt, release-4.2.2, ticket-1009, ticket-1094-headless, ticket-1242-2d-resolution, ticket-1243, ticket-1249, ticket885, unittest-saveload
- Children:
- b1f20d1
- Parents:
- c9ecd1b (diff), e2b2473 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Location:
- src/sas/sascalc/dataloader/readers
- Files:
- 4 deleted
- 11 edited
src/sas/sascalc/dataloader/readers/__init__.py
r959eb01 r7a5d066

-# Backward compatibility with the previous implementation of the default readers
-from associations import register_readers
-
 # Method to associate extensions to default readers
 from associations import read_associations

 # Method to return the location of the XML settings file
src/sas/sascalc/dataloader/readers/abs_reader.py
r959eb01 rad92c5a

The IGOR .ABS reader is rebuilt on the new FileReader base class. The import and class-declaration hunk:

+import logging
 import numpy as np
-import os
-from sas.sascalc.dataloader.data_info import Data1D
-from sas.sascalc.dataloader.data_info import Detector
-
-has_converter = True
-try:
-    from sas.sascalc.data_util.nxsunit import Converter
-except:
-    has_converter = False
-
-
-class Reader:
+from sas.sascalc.dataloader.file_reader_base_class import FileReader
+from sas.sascalc.dataloader.data_info import DataInfo, plottable_1D, Data1D,\
+    Detector
+from sas.sascalc.dataloader.loader_exceptions import FileContentsException,\
+    DefaultReaderException
+
+logger = logging.getLogger(__name__)
+
+
+class Reader(FileReader):

The entry point and the allowed extensions also change:

-    ext = ['.abs', '.ABS']
+    ext = ['.abs']

-    def read(self, path):
+    def get_file_contents(self):

Within the method body:

- File access goes through self.f_open supplied by the base class; the os.path existence/extension checks and manual open() are gone, and the Converter import moves inside the method, setting self.has_converter.
- Results are built on self.current_datainfo (a DataInfo) and self.current_dataset, preallocated with self.reset_data_list(len(lines)) and filled by index rather than np.append. (The new code calls reset_data_list() and sets current_datainfo.filename twice, a harmless duplication.)
- Header-parse failures (wavelength, SDD) are appended to self.current_datainfo.errors instead of aborting with the Python 2 "raise RuntimeError, msg" syntax, and bare except: clauses are narrowed to except ValueError: or except KeyError: where a conversion can fail.
- Q and I are converted against fixed base units ('1/A' and '1/cm'), and dQ is now converted with the Q converter; the old code passed _dx through data_conv_i, the intensity converter.
- The detector pixel size and beam-centre scale change from 5.0 mm to 5.08 mm per bin.
- Final processing calls self.remove_empty_q_values(True, True); the sanity checks call self.set_all_to_none() before raising ValueError; results are emitted with self.send_to_output().
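The abs_reader changes show the reader pattern that the rest of this changeset applies to every loader. Below is a minimal sketch of that pattern; it assumes only what the diffs themselves show about FileReader (self.f_open, self.current_dataset, reset_data_list(), remove_empty_q_values(), send_to_output()), and the reader name, extension, and two-column format are hypothetical.

```python
from sas.sascalc.dataloader.file_reader_base_class import FileReader
from sas.sascalc.dataloader.loader_exceptions import FileContentsException


class MinimalReader(FileReader):
    """Hypothetical two-column reader illustrating the new pattern."""
    type_name = "Minimal 1D"
    ext = ['.min']

    def get_file_contents(self):
        # self.f_open is opened by the loader before this is called
        lines = self.f_open.read().splitlines()
        # Preallocate x/y/dx/dy on self.current_dataset
        self.reset_data_list(len(lines))
        data_line = 0
        for line in lines:
            toks = line.split()
            try:
                self.current_dataset.x[data_line] = float(toks[0])
                self.current_dataset.y[data_line] = float(toks[1])
                data_line += 1
            except (IndexError, ValueError):
                # Non-numeric header/footer lines are skipped,
                # as in abs_reader above
                continue
        if data_line == 0:
            raise FileContentsException("No data found")
        # Trim the unused preallocated entries, then hand the
        # dataset/datainfo pair back to the loader
        self.remove_empty_q_values(False, False)
        self.send_to_output()
```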
src/sas/sascalc/dataloader/readers/anton_paar_saxs_reader.py
ra235f715 rfafe52a

 from sas.sascalc.dataloader.readers.xml_reader import XMLreader
-from sas.sascalc.dataloader.data_info import plottable_1D, Data1D, Sample, Source
+from sas.sascalc.dataloader.data_info import plottable_1D, Data1D, DataInfo, Sample, Source

The class docstring, previously copied verbatim from the CanSAS HDF5 reader (Mantid-generated .h5 files, h5py dependency and all), is replaced with the accurate "A class for reading in Anton Paar .pdh files". The rest of the change:

- reset_state() now creates a plottable_1D plus a fresh DataInfo (self.current_datainfo) instead of a Data1D; the class-level current_dataset and output attributes are dropped in favour of the base class.
- read(filename) becomes get_file_contents(): the existence/extension checks and manual open() are replaced by reading from self.f_open.
- read_data() wraps the header parsing in try/except and raises FileContentsException("Couldn't load {}. ...") carrying the original exception's message; it tracks correctly_loaded and an accumulating error_message.
- Size mismatches (incomplete data, x/y or y/dy length differences) no longer go into self.errors; they append to error_message and clear correctly_loaded. The block that zero-filled dx is removed.
- The XML metadata step (set_xml_string(), xpath('/fileinfo'), _parse_child()) is wrapped in try/except so that a metadata error still lets the loaded points reach send_to_output(); afterwards a DataReaderException is raised with the accumulated message.
- _parse_child() stores detector, wavelength, temperature, and slit-length values on self.current_datainfo rather than self.current_dataset.
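This reader introduces the partial-load idiom used across the changeset: fatal header errors abort with FileContentsException, while metadata errors still let the loaded points reach the output before a DataReaderException is raised. A condensed sketch of that control flow, with parse_header() and parse_xml_metadata() as hypothetical stand-ins for the real parsing code:

```python
from sas.sascalc.dataloader.loader_exceptions import (
    FileContentsException, DataReaderException)


def read_data(self):
    """Method-body sketch of the partial-load idiom."""
    correctly_loaded = True
    error_message = ""
    try:
        parse_header(self)        # hypothetical: failure here is fatal
    except Exception as e:
        raise FileContentsException(
            "Couldn't load {}.\n{}".format(self.f_open.name, str(e)))
    try:
        parse_xml_metadata(self)  # hypothetical: failure here is not
    except Exception as e:
        error_message += ("Data points have been loaded but there was "
                          "an error reading XML metadata: " + str(e))
        correctly_loaded = False
    self.send_to_output()         # keep whatever data did load
    if not correctly_loaded:
        raise DataReaderException(error_message)
```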
src/sas/sascalc/dataloader/readers/ascii_reader.py
r235f514 rf994e8b1

 """
-ASCII reader
+Generic multi-column ASCII data reader
 """

The import/declaration hunk mirrors abs_reader:

-import numpy as np
-import os
-from sas.sascalc.dataloader.data_info import Data1D
-
-# Check whether we have a converter available
-has_converter = True
-try:
-    from sas.sascalc.data_util.nxsunit import Converter
-except:
-    has_converter = False
-_ZERO = 1e-16
+import logging
+from sas.sascalc.dataloader.file_reader_base_class import FileReader
+from sas.sascalc.dataloader.data_info import DataInfo, plottable_1D
+from sas.sascalc.dataloader.loader_exceptions import FileContentsException,\
+    DefaultReaderException
+
+logger = logging.getLogger(__name__)

-class Reader:
+class Reader(FileReader):

-    ext = ['.txt', '.TXT', '.dat', '.DAT', '.abs', '.ABS', 'csv', 'CSV']
+    ext = ['.txt', '.dat', '.abs', '.csv']
+    # Flag to bypass extension check
+    allow_all = True
+    # More than min_data_pts lines is considered actual data,
+    # unless that is the only data
+    min_data_pts = 5

-    def read(self, path):
+    def get_file_contents(self):

Within the method body:

- The candidate-line scan keeps its shape, but values are written by index into arrays preallocated with self.reset_data_list(len(lines)) instead of appended to temporary tx/ty/tdy/tdx arrays; when a candidate block turns out not to be data, it is discarded with self.reset_data_list(len(lines) - line_no).
- Lines are split with self.splitline(line.strip()); the local splitline() helper is removed from this file.
- A line with eight or more columns inside the data section raises FileContentsException pointing the user to the file-converter tool for 2D ASCII data.
- Failures are explicit exceptions instead of "raise RuntimeError, msg": fewer than min_data_pts (5) Q points raises FileContentsException for recognized extensions and DefaultReaderException otherwise; I/dI and Q/dQ length mismatches call self.set_all_to_none() and raise FileContentsException.
- The lexsort reordering, the _ZERO (1e-16) substitution for zero dx/dy, and the manual x != 0 filtering are replaced by self.remove_empty_q_values(has_error_dx, has_error_dy); results are emitted with self.send_to_output().
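The new code calls self.splitline(line.strip()), so the delimiter-fallback helper removed from this file presumably now lives on the FileReader base class. For reference, the logic it implemented (comma first, then semicolon, then whitespace) was:

```python
def splitline(line):
    """Split a line on common delimiters, preferring CSV, as the old
    ascii_reader helper did."""
    # Initial try for CSV (split on ,)
    toks = line.split(',')
    # Now try semicolon-separated values
    if len(toks) < 2:
        toks = line.split(';')
    # Finally fall back to whitespace
    if len(toks) < 2:
        toks = line.split()
    return toks


print(splitline("0.001, 2.5, 0.1"))  # ['0.001', ' 2.5', ' 0.1']
print(splitline("0.001 2.5 0.1"))    # ['0.001', '2.5', '0.1']
```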
src/sas/sascalc/dataloader/readers/associations.py
ra1b8fee rce8c7bd

 #copyright 2009, University of Tennessee
 #############################################################################
-from __future__ import print_function
-
-import os
 import sys
 import logging
-import json

 logger = logging.getLogger(__name__)

-FILE_NAME = 'defaults.json'
+FILE_ASSOCIATIONS = {
+    ".xml": "cansas_reader",
+    ".ses": "sesans_reader",
+    ".h5": "cansas_reader_HDF5",
+    ".txt": "ascii_reader",
+    ".dat": "red2d_reader",
+    ".abs": "abs_reader",
+    ".sans": "danse_reader",
+    ".pdh": "anton_paar_saxs_reader"
+}

-def read_associations(loader, settings=FILE_NAME):
+def read_associations(loader, settings=FILE_ASSOCIATIONS):

The body of read_associations() drops the multi-directory search for defaults.json and the JSON parsing; it now iterates settings.iteritems() directly and associates each extension (lower- and upper-case) with its reader via the existing exec statements, which gain a "FIXME: Remove exec statements" note. (The docstring's ":param settings: path to the json settings file" is now stale, since settings is a dict.) Failed associations are still logged and skipped. The obsolete register_readers() callback, which imported and registered every default reader by hand, is deleted.
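The new FIXME flags the remaining exec statements. One plausible exec-free replacement uses importlib; this sketch assumes, as the exec strings above imply, that loader.associate_file_type() takes an extension string and the imported reader module:

```python
import importlib
import logging

logger = logging.getLogger(__name__)


def read_associations(loader, settings=FILE_ASSOCIATIONS):
    """exec-free variant of the association loop above (a sketch)."""
    for ext, reader in settings.items():
        if reader is None or ext is None:
            continue
        try:
            # Import the reader module by name from this package
            module = importlib.import_module(
                "sas.sascalc.dataloader.readers." + reader)
            # The original TODO still applies: make the registry
            # case-insensitive and drop the second call
            loader.associate_file_type(ext.lower(), module)
            loader.associate_file_type(ext.upper(), module)
        except ImportError as exc:
            logger.error("read_associations: skipping association "
                         "for %s\n %s", ext.lower(), exc)
```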
src/sas/sascalc/dataloader/readers/cansas_reader.py
r6a455cd3 rcd57c7d4 1 """2 CanSAS data reader - new recursive cansas_version.3 """4 ############################################################################5 #This software was developed by the University of Tennessee as part of the6 #Distributed Data Analysis of Neutron Scattering Experiments (DANSE)7 #project funded by the US National Science Foundation.8 #If you use DANSE applications to do scientific research that leads to9 #publication, we ask that you acknowledge the use of the software with the10 #following sentence:11 #This work benefited from DANSE software developed under NSF award DMR-0520547.12 #copyright 2008,2009 University of Tennessee13 #############################################################################14 15 1 import logging 16 2 import numpy as np … … 29 15 from sas.sascalc.dataloader.readers.xml_reader import XMLreader 30 16 from sas.sascalc.dataloader.readers.cansas_constants import CansasConstants, CurrentLevel 17 from sas.sascalc.dataloader.loader_exceptions import FileContentsException, \ 18 DefaultReaderException, DataReaderException 31 19 32 20 # The following 2 imports *ARE* used. Do not remove either. 33 21 import xml.dom.minidom 34 22 from xml.dom.minidom import parseString 23 24 from lxml import etree 35 25 36 26 logger = logging.getLogger(__name__) … … 55 45 56 46 class Reader(XMLreader): 57 """58 Class to load cansas 1D XML files59 60 :Dependencies:61 The CanSAS reader requires PyXML 0.8.4 or later.62 """63 # CanSAS version - defaults to version 1.064 47 cansas_version = "1.0" 65 48 base_ns = "{cansas1d/1.0}" … … 75 58 ns_list = None 76 59 # Temporary storage location for loading multiple data sets in a single file 77 current_datainfo = None78 current_dataset = None79 60 current_data1d = None 80 61 data = None 81 # List of data1D objects to be sent back to SasView82 output = None83 62 # Wildcards 84 63 type = ["XML files (*.xml)|*.xml", "SasView Save Files (*.svs)|*.svs"] … … 110 89 111 90 def read(self, xml_file, schema_path="", invalid=True): 112 """ 113 Validate and read in an xml_file file in the canSAS format. 114 115 :param xml_file: A canSAS file path in proper XML format 116 :param schema_path: A file path to an XML schema to validate the xml_file against 117 """ 118 # For every file loaded, reset everything to a base state 91 if schema_path != "" or invalid != True: 92 # read has been called from self.get_file_contents because xml file doens't conform to schema 93 _, self.extension = os.path.splitext(os.path.basename(xml_file)) 94 return self.get_file_contents(xml_file=xml_file, schema_path=schema_path, invalid=invalid) 95 96 # Otherwise, read has been called by the data loader - file_reader_base_class handles this 97 return super(XMLreader, self).read(xml_file) 98 99 def get_file_contents(self, xml_file=None, schema_path="", invalid=True): 100 # Reset everything since we're loading a new file 119 101 self.reset_state() 120 102 self.invalid = invalid 121 # Check that the file exists 122 if os.path.isfile(xml_file): 123 basename, extension = os.path.splitext(os.path.basename(xml_file)) 124 # If the file type is not allowed, return nothing 125 if extension in self.ext or self.allow_all: 126 # Get the file location of 127 self.load_file_and_schema(xml_file, schema_path) 128 self.add_data_set() 129 # Try to load the file, but raise an error if unable to. 
130 # Check the file matches the XML schema 103 if xml_file is None: 104 xml_file = self.f_open.name 105 # We don't sure f_open since lxml handles opnening/closing files 106 if not self.f_open.closed: 107 self.f_open.close() 108 109 basename, _ = os.path.splitext(os.path.basename(xml_file)) 110 111 try: 112 # Raises FileContentsException 113 self.load_file_and_schema(xml_file, schema_path) 114 self.current_datainfo = DataInfo() 115 # Raises FileContentsException if file doesn't meet CanSAS schema 116 self.is_cansas(self.extension) 117 self.invalid = False # If we reach this point then file must be valid CanSAS 118 119 # Parse each SASentry 120 entry_list = self.xmlroot.xpath('/ns:SASroot/ns:SASentry', namespaces={ 121 'ns': self.cansas_defaults.get("ns") 122 }) 123 # Look for a SASentry 124 self.names.append("SASentry") 125 self.set_processing_instructions() 126 127 for entry in entry_list: 128 self.current_datainfo.filename = basename + self.extension 129 self.current_datainfo.meta_data["loader"] = "CanSAS XML 1D" 130 self.current_datainfo.meta_data[PREPROCESS] = self.processing_instructions 131 self._parse_entry(entry) 132 has_error_dx = self.current_dataset.dx is not None 133 has_error_dy = self.current_dataset.dy is not None 134 self.remove_empty_q_values(has_error_dx=has_error_dx, 135 has_error_dy=has_error_dy) 136 self.send_to_output() # Combine datasets with DataInfo 137 self.current_datainfo = DataInfo() # Reset DataInfo 138 except FileContentsException as fc_exc: 139 # File doesn't meet schema - try loading with a less strict schema 140 base_name = xml_reader.__file__ 141 base_name = base_name.replace("\\", "/") 142 base = base_name.split("/sas/")[0] 143 if self.cansas_version == "1.1": 144 invalid_schema = INVALID_SCHEMA_PATH_1_1.format(base, self.cansas_defaults.get("schema")) 145 else: 146 invalid_schema = INVALID_SCHEMA_PATH_1_0.format(base, self.cansas_defaults.get("schema")) 147 self.set_schema(invalid_schema) 148 if self.invalid: 131 149 try: 132 self.is_cansas(extension) 133 self.invalid = False 134 # Get each SASentry from XML file and add it to a list. 135 entry_list = self.xmlroot.xpath( 136 '/ns:SASroot/ns:SASentry', 137 namespaces={'ns': self.cansas_defaults.get("ns")}) 138 self.names.append("SASentry") 139 140 # Get all preprocessing events and encoding 141 self.set_processing_instructions() 142 143 # Parse each <SASentry> item 144 for entry in entry_list: 145 # Create a new DataInfo object for every <SASentry> 146 147 # Set the file name and then parse the entry. 
148 self.current_datainfo.filename = basename + extension 149 self.current_datainfo.meta_data["loader"] = "CanSAS XML 1D" 150 self.current_datainfo.meta_data[PREPROCESS] = \ 151 self.processing_instructions 152 153 # Parse the XML SASentry 154 self._parse_entry(entry) 155 # Combine datasets with datainfo 156 self.add_data_set() 157 except RuntimeError: 158 # If the file does not match the schema, raise this error 150 # Load data with less strict schema 151 self.read(xml_file, invalid_schema, False) 152 153 # File can still be read but doesn't match schema, so raise exception 154 self.load_file_and_schema(xml_file) # Reload strict schema so we can find where error are in file 159 155 invalid_xml = self.find_invalid_xml() 160 156 if invalid_xml != "": 161 invalid_xml = INVALID_XML.format(basename + extension) + invalid_xml 162 self.errors.add(invalid_xml) 163 # Try again with an invalid CanSAS schema, that requires only a data set in each 164 base_name = xml_reader.__file__ 165 base_name = base_name.replace("\\", "/") 166 base = base_name.split("/sas/")[0] 167 if self.cansas_version == "1.1": 168 invalid_schema = INVALID_SCHEMA_PATH_1_1.format(base, self.cansas_defaults.get("schema")) 169 else: 170 invalid_schema = INVALID_SCHEMA_PATH_1_0.format(base, self.cansas_defaults.get("schema")) 171 self.set_schema(invalid_schema) 172 try: 173 if self.invalid: 174 if self.is_cansas(): 175 self.output = self.read(xml_file, invalid_schema, False) 176 else: 177 raise RuntimeError 178 else: 179 raise RuntimeError 180 except RuntimeError: 181 x = np.zeros(1) 182 y = np.zeros(1) 183 self.current_data1d = Data1D(x,y) 184 self.current_data1d.errors = self.errors 185 return [self.current_data1d] 186 else: 187 self.output.append("Not a valid file path.") 188 # Return a list of parsed entries that dataloader can manage 189 return self.output 157 invalid_xml = INVALID_XML.format(basename + self.extension) + invalid_xml 158 raise DataReaderException(invalid_xml) # Handled by base class 159 except FileContentsException as fc_exc: 160 msg = "CanSAS Reader could not load the file {}".format(xml_file) 161 if fc_exc.message is not None: # Propagate error messages from earlier 162 msg = fc_exc.message 163 if not self.extension in self.ext: # If the file has no associated loader 164 raise DefaultReaderException(msg) 165 raise FileContentsException(msg) 166 pass 167 else: 168 raise fc_exc 169 except Exception as e: # Convert all other exceptions to FileContentsExceptions 170 raise FileContentsException(e.message) 171 172 173 def load_file_and_schema(self, xml_file, schema_path=""): 174 base_name = xml_reader.__file__ 175 base_name = base_name.replace("\\", "/") 176 base = base_name.split("/sas/")[0] 177 178 # Try and parse the XML file 179 try: 180 self.set_xml_file(xml_file) 181 except etree.XMLSyntaxError: # File isn't valid XML so can't be loaded 182 msg = "SasView cannot load {}.\nInvalid XML syntax".format(xml_file) 183 raise FileContentsException(msg) 184 185 self.cansas_version = self.xmlroot.get("version", "1.0") 186 self.cansas_defaults = CANSAS_NS.get(self.cansas_version, "1.0") 187 188 if schema_path == "": 189 schema_path = "{}/sas/sascalc/dataloader/readers/schema/{}".format( 190 base, self.cansas_defaults.get("schema").replace("\\", "/") 191 ) 192 self.set_schema(schema_path) 193 194 def is_cansas(self, ext="xml"): 195 """ 196 Checks to see if the XML file is a CanSAS file 197 198 :param ext: The file extension of the data file 199 :raises FileContentsException: Raised if XML file isn't valid CanSAS 200 """ 
201 if self.validate_xml(): # Check file is valid XML 202 name = "{http://www.w3.org/2001/XMLSchema-instance}schemaLocation" 203 value = self.xmlroot.get(name) 204 # Check schema CanSAS version matches file CanSAS version 205 if CANSAS_NS.get(self.cansas_version).get("ns") == value.rsplit(" ")[0]: 206 return True 207 if ext == "svs": 208 return True # Why is this required? 209 # If we get to this point then file isn't valid CanSAS 210 logger.warning("File doesn't meet CanSAS schema. Trying to load anyway.") 211 raise FileContentsException("The file is not valid CanSAS") 190 212 191 213 def _parse_entry(self, dom, recurse=False): 192 """193 Parse a SASEntry - new recursive method for parsing the dom of194 the CanSAS data format. This will allow multiple data files195 and extra nodes to be read in simultaneously.196 197 :param dom: dom object with a namespace base of names198 """199 200 214 if not self._is_call_local() and not recurse: 201 215 self.reset_state() 202 self.add_data_set() 216 self.data = [] 217 self.current_datainfo = DataInfo() 203 218 self.names.append("SASentry") 204 219 self.parent_class = "SASentry" 205 self._check_for_empty_data() 206 self.base_ns = "{0}{1}{2}".format("{", \ 207 CANSAS_NS.get(self.cansas_version).get("ns"), "}") 208 209 # Go through each child in the parent element 220 # Create an empty dataset if no data has been passed to the reader 221 if self.current_dataset is None: 222 self.current_dataset = plottable_1D(np.empty(0), np.empty(0), 223 np.empty(0), np.empty(0)) 224 self.base_ns = "{" + CANSAS_NS.get(self.cansas_version).get("ns") + "}" 225 226 # Loop through each child in the parent element 210 227 for node in dom: 211 228 attr = node.attrib … … 218 235 if tagname == "fitting_plug_in" or tagname == "pr_inversion" or tagname == "invariant": 219 236 continue 220 221 237 # Get where to store content 222 238 self.names.append(tagname_original) … … 234 250 else: 235 251 self.current_dataset.shape = () 236 # Recurs ion stepto access data within the group237 self._parse_entry(node, True)252 # Recurse to access data within the group 253 self._parse_entry(node, recurse=True) 238 254 if tagname == "SASsample": 239 255 self.current_datainfo.sample.name = name … … 245 261 self.aperture.name = name 246 262 self.aperture.type = type 247 self. 
add_intermediate()263 self._add_intermediate() 248 264 else: 249 265 if isinstance(self.current_dataset, plottable_2D): … … 262 278 self.current_datainfo.notes.append(data_point) 263 279 264 # I and Q - 1D data280 # I and Q points 265 281 elif tagname == 'I' and isinstance(self.current_dataset, plottable_1D): 266 282 self.current_dataset.yaxis("Intensity", unit) … … 274 290 self.current_dataset.dx = np.append(self.current_dataset.dx, data_point) 275 291 elif tagname == 'dQw': 292 if self.current_dataset.dxw is None: 293 self.current_dataset.dxw = np.empty(0) 276 294 self.current_dataset.dxw = np.append(self.current_dataset.dxw, data_point) 277 295 elif tagname == 'dQl': 296 if self.current_dataset.dxl is None: 297 self.current_dataset.dxl = np.empty(0) 278 298 self.current_dataset.dxl = np.append(self.current_dataset.dxl, data_point) 279 299 elif tagname == 'Qmean': … … 351 371 elif tagname == 'name' and self.parent_class == 'SASinstrument': 352 372 self.current_datainfo.instrument = data_point 373 353 374 # Detector Information 354 375 elif tagname == 'name' and self.parent_class == 'SASdetector': … … 396 417 self.detector.orientation.z = data_point 397 418 self.detector.orientation_unit = unit 419 398 420 # Collimation and Aperture 399 421 elif tagname == 'length' and self.parent_class == 'SAScollimation': … … 429 451 elif tagname == 'term' and self.parent_class == 'SASprocess': 430 452 unit = attr.get("unit", "") 431 dic = {} 432 dic["name"] = name 433 dic["value"] = data_point 434 dic["unit"] = unit 453 dic = { "name": name, "value": data_point, "unit": unit } 435 454 self.process.term.append(dic) 436 455 … … 485 504 if not self._is_call_local() and not recurse: 486 505 self.frm = "" 487 self.add_data_set() 506 self.current_datainfo.errors = set() 507 for error in self.errors: 508 self.current_datainfo.errors.add(error) 509 self.errors.clear() 510 self.send_to_output() 488 511 empty = None 489 512 return self.output[0], empty 490 513 491 492 514 def _is_call_local(self): 493 """494 495 """496 515 if self.frm == "": 497 516 inter = inspect.stack() … … 505 524 return True 506 525 507 def is_cansas(self, ext="xml"): 508 """ 509 Checks to see if the xml file is a CanSAS file 510 511 :param ext: The file extension of the data file 512 """ 513 if self.validate_xml(): 514 name = "{http://www.w3.org/2001/XMLSchema-instance}schemaLocation" 515 value = self.xmlroot.get(name) 516 if CANSAS_NS.get(self.cansas_version).get("ns") == \ 517 value.rsplit(" ")[0]: 518 return True 519 if ext == "svs": 520 return True 521 raise RuntimeError 522 523 def load_file_and_schema(self, xml_file, schema_path=""): 524 """ 525 Loads the file and associates a schema, if a schema is passed in or if one already exists 526 527 :param xml_file: The xml file path sent to Reader.read 528 :param schema_path: The path to a schema associated with the xml_file, or find one based on the file 529 """ 530 base_name = xml_reader.__file__ 531 base_name = base_name.replace("\\", "/") 532 base = base_name.split("/sas/")[0] 533 534 # Load in xml file and get the cansas version from the header 535 self.set_xml_file(xml_file) 536 self.cansas_version = self.xmlroot.get("version", "1.0") 537 538 # Generic values for the cansas file based on the version 539 self.cansas_defaults = CANSAS_NS.get(self.cansas_version, "1.0") 540 if schema_path == "": 541 schema_path = "{0}/sas/sascalc/dataloader/readers/schema/{1}".format \ 542 (base, self.cansas_defaults.get("schema")).replace("\\", "/") 543 544 # Link a schema to the XML file. 
545 self.set_schema(schema_path) 546 547 def add_data_set(self): 548 """ 549 Adds the current_dataset to the list of outputs after preforming final processing on the data and then calls a 550 private method to generate a new data set. 551 552 :param key: NeXus group name for current tree level 553 """ 554 555 if self.current_datainfo and self.current_dataset: 556 self._final_cleanup() 557 self.data = [] 558 self.current_datainfo = DataInfo() 559 560 def _initialize_new_data_set(self, node=None): 561 """ 562 A private class method to generate a new 1D data object. 563 Outside methods should call add_data_set() to be sure any existing data is stored properly. 564 565 :param node: XML node to determine if 1D or 2D data 566 """ 567 x = np.array(0) 568 y = np.array(0) 569 for child in node: 570 if child.tag.replace(self.base_ns, "") == "Idata": 571 for i_child in child: 572 if i_child.tag.replace(self.base_ns, "") == "Qx": 573 self.current_dataset = plottable_2D() 574 return 575 self.current_dataset = plottable_1D(x, y) 576 577 def add_intermediate(self): 526 def _add_intermediate(self): 578 527 """ 579 528 This method stores any intermediate objects within the final data set after fully reading the set. 580 581 :param parent: The NXclass name for the h5py Group object that just finished being processed 582 """ 583 529 """ 584 530 if self.parent_class == 'SASprocess': 585 531 self.current_datainfo.process.append(self.process) … … 600 546 self._check_for_empty_resolution() 601 547 self.data.append(self.current_dataset) 602 603 def _final_cleanup(self):604 """605 Final cleanup of the Data1D object to be sure it has all the606 appropriate information needed for perspectives607 """608 609 # Append errors to dataset and reset class errors610 self.current_datainfo.errors = set()611 for error in self.errors:612 self.current_datainfo.errors.add(error)613 self.errors.clear()614 615 # Combine all plottables with datainfo and append each to output616 # Type cast data arrays to float64 and find min/max as appropriate617 for dataset in self.data:618 if isinstance(dataset, plottable_1D):619 if dataset.x is not None:620 dataset.x = np.delete(dataset.x, [0])621 dataset.x = dataset.x.astype(np.float64)622 dataset.xmin = np.min(dataset.x)623 dataset.xmax = np.max(dataset.x)624 if dataset.y is not None:625 dataset.y = np.delete(dataset.y, [0])626 dataset.y = dataset.y.astype(np.float64)627 dataset.ymin = np.min(dataset.y)628 dataset.ymax = np.max(dataset.y)629 if dataset.dx is not None:630 dataset.dx = np.delete(dataset.dx, [0])631 dataset.dx = dataset.dx.astype(np.float64)632 if dataset.dxl is not None:633 dataset.dxl = np.delete(dataset.dxl, [0])634 dataset.dxl = dataset.dxl.astype(np.float64)635 if dataset.dxw is not None:636 dataset.dxw = np.delete(dataset.dxw, [0])637 dataset.dxw = dataset.dxw.astype(np.float64)638 if dataset.dy is not None:639 dataset.dy = np.delete(dataset.dy, [0])640 dataset.dy = dataset.dy.astype(np.float64)641 np.trim_zeros(dataset.x)642 np.trim_zeros(dataset.y)643 np.trim_zeros(dataset.dy)644 elif isinstance(dataset, plottable_2D):645 dataset.data = dataset.data.astype(np.float64)646 dataset.qx_data = dataset.qx_data.astype(np.float64)647 dataset.xmin = np.min(dataset.qx_data)648 dataset.xmax = np.max(dataset.qx_data)649 dataset.qy_data = dataset.qy_data.astype(np.float64)650 dataset.ymin = np.min(dataset.qy_data)651 dataset.ymax = np.max(dataset.qy_data)652 dataset.q_data = np.sqrt(dataset.qx_data * dataset.qx_data653 + dataset.qy_data * dataset.qy_data)654 if dataset.err_data is not 
None:655 dataset.err_data = dataset.err_data.astype(np.float64)656 if dataset.dqx_data is not None:657 dataset.dqx_data = dataset.dqx_data.astype(np.float64)658 if dataset.dqy_data is not None:659 dataset.dqy_data = dataset.dqy_data.astype(np.float64)660 if dataset.mask is not None:661 dataset.mask = dataset.mask.astype(dtype=bool)662 663 if len(dataset.shape) == 2:664 n_rows, n_cols = dataset.shape665 dataset.y_bins = dataset.qy_data[0::int(n_cols)]666 dataset.x_bins = dataset.qx_data[:int(n_cols)]667 dataset.data = dataset.data.flatten()668 else:669 dataset.y_bins = []670 dataset.x_bins = []671 dataset.data = dataset.data.flatten()672 673 final_dataset = combine_data(dataset, self.current_datainfo)674 self.output.append(final_dataset)675 676 def _create_unique_key(self, dictionary, name, numb=0):677 """678 Create a unique key value for any dictionary to prevent overwriting679 Recurse until a unique key value is found.680 681 :param dictionary: A dictionary with any number of entries682 :param name: The index of the item to be added to dictionary683 :param numb: The number to be appended to the name, starts at 0684 """685 if dictionary.get(name) is not None:686 numb += 1687 name = name.split("_")[0]688 name += "_{0}".format(numb)689 name = self._create_unique_key(dictionary, name, numb)690 return name691 548 692 549 def _get_node_value(self, node, tagname): … … 799 656 return node_value, value_unit 800 657 801 def _check_for_empty_data(self):802 """803 Creates an empty data set if no data is passed to the reader804 805 :param data1d: presumably a Data1D object806 """807 if self.current_dataset is None:808 x_vals = np.empty(0)809 y_vals = np.empty(0)810 dx_vals = np.empty(0)811 dy_vals = np.empty(0)812 dxl = np.empty(0)813 dxw = np.empty(0)814 self.current_dataset = plottable_1D(x_vals, y_vals, dx_vals, dy_vals)815 self.current_dataset.dxl = dxl816 self.current_dataset.dxw = dxw817 818 658 def _check_for_empty_resolution(self): 819 659 """ 820 A method to check all resolution data sets are the same size as I and Q 821 """ 822 if isinstance(self.current_dataset, plottable_1D): 823 dql_exists = False 824 dqw_exists = False 825 dq_exists = False 826 di_exists = False 827 if self.current_dataset.dxl is not None: 828 dql_exists = True 829 if self.current_dataset.dxw is not None: 830 dqw_exists = True 831 if self.current_dataset.dx is not None: 832 dq_exists = True 833 if self.current_dataset.dy is not None: 834 di_exists = True 835 if dqw_exists and not dql_exists: 836 array_size = self.current_dataset.dxw.size - 1 837 self.current_dataset.dxl = np.append(self.current_dataset.dxl, 838 np.zeros([array_size])) 839 elif dql_exists and not dqw_exists: 840 array_size = self.current_dataset.dxl.size - 1 841 self.current_dataset.dxw = np.append(self.current_dataset.dxw, 842 np.zeros([array_size])) 843 elif not dql_exists and not dqw_exists and not dq_exists: 844 array_size = self.current_dataset.x.size - 1 845 self.current_dataset.dx = np.append(self.current_dataset.dx, 846 np.zeros([array_size])) 847 if not di_exists: 848 array_size = self.current_dataset.y.size - 1 849 self.current_dataset.dy = np.append(self.current_dataset.dy, 850 np.zeros([array_size])) 851 elif isinstance(self.current_dataset, plottable_2D): 852 dqx_exists = False 853 dqy_exists = False 854 di_exists = False 855 mask_exists = False 856 if self.current_dataset.dqx_data is not None: 857 dqx_exists = True 858 if self.current_dataset.dqy_data is not None: 859 dqy_exists = True 860 if self.current_dataset.err_data is not None: 861 
di_exists = True 862 if self.current_dataset.mask is not None: 863 mask_exists = True 864 if not dqy_exists: 865 array_size = self.current_dataset.qy_data.size - 1 866 self.current_dataset.dqy_data = np.append( 867 self.current_dataset.dqy_data, np.zeros([array_size])) 868 if not dqx_exists: 869 array_size = self.current_dataset.qx_data.size - 1 870 self.current_dataset.dqx_data = np.append( 871 self.current_dataset.dqx_data, np.zeros([array_size])) 872 if not di_exists: 873 array_size = self.current_dataset.data.size - 1 874 self.current_dataset.err_data = np.append( 875 self.current_dataset.err_data, np.zeros([array_size])) 876 if not mask_exists: 877 array_size = self.current_dataset.data.size - 1 878 self.current_dataset.mask = np.append( 879 self.current_dataset.mask, 880 np.ones([array_size] ,dtype=bool)) 881 882 ####### All methods below are for writing CanSAS XML files ####### 883 660 a method to check all resolution data sets are the same size as I and q 661 """ 662 dql_exists = False 663 dqw_exists = False 664 dq_exists = False 665 di_exists = False 666 if self.current_dataset.dxl is not None: 667 dql_exists = True 668 if self.current_dataset.dxw is not None: 669 dqw_exists = True 670 if self.current_dataset.dx is not None: 671 dq_exists = True 672 if self.current_dataset.dy is not None: 673 di_exists = True 674 if dqw_exists and not dql_exists: 675 array_size = self.current_dataset.dxw.size - 1 676 self.current_dataset.dxl = np.append(self.current_dataset.dxl, 677 np.zeros([array_size])) 678 elif dql_exists and not dqw_exists: 679 array_size = self.current_dataset.dxl.size - 1 680 self.current_dataset.dxw = np.append(self.current_dataset.dxw, 681 np.zeros([array_size])) 682 elif not dql_exists and not dqw_exists and not dq_exists: 683 array_size = self.current_dataset.x.size - 1 684 self.current_dataset.dx = np.append(self.current_dataset.dx, 685 np.zeros([array_size])) 686 if not di_exists: 687 array_size = self.current_dataset.y.size - 1 688 self.current_dataset.dy = np.append(self.current_dataset.dy, 689 np.zeros([array_size])) 690 691 def _initialize_new_data_set(self, node=None): 692 if node is not None: 693 for child in node: 694 if child.tag.replace(self.base_ns, "") == "Idata": 695 for i_child in child: 696 if i_child.tag.replace(self.base_ns, "") == "Qx": 697 self.current_dataset = plottable_2D() 698 return 699 self.current_dataset = plottable_1D(np.array(0), np.array(0)) 700 701 ## Writing Methods 884 702 def write(self, filename, datainfo): 885 703 """ … … 1516 1334 exec "storage.%s = entry.text.strip()" % variable 1517 1335 1518 1519 1336 # DO NOT REMOVE Called by outside packages: 1520 1337 # sas.sasgui.perspectives.invariant.invariant_state -
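Reviewer note: the `_create_unique_key` helper kept above prevents repeated entry names from overwriting one another in a dictionary by recursively appending a counter until the candidate key is free. A minimal standalone sketch of that same pattern (the function name and sample keys here are illustrative, not part of the reader API):

    def create_unique_key(dictionary, name, numb=0):
        # Append _1, _2, ... to the base name until it is not already a key.
        if dictionary.get(name) is not None:
            numb += 1
            name = name.split("_")[0] + "_{0}".format(numb)
            name = create_unique_key(dictionary, name, numb)
        return name

    entries = {"Idata": 1, "Idata_1": 2}
    print(create_unique_key(entries, "Idata"))  # -> "Idata_2"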
src/sas/sascalc/dataloader/readers/cansas_reader_HDF5.py
rc9ecd1b rcd57c7d4 13 13 TransmissionSpectrum, Detector 14 14 from sas.sascalc.dataloader.data_info import combine_data_info_with_plottable 15 16 17 class Reader(): 15 from sas.sascalc.dataloader.loader_exceptions import FileContentsException, DefaultReaderException 16 from sas.sascalc.dataloader.file_reader_base_class import FileReader 17 18 19 class Reader(FileReader): 18 20 """ 19 21 A class for reading in CanSAS v2.0 data files. The existing iteration opens … … 40 42 # Raw file contents to be processed 41 43 raw_data = None 42 # Data info currently being read in43 current_datainfo = None44 # SASdata set currently being read in45 current_dataset = None46 44 # List of plottable1D objects that should be linked to the current_datainfo 47 45 data1d = None … … 56 54 # Flag to bypass extension check 57 55 allow_all = True 58 # List of files to return 59 output = None 60 61 def read(self, filename): 56 57 def get_file_contents(self): 62 58 """ 63 59 This is the general read method that all SasView data_loaders must have. … … 68 64 # Reinitialize when loading a new data file to reset all class variables 69 65 self.reset_class_variables() 66 67 filename = self.f_open.name 68 self.f_open.close() # IO handled by h5py 69 70 70 # Check that the file exists 71 71 if os.path.isfile(filename): … … 75 75 if extension in self.ext or self.allow_all: 76 76 # Load the data file 77 self.raw_data = h5py.File(filename, 'r') 78 # Read in all child elements of top level SASroot 79 self.read_children(self.raw_data, []) 80 # Add the last data set to the list of outputs 81 self.add_data_set() 82 # Close the data file 83 self.raw_data.close() 84 # Return data set(s) 85 return self.output 77 try: 78 self.raw_data = h5py.File(filename, 'r') 79 except Exception as e: 80 if extension not in self.ext: 81 msg = "CanSAS2.0 HDF5 Reader could not load file {}".format(basename + extension) 82 raise DefaultReaderException(msg) 83 raise FileContentsException(e.message) 84 try: 85 # Read in all child elements of top level SASroot 86 self.read_children(self.raw_data, []) 87 # Add the last data set to the list of outputs 88 self.add_data_set() 89 except Exception as exc: 90 raise FileContentsException(exc.message) 91 finally: 92 # Close the data file 93 self.raw_data.close() 94 95 for dataset in self.output: 96 if isinstance(dataset, Data1D): 97 if dataset.x.size < 5: 98 self.output = [] 99 raise FileContentsException("Fewer than 5 data points found.") 86 100 87 101 def reset_class_variables(self): … … 430 444 Data1D and Data2D objects 431 445 """ 432 433 446 # Type cast data arrays to float64 434 447 if len(self.current_datainfo.trans_spectrum) > 0: … … 454 467 # Type cast data arrays to float64 and find min/max as appropriate 455 468 for dataset in self.data2d: 456 dataset.data = dataset.data.astype(np.float64)457 dataset.err_data = dataset.err_data.astype(np.float64)458 if dataset.qx_data is not None:459 dataset.xmin = np.min(dataset.qx_data)460 dataset.xmax = np.max(dataset.qx_data)461 dataset.qx_data = dataset.qx_data.astype(np.float64)462 if dataset.dqx_data is not None:463 dataset.dqx_data = dataset.dqx_data.astype(np.float64)464 if dataset.qy_data is not None:465 dataset.ymin = np.min(dataset.qy_data)466 dataset.ymax = np.max(dataset.qy_data)467 dataset.qy_data = dataset.qy_data.astype(np.float64)468 if dataset.dqy_data is not None:469 dataset.dqy_data = dataset.dqy_data.astype(np.float64)470 if dataset.q_data is not None:471 dataset.q_data = dataset.q_data.astype(np.float64)472 469 zeros = np.ones(dataset.data.size, 
dtype=bool) 473 470 try: … … 492 489 dataset.x_bins = dataset.qx_data[:n_cols] 493 490 dataset.data = dataset.data.flatten() 494 495 final_dataset = combine_data_info_with_plottable( 496 dataset, self.current_datainfo) 497 self.output.append(final_dataset) 491 self.current_dataset = dataset 492 self.send_to_output() 498 493 499 494 for dataset in self.data1d: 500 if dataset.x is not None: 501 dataset.x = dataset.x.astype(np.float64) 502 dataset.xmin = np.min(dataset.x) 503 dataset.xmax = np.max(dataset.x) 504 if dataset.y is not None: 505 dataset.y = dataset.y.astype(np.float64) 506 dataset.ymin = np.min(dataset.y) 507 dataset.ymax = np.max(dataset.y) 508 if dataset.dx is not None: 509 dataset.dx = dataset.dx.astype(np.float64) 510 if dataset.dxl is not None: 511 dataset.dxl = dataset.dxl.astype(np.float64) 512 if dataset.dxw is not None: 513 dataset.dxw = dataset.dxw.astype(np.float64) 514 if dataset.dy is not None: 515 dataset.dy = dataset.dy.astype(np.float64) 516 final_dataset = combine_data_info_with_plottable( 517 dataset, self.current_datainfo) 518 self.output.append(final_dataset) 495 self.current_dataset = dataset 496 self.send_to_output() 519 497 520 498 def add_data_set(self, key=""): -
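Reviewer note: the reworked `get_file_contents` above surfaces raw h5py failures as loader exceptions and guarantees the file handle is closed even when the tree walk fails. A minimal sketch of that open/process/close pattern, assuming only what the diff shows (the `process` callback is a hypothetical stand-in for `read_children` plus `add_data_set`):

    import h5py

    class FileContentsException(Exception):
        # The file matched the reader, but its contents could not be used.
        pass

    def load_hdf5(filename, process):
        # Turn open errors into loader exceptions rather than raw h5py ones.
        try:
            raw_data = h5py.File(filename, 'r')
        except Exception as exc:
            raise FileContentsException(str(exc))
        try:
            return process(raw_data)
        finally:
            raw_data.close()  # close whether or not processing succeeded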
src/sas/sascalc/dataloader/readers/danse_reader.py
r235f514 ra78a02f 5 5 #This software was developed by the University of Tennessee as part of the 6 6 #Distributed Data Analysis of Neutron Scattering Experiments (DANSE) 7 #project funded by the US National Science Foundation. 7 #project funded by the US National Science Foundation. 8 8 #If you use DANSE applications to do scientific research that leads to 9 9 #publication, we ask that you acknowledge the use of the software with the … … 14 14 import math 15 15 import os 16 import sys17 16 import numpy as np 18 17 import logging 19 from sas.sascalc.dataloader.data_info import Data2D, Detector18 from sas.sascalc.dataloader.data_info import plottable_2D, DataInfo, Detector 20 19 from sas.sascalc.dataloader.manipulations import reader2D_converter 20 from sas.sascalc.dataloader.file_reader_base_class import FileReader 21 from sas.sascalc.dataloader.loader_exceptions import FileContentsException, DataReaderException 21 22 22 23 logger = logging.getLogger(__name__) … … 30 31 31 32 32 class Reader :33 class Reader(FileReader): 33 34 """ 34 35 Example data manipulation … … 40 41 ## Extension 41 42 ext = ['.sans', '.SANS'] 42 43 def read(self, filename=None): 44 """ 45 Open and read the data in a file 46 @param file: path of the file 47 """ 48 49 read_it = False 50 for item in self.ext: 51 if filename.lower().find(item) >= 0: 52 read_it = True 53 54 if read_it: 43 44 def get_file_contents(self): 45 self.current_datainfo = DataInfo() 46 self.current_dataset = plottable_2D() 47 self.output = [] 48 49 loaded_correctly = True 50 error_message = "" 51 52 # defaults 53 # wavelength in Angstrom 54 wavelength = 10.0 55 # Distance in meter 56 distance = 11.0 57 # Pixel number of center in x 58 center_x = 65 59 # Pixel number of center in y 60 center_y = 65 61 # Pixel size [mm] 62 pixel = 5.0 63 # Size in x, in pixels 64 size_x = 128 65 # Size in y, in pixels 66 size_y = 128 67 # Format version 68 fversion = 1.0 69 70 self.current_datainfo.filename = os.path.basename(self.f_open.name) 71 detector = Detector() 72 self.current_datainfo.detector.append(detector) 73 74 self.current_dataset.data = np.zeros([size_x, size_y]) 75 self.current_dataset.err_data = np.zeros([size_x, size_y]) 76 77 read_on = True 78 data_start_line = 1 79 while read_on: 80 line = self.f_open.readline() 81 data_start_line += 1 82 if line.find("DATA:") >= 0: 83 read_on = False 84 break 85 toks = line.split(':') 55 86 try: 56 datafile = open(filename, 'r')57 except:58 raise RuntimeError,"danse_reader cannot open %s" % (filename)59 60 # defaults61 # wavelength in Angstrom62 wavelength = 10.063 # Distance in meter64 distance = 11.065 # Pixel number of center in x66 center_x = 6567 # Pixel number of center in y68 center_y = 6569 # Pixel size [mm]70 pixel = 5.071 # Size in x, in pixels72 size_x = 12873 # Size in y, in pixels74 size_y = 12875 # Format version76 fversion = 1.077 78 output = Data2D()79 output.filename = os.path.basename(filename)80 detector = Detector()81 output.detector.append(detector)82 83 output.data = np.zeros([size_x,size_y])84 output.err_data = np.zeros([size_x, size_y])85 86 data_conv_q = None87 data_conv_i = None88 89 if has_converter == True and output.Q_unit != '1/A':90 data_conv_q = Converter('1/A')91 # Test it92 data_conv_q(1.0, output.Q_unit)93 94 if has_converter == True and output.I_unit != '1/cm':95 data_conv_i = Converter('1/cm')96 # Test it97 data_conv_i(1.0, output.I_unit)98 99 read_on = True100 while read_on:101 line = datafile.readline()102 if line.find("DATA:") >= 0:103 read_on = False104 break105 toks = 
line.split(':')106 87 if toks[0] == "FORMATVERSION": 107 88 fversion = float(toks[1]) 108 if toks[0] == "WAVELENGTH":89 elif toks[0] == "WAVELENGTH": 109 90 wavelength = float(toks[1]) 110 91 elif toks[0] == "DISTANCE": … … 120 101 elif toks[0] == "SIZE_Y": 121 102 size_y = int(toks[1]) 122 123 # Read the data 124 data = [] 125 error = [] 126 if fversion == 1.0: 127 data_str = datafile.readline() 128 data = data_str.split(' ') 129 else: 130 read_on = True 131 while read_on: 132 data_str = datafile.readline() 133 if len(data_str) == 0: 134 read_on = False 135 else: 136 toks = data_str.split() 137 try: 138 val = float(toks[0]) 139 err = float(toks[1]) 140 if data_conv_i is not None: 141 val = data_conv_i(val, units=output._yunit) 142 err = data_conv_i(err, units=output._yunit) 143 data.append(val) 144 error.append(err) 145 except: 146 logger.info("Skipping line:%s,%s" %(data_str, 147 sys.exc_value)) 148 149 # Initialize 150 x_vals = [] 151 y_vals = [] 152 ymin = None 153 ymax = None 154 xmin = None 155 xmax = None 156 157 # Qx and Qy vectors 158 theta = pixel / distance / 100.0 159 stepq = 4.0 * math.pi / wavelength * math.sin(theta / 2.0) 160 for i_x in range(size_x): 161 theta = (i_x - center_x + 1) * pixel / distance / 100.0 162 qx = 4.0 * math.pi / wavelength * math.sin(theta / 2.0) 163 164 if has_converter == True and output.Q_unit != '1/A': 165 qx = data_conv_q(qx, units=output.Q_unit) 166 167 x_vals.append(qx) 168 if xmin is None or qx < xmin: 169 xmin = qx 170 if xmax is None or qx > xmax: 171 xmax = qx 172 173 ymin = None 174 ymax = None 175 for i_y in range(size_y): 176 theta = (i_y - center_y + 1) * pixel / distance / 100.0 177 qy = 4.0 * math.pi / wavelength * math.sin(theta/2.0) 178 179 if has_converter == True and output.Q_unit != '1/A': 180 qy = data_conv_q(qy, units=output.Q_unit) 181 182 y_vals.append(qy) 183 if ymin is None or qy < ymin: 184 ymin = qy 185 if ymax is None or qy > ymax: 186 ymax = qy 187 188 # Store the data in the 2D array 189 i_x = 0 190 i_y = -1 191 192 for i_pt in range(len(data)): 193 try: 194 value = float(data[i_pt]) 195 except: 196 # For version 1.0, the data were still 197 # stored as strings at this point. 
198 msg = "Skipping entry (v1.0):%s,%s" % (str(data[i_pt]), 199 sys.exc_value) 200 logger.info(msg) 201 202 # Get bin number 203 if math.fmod(i_pt, size_x) == 0: 204 i_x = 0 205 i_y += 1 206 else: 207 i_x += 1 208 209 output.data[i_y][i_x] = value 210 if fversion>1.0: 211 output.err_data[i_y][i_x] = error[i_pt] 212 213 # Store all data 214 # Store wavelength 215 if has_converter == True and output.source.wavelength_unit != 'A': 216 conv = Converter('A') 217 wavelength = conv(wavelength, 218 units=output.source.wavelength_unit) 219 output.source.wavelength = wavelength 220 221 # Store distance 222 if has_converter == True and detector.distance_unit != 'm': 223 conv = Converter('m') 224 distance = conv(distance, units=detector.distance_unit) 225 detector.distance = distance 226 227 # Store pixel size 228 if has_converter == True and detector.pixel_size_unit != 'mm': 229 conv = Converter('mm') 230 pixel = conv(pixel, units=detector.pixel_size_unit) 231 detector.pixel_size.x = pixel 232 detector.pixel_size.y = pixel 233 234 # Store beam center in distance units 235 detector.beam_center.x = center_x * pixel 236 detector.beam_center.y = center_y * pixel 237 238 # Store limits of the image (2D array) 239 xmin = xmin - stepq / 2.0 240 xmax = xmax + stepq / 2.0 241 ymin = ymin - stepq /2.0 242 ymax = ymax + stepq / 2.0 243 244 if has_converter == True and output.Q_unit != '1/A': 245 xmin = data_conv_q(xmin, units=output.Q_unit) 246 xmax = data_conv_q(xmax, units=output.Q_unit) 247 ymin = data_conv_q(ymin, units=output.Q_unit) 248 ymax = data_conv_q(ymax, units=output.Q_unit) 249 output.xmin = xmin 250 output.xmax = xmax 251 output.ymin = ymin 252 output.ymax = ymax 253 254 # Store x and y axis bin centers 255 output.x_bins = x_vals 256 output.y_bins = y_vals 257 258 # Units 259 if data_conv_q is not None: 260 output.xaxis("\\rm{Q_{x}}", output.Q_unit) 261 output.yaxis("\\rm{Q_{y}}", output.Q_unit) 262 else: 263 output.xaxis("\\rm{Q_{x}}", 'A^{-1}') 264 output.yaxis("\\rm{Q_{y}}", 'A^{-1}') 265 266 if data_conv_i is not None: 267 output.zaxis("\\rm{Intensity}", output.I_unit) 268 else: 269 output.zaxis("\\rm{Intensity}", "cm^{-1}") 270 271 if not fversion >= 1.0: 272 msg = "Danse_reader can't read this file %s" % filename 273 raise ValueError, msg 274 else: 275 logger.info("Danse_reader Reading %s \n" % filename) 276 277 # Store loading process information 278 output.meta_data['loader'] = self.type_name 279 output = reader2D_converter(output) 280 return output 281 282 return None 103 except ValueError as e: 104 error_message += "Unable to parse {}. Default value used.\n".format(toks[0]) 105 loaded_correctly = False 106 107 # Read the data 108 data = [] 109 error = [] 110 if not fversion >= 1.0: 111 msg = "danse_reader can't read this file {}".format(self.f_open.name) 112 raise FileContentsException(msg) 113 114 for line_num, data_str in enumerate(self.f_open.readlines()): 115 toks = data_str.split() 116 try: 117 val = float(toks[0]) 118 err = float(toks[1]) 119 data.append(val) 120 error.append(err) 121 except ValueError as exc: 122 msg = "Unable to parse line {}: {}".format(line_num + data_start_line, data_str.strip()) 123 raise FileContentsException(msg) 124 125 num_pts = size_x * size_y 126 if len(data) < num_pts: 127 msg = "Not enough data points provided. Expected {} but got {}".format( 128 size_x * size_y, len(data)) 129 raise FileContentsException(msg) 130 elif len(data) > num_pts: 131 error_message += ("Too many data points provided. Expected {0} but" 132 " got {1}. 
Only the first {0} will be used.\n").format(num_pts, len(data)) 133 loaded_correctly = False 134 data = data[:num_pts] 135 error = error[:num_pts] 136 137 # Qx and Qy vectors 138 theta = pixel / distance / 100.0 139 i_x = np.arange(size_x) 140 theta = (i_x - center_x + 1) * pixel / distance / 100.0 141 x_vals = 4.0 * np.pi / wavelength * np.sin(theta / 2.0) 142 xmin = x_vals.min() 143 xmax = x_vals.max() 144 145 i_y = np.arange(size_y) 146 theta = (i_y - center_y + 1) * pixel / distance / 100.0 147 y_vals = 4.0 * np.pi / wavelength * np.sin(theta / 2.0) 148 ymin = y_vals.min() 149 ymax = y_vals.max() 150 151 self.current_dataset.data = np.array(data, dtype=np.float64).reshape((size_y, size_x)) 152 if fversion > 1.0: 153 self.current_dataset.err_data = np.array(error, dtype=np.float64).reshape((size_y, size_x)) 154 155 # Store all data 156 # Store wavelength 157 if has_converter == True and self.current_datainfo.source.wavelength_unit != 'A': 158 conv = Converter('A') 159 wavelength = conv(wavelength, 160 units=self.current_datainfo.source.wavelength_unit) 161 self.current_datainfo.source.wavelength = wavelength 162 163 # Store distance 164 if has_converter == True and detector.distance_unit != 'm': 165 conv = Converter('m') 166 distance = conv(distance, units=detector.distance_unit) 167 detector.distance = distance 168 169 # Store pixel size 170 if has_converter == True and detector.pixel_size_unit != 'mm': 171 conv = Converter('mm') 172 pixel = conv(pixel, units=detector.pixel_size_unit) 173 detector.pixel_size.x = pixel 174 detector.pixel_size.y = pixel 175 176 # Store beam center in distance units 177 detector.beam_center.x = center_x * pixel 178 detector.beam_center.y = center_y * pixel 179 180 181 self.current_dataset.xaxis("\\rm{Q_{x}}", 'A^{-1}') 182 self.current_dataset.yaxis("\\rm{Q_{y}}", 'A^{-1}') 183 self.current_dataset.zaxis("\\rm{Intensity}", "cm^{-1}") 184 185 self.current_dataset.x_bins = x_vals 186 self.current_dataset.y_bins = y_vals 187 188 # Reshape data 189 x_vals = np.tile(x_vals, (size_y, 1)).flatten() 190 y_vals = np.tile(y_vals, (size_x, 1)).T.flatten() 191 if (np.all(self.current_dataset.err_data == None) 192 or np.any(self.current_dataset.err_data <= 0)): 193 new_err_data = np.sqrt(np.abs(self.current_dataset.data)) 194 else: 195 new_err_data = self.current_dataset.err_data.flatten() 196 197 self.current_dataset.err_data = new_err_data 198 self.current_dataset.qx_data = x_vals 199 self.current_dataset.qy_data = y_vals 200 self.current_dataset.q_data = np.sqrt(x_vals**2 + y_vals**2) 201 self.current_dataset.mask = np.ones(len(x_vals), dtype=bool) 202 203 # Store loading process information 204 self.current_datainfo.meta_data['loader'] = self.type_name 205 206 self.send_to_output() 207 208 if not loaded_correctly: 209 raise DataReaderException(error_message) -
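Reviewer note: the per-pixel loop is gone; both Q axes are now built in one vectorized step from the small-angle relation q = (4*pi/lambda)*sin(theta/2), with theta taken per pixel from the geometry header. A self-contained sketch using the reader's own defaults (128x128 detector, center pixel 65, 5.0 mm pixels, 11.0 m distance, 10.0 A wavelength; the function name is illustrative):

    import numpy as np

    def pixel_to_q(n_pix, center, pixel_mm, distance_m, wavelength_A):
        # Scattering angle subtended by each pixel, then Q in 1/A,
        # mirroring the axis construction in danse_reader.
        i = np.arange(n_pix)
        theta = (i - center + 1) * pixel_mm / distance_m / 100.0
        return 4.0 * np.pi / wavelength_A * np.sin(theta / 2.0)

    qx = pixel_to_q(128, 65, 5.0, 11.0, 10.0)  # corresponds to x_vals above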
src/sas/sascalc/dataloader/readers/red2d_reader.py
ra1b8fee r2f85af7 5 5 #This software was developed by the University of Tennessee as part of the 6 6 #Distributed Data Analysis of Neutron Scattering Experiments (DANSE) 7 #project funded by the US National Science Foundation. 7 #project funded by the US National Science Foundation. 8 8 #See the license text in license.txt 9 9 #copyright 2008, University of Tennessee 10 10 ###################################################################### 11 from __future__ import print_function12 13 11 import os 14 12 import numpy as np 15 13 import math 16 from sas.sascalc.dataloader.data_info import Data2D, Detector 14 from sas.sascalc.dataloader.data_info import plottable_2D, DataInfo, Detector 15 from sas.sascalc.dataloader.file_reader_base_class import FileReader 16 from sas.sascalc.dataloader.loader_exceptions import FileContentsException 17 17 18 18 # Look for unit converter … … 22 22 except: 23 23 has_converter = False 24 25 24 25 26 26 def check_point(x_point): 27 27 """ … … 33 33 except: 34 34 return 0 35 36 37 class Reader :35 36 37 class Reader(FileReader): 38 38 """ Simple data reader for Igor data files """ 39 39 ## File type … … 43 43 ## Extension 44 44 ext = ['.DAT', '.dat'] 45 45 46 46 def write(self, filename, data): 47 47 """ 48 48 Write to .dat 49 49 50 50 :param filename: file name to write 51 51 :param data: data2D … … 53 53 import time 54 54 # Write the file 55 fd = open(filename, 'w') 56 t = time.localtime() 57 time_str = time.strftime("%H:%M on %b %d %y", t) 58 59 header_str = "Data columns are Qx - Qy - I(Qx,Qy)\n\nASCII data" 60 header_str += " created at %s \n\n" % time_str 61 # simple 2D header 62 fd.write(header_str) 63 # write qx qy I values 64 for i in range(len(data.data)): 65 fd.write("%g %g %g\n" % (data.qx_data[i], 66 data.qy_data[i], 67 data.data[i])) 68 # close 69 fd.close() 70 71 def read(self, filename=None): 72 """ Read file """ 73 if not os.path.isfile(filename): 74 raise ValueError, \ 75 "Specified file %s is not a regular file" % filename 76 55 try: 56 fd = open(filename, 'w') 57 t = time.localtime() 58 time_str = time.strftime("%H:%M on %b %d %y", t) 59 60 header_str = "Data columns are Qx - Qy - I(Qx,Qy)\n\nASCII data" 61 header_str += " created at %s \n\n" % time_str 62 # simple 2D header 63 fd.write(header_str) 64 # write qx qy I values 65 for i in range(len(data.data)): 66 fd.write("%g %g %g\n" % (data.qx_data[i], 67 data.qy_data[i], 68 data.data[i])) 69 finally: 70 fd.close() 71 72 def get_file_contents(self): 77 73 # Read file 78 f = open(filename, 'r') 79 buf = f.read() 80 f.close() 74 buf = self.f_open.read() 75 self.f_open.close() 81 76 # Instantiate data object 82 output = Data2D() 83 output.filename = os.path.basename(filename) 84 detector = Detector() 85 if len(output.detector) > 0: 86 print(str(output.detector[0])) 87 output.detector.append(detector) 88 77 self.current_dataset = plottable_2D() 78 self.current_datainfo = DataInfo() 79 self.current_datainfo.filename = os.path.basename(self.f_open.name) 80 self.current_datainfo.detector.append(Detector()) 81 89 82 # Get content 90 data Started = False91 83 data_started = False 84 92 85 ## Defaults 93 86 lines = buf.split('\n') 94 87 x = [] 95 88 y = [] 96 89 97 90 wavelength = None 98 91 distance = None 99 92 transmission = None 100 93 101 94 pixel_x = None 102 95 pixel_y = None 103 104 isInfo = False 105 isCenter = False 106 107 data_conv_q = None 108 data_conv_i = None 109 110 # Set units: This is the unit assumed for Q and I in the data file. 
111 if has_converter == True and output.Q_unit != '1/A': 112 data_conv_q = Converter('1/A') 113 # Test it 114 data_conv_q(1.0, output.Q_unit) 115 116 if has_converter == True and output.I_unit != '1/cm': 117 data_conv_i = Converter('1/cm') 118 # Test it 119 data_conv_i(1.0, output.I_unit) 120 121 96 97 is_info = False 98 is_center = False 99 122 100 # Remove the last lines before the for loop if the lines are empty 123 101 # to calculate the exact number of data points … … 135 113 ## Reading the header applies only to IGOR/NIST 2D q_map data files 136 114 # Find setup info line 137 if is Info:138 is Info = False115 if is_info: 116 is_info = False 139 117 line_toks = line.split() 140 118 # Wavelength in Angstrom … … 143 121 # Units 144 122 if has_converter == True and \ 145 output.source.wavelength_unit != 'A':123 self.current_datainfo.source.wavelength_unit != 'A': 146 124 conv = Converter('A') 147 125 wavelength = conv(wavelength, 148 units= output.source.wavelength_unit)126 units=self.current_datainfo.source.wavelength_unit) 149 127 except: 150 128 #Not required … … 154 132 distance = float(line_toks[3]) 155 133 # Units 156 if has_converter == True and detector.distance_unit != 'm':134 if has_converter == True and self.current_datainfo.detector[0].distance_unit != 'm': 157 135 conv = Converter('m') 158 distance = conv(distance, units=detector.distance_unit) 136 distance = conv(distance, 137 units=self.current_datainfo.detector[0].distance_unit) 159 138 except: 160 139 #Not required 161 140 pass 162 141 163 142 # Distance in meters 164 143 try: … … 167 146 #Not required 168 147 pass 169 148 170 149 if line.count("LAMBDA") > 0: 171 is Info = True172 150 is_info = True 151 173 152 # Find center info line 174 if is Center:175 is Center = False153 if is_center: 154 is_center = False 176 155 line_toks = line.split() 177 156 # Center in bin number … … 180 159 181 160 if line.count("BCENT") > 0: 182 is Center = True161 is_center = True 183 162 # Check version 184 163 if line.count("Data columns") > 0: … … 187 166 # Find data start 188 167 if line.count("ASCII data") > 0: 189 data Started = True168 data_started = True 190 169 continue 191 170 192 171 ## Read and get data. 193 if data Started == True:172 if data_started == True: 194 173 line_toks = line.split() 195 174 if len(line_toks) == 0: 196 175 #empty line 197 176 continue 198 # the number of columns must be stayed same 177 # the number of columns must be stayed same 199 178 col_num = len(line_toks) 200 179 break … … 204 183 # index for lines_array 205 184 lines_index = np.arange(len(lines)) 206 185 207 186 # get the data lines 208 187 data_lines = lines_array[lines_index >= (line_num - 1)] … … 213 192 # split all data to one big list w/" "separator 214 193 data_list = data_list.split() 215 194 216 195 # Check if the size is consistent with data, otherwise 217 196 #try the tab(\t) separator … … 233 212 data_point = data_array.reshape(row_num, col_num).transpose() 234 213 except: 235 msg = "red2d_reader : Can't read this file: Not a proper file format"236 raise ValueError, msg214 msg = "red2d_reader can't read this file: Incorrect number of data points provided." 
215 raise FileContentsException(msg) 237 216 ## Get the all data: Let's HARDcoding; Todo find better way 238 217 # Defaults … … 257 236 #if col_num > (6 + ver): mask[data_point[(6 + ver)] < 1] = False 258 237 q_data = np.sqrt(qx_data*qx_data+qy_data*qy_data+qz_data*qz_data) 259 260 # Extra protection(it is needed for some data files): 238 239 # Extra protection(it is needed for some data files): 261 240 # If all mask elements are False, put all True 262 241 if not mask.any(): 263 242 mask[mask == False] = True 264 243 265 244 # Store limits of the image in q space 266 245 xmin = np.min(qx_data) … … 269 248 ymax = np.max(qy_data) 270 249 271 # units272 if has_converter == True and output.Q_unit != '1/A':273 xmin = data_conv_q(xmin, units=output.Q_unit)274 xmax = data_conv_q(xmax, units=output.Q_unit)275 ymin = data_conv_q(ymin, units=output.Q_unit)276 ymax = data_conv_q(ymax, units=output.Q_unit)277 278 250 ## calculate the range of the qx and qy_data 279 251 x_size = math.fabs(xmax - xmin) 280 252 y_size = math.fabs(ymax - ymin) 281 253 282 254 # calculate the number of pixels in the each axes 283 255 npix_y = math.floor(math.sqrt(len(data))) 284 256 npix_x = math.floor(len(data) / npix_y) 285 257 286 258 # calculate the size of bins 287 259 xstep = x_size / (npix_x - 1) 288 260 ystep = y_size / (npix_y - 1) 289 261 290 262 # store x and y axis bin centers in q space 291 263 x_bins = np.arange(xmin, xmax + xstep, xstep) 292 264 y_bins = np.arange(ymin, ymax + ystep, ystep) 293 265 294 266 # get the limits of q values 295 267 xmin = xmin - xstep / 2 … … 297 269 ymin = ymin - ystep / 2 298 270 ymax = ymax + ystep / 2 299 271 300 272 #Store data in outputs 301 273 #TODO: Check the lengths 302 output.data = data274 self.current_dataset.data = data 303 275 if (err_data == 1).all(): 304 output.err_data = np.sqrt(np.abs(data))305 output.err_data[output.err_data == 0.0] = 1.0276 self.current_dataset.err_data = np.sqrt(np.abs(data)) 277 self.current_dataset.err_data[self.current_dataset.err_data == 0.0] = 1.0 306 278 else: 307 output.err_data = err_data308 309 output.qx_data = qx_data310 output.qy_data = qy_data311 output.q_data = q_data312 output.mask = mask313 314 output.x_bins = x_bins315 output.y_bins = y_bins316 317 output.xmin = xmin318 output.xmax = xmax319 output.ymin = ymin320 output.ymax = ymax321 322 output.source.wavelength = wavelength323 279 self.current_dataset.err_data = err_data 280 281 self.current_dataset.qx_data = qx_data 282 self.current_dataset.qy_data = qy_data 283 self.current_dataset.q_data = q_data 284 self.current_dataset.mask = mask 285 286 self.current_dataset.x_bins = x_bins 287 self.current_dataset.y_bins = y_bins 288 289 self.current_dataset.xmin = xmin 290 self.current_dataset.xmax = xmax 291 self.current_dataset.ymin = ymin 292 self.current_dataset.ymax = ymax 293 294 self.current_datainfo.source.wavelength = wavelength 295 324 296 # Store pixel size in mm 325 detector.pixel_size.x = pixel_x326 detector.pixel_size.y = pixel_y327 297 self.current_datainfo.detector[0].pixel_size.x = pixel_x 298 self.current_datainfo.detector[0].pixel_size.y = pixel_y 299 328 300 # Store the sample to detector distance 329 detector.distance = distance330 301 self.current_datainfo.detector[0].distance = distance 302 331 303 # optional data: if all of dq data == 0, do not pass to output 332 304 if len(dqx_data) == len(qx_data) and dqx_data.any() != 0: … … 340 312 cos_th = qx_data / diag 341 313 sin_th = qy_data / diag 342 output.dqx_data = np.sqrt((dqx_data * cos_th) * \314 
self.current_dataset.dqx_data = np.sqrt((dqx_data * cos_th) * \ 343 315 (dqx_data * cos_th) \ 344 316 + (dqy_data * sin_th) * \ 345 317 (dqy_data * sin_th)) 346 output.dqy_data = np.sqrt((dqx_data * sin_th) * \318 self.current_dataset.dqy_data = np.sqrt((dqx_data * sin_th) * \ 347 319 (dqx_data * sin_th) \ 348 320 + (dqy_data * cos_th) * \ 349 321 (dqy_data * cos_th)) 350 322 else: 351 output.dqx_data = dqx_data352 output.dqy_data = dqy_data323 self.current_dataset.dqx_data = dqx_data 324 self.current_dataset.dqy_data = dqy_data 353 325 354 326 # Units of axes 355 if data_conv_q is not None: 356 output.xaxis("\\rm{Q_{x}}", output.Q_unit) 357 output.yaxis("\\rm{Q_{y}}", output.Q_unit) 358 else: 359 output.xaxis("\\rm{Q_{x}}", 'A^{-1}') 360 output.yaxis("\\rm{Q_{y}}", 'A^{-1}') 361 if data_conv_i is not None: 362 output.zaxis("\\rm{Intensity}", output.I_unit) 363 else: 364 output.zaxis("\\rm{Intensity}", "cm^{-1}") 365 327 self.current_dataset.xaxis("\\rm{Q_{x}}", 'A^{-1}') 328 self.current_dataset.yaxis("\\rm{Q_{y}}", 'A^{-1}') 329 self.current_dataset.zaxis("\\rm{Intensity}", "cm^{-1}") 330 366 331 # Store loading process information 367 output.meta_data['loader'] = self.type_name368 369 return output332 self.current_datainfo.meta_data['loader'] = self.type_name 333 334 self.send_to_output() -
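Reviewer note: when the optional dQ columns appear to hold resolution components along the radial and perpendicular directions, the reader projects them onto the x and y axes in quadrature using the direction cosines of each Q point, as in the block above. A short sketch of that projection (function and argument names are illustrative; inputs assumed to be numpy vectors with no point exactly at the origin):

    import numpy as np

    def polar_dq_to_cartesian(qx, qy, dq_para, dq_perp):
        # Direction cosines of each point, then quadrature projection,
        # mirroring red2d_reader's handling of q_map resolution columns.
        diag = np.sqrt(qx * qx + qy * qy)
        cos_th = qx / diag
        sin_th = qy / diag
        dqx = np.sqrt((dq_para * cos_th) ** 2 + (dq_perp * sin_th) ** 2)
        dqy = np.sqrt((dq_para * sin_th) ** 2 + (dq_perp * cos_th) ** 2)
        return dqx, dqy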
src/sas/sascalc/dataloader/readers/sesans_reader.py
r149b8f6 rbe43448 8 8 import numpy as np 9 9 import os 10 from sas.sascalc.dataloader.data_info import Data1D 10 from sas.sascalc.dataloader.file_reader_base_class import FileReader 11 from sas.sascalc.dataloader.data_info import plottable_1D, DataInfo 12 from sas.sascalc.dataloader.loader_exceptions import FileContentsException, DataReaderException 11 13 12 14 # Check whether we have a converter available … … 18 20 _ZERO = 1e-16 19 21 20 21 class Reader: 22 class Reader(FileReader): 22 23 """ 23 24 Class to load sesans files (6 columns). … … 26 27 type_name = "SESANS" 27 28 28 # Wildcards29 ## Wildcards 29 30 type = ["SESANS files (*.ses)|*.ses", 30 31 "SESANS files (*..sesans)|*.sesans"] … … 35 36 allow_all = True 36 37 37 def read(self, path): 38 """ 39 Load data file 38 def get_file_contents(self): 39 self.current_datainfo = DataInfo() 40 self.current_dataset = plottable_1D(np.array([]), np.array([])) 41 self.current_datainfo.isSesans = True 42 self.output = [] 40 43 41 :param path: file path 44 line = self.f_open.readline() 45 params = {} 46 while not line.startswith("BEGIN_DATA"): 47 terms = line.split() 48 if len(terms) >= 2: 49 params[terms[0]] = " ".join(terms[1:]) 50 line = self.f_open.readline() 51 self.params = params 42 52 43 :return: SESANSData1D object, or None 53 if "FileFormatVersion" not in self.params: 54 raise FileContentsException("SES file missing FileFormatVersion") 55 if float(self.params["FileFormatVersion"]) >= 2.0: 56 raise FileContentsException("SASView only supports SES version 1") 44 57 45 :raise RuntimeError: when the file can't be opened 46 :raise ValueError: when the length of the data vectors are inconsistent 47 """ 48 if os.path.isfile(path): 49 basename = os.path.basename(path) 50 _, extension = os.path.splitext(basename) 51 if not (self.allow_all or extension.lower() in self.ext): 52 raise RuntimeError( 53 "{} has an unrecognized file extension".format(path)) 58 if "SpinEchoLength_unit" not in self.params: 59 raise FileContentsException("SpinEchoLength has no units") 60 if "Wavelength_unit" not in self.params: 61 raise FileContentsException("Wavelength has no units") 62 if params["SpinEchoLength_unit"] != params["Wavelength_unit"]: 63 raise FileContentsException("The spin echo data has rudely used " 64 "different units for the spin echo length " 65 "and the wavelength. 
While sasview could " 66 "handle this instance, it is a violation " 67 "of the file format and will not be " 68 "handled by other software.") 69 70 headers = self.f_open.readline().split() 71 72 self._insist_header(headers, "SpinEchoLength") 73 self._insist_header(headers, "Depolarisation") 74 self._insist_header(headers, "Depolarisation_error") 75 self._insist_header(headers, "Wavelength") 76 77 data = np.loadtxt(self.f_open) 78 79 if data.shape[1] != len(headers): 80 raise FileContentsException( 81 "File has {} headers, but {} columns".format( 82 len(headers), 83 data.shape[1])) 84 85 if not data.size: 86 raise FileContentsException("{} is empty".format(path)) 87 x = data[:, headers.index("SpinEchoLength")] 88 if "SpinEchoLength_error" in headers: 89 dx = data[:, headers.index("SpinEchoLength_error")] 54 90 else: 55 raise RuntimeError("{} is not a file".format(path)) 56 with open(path, 'r') as input_f: 57 line = input_f.readline() 58 params = {} 59 while not line.startswith("BEGIN_DATA"): 60 terms = line.split() 61 if len(terms) >= 2: 62 params[terms[0]] = " ".join(terms[1:]) 63 line = input_f.readline() 64 self.params = params 91 dx = x * 0.05 92 lam = data[:, headers.index("Wavelength")] 93 if "Wavelength_error" in headers: 94 dlam = data[:, headers.index("Wavelength_error")] 95 else: 96 dlam = lam * 0.05 97 y = data[:, headers.index("Depolarisation")] 98 dy = data[:, headers.index("Depolarisation_error")] 65 99 66 if "FileFormatVersion" not in self.params: 67 raise RuntimeError("SES file missing FileFormatVersion") 68 if float(self.params["FileFormatVersion"]) >= 2.0: 69 raise RuntimeError("SASView only supports SES version 1") 100 lam_unit = self._unit_fetch("Wavelength") 101 x, x_unit = self._unit_conversion(x, "A", 102 self._unit_fetch( 103 "SpinEchoLength")) 104 dx, dx_unit = self._unit_conversion( 105 dx, lam_unit, 106 self._unit_fetch("SpinEchoLength")) 107 dlam, dlam_unit = self._unit_conversion( 108 dlam, lam_unit, 109 self._unit_fetch("Wavelength")) 110 y_unit = self._unit_fetch("Depolarisation") 70 111 71 if "SpinEchoLength_unit" not in self.params: 72 raise RuntimeError("SpinEchoLength has no units") 73 if "Wavelength_unit" not in self.params: 74 raise RuntimeError("Wavelength has no units") 75 if params["SpinEchoLength_unit"] != params["Wavelength_unit"]: 76 raise RuntimeError("The spin echo data has rudely used " 77 "different units for the spin echo length " 78 "and the wavelength. 
While sasview could " 79 "handle this instance, it is a violation " 80 "of the file format and will not be " 81 "handled by other software.") 112 self.current_dataset.x = x 113 self.current_dataset.y = y 114 self.current_dataset.lam = lam 115 self.current_dataset.dy = dy 116 self.current_dataset.dx = dx 117 self.current_dataset.dlam = dlam 118 self.current_datainfo.isSesans = True 82 119 83 headers = input_f.readline().split() 120 self.current_datainfo._yunit = y_unit 121 self.current_datainfo._xunit = x_unit 122 self.current_datainfo.source.wavelength_unit = lam_unit 123 self.current_datainfo.source.wavelength = lam 124 self.current_datainfo.filename = os.path.basename(self.f_open.name) 125 self.current_dataset.xaxis(r"\rm{z}", x_unit) 126 # Adjust label to ln P/(lam^2 t), remove lam column refs 127 self.current_dataset.yaxis(r"\rm{ln(P)/(t \lambda^2)}", y_unit) 128 # Store loading process information 129 self.current_datainfo.meta_data['loader'] = self.type_name 130 self.current_datainfo.sample.name = params["Sample"] 131 self.current_datainfo.sample.ID = params["DataFileTitle"] 132 self.current_datainfo.sample.thickness = self._unit_conversion( 133 float(params["Thickness"]), "cm", 134 self._unit_fetch("Thickness"))[0] 84 135 85 self._insist_header(headers, "SpinEchoLength") 86 self._insist_header(headers, "Depolarisation") 87 self._insist_header(headers, "Depolarisation_error") 88 self._insist_header(headers, "Wavelength") 136 self.current_datainfo.sample.zacceptance = ( 137 float(params["Theta_zmax"]), 138 self._unit_fetch("Theta_zmax")) 89 139 90 data = np.loadtxt(input_f) 140 self.current_datainfo.sample.yacceptance = ( 141 float(params["Theta_ymax"]), 142 self._unit_fetch("Theta_ymax")) 91 143 92 if data.shape[1] != len(headers): 93 raise RuntimeError( 94 "File has {} headers, but {} columns".format( 95 len(headers), 96 data.shape[1])) 97 98 if not data.size: 99 raise RuntimeError("{} is empty".format(path)) 100 x = data[:, headers.index("SpinEchoLength")] 101 if "SpinEchoLength_error" in headers: 102 dx = data[:, headers.index("SpinEchoLength_error")] 103 else: 104 dx = x * 0.05 105 lam = data[:, headers.index("Wavelength")] 106 if "Wavelength_error" in headers: 107 dlam = data[:, headers.index("Wavelength_error")] 108 else: 109 dlam = lam * 0.05 110 y = data[:, headers.index("Depolarisation")] 111 dy = data[:, headers.index("Depolarisation_error")] 112 113 lam_unit = self._unit_fetch("Wavelength") 114 x, x_unit = self._unit_conversion(x, "A", 115 self._unit_fetch( 116 "SpinEchoLength")) 117 dx, dx_unit = self._unit_conversion( 118 dx, lam_unit, 119 self._unit_fetch("SpinEchoLength")) 120 dlam, dlam_unit = self._unit_conversion( 121 dlam, lam_unit, 122 self._unit_fetch("Wavelength")) 123 y_unit = self._unit_fetch("Depolarisation") 124 125 output = Data1D(x=x, y=y, lam=lam, dy=dy, dx=dx, dlam=dlam, 126 isSesans=True) 127 128 output.y_unit = y_unit 129 output.x_unit = x_unit 130 output.source.wavelength_unit = lam_unit 131 output.source.wavelength = lam 132 self.filename = output.filename = basename 133 output.xaxis(r"\rm{z}", x_unit) 134 # Adjust label to ln P/(lam^2 t), remove lam column refs 135 output.yaxis(r"\rm{ln(P)/(t \lambda^2)}", y_unit) 136 # Store loading process information 137 output.meta_data['loader'] = self.type_name 138 output.sample.name = params["Sample"] 139 output.sample.ID = params["DataFileTitle"] 140 output.sample.thickness = self._unit_conversion( 141 float(params["Thickness"]), "cm", 142 self._unit_fetch("Thickness"))[0] 143 144 output.sample.zacceptance = 
( 145 float(params["Theta_zmax"]), 146 self._unit_fetch("Theta_zmax")) 147 148 output.sample.yacceptance = ( 149 float(params["Theta_ymax"]), 150 self._unit_fetch("Theta_ymax")) 151 return output 144 self.send_to_output() 152 145 153 146 @staticmethod 154 147 def _insist_header(headers, name): 155 148 if name not in headers: 156 raise RuntimeError(149 raise FileContentsException( 157 150 "Missing {} column in spin echo data".format(name)) 158 151 -
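Reviewer note: the header parse is unchanged in spirit but now lives in `get_file_contents`: key/value lines are collected into a dict until the BEGIN_DATA sentinel, then one row of column names follows, and the remaining rows form a plain numeric table for `np.loadtxt` with columns located by name. A minimal sketch of that parse (file handle assumed already open in text mode; the EOF guard is a small addition not in the original, to avoid looping on malformed files):

    import numpy as np

    def read_ses(handle):
        # Header: 'KEY value...' lines up to the BEGIN_DATA sentinel.
        params = {}
        line = handle.readline()
        while line and not line.startswith("BEGIN_DATA"):
            terms = line.split()
            if len(terms) >= 2:
                params[terms[0]] = " ".join(terms[1:])
            line = handle.readline()
        # One row of column names, then the numeric table.
        headers = handle.readline().split()
        data = np.loadtxt(handle)
        x = data[:, headers.index("SpinEchoLength")]
        y = data[:, headers.index("Depolarisation")]
        return params, x, y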
src/sas/sascalc/dataloader/readers/xml_reader.py
r6a455cd3 rcd57c7d4 18 18 from lxml import etree 19 19 from lxml.builder import E 20 from sas.sascalc.dataloader.file_reader_base_class import FileReader 20 21 21 22 logger = logging.getLogger(__name__) … … 23 24 PARSER = etree.ETCompatXMLParser(remove_comments=True, remove_pis=False) 24 25 25 class XMLreader( ):26 class XMLreader(FileReader): 26 27 """ 27 28 Generic XML read and write class. Mostly helper functions. … … 74 75 except etree.XMLSyntaxError as xml_error: 75 76 logger.info(xml_error) 77 raise xml_error 76 78 except Exception: 77 79 self.xml = None … … 91 93 except etree.XMLSyntaxError as xml_error: 92 94 logger.info(xml_error) 93 except Exception: 95 raise xml_error 96 except Exception as exc: 94 97 self.xml = None 95 98 self.xmldoc = None 96 99 self.xmlroot = None 100 raise exc 97 101 98 102 def set_schema(self, schema): … … 209 213 Create a unique key value for any dictionary to prevent overwriting 210 214 Recurses until a unique key value is found. 211 215 212 216 :param dictionary: A dictionary with any number of entries 213 217 :param name: The index of the item to be added to dictionary … … 225 229 Create an element tree for processing from an etree element 226 230 227 :param root: etree Element(s) 231 :param root: etree Element(s) 228 232 """ 229 233 return etree.ElementTree(root)
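Reviewer note: the substantive change in xml_reader is that parse failures now propagate. A malformed document raises `etree.XMLSyntaxError` (or the original exception) to the caller instead of being logged and swallowed with `self.xml` left as None, presumably so the loader can hand the file to another reader. A sketch of the new pattern (a standalone function for illustration, not the class method itself):

    import logging
    from lxml import etree

    logger = logging.getLogger(__name__)
    PARSER = etree.ETCompatXMLParser(remove_comments=True, remove_pis=False)

    def parse_xml(xml_file):
        # Log the syntax error for the record, then re-raise so the
        # caller can decide how to handle the unparseable file.
        try:
            return etree.parse(xml_file, parser=PARSER)
        except etree.XMLSyntaxError as xml_error:
            logger.info(xml_error)
            raise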