Changeset d7fd7be in sasview for src/sas/sascalc/dataloader
- Timestamp: Dec 22, 2017 12:08:53 PM (7 years ago)
- Branches: master, magnetic_scatt, release-4.2.2, ticket-1009, ticket-1094-headless, ticket-1242-2d-resolution, ticket-1243, ticket-1249, unittest-saveload
- Children: 5a4d022
- Parents: 2651724 (diff), 0a88623 (diff)
- Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Location: src/sas/sascalc/dataloader
- Files: 2 added, 4 deleted, 16 edited
src/sas/sascalc/dataloader/__init__.py
rb699768 → r574adc7

-from data_info import *
-from manipulations import *
-from readers import *
+from .data_info import *
+from .manipulations import *
+from .readers import *
src/sas/sascalc/dataloader/data_info.py
ra1b8fee → r9e6aeaf

Old-style raise statements are replaced with Python 3 compatible calls throughout the module:

-        raise (TypeError,'data not recognized, check documentation for supported 1D data formats')
+        raise TypeError('data not recognized, check documentation for supported 1D data formats')

-        raise ValueError , msg
+        raise ValueError(msg)

-        raise RuntimeError , "Data2D: Detector bank already filled at init"
+        raise RuntimeError("Data2D: Detector bank already filled at init")

(the ValueError form recurs at lines 798, 804, 878, 1022, 1026, 1029 and 1110).

In combine_data_info_with_plottable(), the Data1D branch now propagates the SESANS flag and the axis units:

-        final_dataset = Data1D(data.x, data.y)
+        final_dataset = Data1D(data.x, data.y, isSesans=datainfo.isSesans)
         final_dataset.dx = data.dx
         final_dataset.dy = data.dy
         final_dataset.dxl = data.dxl
         final_dataset.dxw = data.dxw
+        final_dataset.x_unit = data._xunit
+        final_dataset.y_unit = data._yunit
         final_dataset.xaxis(data._xaxis, data._xunit)
         final_dataset.yaxis(data._yaxis, data._yunit)

The Data2D branch derives the bin vectors from the data shape instead of copying them:

-        final_dataset.x_bins = data.x_bins
-        final_dataset.y_bins = data.y_bins
+        if len(data.data.shape) == 2:
+            n_rows, n_cols = data.data.shape
+            final_dataset.y_bins = data.qy_data[0::int(n_cols)]
+            final_dataset.x_bins = data.qx_data[:int(n_cols)]

and the axis limits are only copied when the plottable actually defines them:

-    final_dataset.xmax = data.xmax
-    final_dataset.ymax = data.ymax
-    final_dataset.xmin = data.xmin
-    final_dataset.ymin = data.ymin
+    if hasattr(data, "xmax"):
+        final_dataset.xmax = data.xmax
+    if hasattr(data, "ymax"):
+        final_dataset.ymax = data.ymax
+    if hasattr(data, "xmin"):
+        final_dataset.xmin = data.xmin
+    if hasattr(data, "ymin"):
+        final_dataset.ymin = data.ymin
     final_dataset.isSesans = datainfo.isSesans
     final_dataset.title = datainfo.title
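The new bin-vector logic assumes the flattened qx_data/qy_data arrays are stored row-major with qx varying fastest. A minimal sketch with toy numbers (not part of the changeset) showing why the two slices recover the unique axis values:

    import numpy as np

    # Toy 3x4 grid flattened the way a Data2D stores it: qx varies fastest.
    n_rows, n_cols = 3, 4
    qx_data = np.tile(np.linspace(-0.1, 0.2, n_cols), n_rows)
    qy_data = np.repeat(np.linspace(-0.3, 0.3, n_rows), n_cols)

    x_bins = qx_data[:n_cols]     # unique qx values (first row of the grid)
    y_bins = qy_data[0::n_cols]   # unique qy values (first column of the grid)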
src/sas/sascalc/dataloader/loader.py
r463e7ffc → rdc8d1c2

The module docstring now reads "Uses reflectometer registry utility" and the DANSE/University of Tennessee license banner is re-indented. The imports are reorganised, and the loader exceptions and NXcanSAS reader are brought in:

 import time
 from zipfile import ZipFile
+
+from sas.sascalc.data_util.registry import ExtensionRegistry
+
 # Default readers are defined in the readers sub-module
-import readers
-from readers import ascii_reader
-from readers import cansas_reader
+from . import readers
+from .loader_exceptions import NoKnownLoaderException, FileContentsException,\
+    DefaultReaderException
+from .readers import ascii_reader
+from .readers import cansas_reader
+from .readers import cansas_reader_HDF5

In Registry, the double-hash comments on writers, wildcards and _created become single-hash comments, the load() docstring now says it defaults to the ascii (multi-column), cansas XML and cansas NeXuS readers, and the old bare try/except fallback is replaced by a cascade that remembers the error raised by the reader associated with the file extension:

-        try:
-            return super(Registry, self).load(path, format=format)
-        except:
-            try:
-                # No reader was found. Default to the ascii reader.
-                ascii_loader = ascii_reader.Reader()
-                return ascii_loader.read(path)
-            except:
-                cansas_loader = cansas_reader.Reader()
-                return cansas_loader.read(path)
+        # Gets set to a string if the file has an associated reader that fails
+        msg_from_reader = None
+        try:
+            return super(Registry, self).load(path, format=format)
+        #except Exception: raise  # for debugging, don't use fallback loader
+        except NoKnownLoaderException as nkl_e:
+            pass  # Try the ASCII reader
+        except FileContentsException as fc_exc:
+            # File has an associated reader but it failed.
+            # Save the error message to display later, but try the 3 default loaders
+            msg_from_reader = fc_exc.message
+        except Exception:
+            pass
+
+        # File has no associated reader, or the associated reader failed.
+        # Try the ASCII reader
+        try:
+            ascii_loader = ascii_reader.Reader()
+            return ascii_loader.read(path)
+        except DefaultReaderException:
+            pass  # Loader specific error to try the cansas XML reader
+        except FileContentsException as e:
+            if msg_from_reader is None:
+                raise RuntimeError(e.message)
+
+        # ASCII reader failed - try CanSAS XML reader
+        try:
+            cansas_loader = cansas_reader.Reader()
+            return cansas_loader.read(path)
+        except DefaultReaderException:
+            pass  # Loader specific error to try the NXcanSAS reader
+        except FileContentsException as e:
+            if msg_from_reader is None:
+                raise RuntimeError(e.message)
+        except Exception:
+            pass
+
+        # CanSAS XML reader failed - try NXcanSAS reader
+        try:
+            cansas_nexus_loader = cansas_reader_HDF5.Reader()
+            return cansas_nexus_loader.read(path)
+        except DefaultReaderException as e:
+            logging.error("No default loader can load the data")
+            # No known reader available. Give up and throw an error
+            if msg_from_reader is None:
+                msg = "\nUnknown data format: {}.\nThe file is not a ".format(path)
+                msg += "known format that can be loaded by SasView.\n"
+                raise NoKnownLoaderException(msg)
+            else:
+                # Associated reader and default readers all failed.
+                # Show error message from associated reader
+                raise RuntimeError(msg_from_reader)
+        except FileContentsException as e:
+            err_msg = msg_from_reader if msg_from_reader is not None else e.message
+            raise RuntimeError(err_msg)

Further down, lookup_writers() sorts matching extensions with a key function and raises with call syntax, and save() only swallows Exception:

-        extlist.sort(lambda a, b: len(a) < len(b))
+        extlist.sort(key=len)

-            raise ValueError , "Unknown file type for " + path
+            raise ValueError("Unknown file type for " + path)

-        except:
+        except Exception:
             pass  # give other loaders a chance to succeed
         # If we get here it is because all loaders failed
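From the caller's side the cascade is invisible; a minimal hedged sketch (the file name is made up, and it assumes the module's Loader facade, which is not shown in this hunk):

    from sas.sascalc.dataloader.loader import Loader
    from sas.sascalc.dataloader.loader_exceptions import NoKnownLoaderException

    try:
        data = Loader().load("measurement.unknown")
    except NoKnownLoaderException as exc:
        # Only raised after the associated reader and all three default readers failed
        print(exc)
    except RuntimeError as exc:
        # A reader recognised the format but could not make sense of the contents
        print(exc)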
src/sas/sascalc/dataloader/manipulations.py
r324e0bf → r574adc7

 #from data_info import plottable_2D
-from data_info import Data1D
+from .data_info import Data1D
src/sas/sascalc/dataloader/readers/__init__.py
r959eb01 → raaa801e

-# Backward compatibility with the previous implementation of the default readers
-from associations import register_readers
-
-# Method to associate extensions to default readers
-from associations import read_associations
-
-# Method to return the location of the XML settings file
-def get_data_path():
-    """
-    Return the location of the settings file for the data readers.
-    """
-    import os
-    return os.path.dirname(__file__)
+# Method to associate extensions to default readers
+from .associations import read_associations
src/sas/sascalc/dataloader/readers/abs_reader.py
r959eb01 → r1efbc190

The IGOR .ABS reader is rewritten on top of the new FileReader base class. The module header gains a logger, drops the optional-converter guard and imports the reader infrastructure relatively:

-import numpy as np
-import os
-from sas.sascalc.dataloader.data_info import Data1D
-from sas.sascalc.dataloader.data_info import Detector
-
-has_converter = True
-try:
-    from sas.sascalc.data_util.nxsunit import Converter
-except:
-    has_converter = False
-
-
-class Reader:
+import logging
+
+import numpy as np
+
+from sas.sascalc.data_util.nxsunit import Converter
+from ..file_reader_base_class import FileReader
+from ..data_info import DataInfo, plottable_1D, Data1D, Detector
+from ..loader_exceptions import FileContentsException, DefaultReaderException
+
+logger = logging.getLogger(__name__)
+
+
+class Reader(FileReader):

The allowed extensions shrink to the lower-case ['.abs'], and read(self, path) becomes get_file_contents(self), which reads the file through self.readall()/self.f_open instead of opening the path itself and fills self.current_datainfo / self.current_dataset rather than a locally built Data1D. Within the parsing loop:

- wavelength, detector distance, transmission and thickness are converted with nxsunit as before, but failures now append a message to self.current_datainfo.errors (wavelength, SDD) or are skipped silently (transmission, thickness) instead of raising RuntimeError;
- the sample-thickness parser strips the trailing 'C' that the ABS writer appends to the thickness column before converting it;
- the detector pixel size and beam-centre scale change from 5.0 mm to 5.08 mm per bin;
- data points are written directly into the pre-allocated self.current_dataset arrays (indexed by a data_line counter) instead of being appended to temporary numpy arrays, and Q and intensity values are converted against the base units '1/A' and '1/cm';
- the final sanity checks call self.remove_empty_q_values() and self.set_all_to_none() before raising ValueError with call syntax, and the loader name is recorded in self.current_datainfo.meta_data before self.send_to_output() replaces the old return of a Data1D object.
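The nxsunit Converter calls above follow the pattern Converter(unit of the raw value)(value, units=target unit). A tiny hedged illustration (the numbers are made up):

    from sas.sascalc.data_util.nxsunit import Converter

    conv = Converter('m')           # unit the raw header value is written in
    sdd = conv(13.17, units='cm')   # -> 1317.0, detector distance in cm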
src/sas/sascalc/dataloader/readers/anton_paar_saxs_reader.py
ra235f715 → ra5bd87a

The import list gains DataInfo, and the class docstring, which previously described the CanSAS 2.0 / Mantid HDF5 reader by mistake, now simply reads "A class for reading in Anton Paar .pdh files". The current_dataset and output class attributes move to the shared reader base class, and reset_state() builds a plottable_1D plus a fresh DataInfo:

     def reset_state(self):
-        self.current_dataset = Data1D(np.empty(0), np.empty(0),
-                                      np.empty(0), np.empty(0))
+        self.current_dataset = plottable_1D(np.empty(0), np.empty(0), np.empty(0), np.empty(0))
+        self.current_datainfo = DataInfo()
         self.datasets = []
         self.raw_data = None

read(self, filename) becomes get_file_contents(self): the explicit isfile/extension/open logic is replaced by self.readall(), after which read_data() is called as before. read_data() itself now:

- wraps the header and data-point parsing in try/except and raises FileContentsException("Couldn't load <file>. ...") when the header cannot be interpreted;
- stores the title, keywords, source name and wavelength on self.current_datainfo rather than on the data set;
- accumulates problems ("Not all data points could be loaded", mismatched x/y/dy lengths) in an error_message string with a correctly_loaded flag instead of self.errors, and drops the old code that padded dx with zeros;
- wraps the trailing XML-metadata parsing in try/except so a metadata failure no longer loses the data points;
- finishes with self.send_to_output() and, if anything went wrong, raises DataReaderException(error_message).

In _parse_child(), the detector, wavelength, temperature and their units are attached to self.current_datainfo instead of self.current_dataset.
src/sas/sascalc/dataloader/readers/ascii_reader.py
r235f514 → r9e6aeaf

The module docstring becomes "Generic multi-column ASCII data reader" and the header imports the FileReader base class, DataInfo/plottable_1D and the loader exceptions instead of guarding an optional unit converter:

+import logging
+from sas.sascalc.dataloader.file_reader_base_class import FileReader
+from sas.sascalc.dataloader.data_info import DataInfo, plottable_1D
+from sas.sascalc.dataloader.loader_exceptions import FileContentsException,\
+    DefaultReaderException
+
+logger = logging.getLogger(__name__)
+
+
+class Reader(FileReader):

The allowed extensions drop their upper-case duplicates (['.txt', '.dat', '.abs', '.csv']), the "more than 5 lines of data" threshold becomes the class attribute min_data_pts = 5, and read(self, path) is replaced by get_file_contents(self). The new parsing loop:

- reads the file through self.readall(), pre-allocates the output arrays with self.reset_data_list(len(lines)), and writes values straight into self.current_dataset.x/y/dy/dx instead of appending to temporary numpy arrays;
- splits each line with self.splitline(line.strip()) (the local splitline() helper is removed), skips blank lines, stops at a footer whose column count changes once data has started, and resets the candidate buffer when header lines happen to be numerical;
- treats a third column as dy and a fourth as dx, and raises FileContentsException suggesting the file-converter tool when a data line has 8 or more columns (likely 2D ASCII data);
- if no data block is found, calls self.set_all_to_none() and raises FileContentsException ("Fewer than five Q data points found") when the extension is one the reader claims, or DefaultReaderException otherwise;
- checks that the dy/dx lengths match y/x, then calls self.remove_empty_q_values(), sets the Q and intensity axes, records the loader name in self.current_datainfo.meta_data and finishes with self.send_to_output().

The old post-processing, the lexsort re-ordering of points, the substitution of 1e-16 for zero uncertainties and the trailing RuntimeError paths, is removed.
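The delimiter fallback the reader relies on (comma, then semicolon, then whitespace) matches the removed helper; a small hedged re-implementation for illustration only:

    def splitline(line):
        """Split a data line on ',', then ';', then whitespace."""
        for sep in (',', ';'):
            toks = line.split(sep)
            if len(toks) >= 2:
                return toks
        return line.split()

    print(splitline("0.001,10.5,0.4"))    # ['0.001', '10.5', '0.4']
    print(splitline("0.001 10.5 0.4"))    # ['0.001', '10.5', '0.4']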
src/sas/sascalc/dataloader/readers/associations.py
ra1b8fee → r574adc7

The os, json and __future__ print_function imports go away, and the defaults.json settings file is replaced by a hard-coded extension map:

-FILE_NAME = 'defaults.json'
+FILE_ASSOCIATIONS = {
+    ".xml": "cansas_reader",
+    ".ses": "sesans_reader",
+    ".h5": "cansas_reader_HDF5",
+    ".txt": "ascii_reader",
+    ".dat": "red2d_reader",
+    ".abs": "abs_reader",
+    ".sans": "danse_reader",
+    ".pdh": "anton_paar_saxs_reader"
+}

read_associations(loader, settings=FILE_ASSOCIATIONS) no longer searches the installation, working and sys.path directories for a JSON file; it simply iterates over the dictionary:

+    # For each FileType entry, get the associated reader and extension
+    for ext, reader in settings.items():
+        if reader is not None and ext is not None:
+            # Associate the extension with a particular reader
+            # TODO: Modify the Register code to be case-insensitive
+            # FIXME: Remove exec statements
+            # and remove the extra line below.
+            try:
+                exec("from . import %s" % reader)
+                exec("loader.associate_file_type('%s', %s)"
+                     % (ext.lower(), reader))
+                exec("loader.associate_file_type('%s', %s)"
+                     % (ext.upper(), reader))
+            except:
+                msg = "read_associations: skipping association"
+                msg += " for %s\n %s" % (ext.lower(), sys.exc_value)
+                logger.error(msg)

The obsolete register_readers() helper, which imported every reader module and registered each one through a callback, is removed.
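Given the FIXME above, an exec-free equivalent could look like the following hedged sketch (importlib-based; the helper name is hypothetical and not part of the changeset):

    import importlib
    import logging

    logger = logging.getLogger(__name__)

    def read_associations_no_exec(loader, settings):
        """Register each extension with its reader module, without exec()."""
        for ext, reader_name in settings.items():
            try:
                module = importlib.import_module(
                    "sas.sascalc.dataloader.readers." + reader_name)
                loader.associate_file_type(ext.lower(), module)
                loader.associate_file_type(ext.upper(), module)
            except ImportError:
                logger.error("read_associations: skipping association for %s", ext)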
src/sas/sascalc/dataloader/readers/cansas_reader.py
r7432acb → r2469df7

The module docstring and DANSE license banner are dropped, and the imports are reorganised: numpy moves below the standard-library imports, lxml.etree and the nxsunit Converter are imported unconditionally (the HAS_CONVERTER guard disappears), and the data_info, loader_exceptions, xml_reader and cansas_constants modules are imported relatively. On the Reader class:

- the class docstring and the current_datainfo / current_dataset / output attributes are removed (they now live in the shared reader base class), and reset_state() calls super(Reader, self).reset_state();
- read(xml_file, schema_path="", invalid=True) only handles the re-entrant case itself (a non-empty schema_path or invalid=False, i.e. a reload against the relaxed schema) and otherwise defers to the base-class read() via super(XMLreader, self).read(xml_file), while the old body moves into a new get_file_contents(xml_file=None, schema_path="", invalid=True);
- get_file_contents() loads the file and schema, parses each /ns:SASroot/ns:SASentry, checks is_cansas() and calls data_cleanup(); if the file does not meet the strict schema it switches to the "invalid" v1.0/v1.1 schema, reloads, then reloads the strict schema so find_invalid_xml() can report exactly what failed, raising DataReaderException with that report. Files that cannot be read at all raise FileContentsException, or DefaultReaderException when the extension is not one the reader claims; any other exception is converted to FileContentsException(str(e)), and the open file handle is closed in a finally block;
- a new load_file_and_schema() catches etree.XMLSyntaxError and converts it into FileContentsException("SasView cannot load ... Invalid XML syntax"), reads the CanSAS version from the root element and selects the matching schema;
- is_cansas() now logs a warning and raises FileContentsException("The file is not valid CanSAS") instead of returning False;
- _parse_entry() creates the DataInfo itself when called non-recursively, skips "corfunc" save-state entries in addition to the fitting_plug_in / pr_inversion / invariant ones, initialises an empty data set when none has been passed in, and recurses into child groups as before.
… …
add_intermediate()251 self._add_intermediate() 247 252 else: 253 # TODO: Clean this up to make it faster (fewer if/elifs) 248 254 if isinstance(self.current_dataset, plottable_2D): 249 255 data_point = node.text … … 261 267 self.current_datainfo.notes.append(data_point) 262 268 263 # I and Q - 1D data269 # I and Q points 264 270 elif tagname == 'I' and isinstance(self.current_dataset, plottable_1D): 265 unit_list = unit.split("|") 266 if len(unit_list) > 1: 267 self.current_dataset.yaxis(unit_list[0].strip(), 268 unit_list[1].strip()) 269 else: 270 self.current_dataset.yaxis("Intensity", unit) 271 self.current_dataset.yaxis("Intensity", unit) 271 272 self.current_dataset.y = np.append(self.current_dataset.y, data_point) 272 273 elif tagname == 'Idev' and isinstance(self.current_dataset, plottable_1D): 273 274 self.current_dataset.dy = np.append(self.current_dataset.dy, data_point) 274 275 elif tagname == 'Q': 275 unit_list = unit.split("|") 276 if len(unit_list) > 1: 277 self.current_dataset.xaxis(unit_list[0].strip(), 278 unit_list[1].strip()) 279 else: 280 self.current_dataset.xaxis("Q", unit) 276 self.current_dataset.xaxis("Q", unit) 281 277 self.current_dataset.x = np.append(self.current_dataset.x, data_point) 282 278 elif tagname == 'Qdev': 283 279 self.current_dataset.dx = np.append(self.current_dataset.dx, data_point) 284 280 elif tagname == 'dQw': 285 281 self.current_dataset.dxw = np.append(self.current_dataset.dxw, data_point) 286 282 elif tagname == 'dQl': 287 283 self.current_dataset.dxl = np.append(self.current_dataset.dxl, data_point) … … 292 288 elif tagname == 'Sesans': 293 289 self.current_datainfo.isSesans = bool(data_point) 290 self.current_dataset.xaxis(attr.get('x_axis'), 291 attr.get('x_unit')) 292 self.current_dataset.yaxis(attr.get('y_axis'), 293 attr.get('y_unit')) 294 294 elif tagname == 'yacceptance': 295 295 self.current_datainfo.sample.yacceptance = (data_point, unit) … … 356 356 elif tagname == 'name' and self.parent_class == 'SASinstrument': 357 357 self.current_datainfo.instrument = data_point 358 358 359 # Detector Information 359 360 elif tagname == 'name' and self.parent_class == 'SASdetector': … … 401 402 self.detector.orientation.z = data_point 402 403 self.detector.orientation_unit = unit 404 403 405 # Collimation and Aperture 404 406 elif tagname == 'length' and self.parent_class == 'SAScollimation': … … 434 436 elif tagname == 'term' and self.parent_class == 'SASprocess': 435 437 unit = attr.get("unit", "") 436 dic = {} 437 dic["name"] = name 438 dic["value"] = data_point 439 dic["unit"] = unit 438 dic = { "name": name, "value": data_point, "unit": unit } 440 439 self.process.term.append(dic) 441 440 … … 490 489 if not self._is_call_local() and not recurse: 491 490 self.frm = "" 492 self.add_data_set() 493 empty = None 494 return self.output[0], empty 495 491 self.current_datainfo.errors = set() 492 for error in self.errors: 493 self.current_datainfo.errors.add(error) 494 self.data_cleanup() 495 self.sort_one_d_data() 496 self.sort_two_d_data() 497 self.reset_data_list() 498 return self.output[0], None 496 499 497 500 def _is_call_local(self): 498 """499 500 """501 501 if self.frm == "": 502 502 inter = inspect.stack() … … 510 510 return True 511 511 512 def is_cansas(self, ext="xml"): 513 """ 514 Checks to see if the xml file is a CanSAS file 515 516 :param ext: The file extension of the data file 517 """ 518 if self.validate_xml(): 519 name = "{http://www.w3.org/2001/XMLSchema-instance}schemaLocation" 520 value = self.xmlroot.get(name) 521 if 
CANSAS_NS.get(self.cansas_version).get("ns") == \ 522 value.rsplit(" ")[0]: 523 return True 524 if ext == "svs": 525 return True 526 raise RuntimeError 527 528 def load_file_and_schema(self, xml_file, schema_path=""): 529 """ 530 Loads the file and associates a schema, if a schema is passed in or if one already exists 531 532 :param xml_file: The xml file path sent to Reader.read 533 :param schema_path: The path to a schema associated with the xml_file, or find one based on the file 534 """ 535 base_name = xml_reader.__file__ 536 base_name = base_name.replace("\\", "/") 537 base = base_name.split("/sas/")[0] 538 539 # Load in xml file and get the cansas version from the header 540 self.set_xml_file(xml_file) 541 self.cansas_version = self.xmlroot.get("version", "1.0") 542 543 # Generic values for the cansas file based on the version 544 self.cansas_defaults = CANSAS_NS.get(self.cansas_version, "1.0") 545 if schema_path == "": 546 schema_path = "{0}/sas/sascalc/dataloader/readers/schema/{1}".format \ 547 (base, self.cansas_defaults.get("schema")).replace("\\", "/") 548 549 # Link a schema to the XML file. 550 self.set_schema(schema_path) 551 552 def add_data_set(self): 553 """ 554 Adds the current_dataset to the list of outputs after preforming final processing on the data and then calls a 555 private method to generate a new data set. 556 557 :param key: NeXus group name for current tree level 558 """ 559 560 if self.current_datainfo and self.current_dataset: 561 self._final_cleanup() 562 self.data = [] 563 self.current_datainfo = DataInfo() 564 565 def _initialize_new_data_set(self, node=None): 566 """ 567 A private class method to generate a new 1D data object. 568 Outside methods should call add_data_set() to be sure any existing data is stored properly. 569 570 :param node: XML node to determine if 1D or 2D data 571 """ 572 x = np.array(0) 573 y = np.array(0) 574 for child in node: 575 if child.tag.replace(self.base_ns, "") == "Idata": 576 for i_child in child: 577 if i_child.tag.replace(self.base_ns, "") == "Qx": 578 self.current_dataset = plottable_2D() 579 return 580 self.current_dataset = plottable_1D(x, y) 581 582 def add_intermediate(self): 512 def _add_intermediate(self): 583 513 """ 584 514 This method stores any intermediate objects within the final data set after fully reading the set. 
585 586 :param parent: The NXclass name for the h5py Group object that just finished being processed 587 """ 588 515 """ 589 516 if self.parent_class == 'SASprocess': 590 517 self.current_datainfo.process.append(self.process) … … 603 530 self.aperture = Aperture() 604 531 elif self.parent_class == 'SASdata': 605 self._check_for_empty_resolution()606 532 self.data.append(self.current_dataset) 607 608 def _final_cleanup(self):609 """610 Final cleanup of the Data1D object to be sure it has all the611 appropriate information needed for perspectives612 """613 614 # Append errors to dataset and reset class errors615 self.current_datainfo.errors = set()616 for error in self.errors:617 self.current_datainfo.errors.add(error)618 self.errors.clear()619 620 # Combine all plottables with datainfo and append each to output621 # Type cast data arrays to float64 and find min/max as appropriate622 for dataset in self.data:623 if isinstance(dataset, plottable_1D):624 if dataset.x is not None:625 dataset.x = np.delete(dataset.x, [0])626 dataset.x = dataset.x.astype(np.float64)627 dataset.xmin = np.min(dataset.x)628 dataset.xmax = np.max(dataset.x)629 if dataset.y is not None:630 dataset.y = np.delete(dataset.y, [0])631 dataset.y = dataset.y.astype(np.float64)632 dataset.ymin = np.min(dataset.y)633 dataset.ymax = np.max(dataset.y)634 if dataset.dx is not None:635 dataset.dx = np.delete(dataset.dx, [0])636 dataset.dx = dataset.dx.astype(np.float64)637 if dataset.dxl is not None:638 dataset.dxl = np.delete(dataset.dxl, [0])639 dataset.dxl = dataset.dxl.astype(np.float64)640 if dataset.dxw is not None:641 dataset.dxw = np.delete(dataset.dxw, [0])642 dataset.dxw = dataset.dxw.astype(np.float64)643 if dataset.dy is not None:644 dataset.dy = np.delete(dataset.dy, [0])645 dataset.dy = dataset.dy.astype(np.float64)646 np.trim_zeros(dataset.x)647 np.trim_zeros(dataset.y)648 np.trim_zeros(dataset.dy)649 elif isinstance(dataset, plottable_2D):650 dataset.data = dataset.data.astype(np.float64)651 dataset.qx_data = dataset.qx_data.astype(np.float64)652 dataset.xmin = np.min(dataset.qx_data)653 dataset.xmax = np.max(dataset.qx_data)654 dataset.qy_data = dataset.qy_data.astype(np.float64)655 dataset.ymin = np.min(dataset.qy_data)656 dataset.ymax = np.max(dataset.qy_data)657 dataset.q_data = np.sqrt(dataset.qx_data * dataset.qx_data658 + dataset.qy_data * dataset.qy_data)659 if dataset.err_data is not None:660 dataset.err_data = dataset.err_data.astype(np.float64)661 if dataset.dqx_data is not None:662 dataset.dqx_data = dataset.dqx_data.astype(np.float64)663 if dataset.dqy_data is not None:664 dataset.dqy_data = dataset.dqy_data.astype(np.float64)665 if dataset.mask is not None:666 dataset.mask = dataset.mask.astype(dtype=bool)667 668 if len(dataset.shape) == 2:669 n_rows, n_cols = dataset.shape670 dataset.y_bins = dataset.qy_data[0::int(n_cols)]671 dataset.x_bins = dataset.qx_data[:int(n_cols)]672 dataset.data = dataset.data.flatten()673 else:674 dataset.y_bins = []675 dataset.x_bins = []676 dataset.data = dataset.data.flatten()677 678 final_dataset = combine_data(dataset, self.current_datainfo)679 self.output.append(final_dataset)680 681 def _create_unique_key(self, dictionary, name, numb=0):682 """683 Create a unique key value for any dictionary to prevent overwriting684 Recurse until a unique key value is found.685 686 :param dictionary: A dictionary with any number of entries687 :param name: The index of the item to be added to dictionary688 :param numb: The number to be appended to the name, starts at 0689 """690 if 
dictionary.get(name) is not None:691 numb += 1692 name = name.split("_")[0]693 name += "_{0}".format(numb)694 name = self._create_unique_key(dictionary, name, numb)695 return name696 533 697 534 def _get_node_value(self, node, tagname): … … 748 585 if 'unit' in attr and attr.get('unit') is not None: 749 586 try: 750 local_unit = attr['unit'] 587 unit = attr['unit'] 588 # Split the units to retain backwards compatibility with 589 # projects, analyses, and saved data from v4.1.0 590 unit_list = unit.split("|") 591 if len(unit_list) > 1: 592 local_unit = unit_list[1] 593 else: 594 local_unit = unit 751 595 unitname = self.ns_list.current_level.get("unit", "") 752 596 if "SASdetector" in self.names: … … 771 615 else: 772 616 save_in = "current_datainfo" 773 exec "default_unit = self.{0}.{1}".format(save_in, unitname) 774 if local_unit and default_unit and local_unit.lower() != default_unit.lower() \ 775 and local_unit.lower() != "none": 776 if HAS_CONVERTER == True: 777 # Check local units - bad units raise KeyError 778 data_conv_q = Converter(local_unit) 779 value_unit = default_unit 780 node_value = data_conv_q(node_value, units=default_unit) 781 else: 782 value_unit = local_unit 783 err_msg = "Unit converter is not available.\n" 617 default_unit = getattrchain(self, '.'.join((save_in, unitname))) 618 if (local_unit and default_unit 619 and local_unit.lower() != default_unit.lower() 620 and local_unit.lower() != "none"): 621 # Check local units - bad units raise KeyError 622 #print("loading", tagname, node_value, local_unit, default_unit) 623 data_conv_q = Converter(local_unit) 624 value_unit = default_unit 625 node_value = data_conv_q(node_value, units=default_unit) 784 626 else: 785 627 value_unit = local_unit 786 628 except KeyError: 787 err_msg = "CanSAS reader: unexpected " 788 err_msg += "\"{0}\" unit [{1}]; " 789 err_msg = err_msg.format(tagname, local_unit) 790 err_msg += "expecting [{0}]".format(default_unit) 629 # Do not throw an error for loading Sesans data in cansas xml 630 # This is a temporary fix. 
631 if local_unit != "A" and local_unit != 'pol': 632 err_msg = "CanSAS reader: unexpected " 633 err_msg += "\"{0}\" unit [{1}]; " 634 err_msg = err_msg.format(tagname, local_unit) 635 err_msg += "expecting [{0}]".format(default_unit) 791 636 value_unit = local_unit 792 except :637 except Exception: 793 638 err_msg = "CanSAS reader: unknown error converting " 794 639 err_msg += "\"{0}\" unit [{1}]" … … 801 646 return node_value, value_unit 802 647 803 def _check_for_empty_data(self): 804 """ 805 Creates an empty data set if no data is passed to the reader 806 807 :param data1d: presumably a Data1D object 808 """ 809 if self.current_dataset is None: 810 x_vals = np.empty(0) 811 y_vals = np.empty(0) 812 dx_vals = np.empty(0) 813 dy_vals = np.empty(0) 814 dxl = np.empty(0) 815 dxw = np.empty(0) 816 self.current_dataset = plottable_1D(x_vals, y_vals, dx_vals, dy_vals) 817 self.current_dataset.dxl = dxl 818 self.current_dataset.dxw = dxw 819 820 def _check_for_empty_resolution(self): 821 """ 822 A method to check all resolution data sets are the same size as I and Q 823 """ 824 if isinstance(self.current_dataset, plottable_1D): 825 dql_exists = False 826 dqw_exists = False 827 dq_exists = False 828 di_exists = False 829 if self.current_dataset.dxl is not None: 830 dql_exists = True 831 if self.current_dataset.dxw is not None: 832 dqw_exists = True 833 if self.current_dataset.dx is not None: 834 dq_exists = True 835 if self.current_dataset.dy is not None: 836 di_exists = True 837 if dqw_exists and not dql_exists: 838 array_size = self.current_dataset.dxw.size - 1 839 self.current_dataset.dxl = np.append(self.current_dataset.dxl, 840 np.zeros([array_size])) 841 elif dql_exists and not dqw_exists: 842 array_size = self.current_dataset.dxl.size - 1 843 self.current_dataset.dxw = np.append(self.current_dataset.dxw, 844 np.zeros([array_size])) 845 elif not dql_exists and not dqw_exists and not dq_exists: 846 array_size = self.current_dataset.x.size - 1 847 self.current_dataset.dx = np.append(self.current_dataset.dx, 848 np.zeros([array_size])) 849 if not di_exists: 850 array_size = self.current_dataset.y.size - 1 851 self.current_dataset.dy = np.append(self.current_dataset.dy, 852 np.zeros([array_size])) 853 elif isinstance(self.current_dataset, plottable_2D): 854 dqx_exists = False 855 dqy_exists = False 856 di_exists = False 857 mask_exists = False 858 if self.current_dataset.dqx_data is not None: 859 dqx_exists = True 860 if self.current_dataset.dqy_data is not None: 861 dqy_exists = True 862 if self.current_dataset.err_data is not None: 863 di_exists = True 864 if self.current_dataset.mask is not None: 865 mask_exists = True 866 if not dqy_exists: 867 array_size = self.current_dataset.qy_data.size - 1 868 self.current_dataset.dqy_data = np.append( 869 self.current_dataset.dqy_data, np.zeros([array_size])) 870 if not dqx_exists: 871 array_size = self.current_dataset.qx_data.size - 1 872 self.current_dataset.dqx_data = np.append( 873 self.current_dataset.dqx_data, np.zeros([array_size])) 874 if not di_exists: 875 array_size = self.current_dataset.data.size - 1 876 self.current_dataset.err_data = np.append( 877 self.current_dataset.err_data, np.zeros([array_size])) 878 if not mask_exists: 879 array_size = self.current_dataset.data.size - 1 880 self.current_dataset.mask = np.append( 881 self.current_dataset.mask, 882 np.ones([array_size] ,dtype=bool)) 883 884 ####### All methods below are for writing CanSAS XML files ####### 885 648 def _initialize_new_data_set(self, node=None): 649 if node is not 
None: 650 for child in node: 651 if child.tag.replace(self.base_ns, "") == "Idata": 652 for i_child in child: 653 if i_child.tag.replace(self.base_ns, "") == "Qx": 654 self.current_dataset = plottable_2D() 655 return 656 self.current_dataset = plottable_1D(np.array(0), np.array(0)) 657 658 ## Writing Methods 886 659 def write(self, filename, datainfo): 887 660 """ … … 894 667 doc, _ = self._to_xml_doc(datainfo) 895 668 # Write the file 896 file_ref = open(filename, 'w ')669 file_ref = open(filename, 'wb') 897 670 if self.encoding is None: 898 671 self.encoding = "UTF-8" … … 1039 812 node.append(point) 1040 813 self.write_node(point, "Q", datainfo.x[i], 1041 {'unit': datainfo. _xaxis + " | " + datainfo._xunit})814 {'unit': datainfo.x_unit}) 1042 815 if len(datainfo.y) >= i: 1043 816 self.write_node(point, "I", datainfo.y[i], 1044 {'unit': datainfo. _yaxis + " | " + datainfo._yunit})817 {'unit': datainfo.y_unit}) 1045 818 if datainfo.dy is not None and len(datainfo.dy) > i: 1046 819 self.write_node(point, "Idev", datainfo.dy[i], 1047 {'unit': datainfo. _yaxis + " | " + datainfo._yunit})820 {'unit': datainfo.y_unit}) 1048 821 if datainfo.dx is not None and len(datainfo.dx) > i: 1049 822 self.write_node(point, "Qdev", datainfo.dx[i], 1050 {'unit': datainfo. _xaxis + " | " + datainfo._xunit})823 {'unit': datainfo.x_unit}) 1051 824 if datainfo.dxw is not None and len(datainfo.dxw) > i: 1052 825 self.write_node(point, "dQw", datainfo.dxw[i], 1053 {'unit': datainfo. _xaxis + " | " + datainfo._xunit})826 {'unit': datainfo.x_unit}) 1054 827 if datainfo.dxl is not None and len(datainfo.dxl) > i: 1055 828 self.write_node(point, "dQl", datainfo.dxl[i], 1056 {'unit': datainfo. _xaxis + " | " + datainfo._xunit})829 {'unit': datainfo.x_unit}) 1057 830 if datainfo.isSesans: 1058 sesans = self.create_element("Sesans") 831 sesans_attrib = {'x_axis': datainfo._xaxis, 832 'y_axis': datainfo._yaxis, 833 'x_unit': datainfo.x_unit, 834 'y_unit': datainfo.y_unit} 835 sesans = self.create_element("Sesans", attrib=sesans_attrib) 1059 836 sesans.text = str(datainfo.isSesans) 1060 node.append(sesans)1061 self.write_node( node, "yacceptance", datainfo.sample.yacceptance[0],837 entry_node.append(sesans) 838 self.write_node(entry_node, "yacceptance", datainfo.sample.yacceptance[0], 1062 839 {'unit': datainfo.sample.yacceptance[1]}) 1063 self.write_node( node, "zacceptance", datainfo.sample.zacceptance[0],840 self.write_node(entry_node, "zacceptance", datainfo.sample.zacceptance[0], 1064 841 {'unit': datainfo.sample.zacceptance[1]}) 1065 842 … … 1081 858 point = self.create_element("Idata") 1082 859 node.append(point) 1083 qx = ','.join( [str(datainfo.qx_data[i]) for i in xrange(len(datainfo.qx_data))])1084 qy = ','.join( [str(datainfo.qy_data[i]) for i in xrange(len(datainfo.qy_data))])1085 intensity = ','.join( [str(datainfo.data[i]) for i in xrange(len(datainfo.data))])860 qx = ','.join(str(v) for v in datainfo.qx_data) 861 qy = ','.join(str(v) for v in datainfo.qy_data) 862 intensity = ','.join(str(v) for v in datainfo.data) 1086 863 1087 864 self.write_node(point, "Qx", qx, … … 1092 869 {'unit': datainfo._zunit}) 1093 870 if datainfo.err_data is not None: 1094 err = ','.join([str(datainfo.err_data[i]) for i in 1095 xrange(len(datainfo.err_data))]) 871 err = ','.join(str(v) for v in datainfo.err_data) 1096 872 self.write_node(point, "Idev", err, 1097 873 {'unit': datainfo._zunit}) 1098 874 if datainfo.dqy_data is not None: 1099 dqy = ','.join([str(datainfo.dqy_data[i]) for i in 1100 xrange(len(datainfo.dqy_data))]) 
875 dqy = ','.join(str(v) for v in datainfo.dqy_data) 1101 876 self.write_node(point, "Qydev", dqy, 1102 877 {'unit': datainfo._yunit}) 1103 878 if datainfo.dqx_data is not None: 1104 dqx = ','.join([str(datainfo.dqx_data[i]) for i in 1105 xrange(len(datainfo.dqx_data))]) 879 dqx = ','.join(str(v) for v in datainfo.dqx_data) 1106 880 self.write_node(point, "Qxdev", dqx, 1107 881 {'unit': datainfo._xunit}) 1108 882 if datainfo.mask is not None: 1109 mask = ','.join( 1110 ["1" if datainfo.mask[i] else "0" 1111 for i in xrange(len(datainfo.mask))]) 883 mask = ','.join("1" if v else "0" for v in datainfo.mask) 1112 884 self.write_node(point, "Mask", mask) 1113 885 … … 1170 942 pos, "z", datainfo.sample.position.z, 1171 943 {"unit": datainfo.sample.position_unit}) 1172 if written == True:944 if written: 1173 945 self.append(pos, sample) 1174 946 … … 1183 955 ori, "yaw", datainfo.sample.orientation.z, 1184 956 {"unit": datainfo.sample.orientation_unit}) 1185 if written == True:957 if written: 1186 958 self.append(ori, sample) 1187 959 … … 1230 1002 size, "z", datainfo.source.beam_size.z, 1231 1003 {"unit": datainfo.source.beam_size_unit}) 1232 if written == True:1004 if written: 1233 1005 self.append(size, source) 1234 1006 … … 1286 1058 size, "z", aperture.size.z, 1287 1059 {"unit": aperture.size_unit}) 1288 if written == True:1060 if written: 1289 1061 self.append(size, apert) 1290 1062 … … 1309 1081 written = written | self.write_node(det, "SDD", item.distance, 1310 1082 {"unit": item.distance_unit}) 1311 if written == True:1083 if written: 1312 1084 self.append(det, instr) 1313 1085 … … 1319 1091 written = written | self.write_node(off, "z", item.offset.z, 1320 1092 {"unit": item.offset_unit}) 1321 if written == True:1093 if written: 1322 1094 self.append(off, det) 1323 1095 … … 1331 1103 item.orientation.z, 1332 1104 {"unit": item.orientation_unit}) 1333 if written == True:1105 if written: 1334 1106 self.append(ori, det) 1335 1107 … … 1343 1115 item.beam_center.z, 1344 1116 {"unit": item.beam_center_unit}) 1345 if written == True:1117 if written: 1346 1118 self.append(center, det) 1347 1119 … … 1353 1125 written = written | self.write_node(pix, "z", item.pixel_size.z, 1354 1126 {"unit": item.pixel_size_unit}) 1355 if written == True:1127 if written: 1356 1128 self.append(pix, det) 1357 1129 self.write_node(det, "slit_length", item.slit_length, … … 1453 1225 try: 1454 1226 value = float(entry.text) 1455 except :1227 except ValueError: 1456 1228 value = None 1457 1229 … … 1462 1234 if units is not None: 1463 1235 toks = variable.split('.') 1464 local_unit = None 1465 exec "local_unit = storage.%s_unit" % toks[0] 1236 local_unit = getattr(storage, toks[0]+"_unit") 1466 1237 if local_unit is not None and units.lower() != local_unit.lower(): 1467 if HAS_CONVERTER == True: 1468 try: 1469 conv = Converter(units) 1470 exec "storage.%s = %g" % \ 1471 (variable, conv(value, units=local_unit)) 1472 except: 1473 _, exc_value, _ = sys.exc_info() 1474 err_mess = "CanSAS reader: could not convert" 1475 err_mess += " %s unit [%s]; expecting [%s]\n %s" \ 1476 % (variable, units, local_unit, exc_value) 1477 self.errors.add(err_mess) 1478 if optional: 1479 logger.info(err_mess) 1480 else: 1481 raise ValueError, err_mess 1482 else: 1483 err_mess = "CanSAS reader: unrecognized %s unit [%s];"\ 1484 % (variable, units) 1485 err_mess += " expecting [%s]" % local_unit 1238 try: 1239 conv = Converter(units) 1240 setattrchain(storage, variable, conv(value, units=local_unit)) 1241 except Exception: 1242 _, exc_value, 
_ = sys.exc_info() 1243 err_mess = "CanSAS reader: could not convert" 1244 err_mess += " %s unit [%s]; expecting [%s]\n %s" \ 1245 % (variable, units, local_unit, exc_value) 1486 1246 self.errors.add(err_mess) 1487 1247 if optional: 1488 1248 logger.info(err_mess) 1489 1249 else: 1490 raise ValueError , err_mess1250 raise ValueError(err_mess) 1491 1251 else: 1492 exec "storage.%s = value" % variable1252 setattrchain(storage, variable, value) 1493 1253 else: 1494 exec "storage.%s = value" % variable1254 setattrchain(storage, variable, value) 1495 1255 1496 1256 # DO NOT REMOVE - used in saving and loading panel states. … … 1512 1272 entry = get_content(location, node) 1513 1273 if entry is not None and entry.text is not None: 1514 exec "storage.%s = entry.text.strip()" % variable 1515 1274 setattrchain(storage, variable, entry.text.strip()) 1516 1275 1517 1276 # DO NOT REMOVE Called by outside packages: … … 1556 1315 return True 1557 1316 return False 1317 1318 def getattrchain(obj, chain, default=None): 1319 """Like getattr, but the attr may contain multiple parts separated by '.'""" 1320 for part in chain.split('.'): 1321 if hasattr(obj, part): 1322 obj = getattr(obj, part, None) 1323 else: 1324 return default 1325 return obj 1326 1327 def setattrchain(obj, chain, value): 1328 """Like setattr, but the attr may contain multiple parts separated by '.'""" 1329 parts = list(chain.split('.')) 1330 for part in parts[:-1]: 1331 obj = getattr(obj, part, None) 1332 if obj is None: 1333 raise ValueError("missing parent object "+part) 1334 setattr(obj, parts[-1], value) -
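Note: the sketch below is only a usage illustration of the getattrchain/setattrchain helpers added at the bottom of this file; the import line assumes the post-change module layout, and the _Position/_Sample classes are made-up stand-ins rather than SasView classes.

    from sas.sascalc.dataloader.readers.cansas_reader import getattrchain, setattrchain

    class _Position(object):
        def __init__(self):
            self.x = None

    class _Sample(object):
        def __init__(self):
            self.position = _Position()
            self.position_unit = 'mm'

    sample = _Sample()
    setattrchain(sample, 'position.x', 10.0)               # walks 'position', then sets 'x'
    assert getattrchain(sample, 'position.x') == 10.0
    assert getattrchain(sample, 'position.y', 0.0) == 0.0  # missing attribute falls back to the default

This is the same dotted-path addressing the reader now uses in place of exec when storing values such as nested sample and detector fields.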
src/sas/sascalc/dataloader/readers/cansas_reader_HDF5.py
r2651724 rd7fd7be 9 9 import sys 10 10 11 from sas.sascalc.dataloader.data_info import plottable_1D, plottable_2D,\11 from ..data_info import plottable_1D, plottable_2D,\ 12 12 Data1D, Data2D, DataInfo, Process, Aperture, Collimation, \ 13 13 TransmissionSpectrum, Detector 14 from sas.sascalc.dataloader.data_info import combine_data_info_with_plottable 15 16 17 class Reader(): 14 from ..data_info import combine_data_info_with_plottable 15 from ..loader_exceptions import FileContentsException, DefaultReaderException 16 from ..file_reader_base_class import FileReader, decode 17 18 def h5attr(node, key, default=None): 19 return decode(node.attrs.get(key, default)) 20 21 class Reader(FileReader): 18 22 """ 19 23 A class for reading in CanSAS v2.0 data files. The existing iteration opens … … 40 44 # Raw file contents to be processed 41 45 raw_data = None 42 # Data info currently being read in43 current_datainfo = None44 # SASdata set currently being read in45 current_dataset = None46 46 # List of plottable1D objects that should be linked to the current_datainfo 47 47 data1d = None … … 56 56 # Flag to bypass extension check 57 57 allow_all = True 58 # List of files to return 59 output = None 60 61 def read(self, filename): 58 59 def get_file_contents(self): 62 60 """ 63 61 This is the general read method that all SasView data_loaders must have. … … 67 65 """ 68 66 # Reinitialize when loading a new data file to reset all class variables 69 self.reset_class_variables() 67 self.reset_state() 68 69 filename = self.f_open.name 70 self.f_open.close() # IO handled by h5py 71 70 72 # Check that the file exists 71 73 if os.path.isfile(filename): … … 75 77 if extension in self.ext or self.allow_all: 76 78 # Load the data file 77 self.raw_data = h5py.File(filename, 'r') 78 # Read in all child elements of top level SASroot 79 self.read_children(self.raw_data, []) 80 # Add the last data set to the list of outputs 81 self.add_data_set() 82 # Close the data file 83 self.raw_data.close() 84 # Return data set(s) 85 return self.output 86 87 def reset_class_variables(self): 79 try: 80 self.raw_data = h5py.File(filename, 'r') 81 except Exception as e: 82 if extension not in self.ext: 83 msg = "CanSAS2.0 HDF5 Reader could not load file {}".format(basename + extension) 84 raise DefaultReaderException(msg) 85 raise FileContentsException(e.message) 86 try: 87 # Read in all child elements of top level SASroot 88 self.read_children(self.raw_data, []) 89 # Add the last data set to the list of outputs 90 self.add_data_set() 91 except Exception as exc: 92 raise FileContentsException(exc.message) 93 finally: 94 # Close the data file 95 self.raw_data.close() 96 97 for dataset in self.output: 98 if isinstance(dataset, Data1D): 99 if dataset.x.size < 5: 100 self.output = [] 101 raise FileContentsException("Fewer than 5 data points found.") 102 103 def reset_state(self): 88 104 """ 89 105 Create the reader object and define initial states for class variables 90 106 """ 91 self.current_datainfo = None 92 self.current_dataset = None 107 super(Reader, self).reset_state() 93 108 self.data1d = [] 94 109 self.data2d = [] … … 123 138 # Get all information for the current key 124 139 value = data.get(key) 125 if value.attrs.get(u'canSAS_class') is not None: 126 class_name = value.attrs.get(u'canSAS_class') 127 elif value.attrs.get(u'NX_class') is not None: 128 class_name = value.attrs.get(u'NX_class') 129 else: 130 class_name = key 140 class_name = h5attr(value, u'canSAS_class') 141 if class_name is None: 142 class_name = h5attr(value, 
u'NX_class') 131 143 if class_name is not None: 132 144 class_prog = re.compile(class_name) … … 135 147 136 148 if isinstance(value, h5py.Group): 149 # Set parent class before recursion 137 150 self.parent_class = class_name 138 151 parent_list.append(key) … … 146 159 # Recursion step to access data within the group 147 160 self.read_children(value, parent_list) 161 # Reset parent class when returning from recursive method 162 self.parent_class = class_name 148 163 self.add_intermediate() 149 164 parent_list.remove(key) … … 155 170 156 171 for data_point in data_set: 172 if isinstance(data_point, np.ndarray): 173 if data_point.dtype.char == 'S': 174 data_point = decode(bytes(data_point)) 175 else: 176 data_point = decode(data_point) 157 177 # Top Level Meta Data 158 178 if key == u'definition': … … 162 182 self.current_datainfo.run.append(data_point) 163 183 try: 164 run_name = value.attrs['name']184 run_name = h5attr(value, 'name') 165 185 run_dict = {data_point: run_name} 166 186 self.current_datainfo.run_name = run_dict 167 except :187 except Exception: 168 188 pass 169 189 # Title … … 458 478 Data1D and Data2D objects 459 479 """ 460 461 480 # Type cast data arrays to float64 462 481 if len(self.current_datainfo.trans_spectrum) > 0: … … 482 501 # Type cast data arrays to float64 and find min/max as appropriate 483 502 for dataset in self.data2d: 484 dataset.data = dataset.data.astype(np.float64)485 dataset.err_data = dataset.err_data.astype(np.float64)486 if dataset.qx_data is not None:487 dataset.xmin = np.min(dataset.qx_data)488 dataset.xmax = np.max(dataset.qx_data)489 dataset.qx_data = dataset.qx_data.astype(np.float64)490 if dataset.dqx_data is not None:491 dataset.dqx_data = dataset.dqx_data.astype(np.float64)492 if dataset.qy_data is not None:493 dataset.ymin = np.min(dataset.qy_data)494 dataset.ymax = np.max(dataset.qy_data)495 dataset.qy_data = dataset.qy_data.astype(np.float64)496 if dataset.dqy_data is not None:497 dataset.dqy_data = dataset.dqy_data.astype(np.float64)498 if dataset.q_data is not None:499 dataset.q_data = dataset.q_data.astype(np.float64)500 503 zeros = np.ones(dataset.data.size, dtype=bool) 501 504 try: … … 520 523 dataset.x_bins = dataset.qx_data[:n_cols] 521 524 dataset.data = dataset.data.flatten() 522 523 final_dataset = combine_data_info_with_plottable( 524 dataset, self.current_datainfo) 525 self.output.append(final_dataset) 525 self.current_dataset = dataset 526 self.send_to_output() 526 527 527 528 for dataset in self.data1d: 528 if dataset.x is not None: 529 dataset.x = dataset.x.astype(np.float64) 530 dataset.xmin = np.min(dataset.x) 531 dataset.xmax = np.max(dataset.x) 532 if dataset.y is not None: 533 dataset.y = dataset.y.astype(np.float64) 534 dataset.ymin = np.min(dataset.y) 535 dataset.ymax = np.max(dataset.y) 536 if dataset.dx is not None: 537 dataset.dx = dataset.dx.astype(np.float64) 538 if dataset.dxl is not None: 539 dataset.dxl = dataset.dxl.astype(np.float64) 540 if dataset.dxw is not None: 541 dataset.dxw = dataset.dxw.astype(np.float64) 542 if dataset.dy is not None: 543 dataset.dy = dataset.dy.astype(np.float64) 544 final_dataset = combine_data_info_with_plottable( 545 dataset, self.current_datainfo) 546 self.output.append(final_dataset) 529 self.current_dataset = dataset 530 self.send_to_output() 547 531 548 532 def add_data_set(self, key=""): … … 651 635 :return: unit for the value passed to the method 652 636 """ 653 unit = value.attrs.get(u'units')637 unit = h5attr(value, u'units') 654 638 if unit is None: 655 unit = 
value.attrs.get(u'unit')639 unit = h5attr(value, u'unit') 656 640 # Convert the unit formats 657 641 if unit == "1/A": -
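Note: the h5attr wrapper near the top of this file exists because h5py can return attribute values as bytes under Python 3. The sketch below is a self-contained rendering of the same idea; _decode here is an assumed stand-in for the decode function imported from file_reader_base_class, and the file/group names in the commented usage are hypothetical.

    import h5py

    def _decode(value):
        # Assumed behaviour of the imported decode(): bytes -> str, anything else unchanged.
        if isinstance(value, bytes):
            return value.decode('utf-8')
        return value

    def _h5attr(node, key, default=None):
        return _decode(node.attrs.get(key, default))

    # Hypothetical usage:
    # with h5py.File('example.h5', 'r') as handle:
    #     class_name = _h5attr(handle['sasentry01'], u'canSAS_class')  # str even if stored as bytes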
src/sas/sascalc/dataloader/readers/danse_reader.py
r235f514 r2469df7 5 5 #This software was developed by the University of Tennessee as part of the 6 6 #Distributed Data Analysis of Neutron Scattering Experiments (DANSE) 7 #project funded by the US National Science Foundation. 7 #project funded by the US National Science Foundation. 8 8 #If you use DANSE applications to do scientific research that leads to 9 9 #publication, we ask that you acknowledge the use of the software with the … … 14 14 import math 15 15 import os 16 import sys 16 import logging 17 17 18 import numpy as np 18 import logging 19 from sas.sascalc.dataloader.data_info import Data2D, Detector 20 from sas.sascalc.dataloader.manipulations import reader2D_converter 19 20 from ..data_info import plottable_2D, DataInfo, Detector 21 from ..manipulations import reader2D_converter 22 from ..file_reader_base_class import FileReader 23 from ..loader_exceptions import FileContentsException, DataReaderException 21 24 22 25 logger = logging.getLogger(__name__) … … 30 33 31 34 32 class Reader :35 class Reader(FileReader): 33 36 """ 34 37 Example data manipulation … … 40 43 ## Extension 41 44 ext = ['.sans', '.SANS'] 42 43 def read(self, filename=None): 44 """ 45 Open and read the data in a file 46 @param file: path of the file 47 """ 48 49 read_it = False 50 for item in self.ext: 51 if filename.lower().find(item) >= 0: 52 read_it = True 53 54 if read_it: 45 46 def get_file_contents(self): 47 self.current_datainfo = DataInfo() 48 self.current_dataset = plottable_2D() 49 self.output = [] 50 51 loaded_correctly = True 52 error_message = "" 53 54 # defaults 55 # wavelength in Angstrom 56 wavelength = 10.0 57 # Distance in meter 58 distance = 11.0 59 # Pixel number of center in x 60 center_x = 65 61 # Pixel number of center in y 62 center_y = 65 63 # Pixel size [mm] 64 pixel = 5.0 65 # Size in x, in pixels 66 size_x = 128 67 # Size in y, in pixels 68 size_y = 128 69 # Format version 70 fversion = 1.0 71 72 self.current_datainfo.filename = os.path.basename(self.f_open.name) 73 detector = Detector() 74 self.current_datainfo.detector.append(detector) 75 76 self.current_dataset.data = np.zeros([size_x, size_y]) 77 self.current_dataset.err_data = np.zeros([size_x, size_y]) 78 79 read_on = True 80 data_start_line = 1 81 while read_on: 82 line = self.nextline() 83 data_start_line += 1 84 if line.find("DATA:") >= 0: 85 read_on = False 86 break 87 toks = line.split(':') 55 88 try: 56 datafile = open(filename, 'r')57 except:58 raise RuntimeError,"danse_reader cannot open %s" % (filename)59 60 # defaults61 # wavelength in Angstrom62 wavelength = 10.063 # Distance in meter64 distance = 11.065 # Pixel number of center in x66 center_x = 6567 # Pixel number of center in y68 center_y = 6569 # Pixel size [mm]70 pixel = 5.071 # Size in x, in pixels72 size_x = 12873 # Size in y, in pixels74 size_y = 12875 # Format version76 fversion = 1.077 78 output = Data2D()79 output.filename = os.path.basename(filename)80 detector = Detector()81 output.detector.append(detector)82 83 output.data = np.zeros([size_x,size_y])84 output.err_data = np.zeros([size_x, size_y])85 86 data_conv_q = None87 data_conv_i = None88 89 if has_converter == True and output.Q_unit != '1/A':90 data_conv_q = Converter('1/A')91 # Test it92 data_conv_q(1.0, output.Q_unit)93 94 if has_converter == True and output.I_unit != '1/cm':95 data_conv_i = Converter('1/cm')96 # Test it97 data_conv_i(1.0, output.I_unit)98 99 read_on = True100 while read_on:101 line = datafile.readline()102 if line.find("DATA:") >= 0:103 read_on = False104 break105 toks = 
line.split(':')106 89 if toks[0] == "FORMATVERSION": 107 90 fversion = float(toks[1]) 108 if toks[0] == "WAVELENGTH":91 elif toks[0] == "WAVELENGTH": 109 92 wavelength = float(toks[1]) 110 93 elif toks[0] == "DISTANCE": … … 120 103 elif toks[0] == "SIZE_Y": 121 104 size_y = int(toks[1]) 122 123 # Read the data 124 data = [] 125 error = [] 126 if fversion == 1.0: 127 data_str = datafile.readline() 128 data = data_str.split(' ') 129 else: 130 read_on = True 131 while read_on: 132 data_str = datafile.readline() 133 if len(data_str) == 0: 134 read_on = False 135 else: 136 toks = data_str.split() 137 try: 138 val = float(toks[0]) 139 err = float(toks[1]) 140 if data_conv_i is not None: 141 val = data_conv_i(val, units=output._yunit) 142 err = data_conv_i(err, units=output._yunit) 143 data.append(val) 144 error.append(err) 145 except: 146 logger.info("Skipping line:%s,%s" %(data_str, 147 sys.exc_value)) 148 149 # Initialize 150 x_vals = [] 151 y_vals = [] 152 ymin = None 153 ymax = None 154 xmin = None 155 xmax = None 156 157 # Qx and Qy vectors 158 theta = pixel / distance / 100.0 159 stepq = 4.0 * math.pi / wavelength * math.sin(theta / 2.0) 160 for i_x in range(size_x): 161 theta = (i_x - center_x + 1) * pixel / distance / 100.0 162 qx = 4.0 * math.pi / wavelength * math.sin(theta / 2.0) 163 164 if has_converter == True and output.Q_unit != '1/A': 165 qx = data_conv_q(qx, units=output.Q_unit) 166 167 x_vals.append(qx) 168 if xmin is None or qx < xmin: 169 xmin = qx 170 if xmax is None or qx > xmax: 171 xmax = qx 172 173 ymin = None 174 ymax = None 175 for i_y in range(size_y): 176 theta = (i_y - center_y + 1) * pixel / distance / 100.0 177 qy = 4.0 * math.pi / wavelength * math.sin(theta/2.0) 178 179 if has_converter == True and output.Q_unit != '1/A': 180 qy = data_conv_q(qy, units=output.Q_unit) 181 182 y_vals.append(qy) 183 if ymin is None or qy < ymin: 184 ymin = qy 185 if ymax is None or qy > ymax: 186 ymax = qy 187 188 # Store the data in the 2D array 189 i_x = 0 190 i_y = -1 191 192 for i_pt in range(len(data)): 193 try: 194 value = float(data[i_pt]) 195 except: 196 # For version 1.0, the data were still 197 # stored as strings at this point. 
198 msg = "Skipping entry (v1.0):%s,%s" % (str(data[i_pt]), 199 sys.exc_value) 200 logger.info(msg) 201 202 # Get bin number 203 if math.fmod(i_pt, size_x) == 0: 204 i_x = 0 205 i_y += 1 206 else: 207 i_x += 1 208 209 output.data[i_y][i_x] = value 210 if fversion>1.0: 211 output.err_data[i_y][i_x] = error[i_pt] 212 213 # Store all data 214 # Store wavelength 215 if has_converter == True and output.source.wavelength_unit != 'A': 216 conv = Converter('A') 217 wavelength = conv(wavelength, 218 units=output.source.wavelength_unit) 219 output.source.wavelength = wavelength 220 221 # Store distance 222 if has_converter == True and detector.distance_unit != 'm': 223 conv = Converter('m') 224 distance = conv(distance, units=detector.distance_unit) 225 detector.distance = distance 226 227 # Store pixel size 228 if has_converter == True and detector.pixel_size_unit != 'mm': 229 conv = Converter('mm') 230 pixel = conv(pixel, units=detector.pixel_size_unit) 231 detector.pixel_size.x = pixel 232 detector.pixel_size.y = pixel 233 234 # Store beam center in distance units 235 detector.beam_center.x = center_x * pixel 236 detector.beam_center.y = center_y * pixel 237 238 # Store limits of the image (2D array) 239 xmin = xmin - stepq / 2.0 240 xmax = xmax + stepq / 2.0 241 ymin = ymin - stepq /2.0 242 ymax = ymax + stepq / 2.0 243 244 if has_converter == True and output.Q_unit != '1/A': 245 xmin = data_conv_q(xmin, units=output.Q_unit) 246 xmax = data_conv_q(xmax, units=output.Q_unit) 247 ymin = data_conv_q(ymin, units=output.Q_unit) 248 ymax = data_conv_q(ymax, units=output.Q_unit) 249 output.xmin = xmin 250 output.xmax = xmax 251 output.ymin = ymin 252 output.ymax = ymax 253 254 # Store x and y axis bin centers 255 output.x_bins = x_vals 256 output.y_bins = y_vals 257 258 # Units 259 if data_conv_q is not None: 260 output.xaxis("\\rm{Q_{x}}", output.Q_unit) 261 output.yaxis("\\rm{Q_{y}}", output.Q_unit) 262 else: 263 output.xaxis("\\rm{Q_{x}}", 'A^{-1}') 264 output.yaxis("\\rm{Q_{y}}", 'A^{-1}') 265 266 if data_conv_i is not None: 267 output.zaxis("\\rm{Intensity}", output.I_unit) 268 else: 269 output.zaxis("\\rm{Intensity}", "cm^{-1}") 270 271 if not fversion >= 1.0: 272 msg = "Danse_reader can't read this file %s" % filename 273 raise ValueError, msg 274 else: 275 logger.info("Danse_reader Reading %s \n" % filename) 276 277 # Store loading process information 278 output.meta_data['loader'] = self.type_name 279 output = reader2D_converter(output) 280 return output 281 282 return None 105 except ValueError as e: 106 error_message += "Unable to parse {}. Default value used.\n".format(toks[0]) 107 loaded_correctly = False 108 109 # Read the data 110 data = [] 111 error = [] 112 if not fversion >= 1.0: 113 msg = "danse_reader can't read this file {}".format(self.f_open.name) 114 raise FileContentsException(msg) 115 116 for line_num, data_str in enumerate(self.nextlines()): 117 toks = data_str.split() 118 try: 119 val = float(toks[0]) 120 err = float(toks[1]) 121 data.append(val) 122 error.append(err) 123 except ValueError as exc: 124 msg = "Unable to parse line {}: {}".format(line_num + data_start_line, data_str.strip()) 125 raise FileContentsException(msg) 126 127 num_pts = size_x * size_y 128 if len(data) < num_pts: 129 msg = "Not enough data points provided. Expected {} but got {}".format( 130 size_x * size_y, len(data)) 131 raise FileContentsException(msg) 132 elif len(data) > num_pts: 133 error_message += ("Too many data points provided. Expected {0} but" 134 " got {1}. 
Only the first {0} will be used.\n").format(num_pts, len(data)) 135 loaded_correctly = False 136 data = data[:num_pts] 137 error = error[:num_pts] 138 139 # Qx and Qy vectors 140 theta = pixel / distance / 100.0 141 i_x = np.arange(size_x) 142 theta = (i_x - center_x + 1) * pixel / distance / 100.0 143 x_vals = 4.0 * np.pi / wavelength * np.sin(theta / 2.0) 144 xmin = x_vals.min() 145 xmax = x_vals.max() 146 147 i_y = np.arange(size_y) 148 theta = (i_y - center_y + 1) * pixel / distance / 100.0 149 y_vals = 4.0 * np.pi / wavelength * np.sin(theta / 2.0) 150 ymin = y_vals.min() 151 ymax = y_vals.max() 152 153 self.current_dataset.data = np.array(data, dtype=np.float64).reshape((size_y, size_x)) 154 if fversion > 1.0: 155 self.current_dataset.err_data = np.array(error, dtype=np.float64).reshape((size_y, size_x)) 156 157 # Store all data 158 # Store wavelength 159 if has_converter and self.current_datainfo.source.wavelength_unit != 'A': 160 conv = Converter('A') 161 wavelength = conv(wavelength, 162 units=self.current_datainfo.source.wavelength_unit) 163 self.current_datainfo.source.wavelength = wavelength 164 165 # Store distance 166 if has_converter and detector.distance_unit != 'm': 167 conv = Converter('m') 168 distance = conv(distance, units=detector.distance_unit) 169 detector.distance = distance 170 171 # Store pixel size 172 if has_converter and detector.pixel_size_unit != 'mm': 173 conv = Converter('mm') 174 pixel = conv(pixel, units=detector.pixel_size_unit) 175 detector.pixel_size.x = pixel 176 detector.pixel_size.y = pixel 177 178 # Store beam center in distance units 179 detector.beam_center.x = center_x * pixel 180 detector.beam_center.y = center_y * pixel 181 182 183 self.current_dataset.xaxis("\\rm{Q_{x}}", 'A^{-1}') 184 self.current_dataset.yaxis("\\rm{Q_{y}}", 'A^{-1}') 185 self.current_dataset.zaxis("\\rm{Intensity}", "cm^{-1}") 186 187 self.current_dataset.x_bins = x_vals 188 self.current_dataset.y_bins = y_vals 189 190 # Reshape data 191 x_vals = np.tile(x_vals, (size_y, 1)).flatten() 192 y_vals = np.tile(y_vals, (size_x, 1)).T.flatten() 193 if (np.all(self.current_dataset.err_data == None) 194 or np.any(self.current_dataset.err_data <= 0)): 195 new_err_data = np.sqrt(np.abs(self.current_dataset.data)) 196 else: 197 new_err_data = self.current_dataset.err_data.flatten() 198 199 self.current_dataset.err_data = new_err_data 200 self.current_dataset.qx_data = x_vals 201 self.current_dataset.qy_data = y_vals 202 self.current_dataset.q_data = np.sqrt(x_vals**2 + y_vals**2) 203 self.current_dataset.mask = np.ones(len(x_vals), dtype=bool) 204 205 # Store loading process information 206 self.current_datainfo.meta_data['loader'] = self.type_name 207 208 self.send_to_output() 209 210 if not loaded_correctly: 211 raise DataReaderException(error_message) -
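Note: the rewritten danse reader builds its Q axes from q = (4 pi / lambda) * sin(theta / 2), with theta taken from the pixel offset, pixel size and sample-detector distance. The sketch below reruns that calculation on its own, using the reader's default header values; the /100.0 factor is kept only because it is what the reader itself applies.

    import numpy as np

    wavelength = 10.0   # Angstrom (reader default)
    distance = 11.0     # m (reader default)
    pixel = 5.0         # mm (reader default)
    center_x, size_x = 65, 128

    i_x = np.arange(size_x)
    theta = (i_x - center_x + 1) * pixel / distance / 100.0   # same pixel-offset angle as the reader
    qx = 4.0 * np.pi / wavelength * np.sin(theta / 2.0)       # 1/Angstrom
    qx_grid = np.tile(qx, (size_x, 1)).flatten()              # flattened Qx, as done for a square detector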
src/sas/sascalc/dataloader/readers/red2d_reader.py
ra1b8fee rc8321cfc 5 5 #This software was developed by the University of Tennessee as part of the 6 6 #Distributed Data Analysis of Neutron Scattering Experiments (DANSE) 7 #project funded by the US National Science Foundation. 7 #project funded by the US National Science Foundation. 8 8 #See the license text in license.txt 9 9 #copyright 2008, University of Tennessee 10 10 ###################################################################### 11 from __future__ import print_function12 13 11 import os 12 import math 13 import time 14 14 15 import numpy as np 15 import math 16 from sas.sascalc.dataloader.data_info import Data2D, Detector 17 18 # Look for unit converter 19 has_converter = True 20 try: 21 from sas.sascalc.data_util.nxsunit import Converter 22 except: 23 has_converter = False 24 25 16 17 from sas.sascalc.data_util.nxsunit import Converter 18 19 from ..data_info import plottable_2D, DataInfo, Detector 20 from ..file_reader_base_class import FileReader 21 from ..loader_exceptions import FileContentsException 22 23 26 24 def check_point(x_point): 27 25 """ … … 31 29 try: 32 30 return float(x_point) 33 except :31 except Exception: 34 32 return 0 35 36 37 class Reader :33 34 35 class Reader(FileReader): 38 36 """ Simple data reader for Igor data files """ 39 37 ## File type … … 43 41 ## Extension 44 42 ext = ['.DAT', '.dat'] 45 43 46 44 def write(self, filename, data): 47 45 """ 48 46 Write to .dat 49 47 50 48 :param filename: file name to write 51 49 :param data: data2D 52 50 """ 53 import time54 51 # Write the file 55 fd = open(filename, 'w') 56 t = time.localtime() 57 time_str = time.strftime("%H:%M on %b %d %y", t) 58 59 header_str = "Data columns are Qx - Qy - I(Qx,Qy)\n\nASCII data" 60 header_str += " created at %s \n\n" % time_str 61 # simple 2D header 62 fd.write(header_str) 63 # write qx qy I values 64 for i in range(len(data.data)): 65 fd.write("%g %g %g\n" % (data.qx_data[i], 66 data.qy_data[i], 67 data.data[i])) 68 # close 69 fd.close() 70 71 def read(self, filename=None): 72 """ Read file """ 73 if not os.path.isfile(filename): 74 raise ValueError, \ 75 "Specified file %s is not a regular file" % filename 76 52 try: 53 fd = open(filename, 'w') 54 t = time.localtime() 55 time_str = time.strftime("%H:%M on %b %d %y", t) 56 57 header_str = "Data columns are Qx - Qy - I(Qx,Qy)\n\nASCII data" 58 header_str += " created at %s \n\n" % time_str 59 # simple 2D header 60 fd.write(header_str) 61 # write qx qy I values 62 for i in range(len(data.data)): 63 fd.write("%g %g %g\n" % (data.qx_data[i], 64 data.qy_data[i], 65 data.data[i])) 66 finally: 67 fd.close() 68 69 def get_file_contents(self): 77 70 # Read file 78 f = open(filename, 'r') 79 buf = f.read() 80 f.close() 71 buf = self.readall() 72 self.f_open.close() 81 73 # Instantiate data object 82 output = Data2D() 83 output.filename = os.path.basename(filename) 84 detector = Detector() 85 if len(output.detector) > 0: 86 print(str(output.detector[0])) 87 output.detector.append(detector) 88 74 self.current_dataset = plottable_2D() 75 self.current_datainfo = DataInfo() 76 self.current_datainfo.filename = os.path.basename(self.f_open.name) 77 self.current_datainfo.detector.append(Detector()) 78 89 79 # Get content 90 data Started = False91 80 data_started = False 81 92 82 ## Defaults 93 83 lines = buf.split('\n') 94 84 x = [] 95 85 y = [] 96 86 97 87 wavelength = None 98 88 distance = None 99 89 transmission = None 100 90 101 91 pixel_x = None 102 92 pixel_y = None 103 104 isInfo = False 105 isCenter = False 106 107 data_conv_q = 
None 108 data_conv_i = None 109 110 # Set units: This is the unit assumed for Q and I in the data file. 111 if has_converter == True and output.Q_unit != '1/A': 112 data_conv_q = Converter('1/A') 113 # Test it 114 data_conv_q(1.0, output.Q_unit) 115 116 if has_converter == True and output.I_unit != '1/cm': 117 data_conv_i = Converter('1/cm') 118 # Test it 119 data_conv_i(1.0, output.I_unit) 120 121 93 94 is_info = False 95 is_center = False 96 122 97 # Remove the last lines before the for loop if the lines are empty 123 98 # to calculate the exact number of data points … … 135 110 ## Reading the header applies only to IGOR/NIST 2D q_map data files 136 111 # Find setup info line 137 if is Info:138 is Info = False112 if is_info: 113 is_info = False 139 114 line_toks = line.split() 140 115 # Wavelength in Angstrom 141 116 try: 142 117 wavelength = float(line_toks[1]) 143 # Units 144 if has_converter == True and \ 145 output.source.wavelength_unit != 'A': 118 # Wavelength is stored in angstroms; convert if necessary 119 if self.current_datainfo.source.wavelength_unit != 'A': 146 120 conv = Converter('A') 147 121 wavelength = conv(wavelength, 148 units=output.source.wavelength_unit) 149 except: 150 #Not required 151 pass 152 # Distance in mm 122 units=self.current_datainfo.source.wavelength_unit) 123 except Exception: 124 pass # Not required 153 125 try: 154 126 distance = float(line_toks[3]) 155 # Units156 if has_converter == True and detector.distance_unit != 'm':127 # Distance is stored in meters; convert if necessary 128 if self.current_datainfo.detector[0].distance_unit != 'm': 157 129 conv = Converter('m') 158 distance = conv(distance, units=detector.distance_unit) 159 except: 160 #Not required 161 pass 162 163 # Distance in meters 130 distance = conv(distance, 131 units=self.current_datainfo.detector[0].distance_unit) 132 except Exception: 133 pass # Not required 134 164 135 try: 165 136 transmission = float(line_toks[4]) 166 except: 167 #Not required 168 pass 169 137 except Exception: 138 pass # Not required 139 170 140 if line.count("LAMBDA") > 0: 171 is Info = True172 141 is_info = True 142 173 143 # Find center info line 174 if is Center:175 is Center = False144 if is_center: 145 is_center = False 176 146 line_toks = line.split() 177 147 # Center in bin number … … 180 150 181 151 if line.count("BCENT") > 0: 182 is Center = True152 is_center = True 183 153 # Check version 184 154 if line.count("Data columns") > 0: … … 187 157 # Find data start 188 158 if line.count("ASCII data") > 0: 189 data Started = True159 data_started = True 190 160 continue 191 161 192 162 ## Read and get data. 
193 if data Started == True:163 if data_started: 194 164 line_toks = line.split() 195 165 if len(line_toks) == 0: 196 166 #empty line 197 167 continue 198 # the number of columns must be stayed same 168 # the number of columns must be stayed same 199 169 col_num = len(line_toks) 200 170 break 171 201 172 # Make numpy array to remove header lines using index 202 173 lines_array = np.array(lines) … … 204 175 # index for lines_array 205 176 lines_index = np.arange(len(lines)) 206 177 207 178 # get the data lines 208 179 data_lines = lines_array[lines_index >= (line_num - 1)] … … 213 184 # split all data to one big list w/" "separator 214 185 data_list = data_list.split() 215 186 216 187 # Check if the size is consistent with data, otherwise 217 188 #try the tab(\t) separator … … 224 195 # Change it(string) into float 225 196 #data_list = map(float,data_list) 226 data_list1 = map(check_point, data_list)197 data_list1 = list(map(check_point, data_list)) 227 198 228 199 # numpy array form … … 232 203 try: 233 204 data_point = data_array.reshape(row_num, col_num).transpose() 234 except :235 msg = "red2d_reader : Can't read this file: Not a proper file format"236 raise ValueError, msg205 except Exception: 206 msg = "red2d_reader can't read this file: Incorrect number of data points provided." 207 raise FileContentsException(msg) 237 208 ## Get the all data: Let's HARDcoding; Todo find better way 238 209 # Defaults … … 257 228 #if col_num > (6 + ver): mask[data_point[(6 + ver)] < 1] = False 258 229 q_data = np.sqrt(qx_data*qx_data+qy_data*qy_data+qz_data*qz_data) 259 260 # Extra protection(it is needed for some data files): 230 231 # Extra protection(it is needed for some data files): 261 232 # If all mask elements are False, put all True 262 233 if not mask.any(): 263 234 mask[mask == False] = True 264 235 265 236 # Store limits of the image in q space 266 237 xmin = np.min(qx_data) … … 269 240 ymax = np.max(qy_data) 270 241 271 # units272 if has_converter == True and output.Q_unit != '1/A':273 xmin = data_conv_q(xmin, units=output.Q_unit)274 xmax = data_conv_q(xmax, units=output.Q_unit)275 ymin = data_conv_q(ymin, units=output.Q_unit)276 ymax = data_conv_q(ymax, units=output.Q_unit)277 278 242 ## calculate the range of the qx and qy_data 279 243 x_size = math.fabs(xmax - xmin) 280 244 y_size = math.fabs(ymax - ymin) 281 245 282 246 # calculate the number of pixels in the each axes 283 247 npix_y = math.floor(math.sqrt(len(data))) 284 248 npix_x = math.floor(len(data) / npix_y) 285 249 286 250 # calculate the size of bins 287 251 xstep = x_size / (npix_x - 1) 288 252 ystep = y_size / (npix_y - 1) 289 253 290 254 # store x and y axis bin centers in q space 291 255 x_bins = np.arange(xmin, xmax + xstep, xstep) 292 256 y_bins = np.arange(ymin, ymax + ystep, ystep) 293 257 294 258 # get the limits of q values 295 259 xmin = xmin - xstep / 2 … … 297 261 ymin = ymin - ystep / 2 298 262 ymax = ymax + ystep / 2 299 263 300 264 #Store data in outputs 301 265 #TODO: Check the lengths 302 output.data = data266 self.current_dataset.data = data 303 267 if (err_data == 1).all(): 304 output.err_data = np.sqrt(np.abs(data))305 output.err_data[output.err_data == 0.0] = 1.0268 self.current_dataset.err_data = np.sqrt(np.abs(data)) 269 self.current_dataset.err_data[self.current_dataset.err_data == 0.0] = 1.0 306 270 else: 307 output.err_data = err_data308 309 output.qx_data = qx_data310 output.qy_data = qy_data311 output.q_data = q_data312 output.mask = mask313 314 output.x_bins = x_bins315 output.y_bins = y_bins316 
317 output.xmin = xmin318 output.xmax = xmax319 output.ymin = ymin320 output.ymax = ymax321 322 output.source.wavelength = wavelength323 271 self.current_dataset.err_data = err_data 272 273 self.current_dataset.qx_data = qx_data 274 self.current_dataset.qy_data = qy_data 275 self.current_dataset.q_data = q_data 276 self.current_dataset.mask = mask 277 278 self.current_dataset.x_bins = x_bins 279 self.current_dataset.y_bins = y_bins 280 281 self.current_dataset.xmin = xmin 282 self.current_dataset.xmax = xmax 283 self.current_dataset.ymin = ymin 284 self.current_dataset.ymax = ymax 285 286 self.current_datainfo.source.wavelength = wavelength 287 324 288 # Store pixel size in mm 325 detector.pixel_size.x = pixel_x326 detector.pixel_size.y = pixel_y327 289 self.current_datainfo.detector[0].pixel_size.x = pixel_x 290 self.current_datainfo.detector[0].pixel_size.y = pixel_y 291 328 292 # Store the sample to detector distance 329 detector.distance = distance330 293 self.current_datainfo.detector[0].distance = distance 294 331 295 # optional data: if all of dq data == 0, do not pass to output 332 296 if len(dqx_data) == len(qx_data) and dqx_data.any() != 0: … … 340 304 cos_th = qx_data / diag 341 305 sin_th = qy_data / diag 342 output.dqx_data = np.sqrt((dqx_data * cos_th) * \306 self.current_dataset.dqx_data = np.sqrt((dqx_data * cos_th) * \ 343 307 (dqx_data * cos_th) \ 344 308 + (dqy_data * sin_th) * \ 345 309 (dqy_data * sin_th)) 346 output.dqy_data = np.sqrt((dqx_data * sin_th) * \310 self.current_dataset.dqy_data = np.sqrt((dqx_data * sin_th) * \ 347 311 (dqx_data * sin_th) \ 348 312 + (dqy_data * cos_th) * \ 349 313 (dqy_data * cos_th)) 350 314 else: 351 output.dqx_data = dqx_data352 output.dqy_data = dqy_data315 self.current_dataset.dqx_data = dqx_data 316 self.current_dataset.dqy_data = dqy_data 353 317 354 318 # Units of axes 355 if data_conv_q is not None: 356 output.xaxis("\\rm{Q_{x}}", output.Q_unit) 357 output.yaxis("\\rm{Q_{y}}", output.Q_unit) 358 else: 359 output.xaxis("\\rm{Q_{x}}", 'A^{-1}') 360 output.yaxis("\\rm{Q_{y}}", 'A^{-1}') 361 if data_conv_i is not None: 362 output.zaxis("\\rm{Intensity}", output.I_unit) 363 else: 364 output.zaxis("\\rm{Intensity}", "cm^{-1}") 365 319 self.current_dataset.xaxis(r"\rm{Q_{x}}", 'A^{-1}') 320 self.current_dataset.yaxis(r"\rm{Q_{y}}", 'A^{-1}') 321 self.current_dataset.zaxis(r"\rm{Intensity}", "cm^{-1}") 322 366 323 # Store loading process information 367 output.meta_data['loader'] = self.type_name368 369 return output324 self.current_datainfo.meta_data['loader'] = self.type_name 325 326 self.send_to_output() -
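Two small pieces of the red2d_reader hunk above carry most of the Python 3 weight: map() now returns a lazy iterator rather than a list, and the comma form of raise is no longer valid syntax, so the patched reader wraps map() in list() and raises FileContentsException(msg) from loader_exceptions instead of the old ValueError, msg. A minimal sketch of that pattern, assuming check_point simply coerces tokens to float (its body is not shown in this hunk) and using an illustrative helper name:

    import numpy as np

    from sas.sascalc.dataloader.loader_exceptions import FileContentsException


    def check_point(token):
        # Assumed behaviour of the reader's helper: coerce to float,
        # fall back to 0 for tokens that cannot be parsed.
        try:
            return float(token)
        except ValueError:
            return 0


    def tokens_to_columns(data_list, col_num):
        # Python 3: map() is lazy, so materialise it with list() before
        # building the numpy array -- exactly what the patched line does.
        data_array = np.array(list(map(check_point, data_list)))
        try:
            row_num = len(data_array) // col_num
            return data_array.reshape(row_num, col_num).transpose()
        except ValueError:
            # Python 3 raise syntax: raise Class(msg), never "raise Class, msg".
            msg = "red2d sketch: incorrect number of data points provided"
            raise FileContentsException(msg)

Transposing gives one row per quantity (qx, qy, intensity, ...), which is how the reader then unpacks data_point into the current_dataset arrays before calling send_to_output().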
src/sas/sascalc/dataloader/readers/sesans_reader.py
r149b8f6 r3053a4a 6 6 Jurrian Bakker 7 7 """ 8 import os 9 8 10 import numpy as np 9 import os 10 from sas.sascalc.dataloader.data_info import Data1D 11 12 from ..file_reader_base_class import FileReader 13 from ..data_info import plottable_1D, DataInfo 14 from ..loader_exceptions import FileContentsException 11 15 12 16 # Check whether we have a converter available … … 14 18 try: 15 19 from sas.sascalc.data_util.nxsunit import Converter 16 except :20 except ImportError: 17 21 has_converter = False 18 22 _ZERO = 1e-16 19 23 20 21 class Reader: 24 class Reader(FileReader): 22 25 """ 23 26 Class to load sesans files (6 columns). … … 26 29 type_name = "SESANS" 27 30 28 # Wildcards31 ## Wildcards 29 32 type = ["SESANS files (*.ses)|*.ses", 30 33 "SESANS files (*..sesans)|*.sesans"] … … 35 38 allow_all = True 36 39 37 def read(self, path): 38 """ 39 Load data file 40 def get_file_contents(self): 41 self.current_datainfo = DataInfo() 42 self.current_dataset = plottable_1D(np.array([]), np.array([])) 43 self.current_datainfo.isSesans = True 44 self.output = [] 40 45 41 :param path: file path 46 line = self.nextline() 47 params = {} 48 while line and not line.startswith("BEGIN_DATA"): 49 terms = line.split() 50 if len(terms) >= 2: 51 params[terms[0]] = " ".join(terms[1:]) 52 line = self.nextline() 53 self.params = params 42 54 43 :return: SESANSData1D object, or None 55 if "FileFormatVersion" not in self.params: 56 raise FileContentsException("SES file missing FileFormatVersion") 57 if float(self.params["FileFormatVersion"]) >= 2.0: 58 raise FileContentsException("SASView only supports SES version 1") 44 59 45 :raise RuntimeError: when the file can't be opened 46 :raise ValueError: when the length of the data vectors are inconsistent 47 """ 48 if os.path.isfile(path): 49 basename = os.path.basename(path) 50 _, extension = os.path.splitext(basename) 51 if not (self.allow_all or extension.lower() in self.ext): 52 raise RuntimeError( 53 "{} has an unrecognized file extension".format(path)) 60 if "SpinEchoLength_unit" not in self.params: 61 raise FileContentsException("SpinEchoLength has no units") 62 if "Wavelength_unit" not in self.params: 63 raise FileContentsException("Wavelength has no units") 64 if params["SpinEchoLength_unit"] != params["Wavelength_unit"]: 65 raise FileContentsException( 66 "The spin echo data has rudely used " 67 "different units for the spin echo length " 68 "and the wavelength. 
While sasview could " 69 "handle this instance, it is a violation " 70 "of the file format and will not be " 71 "handled by other software.") 72 73 headers = self.nextline().split() 74 75 self._insist_header(headers, "SpinEchoLength") 76 self._insist_header(headers, "Depolarisation") 77 self._insist_header(headers, "Depolarisation_error") 78 self._insist_header(headers, "Wavelength") 79 80 data = np.loadtxt(self.f_open) 81 82 if data.shape[1] != len(headers): 83 raise FileContentsException( 84 "File has {} headers, but {} columns".format( 85 len(headers), 86 data.shape[1])) 87 88 if not data.size: 89 raise FileContentsException("{} is empty".format(self.filepath)) 90 x = data[:, headers.index("SpinEchoLength")] 91 if "SpinEchoLength_error" in headers: 92 dx = data[:, headers.index("SpinEchoLength_error")] 54 93 else: 55 raise RuntimeError("{} is not a file".format(path)) 56 with open(path, 'r') as input_f: 57 line = input_f.readline() 58 params = {} 59 while not line.startswith("BEGIN_DATA"): 60 terms = line.split() 61 if len(terms) >= 2: 62 params[terms[0]] = " ".join(terms[1:]) 63 line = input_f.readline() 64 self.params = params 94 dx = x * 0.05 95 lam = data[:, headers.index("Wavelength")] 96 if "Wavelength_error" in headers: 97 dlam = data[:, headers.index("Wavelength_error")] 98 else: 99 dlam = lam * 0.05 100 y = data[:, headers.index("Depolarisation")] 101 dy = data[:, headers.index("Depolarisation_error")] 65 102 66 if "FileFormatVersion" not in self.params: 67 raise RuntimeError("SES file missing FileFormatVersion") 68 if float(self.params["FileFormatVersion"]) >= 2.0: 69 raise RuntimeError("SASView only supports SES version 1") 103 lam_unit = self._unit_fetch("Wavelength") 104 x, x_unit = self._unit_conversion(x, "A", 105 self._unit_fetch( 106 "SpinEchoLength")) 107 dx, dx_unit = self._unit_conversion( 108 dx, lam_unit, 109 self._unit_fetch("SpinEchoLength")) 110 dlam, dlam_unit = self._unit_conversion( 111 dlam, lam_unit, 112 self._unit_fetch("Wavelength")) 113 y_unit = self._unit_fetch("Depolarisation") 70 114 71 if "SpinEchoLength_unit" not in self.params: 72 raise RuntimeError("SpinEchoLength has no units") 73 if "Wavelength_unit" not in self.params: 74 raise RuntimeError("Wavelength has no units") 75 if params["SpinEchoLength_unit"] != params["Wavelength_unit"]: 76 raise RuntimeError("The spin echo data has rudely used " 77 "different units for the spin echo length " 78 "and the wavelength. 
While sasview could " 79 "handle this instance, it is a violation " 80 "of the file format and will not be " 81 "handled by other software.") 115 self.current_dataset.x = x 116 self.current_dataset.y = y 117 self.current_dataset.lam = lam 118 self.current_dataset.dy = dy 119 self.current_dataset.dx = dx 120 self.current_dataset.dlam = dlam 121 self.current_datainfo.isSesans = True 82 122 83 headers = input_f.readline().split() 123 self.current_datainfo._yunit = y_unit 124 self.current_datainfo._xunit = x_unit 125 self.current_datainfo.source.wavelength_unit = lam_unit 126 self.current_datainfo.source.wavelength = lam 127 self.current_datainfo.filename = os.path.basename(self.f_open.name) 128 self.current_dataset.xaxis(r"\rm{z}", x_unit) 129 # Adjust label to ln P/(lam^2 t), remove lam column refs 130 self.current_dataset.yaxis(r"\rm{ln(P)/(t \lambda^2)}", y_unit) 131 # Store loading process information 132 self.current_datainfo.meta_data['loader'] = self.type_name 133 self.current_datainfo.sample.name = params["Sample"] 134 self.current_datainfo.sample.ID = params["DataFileTitle"] 135 self.current_datainfo.sample.thickness = self._unit_conversion( 136 float(params["Thickness"]), "cm", 137 self._unit_fetch("Thickness"))[0] 84 138 85 self._insist_header(headers, "SpinEchoLength") 86 self._insist_header(headers, "Depolarisation") 87 self._insist_header(headers, "Depolarisation_error") 88 self._insist_header(headers, "Wavelength") 139 self.current_datainfo.sample.zacceptance = ( 140 float(params["Theta_zmax"]), 141 self._unit_fetch("Theta_zmax")) 89 142 90 data = np.loadtxt(input_f) 143 self.current_datainfo.sample.yacceptance = ( 144 float(params["Theta_ymax"]), 145 self._unit_fetch("Theta_ymax")) 91 146 92 if data.shape[1] != len(headers): 93 raise RuntimeError( 94 "File has {} headers, but {} columns".format( 95 len(headers), 96 data.shape[1])) 97 98 if not data.size: 99 raise RuntimeError("{} is empty".format(path)) 100 x = data[:, headers.index("SpinEchoLength")] 101 if "SpinEchoLength_error" in headers: 102 dx = data[:, headers.index("SpinEchoLength_error")] 103 else: 104 dx = x * 0.05 105 lam = data[:, headers.index("Wavelength")] 106 if "Wavelength_error" in headers: 107 dlam = data[:, headers.index("Wavelength_error")] 108 else: 109 dlam = lam * 0.05 110 y = data[:, headers.index("Depolarisation")] 111 dy = data[:, headers.index("Depolarisation_error")] 112 113 lam_unit = self._unit_fetch("Wavelength") 114 x, x_unit = self._unit_conversion(x, "A", 115 self._unit_fetch( 116 "SpinEchoLength")) 117 dx, dx_unit = self._unit_conversion( 118 dx, lam_unit, 119 self._unit_fetch("SpinEchoLength")) 120 dlam, dlam_unit = self._unit_conversion( 121 dlam, lam_unit, 122 self._unit_fetch("Wavelength")) 123 y_unit = self._unit_fetch("Depolarisation") 124 125 output = Data1D(x=x, y=y, lam=lam, dy=dy, dx=dx, dlam=dlam, 126 isSesans=True) 127 128 output.y_unit = y_unit 129 output.x_unit = x_unit 130 output.source.wavelength_unit = lam_unit 131 output.source.wavelength = lam 132 self.filename = output.filename = basename 133 output.xaxis(r"\rm{z}", x_unit) 134 # Adjust label to ln P/(lam^2 t), remove lam column refs 135 output.yaxis(r"\rm{ln(P)/(t \lambda^2)}", y_unit) 136 # Store loading process information 137 output.meta_data['loader'] = self.type_name 138 output.sample.name = params["Sample"] 139 output.sample.ID = params["DataFileTitle"] 140 output.sample.thickness = self._unit_conversion( 141 float(params["Thickness"]), "cm", 142 self._unit_fetch("Thickness"))[0] 143 144 output.sample.zacceptance = 
( 145 float(params["Theta_zmax"]), 146 self._unit_fetch("Theta_zmax")) 147 148 output.sample.yacceptance = ( 149 float(params["Theta_ymax"]), 150 self._unit_fetch("Theta_ymax")) 151 return output 147 self.send_to_output() 152 148 153 149 @staticmethod 154 150 def _insist_header(headers, name): 155 151 if name not in headers: 156 raise RuntimeError(152 raise FileContentsException( 157 153 "Missing {} column in spin echo data".format(name)) 158 154 -
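The SESANS reader rewrite above moves all of the work into get_file_contents(): key/value header lines are collected until BEGIN_DATA, the next line names the columns, np.loadtxt() reads the numeric block from the still-open handle, and missing error columns fall back to a 5% estimate. A condensed, self-contained sketch of that flow over an open text file (illustrative function name, no unit conversion or validation, and it assumes at least two data rows so loadtxt returns a 2-D array):

    import numpy as np


    def read_ses_columns(handle):
        # Header block: "<key> <value...>" lines, terminated by BEGIN_DATA.
        params = {}
        line = handle.readline()
        while line and not line.startswith("BEGIN_DATA"):
            terms = line.split()
            if len(terms) >= 2:
                params[terms[0]] = " ".join(terms[1:])
            line = handle.readline()

        # Column names come next; loadtxt consumes the remaining numeric rows.
        headers = handle.readline().split()
        data = np.loadtxt(handle)

        x = data[:, headers.index("SpinEchoLength")]
        y = data[:, headers.index("Depolarisation")]
        dy = data[:, headers.index("Depolarisation_error")]
        # Optional error column: fall back to a 5% estimate, as the reader does.
        if "SpinEchoLength_error" in headers:
            dx = data[:, headers.index("SpinEchoLength_error")]
        else:
            dx = x * 0.05
        return params, x, dx, y, dy

The shipped reader layers FileContentsException checks and nxsunit conversions on top of this before filling current_dataset and calling send_to_output().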
src/sas/sascalc/dataloader/readers/tiff_reader.py
r959eb01 r574adc7 2 2 #This software was developed by the University of Tennessee as part of the 3 3 #Distributed Data Analysis of Neutron Scattering Experiments (DANSE) 4 #project funded by the US National Science Foundation. 4 #project funded by the US National Science Foundation. 5 5 #See the license text in license.txt 6 6 #copyright 2008, University of Tennessee … … 31 31 ## Extension 32 32 ext = ['.tif', '.tiff'] 33 33 34 34 def read(self, filename=None): 35 35 """ 36 36 Open and read the data in a file 37 37 38 38 :param file: path of the file 39 39 """ … … 44 44 except: 45 45 msg = "tiff_reader: could not load file. Missing Image module." 46 raise RuntimeError , msg47 46 raise RuntimeError(msg) 47 48 48 # Instantiate data object 49 49 output = Data2D() 50 50 output.filename = os.path.basename(filename) 51 51 52 52 # Read in the image 53 53 try: 54 54 im = Image.open(filename) 55 55 except: 56 raise RuntimeError , "cannot open %s"%(filename)56 raise RuntimeError("cannot open %s"%(filename)) 57 57 data = im.getdata() 58 58 … … 61 61 output.err_data = np.zeros([im.size[0], im.size[1]]) 62 62 output.mask = np.ones([im.size[0], im.size[1]], dtype=bool) 63 63 64 64 # Initialize 65 65 x_vals = [] … … 69 69 for i_x in range(im.size[0]): 70 70 x_vals.append(i_x) 71 71 72 72 itot = 0 73 73 for i_y in range(im.size[1]): … … 80 80 logger.error("tiff_reader: had to skip a non-float point") 81 81 continue 82 82 83 83 # Get bin number 84 84 if math.fmod(itot, im.size[0]) == 0: … … 87 87 else: 88 88 i_x += 1 89 89 90 90 output.data[im.size[1] - 1 - i_y][i_x] = value 91 91 92 92 itot += 1 93 93 94 94 output.xbins = im.size[0] 95 95 output.ybins = im.size[1] … … 102 102 output.ymin = 0 103 103 output.ymax = im.size[0] - 1 104 104 105 105 # Store loading process information 106 106 output.meta_data['loader'] = self.type_name -
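The tiff_reader hunk above is almost entirely whitespace trimming plus the same raise-statement modernisation applied throughout this changeset; the form "raise RuntimeError, msg" is a SyntaxError on Python 3. An illustrative sketch of the resulting shape (the reader's actual Image import is outside this hunk, so Pillow is assumed here):

    def open_image(filename):
        # Sketch only -- not the full tiff_reader.read() implementation.
        try:
            from PIL import Image   # assumption: Pillow supplies the Image module
        except ImportError:
            msg = "tiff_reader: could not load file. Missing Image module."
            # Removed Python 2-only form:  raise RuntimeError, msg
            raise RuntimeError(msg)
        try:
            return Image.open(filename)
        except Exception:
            raise RuntimeError("cannot open %s" % filename)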
src/sas/sascalc/dataloader/readers/xml_reader.py
r235f514 r7b50f14 16 16 17 17 import logging 18 18 19 from lxml import etree 19 20 from lxml.builder import E 20 21 22 from ..file_reader_base_class import FileReader, decode 23 21 24 logger = logging.getLogger(__name__) 22 25 23 26 PARSER = etree.ETCompatXMLParser(remove_comments=True, remove_pis=False) 24 27 25 class XMLreader( ):28 class XMLreader(FileReader): 26 29 """ 27 30 Generic XML read and write class. Mostly helper functions. … … 74 77 except etree.XMLSyntaxError as xml_error: 75 78 logger.info(xml_error) 79 raise xml_error 76 80 except Exception: 77 81 self.xml = None … … 91 95 except etree.XMLSyntaxError as xml_error: 92 96 logger.info(xml_error) 93 except Exception: 97 raise xml_error 98 except Exception as exc: 94 99 self.xml = None 95 100 self.xmldoc = None 96 101 self.xmlroot = None 102 raise exc 97 103 98 104 def set_schema(self, schema): … … 130 136 first_error = schema.assertValid(self.xmldoc) 131 137 except etree.DocumentInvalid as err: 138 # Suppress errors for <'any'> elements 139 if "##other" in str(err): 140 return first_error 132 141 first_error = str(err) 133 142 return first_error … … 144 153 Converts an etree element into a string 145 154 """ 146 return etree.tostring(elem, pretty_print=pretty_print, \147 encoding=encoding)155 return decode(etree.tostring(elem, pretty_print=pretty_print, 156 encoding=encoding)) 148 157 149 158 def break_processing_instructions(self, string, dic): … … 206 215 Create a unique key value for any dictionary to prevent overwriting 207 216 Recurses until a unique key value is found. 208 217 209 218 :param dictionary: A dictionary with any number of entries 210 219 :param name: The index of the item to be added to dictionary … … 222 231 Create an element tree for processing from an etree element 223 232 224 :param root: etree Element(s) 233 :param root: etree Element(s) 225 234 """ 226 235 return etree.ElementTree(root)
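to_string() in xml_reader now routes etree.tostring() through the decode() helper imported from file_reader_base_class, because lxml returns bytes on Python 3 unless a text encoding is requested. A minimal sketch of the behaviour that wrapper needs; this decode() is a stand-in with the semantics the diff relies on, not necessarily the shipped implementation:

    from lxml import etree


    def decode(s):
        # Stand-in: pass str through unchanged, turn the bytes that
        # etree.tostring() returns on Python 3 into str.
        return s.decode() if isinstance(s, bytes) else s


    def to_string(elem, pretty_print=False, encoding=None):
        # Mirrors the patched helper: normalise lxml's output to str.
        return decode(etree.tostring(elem, pretty_print=pretty_print,
                                     encoding=encoding))


    root = etree.Element("SASroot")
    print(to_string(root, pretty_print=True))   # '<SASroot/>' as str, not b'...'

Without the wrapper, callers that mix the result with ordinary strings would raise TypeError under Python 3.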