Changeset b8080e1 in sasview for src/sas/sascalc/dataloader/file_reader_base_class.py
- Timestamp:
- Aug 29, 2018 8:01:23 AM (6 years ago)
- Branches:
- ESS_GUI, ESS_GUI_batch_fitting, ESS_GUI_bumps_abstraction, ESS_GUI_iss1116, ESS_GUI_iss879, ESS_GUI_opencl, ESS_GUI_ordering, ESS_GUI_sync_sascalc
- Children:
- 9463ca2
- Parents:
- ce30949
- git-author:
- Piotr Rozyczko <rozyczko@…> (08/29/18 07:59:56)
- git-committer:
- Piotr Rozyczko <rozyczko@…> (08/29/18 08:01:23)
- File:
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
src/sas/sascalc/dataloader/file_reader_base_class.py
r9e6aeaf rb8080e1 7 7 import os 8 8 import sys 9 import re9 import math 10 10 import logging 11 11 from abc import abstractmethod … … 26 26 return s.decode() if isinstance(s, bytes) else s 27 27 28 # Data 1D fields for iterative purposes 29 FIELDS_1D = ('x', 'y', 'dx', 'dy', 'dxl', 'dxw') 30 # Data 2D fields for iterative purposes 31 FIELDS_2D = ('data', 'qx_data', 'qy_data', 'q_data', 'err_data', 32 'dqx_data', 'dqy_data', 'mask') 33 DEPRECATION_MESSAGE = ("\rThe extension of this file suggests the data set migh" 34 "t not be fully reduced. Support for the reader associat" 35 "ed with this file type has been removed. An attempt to " 36 "load the file was made, but, should it be successful, " 37 "SasView cannot guarantee the accuracy of the data.") 38 28 39 class FileReader(object): 29 # List of Data1D and Data2D objects to be sent back to data_loader30 output = []31 # Current plottable_(1D/2D) object being loaded in32 current_dataset = None33 # Current DataInfo object being loaded in34 current_datainfo = None35 40 # String to describe the type of data this reader can load 36 41 type_name = "ASCII" … … 39 44 # List of allowed extensions 40 45 ext = ['.txt'] 46 # Deprecated extensions 47 deprecated_extensions = ['.asc', '.nxs'] 41 48 # Bypass extension check and try to load anyway 42 49 allow_all = False 43 50 # Able to import the unit converter 44 51 has_converter = True 45 # Open file handle46 f_open = None47 52 # Default value of zero 48 53 _ZERO = 1e-16 49 54 55 def __init__(self): 56 # List of Data1D and Data2D objects to be sent back to data_loader 57 self.output = [] 58 # Current plottable_(1D/2D) object being loaded in 59 self.current_dataset = None 60 # Current DataInfo object being loaded in 61 self.current_datainfo = None 62 # File path sent to reader 63 self.filepath = None 64 # Open file handle 65 self.f_open = None 66 50 67 def read(self, filepath): 51 68 """ … … 54 71 :param filepath: The full or relative path to a file to be loaded 55 72 """ 73 
self.filepath = filepath 56 74 if os.path.isfile(filepath): 57 75 basename, extension = os.path.splitext(os.path.basename(filepath)) … … 75 93 if not self.f_open.closed: 76 94 self.f_open.close() 95 if any(filepath.lower().endswith(ext) for ext in 96 self.deprecated_extensions): 97 self.handle_error_message(DEPRECATION_MESSAGE) 77 98 if len(self.output) > 0: 78 99 # Sort the data that's been loaded … … 85 106 86 107 # Return a list of parsed entries that data_loader can manage 87 return self.output 108 final_data = self.output 109 self.reset_state() 110 return final_data 111 112 def reset_state(self): 113 """ 114 Resets the class state to a base case when loading a new data file so previous 115 data files do not appear a second time 116 """ 117 self.current_datainfo = None 118 self.current_dataset = None 119 self.filepath = None 120 self.ind = None 121 self.output = [] 88 122 89 123 def nextline(self): … … 112 146 """ 113 147 Generic error handler to add an error to the current datainfo to 114 prop ogate the error up the error chain.148 propagate the error up the error chain. 
115 149 :param msg: Error message 116 150 """ … … 121 155 else: 122 156 logger.warning(msg) 157 raise NoKnownLoaderException(msg) 123 158 124 159 def send_to_output(self): … … 142 177 # Sort data by increasing x and remove 1st point 143 178 ind = np.lexsort((data.y, data.x)) 144 data.x = np.asarray([data.x[i] for i in ind]).astype(np.float64)145 data.y = np.asarray([data.y[i] for i in ind]).astype(np.float64)179 data.x = self._reorder_1d_array(data.x, ind) 180 data.y = self._reorder_1d_array(data.y, ind) 146 181 if data.dx is not None: 147 182 if len(data.dx) == 0: 148 183 data.dx = None 149 184 continue 150 data.dx = np.asarray([data.dx[i] for i in ind]).astype(np.float64)185 data.dx = self._reorder_1d_array(data.dx, ind) 151 186 if data.dxl is not None: 152 data.dxl = np.asarray([data.dxl[i] for i in ind]).astype(np.float64)187 data.dxl = self._reorder_1d_array(data.dxl, ind) 153 188 if data.dxw is not None: 154 data.dxw = np.asarray([data.dxw[i] for i in ind]).astype(np.float64)189 data.dxw = self._reorder_1d_array(data.dxw, ind) 155 190 if data.dy is not None: 156 191 if len(data.dy) == 0: 157 192 data.dy = None 158 193 continue 159 data.dy = np.asarray([data.dy[i] for i in ind]).astype(np.float64)194 data.dy = self._reorder_1d_array(data.dy, ind) 160 195 if data.lam is not None: 161 data.lam = np.asarray([data.lam[i] for i in ind]).astype(np.float64)196 data.lam = self._reorder_1d_array(data.lam, ind) 162 197 if data.dlam is not None: 163 data.dlam = np.asarray([data.dlam[i] for i in ind]).astype(np.float64) 198 data.dlam = self._reorder_1d_array(data.dlam, ind) 199 data = self._remove_nans_in_data(data) 164 200 if len(data.x) > 0: 165 201 data.xmin = np.min(data.x) … … 167 203 data.ymin = np.min(data.y) 168 204 data.ymax = np.max(data.y) 205 206 @staticmethod 207 def _reorder_1d_array(array, ind): 208 """ 209 Reorders a 1D array based on the indices passed as ind 210 :param array: Array to be reordered 211 :param ind: Indices used to reorder array 212 
:return: reordered array 213 """ 214 array = np.asarray(array, dtype=np.float64) 215 return array[ind] 216 217 @staticmethod 218 def _remove_nans_in_data(data): 219 """ 220 Remove data points where nan is loaded 221 :param data: 1D or 2D data object 222 :return: data with nan points removed 223 """ 224 if isinstance(data, Data1D): 225 fields = FIELDS_1D 226 elif isinstance(data, Data2D): 227 fields = FIELDS_2D 228 else: 229 return data 230 # Make array of good points - all others will be removed 231 good = np.isfinite(getattr(data, fields[0])) 232 for name in fields[1:]: 233 array = getattr(data, name) 234 if array is not None: 235 # Update good points only if not already changed 236 good &= np.isfinite(array) 237 if not np.all(good): 238 for name in fields: 239 array = getattr(data, name) 240 if array is not None: 241 setattr(data, name, array[good]) 242 return data 169 243 170 244 def sort_two_d_data(self): … … 197 271 dataset.x_bins = dataset.qx_data[:int(n_cols)] 198 272 dataset.data = dataset.data.flatten() 273 dataset = self._remove_nans_in_data(dataset) 199 274 if len(dataset.data) > 0: 200 275 dataset.xmin = np.min(dataset.qx_data) … … 314 389 def splitline(line): 315 390 """ 316 Splits a line into pieces based on common delim eters391 Splits a line into pieces based on common delimiters 317 392 :param line: A single line of text 318 393 :return: list of values
Note: See TracChangeset for help on using the changeset viewer.