Changeset 0639476 in sasview for src/sas/sascalc/dataloader
- Timestamp: Oct 6, 2016 12:21:22 PM (8 years ago)
- Branches: master, ESS_GUI, ESS_GUI_Docs, ESS_GUI_batch_fitting, ESS_GUI_bumps_abstraction, ESS_GUI_iss1116, ESS_GUI_iss879, ESS_GUI_iss959, ESS_GUI_opencl, ESS_GUI_ordering, ESS_GUI_sync_sascalc, costrafo411, magnetic_scatt, release-4.1.1, release-4.1.2, release-4.2.2, ticket-1009, ticket-1094-headless, ticket-1242-2d-resolution, ticket-1243, ticket-1249, ticket885, unittest-saveload
- Children: 8d3d20a
- Parents: 3a22ce7 (diff), 5e906207 (diff)
  Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- git-author: Andrew Jackson <andrew.jackson@…> (10/06/16 12:21:22)
- git-committer: GitHub <noreply@…> (10/06/16 12:21:22)
- Location: src/sas/sascalc/dataloader/readers
- Files: 5 edited
Legend: unmodified lines have no prefix; added lines are prefixed with +; removed lines with -.
src/sas/sascalc/dataloader/readers/cansas_reader.py (r654e8e0 → r0639476)

             written = written | self.write_node(pix, "z", item.pixel_size.z,
                                                 {"unit": item.pixel_size_unit})
    -        written = written | self.write_node(det, "slit_length",
    -                                            item.slit_length,
    -                                            {"unit": item.slit_length_unit})
             if written == True:
                 self.append(pix, det)
    +        self.write_node(det, "slit_length", item.slit_length,
    +                        {"unit": item.slit_length_unit})

         def _write_process_notes(self, datainfo, entry_node):
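The move matters because `write_node` reports whether it actually wrote anything, and that flag decides whether the pixel-size element is appended to the detector node; slit length is detector-level metadata, so it no longer participates in that decision. A minimal sketch of the pattern, with an illustrative `write_node` body (the real SasView helper differs):

    from xml.etree import ElementTree as ET

    def write_node(parent, name, value, attrs=None):
        # Only create the child element when there is a value to record,
        # and report whether anything was written.
        if value is None:
            return False
        node = ET.SubElement(parent, name, attrs or {})
        node.text = str(value)
        return True

    # Usage mirroring the diff: pixel sizes gate the append; slit_length
    # is written onto the detector node afterwards, independently.
    det = ET.Element("SASdetector")
    pix = ET.Element("pixel_size")
    written = write_node(pix, "x", 5.0, {"unit": "mm"})
    written = written | write_node(pix, "y", 5.0, {"unit": "mm"})
    if written:
        det.append(pix)
    write_node(det, "slit_length", None, {"unit": "mm"})  # skipped: no value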
src/sas/sascalc/dataloader/readers/cansas_reader_HDF5.py (r479799c → r5e906207)

         Any number of SASdata sets may be present in a SASentry and the data within can be either 1D I(Q) or 2D I(Qx, Qy).
    +
    +    Also supports reading NXcanSAS formatted HDF5 files

         :Dependencies:
     …
             ## Add the last data set to the list of outputs
             self.add_data_set()
    +        ## Close the data file
    +        self.raw_data.close()
             ## Return data set(s)
             return self.output
     …
             elif key == u'run':
                 self.current_datainfo.run.append(data_point)
    +            try:
    +                run_name = value.attrs['name']
    +                run_dict = {data_point: run_name}
    +                self.current_datainfo.run_name = run_dict
    +            except:
    +                pass
             elif key == u'title':
                 self.current_datainfo.title = data_point
     …
             ## Sample Information
    -        elif key == u'Title' and self.parent_class == u'SASsample':
    +        elif key == u'Title' and self.parent_class == u'SASsample':  # CanSAS 2.0 format
    +            self.current_datainfo.sample.name = data_point
    +        elif key == u'ID' and self.parent_class == u'SASsample':  # NXcanSAS format
                 self.current_datainfo.sample.name = data_point
             elif key == u'thickness' and self.parent_class == u'SASsample':
     …
             elif key == u'temperature' and self.parent_class == u'SASsample':
                 self.current_datainfo.sample.temperature = data_point
    +        elif key == u'transmission' and self.parent_class == u'SASsample':
    +            self.current_datainfo.sample.transmission = data_point
    +        elif key == u'x_position' and self.parent_class == u'SASsample':
    +            self.current_datainfo.sample.position.x = data_point
    +        elif key == u'y_position' and self.parent_class == u'SASsample':
    +            self.current_datainfo.sample.position.y = data_point
    +        elif key == u'polar_angle' and self.parent_class == u'SASsample':
    +            self.current_datainfo.sample.orientation.x = data_point
    +        elif key == u'azimuthal_angle' and self.parent_class == u'SASsample':
    +            self.current_datainfo.sample.orientation.z = data_point
    +        elif key == u'details' and self.parent_class == u'SASsample':
    +            self.current_datainfo.sample.details.append(data_point)

             ## Instrumental Information
     …
                 self.detector.distance = float(data_point)
                 self.detector.distance_unit = unit
    +        elif key == u'slit_length' and self.parent_class == u'SASdetector':
    +            self.detector.slit_length = float(data_point)
    +            self.detector.slit_length_unit = unit
    +        elif key == u'x_position' and self.parent_class == u'SASdetector':
    +            self.detector.offset.x = float(data_point)
    +            self.detector.offset_unit = unit
    +        elif key == u'y_position' and self.parent_class == u'SASdetector':
    +            self.detector.offset.y = float(data_point)
    +            self.detector.offset_unit = unit
    +        elif key == u'polar_angle' and self.parent_class == u'SASdetector':
    +            self.detector.orientation.x = float(data_point)
    +            self.detector.orientation_unit = unit
    +        elif key == u'azimuthal_angle' and self.parent_class == u'SASdetector':
    +            self.detector.orientation.z = float(data_point)
    +            self.detector.orientation_unit = unit
    +        elif key == u'beam_center_x' and self.parent_class == u'SASdetector':
    +            self.detector.beam_center.x = float(data_point)
    +            self.detector.beam_center_unit = unit
    +        elif key == u'beam_center_y' and self.parent_class == u'SASdetector':
    +            self.detector.beam_center.y = float(data_point)
    +            self.detector.beam_center_unit = unit
    +        elif key == u'x_pixel_size' and self.parent_class == u'SASdetector':
    +            self.detector.pixel_size.x = float(data_point)
    +            self.detector.pixel_size_unit = unit
    +        elif key == u'y_pixel_size' and self.parent_class == u'SASdetector':
    +            self.detector.pixel_size.y = float(data_point)
    +            self.detector.pixel_size_unit = unit
             elif key == u'SSD' and self.parent_class == u'SAScollimation':
                 self.collimation.length = data_point
     …
             elif key == u'name' and self.parent_class == u'SASprocess':
                 self.process.name = data_point
    -        elif key == u'Title' and self.parent_class == u'SASprocess':
    +        elif key == u'Title' and self.parent_class == u'SASprocess':  # CanSAS 2.0 format
    +            self.process.name = data_point
    +        elif key == u'name' and self.parent_class == u'SASprocess':  # NXcanSAS format
                 self.process.name = data_point
             elif key == u'description' and self.parent_class == u'SASprocess':
     …
                 self.trans_spectrum.wavelength.append(data_point)

    -        ## Other Information
    +        ## Source
             elif key == u'wavelength' and self.parent_class == u'SASdata':
                 self.current_datainfo.source.wavelength = data_point
    -            self.current_datainfo.source.wavelength.unit = unit
    +            self.current_datainfo.source.wavelength_unit = unit
    +        elif key == u'incident_wavelength' and self.parent_class == u'SASsource':
    +            self.current_datainfo.source.wavelength = data_point
    +            self.current_datainfo.source.wavelength_unit = unit
    +        elif key == u'wavelength_max' and self.parent_class == u'SASsource':
    +            self.current_datainfo.source.wavelength_max = data_point
    +            self.current_datainfo.source.wavelength_max_unit = unit
    +        elif key == u'wavelength_min' and self.parent_class == u'SASsource':
    +            self.current_datainfo.source.wavelength_min = data_point
    +            self.current_datainfo.source.wavelength_min_unit = unit
    +        elif key == u'wavelength_spread' and self.parent_class == u'SASsource':
    +            self.current_datainfo.source.wavelength_spread = data_point
    +            self.current_datainfo.source.wavelength_spread_unit = unit
    +        elif key == u'beam_size_x' and self.parent_class == u'SASsource':
    +            self.current_datainfo.source.beam_size.x = data_point
    +            self.current_datainfo.source.beam_size_unit = unit
    +        elif key == u'beam_size_y' and self.parent_class == u'SASsource':
    +            self.current_datainfo.source.beam_size.y = data_point
    +            self.current_datainfo.source.beam_size_unit = unit
    +        elif key == u'beam_shape' and self.parent_class == u'SASsource':
    +            self.current_datainfo.source.beam_shape = data_point
             elif key == u'radiation' and self.parent_class == u'SASsource':
                 self.current_datainfo.source.radiation = data_point
     …
             self.current_datainfo = DataInfo()

    +
         def _initialize_new_data_set(self, parent_list = None):
             """
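Two themes run through this diff: the reader now closes its HDF5 handle once the output list is built, and the key-to-metadata `elif` chain grows the NXcanSAS names (detector geometry, source wavelength limits, beam size and shape). A minimal sketch of the open/traverse/close pattern, assuming `h5py`; the function and its flat mapping are illustrative, not the SasView reader itself:

    import h5py

    def read_nxcansas(filename):
        # Collect dataset values keyed by their leaf name, in the spirit
        # of the reader's elif chain (e.g. 'incident_wavelength' under
        # SASsource maps to source.wavelength).
        metadata = {}

        def visit(name, obj):
            if isinstance(obj, h5py.Dataset):
                key = name.split('/')[-1]
                metadata[key] = obj[()]

        raw_data = h5py.File(filename, 'r')
        try:
            raw_data.visititems(visit)
        finally:
            # Mirrors the added self.raw_data.close(): without it the
            # file handle stays open after loading.
            raw_data.close()
        return metadata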
src/sas/sascalc/dataloader/readers/anton_paar_saxs_reader.py (r80c5d46 → ra235f715)

         output = None

    -    def __init__(self):
    +    def reset_state(self):
             self.current_dataset = Data1D(np.empty(0), np.empty(0),
                                           np.empty(0), np.empty(0))
     …
             ## Reinitialize the class when loading a new data file to reset all class variables
    -        self.__init__()
    +        self.reset_state()
             ## Check that the file exists
             if os.path.isfile(filename):
     …
                 self.raw_data = buff.splitlines()
                 self.read_data()
    -            xml_intermediate = self.raw_data[self.upper:]
    -            xml = ''.join(xml_intermediate)
    -            self.set_xml_file(xml)
             return self.output
     …
             self.lower = 5
             self.upper = self.lower + self.data_points
    -        self.detector.distance = float(line4[1])
    +        self.source.radiation = 'x-ray'
    +        normal = float(line4[3])
             self.current_dataset.source.radiation = "x-ray"
             self.current_dataset.source.name = "Anton Paar SAXSess Instrument"
             self.current_dataset.source.wavelength = float(line4[4])
    -        normal = line4[3]
    +        xvals = []
    +        yvals = []
    +        dyvals = []
             for i in range(self.lower, self.upper):
    +            index = i - self.lower
                 data = self.raw_data[i].split()
    -            x_val = [float(data[0])]
    -            y_val = [float(data[1])]
    -            dy_val = [float(data[2])]
    -            self.current_dataset.x = np.append(self.current_dataset.x, x_val)
    -            self.current_dataset.y = np.append(self.current_dataset.y, y_val)
    -            self.current_dataset.dy = np.append(self.current_dataset.dy, dy_val)
    -        self.current_dataset.xaxis("Q (%s)" % (q_unit), q_unit)
    -        self.current_dataset.yaxis("Intensity (%s)" % (i_unit), i_unit)
    -        self.current_dataset.detector.append(self.detector)
    +            xvals.insert(index, normal * float(data[0]))
    +            yvals.insert(index, normal * float(data[1]))
    +            dyvals.insert(index, normal * float(data[2]))
    +        self.current_dataset.x = np.append(self.current_dataset.x, xvals)
    +        self.current_dataset.y = np.append(self.current_dataset.y, yvals)
    +        self.current_dataset.dy = np.append(self.current_dataset.dy, dyvals)
    +        if self.data_points != self.current_dataset.x.size:
    +            self.errors.add("Not all data was loaded properly.")
    +        if self.current_dataset.dx.size != self.current_dataset.x.size:
    +            dxvals = np.zeros(self.current_dataset.x.size)
    +            self.current_dataset.dx = dxvals
    +        if self.current_dataset.x.size != self.current_dataset.y.size:
    +            self.errors.add("The x and y data sets are not the same size.")
    +        if self.current_dataset.y.size != self.current_dataset.dy.size:
    +            self.errors.add("The y and dy datasets are not the same size.")
    +        self.current_dataset.errors = self.errors
    +        self.current_dataset.xaxis("Q", q_unit)
    +        self.current_dataset.yaxis("Intensity", i_unit)
    +        xml_intermediate = self.raw_data[self.upper:]
    +        xml = ''.join(xml_intermediate)
    +        self.set_xml_string(xml)
    +        dom = self.xmlroot.xpath('/fileinfo')
    +        self._parse_child(dom)
             self.output.append(self.current_dataset)
    +
    +    def _parse_child(self, dom, parent=''):
    +        """
    +        Recursive method for stepping through the embedded XML
    +        :param dom: XML node with or without children
    +        """
    +        for node in dom:
    +            tagname = node.tag
    +            value = node.text
    +            attr = node.attrib
    +            key = attr.get("key", '')
    +            if len(node.getchildren()) > 1:
    +                self._parse_child(node, key)
    +                if key == "SampleDetector":
    +                    self.current_dataset.detector.append(self.detector)
    +                    self.detector = Detector()
    +            else:
    +                if key == "value":
    +                    if parent == "Wavelength":
    +                        self.current_dataset.source.wavelength = value
    +                    elif parent == "SampleDetector":
    +                        self.detector.distance = value
    +                    elif parent == "Temperature":
    +                        self.current_dataset.sample.temperature = value
    +                    elif parent == "CounterSlitLength":
    +                        self.detector.slit_length = value
    +                elif key == "unit":
    +                    value = value.replace("_", "")
    +                    if parent == "Wavelength":
    +                        self.current_dataset.source.wavelength_unit = value
    +                    elif parent == "SampleDetector":
    +                        self.detector.distance_unit = value
    +                    elif parent == "X":
    +                        self.current_dataset.xaxis(self.current_dataset._xaxis, value)
    +                    elif parent == "Y":
    +                        self.current_dataset.yaxis(self.current_dataset._yaxis, value)
    +                    elif parent == "Temperature":
    +                        self.current_dataset.sample.temperature_unit = value
    +                    elif parent == "CounterSlitLength":
    +                        self.detector.slit_length_unit = value
    +                elif key == "quantity":
    +                    if parent == "X":
    +                        self.current_dataset.xaxis(value, self.current_dataset._xunit)
    +                    elif parent == "Y":
    +                        self.current_dataset.yaxis(value, self.current_dataset._yunit)
src/sas/sascalc/dataloader/readers/ascii_reader.py (rb699768 → r7d94915)

        ## File type
        type_name = "ASCII"

        ## Wildcards
        type = ["ASCII files (*.txt)|*.txt",
     …
        ## List of allowed extensions
        ext = ['.txt', '.TXT', '.dat', '.DAT', '.abs', '.ABS', 'csv', 'CSV']

        ## Flag to bypass extension check
        allow_all = True

        def read(self, path):
            """
            Load data file

            :param path: file path
    -
            :return: Data1D object, or None

            :raise RuntimeError: when the file can't be opened
            :raise ValueError: when the length of the data vectors are inconsistent
     …
            try:
                # Read in binary mode since GRASP frequently has no-ascii
    -           # characters that brakes the open operation
    +           # characters that breaks the open operation
                input_f = open(path,'rb')
            except:
     …
            buff = input_f.read()
            lines = buff.splitlines()
    -
    -       x = numpy.zeros(0)
    -       y = numpy.zeros(0)
    -       dy = numpy.zeros(0)
    -       dx = numpy.zeros(0)
    -
    -       #temp. space to sort data
    +
    +       # Arrays for data storage
            tx = numpy.zeros(0)
            ty = numpy.zeros(0)
            tdy = numpy.zeros(0)
            tdx = numpy.zeros(0)
    -
    -       output = Data1D(x, y, dy=dy, dx=dx)
    -       self.filename = output.filename = basename
    -
    -       data_conv_q = None
    -       data_conv_i = None
    -
    -       if has_converter == True and output.x_unit != '1/A':
    -           data_conv_q = Converter('1/A')
    -           # Test it
    -           data_conv_q(1.0, output.x_unit)
    -
    -       if has_converter == True and output.y_unit != '1/cm':
    -           data_conv_i = Converter('1/cm')
    -           # Test it
    -           data_conv_i(1.0, output.y_unit)

            # The first good line of data will define whether
            # we have 2-column or 3-column ascii
            has_error_dx = None
            has_error_dy = None

            #Initialize counters for data lines and header lines.
    -       is_data = False  # Has more than 5 lines
    +       is_data = False
            # More than "5" lines of data is considered as actual
            # data unless that is the only data
    -       mum_data_lines = 5
    +       min_data_pts = 5
            # To count # of current data candidate lines
    -       i = -1
    +       candidate_lines = 0
            # To count total # of previous data candidate lines
    -       i1 = -1
    -       # To count # of header lines
    -       j = -1
    -       # Helps to count # of header lines
    -       j1 = -1
    -       #minimum required number of columns of data; ( <= 4).
    +       candidate_lines_previous = 0
    +       #minimum required number of columns of data
            lentoks = 2
            for line in lines:
    -           # Initial try for CSV (split on ,)
    -           toks = line.split(',')
    -           # Now try SCSV (split on ;)
    -           if len(toks) < 2:
    -               toks = line.split(';')
    -           # Now go for whitespace
    -           if len(toks) < 2:
    -               toks = line.split()
    +           toks = self.splitline(line)
    +           # To remember the # of columns in the current line of data
    +           new_lentoks = len(toks)
                try:
    +               if new_lentoks == 1 and not is_data:
    +                   ## If only one item in list, no longer data
    +                   raise ValueError
    +               elif new_lentoks == 0:
    +                   ## If the line is blank, skip and continue on
    +                   ## In case of breaks within data sets.
    +                   continue
    +               elif new_lentoks != lentoks and is_data:
    +                   ## If a footer is found, break the loop and save the data
    +                   break
    +               elif new_lentoks != lentoks and not is_data:
    +                   ## If header lines are numerical
    +                   candidate_lines = 0
    +                   candidate_lines_previous = 0
    +
                    #Make sure that all columns are numbers.
                    for colnum in range(len(toks)):
    +                   # Any non-floating point values throw ValueError
                        float(toks[colnum])

    +               candidate_lines += 1
                    _x = float(toks[0])
                    _y = float(toks[1])
    -
    -               #Reset the header line counters
    -               if j == j1:
    -                   j = 0
    -                   j1 = 0
    -
    -               if i > 1:
    +               _dx = None
    +               _dy = None
    +
    +               #If 5 or more lines, this is considering the set data
    +               if candidate_lines >= min_data_pts:
                        is_data = True
    -
    -               if data_conv_q is not None:
    -                   _x = data_conv_q(_x, units=output.x_unit)
    -
    -               if data_conv_i is not None:
    -                   _y = data_conv_i(_y, units=output.y_unit)
    -
    -               # If we have an extra token, check
    -               # whether it can be interpreted as a
    -               # third column.
    -               _dy = None
    -               if len(toks) > 2:
    -                   try:
    -                       _dy = float(toks[2])
    -                       if data_conv_i is not None:
    -                           _dy = data_conv_i(_dy, units=output.y_unit)
    -                   except:
    -                       # The third column is not a float, skip it.
    -                       pass
    -
    -               # If we haven't set the 3rd column
    -               # flag, set it now.
    -               if has_error_dy == None:
    -                   has_error_dy = False if _dy == None else True
    -
    -               #Check for dx
    -               _dx = None
    -               if len(toks) > 3:
    -                   try:
    -                       _dx = float(toks[3])
    -                       if data_conv_i is not None:
    -                           _dx = data_conv_i(_dx, units=output.x_unit)
    -                   except:
    -                       # The 4th column is not a float, skip it.
    -                       pass
    -
    -               # If we haven't set the 3rd column
    -               # flag, set it now.
    -               if has_error_dx == None:
    -                   has_error_dx = False if _dx == None else True
    -
    -               #After talked with PB, we decided to take care of only
    -               # 4 columns of data for now.
    -               #number of columns in the current line
    -               #To remember the # of columns in the current
    -               #line of data
    -               new_lentoks = len(toks)
    -
    -               #If the previous columns not equal to the current,
    -               #mark the previous as non-data and reset the dependents.
    -               if lentoks != new_lentoks:
    -                   if is_data == True:
    -                       break
    -                   else:
    -                       i = -1
    -                       i1 = 0
    -                       j = -1
    -                       j1 = -1
    -
    -               #Delete the previously stored lines of data candidates
    -               # if is not data.
    -               if i < 0 and -1 < i1 < mum_data_lines and \
    -                   is_data == False:
    -                   try:
    -                       x = numpy.zeros(0)
    -                       y = numpy.zeros(0)
    -                   except:
    -                       pass
    -
    -               x = numpy.append(x, _x)
    -               y = numpy.append(y, _y)
    -
    -               if has_error_dy == True:
    -                   #Delete the previously stored lines of
    -                   # data candidates if is not data.
    -                   if i < 0 and -1 < i1 < mum_data_lines and \
    -                       is_data == False:
    -                       try:
    -                           dy = numpy.zeros(0)
    -                       except:
    -                           pass
    -                   dy = numpy.append(dy, _dy)
    -
    -               if has_error_dx == True:
    -                   #Delete the previously stored lines of
    -                   # data candidates if is not data.
    -                   if i < 0 and -1 < i1 < mum_data_lines and \
    -                       is_data == False:
    -                       try:
    -                           dx = numpy.zeros(0)
    -                       except:
    -                           pass
    -                   dx = numpy.append(dx, _dx)
    -
    -               #Same for temp.
    -               #Delete the previously stored lines of data candidates
    -               # if is not data.
    -               if i < 0 and -1 < i1 < mum_data_lines and\
    +
    +               # If a 3rd row is present, consider it dy
    +               if new_lentoks > 2:
    +                   _dy = float(toks[2])
    +                   has_error_dy = False if _dy == None else True
    +
    +               # If a 4th row is present, consider it dx
    +               if new_lentoks > 3:
    +                   _dx = float(toks[3])
    +                   has_error_dx = False if _dx == None else True
    +
    +               # Delete the previously stored lines of data candidates if
    +               # the list is not data
    +               if candidate_lines == 1 and -1 < candidate_lines_previous < min_data_pts and \
                        is_data == False:
                        try:
                            tx = numpy.zeros(0)
                            ty = numpy.zeros(0)
    +                       tdy = numpy.zeros(0)
    +                       tdx = numpy.zeros(0)
                        except:
                            pass

    +               if has_error_dy == True:
    +                   tdy = numpy.append(tdy, _dy)
    +               if has_error_dx == True:
    +                   tdx = numpy.append(tdx, _dx)
                    tx = numpy.append(tx, _x)
                    ty = numpy.append(ty, _y)
    -
    -               if has_error_dy == True:
    -                   #Delete the previously stored lines of
    -                   # data candidates if is not data.
    -                   if i < 0 and -1 < i1 < mum_data_lines and \
    -                       is_data == False:
    -                       try:
    -                           tdy = numpy.zeros(0)
    -                       except:
    -                           pass
    -                   tdy = numpy.append(tdy, _dy)
    -               if has_error_dx == True:
    -                   #Delete the previously stored lines of
    -                   # data candidates if is not data.
    -                   if i < 0 and -1 < i1 < mum_data_lines and \
    -                       is_data == False:
    -                       try:
    -                           tdx = numpy.zeros(0)
    -                       except:
    -                           pass
    -                   tdx = numpy.append(tdx, _dx)
    -
    -               #reset i1 and flag lentoks for the next
    -               if lentoks < new_lentoks:
    -                   if is_data == False:
    -                       i1 = -1
    +
                    #To remember the # of columns on the current line
                    # for the next line of data
    -               lentoks = len(toks)
    -
    -               #Reset # of header lines and counts #
    -               # of data candidate lines
    -               if j == 0 and j1 == 0:
    -                   i1 = i + 1
    -               i += 1
    -           except:
    +               lentoks = new_lentoks
    +               candidate_lines_previous = candidate_lines
    +           except ValueError:
                    # It is data and meet non - number, then stop reading
                    if is_data == True:
                        break
                    lentoks = 2
    -               #Counting # of header lines
    -               j += 1
    -               if j == j1 + 1:
    -                   j1 = j
    -               else:
    -                   j = -1
    +               has_error_dx = None
    +               has_error_dy = None
                    #Reset # of lines of data candidates
    -               i = -1
    -
    -               # Couldn't parse this line, skip it
    +               candidate_lines = 0
    +           except:
                    pass

            input_f.close()
    +       if not is_data:
    +           return None
            # Sanity check
    -       if has_error_dy == True and not len(y) == len(dy):
    +       if has_error_dy == True and not len(ty) == len(tdy):
                msg = "ascii_reader: y and dy have different length"
                raise RuntimeError, msg
    -       if has_error_dx == True and not len(x) == len(dx):
    +       if has_error_dx == True and not len(tx) == len(tdx):
                msg = "ascii_reader: y and dy have different length"
                raise RuntimeError, msg
            # If the data length is zero, consider this as
            # though we were not able to read the file.
    -       if len(x) == 0:
    +       if len(tx) == 0:
                raise RuntimeError, "ascii_reader: could not load file"

            #Let's re-order the data to make cal.
            # curve look better some cases
            ind = numpy.lexsort((ty, tx))
    +       x = numpy.zeros(len(tx))
    +       y = numpy.zeros(len(ty))
    +       dy = numpy.zeros(len(tdy))
    +       dx = numpy.zeros(len(tdx))
    +       output = Data1D(x, y, dy=dy, dx=dx)
    +       self.filename = output.filename = basename
    +
            for i in ind:
                x[i] = tx[ind[i]]
     …
            output.dx = dx[x != 0] if has_error_dx == True\
                else numpy.zeros(len(output.x))
    -
    -       if data_conv_q is not None:
    -           output.xaxis("\\rm{Q}", output.x_unit)
    -       else:
    -           output.xaxis("\\rm{Q}", 'A^{-1}')
    -       if data_conv_i is not None:
    -           output.yaxis("\\rm{Intensity}", output.y_unit)
    -       else:
    -           output.yaxis("\\rm{Intensity}", "cm^{-1}")
    +
    +       output.xaxis("\\rm{Q}", 'A^{-1}')
    +       output.yaxis("\\rm{Intensity}", "cm^{-1}")

            # Store loading process information
            output.meta_data['loader'] = self.type_name
     …
                raise RuntimeError, "%s is empty" % path
            return output

            else:
                raise RuntimeError, "%s is not a file" % path
            return None
    +
    +   def splitline(self, line):
    +       """
    +       Splits a line into pieces based on common delimeters
    +       :param line: A single line of text
    +       :return: list of values
    +       """
    +       # Initial try for CSV (split on ,)
    +       toks = line.split(',')
    +       # Now try SCSV (split on ;)
    +       if len(toks) < 2:
    +           toks = line.split(';')
    +       # Now go for whitespace
    +       if len(toks) < 2:
    +           toks = line.split()
    +       return toks
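The new `splitline` helper centralizes the delimiter fallback (comma, then semicolon, then whitespace) that the old loop inlined. Assuming `reader` is an instance of this Reader, it behaves like:

    >>> reader.splitline("0.001,0.562,0.003")   # CSV
    ['0.001', '0.562', '0.003']
    >>> reader.splitline("0.001;0.562;0.003")   # semicolon-separated
    ['0.001', '0.562', '0.003']
    >>> reader.splitline("0.001 0.562 0.003")   # whitespace-separated
    ['0.001', '0.562', '0.003']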
src/sas/sascalc/dataloader/readers/xml_reader.py (rb699768 → ra235f715)

                 self.xmldoc = etree.parse(self.xml, parser=PARSER)
                 self.xmlroot = self.xmldoc.getroot()
    +        except etree.XMLSyntaxError as xml_error:
    +            logging.info(xml_error)
    +        except Exception:
    +            self.xml = None
    +            self.xmldoc = None
    +            self.xmlroot = None
    +
    +    def set_xml_string(self, tag_soup):
    +        """
    +        Set an XML string as the working XML.
    +
    +        :param tag_soup: XML formatted string
    +        """
    +        try:
    +            self.xml = tag_soup
    +            self.xmldoc = tag_soup
    +            self.xmlroot = etree.fromstring(tag_soup)
             except etree.XMLSyntaxError as xml_error:
                 logging.info(xml_error)
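`set_xml_string` complements `set_xml_file`: lxml's `etree.parse` expects a filename or file-like source, while the Anton Paar reader has its XML footer as an in-memory string, which is the case `etree.fromstring` handles. A minimal sketch (the sample markup is illustrative):

    from lxml import etree

    tag_soup = "<fileinfo><group key='Wavelength'>...</group></fileinfo>"

    # From a string already in memory (what set_xml_string wraps)
    xmlroot = etree.fromstring(tag_soup)

    # From a file on disk (what set_xml_file wraps):
    # xmldoc = etree.parse("data.xml")
    # xmlroot = xmldoc.getroot()

    print(xmlroot.tag)  # fileinfo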
Note: See TracChangeset for help on using the changeset viewer.