Changeset 7477fb9 in sasview for src/sas/sascalc
- Timestamp: Jul 27, 2017, 4:38:11 AM (7 years ago)
- Branches: master, ESS_GUI, ESS_GUI_Docs, ESS_GUI_batch_fitting, ESS_GUI_bumps_abstraction, ESS_GUI_iss1116, ESS_GUI_iss879, ESS_GUI_iss959, ESS_GUI_opencl, ESS_GUI_ordering, ESS_GUI_sync_sascalc, costrafo411, magnetic_scatt, release-4.2.2, ticket-1009, ticket-1094-headless, ticket-1242-2d-resolution, ticket-1243, ticket-1249, ticket885, unittest-saveload
- Children: 0b79323
- Parents: bc570f4
- Location: src/sas/sascalc/dataloader
- Files: 2 edited
src/sas/sascalc/dataloader/file_reader_base_class.py
(diff from bc570f4 to 7477fb9)
…
         for data in self.output:
             if isinstance(data, Data1D):
+                # Sort data by increasing x and remove 1st point
                 ind = np.lexsort((data.y, data.x))
+                ind = ind[1:]  # Remove 1st point (Q, I) = (0, 0)
                 data.x = np.asarray([data.x[i] for i in ind])
                 data.y = np.asarray([data.y[i] for i in ind])
…
                 if data.dlam is not None:
                     data.dlam = np.asarray([data.dlam[i] for i in ind])
+                data.xmin = np.min(data.x)
+                data.xmax = np.max(data.x)
+                data.ymin = np.min(data.y)
+                data.ymax = np.max(data.y)
                 final_list.append(data)
         self.output = final_list
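The sorting change above is easiest to see in isolation. Below is a minimal standalone sketch of what the new lines do, using plain NumPy arrays in place of a Data1D object's x and y (the variable names are illustrative only, not part of SasView):

    import numpy as np

    # Stand-ins for data.x and data.y; the first point is a spurious (0, 0)
    x = np.array([0.0, 0.3, 0.1, 0.2])
    y = np.array([0.0, 9.0, 4.0, 7.0])

    # np.lexsort treats its LAST key as the primary key, so this orders the
    # points by increasing x, with y breaking any ties
    ind = np.lexsort((y, x))

    # Drop the first sorted index; per the commit comment this discards the
    # leading (Q, I) = (0, 0) point that some raw files contain
    ind = ind[1:]
    x, y = x[ind], y[ind]

    # Cache the plot bounds, as the new code does for xmin/xmax/ymin/ymax
    xmin, xmax = np.min(x), np.max(x)
    ymin, ymax = np.min(y), np.max(y)
    print(x, y)   # -> [0.1 0.2 0.3] [4. 7. 9.]

Note that the slice removes whichever point sorts first, so it assumes the (0, 0) point is present and sorts ahead of all real data.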
src/sas/sascalc/dataloader/readers/cansas_reader.py
(diff from bc570f4 to 7477fb9)
-"""
-    CanSAS data reader - new recursive cansas_version.
-"""
-############################################################################
-#This software was developed by the University of Tennessee as part of the
-#Distributed Data Analysis of Neutron Scattering Experiments (DANSE)
-#project funded by the US National Science Foundation.
-#If you use DANSE applications to do scientific research that leads to
-#publication, we ask that you acknowledge the use of the software with the
-#following sentence:
-#This work benefited from DANSE software developed under NSF award DMR-0520547.
-#copyright 2008,2009 University of Tennessee
-#############################################################################
-
 import logging
 import numpy as np
…
 from xml.dom.minidom import parseString
 
+from lxml import etree
+
 logger = logging.getLogger(__name__)
 
…
 
 class Reader(XMLreader):
-    """
-    Class to load cansas 1D XML files
-
-    :Dependencies:
-        The CanSAS reader requires PyXML 0.8.4 or later.
-    """
-    # CanSAS version - defaults to version 1.0
     cansas_version = "1.0"
     base_ns = "{cansas1d/1.0}"
…
     def get_file_contents(self, xml_file=None, schema_path="", invalid=True):
-        """
-        Validate and read in an xml_file file in the canSAS format.
-
-        :param xml_file: A canSAS file path in proper XML format
-        :param schema_path: A file path to an XML schema to validate the xml_file against
-        """
-        # For every file loaded, reset everything to a base state
+        # Reset everything since we're loading a new file
         self.reset_state()
         self.invalid = invalid
-        # We don't use f_open since libxml handles opening/closing files
         if xml_file is None:
             xml_file = self.f_open.name
+        # We don't use f_open since lxml handles opening/closing files
         if not self.f_open.closed:
             self.f_open.close()
+
         basename, _ = os.path.splitext(os.path.basename(xml_file))
+        # Raises FileContentsException - handled by superclass
+        self.load_file_and_schema(xml_file, schema_path)
+        self.current_datainfo = DataInfo()
         try:
-            # Get the file location of
-            self.load_file_and_schema(xml_file, schema_path)
-            self.add_data_set()
-            # Try to load the file, but raise an error if unable to.
-            # Check the file matches the XML schema
-            self.is_cansas(self.extension) # Raises FileContentsException if not CanSAS
-            self.invalid = False
-            # Get each SASentry from XML file and add it to a list.
-            entry_list = self.xmlroot.xpath(
-                '/ns:SASroot/ns:SASentry',
-                namespaces={'ns': self.cansas_defaults.get("ns")})
+            # Raises FileContentsException if file doesn't meet CanSAS schema
+            self.is_cansas(self.extension)
+            self.invalid = False  # If we reach this point then file must be valid CanSAS
+
+            # Parse each SASentry
+            entry_list = self.xmlroot.xpath('/ns:SASroot/ns:SASentry', namespaces={
+                'ns': self.cansas_defaults.get("ns")
+            })
+            # Look for a SASentry
             self.names.append("SASentry")
-
-            # Get all preprocessing events and encoding
             self.set_processing_instructions()
 
-            # Parse each <SASentry> item
             for entry in entry_list:
-                # Create a new DataInfo object for every <SASentry>
-
-                # Set the file name and then parse the entry.
                 self.current_datainfo.filename = basename + self.extension
                 self.current_datainfo.meta_data["loader"] = "CanSAS XML 1D"
-                self.current_datainfo.meta_data[PREPROCESS] = \
-                    self.processing_instructions
-
-                # Parse the XML SASentry
+                self.current_datainfo.meta_data[PREPROCESS] = self.processing_instructions
                 self._parse_entry(entry)
-                # Combine datasets with datainfo
-                self.add_data_set()
+                self.send_to_output()  # Combine datasets with DataInfo
         except FileContentsException as fc_exc:
-            try:
-                # Try again with an invalid CanSAS schema, that requires only a data set in each
-                base_name = xml_reader.__file__
-                base_name = base_name.replace("\\", "/")
-                base = base_name.split("/sas/")[0]
-                if self.cansas_version == "1.1":
-                    invalid_schema = INVALID_SCHEMA_PATH_1_1.format(base, self.cansas_defaults.get("schema"))
-                else:
-                    invalid_schema = INVALID_SCHEMA_PATH_1_0.format(base, self.cansas_defaults.get("schema"))
-                self.set_schema(invalid_schema)
-                if self.invalid:
-                    self.output = self.read(xml_file, invalid_schema, False)
-                    # If the file does not match the schema, but can still be read, raise this error
-                    self.load_file_and_schema(xml_file)  # Reload valid schema so we can find errors
+            # File doesn't meet schema - try loading with a less strict schema
+            base_name = xml_reader.__file__
+            base_name = base_name.replace("\\", "/")
+            base = base_name.split("/sas/")[0]
+            if self.cansas_version == "1.1":
+                invalid_schema = INVALID_SCHEMA_PATH_1_1.format(base, self.cansas_defaults.get("schema"))
+            else:
+                invalid_schema = INVALID_SCHEMA_PATH_1_0.format(base, self.cansas_defaults.get("schema"))
+            self.set_schema(invalid_schema)
+            if self.invalid:
+                try:
+                    # Load data with less strict schema
+                    self.read(xml_file, invalid_schema, False)
+
+                    # File can still be read but doesn't match schema, so raise exception
+                    self.load_file_and_schema(xml_file)  # Reload strict schema so we can find where errors are in file
                     invalid_xml = self.find_invalid_xml()
                     invalid_xml = INVALID_XML.format(basename + self.extension) + invalid_xml
-                    raise DataReaderException(invalid_xml)
-                else:
-                    raise fc_exc
-            except FileContentsException as fc_exc:
-                msg = "CanSAS Reader could not load the file {}".format(xml_file)
-                if not self.extension in self.ext:  # If the file has no associated loader
-                    raise DefaultReaderException(msg)
-                if fc_exc.message is not None:  # Propagate error messages from earlier
-                    msg = fc_exc.message
-                raise FileContentsException(msg)
-        except DataReaderException as dr_exc:  # Handled by file_reader_base_class
-            raise dr_exc
-        except Exception as e:  # Convert any other exceptions to FileContentsExceptions
-            raise FileContentsException(e.message)
-        # Return a list of parsed entries that dataloader can manage
-        return self.output
+                    raise DataReaderException(invalid_xml)  # Handled by base class
+                except FileContentsException as fc_exc:
+                    msg = "CanSAS Reader could not load the file {}".format(xml_file)
+                    if not self.extension in self.ext:  # If the file has no associated loader
+                        raise DefaultReaderException(msg)
+                    if fc_exc.message is not None:  # Propagate error messages from earlier
+                        msg += "\n" + fc_exc.message
+                    raise FileContentsException(msg)
+                pass
+            else:
+                raise fc_exc
+        except Exception as e:  # Convert all other exceptions to FileContentsExceptions
+            raise FileContentsException(e.message)
+
+    def load_file_and_schema(self, xml_file, schema_path=""):
+        base_name = xml_reader.__file__
+        base_name = base_name.replace("\\", "/")
+        base = base_name.split("/sas/")[0]
+
+        # Try and parse the XML file
+        try:
+            self.set_xml_file(xml_file)
+        except etree.XMLSyntaxError:  # File isn't valid XML so can't be loaded
+            msg = "Cansas cannot load {}.\n Invalid XML syntax".format(xml_file)
+            raise FileContentsException(msg)
+
+        self.cansas_version = self.xmlroot.get("version", "1.0")
+        self.cansas_defaults = CANSAS_NS.get(self.cansas_version, "1.0")
+
+        if schema_path == "":
+            schema_path = "{}/sas/sascalc/dataloader/readers/schema/{}".format(
+                base, self.cansas_defaults.get("schema").replace("\\", "/")
+            )
+        self.set_schema(schema_path)
+
+    def is_cansas(self, ext="xml"):
+        """
+        Checks to see if the XML file is a CanSAS file
+
+        :param ext: The file extension of the data file
+        :raises FileContentsException: Raised if XML file isn't valid CanSAS
+        """
+        if self.validate_xml():  # Check file is valid XML
+            name = "{http://www.w3.org/2001/XMLSchema-instance}schemaLocation"
+            value = self.xmlroot.get(name)
+            # Check schema CanSAS version matches file CanSAS version
+            if CANSAS_NS.get(self.cansas_version).get("ns") == value.rsplit(" ")[0]:
+                return True
+        if ext == "svs":
+            return True  # Why is this required?
+        # If we get to this point then file isn't valid CanSAS
+        raise FileContentsException("The file is not valid CanSAS")
 
     def _parse_entry(self, dom, recurse=False):
-        """
-        Parse a SASEntry - new recursive method for parsing the dom of
-            the CanSAS data format. This will allow multiple data files
-            and extra nodes to be read in simultaneously.
-
-        :param dom: dom object with a namespace base of names
-        """
-
         if not self._is_call_local() and not recurse:
             self.reset_state()
-            self.add_data_set()
+            self.data = []
+            self.current_datainfo = DataInfo()
             self.names.append("SASentry")
             self.parent_class = "SASentry"
-        self._check_for_empty_data()
-        self.base_ns = "{0}{1}{2}".format("{", \
-            CANSAS_NS.get(self.cansas_version).get("ns"), "}")
-
-        # Go through each child in the parent element
+        # Create an empty dataset if no data has been passed to the reader
+        if self.current_dataset is None:
+            self.current_dataset = plottable_1D(np.empty(0), np.empty(0),
+                                                np.empty(0), np.empty(0))
+            self.current_dataset.dxl = np.empty(0)
+            self.current_dataset.dxw = np.empty(0)
+        self.base_ns = "{" + CANSAS_NS.get(self.cansas_version).get("ns") + "}"
+
+        # Loop through each child in the parent element
         for node in dom:
             attr = node.attrib
…
             if tagname == "fitting_plug_in" or tagname == "pr_inversion" or tagname == "invariant":
                 continue
-
             # Get where to store content
             self.names.append(tagname_original)
…
                 self.parent_class = tagname_original
                 if tagname == 'SASdata':
-                    self._initialize_new_data_set(node)
-                    if isinstance(self.current_dataset, plottable_2D):
-                        x_bins = attr.get("x_bins", "")
-                        y_bins = attr.get("y_bins", "")
-                        if x_bins is not "" and y_bins is not "":
-                            self.current_dataset.shape = (x_bins, y_bins)
-                        else:
-                            self.current_dataset.shape = ()
-                # Recursion step to access data within the group
-                self._parse_entry(node, True)
+                    self.current_dataset = plottable_1D(np.array(0), np.array(0))
+                # Recurse to access data within the group
+                self._parse_entry(node, recurse=True)
                 if tagname == "SASsample":
                     self.current_datainfo.sample.name = name
…
                     self.aperture.name = name
                     self.aperture.type = type
-                self.add_intermediate()
+                self._add_intermediate()
             else:
-                if isinstance(self.current_dataset, plottable_2D):
-                    data_point = node.text
-                    unit = attr.get('unit', '')
-                else:
-                    data_point, unit = self._get_node_value(node, tagname)
+                data_point, unit = self._get_node_value(node, tagname)
 
                 # If this is a dataset, store the data appropriately
…
                 elif tagname == 'SASnote':
                     self.current_datainfo.notes.append(data_point)
-
-                # I and Q - 1D data
-                elif tagname == 'I' and isinstance(self.current_dataset, plottable_1D):
+                elif tagname == 'I':  # I and Q points
                     unit_list = unit.split("|")
                     if len(unit_list) > 1:
…
                     self.current_dataset.yaxis("Intensity", unit)
                     self.current_dataset.y = np.append(self.current_dataset.y, data_point)
-                elif tagname == 'Idev' and isinstance(self.current_dataset, plottable_1D):
+                elif tagname == 'Idev':
                     self.current_dataset.dy = np.append(self.current_dataset.dy, data_point)
                 elif tagname == 'Q':
…
                 elif tagname == 'zacceptance':
                     self.current_datainfo.sample.zacceptance = (data_point, unit)
-
-                # I and Qx, Qy - 2D data
-                elif tagname == 'I' and isinstance(self.current_dataset, plottable_2D):
-                    self.current_dataset.yaxis("Intensity", unit)
-                    self.current_dataset.data = np.fromstring(data_point, dtype=float, sep=",")
-                elif tagname == 'Idev' and isinstance(self.current_dataset, plottable_2D):
-                    self.current_dataset.err_data = np.fromstring(data_point, dtype=float, sep=",")
-                elif tagname == 'Qx':
-                    self.current_dataset.xaxis("Qx", unit)
-                    self.current_dataset.qx_data = np.fromstring(data_point, dtype=float, sep=",")
-                elif tagname == 'Qy':
-                    self.current_dataset.yaxis("Qy", unit)
-                    self.current_dataset.qy_data = np.fromstring(data_point, dtype=float, sep=",")
-                elif tagname == 'Qxdev':
-                    self.current_dataset.xaxis("Qxdev", unit)
-                    self.current_dataset.dqx_data = np.fromstring(data_point, dtype=float, sep=",")
-                elif tagname == 'Qydev':
-                    self.current_dataset.yaxis("Qydev", unit)
-                    self.current_dataset.dqy_data = np.fromstring(data_point, dtype=float, sep=",")
-                elif tagname == 'Mask':
-                    inter = [item == "1" for item in data_point.split(",")]
-                    self.current_dataset.mask = np.asarray(inter, dtype=bool)
 
                 # Sample Information
…
                 elif tagname == 'name' and self.parent_class == 'SASinstrument':
                     self.current_datainfo.instrument = data_point
+
                 # Detector Information
                 elif tagname == 'name' and self.parent_class == 'SASdetector':
…
                     self.detector.orientation.z = data_point
                     self.detector.orientation_unit = unit
+
                 # Collimation and Aperture
                 elif tagname == 'length' and self.parent_class == 'SAScollimation':
…
                 elif tagname == 'term' and self.parent_class == 'SASprocess':
                     unit = attr.get("unit", "")
-                    dic = {}
-                    dic["name"] = name
-                    dic["value"] = data_point
-                    dic["unit"] = unit
+                    dic = { "name": name, "value": data_point, "unit": unit }
                     self.process.term.append(dic)
…
         if not self._is_call_local() and not recurse:
             self.frm = ""
-            self.add_data_set()
+            self.current_datainfo.errors = set()
+            for error in self.errors:
+                self.current_datainfo.errors.add(error)
+            self.errors.clear()
+            self.send_to_output()
             empty = None
             return self.output[0], empty
 
-
     def _is_call_local(self):
-        """
-
-        """
         if self.frm == "":
             inter = inspect.stack()
…
             return True
 
-    def is_cansas(self, ext="xml"):
-        """
-        Checks to see if the xml file is a CanSAS file
-
-        :param ext: The file extension of the data file
-        """
-        if self.validate_xml():
-            name = "{http://www.w3.org/2001/XMLSchema-instance}schemaLocation"
-            value = self.xmlroot.get(name)
-            if CANSAS_NS.get(self.cansas_version).get("ns") == \
-                    value.rsplit(" ")[0]:
-                return True
-        if ext == "svs":
-            return True
-        raise FileContentsException("Not valid CanSAS")
-
-    def load_file_and_schema(self, xml_file, schema_path=""):
-        """
-        Loads the file and associates a schema, if a schema is passed in or if one already exists
-
-        :param xml_file: The xml file path sent to Reader.read
-        :param schema_path: The path to a schema associated with the xml_file, or find one based on the file
-        """
-        base_name = xml_reader.__file__
-        base_name = base_name.replace("\\", "/")
-        base = base_name.split("/sas/")[0]
-
-        # Load in xml file and get the cansas version from the header
-        from lxml import etree
-        try:
-            self.set_xml_file(xml_file)
-        except etree.XMLSyntaxError:
-            msg = "Cansas cannot load {}.\n Invalid XML syntax.".format(xml_file)
-            raise FileContentsException(msg)
-        self.cansas_version = self.xmlroot.get("version", "1.0")
-
-        # Generic values for the cansas file based on the version
-        self.cansas_defaults = CANSAS_NS.get(self.cansas_version, "1.0")
-        if schema_path == "":
-            schema_path = "{0}/sas/sascalc/dataloader/readers/schema/{1}".format \
-                (base, self.cansas_defaults.get("schema")).replace("\\", "/")
-
-        # Link a schema to the XML file.
-        self.set_schema(schema_path)
-
-    def add_data_set(self):
-        """
-        Adds the current_dataset to the list of outputs after performing final processing on the data and then calls a
-        private method to generate a new data set.
-
-        :param key: NeXus group name for current tree level
-        """
-
-        if self.current_datainfo and self.current_dataset:
-            self._final_cleanup()
-        self.data = []
-        self.current_datainfo = DataInfo()
-
-    def _initialize_new_data_set(self, node=None):
-        """
-        A private class method to generate a new 1D data object.
-        Outside methods should call add_data_set() to be sure any existing data is stored properly.
-
-        :param node: XML node to determine if 1D or 2D data
-        """
-        x = np.array(0)
-        y = np.array(0)
-        for child in node:
-            if child.tag.replace(self.base_ns, "") == "Idata":
-                for i_child in child:
-                    if i_child.tag.replace(self.base_ns, "") == "Qx":
-                        self.current_dataset = plottable_2D()
-                        return
-        self.current_dataset = plottable_1D(x, y)
-
-    def add_intermediate(self):
+    def _add_intermediate(self):
         """
         This method stores any intermediate objects within the final data set after fully reading the set.
-
-        :param parent: The NXclass name for the h5py Group object that just finished being processed
         """
         if self.parent_class == 'SASprocess':
             self.current_datainfo.process.append(self.process)
…
             self._check_for_empty_resolution()
             self.data.append(self.current_dataset)
-
-    def _final_cleanup(self):
-        """
-        Final cleanup of the Data1D object to be sure it has all the
-        appropriate information needed for perspectives
-        """
-
-        # Append errors to dataset and reset class errors
-        self.current_datainfo.errors = set()
-        for error in self.errors:
-            self.current_datainfo.errors.add(error)
-        self.errors.clear()
-
-        # Combine all plottables with datainfo and append each to output
-        # Type cast data arrays to float64 and find min/max as appropriate
-        for dataset in self.data:
-            if isinstance(dataset, plottable_1D):
-                if dataset.x is not None:
-                    dataset.x = np.delete(dataset.x, [0])
-                    dataset.x = dataset.x.astype(np.float64)
-                    dataset.xmin = np.min(dataset.x)
-                    dataset.xmax = np.max(dataset.x)
-                if dataset.y is not None:
-                    dataset.y = np.delete(dataset.y, [0])
-                    dataset.y = dataset.y.astype(np.float64)
-                    dataset.ymin = np.min(dataset.y)
-                    dataset.ymax = np.max(dataset.y)
-                if dataset.dx is not None:
-                    dataset.dx = np.delete(dataset.dx, [0])
-                    dataset.dx = dataset.dx.astype(np.float64)
-                if dataset.dxl is not None:
-                    dataset.dxl = np.delete(dataset.dxl, [0])
-                    dataset.dxl = dataset.dxl.astype(np.float64)
-                if dataset.dxw is not None:
-                    dataset.dxw = np.delete(dataset.dxw, [0])
-                    dataset.dxw = dataset.dxw.astype(np.float64)
-                if dataset.dy is not None:
-                    dataset.dy = np.delete(dataset.dy, [0])
-                    dataset.dy = dataset.dy.astype(np.float64)
-                np.trim_zeros(dataset.x)
-                np.trim_zeros(dataset.y)
-                np.trim_zeros(dataset.dy)
-            elif isinstance(dataset, plottable_2D):
-                dataset.data = dataset.data.astype(np.float64)
-                dataset.qx_data = dataset.qx_data.astype(np.float64)
-                dataset.xmin = np.min(dataset.qx_data)
-                dataset.xmax = np.max(dataset.qx_data)
-                dataset.qy_data = dataset.qy_data.astype(np.float64)
-                dataset.ymin = np.min(dataset.qy_data)
-                dataset.ymax = np.max(dataset.qy_data)
-                dataset.q_data = np.sqrt(dataset.qx_data * dataset.qx_data
-                                         + dataset.qy_data * dataset.qy_data)
-                if dataset.err_data is not None:
-                    dataset.err_data = dataset.err_data.astype(np.float64)
-                if dataset.dqx_data is not None:
-                    dataset.dqx_data = dataset.dqx_data.astype(np.float64)
-                if dataset.dqy_data is not None:
-                    dataset.dqy_data = dataset.dqy_data.astype(np.float64)
-                if dataset.mask is not None:
-                    dataset.mask = dataset.mask.astype(dtype=bool)
-
-                if len(dataset.shape) == 2:
-                    n_rows, n_cols = dataset.shape
-                    dataset.y_bins = dataset.qy_data[0::int(n_cols)]
-                    dataset.x_bins = dataset.qx_data[:int(n_cols)]
-                    dataset.data = dataset.data.flatten()
-                else:
-                    dataset.y_bins = []
-                    dataset.x_bins = []
-                    dataset.data = dataset.data.flatten()
-
-            final_dataset = combine_data(dataset, self.current_datainfo)
-            self.output.append(final_dataset)
-
-    def _create_unique_key(self, dictionary, name, numb=0):
-        """
-        Create a unique key value for any dictionary to prevent overwriting
-        Recurse until a unique key value is found.
-
-        :param dictionary: A dictionary with any number of entries
-        :param name: The index of the item to be added to dictionary
-        :param numb: The number to be appended to the name, starts at 0
-        """
-        if dictionary.get(name) is not None:
-            numb += 1
-            name = name.split("_")[0]
-            name += "_{0}".format(numb)
-            name = self._create_unique_key(dictionary, name, numb)
-        return name
 
     def _get_node_value(self, node, tagname):
…
         return node_value, value_unit
 
-    def _check_for_empty_data(self):
-        """
-        Creates an empty data set if no data is passed to the reader
-
-        :param data1d: presumably a Data1D object
-        """
-        if self.current_dataset is None:
-            x_vals = np.empty(0)
-            y_vals = np.empty(0)
-            dx_vals = np.empty(0)
-            dy_vals = np.empty(0)
-            dxl = np.empty(0)
-            dxw = np.empty(0)
-            self.current_dataset = plottable_1D(x_vals, y_vals, dx_vals, dy_vals)
-            self.current_dataset.dxl = dxl
-            self.current_dataset.dxw = dxw
-
     def _check_for_empty_resolution(self):
         """
         A method to check all resolution data sets are the same size as I and Q
         """
-        if isinstance(self.current_dataset, plottable_1D):
-            dql_exists = False
-            dqw_exists = False
-            dq_exists = False
-            di_exists = False
-            if self.current_dataset.dxl is not None:
-                dql_exists = True
-            if self.current_dataset.dxw is not None:
-                dqw_exists = True
-            if self.current_dataset.dx is not None:
-                dq_exists = True
-            if self.current_dataset.dy is not None:
-                di_exists = True
-            if dqw_exists and not dql_exists:
-                array_size = self.current_dataset.dxw.size - 1
-                self.current_dataset.dxl = np.append(self.current_dataset.dxl,
-                                                     np.zeros([array_size]))
-            elif dql_exists and not dqw_exists:
-                array_size = self.current_dataset.dxl.size - 1
-                self.current_dataset.dxw = np.append(self.current_dataset.dxw,
-                                                     np.zeros([array_size]))
-            elif not dql_exists and not dqw_exists and not dq_exists:
-                array_size = self.current_dataset.x.size - 1
-                self.current_dataset.dx = np.append(self.current_dataset.dx,
-                                                    np.zeros([array_size]))
-            if not di_exists:
-                array_size = self.current_dataset.y.size - 1
-                self.current_dataset.dy = np.append(self.current_dataset.dy,
-                                                    np.zeros([array_size]))
-        elif isinstance(self.current_dataset, plottable_2D):
-            dqx_exists = False
-            dqy_exists = False
-            di_exists = False
-            mask_exists = False
-            if self.current_dataset.dqx_data is not None:
-                dqx_exists = True
-            if self.current_dataset.dqy_data is not None:
-                dqy_exists = True
-            if self.current_dataset.err_data is not None:
-                di_exists = True
-            if self.current_dataset.mask is not None:
-                mask_exists = True
-            if not dqy_exists:
-                array_size = self.current_dataset.qy_data.size - 1
-                self.current_dataset.dqy_data = np.append(
-                    self.current_dataset.dqy_data, np.zeros([array_size]))
-            if not dqx_exists:
-                array_size = self.current_dataset.qx_data.size - 1
-                self.current_dataset.dqx_data = np.append(
-                    self.current_dataset.dqx_data, np.zeros([array_size]))
-            if not di_exists:
-                array_size = self.current_dataset.data.size - 1
-                self.current_dataset.err_data = np.append(
-                    self.current_dataset.err_data, np.zeros([array_size]))
-            if not mask_exists:
-                array_size = self.current_dataset.data.size - 1
-                self.current_dataset.mask = np.append(
-                    self.current_dataset.mask,
-                    np.ones([array_size], dtype=bool))
-
-    ####### All methods below are for writing CanSAS XML files #######
+        dql_exists = False
+        dqw_exists = False
+        dq_exists = False
+        di_exists = False
+        if self.current_dataset.dxl is not None:
+            dql_exists = True
+        if self.current_dataset.dxw is not None:
+            dqw_exists = True
+        if self.current_dataset.dx is not None:
+            dq_exists = True
+        if self.current_dataset.dy is not None:
+            di_exists = True
+        if dqw_exists and not dql_exists:
+            array_size = self.current_dataset.dxw.size - 1
+            self.current_dataset.dxl = np.append(self.current_dataset.dxl,
+                                                 np.zeros([array_size]))
+        elif dql_exists and not dqw_exists:
+            array_size = self.current_dataset.dxl.size - 1
+            self.current_dataset.dxw = np.append(self.current_dataset.dxw,
+                                                 np.zeros([array_size]))
+        elif not dql_exists and not dqw_exists and not dq_exists:
+            array_size = self.current_dataset.x.size - 1
+            self.current_dataset.dx = np.append(self.current_dataset.dx,
+                                                np.zeros([array_size]))
+        if not di_exists:
+            array_size = self.current_dataset.y.size - 1
+            self.current_dataset.dy = np.append(self.current_dataset.dy,
+                                                np.zeros([array_size]))
 
     def write(self, filename, datainfo):
…
                     exec "storage.%s = entry.text.strip()" % variable
 
-
     # DO NOT REMOVE Called by outside packages:
     # sas.sasgui.perspectives.invariant.invariant_state