Changeset a7a5886 in sasview for DataLoader
- Timestamp:
- Nov 2, 2010 5:02:56 PM
- Branches:
- master, ESS_GUI, ESS_GUI_Docs, ESS_GUI_batch_fitting, ESS_GUI_bumps_abstraction, ESS_GUI_iss1116, ESS_GUI_iss879, ESS_GUI_iss959, ESS_GUI_opencl, ESS_GUI_ordering, ESS_GUI_sync_sascalc, costrafo411, magnetic_scatt, release-4.1.1, release-4.1.2, release-4.2.2, release_4.0.1, ticket-1009, ticket-1094-headless, ticket-1242-2d-resolution, ticket-1243, ticket-1249, ticket885, unittest-saveload
- Children:
- da9ac4e6
- Parents:
- 44148f0
- Location:
- DataLoader
- Files:
- 13 edited
DataLoader/data_info.py
r0997158f → ra7a5886

The edits to data_info.py are style cleanup: long lines are wrapped to the line-length limit, one-line conditionals and one-line helpers are expanded, undocumented methods gain short docstrings, and long exception messages are built in a local msg variable before being raised. Representative hunks:

In plottable_1D, the comment on Qdev (dx) exclusivity is wrapped to two lines, and the constructor's one-line conditionals are expanded:

    -    def __init__(self, x,y,dx=None,dy=None,dxl=None,dxw=None):
    +    def __init__(self, x, y, dx=None, dy=None, dxl=None, dxw=None):
             self.x = numpy.asarray(x)
             self.y = numpy.asarray(y)
    -        if dx is not None: self.dx = numpy.asarray(dx)
    -        if dy is not None: self.dy = numpy.asarray(dy)
    -        if dxl is not None: self.dxl = numpy.asarray(dxl)
    -        if dxw is not None: self.dxw = numpy.asarray(dxw)
    +        if dx is not None:
    +            self.dx = numpy.asarray(dx)
    +        if dy is not None:
    +            self.dy = numpy.asarray(dy)
    +        if dxl is not None:
    +            self.dxl = numpy.asarray(dxl)
    +        if dxw is not None:
    +            self.dxw = numpy.asarray(dxw)

The xaxis/yaxis setters of plottable_1D and the xaxis/yaxis/zaxis setters of plottable_2D each gain a docstring of the form "set the x axis label and unit". The plottable_2D constructor is wrapped across three lines, the overlong Detector comments (offset, orientation, beam_center) are wrapped to two lines each, and Vector.__str__ now builds its return string in a msg variable.

In DataInfo, _perform_operation gains a docstring ("Private method to perform operation. Not implemented for DataInfo, but should be implemented for each data class inherited from DataInfo that holds actual data (ex.: Data1D)") and every operator's one-line helper is expanded, e.g. for __add__:

    -        def operation(a, b): return a+b
    +        def operation(a, b):
    +            return a + b

with the same treatment for __radd__, __sub__, __rsub__, __mul__, __rmul__, __div__, and __rdiv__. In Data1D, _check and _validity_check gain spaces around comparisons (e.g. "if other.dy == None or (len(other.dy) != len(other.y)):"), and the error paths become msg-based:

    -            raise ValueError, "Unable to perform operation: data length are not equal"
    +            msg = "Unable to perform operation: data length are not equal"
    +            raise ValueError, msg
    …
    -                raise ValueError, "Incompatible data sets: x-values do not match"
    +                msg = "Incompatible data sets: x-values do not match"
    +                raise ValueError, msg
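For orientation, a minimal usage sketch of the Data1D interface touched above; the axis labels and units passed to the setters are illustrative values, not taken from the changeset:

    from DataLoader.data_info import Data1D

    # error vectors are optional, as in the constructor shown above
    data = Data1D(x=[0.001, 0.002, 0.003], y=[10.0, 9.5, 9.1],
                  dy=[0.5, 0.4, 0.4])
    data.xaxis("Q", "A^{-1}")            # illustrative label and unit
    data.yaxis("Intensity", "cm^{-1}")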
The Data2D changes follow the same pattern. The constructor and its plottable_2D.__init__ call are wrapped:

    -    def __init__(self, data=None, err_data=None, qx_data=None, qy_data=None, q_data=None, mask=None, dqx_data=None, dqy_data=None):
    +    def __init__(self, data=None, err_data=None, qx_data=None,
    +                 qy_data=None, q_data=None, mask=None,
    +                 dqx_data=None, dqy_data=None):

as is the "Data2D: Detector bank already filled at init" check. The unused leny computation in __str__ is commented out, the Data2D(...) call in clone_without_data is wrapped, and _validity_check and _perform_operation get respaced numpy.size(...) calls, wrapped conditions, and msg-based errors:

    -        if other.err_data==None or (numpy.size(other.err_data) != numpy.size(other.data)):
    -            err_other = numpy.zeros([numpy.size(other.data,0), numpy.size(other.data,1)])
    +        if other.err_data == None or \
    +            (numpy.size(other.err_data) != numpy.size(other.data)):
    +            err_other = numpy.zeros([numpy.size(other.data, 0),
    +                                     numpy.size(other.data, 1)])
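The operator overloads above all route through _perform_operation, substituting a zero vector when an error array is missing. A standalone sketch of that pattern using only numpy, so it runs without DataLoader installed; the combine helper is hypothetical, not part of the module:

    import numpy

    def combine(y_a, dy_a, y_b, dy_b, operation):
        # Mirror _validity_check: lengths must agree, and a missing
        # error vector is replaced by zeros.
        if len(y_a) != len(y_b):
            raise ValueError("Unable to perform operation: data length are not equal")
        dy_a = numpy.zeros(len(y_a)) if dy_a is None else numpy.asarray(dy_a)
        dy_b = numpy.zeros(len(y_b)) if dy_b is None else numpy.asarray(dy_b)
        y = operation(numpy.asarray(y_a), numpy.asarray(y_b))
        # quadrature is the right propagation for the +/- operations
        dy = numpy.sqrt(dy_a ** 2 + dy_b ** 2)
        return y, dy

    y, dy = combine([1.0, 2.0], [0.1, 0.1], [0.5, 0.5], None,
                    lambda a, b: a - b)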
DataLoader/loader.py
rf118fe2f → ra7a5886

loader.py gets the same cleanup. The data_util.registry import moves below the standard-library imports, and the combined readers import is split:

    -from data_util.registry import ExtensionRegistry
     import os
     import sys
     …
     import time
     from zipfile import ZipFile
    +from data_util.registry import ExtensionRegistry
     # Default readers are defined in the readers sub-module
     import readers
    -from readers import ascii_reader,cansas_reader
    +from readers import ascii_reader
    +from readers import cansas_reader

Throughout Registry, the overlong logging calls build their message first:

    -        except :
    -            logging.error("Loader: Error importing %s\n  %s" % (item, sys.exc_value))
    +        except:
    +            msg = "Loader: Error importing "
    +            msg += "%s\n  %s" % (item, sys.exc_value)
    +            logging.error(msg)

The same treatment is applied to the zip-file import errors and the "Loader: Error accessing Reader" messages. The TODO about switching to the new default readers and the comment about treating a reader found at run time as the new default are rewrapped, the wcard wildcard strings, the __import__ call, and the docstring of the ascii/cansas convenience loaders are wrapped, the writer lookup gains spaces (extlist.sort(lambda a, b: len(a) < len(b)) and "Unknown file type for " + path), and the __main__ test block renames its local variable:

    -    data = l.load('test/cansas1d.xml')
    -    l.save('test_file.xml', data, '.xml')
    +    test_data = l.load('test/cansas1d.xml')
    +    l.save('test_file.xml', test_data, '.xml')
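The module's own __main__ block, shown above, doubles as the basic usage pattern; restated on its own, assuming DataLoader is importable and the CanSAS test file exists:

    from DataLoader.loader import Loader

    l = Loader()
    # extension-based dispatch: the registry picks the CanSAS reader for .xml
    test_data = l.load('test/cansas1d.xml')
    l.save('test_file.xml', test_data, '.xml')
    print l.get_wildcards()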
DataLoader/manipulations.py
r0997158f → ra7a5886

manipulations.py receives the heaviest cleanup. The module docstring is de-indented, the imports are reordered (math and numpy first; the unused plottable_2D import is commented out and Data1D is imported on its own line), and arithmetic throughout is respaced. One genuine fix: get_q_compo called the unqualified names cos and sin, which are now qualified:

    -    if compo=="x":
    -        out=get_q(dx, dy, det_dist, wavelength)*cos(angle_xy)
    -    elif compo=="y":
    -        out=get_q(dx, dy, det_dist, wavelength)*sin(angle_xy)
    +    if compo == "x":
    +        out = get_q(dx, dy, det_dist, wavelength) * math.cos(angle_xy)
    +    elif compo == "y":
    +        out = get_q(dx, dy, det_dist, wavelength) * math.sin(angle_xy)

flip_phi gains explicit parentheses (phi + (2 * Pi)), and reader2D_converter's docstring and == None comparisons are rewrapped. In _Slab, the constructor and the comment on the fold flag are wrapped, the __call__ stub is expanded onto two lines, and in _avg the one-line conditionals are expanded and errors become msg-based:

    -            raise RuntimeError, "_Slab._avg: invalid number of detectors: %g" % len(data2D.detector)
    +            msg = "_Slab._avg: invalid number of "
    +            msg += " detectors: %g" % len(data2D.detector)
    +            raise RuntimeError, msg

The "empty bin(s) due to tight binning..." diagnostic prints in _Slab._avg and CircularAverage.__call__ are commented out:

    -        elif len(y[idx])!= nbins:
    -            print "resulted",nbins- len(y[idx]),"empty bin(s) due to tight binning..."
    +        #elif len(y[idx])!= nbins:
    +        #    msg = "empty bin(s) due to tight binning..."
    +        #    print "resulted", nbins - len(y[idx]), msg
             return Data1D(x=x[idx], y=y[idx], dy=err_y[idx])

Boxsum.count and Boxavg get the same respacing and msg-based "Circular averaging: invalid number of detectors" error, Boxavg's super() call is wrapped, get_pixel_fraction_square is respaced ((x - xmin) / (xmax - xmin)), CircularAverage.__call__ respaces its binning (nbins = int(math.ceil((self.r_max - self.r_min) / self.bin_width)), qbins = self.bin_width * numpy.arange(nbins) + self.r_min, x = qbins + self.bin_width / 2), and the "Limit Error: min > max ???" message loses its trailing question marks.
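CircularAverage and the slab averagers above share one binning recipe: ceil((max - min)/bin_width) bins, left edges at bin_width * arange(nbins) + min, centers half a bin up. A standalone check of that arithmetic with illustrative numbers:

    import math
    import numpy

    r_min, r_max, bin_width = 0.0, 0.01, 0.001
    nbins = int(math.ceil((r_max - r_min) / bin_width))    # 10 bins
    qbins = bin_width * numpy.arange(nbins) + r_min        # left bin edges
    x = qbins + bin_width / 2                              # bin centers
    i_q = int(math.floor((0.0042 - r_min) / bin_width))    # index for q = 0.0042
    # i_q == 4 and x[i_q] == 0.0045: the point lands in the bin
    # centered on 0.0045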
In Ring (the azimuthal average; its constructor keeps the center_x/center_y arguments that the preceding #Todo flags for removal), the per-pixel loop is respaced, the phi binning gains explicit spacing:

    -            i_phi = int(math.floor((self.nbins_phi)*phi_value/(2*Pi)))
    +            i_phi = int(math.floor((self.nbins_phi) * phi_value / (2 * Pi)))

and the tight-binning print is commented out, as above. get_pixel_fraction and get_intercept gain spaces around every operator (frac_max = (x_0 + x_1) / 2.0 and so on) with no logic changes.

In _Sector.__call__, the docstring is rewrapped ("...where phi_min and phi_max are defined by the right and left lines wrt central line"), the long is_in conditions for the 'q2' minor wing are split with continuations:

    -                if phi_min_minor > phi_max_minor:
    -                    is_in = (phi_value > phi_min_minor or phi_value < phi_max_minor)
    +                if phi_min_minor > phi_max_minor:
    +                    is_in = (phi_value > phi_min_minor or \
    +                             phi_value < phi_max_minor)

and the bin-index and bin-center computations are split across temp variables:

    -                i_bin = int(math.floor((self.nbins)*(q_value-self.r_min)/(self.r_max-self.r_min)))
    +                temp_x = (self.nbins) * (q_value - self.r_min)
    +                temp_y = (self.r_max - self.r_min)
    +                i_bin = int(math.floor(temp_x / temp_y))
    …
    -            if run.lower()=='phi':
    -                x[i] = (self.phi_max-self.phi_min)/self.nbins*(1.0*i + 0.5)+self.phi_min
    +            if run.lower() == 'phi':
    +                temp = self.nbins * (1.0 * i + 0.5) + self.phi_min
    +                x[i] = (self.phi_max - self.phi_min) / temp

The "Average Error: No points inside sector of ROI to average..." error becomes msg-based and its tight-binning print is commented out. In the cut classes at the end of the file, Ringcut's q_data computation is respaced (q_data = numpy.sqrt(qx_data * qx_data + qy_data * qy_data)), Sectorcut's wrapped docstring notes that phi_min and phi_max are given in radians with (phi_max-phi_min) no larger than pi, its constructor signature is respaced, and the major/minor mask expressions are wrapped:

    -            out_minor= (phi_min_minor <= phi_data) + (phi_max_minor>= phi_data)
    +            out_minor = (phi_min_minor <= phi_data) + \
    +                        (phi_max_minor >= phi_data)
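A hedged usage sketch for the cut classes above, assuming the __call__(data2D) convention visible in the diff returns a boolean mask over the flattened q arrays; the file name and wedge angles are illustrative:

    import math
    from DataLoader.loader import Loader
    from DataLoader.manipulations import Sectorcut

    data2d = Loader().load('detector_image.ASC')      # hypothetical file
    # keep a +/- 30 degree wedge about the x axis (major + minor sectors)
    cut = Sectorcut(phi_min=-math.pi / 6, phi_max=math.pi / 6)
    out = cut(data2d)              # boolean array, one entry per pixel
    data2d.mask = data2d.mask & out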
DataLoader/qsmearing.py
r023c8e2 → ra7a5886

qsmearing.py: the imports are regrouped (numpy, logging, and sys ahead of the smearer extension module; the unused math import is commented out), and the guard in the smearer-selection function is wrapped:

    -    if not hasattr(data1D, "dx") and not hasattr(data1D, "dxl") and not hasattr(data1D, "dxw"):
    +    if not hasattr(data1D, "dx") and not hasattr(data1D, "dxl")\
    +        and not hasattr(data1D, "dxw"):
             return None

with the surrounding == and > comparisons respaced. _BaseSmearer._compute_matrix is expanded onto two lines with an empty docstring, and a matching _initialize_smearer stub returning NotImplemented is added to the base class. get_bin_range and __call__ raise via msg variables:

    -            raise RuntimeError, "_BaseSmearer.get_bin_range: error getting range\n  %s" % sys.exc_value
    +            msg = "_BaseSmearer.get_bin_range: "
    +            msg += " error getting range\n  %s" % sys.exc_value
    +            raise RuntimeError, msg

and the call into the C extension is wrapped:

    -        smear_output = smearer.smear(self._smearer, iq_in, iq_out, first_bin, last_bin)
    +        smear_output = smearer.smear(self._smearer, iq_in, iq_out,
    +                                     first_bin, last_bin)

In _SlitSmearer and _QSmearer, the commented-out legacy constructor calls and the new_slit_smearer_with_q/new_q_smearer_with_q calls are wrapped. SlitSmearer's "Slit smearing parameters must be the same for all data" sanity checks become msg-based, _QSmearer.__init__ lines up its width/min/max/nbins assignments, get_unsmeared_range respaces the offset arithmetic (offset = 3.0 * max(self.width)), and the __main__ test block is respaced (x = 0.001 * numpy.arange(1, 11), y = 12.0 - numpy.arange(1, 11)).
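A sketch of how the smearers above are meant to be selected and applied, assuming the module's smear_selection entry point and a callable smearer as in _BaseSmearer.__call__; theory stands in for a computed I(q) with one value per data point:

    import numpy
    from DataLoader.loader import Loader
    from DataLoader.qsmearing import smear_selection

    data = Loader().load('test/cansas1d.xml')
    smearer = smear_selection(data)   # QSmearer, SlitSmearer, Smearer2D, or None
    if smearer is not None:
        first_bin, last_bin = smearer.get_bin_range(q_min=min(data.x),
                                                    q_max=max(data.x))
        theory = numpy.ones(len(data.x))         # stand-in I(q)
        iq_smeared = smearer(theory, first_bin, last_bin)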
DataLoader/readers/IgorReader.py
r0997158f → ra7a5886

IgorReader.py: the combined imports are split and the unused ones commented out:

    -import os, sys
    +import os
    +#import sys
     import numpy
    -import math, logging
    -from DataLoader.data_info import Data2D, Detector
    +import math
    +#import logging
    +from DataLoader.data_info import Data2D
    +from DataLoader.data_info import Detector
     from DataLoader.manipulations import reader2D_converter

The read errors become msg-based ("IgorReader: can't read this file, missing wavelength" / "missing distance" / "missing transmission", and "IgorReader:Missing information in data file"), and the long comments are wrapped: the beam-center note ("Center in bin number: Must substrate 1 because the index starts from 1") and the pixel-to-Q conversion notes ("REmoved +1 from theta = (i_x-center_x+1)*0.5 / distance / 100.0 ..." and the ToDo about checking consistency with fitting.py), leaving theta = (i_x-center_x)*0.5 / distance / 100.0 and qx = 4.0*math.pi/wavelength * math.sin(theta/2.0) unchanged.
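The pixel-to-Q conversion quoted above, restated as a runnable sketch; the detector numbers are illustrative (distance in m, wavelength in A, 0.5 cm pixels):

    import math

    def pixel_to_q(i_pix, center, distance, wavelength):
        # half scattering angle per the reader: 0.5 cm pixel pitch;
        # the /100.0 expresses the detector distance in cm
        theta = (i_pix - center) * 0.5 / distance / 100.0
        return 4.0 * math.pi / wavelength * math.sin(theta / 2.0)

    qx = pixel_to_q(i_pix=80, center=63.5, distance=13.0, wavelength=6.0)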
DataLoader/readers/abs_reader.py
r0997158f → ra7a5886

abs_reader.py: an (empty) module docstring is added at the top, the Data1D and Detector imports are split onto separate lines, the ext list assignment gains a space, comparisons are respaced (is_info == True, has_converter == True), and every unit-conversion call is wrapped, e.g. for the source wavelength:

    -                        if has_converter==True and output.source.wavelength_unit != 'A':
    +                        if has_converter == True and \
    +                            output.source.wavelength_unit != 'A':
                                 conv = Converter('A')
    -                            output.source.wavelength = conv(value, units=output.source.wavelength_unit)
    +                            output.source.wavelength = conv(value,
    +                                units=output.source.wavelength_unit)

The same wrapping is applied to the detector distance, sample thickness, pixel size, and beam center conversions (the beam center is stored as center * 5.0 in mm for the 640 x 640 mm detector), the "abs_reader: cannot open %s" and "abs_reader: y and dy have different length" errors become msg-based, and the header comments ("MON CNT LAMBDA DET ANG DET DIST TRANS THICK AVE STEP" and "The 6 columns are | Q (1/A) | I(Q) (1/cm) | std. dev. I(Q) (1/cm) | sigmaQ | meanQ | ShadowFactor|") are rewrapped.
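A sketch of the unit-conversion idiom the reader relies on; it assumes the optional Converter import that the has_converter guard above tests for (in sasview this comes from data_util.nxsunit):

    # guarded import, as at the top of the reader
    try:
        from data_util.nxsunit import Converter
        has_converter = True
    except:
        has_converter = False

    if has_converter:
        conv = Converter('m')                  # source unit: meters
        distance_mm = conv(13.0, units='mm')   # 13.0 m -> 13000.0 mm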
DataLoader/readers/ascii_reader.py
(rfca90f82 → ra7a5886) The changes are PEP8 cleanup: spaces added around operators and after commas, stray spaces before colons removed, long comments and conditionals wrapped, and error strings built in a local msg variable before the raise. The substantive hunks:

    -    ext =['.txt', '.TXT', '.dat', '.DAT', '.abs', '.ABS']
    +    ext = ['.txt', '.TXT', '.dat', '.DAT', '.abs', '.ABS']

The unused root returned by os.path.splitext is discarded:

    -    root, extension = os.path.splitext(basename)
    +    _, extension = os.path.splitext(basename)

Over-long comments are wrapped (the "spliter" typo is corrected here):

    -    #Jae could not find python universal line spliter: keep the below for now
    -    # some ascii data has \r line separator, try it when the data is on only one long line
    +    #Jae could not find a python universal line splitter;
    +    #keep the below for now
    +    # some ascii data has a \r line separator;
    +    # try it when the data is on only one long line

The counter block is rewritten with each comment on its own line above the variable it describes:

    +    # More than "5" lines of data is considered as actual
    +    # data unless that is the only data
    +    mum_data_lines = 5
    +    # To count # of current data candidate lines
    +    i = -1
    +    # To count total # of previous data candidate lines
    +    i1 = -1
    +    # To count # of header lines
    +    j = -1
    +    # Helps to count # of header lines
    +    j1 = -1
    +    # minimum required number of columns of data; ( <= 4).
    +    lentoks = 2

Each of the six "delete the previously stored lines of data candidates if is not data" checks (for x/y, dy, dx, tx/ty, tdy, tdx) is wrapped the same way:

    -    if i < 0 and -1< i1 < mum_data_lines and is_data == False:
    +    if i < 0 and -1 < i1 < mum_data_lines and \
    +        is_data == False:

The sanity checks after input_f.close() get msg variables; the second check compares x and dx, so its message is corrected here to match:

    -    raise RuntimeError, "ascii_reader: y and dy have different length"
    +    msg = "ascii_reader: y and dy have different length"
    +    raise RuntimeError, msg
         if has_error_dx == True and not len(x) == len(dx):
    -        raise RuntimeError,"ascii_reader: y and dy have different length"
    +        msg = "ascii_reader: x and dx have different length"
    +        raise RuntimeError, msg
    ...
    -    if len(x)==0:
    +    if len(x) == 0:
             raise RuntimeError, "ascii_reader: could not load file"

Finally, the re-ordering of the loaded points:

    -    #Let's re-order the data to make cal. curve look better some cases
    -    ind = numpy.lexsort((ty,tx))
    +    #Let's re-order the data to make the calculated
    +    # curve look better in some cases
    +    ind = numpy.lexsort((ty, tx))
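The element-by-element reorder loop that follows (x[i] = tx[ind[i]] for each i in ind) is equivalent to numpy fancy indexing, since ind is a permutation. A minimal sketch with made-up arrays, not part of the changeset:

    import numpy

    # Hypothetical 3-point data set, deliberately out of order in x.
    tx = numpy.array([0.3, 0.1, 0.2])
    ty = numpy.array([9.0, 3.0, 6.0])
    tdy = numpy.array([0.9, 0.3, 0.6])

    # lexsort uses the LAST key as the primary sort key, so (ty, tx)
    # sorts on tx and breaks ties with ty.
    ind = numpy.lexsort((ty, tx))

    # Fancy indexing reorders all columns consistently in one step.
    x, y, dy = tx[ind], ty[ind], tdy[ind]
    print(x)   # [0.1 0.2 0.3]
    print(y)   # [3. 6. 9.]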
DataLoader/readers/associations.py
(r0997158f → ra7a5886) The combined import is split and long lines are wrapped:

    -    import os, sys
    +    import os
    +    import sys
    ...
    -    entry_list = root.xpath('/ns:SansLoader/ns:FileType', namespaces={'ns': VERSION})
    +    entry_list = root.xpath('/ns:SansLoader/ns:FileType',
    +                            namespaces={'ns': VERSION})
    ...
    -    # TODO: Modify the Register code to be case-insensitive and remove the
    -    #       extra line below.
    +    # TODO: Modify the Register code to be case-insensitive
    +    # and remove the extra line below.
         try:
             exec "import %s" % reader
    -        exec "loader.associate_file_type('%s', %s)" % (ext.lower(), reader)
    -        exec "loader.associate_file_type('%s', %s)" % (ext.upper(), reader)
    +        exec "loader.associate_file_type('%s', %s)" % (ext.lower(),
    +                                                       reader)
    +        exec "loader.associate_file_type('%s', %s)" % (ext.upper(),
    +                                                       reader)
         except:
    -        logging.error("read_associations: skipping association for %s\n %s" % (attr['extension'], sys.exc_value))
    +        msg = "read_associations: skipping association"
    +        msg += " for %s\n %s" % (attr['extension'], sys.exc_value)
    +        logging.error(msg)
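The exec-based registration is a Python 2 idiom for importing a reader module named in the XML file. A sketch of the same association done with importlib instead of exec; the function and its arguments are illustrative assumptions, not part of the changeset:

    import importlib
    import logging
    import sys

    def register(loader, reader_name, ext):
        """Associate one file extension with a reader module,
        without building exec strings (hypothetical helper)."""
        try:
            reader = importlib.import_module(reader_name)
            # Register both cases until the lookup is case-insensitive.
            loader.associate_file_type(ext.lower(), reader)
            loader.associate_file_type(ext.upper(), reader)
        except Exception:
            logging.error("skipping association for %s\n %s"
                          % (ext, sys.exc_info()[1]))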
DataLoader/readers/cansas_reader.py
(r7406040 → ra7a5886) The module-level TODO comments are each wrapped to two lines, and the combined imports are split:

    -    import os, sys
    -    from DataLoader.data_info import Data1D, Collimation, Detector, Process, Aperture
    +    import os
    +    import sys
    +    from DataLoader.data_info import Data1D
    +    from DataLoader.data_info import Collimation
    +    from DataLoader.data_info import Detector
    +    from DataLoader.data_info import Process
    +    from DataLoader.data_info import Aperture

In get_float, the comparison gains spaces and the error string is built up first:

    -    if len(nodes)>0:
    +    if len(nodes) > 0:
    ...
    -    logging.error("cansas_reader.get_float: could not convert '%s' to float" % nodes[0].text)
    +    msg = "cansas_reader.get_float: could not"
    +    msg += " convert '%s' to float" % nodes[0].text
    +    logging.error(msg)

The extension list gains a space (ext = ['.xml', ...]); the root.xpath(...), item.xpath(...), _store_content(...), and _store_float(...) calls throughout read() are wrapped onto continuation lines; and the error handlers for entry notes, sample details, and process terms all switch to the err_mess pattern, e.g.:

    -    err_mess = "cansas_reader.read: error processing entry notes\n %s" % sys.exc_value
    +    err_mess = "cansas_reader.read: error processing"
    +    err_mess += " entry notes\n %s" % sys.exc_value

The guard against multiple data sets follows suit:

    -    if len(nodes)>1:
    -        raise RuntimeError, "CanSAS reader is not compatible with multiple SASdata entries"
    +    if len(nodes) > 1:
    +        msg = "CanSAS reader is not compatible with multiple"
    +        msg += " SASdata entries"
    +        raise RuntimeError, msg

The six unit-conversion error paths (Q, dQ, dQl, dQw, I(q), dI(q)) are all rewritten the same way. When a %-formatted message is split across += lines, each piece must carry the arguments for its own placeholders; as committed, these hunks leave the first [%s] unfilled and hand the next piece more arguments than placeholders, which would itself raise TypeError, so the pattern is shown corrected here:

    -    raise ValueError, "CanSAS reader: could not convert Q unit [%s]; expecting [%s]\n %s" \
    -        % (attr['unit'], data_info.x_unit, sys.exc_value)
    +    msg = "CanSAS reader: could not convert "
    +    msg += "Q unit [%s]; " % attr['unit']
    +    msg += "expecting [%s]\n %s" % (data_info.x_unit, sys.exc_value)
    +    raise ValueError, msg
    ...
    -    raise ValueError, "CanSAS reader: unrecognized Q unit [%s]; expecting [%s]" \
    -        % (attr['unit'], data_info.x_unit)
    +    msg = "CanSAS reader: unrecognized Q unit [%s]; " % attr['unit']
    +    msg += "expecting [%s]" % data_info.x_unit
    +    raise ValueError, msg

The long conditions guarding those paths are wrapped with backslash continuations:

    -    if attr.has_key('unit') and attr['unit'].lower() != data_info.x_unit.lower():
    +    if attr.has_key('unit') and \
    +        attr['unit'].lower() != data_info.x_unit.lower():

In write(), every hunk is a pure line wrap: the two long main_node.setAttribute(...) calls and the several dozen write_node(...) calls for Q/I/Qdev/Idev, sample thickness and temperature, position, orientation, beam size, the wavelength fields, collimation length, apertures, and the detector SDD, slit_length, offset, beam_center, pixel_size, and orientation vectors are split onto continuation lines; no behavior changes. Representative:

    -    write_node(doc, pt, "Qdev", datainfo.dx[i], {'unit':datainfo.x_unit})
    +    write_node(doc, pt, "Qdev", datainfo.dx[i],
    +               {'unit':datainfo.x_unit})

Finally, _store_float wraps its docstring and its exec line, and rebuilds its two error messages, with the same placeholder correction as above:

    -    err_mess = "CanSAS reader: unrecognized %s unit [%s]; expecting [%s]" \
    -        % (variable, units, local_unit)
    +    err_mess = "CanSAS reader: unrecognized %s unit [%s];" \
    +        % (variable, units)
    +    err_mess += " expecting [%s]" % local_unit
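All of these paths funnel through the same pattern: wrap the foreign unit in a Converter and call it with the unit the data object expects. A self-contained sketch of that call pattern; the conversion table below is a stand-in for the real data_util converter, not its actual implementation:

    class Converter(object):
        """Stand-in for the units converter the readers import;
        only enough behavior to show the call pattern."""
        _scale = {('1/m', '1/cm'): 0.01, ('1/cm', '1/m'): 100.0}

        def __init__(self, unit):
            self.unit = unit

        def __call__(self, value, units):
            if self.unit == units:
                return value
            try:
                return value * self._scale[(self.unit, units)]
            except KeyError:
                raise ValueError("unrecognized unit [%s]; expecting [%s]"
                                 % (self.unit, units))

    # Usage mirroring the reader: a value read in 1/m converted
    # into the 1/cm the data object expects.
    data_conv_i = Converter('1/m')
    print(data_conv_i(250.0, units='1/cm'))   # 2.5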
DataLoader/readers/danse_reader.py
(r0997158f → ra7a5886) An unused import is commented out, and spacing is normalized around comparison and arithmetic operators throughout (if filename.lower().find(item) >= 0:, if toks[0] == "FORMATVERSION":, if fversion == 1.0:, xmin = xmin - stepq / 2.0, and the like). The remaining hunks wrap long lines and move message text into msg variables:

    -    import copy
    +    #import copy
    ...
    -    raise RuntimeError,"danse_reader cannot open %s"%(filename)
    +    raise RuntimeError, "danse_reader cannot open %s" % (filename)
    ...
    -    logging.info("Skipping line:%s,%s" %( data_str,sys.exc_value))
    +    logging.info("Skipping line:%s,%s" % (data_str,
    +                                          sys.exc_value))
    ...
    -    logging.info("Skipping entry (v1.0):%s,%s" %(str(data[i_pt]), sys.exc_value))
    +    msg = "Skipping entry (v1.0):%s,%s" % (str(data[i_pt]),
    +                                           sys.exc_value)
    +    logging.info(msg)
    ...
    -    wavelength = conv(wavelength, units=output.source.wavelength_unit)
    +    wavelength = conv(wavelength,
    +                      units=output.source.wavelength_unit)
    ...
    -    if not fversion>=1.0:
    -        raise ValueError,"Danse_reader can't read this file %s"%filename
    -    else:
    -        logging.info("Danse_reader Reading %s \n"%filename)
    +    if not fversion >= 1.0:
    +        msg = "Danse_reader can't read this file %s" % filename
    +        raise ValueError, msg
    +    else:
    +        logging.info("Danse_reader Reading %s \n" % filename)
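The pixel-to-q mapping touched above is small-angle geometry: the pixel offset times the pixel size over the sample-detector distance gives the scattering angle (the /100.0 reconciles the file's pixel-size units with the distance), then q = 4*pi/lambda * sin(theta/2). A worked sketch with made-up instrument values, for illustration only:

    import math

    # Assumed beam parameters, not from any real file.
    wavelength = 6.0   # Angstrom
    distance = 2.0     # sample-detector distance, m
    pixel = 0.5        # pixel size, in the units the file stores
                       # (hence the /100.0 in the reader)
    center_x = 64.0    # beam center, in pixels

    def pixel_to_q(i_x):
        """Map a detector column index to qx, mirroring the reader."""
        theta = (i_x - center_x + 1) * pixel / distance / 100.0
        return 4.0 * math.pi / wavelength * math.sin(theta / 2.0)

    print(pixel_to_q(64))   # ~0.0026 1/A, one pixel past the center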
DataLoader/readers/hfir1d_reader.py
(r0997158f → ra7a5886) Spacing and message handling only; a stray blank line before the numpy.append block is also dropped:

    -    ext =['.d1d']
    +    ext = ['.d1d']
    ...
    -    raise RuntimeError, "hfir1d_reader: y and dy have different length"
    +    msg = "hfir1d_reader: y and dy have different length"
    +    raise RuntimeError, msg
         if not len(x) == len(dx):
    -        raise RuntimeError, "hfir1d_reader: x and dx have different length"
    +        msg = "hfir1d_reader: x and dx have different length"
    +        raise RuntimeError, msg
    ...
    -    if len(x)==0:
    +    if len(x) == 0:
             raise RuntimeError, "hfir1d_reader: could not load file"
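The reader accumulates the four arrays point by point with numpy.append; for a well-formed four-column file the same arrays come from a single loadtxt call. A sketch under that assumption (the file name is hypothetical, and loadtxt returns equal-length columns by construction, so the checks below only mirror the reader's):

    import numpy

    # Hypothetical 4-column ASCII file, one data point per row.
    x, y, dx, dy = numpy.loadtxt("example.d1d", unpack=True)

    if not len(y) == len(dy):
        raise RuntimeError("hfir1d_reader: y and dy have different length")
    if not len(x) == len(dx):
        raise RuntimeError("hfir1d_reader: x and dx have different length")
    if len(x) == 0:
        raise RuntimeError("hfir1d_reader: could not load file")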
DataLoader/readers/red2d_reader.py
(rd2539aa → ra7a5886) Unused imports are commented out rather than removed, long lines and comments are wrapped, and the reshape failure gets a msg variable:

    -    import os, sys
    +    import os
    +    #import sys
         import numpy
    -    import math, logging
    +    import math
    +    #import logging
    ...
    -    if has_converter==True and output.source.wavelength_unit != 'A':
    +    if has_converter == True and \
    +        output.source.wavelength_unit != 'A':
             conv = Converter('A')
    -        wavelength = conv(wavelength, units=output.source.wavelength_unit)
    +        wavelength = conv(wavelength,
    +                          units=output.source.wavelength_unit)

Two awkward comments are wrapped (and tidied here):

    +    # Check if the size is consistent with data, otherwise
    +    # try the tab(\t) separator
    +    # (this may be removed once we are confident the former
    +    # works in all cases).
    ...
    +    # Redimension based on row_num and col_num,
    +    # otherwise raise an error.
         try:
    -        data_point = data_array.reshape(row_num,col_num).transpose()
    +        data_point = data_array.reshape(row_num, col_num).transpose()
         except:
    -        raise ValueError, "red2d_reader: Can't read this file: Not a proper file format"
    +        msg = "red2d_reader: Can't read this file: Not a proper file format"
    +        raise ValueError, msg

The one-line column guards gain operator spacing:

    -    if col_num > 6: mask[data_point[6]<1] = False
    +    if col_num > 6: mask[data_point[6] < 1] = False

and the comment on the optional dq arrays is wrapped:

    +    # if no dqx_data, do not pass dqy_data.
    +    # (1 axis dq is not supported yet).
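The reshape/transpose step turns the flat token list into one array per column: reshape groups the tokens into rows of col_num values, and transposing makes each column contiguous. A toy illustration with invented values:

    import numpy

    # Hypothetical 2x3 detector dump flattened into tokens,
    # three columns per row: qx, qy, I.
    data_list = ["0.1", "0.2", "10", "0.3", "0.4", "20"]
    row_num, col_num = 2, 3

    data_array = numpy.array([float(v) for v in data_list])
    # rows of (qx, qy, I) -> transpose -> one array per column
    data_point = data_array.reshape(row_num, col_num).transpose()

    qx_data, qy_data, data = data_point[0], data_point[1], data_point[2]
    print(qx_data)   # [0.1 0.3]
    print(data)      # [10. 20.]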
DataLoader/readers/tiff_reader.py
(r0997158f → ra7a5886) One hunk, moving the error text into a msg variable:

         try:
             import Image
         except:
    -        raise RuntimeError, "tiff_reader: could not load file. Missing Image module."
    +        msg = "tiff_reader: could not load file. Missing Image module."
    +        raise RuntimeError, msg

         # Instantiate data object
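The bare import Image targets the old standalone PIL; later Pillow installs expose the same module as PIL.Image, so a guarded import that tries both is the usual pattern. A sketch, not part of the changeset:

    try:
        import Image                  # old standalone PIL layout
    except ImportError:
        try:
            from PIL import Image     # Pillow / packaged PIL layout
        except ImportError:
            msg = "tiff_reader: could not load file. Missing Image module."
            raise RuntimeError(msg)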