Changeset 3d6c010 in sasview for src/sas/sascalc/dataloader
- Timestamp: Apr 5, 2017 5:40:56 AM
- Children: a2573fc
- Parents: d619341 (diff), a2e980b (diff)
- Location: src/sas/sascalc/dataloader
- Files: 4 added, 13 edited

Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
src/sas/sascalc/dataloader/data_info.py
--- r345e7e4
+++ r9a5097c

  #from sas.guitools.plottables import Data1D as plottable_1D
  from sas.sascalc.data_util.uncertainty import Uncertainty
- import numpy
+ import numpy as np
  import math
- 
- class plottable_sesans1D(object):
-     """
-     SESANS is a place holder for 1D SESANS plottables.
- 
-     #TODO: This was directly copied from the plottables_1D. Modified Somewhat.
-     #Class has been updated.
-     """
-     # The presence of these should be mutually
-     # exclusive with the presence of Qdev (dx)
-     x = None
-     y = None
-     lam = None
-     dx = None
-     dy = None
-     dlam = None
-     ## Slit smearing length
-     dxl = None
-     ## Slit smearing width
-     dxw = None
- 
-     # Units
-     _xaxis = ''
-     _xunit = ''
-     _yaxis = ''
-     _yunit = ''
- 
-     def __init__(self, x, y, lam, dx=None, dy=None, dlam=None):
-         # print "SESANS plottable working"
-         self.x = numpy.asarray(x)
-         self.y = numpy.asarray(y)
-         self.lam = numpy.asarray(lam)
-         if dx is not None:
-             self.dx = numpy.asarray(dx)
-         if dy is not None:
-             self.dy = numpy.asarray(dy)
-         if dlam is not None:
-             self.dlam = numpy.asarray(dlam)
- 
-     def xaxis(self, label, unit):
-         """
-         set the x axis label and unit
-         """
-         self._xaxis = label
-         self._xunit = unit
- 
-     def yaxis(self, label, unit):
-         """
-         set the y axis label and unit
-         """
-         self._yaxis = label
-         self._yunit = unit
- 

  class plottable_1D(object):
…
      ## Slit smearing width
      dxw = None
+     ## SESANS specific params (wavelengths for spin echo length calculation)
+     lam = None
+     dlam = None

      # Units
…
      _yunit = ''

-     def __init__(self, x, y, dx=None, dy=None, dxl=None, dxw=None):
-         self.x = numpy.asarray(x)
-         self.y = numpy.asarray(y)
+     def __init__(self, x, y, dx=None, dy=None, dxl=None, dxw=None, lam=None, dlam=None):
+         self.x = np.asarray(x)
+         self.y = np.asarray(y)
          if dx is not None:
-             self.dx = numpy.asarray(dx)
+             self.dx = np.asarray(dx)
          if dy is not None:
-             self.dy = numpy.asarray(dy)
+             self.dy = np.asarray(dy)
          if dxl is not None:
-             self.dxl = numpy.asarray(dxl)
+             self.dxl = np.asarray(dxl)
          if dxw is not None:
-             self.dxw = numpy.asarray(dxw)
+             self.dxw = np.asarray(dxw)
+         if lam is not None:
+             self.lam = np.asarray(lam)
+         if dlam is not None:
+             self.dlam = np.asarray(dlam)

      def xaxis(self, label, unit):
…
                   qy_data=None, q_data=None, mask=None,
                   dqx_data=None, dqy_data=None):
-         self.data = numpy.asarray(data)
-         self.qx_data = numpy.asarray(qx_data)
-         self.qy_data = numpy.asarray(qy_data)
-         self.q_data = numpy.asarray(q_data)
-         self.mask = numpy.asarray(mask)
-         self.err_data = numpy.asarray(err_data)
+         self.data = np.asarray(data)
+         self.qx_data = np.asarray(qx_data)
+         self.qy_data = np.asarray(qy_data)
+         self.q_data = np.asarray(q_data)
+         self.mask = np.asarray(mask)
+         self.err_data = np.asarray(err_data)
          if dqx_data is not None:
-             self.dqx_data = numpy.asarray(dqx_data)
+             self.dqx_data = np.asarray(dqx_data)
          if dqy_data is not None:
-             self.dqy_data = numpy.asarray(dqy_data)
+             self.dqy_data = np.asarray(dqy_data)

      def xaxis(self, label, unit):
…
      ## Details
      details = None
+     ## SESANS zacceptance
+     zacceptance = None

      def __init__(self):
…
      ## Loading errors
      errors = None
+     ## SESANS data check
+     isSesans = None

      def __init__(self):
…
          ## Loading errors
          self.errors = []
+         ## SESANS data check
+         self.isSesans = False

      def append_empty_process(self):
…
          _str += "Title: %s\n" % self.title
          _str += "Run: %s\n" % str(self.run)
+         _str += "SESANS: %s\n" % str(self.isSesans)
          _str += "Instrument: %s\n" % str(self.instrument)
          _str += "%s\n" % str(self.sample)
…
          return self._perform_union(other)

- class SESANSData1D(plottable_sesans1D, DataInfo):
-     """
-     SESANS 1D data class
-     """
-     x_unit = 'nm'
-     y_unit = 'pol'
- 
-     def __init__(self, x=None, y=None, lam=None, dx=None, dy=None, dlam=None):
+ class Data1D(plottable_1D, DataInfo):
+     """
+     1D data class
+     """
+     def __init__(self, x=None, y=None, dx=None, dy=None, lam=None, dlam=None, isSesans=None):
          DataInfo.__init__(self)
-         plottable_sesans1D.__init__(self, x, y, lam, dx, dy, dlam)
+         plottable_1D.__init__(self, x, y, dx, dy, None, None, lam, dlam)
+         self.isSesans = isSesans
+         try:
+             if self.isSesans:  # the data is SESANS
+                 self.x_unit = 'A'
+                 self.y_unit = 'pol'
+             elif not self.isSesans:  # the data is SANS
+                 self.x_unit = '1/A'
+                 self.y_unit = '1/cm'
+         except:  # the data is not recognized/supported, and the user is notified
+             raise(TypeError, 'data not recognized, check documentation for supported 1D data formats')

      def __str__(self):
…
          return _str

+     def is_slit_smeared(self):
+         """
+         Check whether the data has slit smearing information
+         :return: True is slit smearing info is present, False otherwise
+         """
+         def _check(v):
+             if (v.__class__ == list or v.__class__ == np.ndarray) \
+                 and len(v) > 0 and min(v) > 0:
+                 return True
+             return False
+         return _check(self.dxl) or _check(self.dxw)
+ 
      def clone_without_data(self, length=0, clone=None):
          """
…
          """
          from copy import deepcopy
+ 
          if clone is None or not issubclass(clone.__class__, Data1D):
-             x = numpy.zeros(length)
-             dx = numpy.zeros(length)
-             y = numpy.zeros(length)
-             dy = numpy.zeros(length)
-             clone = Data1D(x, y, dx=dx, dy=dy)
+             x = np.zeros(length)
+             dx = np.zeros(length)
+             y = np.zeros(length)
+             dy = np.zeros(length)
+             lam = np.zeros(length)
+             dlam = np.zeros(length)
+             clone = Data1D(x, y, lam=lam, dx=dx, dy=dy, dlam=dlam)

          clone.title = self.title
…
          return clone

- class Data1D(plottable_1D, DataInfo):
-     """
-     1D data class
-     """
-     x_unit = '1/A'
-     y_unit = '1/cm'
- 
-     def __init__(self, x, y, dx=None, dy=None):
-         DataInfo.__init__(self)
-         plottable_1D.__init__(self, x, y, dx, dy)
- 
-     def __str__(self):
-         """
-         Nice printout
-         """
-         _str = "%s\n" % DataInfo.__str__(self)
-         _str += "Data:\n"
-         _str += "   Type:         %s\n" % self.__class__.__name__
-         _str += "   X-axis:       %s\t[%s]\n" % (self._xaxis, self._xunit)
-         _str += "   Y-axis:       %s\t[%s]\n" % (self._yaxis, self._yunit)
-         _str += "   Length:       %g\n" % len(self.x)
-         return _str
- 
-     def is_slit_smeared(self):
-         """
-         Check whether the data has slit smearing information
-         :return: True is slit smearing info is present, False otherwise
-         """
-         def _check(v):
-             if (v.__class__ == list or v.__class__ == numpy.ndarray) \
-                 and len(v) > 0 and min(v) > 0:
-                 return True
-             return False
-         return _check(self.dxl) or _check(self.dxw)
- 
-     def clone_without_data(self, length=0, clone=None):
-         """
-         Clone the current object, without copying the data (which
-         will be filled out by a subsequent operation).
-         The data arrays will be initialized to zero.
- 
-         :param length: length of the data array to be initialized
-         :param clone: if provided, the data will be copied to clone
-         """
-         from copy import deepcopy
- 
-         if clone is None or not issubclass(clone.__class__, Data1D):
-             x = numpy.zeros(length)
-             dx = numpy.zeros(length)
-             y = numpy.zeros(length)
-             dy = numpy.zeros(length)
-             clone = Data1D(x, y, dx=dx, dy=dy)
- 
-         clone.title = self.title
-         clone.run = self.run
-         clone.filename = self.filename
-         clone.instrument = self.instrument
-         clone.notes = deepcopy(self.notes)
-         clone.process = deepcopy(self.process)
-         clone.detector = deepcopy(self.detector)
-         clone.sample = deepcopy(self.sample)
-         clone.source = deepcopy(self.source)
-         clone.collimation = deepcopy(self.collimation)
-         clone.trans_spectrum = deepcopy(self.trans_spectrum)
-         clone.meta_data = deepcopy(self.meta_data)
-         clone.errors = deepcopy(self.errors)
- 
-         return clone
- 
      def _validity_check(self, other):
          """
…
          dy_other = other.dy
          if other.dy == None or (len(other.dy) != len(other.y)):
-             dy_other = numpy.zeros(len(other.y))
+             dy_other = np.zeros(len(other.y))

          # Check that we have errors, otherwise create zero vector
          dy = self.dy
          if self.dy == None or (len(self.dy) != len(self.y)):
-             dy = numpy.zeros(len(self.y))
+             dy = np.zeros(len(self.y))

          return dy, dy_other
…
              result.dxw = None
          else:
-             result.dxw = numpy.zeros(len(self.x))
+             result.dxw = np.zeros(len(self.x))
          if self.dxl == None:
              result.dxl = None
          else:
-             result.dxl = numpy.zeros(len(self.x))
+             result.dxl = np.zeros(len(self.x))

          for i in range(len(self.x)):
…
              result.dy = None
          else:
-             result.dy = numpy.zeros(len(self.x) + len(other.x))
+             result.dy = np.zeros(len(self.x) + len(other.x))
          if self.dx == None or other.dx is None:
              result.dx = None
          else:
-             result.dx = numpy.zeros(len(self.x) + len(other.x))
+             result.dx = np.zeros(len(self.x) + len(other.x))
          if self.dxw == None or other.dxw is None:
              result.dxw = None
          else:
-             result.dxw = numpy.zeros(len(self.x) + len(other.x))
+             result.dxw = np.zeros(len(self.x) + len(other.x))
          if self.dxl == None or other.dxl is None:
              result.dxl = None
          else:
-             result.dxl = numpy.zeros(len(self.x) + len(other.x))
- 
-         result.x = numpy.append(self.x, other.x)
+             result.dxl = np.zeros(len(self.x) + len(other.x))
+ 
+         result.x = np.append(self.x, other.x)
          #argsorting
-         ind = numpy.argsort(result.x)
+         ind = np.argsort(result.x)
          result.x = result.x[ind]
-         result.y = numpy.append(self.y, other.y)
+         result.y = np.append(self.y, other.y)
          result.y = result.y[ind]
          if result.dy != None:
-             result.dy = numpy.append(self.dy, other.dy)
+             result.dy = np.append(self.dy, other.dy)
              result.dy = result.dy[ind]
          if result.dx is not None:
-             result.dx = numpy.append(self.dx, other.dx)
+             result.dx = np.append(self.dx, other.dx)
              result.dx = result.dx[ind]
          if result.dxw is not None:
-             result.dxw = numpy.append(self.dxw, other.dxw)
+             result.dxw = np.append(self.dxw, other.dxw)
              result.dxw = result.dxw[ind]
          if result.dxl is not None:
-             result.dxl = numpy.append(self.dxl, other.dxl)
+             result.dxl = np.append(self.dxl, other.dxl)
              result.dxl = result.dxl[ind]
          return result
…
      ## Vector of Q-values at the center of each bin in y
      y_bins = None
+     ## No 2D SESANS data as of yet. Always set it to False
+     isSesans = False

      def __init__(self, data=None, err_data=None, qx_data=None,
                   qy_data=None, q_data=None, mask=None,
                   dqx_data=None, dqy_data=None):
-         self.y_bins = []
-         self.x_bins = []
          DataInfo.__init__(self)
          plottable_2D.__init__(self, data, err_data, qx_data,
                                qy_data, q_data, mask, dqx_data, dqy_data)
+         self.y_bins = []
+         self.x_bins = []
+ 
          if len(self.detector) > 0:
              raise RuntimeError, "Data2D: Detector bank already filled at init"
…
          if clone is None or not issubclass(clone.__class__, Data2D):
-             data = numpy.zeros(length)
-             err_data = numpy.zeros(length)
-             qx_data = numpy.zeros(length)
-             qy_data = numpy.zeros(length)
-             q_data = numpy.zeros(length)
-             mask = numpy.zeros(length)
+             data = np.zeros(length)
+             err_data = np.zeros(length)
+             qx_data = np.zeros(length)
+             qy_data = np.zeros(length)
+             q_data = np.zeros(length)
+             mask = np.zeros(length)
              dqx_data = None
              dqy_data = None
…
          if other.err_data == None or \
              (len(other.err_data) != len(other.data)):
-             err_other = numpy.zeros(len(other.data))
+             err_other = np.zeros(len(other.data))

          # Check that we have errors, otherwise create zero vector
…
          if self.err_data == None or \
              (len(self.err_data) != len(self.data)):
-             err = numpy.zeros(len(other.data))
+             err = np.zeros(len(other.data))
          return err, err_other
…
          # First, check the data compatibility
          dy, dy_other = self._validity_check(other)
-         result = self.clone_without_data(numpy.size(self.data))
+         result = self.clone_without_data(np.size(self.data))
          if self.dqx_data == None or self.dqy_data == None:
              result.dqx_data = None
              result.dqy_data = None
          else:
-             result.dqx_data = numpy.zeros(len(self.data))
-             result.dqy_data = numpy.zeros(len(self.data))
-         for i in range(numpy.size(self.data)):
+             result.dqx_data = np.zeros(len(self.data))
+             result.dqy_data = np.zeros(len(self.data))
+         for i in range(np.size(self.data)):
              result.data[i] = self.data[i]
              if self.err_data is not None and \
-                 numpy.size(self.data) == numpy.size(self.err_data):
+                 np.size(self.data) == np.size(self.err_data):
                  result.err_data[i] = self.err_data[i]
              if self.dqx_data is not None:
…
          # First, check the data compatibility
          self._validity_check_union(other)
-         result = self.clone_without_data(numpy.size(self.data) + \
-                                          numpy.size(other.data))
+         result = self.clone_without_data(np.size(self.data) + \
+                                          np.size(other.data))
          result.xmin = self.xmin
          result.xmax = self.xmax
…
              result.dqy_data = None
          else:
-             result.dqx_data = numpy.zeros(len(self.data) + \
-                                           numpy.size(other.data))
-             result.dqy_data = numpy.zeros(len(self.data) + \
-                                           numpy.size(other.data))
- 
-         result.data = numpy.append(self.data, other.data)
-         result.qx_data = numpy.append(self.qx_data, other.qx_data)
-         result.qy_data = numpy.append(self.qy_data, other.qy_data)
-         result.q_data = numpy.append(self.q_data, other.q_data)
-         result.mask = numpy.append(self.mask, other.mask)
+             result.dqx_data = np.zeros(len(self.data) + \
+                                        np.size(other.data))
+             result.dqy_data = np.zeros(len(self.data) + \
+                                        np.size(other.data))
+ 
+         result.data = np.append(self.data, other.data)
+         result.qx_data = np.append(self.qx_data, other.qx_data)
+         result.qy_data = np.append(self.qy_data, other.qy_data)
+         result.q_data = np.append(self.q_data, other.q_data)
+         result.mask = np.append(self.mask, other.mask)
          if result.err_data is not None:
-             result.err_data = numpy.append(self.err_data, other.err_data)
+             result.err_data = np.append(self.err_data, other.err_data)
          if self.dqx_data is not None:
-             result.dqx_data = numpy.append(self.dqx_data, other.dqx_data)
+             result.dqx_data = np.append(self.dqx_data, other.dqx_data)
          if self.dqy_data is not None:
-             result.dqy_data = numpy.append(self.dqy_data, other.dqy_data)
+             result.dqy_data = np.append(self.dqy_data, other.dqy_data)

          return result
…
          final_dataset.xmin = data.xmin
          final_dataset.ymin = data.ymin
+         final_dataset.isSesans = datainfo.isSesans
          final_dataset.title = datainfo.title
          final_dataset.run = datainfo.run
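Usage note (not part of the changeset): with SESANSData1D merged into Data1D, one class now carries both SANS and SESANS data, switched by the isSesans flag. A minimal sketch, assuming only the constructor signature shown in the diff above:

    import numpy as np
    from sas.sascalc.dataloader.data_info import Data1D

    # Ordinary SANS data: x is Q, y is I(Q).
    sans = Data1D(x=np.array([0.01, 0.02]), y=np.array([100.0, 25.0]),
                  isSesans=False)
    print(sans.x_unit)    # '1/A'
    print(sans.y_unit)    # '1/cm'

    # SESANS data: lam/dlam carry the wavelengths used for the
    # spin-echo length; units switch to 'A' and 'pol' in __init__.
    sesans = Data1D(x=np.array([100.0, 200.0]), y=np.array([0.9, 0.8]),
                    lam=np.array([2.0, 2.0]), dlam=np.array([0.1, 0.1]),
                    isSesans=True)
    print(sesans.x_unit)  # 'A'
    print(sesans.y_unit)  # 'pol'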
src/sas/sascalc/dataloader/manipulations.py
--- rb2b36932
+++ rdd11014

      """
-     if data2d.data == None or data2d.x_bins == None or data2d.y_bins == None:
+     if data2d.data is None or data2d.x_bins is None or data2d.y_bins is None:
          raise ValueError, "Can't convert this data: data=None..."
      new_x = numpy.tile(data2d.x_bins, (len(data2d.y_bins), 1))
…
      qy_data = new_y.flatten()
      q_data = numpy.sqrt(qx_data * qx_data + qy_data * qy_data)
-     if data2d.err_data == None or numpy.any(data2d.err_data <= 0):
+     if data2d.err_data is None or numpy.any(data2d.err_data <= 0):
          new_err_data = numpy.sqrt(numpy.abs(new_data))
      else:
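Side note on the "== None" to "is None" change above: for numpy arrays the two are not equivalent, which is why the identity test is the safe one here. A small illustration (not from the changeset):

    import numpy as np

    a = np.array([1.0, 2.0])
    print(a is None)   # False; a plain identity test, always a single bool
    # 'a == None' compares elementwise, so using it in an 'if' raises
    # "The truth value of an array with more than one element is ambiguous"
    # on modern numpy -- exactly the failure mode this change avoids.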
src/sas/sascalc/dataloader/readers/IgorReader.py
--- rb699768
+++ rdd11014

  #############################################################################
  import os
- import numpy
- import math
- #import logging
  from sas.sascalc.dataloader.data_info import Data2D
  from sas.sascalc.dataloader.data_info import Detector
  from sas.sascalc.dataloader.manipulations import reader2D_converter
+ import numpy as np

  # Look for unit converter
…
      """ Read file """
      if not os.path.isfile(filename):
-         raise ValueError, \
-             "Specified file %s is not a regular file" % filename
- 
-     # Read file
-     f = open(filename, 'r')
-     buf = f.read()
- 
-     # Instantiate data object
+         raise ValueError("Specified file %s is not a regular "
+                          "file" % filename)
+ 
      output = Data2D()
+ 
      output.filename = os.path.basename(filename)
      detector = Detector()
-     if len(output.detector) > 0:
-         print str(output.detector[0])
+     if len(output.detector):
+         print(str(output.detector[0]))
      output.detector.append(detector)
- 
-     # Get content
-     dataStarted = False
- 
-     lines = buf.split('\n')
-     itot = 0
-     x = []
-     y = []
- 
-     ncounts = 0
- 
-     xmin = None
-     xmax = None
-     ymin = None
-     ymax = None
- 
-     i_x = 0
-     i_y = -1
-     i_tot_row = 0
- 
-     isInfo = False
-     isCenter = False
- 
-     data_conv_q = None
-     data_conv_i = None
- 
-     if has_converter == True and output.Q_unit != '1/A':
+ 
+     data_conv_q = data_conv_i = None
+ 
+     if has_converter and output.Q_unit != '1/A':
          data_conv_q = Converter('1/A')
          # Test it
          data_conv_q(1.0, output.Q_unit)

-     if has_converter == True and output.I_unit != '1/cm':
+     if has_converter and output.I_unit != '1/cm':
          data_conv_i = Converter('1/cm')
          # Test it
          data_conv_i(1.0, output.I_unit)
- 
-     for line in lines:
- 
-         # Find setup info line
-         if isInfo:
-             isInfo = False
-             line_toks = line.split()
-             # Wavelength in Angstrom
-             try:
-                 wavelength = float(line_toks[1])
-             except:
-                 msg = "IgorReader: can't read this file, missing wavelength"
-                 raise ValueError, msg
- 
-         #Find # of bins in a row assuming the detector is square.
-         if dataStarted == True:
-             try:
-                 value = float(line)
-             except:
-                 # Found a non-float entry, skip it
-                 continue
- 
-             # Get total bin number
- 
-             i_tot_row += 1
-     i_tot_row = math.ceil(math.sqrt(i_tot_row)) - 1
-     #print "i_tot", i_tot_row
-     size_x = i_tot_row  # 192#128
-     size_y = i_tot_row  # 192#128
-     output.data = numpy.zeros([size_x, size_y])
-     output.err_data = numpy.zeros([size_x, size_y])
- 
-     #Read Header and 2D data
-     for line in lines:
-         # Find setup info line
-         if isInfo:
-             isInfo = False
-             line_toks = line.split()
-             # Wavelength in Angstrom
-             try:
-                 wavelength = float(line_toks[1])
-             except:
-                 msg = "IgorReader: can't read this file, missing wavelength"
-                 raise ValueError, msg
-             # Distance in meters
-             try:
-                 distance = float(line_toks[3])
-             except:
-                 msg = "IgorReader: can't read this file, missing distance"
-                 raise ValueError, msg
- 
-             # Distance in meters
-             try:
-                 transmission = float(line_toks[4])
-             except:
-                 msg = "IgorReader: can't read this file, "
-                 msg += "missing transmission"
-                 raise ValueError, msg
- 
-         if line.count("LAMBDA") > 0:
-             isInfo = True
- 
-         # Find center info line
-         if isCenter:
-             isCenter = False
-             line_toks = line.split()
- 
-             # Center in bin number: Must substrate 1 because
-             #the index starts from 1
-             center_x = float(line_toks[0]) - 1
-             center_y = float(line_toks[1]) - 1
- 
-         if line.count("BCENT") > 0:
-             isCenter = True
- 
-         # Find data start
-         if line.count("***") > 0:
-             dataStarted = True
- 
-             # Check that we have all the info
-             if wavelength == None \
-                 or distance == None \
-                 or center_x == None \
-                 or center_y == None:
-                 msg = "IgorReader:Missing information in data file"
-                 raise ValueError, msg
- 
-         if dataStarted == True:
-             try:
-                 value = float(line)
-             except:
-                 # Found a non-float entry, skip it
-                 continue
- 
-             # Get bin number
-             if math.fmod(itot, i_tot_row) == 0:
-                 i_x = 0
-                 i_y += 1
-             else:
-                 i_x += 1
- 
-             output.data[i_y][i_x] = value
-             ncounts += 1
- 
-             # Det 640 x 640 mm
-             # Q = 4pi/lambda sin(theta/2)
-             # Bin size is 0.5 cm
-             #REmoved +1 from theta = (i_x-center_x+1)*0.5 / distance / 100.0 and
-             #REmoved +1 from theta = (i_y-center_y+1)*0.5 / distance / 100.0
-             #ToDo: Need complete check if the following
-             # covert process is consistent with fitting.py.
-             theta = (i_x - center_x) * 0.5 / distance / 100.0
-             qx = 4.0 * math.pi / wavelength * math.sin(theta/2.0)
- 
-             if has_converter == True and output.Q_unit != '1/A':
-                 qx = data_conv_q(qx, units=output.Q_unit)
- 
-             if xmin == None or qx < xmin:
-                 xmin = qx
-             if xmax == None or qx > xmax:
-                 xmax = qx
- 
-             theta = (i_y - center_y) * 0.5 / distance / 100.0
-             qy = 4.0 * math.pi / wavelength * math.sin(theta / 2.0)
- 
-             if has_converter == True and output.Q_unit != '1/A':
-                 qy = data_conv_q(qy, units=output.Q_unit)
- 
-             if ymin == None or qy < ymin:
-                 ymin = qy
-             if ymax == None or qy > ymax:
-                 ymax = qy
- 
-             if not qx in x:
-                 x.append(qx)
-             if not qy in y:
-                 y.append(qy)
- 
-             itot += 1
- 
- 
+ 
+     data_row = 0
+     wavelength = distance = center_x = center_y = None
+     dataStarted = isInfo = isCenter = False
+ 
+     with open(filename, 'r') as f:
+         for line in f:
+             data_row += 1
+             # Find setup info line
+             if isInfo:
+                 isInfo = False
+                 line_toks = line.split()
+                 # Wavelength in Angstrom
+                 try:
+                     wavelength = float(line_toks[1])
+                 except ValueError:
+                     msg = "IgorReader: can't read this file, missing wavelength"
+                     raise ValueError(msg)
+                 # Distance in meters
+                 try:
+                     distance = float(line_toks[3])
+                 except ValueError:
+                     msg = "IgorReader: can't read this file, missing distance"
+                     raise ValueError(msg)
+ 
+                 # Distance in meters
+                 try:
+                     transmission = float(line_toks[4])
+                 except:
+                     msg = "IgorReader: can't read this file, "
+                     msg += "missing transmission"
+                     raise ValueError(msg)
+ 
+             if line.count("LAMBDA"):
+                 isInfo = True
+ 
+             # Find center info line
+             if isCenter:
+                 isCenter = False
+                 line_toks = line.split()
+ 
+                 # Center in bin number: Must subtract 1 because
+                 # the index starts from 1
+                 center_x = float(line_toks[0]) - 1
+                 center_y = float(line_toks[1]) - 1
+ 
+             if line.count("BCENT"):
+                 isCenter = True
+ 
+             # Find data start
+             if line.count("***"):
+                 # now have to continue to blank line
+                 dataStarted = True
+ 
+                 # Check that we have all the info
+                 if (wavelength is None
+                         or distance is None
+                         or center_x is None
+                         or center_y is None):
+                     msg = "IgorReader:Missing information in data file"
+                     raise ValueError(msg)
+ 
+             if dataStarted:
+                 if len(line.rstrip()):
+                     continue
+                 else:
+                     break
+ 
+     # The data is loaded in row major order (last index changing most
+     # rapidly). However, the original data is in column major order (first
+     # index changing most rapidly). The swap to column major order is done
+     # in reader2D_converter at the end of this method.
+     data = np.loadtxt(filename, skiprows=data_row)
+     size_x = size_y = int(np.rint(np.sqrt(data.size)))
+     output.data = np.reshape(data, (size_x, size_y))
+     output.err_data = np.zeros_like(output.data)
+ 
+     # Det 640 x 640 mm
+     # Q = 4 * pi/lambda * sin(theta/2)
+     # Bin size is 0.5 cm
+     # Removed +1 from theta = (i_x - center_x + 1)*0.5 / distance / 100.0 and
+     # Removed +1 from theta = (i_y - center_y + 1)*0.5 / distance / 100.0
+     # ToDo: Need complete check if the following
+     # convert process is consistent with fitting.py.
+ 
+     # calculate qx, qy bin centers of each pixel in the image
+     theta = (np.arange(size_x) - center_x) * 0.5 / distance / 100.
+     qx = 4 * np.pi / wavelength * np.sin(theta/2)
+ 
+     theta = (np.arange(size_y) - center_y) * 0.5 / distance / 100.
+     qy = 4 * np.pi / wavelength * np.sin(theta/2)
+ 
+     if has_converter and output.Q_unit != '1/A':
+         qx = data_conv_q(qx, units=output.Q_unit)
+         qy = data_conv_q(qx, units=output.Q_unit)
+ 
+     xmax = np.max(qx)
+     xmin = np.min(qx)
+     ymax = np.max(qy)
+     ymin = np.min(qy)
+ 
+     # calculate edge offset in q.
      theta = 0.25 / distance / 100.0
-     xstep = 4.0 * math.pi / wavelength * math.sin(theta / 2.0)
+     xstep = 4.0 * np.pi / wavelength * np.sin(theta / 2.0)

      theta = 0.25 / distance / 100.0
-     ystep = 4.0 * math.pi / wavelength * math.sin(theta / 2.0)
+     ystep = 4.0 * np.pi / wavelength * np.sin(theta / 2.0)

      # Store all data ######################################
      # Store wavelength
-     if has_converter == True and output.source.wavelength_unit != 'A':
+     if has_converter and output.source.wavelength_unit != 'A':
          conv = Converter('A')
          wavelength = conv(wavelength, units=output.source.wavelength_unit)
…
      # Store distance
-     if has_converter == True and detector.distance_unit != 'm':
+     if has_converter and detector.distance_unit != 'm':
          conv = Converter('m')
          distance = conv(distance, units=detector.distance_unit)
…
      output.sample.transmission = transmission

-     # Store pixel size
+     # Store pixel size (mm)
      pixel = 5.0
-     if has_converter == True and detector.pixel_size_unit != 'mm':
+     if has_converter and detector.pixel_size_unit != 'mm':
          conv = Converter('mm')
          pixel = conv(pixel, units=detector.pixel_size_unit)
…
      # Store limits of the image (2D array)
-     xmin = xmin - xstep / 2.0
-     xmax = xmax + xstep / 2.0
-     ymin = ymin - ystep / 2.0
-     ymax = ymax + ystep / 2.0
-     if has_converter == True and output.Q_unit != '1/A':
+     xmin -= xstep / 2.0
+     xmax += xstep / 2.0
+     ymin -= ystep / 2.0
+     ymax += ystep / 2.0
+     if has_converter and output.Q_unit != '1/A':
          xmin = data_conv_q(xmin, units=output.Q_unit)
          xmax = data_conv_q(xmax, units=output.Q_unit)
…
      # Store x and y axis bin centers
-     output.x_bins = x
-     output.y_bins = y
+     output.x_bins = qx.tolist()
+     output.y_bins = qy.tolist()

      # Units
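The rewrite above replaces the per-pixel loop with a single np.loadtxt call and a vectorized Q calculation. A standalone sketch of the same bin-center arithmetic, with illustrative geometry values (0.5 cm pixels, distance in m, wavelength in A):

    import numpy as np

    size_x, center_x = 128, 63.5        # illustrative detector geometry
    distance, wavelength = 13.0, 6.0

    # One half-angle per detector column, then Q = 4*pi/lambda * sin(theta/2),
    # matching the loop-free form introduced in the reader.
    theta = (np.arange(size_x) - center_x) * 0.5 / distance / 100.0
    qx = 4.0 * np.pi / wavelength * np.sin(theta / 2.0)
    print(qx.shape)                     # (128,): one bin center per column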
src/sas/sascalc/dataloader/readers/abs_reader.py
--- rb699768
+++ r9a5097c

  ######################################################################

- import numpy
+ import numpy as np
  import os
  from sas.sascalc.dataloader.data_info import Data1D
…
          buff = input_f.read()
          lines = buff.split('\n')
-         x = numpy.zeros(0)
-         y = numpy.zeros(0)
-         dy = numpy.zeros(0)
-         dx = numpy.zeros(0)
+         x = np.zeros(0)
+         y = np.zeros(0)
+         dy = np.zeros(0)
+         dx = np.zeros(0)
          output = Data1D(x, y, dy=dy, dx=dx)
          detector = Detector()
…
                  _dy = data_conv_i(_dy, units=output.y_unit)

-                 x = numpy.append(x, _x)
-                 y = numpy.append(y, _y)
-                 dy = numpy.append(dy, _dy)
-                 dx = numpy.append(dx, _dx)
+                 x = np.append(x, _x)
+                 y = np.append(y, _y)
+                 dy = np.append(dy, _dy)
+                 dx = np.append(dx, _dx)

          except:
src/sas/sascalc/dataloader/readers/ascii_reader.py
--- rd2471870
+++ r9a5097c

- import numpy
+ import numpy as np
  import os
  from sas.sascalc.dataloader.data_info import Data1D
…
          # Arrays for data storage
-         tx = numpy.zeros(0)
-         ty = numpy.zeros(0)
-         tdy = numpy.zeros(0)
-         tdx = numpy.zeros(0)
+         tx = np.zeros(0)
+         ty = np.zeros(0)
+         tdy = np.zeros(0)
+         tdx = np.zeros(0)

          # The first good line of data will define whether
…
                     is_data == False:
                      try:
-                         tx = numpy.zeros(0)
-                         ty = numpy.zeros(0)
-                         tdy = numpy.zeros(0)
-                         tdx = numpy.zeros(0)
+                         tx = np.zeros(0)
+                         ty = np.zeros(0)
+                         tdy = np.zeros(0)
+                         tdx = np.zeros(0)
                      except:
                          pass

                  if has_error_dy == True:
-                     tdy = numpy.append(tdy, _dy)
+                     tdy = np.append(tdy, _dy)
                  if has_error_dx == True:
-                     tdx = numpy.append(tdx, _dx)
-                 tx = numpy.append(tx, _x)
-                 ty = numpy.append(ty, _y)
+                     tdx = np.append(tdx, _dx)
+                 tx = np.append(tx, _x)
+                 ty = np.append(ty, _y)

                  #To remember the # of columns on the current line
…
              #Let's re-order the data to make cal.
              # curve look better some cases
-             ind = numpy.lexsort((ty, tx))
-             x = numpy.zeros(len(tx))
-             y = numpy.zeros(len(ty))
-             dy = numpy.zeros(len(tdy))
-             dx = numpy.zeros(len(tdx))
+             ind = np.lexsort((ty, tx))
+             x = np.zeros(len(tx))
+             y = np.zeros(len(ty))
+             dy = np.zeros(len(tdy))
+             dx = np.zeros(len(tdx))
              output = Data1D(x, y, dy=dy, dx=dx)
              self.filename = output.filename = basename
…
              output.y = y[x != 0]
              output.dy = dy[x != 0] if has_error_dy == True\
-                 else numpy.zeros(len(output.y))
+                 else np.zeros(len(output.y))
              output.dx = dx[x != 0] if has_error_dx == True\
-                 else numpy.zeros(len(output.x))
+                 else np.zeros(len(output.x))

              output.xaxis("\\rm{Q}", 'A^{-1}')
src/sas/sascalc/dataloader/readers/cansas_constants.py
--- r250fec92
+++ rad4632c

      "variable" : None,
      "children" : {"Idata" : SASDATA_IDATA,
+                   "Sesans": {"storeas": "content"},
+                   "zacceptance": {"storeas": "float"},
                    "<any>" : ANY
                   }
src/sas/sascalc/dataloader/readers/cansas_reader.py
--- r3beacab4
+++ r3d6c010

              self.add_intermediate()
          else:
-             # I and Q - 1D data
-             if tagname == 'I' and isinstance(self.current_dataset,
-                                              plottable_1D):
-                 self.current_dataset.yaxis("Intensity", unit)
-                 data_list = node.text.split(',')
-                 for item in data_list:
-                     data_point, unit = self._get_node_value_from_text(node,
-                                                                       item)
-                     self.current_dataset.y = np.append(
-                         self.current_dataset.y, data_point)
-             elif tagname == 'Idev' and isinstance(self.current_dataset,
-                                                   plottable_1D):
-                 data_list = node.text.split(',')
-                 for item in data_list:
-                     data_point, unit = self._get_node_value_from_text(node,
-                                                                       item)
-                     self.current_dataset.dy = np.append(
-                         self.current_dataset.dy, data_point)
-             elif tagname == 'Q':
-                 data_list = node.text.split(',')
-                 for item in data_list:
-                     data_point, unit = self._get_node_value_from_text(node,
-                                                                       item)
-                     self.current_dataset.x = np.append(
-                         self.current_dataset.x, data_point)
-             elif tagname == 'Qdev':
-                 data_list = node.text.split(',')
-                 for item in data_list:
-                     data_point, unit = self._get_node_value_from_text(node,
-                                                                       item)
-                     self.current_dataset.dx = np.append(
-                         self.current_dataset.dx, data_point)
-             elif tagname == 'dQw':
-                 data_list = node.text.split(',')
-                 for item in data_list:
-                     data_point, unit = self._get_node_value_from_text(node,
-                                                                       item)
-                     self.current_dataset.dqw = np.append(
-                         self.current_dataset.dqw, data_point)
-             elif tagname == 'dQl':
-                 data_list = node.text.split(',')
-                 for item in data_list:
-                     data_point, unit = self._get_node_value_from_text(node,
-                                                                       item)
-                     self.current_dataset.dxl = np.append(
-                         self.current_dataset.dxl, data_point)
-             elif tagname == 'Qmean':
-                 pass
-             elif tagname == 'Shadowfactor':
-                 pass
-             elif isinstance(self.current_dataset, plottable_2D):
+             if isinstance(self.current_dataset, plottable_2D):
                  data_point = node.text
                  unit = attr.get('unit', '')
…
              elif tagname == 'SASnote':
                  self.current_datainfo.notes.append(data_point)
+ 
+             # I and Q - 1D data
+             elif tagname == 'I' and isinstance(self.current_dataset, plottable_1D):
+                 unit_list = unit.split("|")
+                 if len(unit_list) > 1:
+                     self.current_dataset.yaxis(unit_list[0].strip(),
+                                                unit_list[1].strip())
+                 else:
+                     self.current_dataset.yaxis("Intensity", unit)
+                 self.current_dataset.y = np.append(self.current_dataset.y, data_point)
+             elif tagname == 'Idev' and isinstance(self.current_dataset, plottable_1D):
+                 self.current_dataset.dy = np.append(self.current_dataset.dy, data_point)
+             elif tagname == 'Q':
+                 unit_list = unit.split("|")
+                 if len(unit_list) > 1:
+                     self.current_dataset.xaxis(unit_list[0].strip(),
+                                                unit_list[1].strip())
+                 else:
+                     self.current_dataset.xaxis("Q", unit)
+                 self.current_dataset.x = np.append(self.current_dataset.x, data_point)
+             elif tagname == 'Qdev':
+                 self.current_dataset.dx = np.append(self.current_dataset.dx, data_point)
+             elif tagname == 'dQw':
+                 self.current_dataset.dxw = np.append(self.current_dataset.dxw, data_point)
+             elif tagname == 'dQl':
+                 self.current_dataset.dxl = np.append(self.current_dataset.dxl, data_point)
+             elif tagname == 'Qmean':
+                 pass
+             elif tagname == 'Shadowfactor':
+                 pass
+             elif tagname == 'Sesans':
+                 self.current_datainfo.isSesans = bool(data_point)
+             elif tagname == 'zacceptance':
+                 self.current_datainfo.sample.zacceptance = (data_point, unit)

              # I and Qx, Qy - 2D data
…
          self._write_data_linearized(datainfo, entry_node)
          # Transmission Spectrum Info
-         self._write_trans_spectrum(datainfo, entry_node)
+         # TODO: fix the writer to linearize all data, including T_spectrum
+         # self._write_trans_spectrum(datainfo, entry_node)
          # Sample info
          self._write_sample_info(datainfo, entry_node)
…
              node.append(point)
              self.write_node(point, "Q", datainfo.x[i],
-                             {'unit': datainfo.x_unit})
+                             {'unit': datainfo._xaxis + " | " + datainfo._xunit})
              if len(datainfo.y) >= i:
                  self.write_node(point, "I", datainfo.y[i],
-                                 {'unit': datainfo.y_unit})
+                                 {'unit': datainfo._yaxis + " | " + datainfo._yunit})
              if datainfo.dy is not None and len(datainfo.dy) > i:
                  self.write_node(point, "Idev", datainfo.dy[i],
-                                 {'unit': datainfo.y_unit})
+                                 {'unit': datainfo._yaxis + " | " + datainfo._yunit})
              if datainfo.dx is not None and len(datainfo.dx) > i:
                  self.write_node(point, "Qdev", datainfo.dx[i],
-                                 {'unit': datainfo.x_unit})
+                                 {'unit': datainfo._xaxis + " | " + datainfo._xunit})
              if datainfo.dxw is not None and len(datainfo.dxw) > i:
                  self.write_node(point, "dQw", datainfo.dxw[i],
-                                 {'unit': datainfo.x_unit})
+                                 {'unit': datainfo._xaxis + " | " + datainfo._xunit})
              if datainfo.dxl is not None and len(datainfo.dxl) > i:
                  self.write_node(point, "dQl", datainfo.dxl[i],
-                                 {'unit': datainfo.x_unit})
- 
-     def _write_data_linearized(self, datainfo, entry_node):
-         """
-         Writes 1D I and Q data to an XML file is a single Idata element
- 
-         :param datainfo: The Data1D object the information is coming from
-         :param entry_node: lxml node ElementTree object to be appended to
-         """
-         node = self.create_element("SASdata")
-         self.append(node, entry_node)
- 
-         point = self.create_element("Idata")
-         node.append(point)
-         x = ','.join([str(datainfo.x[i]) for i in xrange(len(datainfo.x))])
-         self.write_node(point, "Q", x, {'unit': datainfo.x_unit})
-         y = ','.join([str(datainfo.y[i]) for i in xrange(len(datainfo.y))])
-         self.write_node(point, "I", y, {'unit': datainfo.y_unit})
-         if datainfo.dy is not None:
-             dy = ','.join(
-                 [str(datainfo.dy[i]) for i in xrange(len(datainfo.dy))])
-             self.write_node(point, "Idev", dy, {'unit': datainfo.y_unit})
-         if datainfo.dx is not None:
-             dx = ','.join(
-                 [str(datainfo.dx[i]) for i in xrange(len(datainfo.dx))])
-             self.write_node(point, "Qdev", dx, {'unit': datainfo.x_unit})
-         if datainfo.dxw is not None:
-             dxw = ','.join(
-                 [str(datainfo.dxw[i]) for i in xrange(len(datainfo.dxw))])
-             self.write_node(point, "dQw", dxw, {'unit': datainfo.x_unit})
-         if datainfo.dxl is not None:
-             dxl = ','.join(
-                 [str(datainfo.dxl[i]) for i in xrange(len(datainfo.dxl))])
-             self.write_node(point, "dQl", dxl, {'unit': datainfo.x_unit})
+                                 {'unit': datainfo._xaxis + " | " + datainfo._xunit})
+         if datainfo.isSesans:
+             sesans = self.create_element("Sesans")
+             sesans.text = str(datainfo.isSesans)
+             node.append(sesans)
+             self.write_node(node, "zacceptance", datainfo.sample.zacceptance[0],
+                             {'unit': datainfo.sample.zacceptance[1]})

      def _write_data_2d(self, datainfo, entry_node):
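For reference, a minimal SASdata fragment exercising the new tags and the "label | unit" convention the reader and writer now share (assumed file content, not taken from the changeset):

    from lxml import etree

    xml = b"""<SASdata>
      <Idata>
        <Q unit="Q | 1/A">0.01</Q>
        <I unit="Intensity | 1/cm">100.0</I>
      </Idata>
      <Sesans>True</Sesans>
      <zacceptance unit="1/A">0.1</zacceptance>
    </SASdata>"""
    node = etree.fromstring(xml)
    print([child.tag for child in node])  # ['Idata', 'Sesans', 'zacceptance']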
src/sas/sascalc/dataloader/readers/cansas_reader_HDF5.py
rbbd0f37 rc94280c 9 9 import sys 10 10 11 from sas.sascalc.dataloader.data_info import plottable_1D, plottable_2D, Data1D, Data2D, DataInfo, Process, Aperture 12 from sas.sascalc.dataloader.data_info import Collimation, TransmissionSpectrum, Detector 11 from sas.sascalc.dataloader.data_info import plottable_1D, plottable_2D,\ 12 Data1D, Data2D, DataInfo, Process, Aperture, Collimation, \ 13 TransmissionSpectrum, Detector 13 14 from sas.sascalc.dataloader.data_info import combine_data_info_with_plottable 14 15 15 16 16 17 17 class Reader(): 18 18 """ 19 A class for reading in CanSAS v2.0 data files. The existing iteration opens Mantid generated HDF5 formatted files 20 with file extension .h5/.H5. Any number of data sets may be present within the file and any dimensionality of data 21 may be used. Currently 1D and 2D SAS data sets are supported, but future implementations will include 1D and 2D 22 SESANS data. 23 24 Any number of SASdata sets may be present in a SASentry and the data within can be either 1D I(Q) or 2D I(Qx, Qy). 19 A class for reading in CanSAS v2.0 data files. The existing iteration opens 20 Mantid generated HDF5 formatted files with file extension .h5/.H5. Any 21 number of data sets may be present within the file and any dimensionality 22 of data may be used. Currently 1D and 2D SAS data sets are supported, but 23 future implementations will include 1D and 2D SESANS data. 24 25 Any number of SASdata sets may be present in a SASentry and the data within 26 can be either 1D I(Q) or 2D I(Qx, Qy). 25 27 26 28 Also supports reading NXcanSAS formatted HDF5 files … … 30 32 """ 31 33 32 # #CanSAS version34 # CanSAS version 33 35 cansas_version = 2.0 34 # #Logged warnings or messages36 # Logged warnings or messages 35 37 logging = None 36 # #List of errors for the current data set38 # List of errors for the current data set 37 39 errors = None 38 # #Raw file contents to be processed40 # Raw file contents to be processed 39 41 raw_data = None 40 # #Data info currently being read in42 # Data info currently being read in 41 43 current_datainfo = None 42 # #SASdata set currently being read in44 # SASdata set currently being read in 43 45 current_dataset = None 44 # #List of plottable1D objects that should be linked to the current_datainfo46 # List of plottable1D objects that should be linked to the current_datainfo 45 47 data1d = None 46 # #List of plottable2D objects that should be linked to the current_datainfo48 # List of plottable2D objects that should be linked to the current_datainfo 47 49 data2d = None 48 # #Data type name50 # Data type name 49 51 type_name = "CanSAS 2.0" 50 # #Wildcards52 # Wildcards 51 53 type = ["CanSAS 2.0 HDF5 Files (*.h5)|*.h5"] 52 # #List of allowed extensions54 # List of allowed extensions 53 55 ext = ['.h5', '.H5'] 54 # #Flag to bypass extension check55 allow_all = False56 # #List of files to return56 # Flag to bypass extension check 57 allow_all = True 58 # List of files to return 57 59 output = None 58 60 … … 64 66 :return: List of Data1D/2D objects and/or a list of errors. 
65 67 """ 66 # # Reinitialize the classwhen loading a new data file to reset all class variables68 # Reinitialize when loading a new data file to reset all class variables 67 69 self.reset_class_variables() 68 # #Check that the file exists70 # Check that the file exists 69 71 if os.path.isfile(filename): 70 72 basename = os.path.basename(filename) … … 72 74 # If the file type is not allowed, return empty list 73 75 if extension in self.ext or self.allow_all: 74 # #Load the data file76 # Load the data file 75 77 self.raw_data = h5py.File(filename, 'r') 76 # #Read in all child elements of top level SASroot78 # Read in all child elements of top level SASroot 77 79 self.read_children(self.raw_data, []) 78 # #Add the last data set to the list of outputs80 # Add the last data set to the list of outputs 79 81 self.add_data_set() 80 # #Close the data file82 # Close the data file 81 83 self.raw_data.close() 82 # #Return data set(s)84 # Return data set(s) 83 85 return self.output 84 86 … … 110 112 """ 111 113 112 # #Loop through each element of the parent and process accordingly114 # Loop through each element of the parent and process accordingly 113 115 for key in data.keys(): 114 # #Get all information for the current key116 # Get all information for the current key 115 117 value = data.get(key) 116 118 if value.attrs.get(u'canSAS_class') is not None: … … 126 128 self.parent_class = class_name 127 129 parent_list.append(key) 128 ## If this is a new sasentry, store the current data sets and create a fresh Data1D/2D object 130 # If a new sasentry, store the current data sets and create 131 # a fresh Data1D/2D object 129 132 if class_prog.match(u'SASentry'): 130 133 self.add_data_set(key) 131 134 elif class_prog.match(u'SASdata'): 132 135 self._initialize_new_data_set(parent_list) 133 # #Recursion step to access data within the group136 # Recursion step to access data within the group 134 137 self.read_children(value, parent_list) 135 138 self.add_intermediate() … … 137 140 138 141 elif isinstance(value, h5py.Dataset): 139 # #If this is a dataset, store the data appropriately142 # If this is a dataset, store the data appropriately 140 143 data_set = data[key][:] 141 144 unit = self._get_unit(value) 142 145 143 # #I and Q Data146 # I and Q Data 144 147 if key == u'I': 145 if type(self.current_dataset) is plottable_2D:148 if isinstance(self.current_dataset, plottable_2D): 146 149 self.current_dataset.data = data_set 147 150 self.current_dataset.zaxis("Intensity", unit) … … 151 154 continue 152 155 elif key == u'Idev': 153 if type(self.current_dataset) is plottable_2D:156 if isinstance(self.current_dataset, plottable_2D): 154 157 self.current_dataset.err_data = data_set.flatten() 155 158 else: … … 158 161 elif key == u'Q': 159 162 self.current_dataset.xaxis("Q", unit) 160 if type(self.current_dataset) is plottable_2D:163 if isinstance(self.current_dataset, plottable_2D): 161 164 self.current_dataset.q = data_set.flatten() 162 165 else: … … 166 169 self.current_dataset.dx = data_set.flatten() 167 170 continue 171 elif key == u'dQw': 172 self.current_dataset.dxw = data_set.flatten() 173 continue 174 elif key == u'dQl': 175 self.current_dataset.dxl = data_set.flatten() 176 continue 168 177 elif key == u'Qy': 169 178 self.current_dataset.yaxis("Q_y", unit) … … 183 192 self.current_dataset.mask = data_set.flatten() 184 193 continue 194 # Transmission Spectrum 195 elif (key == u'T' 196 and self.parent_class == u'SAStransmission_spectrum'): 197 self.trans_spectrum.transmission = data_set.flatten() 198 continue 
199 elif (key == u'Tdev' 200 and self.parent_class == u'SAStransmission_spectrum'): 201 self.trans_spectrum.transmission_deviation = \ 202 data_set.flatten() 203 continue 204 elif (key == u'lambda' 205 and self.parent_class == u'SAStransmission_spectrum'): 206 self.trans_spectrum.wavelength = data_set.flatten() 207 continue 185 208 186 209 for data_point in data_set: 187 # #Top Level Meta Data210 # Top Level Meta Data 188 211 if key == u'definition': 189 212 self.current_datainfo.meta_data['reader'] = data_point … … 201 224 self.current_datainfo.notes.append(data_point) 202 225 203 ## Sample Information 204 elif key == u'Title' and self.parent_class == u'SASsample': # CanSAS 2.0 format 226 # Sample Information 227 # CanSAS 2.0 format 228 elif key == u'Title' and self.parent_class == u'SASsample': 205 229 self.current_datainfo.sample.name = data_point 206 elif key == u'ID' and self.parent_class == u'SASsample': # NXcanSAS format 230 # NXcanSAS format 231 elif key == u'name' and self.parent_class == u'SASsample': 207 232 self.current_datainfo.sample.name = data_point 208 elif key == u'thickness' and self.parent_class == u'SASsample': 233 # NXcanSAS format 234 elif key == u'ID' and self.parent_class == u'SASsample': 235 self.current_datainfo.sample.name = data_point 236 elif (key == u'thickness' 237 and self.parent_class == u'SASsample'): 209 238 self.current_datainfo.sample.thickness = data_point 210 elif key == u'temperature' and self.parent_class == u'SASsample': 239 elif (key == u'temperature' 240 and self.parent_class == u'SASsample'): 211 241 self.current_datainfo.sample.temperature = data_point 212 elif key == u'transmission' and self.parent_class == u'SASsample': 242 elif (key == u'transmission' 243 and self.parent_class == u'SASsample'): 213 244 self.current_datainfo.sample.transmission = data_point 214 elif key == u'x_position' and self.parent_class == u'SASsample': 245 elif (key == u'x_position' 246 and self.parent_class == u'SASsample'): 215 247 self.current_datainfo.sample.position.x = data_point 216 elif key == u'y_position' and self.parent_class == u'SASsample': 248 elif (key == u'y_position' 249 and self.parent_class == u'SASsample'): 217 250 self.current_datainfo.sample.position.y = data_point 218 elif key == u'p olar_angle' and self.parent_class == u'SASsample':251 elif key == u'pitch' and self.parent_class == u'SASsample': 219 252 self.current_datainfo.sample.orientation.x = data_point 220 elif key == u'azimuthal_angle' and self.parent_class == u'SASsample': 253 elif key == u'yaw' and self.parent_class == u'SASsample': 254 self.current_datainfo.sample.orientation.y = data_point 255 elif key == u'roll' and self.parent_class == u'SASsample': 221 256 self.current_datainfo.sample.orientation.z = data_point 222 elif key == u'details' and self.parent_class == u'SASsample': 257 elif (key == u'details' 258 and self.parent_class == u'SASsample'): 223 259 self.current_datainfo.sample.details.append(data_point) 224 260 225 ## Instrumental Information 226 elif key == u'name' and self.parent_class == u'SASinstrument': 261 # Instrumental Information 262 elif (key == u'name' 263 and self.parent_class == u'SASinstrument'): 227 264 self.current_datainfo.instrument = data_point 228 265 elif key == u'name' and self.parent_class == u'SASdetector': … … 231 268 self.detector.distance = float(data_point) 232 269 self.detector.distance_unit = unit 233 elif key == u'slit_length' and self.parent_class == u'SASdetector': 270 elif (key == u'slit_length' 271 and self.parent_class == u'SASdetector'): 
234 272 self.detector.slit_length = float(data_point) 235 273 self.detector.slit_length_unit = unit 236 elif key == u'x_position' and self.parent_class == u'SASdetector': 274 elif (key == u'x_position' 275 and self.parent_class == u'SASdetector'): 237 276 self.detector.offset.x = float(data_point) 238 277 self.detector.offset_unit = unit 239 elif key == u'y_position' and self.parent_class == u'SASdetector': 278 elif (key == u'y_position' 279 and self.parent_class == u'SASdetector'): 240 280 self.detector.offset.y = float(data_point) 241 281 self.detector.offset_unit = unit 242 elif key == u'polar_angle' and self.parent_class == u'SASdetector': 282 elif (key == u'pitch' 283 and self.parent_class == u'SASdetector'): 243 284 self.detector.orientation.x = float(data_point) 244 285 self.detector.orientation_unit = unit 245 elif key == u' azimuthal_angle' and self.parent_class == u'SASdetector':286 elif key == u'roll' and self.parent_class == u'SASdetector': 246 287 self.detector.orientation.z = float(data_point) 247 288 self.detector.orientation_unit = unit 248 elif key == u'beam_center_x' and self.parent_class == u'SASdetector': 289 elif key == u'yaw' and self.parent_class == u'SASdetector': 290 self.detector.orientation.y = float(data_point) 291 self.detector.orientation_unit = unit 292 elif (key == u'beam_center_x' 293 and self.parent_class == u'SASdetector'): 249 294 self.detector.beam_center.x = float(data_point) 250 295 self.detector.beam_center_unit = unit 251 elif key == u'beam_center_y' and self.parent_class == u'SASdetector': 296 elif (key == u'beam_center_y' 297 and self.parent_class == u'SASdetector'): 252 298 self.detector.beam_center.y = float(data_point) 253 299 self.detector.beam_center_unit = unit 254 elif key == u'x_pixel_size' and self.parent_class == u'SASdetector': 300 elif (key == u'x_pixel_size' 301 and self.parent_class == u'SASdetector'): 255 302 self.detector.pixel_size.x = float(data_point) 256 303 self.detector.pixel_size_unit = unit 257 elif key == u'y_pixel_size' and self.parent_class == u'SASdetector': 304 elif (key == u'y_pixel_size' 305 and self.parent_class == u'SASdetector'): 258 306 self.detector.pixel_size.y = float(data_point) 259 307 self.detector.pixel_size_unit = unit 260 elif key == u'SSD' and self.parent_class == u'SAScollimation': 308 elif (key == u'distance' 309 and self.parent_class == u'SAScollimation'): 261 310 self.collimation.length = data_point 262 311 self.collimation.length_unit = unit 263 elif key == u'name' and self.parent_class == u'SAScollimation': 312 elif (key == u'name' 313 and self.parent_class == u'SAScollimation'): 264 314 self.collimation.name = data_point 265 266 ## Process Information 267 elif key == u'name' and self.parent_class == u'SASprocess': 315 elif (key == u'shape' 316 and self.parent_class == u'SASaperture'): 317 self.aperture.shape = data_point 318 elif (key == u'x_gap' 319 and self.parent_class == u'SASaperture'): 320 self.aperture.size.x = data_point 321 elif (key == u'y_gap' 322 and self.parent_class == u'SASaperture'): 323 self.aperture.size.y = data_point 324 325 # Process Information 326 elif (key == u'Title' 327 and self.parent_class == u'SASprocess'): # CanSAS 2.0 268 328 self.process.name = data_point 269 elif key == u'Title' and self.parent_class == u'SASprocess': # CanSAS 2.0 format 329 elif (key == u'name' 330 and self.parent_class == u'SASprocess'): # NXcanSAS 270 331 self.process.name = data_point 271 elif key == u'name' and self.parent_class == u'SASprocess': # NXcanSAS format 272 self.process.name = 
data_point 273 elif key == u'description' and self.parent_class == u'SASprocess': 332 elif (key == u'description' 333 and self.parent_class == u'SASprocess'): 274 334 self.process.description = data_point 275 335 elif key == u'date' and self.parent_class == u'SASprocess': 276 336 self.process.date = data_point 337 elif key == u'term' and self.parent_class == u'SASprocess': 338 self.process.term = data_point 277 339 elif self.parent_class == u'SASprocess': 278 340 self.process.notes.append(data_point) 279 341 280 ## Transmission Spectrum 281 elif key == u'T' and self.parent_class == u'SAStransmission_spectrum': 282 self.trans_spectrum.transmission.append(data_point) 283 elif key == u'Tdev' and self.parent_class == u'SAStransmission_spectrum': 284 self.trans_spectrum.transmission_deviation.append(data_point) 285 elif key == u'lambda' and self.parent_class == u'SAStransmission_spectrum': 286 self.trans_spectrum.wavelength.append(data_point) 287 288 ## Source 289 elif key == u'wavelength' and self.parent_class == u'SASdata': 342 # Source 343 elif (key == u'wavelength' 344 and self.parent_class == u'SASdata'): 290 345 self.current_datainfo.source.wavelength = data_point 291 346 self.current_datainfo.source.wavelength_unit = unit 292 elif key == u'incident_wavelength' and self.parent_class == u'SASsource': 347 elif (key == u'incident_wavelength' 348 and self.parent_class == 'SASsource'): 293 349 self.current_datainfo.source.wavelength = data_point 294 350 self.current_datainfo.source.wavelength_unit = unit 295 elif key == u'wavelength_max' and self.parent_class == u'SASsource': 351 elif (key == u'wavelength_max' 352 and self.parent_class == u'SASsource'): 296 353 self.current_datainfo.source.wavelength_max = data_point 297 354 self.current_datainfo.source.wavelength_max_unit = unit 298 elif key == u'wavelength_min' and self.parent_class == u'SASsource': 355 elif (key == u'wavelength_min' 356 and self.parent_class == u'SASsource'): 299 357 self.current_datainfo.source.wavelength_min = data_point 300 358 self.current_datainfo.source.wavelength_min_unit = unit 301 elif key == u'wavelength_spread' and self.parent_class == u'SASsource': 302 self.current_datainfo.source.wavelength_spread = data_point 303 self.current_datainfo.source.wavelength_spread_unit = unit 304 elif key == u'beam_size_x' and self.parent_class == u'SASsource': 359 elif (key == u'incident_wavelength_spread' 360 and self.parent_class == u'SASsource'): 361 self.current_datainfo.source.wavelength_spread = \ 362 data_point 363 self.current_datainfo.source.wavelength_spread_unit = \ 364 unit 365 elif (key == u'beam_size_x' 366 and self.parent_class == u'SASsource'): 305 367 self.current_datainfo.source.beam_size.x = data_point 306 368 self.current_datainfo.source.beam_size_unit = unit 307 elif key == u'beam_size_y' and self.parent_class == u'SASsource': 369 elif (key == u'beam_size_y' 370 and self.parent_class == u'SASsource'): 308 371 self.current_datainfo.source.beam_size.y = data_point 309 372 self.current_datainfo.source.beam_size_unit = unit 310 elif key == u'beam_shape' and self.parent_class == u'SASsource': 373 elif (key == u'beam_shape' 374 and self.parent_class == u'SASsource'): 311 375 self.current_datainfo.source.beam_shape = data_point 312 elif key == u'radiation' and self.parent_class == u'SASsource': 376 elif (key == u'radiation' 377 and self.parent_class == u'SASsource'): 313 378 self.current_datainfo.source.radiation = data_point 314 elif key == u'transmission' and self.parent_class == u'SASdata': 379 elif (key == 
u'transmission' 380 and self.parent_class == u'SASdata'): 315 381 self.current_datainfo.sample.transmission = data_point 316 382 317 # #Everything else goes in meta_data383 # Everything else goes in meta_data 318 384 else: 319 new_key = self._create_unique_key(self.current_datainfo.meta_data, key) 385 new_key = self._create_unique_key( 386 self.current_datainfo.meta_data, key) 320 387 self.current_datainfo.meta_data[new_key] = data_point 321 388 322 389 else: 323 # #I don't know if this reachable code390 # I don't know if this reachable code 324 391 self.errors.add("ShouldNeverHappenException") 325 392 326 393 def add_intermediate(self): 327 394 """ 328 This method stores any intermediate objects within the final data set after fully reading the set. 329 330 :param parent: The NXclass name for the h5py Group object that just finished being processed 395 This method stores any intermediate objects within the final data set 396 after fully reading the set. 397 398 :param parent: The NXclass name for the h5py Group object that just 399 finished being processed 331 400 """ 332 401 … … 347 416 self.aperture = Aperture() 348 417 elif self.parent_class == u'SASdata': 349 if type(self.current_dataset) is plottable_2D:418 if isinstance(self.current_dataset, plottable_2D): 350 419 self.data2d.append(self.current_dataset) 351 elif type(self.current_dataset) is plottable_1D:420 elif isinstance(self.current_dataset, plottable_1D): 352 421 self.data1d.append(self.current_dataset) 353 422 354 423 def final_data_cleanup(self): 355 424 """ 356 Does some final cleanup and formatting on self.current_datainfo and all data1D and data2D objects and then 357 combines the data and info into Data1D and Data2D objects 358 """ 359 360 ## Type cast data arrays to float64 425 Does some final cleanup and formatting on self.current_datainfo and 426 all data1D and data2D objects and then combines the data and info into 427 Data1D and Data2D objects 428 """ 429 430 # Type cast data arrays to float64 361 431 if len(self.current_datainfo.trans_spectrum) > 0: 362 432 spectrum_list = [] … … 364 434 spectrum.transmission = np.delete(spectrum.transmission, [0]) 365 435 spectrum.transmission = spectrum.transmission.astype(np.float64) 366 spectrum.transmission_deviation = np.delete(spectrum.transmission_deviation, [0]) 367 spectrum.transmission_deviation = spectrum.transmission_deviation.astype(np.float64) 436 spectrum.transmission_deviation = np.delete( 437 spectrum.transmission_deviation, [0]) 438 spectrum.transmission_deviation = \ 439 spectrum.transmission_deviation.astype(np.float64) 368 440 spectrum.wavelength = np.delete(spectrum.wavelength, [0]) 369 441 spectrum.wavelength = spectrum.wavelength.astype(np.float64) … … 372 444 self.current_datainfo.trans_spectrum = spectrum_list 373 445 374 # #Append errors to dataset and reset class errors446 # Append errors to dataset and reset class errors 375 447 self.current_datainfo.errors = self.errors 376 448 self.errors.clear() 377 449 378 # #Combine all plottables with datainfo and append each to output379 # #Type cast data arrays to float64 and find min/max as appropriate450 # Combine all plottables with datainfo and append each to output 451 # Type cast data arrays to float64 and find min/max as appropriate 380 452 for dataset in self.data2d: 381 453 dataset.data = dataset.data.astype(np.float64) … … 397 469 zeros = np.ones(dataset.data.size, dtype=bool) 398 470 try: 399 for i in range 471 for i in range(0, dataset.mask.size - 1): 400 472 zeros[i] = dataset.mask[i] 401 473 
402 474 self.errors.add(sys.exc_value)
403 475 dataset.mask = zeros
404 ##Calculate the actual Q matrix
476 # Calculate the actual Q matrix
405 477 try:
406 478 if dataset.q_data.size <= 1:
407 dataset.q_data = np.sqrt(dataset.qx_data * dataset.qx_data + dataset.qy_data * dataset.qy_data)
479 dataset.q_data = np.sqrt(dataset.qx_data
480 * dataset.qx_data
481 + dataset.qy_data
482 * dataset.qy_data)
408 483 except:
409 484 dataset.q_data = None
… …
415 490 dataset.data = dataset.data.flatten()
416 491
417 final_dataset = combine_data_info_with_plottable(dataset, self.current_datainfo)
492 final_dataset = combine_data_info_with_plottable(
493 dataset, self.current_datainfo)
418 494 self.output.append(final_dataset)
419 495
… …
435 511 if dataset.dy is not None:
436 512 dataset.dy = dataset.dy.astype(np.float64)
437 final_dataset = combine_data_info_with_plottable(dataset, self.current_datainfo)
513 final_dataset = combine_data_info_with_plottable(
514 dataset, self.current_datainfo)
438 515 self.output.append(final_dataset)
439 516
440 517 def add_data_set(self, key=""):
441 518 """
442 Adds the current_dataset to the list of outputs after preforming final processing on the data and then calls a
443 private method to generate a new data set.
519 Adds the current_dataset to the list of outputs after preforming final
520 processing on the data and then calls a private method to generate a
521 new data set.
444 522
445 523 :param key: NeXus group name for current tree level
… …
453 531
454 532
455 def _initialize_new_data_set(self, parent_list = None):
456 """
457 A private class method to generate a new 1D or 2D data object based on the type of data within the set.
458 Outside methods should call add_data_set() to be sure any existing data is stored properly.
533 def _initialize_new_data_set(self, parent_list=None):
534 """
535 A private class method to generate a new 1D or 2D data object based on
536 the type of data within the set. Outside methods should call
537 add_data_set() to be sure any existing data is stored properly.
459 538
460 539 :param parent_list: List of names of parent elements
… …
473 552 def _find_intermediate(self, parent_list, basename=""):
474 553 """
475 A private class used to find an entry by either using a direct key or knowing the approximate basename.
476
477 :param parent_list: List of parents to the current level in the HDF5 file
554 A private class used to find an entry by either using a direct key or
555 knowing the approximate basename.
556
557 :param parent_list: List of parents nodes in the HDF5 file
478 558 :param basename: Approximate name of an entry to search for
479 559 :return:
… …
486 566 top = top.get(parent)
487 567 for key in top.keys():
488 if (key_prog.match(key)):
568 if key_prog.match(key):
489 569 entry = True
490 570 break
… …
516 596 """
517 597 unit = value.attrs.get(u'units')
518 if unit == None:
598 if unit is None:
519 599 unit = value.attrs.get(u'unit')
520 ##Convert the unit formats
600 # Convert the unit formats
521 601 if unit == "1/A":
522 602 unit = "A^{-1}"
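Note: add_intermediate() now routes datasets with isinstance() rather than an exact type() comparison. The practical difference is that subclasses also match. A minimal, self-contained sketch of the distinction (stand-in class names, not the reader's API):

    import numpy as np

    class plottable_1D(object):
        # stand-in for the plottable base class
        def __init__(self, x, y):
            self.x = np.asarray(x)
            self.y = np.asarray(y)

    class Data1D(plottable_1D):
        # stand-in for a combined data + info object built on the plottable
        pass

    dataset = Data1D([0.01], [1.0])
    print(type(dataset) is plottable_1D)      # False: exact-type test rejects subclasses
    print(isinstance(dataset, plottable_1D))  # True: still dispatched to the 1D branch
-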
src/sas/sascalc/dataloader/readers/danse_reader.py
rb699768 r9a5097c
15 15 import os
16 16 import sys
17 import numpy
17 import numpy as np
18 18 import logging
19 19 from sas.sascalc.dataloader.data_info import Data2D, Detector
… …
79 79 output.detector.append(detector)
80 80
81 output.data = numpy.zeros([size_x,size_y])
82 output.err_data = numpy.zeros([size_x, size_y])
81 output.data = np.zeros([size_x,size_y])
82 output.err_data = np.zeros([size_x, size_y])
83 83
84 84 data_conv_q = None
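Note: the only change in this reader is the import alias; np.zeros is numpy.zeros under a shorter name. A quick sketch with hypothetical detector dimensions:

    import numpy as np

    size_x, size_y = 128, 128              # hypothetical detector size in pixels
    data = np.zeros([size_x, size_y])      # per-pixel counts
    err_data = np.zeros([size_x, size_y])  # per-pixel uncertainties
    assert data.shape == (128, 128)
-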
src/sas/sascalc/dataloader/readers/hfir1d_reader.py
rb699768 r9a5097c
9 9 #copyright 2008, University of Tennessee
10 10 ######################################################################
11 import numpy
11 import numpy as np
12 12 import os
13 13 from sas.sascalc.dataloader.data_info import Data1D
… …
52 52 buff = input_f.read()
53 53 lines = buff.split('\n')
54 x = numpy.zeros(0)
55 y = numpy.zeros(0)
56 dx = numpy.zeros(0)
57 dy = numpy.zeros(0)
54 x = np.zeros(0)
55 y = np.zeros(0)
56 dx = np.zeros(0)
57 dy = np.zeros(0)
58 58 output = Data1D(x, y, dx=dx, dy=dy)
59 59 self.filename = output.filename = basename
… …
88 88 _dy = data_conv_i(_dy, units=output.y_unit)
89 89
90 x = numpy.append(x, _x)
91 y = numpy.append(y, _y)
92 dx = numpy.append(dx, _dx)
93 dy = numpy.append(dy, _dy)
90 x = np.append(x, _x)
91 y = np.append(y, _y)
92 dx = np.append(dx, _dx)
93 dy = np.append(dy, _dy)
94 94 except:
95 95 # Couldn't parse this line, skip it
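Note: behaviour is unchanged here as well; the reader still grows its arrays one parsed line at a time with np.append, which copies the whole array on every call. A sketch of that pattern on a made-up two-line input (the four-column layout is assumed for illustration):

    import numpy as np

    lines = ["0.01 100.0 0.001 10.0",
             "0.02  90.0 0.001  9.5"]
    x, y, dx, dy = (np.zeros(0) for _ in range(4))
    for line in lines:
        toks = line.split()
        try:
            x = np.append(x, float(toks[0]))
            y = np.append(y, float(toks[1]))
            dx = np.append(dx, float(toks[2]))
            dy = np.append(dy, float(toks[3]))
        except (ValueError, IndexError):
            continue  # couldn't parse this line, skip it
    print(x, y, dx, dy)

For large files, collecting values in Python lists and converting once at the end avoids the repeated copies.
-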
src/sas/sascalc/dataloader/readers/red2d_reader.py
rb699768 r9a5097c
10 10 ######################################################################
11 11 import os
12 import numpy
12 import numpy as np
13 13 import math
14 14 from sas.sascalc.dataloader.data_info import Data2D, Detector
… …
198 198 break
199 199 # Make numpy array to remove header lines using index
200 lines_array = numpy.array(lines)
200 lines_array = np.array(lines)
201 201
202 202 # index for lines_array
203 lines_index = numpy.arange(len(lines))
203 lines_index = np.arange(len(lines))
204 204
205 205 # get the data lines
… …
225 225
226 226 # numpy array form
227 data_array = numpy.array(data_list1)
227 data_array = np.array(data_list1)
228 228 # Redimesion based on the row_num and col_num,
229 229 #otherwise raise an error.
… …
235 235 ## Get the all data: Let's HARDcoding; Todo find better way
236 236 # Defaults
237 dqx_data = numpy.zeros(0)
238 dqy_data = numpy.zeros(0)
239 err_data = numpy.ones(row_num)
240 qz_data = numpy.zeros(row_num)
241 mask = numpy.ones(row_num, dtype=bool)
237 dqx_data = np.zeros(0)
238 dqy_data = np.zeros(0)
239 err_data = np.ones(row_num)
240 qz_data = np.zeros(row_num)
241 mask = np.ones(row_num, dtype=bool)
242 242 # Get from the array
… …
254 254 dqy_data = data_point[(5 + ver)]
255 255 #if col_num > (6 + ver): mask[data_point[(6 + ver)] < 1] = False
256 q_data = numpy.sqrt(qx_data*qx_data+qy_data*qy_data+qz_data*qz_data)
256 q_data = np.sqrt(qx_data*qx_data+qy_data*qy_data+qz_data*qz_data)
257 257
258 258 # Extra protection(it is needed for some data files):
… …
262 262
263 263 # Store limits of the image in q space
264 xmin = numpy.min(qx_data)
265 xmax = numpy.max(qx_data)
266 ymin = numpy.min(qy_data)
267 ymax = numpy.max(qy_data)
264 xmin = np.min(qx_data)
265 xmax = np.max(qx_data)
266 ymin = np.min(qy_data)
267 ymax = np.max(qy_data)
268 268
269 269 # units
… …
287 287
288 288 # store x and y axis bin centers in q space
289 x_bins = numpy.arange(xmin, xmax + xstep, xstep)
290 y_bins = numpy.arange(ymin, ymax + ystep, ystep)
289 x_bins = np.arange(xmin, xmax + xstep, xstep)
290 y_bins = np.arange(ymin, ymax + ystep, ystep)
291 291
292 292 # get the limits of q values
… …
300 300 output.data = data
301 301 if (err_data == 1).all():
302 output.err_data = numpy.sqrt(numpy.abs(data))
302 output.err_data = np.sqrt(np.abs(data))
303 303 output.err_data[output.err_data == 0.0] = 1.0
304 304 else:
… …
335 335 # tranfer the comp. to cartesian coord. for newer version.
336 336 if ver != 1:
337 diag = numpy.sqrt(qx_data * qx_data + qy_data * qy_data)
337 diag = np.sqrt(qx_data * qx_data + qy_data * qy_data)
338 338 cos_th = qx_data / diag
339 339 sin_th = qy_data / diag
340 output.dqx_data = numpy.sqrt((dqx_data * cos_th) * \
340 output.dqx_data = np.sqrt((dqx_data * cos_th) * \
341 341 (dqx_data * cos_th) \
342 342 + (dqy_data * sin_th) * \
343 343 (dqy_data * sin_th))
344 output.dqy_data = numpy.sqrt((dqx_data * sin_th) * \
344 output.dqy_data = np.sqrt((dqx_data * sin_th) * \
345 345 (dqx_data * sin_th) \
346 346 + (dqy_data * cos_th) * \
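Note: the last hunk is the resolution projection for newer file versions: per-point radial/tangential smearing widths are rotated onto the qx/qy axes through the azimuthal angle of each point. A standalone sketch with made-up values (same arithmetic as the reader, written with ** instead of repeated multiplication):

    import numpy as np

    qx_data = np.array([0.01, 0.00, 0.03])
    qy_data = np.array([0.00, 0.02, 0.04])
    dqx_data = np.array([1e-4, 1e-4, 1e-4])  # radial smearing component
    dqy_data = np.array([2e-4, 2e-4, 2e-4])  # tangential smearing component

    diag = np.sqrt(qx_data * qx_data + qy_data * qy_data)
    cos_th, sin_th = qx_data / diag, qy_data / diag
    dqx_cart = np.sqrt((dqx_data * cos_th) ** 2 + (dqy_data * sin_th) ** 2)
    dqy_cart = np.sqrt((dqx_data * sin_th) ** 2 + (dqy_data * cos_th) ** 2)
    print(dqx_cart, dqy_cart)
-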
src/sas/sascalc/dataloader/readers/sesans_reader.py
r345e7e4 r9a5097c
6 6 Jurrian Bakker
7 7
8 import numpy
8 import numpy as np
9 9 import os
10 from sas.sascalc.dataloader.data_info import SESANSData1D
10 from sas.sascalc.dataloader.data_info import Data1D
11 11
12 12 # Check whether we have a converter available
… …
59 59 raise RuntimeError, "sesans_reader: cannot open %s" % path
60 60 buff = input_f.read()
61 # print buff
62 61 lines = buff.splitlines()
63 # print lines
64 #Jae could not find python universal line spliter:
65 #keep the below for now
66 # some ascii data has \r line separator,
67 # try it when the data is on only one long line
68 # if len(lines) < 2 :
69 # lines = buff.split('\r')
70
71 x = numpy.zeros(0)
72 y = numpy.zeros(0)
73 dy = numpy.zeros(0)
74 lam = numpy.zeros(0)
75 dlam = numpy.zeros(0)
76 dx = numpy.zeros(0)
62 x = np.zeros(0)
63 y = np.zeros(0)
64 dy = np.zeros(0)
65 lam = np.zeros(0)
66 dlam = np.zeros(0)
67 dx = np.zeros(0)
77 68
78 69 #temp. space to sort data
79 tx = numpy.zeros(0)
80 ty = numpy.zeros(0)
81 tdy = numpy.zeros(0)
82 tlam = numpy.zeros(0)
83 tdlam = numpy.zeros(0)
84 tdx = numpy.zeros(0)
85 # print "all good"
86 output = SESANSData1D(x=x, y=y, lam=lam, dy=dy, dx=dx, dlam=dlam)
87 # print output
70 tx = np.zeros(0)
71 ty = np.zeros(0)
72 tdy = np.zeros(0)
73 tlam = np.zeros(0)
74 tdlam = np.zeros(0)
75 tdx = np.zeros(0)
76 output = Data1D(x=x, y=y, lam=lam, dy=dy, dx=dx, dlam=dlam, isSesans=True)
88 77 self.filename = output.filename = basename
89 78
90 ##Initialize counters for data lines and header lines.
91 # is_data = False # Has more than 5 lines
92 # # More than "5" lines of data is considered as actual
93 # # data unless that is the only data
94 # mum_data_lines = 5
95 # # To count # of current data candidate lines
96 # i = -1
97 # # To count total # of previous data candidate lines
98 # i1 = -1
99 # # To count # of header lines
100 # j = -1
101 # # Helps to count # of header lines
102 # j1 = -1
103 # #minimum required number of columns of data; ( <= 4).
104 # lentoks = 2
105 79 paramnames=[]
106 80 paramvals=[]
… …
111 85 Pvals=[]
112 86 dPvals=[]
113 # print x
114 # print zvals
87
115 88 for line in lines:
116 89 # Initial try for CSV (split on ,)
… …
122 95 if len(toks)>5:
123 96 zvals.append(toks[0])
124 dzvals.append(toks[1])
125 lamvals.append(toks[2])
126 dlamvals.append(toks[3])
127 Pvals.append(toks[4])
128 dPvals.append(toks[5])
97 dzvals.append(toks[3])
98 lamvals.append(toks[4])
99 dlamvals.append(toks[5])
100 Pvals.append(toks[1])
101 dPvals.append(toks[2])
129 102 else:
130 103 continue
… …
140 113 default_z_unit = "A"
141 114 data_conv_P = None
142 default_p_unit = " "
115 default_p_unit = " " # Adjust unit for axis (L^-3)
143 116 lam_unit = lam_header[1].replace("[","").replace("]","")
117 if lam_unit == 'AA':
118 lam_unit = 'A'
144 119 varheader=[zvals[0],dzvals[0],lamvals[0],dlamvals[0],Pvals[0],dPvals[0]]
145 120 valrange=range(1, len(zvals))
… …
153 128
154 129 x,y,lam,dy,dx,dlam = [
155 numpy.asarray(v, 'double')
130 np.asarray(v, 'double')
156 131 for v in (x,y,lam,dy,dx,dlam)
157 132 ]
… …
161 136 output.x, output.x_unit = self._unit_conversion(x, lam_unit, default_z_unit)
162 137 output.y = y
138 output.y_unit = r'\AA^{-2} cm^{-1}' # output y_unit added
163 139 output.dx, output.dx_unit = self._unit_conversion(dx, lam_unit, default_z_unit)
164 140 output.dy = dy
165 141 output.lam, output.lam_unit = self._unit_conversion(lam, lam_unit, default_z_unit)
166 142 output.dlam, output.dlam_unit = self._unit_conversion(dlam, lam_unit, default_z_unit)
143
144 output.xaxis(r"\rm{z}", output.x_unit)
145 output.yaxis(r"\rm{ln(P)/(t \lambda^2)}", output.y_unit) # Adjust label to ln P/(lam^2 t), remove lam column refs
167 146
168 output.xaxis("\rm{z}", output.x_unit)
169 output.yaxis("\\rm{P/P0}", output.y_unit)
170 147 # Store loading process information
171 148 output.meta_data['loader'] = self.type_name
172 output.sample.thickness = float(paramvals[6])
149 #output.sample.thickness = float(paramvals[6])
173 150 output.sample.name = paramvals[1]
174 151 output.sample.ID = paramvals[0]
175 152 zaccept_unit_split = paramnames[7].split("[")
176 153 zaccept_unit = zaccept_unit_split[1].replace("]","")
177 if zaccept_unit.strip() == '\AA^-1':
154 if zaccept_unit.strip() == r'\AA^-1' or zaccept_unit.strip() == r'\A^-1':
178 155 zaccept_unit = "1/A"
179 156 output.sample.zacceptance=(float(paramvals[7]),zaccept_unit)
180 output.vars =varheader
157 output.vars = varheader
181 158
182 159 if len(output.x) < 1:
src/sas/sascalc/dataloader/readers/tiff_reader.py
rb699768 r9a5097c
13 13 import logging
14 14 import os
15 import numpy
15 import numpy as np
16 16 from sas.sascalc.dataloader.data_info import Data2D
17 17 from sas.sascalc.dataloader.manipulations import reader2D_converter
… …
56 56
57 57 # Initiazed the output data object
58 output.data = numpy.zeros([im.size[0], im.size[1]])
59 output.err_data = numpy.zeros([im.size[0], im.size[1]])
60 output.mask = numpy.ones([im.size[0], im.size[1]], dtype=bool)
58 output.data = np.zeros([im.size[0], im.size[1]])
59 output.err_data = np.zeros([im.size[0], im.size[1]])
60 output.mask = np.ones([im.size[0], im.size[1]], dtype=bool)
61 61
62 62 # Initialize
… …
94 94 output.x_bins = x_vals
95 95 output.y_bins = y_vals
96 output.qx_data = numpy.array(x_vals)
97 output.qy_data = numpy.array(y_vals)
96 output.qx_data = np.array(x_vals)
97 output.qy_data = np.array(y_vals)
98 98 output.xmin = 0
99 99 output.xmax = im.size[0] - 1
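Note: only the numpy alias changes in the TIFF reader; the qx/qy axes remain nominal values spanning the image grid, with PIL/Pillow's Image.size supplying (width, height). A sketch with a hypothetical image size (how x_vals/y_vals are filled is assumed here, not shown in the hunk):

    import numpy as np

    width, height = 64, 32        # hypothetical im.size
    x_vals = np.arange(width)     # one nominal bin centre per pixel column
    y_vals = np.arange(height)    # one nominal bin centre per pixel row
    mask = np.ones([width, height], dtype=bool)
    print(x_vals.shape, y_vals.shape, mask.shape)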