Changeset 6a455cd3 in sasview for src/sas/sascalc/dataloader
- Timestamp: Jul 24, 2017 10:27:05 AM
- Branches: master, ESS_GUI, ESS_GUI_Docs, ESS_GUI_batch_fitting, ESS_GUI_bumps_abstraction, ESS_GUI_iss1116, ESS_GUI_iss879, ESS_GUI_iss959, ESS_GUI_opencl, ESS_GUI_ordering, ESS_GUI_sync_sascalc, magnetic_scatt, release-4.2.2, ticket-1009, ticket-1094-headless, ticket-1242-2d-resolution, ticket-1243, ticket-1249, ticket885, unittest-saveload
- Children: 146c669
- Parents: b61bd57 (diff), bc04647 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Location: src/sas/sascalc/dataloader
- Files: 16 edited
src/sas/sascalc/dataloader/data_info.py
(r2ffe241 → ra1b8fee)

 ######################################################################
+from __future__ import print_function

 #TODO: Keep track of data manipulation in the 'process' data structure.
 #from sas.guitools.plottables import Data1D as plottable_1D
 from sas.sascalc.data_util.uncertainty import Uncertainty
-import numpy
+import numpy as np
 import math

Every remaining numpy.* call in the module is updated to the np alias, e.g. in plottable_1D.__init__:

-        self.x = numpy.asarray(x)
-        self.y = numpy.asarray(y)
+        self.x = np.asarray(x)
+        self.y = np.asarray(y)

and likewise for dx, dy, dxl, dxw, lam and dlam there; for data, qx_data, qy_data, q_data, mask, err_data, dqx_data and dqy_data in plottable_2D.__init__; for the np.ndarray check in _check; for the np.zeros vectors built in the Data1D and Data2D clone_without_data helpers; and for the np.append, np.argsort and np.size calls in the 1D and 2D arithmetic helpers.

The SESANS acceptance default changes from None to a tuple, and a y acceptance is added:

         ## SESANS zacceptance
-        zacceptance = None
+        zacceptance = (0,"")
+        yacceptance = (0,"")

Comparisons against None now use identity tests throughout (_validity_check, _validity_check_union and the operation helpers of both Data1D and Data2D), e.g.:

-        if other.dy == None or (len(other.dy) != len(other.y)):
+        if other.dy is None or (len(other.dy) != len(other.y)):

-        if result.dy != None:
+        if result.dy is not None:

with the same change applied to the dx, dxl, dxw, err_data, dqx_data and dqy_data checks in the subtraction, union and combine operations.
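The `== None` to `is None` change here is more than style: comparing a numpy array to None with `==` broadcasts elementwise, and the resulting array is ambiguous in a boolean context. A minimal sketch of the failure mode the new identity tests avoid (the values are illustrative, not from the changeset):

```python
import numpy as np

dy = np.asarray([0.1, 0.2, 0.3])

print(dy is None)   # False: identity test is unambiguous
print(dy == None)   # elementwise comparison -> array([False, False, False])

try:
    if dy == None:  # old style: truth value of an array is ambiguous
        pass
except ValueError as err:
    print(err)
```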
src/sas/sascalc/dataloader/loader.py
(rb699768 → r463e7ffc)

 from readers import cansas_reader

+logger = logging.getLogger(__name__)
+
 class Registry(ExtensionRegistry):

All logging calls in the plugin-discovery and reader-association paths now go through the module-level logger:

-            logging.warning(msg)
+            logger.warning(msg)

-            logging.error(msg)
+            logger.error(msg)

(the logging.error change is applied at each of the six error-reporting sites shown in the diff).
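This follows the standard library pattern of one logger per module, named after the module, so applications can configure handlers and levels for sas.sascalc.dataloader.* selectively instead of logging through the root logger. A minimal sketch of the same pattern (the function and path are hypothetical):

```python
import logging
import os

# One module-level logger, named after this module.
logger = logging.getLogger(__name__)

def lookup_plugins(path):
    """Illustrative only: mirrors how loader.py reports a missing plugin dir."""
    if not os.path.isdir(path):
        logger.warning('DataLoader could not locate plugin folder: "%s"', path)
        return []
    return os.listdir(path)

if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    lookup_plugins("no/such/dir")
```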
src/sas/sascalc/dataloader/manipulations.py
rb2b36932 r324e0bf 1 from __future__ import division 1 2 """ 2 3 Data manipulations for 2D data sets. 3 4 Using the meta data information, various types of averaging 4 5 are performed in Q-space 6 7 To test this module use: 8 ``` 9 cd test 10 PYTHONPATH=../src/ python2 -m sasdataloader.test.utest_averaging DataInfoTests.test_sectorphi_quarter 11 ``` 5 12 """ 6 13 ##################################################################### 7 # This software was developed by the University of Tennessee as part of the8 # Distributed Data Analysis of Neutron Scattering Experiments (DANSE)9 # project funded by the US National Science Foundation.10 # See the license text in license.txt11 # copyright 2008, University of Tennessee14 # This software was developed by the University of Tennessee as part of the 15 # Distributed Data Analysis of Neutron Scattering Experiments (DANSE) 16 # project funded by the US National Science Foundation. 17 # See the license text in license.txt 18 # copyright 2008, University of Tennessee 12 19 ###################################################################### 13 20 14 #TODO: copy the meta data from the 2D object to the resulting 1D object 21 22 # TODO: copy the meta data from the 2D object to the resulting 1D object 15 23 import math 16 import numpy 24 import numpy as np 25 import sys 17 26 18 27 #from data_info import plottable_2D … … 70 79 return phi_out 71 80 72 73 def reader2D_converter(data2d=None):74 """75 convert old 2d format opened by IhorReader or danse_reader76 to new Data2D format77 78 :param data2d: 2d array of Data2D object79 :return: 1d arrays of Data2D object80 81 """82 if data2d.data == None or data2d.x_bins == None or data2d.y_bins == None:83 raise ValueError, "Can't convert this data: data=None..."84 new_x = numpy.tile(data2d.x_bins, (len(data2d.y_bins), 1))85 new_y = numpy.tile(data2d.y_bins, (len(data2d.x_bins), 1))86 new_y = new_y.swapaxes(0, 1)87 88 new_data = data2d.data.flatten()89 qx_data = new_x.flatten()90 qy_data = new_y.flatten()91 q_data = numpy.sqrt(qx_data * qx_data + qy_data * qy_data)92 if data2d.err_data == None or numpy.any(data2d.err_data <= 0):93 new_err_data = numpy.sqrt(numpy.abs(new_data))94 else:95 new_err_data = data2d.err_data.flatten()96 mask = numpy.ones(len(new_data), dtype=bool)97 98 #TODO: make sense of the following two lines...99 #from sas.sascalc.dataloader.data_info import Data2D100 #output = Data2D()101 output = data2d102 output.data = new_data103 output.err_data = new_err_data104 output.qx_data = qx_data105 output.qy_data = qy_data106 output.q_data = q_data107 output.mask = mask108 109 return output110 111 112 class _Slab(object):113 """114 Compute average I(Q) for a region of interest115 """116 def __init__(self, x_min=0.0, x_max=0.0, y_min=0.0,117 y_max=0.0, bin_width=0.001):118 # Minimum Qx value [A-1]119 self.x_min = x_min120 # Maximum Qx value [A-1]121 self.x_max = x_max122 # Minimum Qy value [A-1]123 self.y_min = y_min124 # Maximum Qy value [A-1]125 self.y_max = y_max126 # Bin width (step size) [A-1]127 self.bin_width = bin_width128 # If True, I(|Q|) will be return, otherwise,129 # negative q-values are allowed130 self.fold = False131 132 def __call__(self, data2D):133 return NotImplemented134 135 def _avg(self, data2D, maj):136 """137 Compute average I(Q_maj) for a region of interest.138 The major axis is defined as the axis of Q_maj.139 The minor axis is the axis that we average over.140 141 :param data2D: Data2D object142 :param maj_min: min value on the major axis143 :return: Data1D object144 """145 if 
len(data2D.detector) > 1:146 msg = "_Slab._avg: invalid number of "147 msg += " detectors: %g" % len(data2D.detector)148 raise RuntimeError, msg149 150 # Get data151 data = data2D.data[numpy.isfinite(data2D.data)]152 err_data = data2D.err_data[numpy.isfinite(data2D.data)]153 qx_data = data2D.qx_data[numpy.isfinite(data2D.data)]154 qy_data = data2D.qy_data[numpy.isfinite(data2D.data)]155 156 # Build array of Q intervals157 if maj == 'x':158 if self.fold:159 x_min = 0160 else:161 x_min = self.x_min162 nbins = int(math.ceil((self.x_max - x_min) / self.bin_width))163 elif maj == 'y':164 if self.fold:165 y_min = 0166 else:167 y_min = self.y_min168 nbins = int(math.ceil((self.y_max - y_min) / self.bin_width))169 else:170 raise RuntimeError, "_Slab._avg: unrecognized axis %s" % str(maj)171 172 x = numpy.zeros(nbins)173 y = numpy.zeros(nbins)174 err_y = numpy.zeros(nbins)175 y_counts = numpy.zeros(nbins)176 177 # Average pixelsize in q space178 for npts in range(len(data)):179 # default frac180 frac_x = 0181 frac_y = 0182 # get ROI183 if self.x_min <= qx_data[npts] and self.x_max > qx_data[npts]:184 frac_x = 1185 if self.y_min <= qy_data[npts] and self.y_max > qy_data[npts]:186 frac_y = 1187 frac = frac_x * frac_y188 189 if frac == 0:190 continue191 # binning: find axis of q192 if maj == 'x':193 q_value = qx_data[npts]194 min_value = x_min195 if maj == 'y':196 q_value = qy_data[npts]197 min_value = y_min198 if self.fold and q_value < 0:199 q_value = -q_value200 # bin201 i_q = int(math.ceil((q_value - min_value) / self.bin_width)) - 1202 203 # skip outside of max bins204 if i_q < 0 or i_q >= nbins:205 continue206 207 #TODO: find better definition of x[i_q] based on q_data208 # min_value + (i_q + 1) * self.bin_width / 2.0209 x[i_q] += frac * q_value210 y[i_q] += frac * data[npts]211 212 if err_data == None or err_data[npts] == 0.0:213 if data[npts] < 0:214 data[npts] = -data[npts]215 err_y[i_q] += frac * frac * data[npts]216 else:217 err_y[i_q] += frac * frac * err_data[npts] * err_data[npts]218 y_counts[i_q] += frac219 220 # Average the sums221 for n in range(nbins):222 err_y[n] = math.sqrt(err_y[n])223 224 err_y = err_y / y_counts225 y = y / y_counts226 x = x / y_counts227 idx = (numpy.isfinite(y) & numpy.isfinite(x))228 229 if not idx.any():230 msg = "Average Error: No points inside ROI to average..."231 raise ValueError, msg232 return Data1D(x=x[idx], y=y[idx], dy=err_y[idx])233 234 235 class SlabY(_Slab):236 """237 Compute average I(Qy) for a region of interest238 """239 def __call__(self, data2D):240 """241 Compute average I(Qy) for a region of interest242 243 :param data2D: Data2D object244 :return: Data1D object245 """246 return self._avg(data2D, 'y')247 248 249 class SlabX(_Slab):250 """251 Compute average I(Qx) for a region of interest252 """253 def __call__(self, data2D):254 """255 Compute average I(Qx) for a region of interest256 :param data2D: Data2D object257 :return: Data1D object258 """259 return self._avg(data2D, 'x')260 261 262 class Boxsum(object):263 """264 Perform the sum of counts in a 2D region of interest.265 """266 def __init__(self, x_min=0.0, x_max=0.0, y_min=0.0, y_max=0.0):267 # Minimum Qx value [A-1]268 self.x_min = x_min269 # Maximum Qx value [A-1]270 self.x_max = x_max271 # Minimum Qy value [A-1]272 self.y_min = y_min273 # Maximum Qy value [A-1]274 self.y_max = y_max275 276 def __call__(self, data2D):277 """278 Perform the sum in the region of interest279 280 :param data2D: Data2D object281 :return: number of counts, error on number of counts,282 number of points 
summed283 """284 y, err_y, y_counts = self._sum(data2D)285 286 # Average the sums287 counts = 0 if y_counts == 0 else y288 error = 0 if y_counts == 0 else math.sqrt(err_y)289 290 # Added y_counts to return, SMK & PDB, 04/03/2013291 return counts, error, y_counts292 293 def _sum(self, data2D):294 """295 Perform the sum in the region of interest296 297 :param data2D: Data2D object298 :return: number of counts,299 error on number of counts, number of entries summed300 """301 if len(data2D.detector) > 1:302 msg = "Circular averaging: invalid number "303 msg += "of detectors: %g" % len(data2D.detector)304 raise RuntimeError, msg305 # Get data306 data = data2D.data[numpy.isfinite(data2D.data)]307 err_data = data2D.err_data[numpy.isfinite(data2D.data)]308 qx_data = data2D.qx_data[numpy.isfinite(data2D.data)]309 qy_data = data2D.qy_data[numpy.isfinite(data2D.data)]310 311 y = 0.0312 err_y = 0.0313 y_counts = 0.0314 315 # Average pixelsize in q space316 for npts in range(len(data)):317 # default frac318 frac_x = 0319 frac_y = 0320 321 # get min and max at each points322 qx = qx_data[npts]323 qy = qy_data[npts]324 325 # get the ROI326 if self.x_min <= qx and self.x_max > qx:327 frac_x = 1328 if self.y_min <= qy and self.y_max > qy:329 frac_y = 1330 #Find the fraction along each directions331 frac = frac_x * frac_y332 if frac == 0:333 continue334 y += frac * data[npts]335 if err_data == None or err_data[npts] == 0.0:336 if data[npts] < 0:337 data[npts] = -data[npts]338 err_y += frac * frac * data[npts]339 else:340 err_y += frac * frac * err_data[npts] * err_data[npts]341 y_counts += frac342 return y, err_y, y_counts343 344 345 class Boxavg(Boxsum):346 """347 Perform the average of counts in a 2D region of interest.348 """349 def __init__(self, x_min=0.0, x_max=0.0, y_min=0.0, y_max=0.0):350 super(Boxavg, self).__init__(x_min=x_min, x_max=x_max,351 y_min=y_min, y_max=y_max)352 353 def __call__(self, data2D):354 """355 Perform the sum in the region of interest356 357 :param data2D: Data2D object358 :return: average counts, error on average counts359 360 """361 y, err_y, y_counts = self._sum(data2D)362 363 # Average the sums364 counts = 0 if y_counts == 0 else y / y_counts365 error = 0 if y_counts == 0 else math.sqrt(err_y) / y_counts366 367 return counts, error368 369 370 81 def get_pixel_fraction_square(x, xmin, xmax): 371 82 """ … … 390 101 return 1.0 391 102 392 393 class CircularAverage(object): 394 """ 395 Perform circular averaging on 2D data 396 397 The data returned is the distribution of counts 398 as a function of Q 399 """ 400 def __init__(self, r_min=0.0, r_max=0.0, bin_width=0.0005): 401 # Minimum radius included in the average [A-1] 402 self.r_min = r_min 403 # Maximum radius included in the average [A-1] 404 self.r_max = r_max 405 # Bin width (step size) [A-1] 406 self.bin_width = bin_width 407 408 def __call__(self, data2D, ismask=False): 409 """ 410 Perform circular averaging on the data 411 412 :param data2D: Data2D object 413 :return: Data1D object 414 """ 415 # Get data W/ finite values 416 data = data2D.data[numpy.isfinite(data2D.data)] 417 q_data = data2D.q_data[numpy.isfinite(data2D.data)] 418 err_data = data2D.err_data[numpy.isfinite(data2D.data)] 419 mask_data = data2D.mask[numpy.isfinite(data2D.data)] 420 421 dq_data = None 422 423 # Get the dq for resolution averaging 424 if data2D.dqx_data != None and data2D.dqy_data != None: 425 # The pinholes and det. 
pix contribution present 426 # in both direction of the 2D which must be subtracted when 427 # converting to 1D: dq_overlap should calculated ideally at 428 # q = 0. Note This method works on only pinhole geometry. 429 # Extrapolate dqx(r) and dqy(phi) at q = 0, and take an average. 430 z_max = max(data2D.q_data) 431 z_min = min(data2D.q_data) 432 x_max = data2D.dqx_data[data2D.q_data[z_max]] 433 x_min = data2D.dqx_data[data2D.q_data[z_min]] 434 y_max = data2D.dqy_data[data2D.q_data[z_max]] 435 y_min = data2D.dqy_data[data2D.q_data[z_min]] 436 # Find qdx at q = 0 437 dq_overlap_x = (x_min * z_max - x_max * z_min) / (z_max - z_min) 438 # when extrapolation goes wrong 439 if dq_overlap_x > min(data2D.dqx_data): 440 dq_overlap_x = min(data2D.dqx_data) 441 dq_overlap_x *= dq_overlap_x 442 # Find qdx at q = 0 443 dq_overlap_y = (y_min * z_max - y_max * z_min) / (z_max - z_min) 444 # when extrapolation goes wrong 445 if dq_overlap_y > min(data2D.dqy_data): 446 dq_overlap_y = min(data2D.dqy_data) 447 # get dq at q=0. 448 dq_overlap_y *= dq_overlap_y 449 450 dq_overlap = numpy.sqrt((dq_overlap_x + dq_overlap_y) / 2.0) 451 # Final protection of dq 452 if dq_overlap < 0: 453 dq_overlap = y_min 454 dqx_data = data2D.dqx_data[numpy.isfinite(data2D.data)] 455 dqy_data = data2D.dqy_data[numpy.isfinite(data2D.data)] - dq_overlap 456 # def; dqx_data = dq_r dqy_data = dq_phi 457 # Convert dq 2D to 1D here 458 dqx = dqx_data * dqx_data 459 dqy = dqy_data * dqy_data 460 dq_data = numpy.add(dqx, dqy) 461 dq_data = numpy.sqrt(dq_data) 462 463 #q_data_max = numpy.max(q_data) 464 if len(data2D.q_data) == None: 465 msg = "Circular averaging: invalid q_data: %g" % data2D.q_data 466 raise RuntimeError, msg 467 468 # Build array of Q intervals 469 nbins = int(math.ceil((self.r_max - self.r_min) / self.bin_width)) 470 471 x = numpy.zeros(nbins) 472 y = numpy.zeros(nbins) 473 err_y = numpy.zeros(nbins) 474 err_x = numpy.zeros(nbins) 475 y_counts = numpy.zeros(nbins) 476 477 for npt in range(len(data)): 478 479 if ismask and not mask_data[npt]: 480 continue 481 482 frac = 0 483 484 # q-value at the pixel (j,i) 485 q_value = q_data[npt] 486 data_n = data[npt] 487 488 ## No need to calculate the frac when all data are within range 489 if self.r_min >= self.r_max: 490 raise ValueError, "Limit Error: min > max" 491 492 if self.r_min <= q_value and q_value <= self.r_max: 493 frac = 1 494 if frac == 0: 495 continue 496 i_q = int(math.floor((q_value - self.r_min) / self.bin_width)) 497 498 # Take care of the edge case at phi = 2pi. 499 if i_q == nbins: 500 i_q = nbins - 1 501 y[i_q] += frac * data_n 502 # Take dqs from data to get the q_average 503 x[i_q] += frac * q_value 504 if err_data == None or err_data[npt] == 0.0: 505 if data_n < 0: 506 data_n = -data_n 507 err_y[i_q] += frac * frac * data_n 508 else: 509 err_y[i_q] += frac * frac * err_data[npt] * err_data[npt] 510 if dq_data != None: 511 # To be consistent with dq calculation in 1d reduction, 512 # we need just the averages (not quadratures) because 513 # it should not depend on the number of the q points 514 # in the qr bins. 
515 err_x[i_q] += frac * dq_data[npt] 516 else: 517 err_x = None 518 y_counts[i_q] += frac 519 520 # Average the sums 521 for n in range(nbins): 522 if err_y[n] < 0: 523 err_y[n] = -err_y[n] 524 err_y[n] = math.sqrt(err_y[n]) 525 #if err_x != None: 526 # err_x[n] = math.sqrt(err_x[n]) 527 528 err_y = err_y / y_counts 529 err_y[err_y == 0] = numpy.average(err_y) 530 y = y / y_counts 531 x = x / y_counts 532 idx = (numpy.isfinite(y)) & (numpy.isfinite(x)) 533 534 if err_x != None: 535 d_x = err_x[idx] / y_counts[idx] 536 else: 537 d_x = None 538 539 if not idx.any(): 540 msg = "Average Error: No points inside ROI to average..." 541 raise ValueError, msg 542 543 return Data1D(x=x[idx], y=y[idx], dy=err_y[idx], dx=d_x) 544 545 546 class Ring(object): 547 """ 548 Defines a ring on a 2D data set. 549 The ring is defined by r_min, r_max, and 550 the position of the center of the ring. 551 552 The data returned is the distribution of counts 553 around the ring as a function of phi. 554 555 Phi_min and phi_max should be defined between 0 and 2*pi 556 in anti-clockwise starting from the x- axis on the left-hand side 557 """ 558 #Todo: remove center. 559 def __init__(self, r_min=0, r_max=0, center_x=0, center_y=0, nbins=36): 560 # Minimum radius 561 self.r_min = r_min 562 # Maximum radius 563 self.r_max = r_max 564 # Center of the ring in x 565 self.center_x = center_x 566 # Center of the ring in y 567 self.center_y = center_y 568 # Number of angular bins 569 self.nbins_phi = nbins 570 571 572 def __call__(self, data2D): 573 """ 574 Apply the ring to the data set. 575 Returns the angular distribution for a given q range 576 577 :param data2D: Data2D object 578 579 :return: Data1D object 580 """ 581 if data2D.__class__.__name__ not in ["Data2D", "plottable_2D"]: 582 raise RuntimeError, "Ring averaging only take plottable_2D objects" 583 584 Pi = math.pi 585 586 # Get data 587 data = data2D.data[numpy.isfinite(data2D.data)] 588 q_data = data2D.q_data[numpy.isfinite(data2D.data)] 589 err_data = data2D.err_data[numpy.isfinite(data2D.data)] 590 qx_data = data2D.qx_data[numpy.isfinite(data2D.data)] 591 qy_data = data2D.qy_data[numpy.isfinite(data2D.data)] 592 593 # Set space for 1d outputs 594 phi_bins = numpy.zeros(self.nbins_phi) 595 phi_counts = numpy.zeros(self.nbins_phi) 596 phi_values = numpy.zeros(self.nbins_phi) 597 phi_err = numpy.zeros(self.nbins_phi) 598 599 # Shift to apply to calculated phi values in order 600 # to center first bin at zero 601 phi_shift = Pi / self.nbins_phi 602 603 for npt in range(len(data)): 604 frac = 0 605 # q-value at the point (npt) 606 q_value = q_data[npt] 607 data_n = data[npt] 608 609 # phi-value at the point (npt) 610 phi_value = math.atan2(qy_data[npt], qx_data[npt]) + Pi 611 612 if self.r_min <= q_value and q_value <= self.r_max: 613 frac = 1 614 if frac == 0: 615 continue 616 # binning 617 i_phi = int(math.floor((self.nbins_phi) * \ 618 (phi_value + phi_shift) / (2 * Pi))) 619 620 # Take care of the edge case at phi = 2pi. 
621 if i_phi >= self.nbins_phi: 622 i_phi = 0 623 phi_bins[i_phi] += frac * data[npt] 624 625 if err_data == None or err_data[npt] == 0.0: 626 if data_n < 0: 627 data_n = -data_n 628 phi_err[i_phi] += frac * frac * math.fabs(data_n) 629 else: 630 phi_err[i_phi] += frac * frac * err_data[npt] * err_data[npt] 631 phi_counts[i_phi] += frac 632 633 for i in range(self.nbins_phi): 634 phi_bins[i] = phi_bins[i] / phi_counts[i] 635 phi_err[i] = math.sqrt(phi_err[i]) / phi_counts[i] 636 phi_values[i] = 2.0 * math.pi / self.nbins_phi * (1.0 * i) 637 638 idx = (numpy.isfinite(phi_bins)) 639 640 if not idx.any(): 641 msg = "Average Error: No points inside ROI to average..." 642 raise ValueError, msg 643 #elif len(phi_bins[idx])!= self.nbins_phi: 644 # print "resulted",self.nbins_phi- len(phi_bins[idx]) 645 #,"empty bin(s) due to tight binning..." 646 return Data1D(x=phi_values[idx], y=phi_bins[idx], dy=phi_err[idx]) 647 103 def get_intercept(q, q_0, q_1): 104 """ 105 Returns the fraction of the side at which the 106 q-value intercept the pixel, None otherwise. 107 The values returned is the fraction ON THE SIDE 108 OF THE LOWEST Q. :: 109 110 A B 111 +-----------+--------+ <--- pixel size 112 0 1 113 Q_0 -------- Q ----- Q_1 <--- equivalent Q range 114 if Q_1 > Q_0, A is returned 115 if Q_1 < Q_0, B is returned 116 if Q is outside the range of [Q_0, Q_1], None is returned 117 118 """ 119 if q_1 > q_0: 120 if q > q_0 and q <= q_1: 121 return (q - q_0) / (q_1 - q_0) 122 else: 123 if q > q_1 and q <= q_0: 124 return (q - q_1) / (q_0 - q_1) 125 return None 648 126 649 127 def get_pixel_fraction(qmax, q_00, q_01, q_10, q_11): … … 710 188 return frac_max 711 189 712 713 def get_intercept(q, q_0, q_1): 714 """ 715 Returns the fraction of the side at which the 716 q-value intercept the pixel, None otherwise. 717 The values returned is the fraction ON THE SIDE 718 OF THE LOWEST Q. :: 719 720 A B 721 +-----------+--------+ <--- pixel size 722 0 1 723 Q_0 -------- Q ----- Q_1 <--- equivalent Q range 724 if Q_1 > Q_0, A is returned 725 if Q_1 < Q_0, B is returned 726 if Q is outside the range of [Q_0, Q_1], None is returned 727 728 """ 729 if q_1 > q_0: 730 if q > q_0 and q <= q_1: 731 return (q - q_0) / (q_1 - q_0) 190 def get_dq_data(data2D): 191 ''' 192 Get the dq for resolution averaging 193 The pinholes and det. pix contribution present 194 in both direction of the 2D which must be subtracted when 195 converting to 1D: dq_overlap should calculated ideally at 196 q = 0. Note This method works on only pinhole geometry. 197 Extrapolate dqx(r) and dqy(phi) at q = 0, and take an average. 198 ''' 199 z_max = max(data2D.q_data) 200 z_min = min(data2D.q_data) 201 dqx_at_z_max = data2D.dqx_data[np.argmax(data2D.q_data)] 202 dqx_at_z_min = data2D.dqx_data[np.argmin(data2D.q_data)] 203 dqy_at_z_max = data2D.dqy_data[np.argmax(data2D.q_data)] 204 dqy_at_z_min = data2D.dqy_data[np.argmin(data2D.q_data)] 205 # Find qdx at q = 0 206 dq_overlap_x = (dqx_at_z_min * z_max - dqx_at_z_max * z_min) / (z_max - z_min) 207 # when extrapolation goes wrong 208 if dq_overlap_x > min(data2D.dqx_data): 209 dq_overlap_x = min(data2D.dqx_data) 210 dq_overlap_x *= dq_overlap_x 211 # Find qdx at q = 0 212 dq_overlap_y = (dqy_at_z_min * z_max - dqy_at_z_max * z_min) / (z_max - z_min) 213 # when extrapolation goes wrong 214 if dq_overlap_y > min(data2D.dqy_data): 215 dq_overlap_y = min(data2D.dqy_data) 216 # get dq at q=0. 
217 dq_overlap_y *= dq_overlap_y 218 219 dq_overlap = np.sqrt((dq_overlap_x + dq_overlap_y) / 2.0) 220 # Final protection of dq 221 if dq_overlap < 0: 222 dq_overlap = dqy_at_z_min 223 dqx_data = data2D.dqx_data[np.isfinite(data2D.data)] 224 dqy_data = data2D.dqy_data[np.isfinite( 225 data2D.data)] - dq_overlap 226 # def; dqx_data = dq_r dqy_data = dq_phi 227 # Convert dq 2D to 1D here 228 dq_data = np.sqrt(dqx_data**2 + dqx_data**2) 229 return dq_data 230 231 ################################################################################ 232 233 def reader2D_converter(data2d=None): 234 """ 235 convert old 2d format opened by IhorReader or danse_reader 236 to new Data2D format 237 This is mainly used by the Readers 238 239 :param data2d: 2d array of Data2D object 240 :return: 1d arrays of Data2D object 241 242 """ 243 if data2d.data is None or data2d.x_bins is None or data2d.y_bins is None: 244 raise ValueError("Can't convert this data: data=None...") 245 new_x = np.tile(data2d.x_bins, (len(data2d.y_bins), 1)) 246 new_y = np.tile(data2d.y_bins, (len(data2d.x_bins), 1)) 247 new_y = new_y.swapaxes(0, 1) 248 249 new_data = data2d.data.flatten() 250 qx_data = new_x.flatten() 251 qy_data = new_y.flatten() 252 q_data = np.sqrt(qx_data * qx_data + qy_data * qy_data) 253 if data2d.err_data is None or np.any(data2d.err_data <= 0): 254 new_err_data = np.sqrt(np.abs(new_data)) 732 255 else: 733 if q > q_1 and q <= q_0: 734 return (q - q_1) / (q_0 - q_1) 735 return None 256 new_err_data = data2d.err_data.flatten() 257 mask = np.ones(len(new_data), dtype=bool) 258 259 # TODO: make sense of the following two lines... 260 #from sas.sascalc.dataloader.data_info import Data2D 261 #output = Data2D() 262 output = data2d 263 output.data = new_data 264 output.err_data = new_err_data 265 output.qx_data = qx_data 266 output.qy_data = qy_data 267 output.q_data = q_data 268 output.mask = mask 269 270 return output 271 272 ################################################################################ 273 274 class Binning(object): 275 ''' 276 This class just creates a binning object 277 either linear or log 278 ''' 279 280 def __init__(self, min_value, max_value, n_bins, base=None): 281 ''' 282 if base is None: Linear binning 283 ''' 284 self.min = min_value if min_value > 0 else 0.0001 285 self.max = max_value 286 self.n_bins = n_bins 287 self.base = base 288 289 def get_bin_index(self, value): 290 ''' 291 The general formula logarithm binning is: 292 bin = floor(N * (log(x) - log(min)) / (log(max) - log(min))) 293 ''' 294 if self.base: 295 temp_x = self.n_bins * (math.log(value, self.base) - math.log(self.min, self.base)) 296 temp_y = math.log(self.max, self.base) - math.log(self.min, self.base) 297 else: 298 temp_x = self.n_bins * (value - self.min) 299 temp_y = self.max - self.min 300 # Bin index calulation 301 return int(math.floor(temp_x / temp_y)) 302 303 304 ################################################################################ 305 306 class _Slab(object): 307 """ 308 Compute average I(Q) for a region of interest 309 """ 310 311 def __init__(self, x_min=0.0, x_max=0.0, y_min=0.0, 312 y_max=0.0, bin_width=0.001): 313 # Minimum Qx value [A-1] 314 self.x_min = x_min 315 # Maximum Qx value [A-1] 316 self.x_max = x_max 317 # Minimum Qy value [A-1] 318 self.y_min = y_min 319 # Maximum Qy value [A-1] 320 self.y_max = y_max 321 # Bin width (step size) [A-1] 322 self.bin_width = bin_width 323 # If True, I(|Q|) will be return, otherwise, 324 # negative q-values are allowed 325 self.fold = False 326 327 
def __call__(self, data2D): 328 return NotImplemented 329 330 def _avg(self, data2D, maj): 331 """ 332 Compute average I(Q_maj) for a region of interest. 333 The major axis is defined as the axis of Q_maj. 334 The minor axis is the axis that we average over. 335 336 :param data2D: Data2D object 337 :param maj_min: min value on the major axis 338 :return: Data1D object 339 """ 340 if len(data2D.detector) > 1: 341 msg = "_Slab._avg: invalid number of " 342 msg += " detectors: %g" % len(data2D.detector) 343 raise RuntimeError(msg) 344 345 # Get data 346 data = data2D.data[np.isfinite(data2D.data)] 347 err_data = data2D.err_data[np.isfinite(data2D.data)] 348 qx_data = data2D.qx_data[np.isfinite(data2D.data)] 349 qy_data = data2D.qy_data[np.isfinite(data2D.data)] 350 351 # Build array of Q intervals 352 if maj == 'x': 353 if self.fold: 354 x_min = 0 355 else: 356 x_min = self.x_min 357 nbins = int(math.ceil((self.x_max - x_min) / self.bin_width)) 358 elif maj == 'y': 359 if self.fold: 360 y_min = 0 361 else: 362 y_min = self.y_min 363 nbins = int(math.ceil((self.y_max - y_min) / self.bin_width)) 364 else: 365 raise RuntimeError("_Slab._avg: unrecognized axis %s" % str(maj)) 366 367 x = np.zeros(nbins) 368 y = np.zeros(nbins) 369 err_y = np.zeros(nbins) 370 y_counts = np.zeros(nbins) 371 372 # Average pixelsize in q space 373 for npts in range(len(data)): 374 # default frac 375 frac_x = 0 376 frac_y = 0 377 # get ROI 378 if self.x_min <= qx_data[npts] and self.x_max > qx_data[npts]: 379 frac_x = 1 380 if self.y_min <= qy_data[npts] and self.y_max > qy_data[npts]: 381 frac_y = 1 382 frac = frac_x * frac_y 383 384 if frac == 0: 385 continue 386 # binning: find axis of q 387 if maj == 'x': 388 q_value = qx_data[npts] 389 min_value = x_min 390 if maj == 'y': 391 q_value = qy_data[npts] 392 min_value = y_min 393 if self.fold and q_value < 0: 394 q_value = -q_value 395 # bin 396 i_q = int(math.ceil((q_value - min_value) / self.bin_width)) - 1 397 398 # skip outside of max bins 399 if i_q < 0 or i_q >= nbins: 400 continue 401 402 # TODO: find better definition of x[i_q] based on q_data 403 # min_value + (i_q + 1) * self.bin_width / 2.0 404 x[i_q] += frac * q_value 405 y[i_q] += frac * data[npts] 406 407 if err_data is None or err_data[npts] == 0.0: 408 if data[npts] < 0: 409 data[npts] = -data[npts] 410 err_y[i_q] += frac * frac * data[npts] 411 else: 412 err_y[i_q] += frac * frac * err_data[npts] * err_data[npts] 413 y_counts[i_q] += frac 414 415 # Average the sums 416 for n in range(nbins): 417 err_y[n] = math.sqrt(err_y[n]) 418 419 err_y = err_y / y_counts 420 y = y / y_counts 421 x = x / y_counts 422 idx = (np.isfinite(y) & np.isfinite(x)) 423 424 if not idx.any(): 425 msg = "Average Error: No points inside ROI to average..." 
426 raise ValueError(msg) 427 return Data1D(x=x[idx], y=y[idx], dy=err_y[idx]) 428 429 430 class SlabY(_Slab): 431 """ 432 Compute average I(Qy) for a region of interest 433 """ 434 435 def __call__(self, data2D): 436 """ 437 Compute average I(Qy) for a region of interest 438 439 :param data2D: Data2D object 440 :return: Data1D object 441 """ 442 return self._avg(data2D, 'y') 443 444 445 class SlabX(_Slab): 446 """ 447 Compute average I(Qx) for a region of interest 448 """ 449 450 def __call__(self, data2D): 451 """ 452 Compute average I(Qx) for a region of interest 453 :param data2D: Data2D object 454 :return: Data1D object 455 """ 456 return self._avg(data2D, 'x') 457 458 ################################################################################ 459 460 class Boxsum(object): 461 """ 462 Perform the sum of counts in a 2D region of interest. 463 """ 464 465 def __init__(self, x_min=0.0, x_max=0.0, y_min=0.0, y_max=0.0): 466 # Minimum Qx value [A-1] 467 self.x_min = x_min 468 # Maximum Qx value [A-1] 469 self.x_max = x_max 470 # Minimum Qy value [A-1] 471 self.y_min = y_min 472 # Maximum Qy value [A-1] 473 self.y_max = y_max 474 475 def __call__(self, data2D): 476 """ 477 Perform the sum in the region of interest 478 479 :param data2D: Data2D object 480 :return: number of counts, error on number of counts, 481 number of points summed 482 """ 483 y, err_y, y_counts = self._sum(data2D) 484 485 # Average the sums 486 counts = 0 if y_counts == 0 else y 487 error = 0 if y_counts == 0 else math.sqrt(err_y) 488 489 # Added y_counts to return, SMK & PDB, 04/03/2013 490 return counts, error, y_counts 491 492 def _sum(self, data2D): 493 """ 494 Perform the sum in the region of interest 495 496 :param data2D: Data2D object 497 :return: number of counts, 498 error on number of counts, number of entries summed 499 """ 500 if len(data2D.detector) > 1: 501 msg = "Circular averaging: invalid number " 502 msg += "of detectors: %g" % len(data2D.detector) 503 raise RuntimeError(msg) 504 # Get data 505 data = data2D.data[np.isfinite(data2D.data)] 506 err_data = data2D.err_data[np.isfinite(data2D.data)] 507 qx_data = data2D.qx_data[np.isfinite(data2D.data)] 508 qy_data = data2D.qy_data[np.isfinite(data2D.data)] 509 510 y = 0.0 511 err_y = 0.0 512 y_counts = 0.0 513 514 # Average pixelsize in q space 515 for npts in range(len(data)): 516 # default frac 517 frac_x = 0 518 frac_y = 0 519 520 # get min and max at each points 521 qx = qx_data[npts] 522 qy = qy_data[npts] 523 524 # get the ROI 525 if self.x_min <= qx and self.x_max > qx: 526 frac_x = 1 527 if self.y_min <= qy and self.y_max > qy: 528 frac_y = 1 529 # Find the fraction along each directions 530 frac = frac_x * frac_y 531 if frac == 0: 532 continue 533 y += frac * data[npts] 534 if err_data is None or err_data[npts] == 0.0: 535 if data[npts] < 0: 536 data[npts] = -data[npts] 537 err_y += frac * frac * data[npts] 538 else: 539 err_y += frac * frac * err_data[npts] * err_data[npts] 540 y_counts += frac 541 return y, err_y, y_counts 542 543 544 class Boxavg(Boxsum): 545 """ 546 Perform the average of counts in a 2D region of interest. 
547 """ 548 549 def __init__(self, x_min=0.0, x_max=0.0, y_min=0.0, y_max=0.0): 550 super(Boxavg, self).__init__(x_min=x_min, x_max=x_max, 551 y_min=y_min, y_max=y_max) 552 553 def __call__(self, data2D): 554 """ 555 Perform the sum in the region of interest 556 557 :param data2D: Data2D object 558 :return: average counts, error on average counts 559 560 """ 561 y, err_y, y_counts = self._sum(data2D) 562 563 # Average the sums 564 counts = 0 if y_counts == 0 else y / y_counts 565 error = 0 if y_counts == 0 else math.sqrt(err_y) / y_counts 566 567 return counts, error 568 569 ################################################################################ 570 571 class CircularAverage(object): 572 """ 573 Perform circular averaging on 2D data 574 575 The data returned is the distribution of counts 576 as a function of Q 577 """ 578 579 def __init__(self, r_min=0.0, r_max=0.0, bin_width=0.0005): 580 # Minimum radius included in the average [A-1] 581 self.r_min = r_min 582 # Maximum radius included in the average [A-1] 583 self.r_max = r_max 584 # Bin width (step size) [A-1] 585 self.bin_width = bin_width 586 587 def __call__(self, data2D, ismask=False): 588 """ 589 Perform circular averaging on the data 590 591 :param data2D: Data2D object 592 :return: Data1D object 593 """ 594 # Get data W/ finite values 595 data = data2D.data[np.isfinite(data2D.data)] 596 q_data = data2D.q_data[np.isfinite(data2D.data)] 597 err_data = data2D.err_data[np.isfinite(data2D.data)] 598 mask_data = data2D.mask[np.isfinite(data2D.data)] 599 600 dq_data = None 601 if data2D.dqx_data is not None and data2D.dqy_data is not None: 602 dq_data = get_dq_data(data2D) 603 604 if len(q_data) == 0: 605 msg = "Circular averaging: invalid q_data: %g" % data2D.q_data 606 raise RuntimeError(msg) 607 608 # Build array of Q intervals 609 nbins = int(math.ceil((self.r_max - self.r_min) / self.bin_width)) 610 611 x = np.zeros(nbins) 612 y = np.zeros(nbins) 613 err_y = np.zeros(nbins) 614 err_x = np.zeros(nbins) 615 y_counts = np.zeros(nbins) 616 617 for npt in range(len(data)): 618 619 if ismask and not mask_data[npt]: 620 continue 621 622 frac = 0 623 624 # q-value at the pixel (j,i) 625 q_value = q_data[npt] 626 data_n = data[npt] 627 628 # No need to calculate the frac when all data are within range 629 if self.r_min >= self.r_max: 630 raise ValueError("Limit Error: min > max") 631 632 if self.r_min <= q_value and q_value <= self.r_max: 633 frac = 1 634 if frac == 0: 635 continue 636 i_q = int(math.floor((q_value - self.r_min) / self.bin_width)) 637 638 # Take care of the edge case at phi = 2pi. 639 if i_q == nbins: 640 i_q = nbins - 1 641 y[i_q] += frac * data_n 642 # Take dqs from data to get the q_average 643 x[i_q] += frac * q_value 644 if err_data is None or err_data[npt] == 0.0: 645 if data_n < 0: 646 data_n = -data_n 647 err_y[i_q] += frac * frac * data_n 648 else: 649 err_y[i_q] += frac * frac * err_data[npt] * err_data[npt] 650 if dq_data is not None: 651 # To be consistent with dq calculation in 1d reduction, 652 # we need just the averages (not quadratures) because 653 # it should not depend on the number of the q points 654 # in the qr bins. 
655 err_x[i_q] += frac * dq_data[npt] 656 else: 657 err_x = None 658 y_counts[i_q] += frac 659 660 # Average the sums 661 for n in range(nbins): 662 if err_y[n] < 0: 663 err_y[n] = -err_y[n] 664 err_y[n] = math.sqrt(err_y[n]) 665 # if err_x is not None: 666 # err_x[n] = math.sqrt(err_x[n]) 667 668 err_y = err_y / y_counts 669 err_y[err_y == 0] = np.average(err_y) 670 y = y / y_counts 671 x = x / y_counts 672 idx = (np.isfinite(y)) & (np.isfinite(x)) 673 674 if err_x is not None: 675 d_x = err_x[idx] / y_counts[idx] 676 else: 677 d_x = None 678 679 if not idx.any(): 680 msg = "Average Error: No points inside ROI to average..." 681 raise ValueError(msg) 682 683 return Data1D(x=x[idx], y=y[idx], dy=err_y[idx], dx=d_x) 684 685 ################################################################################ 686 687 class Ring(object): 688 """ 689 Defines a ring on a 2D data set. 690 The ring is defined by r_min, r_max, and 691 the position of the center of the ring. 692 693 The data returned is the distribution of counts 694 around the ring as a function of phi. 695 696 Phi_min and phi_max should be defined between 0 and 2*pi 697 in anti-clockwise starting from the x- axis on the left-hand side 698 """ 699 # Todo: remove center. 700 701 def __init__(self, r_min=0, r_max=0, center_x=0, center_y=0, nbins=36): 702 # Minimum radius 703 self.r_min = r_min 704 # Maximum radius 705 self.r_max = r_max 706 # Center of the ring in x 707 self.center_x = center_x 708 # Center of the ring in y 709 self.center_y = center_y 710 # Number of angular bins 711 self.nbins_phi = nbins 712 713 def __call__(self, data2D): 714 """ 715 Apply the ring to the data set. 716 Returns the angular distribution for a given q range 717 718 :param data2D: Data2D object 719 720 :return: Data1D object 721 """ 722 if data2D.__class__.__name__ not in ["Data2D", "plottable_2D"]: 723 raise RuntimeError("Ring averaging only take plottable_2D objects") 724 725 Pi = math.pi 726 727 # Get data 728 data = data2D.data[np.isfinite(data2D.data)] 729 q_data = data2D.q_data[np.isfinite(data2D.data)] 730 err_data = data2D.err_data[np.isfinite(data2D.data)] 731 qx_data = data2D.qx_data[np.isfinite(data2D.data)] 732 qy_data = data2D.qy_data[np.isfinite(data2D.data)] 733 734 # Set space for 1d outputs 735 phi_bins = np.zeros(self.nbins_phi) 736 phi_counts = np.zeros(self.nbins_phi) 737 phi_values = np.zeros(self.nbins_phi) 738 phi_err = np.zeros(self.nbins_phi) 739 740 # Shift to apply to calculated phi values in order 741 # to center first bin at zero 742 phi_shift = Pi / self.nbins_phi 743 744 for npt in range(len(data)): 745 frac = 0 746 # q-value at the point (npt) 747 q_value = q_data[npt] 748 data_n = data[npt] 749 750 # phi-value at the point (npt) 751 phi_value = math.atan2(qy_data[npt], qx_data[npt]) + Pi 752 753 if self.r_min <= q_value and q_value <= self.r_max: 754 frac = 1 755 if frac == 0: 756 continue 757 # binning 758 i_phi = int(math.floor((self.nbins_phi) * 759 (phi_value + phi_shift) / (2 * Pi))) 760 761 # Take care of the edge case at phi = 2pi. 
762 if i_phi >= self.nbins_phi: 763 i_phi = 0 764 phi_bins[i_phi] += frac * data[npt] 765 766 if err_data is None or err_data[npt] == 0.0: 767 if data_n < 0: 768 data_n = -data_n 769 phi_err[i_phi] += frac * frac * math.fabs(data_n) 770 else: 771 phi_err[i_phi] += frac * frac * err_data[npt] * err_data[npt] 772 phi_counts[i_phi] += frac 773 774 for i in range(self.nbins_phi): 775 phi_bins[i] = phi_bins[i] / phi_counts[i] 776 phi_err[i] = math.sqrt(phi_err[i]) / phi_counts[i] 777 phi_values[i] = 2.0 * math.pi / self.nbins_phi * (1.0 * i) 778 779 idx = (np.isfinite(phi_bins)) 780 781 if not idx.any(): 782 msg = "Average Error: No points inside ROI to average..." 783 raise ValueError(msg) 784 # elif len(phi_bins[idx])!= self.nbins_phi: 785 # print "resulted",self.nbins_phi- len(phi_bins[idx]) 786 #,"empty bin(s) due to tight binning..." 787 return Data1D(x=phi_values[idx], y=phi_bins[idx], dy=phi_err[idx]) 736 788 737 789 … … 748 800 starting from the x- axis on the left-hand side 749 801 """ 750 def __init__(self, r_min, r_max, phi_min=0, phi_max=2 * math.pi, nbins=20): 802 803 def __init__(self, r_min, r_max, phi_min=0, phi_max=2 * math.pi, nbins=20, 804 base=None): 805 ''' 806 :param base: must be a valid base for an algorithm, i.e., 807 a positive number 808 ''' 751 809 self.r_min = r_min 752 810 self.r_max = r_max … … 754 812 self.phi_max = phi_max 755 813 self.nbins = nbins 814 self.base = base 756 815 757 816 def _agv(self, data2D, run='phi'): … … 765 824 """ 766 825 if data2D.__class__.__name__ not in ["Data2D", "plottable_2D"]: 767 raise RuntimeError, "Ring averaging only take plottable_2D objects" 768 Pi = math.pi 826 raise RuntimeError("Ring averaging only take plottable_2D objects") 769 827 770 828 # Get the all data & info 771 data = data2D.data[numpy.isfinite(data2D.data)] 772 q_data = data2D.q_data[numpy.isfinite(data2D.data)] 773 err_data = data2D.err_data[numpy.isfinite(data2D.data)] 774 qx_data = data2D.qx_data[numpy.isfinite(data2D.data)] 775 qy_data = data2D.qy_data[numpy.isfinite(data2D.data)] 829 data = data2D.data[np.isfinite(data2D.data)] 830 q_data = data2D.q_data[np.isfinite(data2D.data)] 831 err_data = data2D.err_data[np.isfinite(data2D.data)] 832 qx_data = data2D.qx_data[np.isfinite(data2D.data)] 833 qy_data = data2D.qy_data[np.isfinite(data2D.data)] 834 776 835 dq_data = None 777 778 # Get the dq for resolution averaging 779 if data2D.dqx_data != None and data2D.dqy_data != None: 780 # The pinholes and det. pix contribution present 781 # in both direction of the 2D which must be subtracted when 782 # converting to 1D: dq_overlap should calculated ideally at 783 # q = 0. 784 # Extrapolate dqy(perp) at q = 0 785 z_max = max(data2D.q_data) 786 z_min = min(data2D.q_data) 787 x_max = data2D.dqx_data[data2D.q_data[z_max]] 788 x_min = data2D.dqx_data[data2D.q_data[z_min]] 789 y_max = data2D.dqy_data[data2D.q_data[z_max]] 790 y_min = data2D.dqy_data[data2D.q_data[z_min]] 791 # Find qdx at q = 0 792 dq_overlap_x = (x_min * z_max - x_max * z_min) / (z_max - z_min) 793 # when extrapolation goes wrong 794 if dq_overlap_x > min(data2D.dqx_data): 795 dq_overlap_x = min(data2D.dqx_data) 796 dq_overlap_x *= dq_overlap_x 797 # Find qdx at q = 0 798 dq_overlap_y = (y_min * z_max - y_max * z_min) / (z_max - z_min) 799 # when extrapolation goes wrong 800 if dq_overlap_y > min(data2D.dqy_data): 801 dq_overlap_y = min(data2D.dqy_data) 802 # get dq at q=0. 
803 dq_overlap_y *= dq_overlap_y 804 805 dq_overlap = numpy.sqrt((dq_overlap_x + dq_overlap_y) / 2.0) 806 if dq_overlap < 0: 807 dq_overlap = y_min 808 dqx_data = data2D.dqx_data[numpy.isfinite(data2D.data)] 809 dqy_data = data2D.dqy_data[numpy.isfinite(data2D.data)] - dq_overlap 810 # def; dqx_data = dq_r dqy_data = dq_phi 811 # Convert dq 2D to 1D here 812 dqx = dqx_data * dqx_data 813 dqy = dqy_data * dqy_data 814 dq_data = numpy.add(dqx, dqy) 815 dq_data = numpy.sqrt(dq_data) 816 817 #set space for 1d outputs 818 x = numpy.zeros(self.nbins) 819 y = numpy.zeros(self.nbins) 820 y_err = numpy.zeros(self.nbins) 821 x_err = numpy.zeros(self.nbins) 822 y_counts = numpy.zeros(self.nbins) 836 if data2D.dqx_data is not None and data2D.dqy_data is not None: 837 dq_data = get_dq_data(data2D) 838 839 # set space for 1d outputs 840 x = np.zeros(self.nbins) 841 y = np.zeros(self.nbins) 842 y_err = np.zeros(self.nbins) 843 x_err = np.zeros(self.nbins) 844 y_counts = np.zeros(self.nbins) # Cycle counts (for the mean) 823 845 824 846 # Get the min and max into the region: 0 <= phi < 2Pi … … 826 848 phi_max = flip_phi(self.phi_max) 827 849 850 # binning object 851 if run.lower() == 'phi': 852 binning = Binning(self.phi_min, self.phi_max, self.nbins, self.base) 853 else: 854 binning = Binning(self.r_min, self.r_max, self.nbins, self.base) 855 828 856 for n in range(len(data)): 829 frac = 0830 857 831 858 # q-value at the pixel (j,i) … … 837 864 838 865 # phi-value of the pixel (j,i) 839 phi_value = math.atan2(qy_data[n], qx_data[n]) + Pi 840 841 ## No need to calculate the frac when all data are within range 842 if self.r_min <= q_value and q_value <= self.r_max: 843 frac = 1 844 if frac == 0: 866 phi_value = math.atan2(qy_data[n], qx_data[n]) + math.pi 867 868 # No need to calculate: data outside of the radius 869 if self.r_min > q_value or q_value > self.r_max: 845 870 continue 846 #In case of two ROIs (symmetric major and minor regions)(for 'q2') 871 872 # In case of two ROIs (symmetric major and minor regions)(for 'q2') 847 873 if run.lower() == 'q2': 848 # #For minor sector wing874 # For minor sector wing 849 875 # Calculate the minor wing phis 850 phi_min_minor = flip_phi(phi_min - Pi)851 phi_max_minor = flip_phi(phi_max - Pi)876 phi_min_minor = flip_phi(phi_min - math.pi) 877 phi_max_minor = flip_phi(phi_max - math.pi) 852 878 # Check if phis of the minor ring is within 0 to 2pi 853 879 if phi_min_minor > phi_max_minor: 854 is_in = (phi_value > phi_min_minor or \855 880 is_in = (phi_value > phi_min_minor or 881 phi_value < phi_max_minor) 856 882 else: 857 is_in = (phi_value > phi_min_minor and \858 859 860 # For all cases(i.e.,for 'q', 'q2', and 'phi')861 # Find pixels within ROI883 is_in = (phi_value > phi_min_minor and 884 phi_value < phi_max_minor) 885 886 # For all cases(i.e.,for 'q', 'q2', and 'phi') 887 # Find pixels within ROI 862 888 if phi_min > phi_max: 863 is_in = is_in or (phi_value > phi_min or \864 889 is_in = is_in or (phi_value > phi_min or 890 phi_value < phi_max) 865 891 else: 866 is_in = is_in or (phi_value >= phi_min and \ 867 phi_value < phi_max) 868 892 is_in = is_in or (phi_value >= phi_min and 893 phi_value < phi_max) 894 895 # data oustide of the phi range 869 896 if not is_in: 870 frac = 0871 if frac == 0:872 897 continue 873 # Check which type of averaging we need 898 899 # Get the binning index 874 900 if run.lower() == 'phi': 875 temp_x = (self.nbins) * (phi_value - self.phi_min) 876 temp_y = (self.phi_max - self.phi_min) 877 i_bin = int(math.floor(temp_x / temp_y)) 
901 i_bin = binning.get_bin_index(phi_value) 878 902 else: 879 temp_x = (self.nbins) * (q_value - self.r_min) 880 temp_y = (self.r_max - self.r_min) 881 i_bin = int(math.floor(temp_x / temp_y)) 903 i_bin = binning.get_bin_index(q_value) 882 904 883 905 # Take care of the edge case at phi = 2pi. … … 885 907 i_bin = self.nbins - 1 886 908 887 # #Get the total y888 y[i_bin] += frac *data_n889 x[i_bin] += frac *q_value890 if err_data[n] ==None or err_data[n] == 0.0:909 # Get the total y 910 y[i_bin] += data_n 911 x[i_bin] += q_value 912 if err_data[n] is None or err_data[n] == 0.0: 891 913 if data_n < 0: 892 914 data_n = -data_n 893 y_err[i_bin] += frac * frac *data_n915 y_err[i_bin] += data_n 894 916 else: 895 y_err[i_bin] += frac * frac * err_data[n] * err_data[n]896 897 if dq_data !=None:917 y_err[i_bin] += err_data[n]**2 918 919 if dq_data is not None: 898 920 # To be consistent with dq calculation in 1d reduction, 899 921 # we need just the averages (not quadratures) because 900 922 # it should not depend on the number of the q points 901 923 # in the qr bins. 902 x_err[i_bin] += frac *dq_data[n]924 x_err[i_bin] += dq_data[n] 903 925 else: 904 926 x_err = None 905 y_counts[i_bin] += frac927 y_counts[i_bin] += 1 906 928 907 929 # Organize the results … … 918 940 # We take the center of ring area, not radius. 919 941 # This is more accurate than taking the radial center of ring. 920 # delta_r = (self.r_max - self.r_min) / self.nbins921 # r_inner = self.r_min + delta_r * i922 # r_outer = r_inner + delta_r923 # x[i] = math.sqrt((r_inner * r_inner + r_outer * r_outer) / 2)942 # delta_r = (self.r_max - self.r_min) / self.nbins 943 # r_inner = self.r_min + delta_r * i 944 # r_outer = r_inner + delta_r 945 # x[i] = math.sqrt((r_inner * r_inner + r_outer * r_outer) / 2) 924 946 x[i] = x[i] / y_counts[i] 925 y_err[y_err == 0] = n umpy.average(y_err)926 idx = (n umpy.isfinite(y) & numpy.isfinite(y_err))927 if x_err !=None:947 y_err[y_err == 0] = np.average(y_err) 948 idx = (np.isfinite(y) & np.isfinite(y_err)) 949 if x_err is not None: 928 950 d_x = x_err[idx] / y_counts[idx] 929 951 else: … … 931 953 if not idx.any(): 932 954 msg = "Average Error: No points inside sector of ROI to average..." 933 raise ValueError , msg934 # elif len(y[idx])!= self.nbins:955 raise ValueError(msg) 956 # elif len(y[idx])!= self.nbins: 935 957 # print "resulted",self.nbins- len(y[idx]), 936 # "empty bin(s) due to tight binning..."958 # "empty bin(s) due to tight binning..." 937 959 return Data1D(x=x[idx], y=y[idx], dy=y_err[idx], dx=d_x) 938 960 … … 946 968 The number of bin in phi also has to be defined. 947 969 """ 970 948 971 def __call__(self, data2D): 949 972 """ … … 965 988 The number of bin in Q also has to be defined. 
966 989 """ 990 967 991 def __call__(self, data2D): 968 992 """ … … 975 999 return self._agv(data2D, 'q2') 976 1000 1001 ################################################################################ 977 1002 978 1003 class Ringcut(object): … … 987 1012 in anti-clockwise starting from the x- axis on the left-hand side 988 1013 """ 1014 989 1015 def __init__(self, r_min=0, r_max=0, center_x=0, center_y=0): 990 1016 # Minimum radius … … 1007 1033 """ 1008 1034 if data2D.__class__.__name__ not in ["Data2D", "plottable_2D"]: 1009 raise RuntimeError , "Ring cut only take plottable_2D objects"1035 raise RuntimeError("Ring cut only take plottable_2D objects") 1010 1036 1011 1037 # Get data 1012 1038 qx_data = data2D.qx_data 1013 1039 qy_data = data2D.qy_data 1014 q_data = n umpy.sqrt(qx_data * qx_data + qy_data * qy_data)1040 q_data = np.sqrt(qx_data * qx_data + qy_data * qy_data) 1015 1041 1016 1042 # check whether or not the data point is inside ROI … … 1018 1044 return out 1019 1045 1046 ################################################################################ 1020 1047 1021 1048 class Boxcut(object): … … 1023 1050 Find a rectangular 2D region of interest. 1024 1051 """ 1052 1025 1053 def __init__(self, x_min=0.0, x_max=0.0, y_min=0.0, y_max=0.0): 1026 1054 # Minimum Qx value [A-1] … … 1055 1083 """ 1056 1084 if data2D.__class__.__name__ not in ["Data2D", "plottable_2D"]: 1057 raise RuntimeError , "Boxcut take only plottable_2D objects"1085 raise RuntimeError("Boxcut take only plottable_2D objects") 1058 1086 # Get qx_ and qy_data 1059 1087 qx_data = data2D.qx_data … … 1066 1094 return outx & outy 1067 1095 1096 ################################################################################ 1068 1097 1069 1098 class Sectorcut(object): … … 1077 1106 and (phi_max-phi_min) should not be larger than pi 1078 1107 """ 1108 1079 1109 def __init__(self, phi_min=0, phi_max=math.pi): 1080 1110 self.phi_min = phi_min … … 1106 1136 """ 1107 1137 if data2D.__class__.__name__ not in ["Data2D", "plottable_2D"]: 1108 raise RuntimeError , "Sectorcut take only plottable_2D objects"1138 raise RuntimeError("Sectorcut take only plottable_2D objects") 1109 1139 Pi = math.pi 1110 1140 # Get data … … 1113 1143 1114 1144 # get phi from data 1115 phi_data = n umpy.arctan2(qy_data, qx_data)1145 phi_data = np.arctan2(qy_data, qx_data) 1116 1146 1117 1147 # Get the min and max into the region: -pi <= phi < Pi … … 1120 1150 # check for major sector 1121 1151 if phi_min_major > phi_max_major: 1122 out_major = (phi_min_major <= phi_data) + (phi_max_major > phi_data) 1152 out_major = (phi_min_major <= phi_data) + \ 1153 (phi_max_major > phi_data) 1123 1154 else: 1124 out_major = (phi_min_major <= phi_data) & (phi_max_major > phi_data) 1155 out_major = (phi_min_major <= phi_data) & ( 1156 phi_max_major > phi_data) 1125 1157 1126 1158 # minor sector … … 1132 1164 if phi_min_minor > phi_max_minor: 1133 1165 out_minor = (phi_min_minor <= phi_data) + \ 1134 1166 (phi_max_minor >= phi_data) 1135 1167 else: 1136 1168 out_minor = (phi_min_minor <= phi_data) & \ 1137 1169 (phi_max_minor >= phi_data) 1138 1170 out = out_major + out_minor 1139 1171 -
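Note: the `Binning` object and `get_dq_data()` used in the hunk above are new helpers defined elsewhere in manipulations.py and are not shown in this diff. Below is a minimal sketch of the linear case only, assuming the constructor signature visible in the diff (`Binning(min, max, n_bins, base)`) and assuming `base=None` means linear binning (a non-None base would bin in log space); this is not the committed implementation.

{{{#!python
import math

class Binning(object):
    """Sketch of the bin-index helper (assumed semantics)."""
    def __init__(self, min_value, max_value, n_bins, base=None):
        self.min = min_value
        self.max = max_value
        self.n_bins = n_bins
        self.base = base   # None -> linear; otherwise log-scale binning

    def get_bin_index(self, value):
        if self.base is None:
            # Same arithmetic as the old inline code:
            # int(floor(n_bins * (value - min) / (max - min)))
            step = (self.max - self.min) / self.n_bins
            return int(math.floor((value - self.min) / step))
        # Equal-width bins in log space
        log = lambda v: math.log(v) / math.log(self.base)
        step = (log(self.max) - log(self.min)) / self.n_bins
        return int(math.floor((log(value) - log(self.min)) / step))
}}}

As in the caller above, `value == max` maps to index `n_bins`, which the edge-case check (`if i_bin == self.nbins: i_bin = self.nbins - 1`) clips back into the last bin.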
src/sas/sascalc/dataloader/readers/IgorReader.py
rb699768 ra1b8fee 12 12 #copyright 2008, University of Tennessee 13 13 ############################################################################# 14 from __future__ import print_function 15 14 16 import os 15 import numpy 16 import math 17 #import logging 17 18 18 from sas.sascalc.dataloader.data_info import Data2D 19 19 from sas.sascalc.dataloader.data_info import Detector 20 20 from sas.sascalc.dataloader.manipulations import reader2D_converter 21 import numpy as np 21 22 22 23 # Look for unit converter … … 40 41 """ Read file """ 41 42 if not os.path.isfile(filename): 42 raise ValueError, \ 43 "Specified file %s is not a regular file" % filename 44 45 # Read file 46 f = open(filename, 'r') 47 buf = f.read() 48 49 # Instantiate data object 43 raise ValueError("Specified file %s is not a regular " 44 "file" % filename) 45 50 46 output = Data2D() 47 51 48 output.filename = os.path.basename(filename) 52 49 detector = Detector() 53 if len(output.detector) > 0:54 print str(output.detector[0])50 if len(output.detector): 51 print(str(output.detector[0])) 55 52 output.detector.append(detector) 56 57 # Get content 58 dataStarted = False 59 60 lines = buf.split('\n') 61 itot = 0 62 x = [] 63 y = [] 64 65 ncounts = 0 66 67 xmin = None 68 xmax = None 69 ymin = None 70 ymax = None 71 72 i_x = 0 73 i_y = -1 74 i_tot_row = 0 75 76 isInfo = False 77 isCenter = False 78 79 data_conv_q = None 80 data_conv_i = None 81 82 if has_converter == True and output.Q_unit != '1/A': 53 54 data_conv_q = data_conv_i = None 55 56 if has_converter and output.Q_unit != '1/A': 83 57 data_conv_q = Converter('1/A') 84 58 # Test it 85 59 data_conv_q(1.0, output.Q_unit) 86 60 87 if has_converter == Trueand output.I_unit != '1/cm':61 if has_converter and output.I_unit != '1/cm': 88 62 data_conv_i = Converter('1/cm') 89 63 # Test it 90 64 data_conv_i(1.0, output.I_unit) 91 92 for line in lines: 93 94 # Find setup info line 95 if isInfo: 96 isInfo = False 97 line_toks = line.split() 98 # Wavelength in Angstrom 99 try: 100 wavelength = float(line_toks[1]) 101 except: 102 msg = "IgorReader: can't read this file, missing wavelength" 103 raise ValueError, msg 104 105 #Find # of bins in a row assuming the detector is square. 
106 if dataStarted == True: 107 try: 108 value = float(line) 109 except: 110 # Found a non-float entry, skip it 111 continue 112 113 # Get total bin number 114 115 i_tot_row += 1 116 i_tot_row = math.ceil(math.sqrt(i_tot_row)) - 1 117 #print "i_tot", i_tot_row 118 size_x = i_tot_row # 192#128 119 size_y = i_tot_row # 192#128 120 output.data = numpy.zeros([size_x, size_y]) 121 output.err_data = numpy.zeros([size_x, size_y]) 122 123 #Read Header and 2D data 124 for line in lines: 125 # Find setup info line 126 if isInfo: 127 isInfo = False 128 line_toks = line.split() 129 # Wavelength in Angstrom 130 try: 131 wavelength = float(line_toks[1]) 132 except: 133 msg = "IgorReader: can't read this file, missing wavelength" 134 raise ValueError, msg 135 # Distance in meters 136 try: 137 distance = float(line_toks[3]) 138 except: 139 msg = "IgorReader: can't read this file, missing distance" 140 raise ValueError, msg 141 142 # Distance in meters 143 try: 144 transmission = float(line_toks[4]) 145 except: 146 msg = "IgorReader: can't read this file, " 147 msg += "missing transmission" 148 raise ValueError, msg 149 150 if line.count("LAMBDA") > 0: 151 isInfo = True 152 153 # Find center info line 154 if isCenter: 155 isCenter = False 156 line_toks = line.split() 157 158 # Center in bin number: Must substrate 1 because 159 #the index starts from 1 160 center_x = float(line_toks[0]) - 1 161 center_y = float(line_toks[1]) - 1 162 163 if line.count("BCENT") > 0: 164 isCenter = True 165 166 # Find data start 167 if line.count("***")>0: 168 dataStarted = True 169 170 # Check that we have all the info 171 if wavelength == None \ 172 or distance == None \ 173 or center_x == None \ 174 or center_y == None: 175 msg = "IgorReader:Missing information in data file" 176 raise ValueError, msg 177 178 if dataStarted == True: 179 try: 180 value = float(line) 181 except: 182 # Found a non-float entry, skip it 183 continue 184 185 # Get bin number 186 if math.fmod(itot, i_tot_row) == 0: 187 i_x = 0 188 i_y += 1 189 else: 190 i_x += 1 191 192 output.data[i_y][i_x] = value 193 ncounts += 1 194 195 # Det 640 x 640 mm 196 # Q = 4pi/lambda sin(theta/2) 197 # Bin size is 0.5 cm 198 #REmoved +1 from theta = (i_x-center_x+1)*0.5 / distance 199 # / 100.0 and 200 #REmoved +1 from theta = (i_y-center_y+1)*0.5 / 201 # distance / 100.0 202 #ToDo: Need complete check if the following 203 # covert process is consistent with fitting.py. 
204 theta = (i_x - center_x) * 0.5 / distance / 100.0 205 qx = 4.0 * math.pi / wavelength * math.sin(theta/2.0) 206 207 if has_converter == True and output.Q_unit != '1/A': 208 qx = data_conv_q(qx, units=output.Q_unit) 209 210 if xmin == None or qx < xmin: 211 xmin = qx 212 if xmax == None or qx > xmax: 213 xmax = qx 214 215 theta = (i_y - center_y) * 0.5 / distance / 100.0 216 qy = 4.0 * math.pi / wavelength * math.sin(theta / 2.0) 217 218 if has_converter == True and output.Q_unit != '1/A': 219 qy = data_conv_q(qy, units=output.Q_unit) 220 221 if ymin == None or qy < ymin: 222 ymin = qy 223 if ymax == None or qy > ymax: 224 ymax = qy 225 226 if not qx in x: 227 x.append(qx) 228 if not qy in y: 229 y.append(qy) 230 231 itot += 1 232 233 65 66 data_row = 0 67 wavelength = distance = center_x = center_y = None 68 dataStarted = isInfo = isCenter = False 69 70 with open(filename, 'r') as f: 71 for line in f: 72 data_row += 1 73 # Find setup info line 74 if isInfo: 75 isInfo = False 76 line_toks = line.split() 77 # Wavelength in Angstrom 78 try: 79 wavelength = float(line_toks[1]) 80 except ValueError: 81 msg = "IgorReader: can't read this file, missing wavelength" 82 raise ValueError(msg) 83 # Distance in meters 84 try: 85 distance = float(line_toks[3]) 86 except ValueError: 87 msg = "IgorReader: can't read this file, missing distance" 88 raise ValueError(msg) 89 90 # Distance in meters 91 try: 92 transmission = float(line_toks[4]) 93 except: 94 msg = "IgorReader: can't read this file, " 95 msg += "missing transmission" 96 raise ValueError(msg) 97 98 if line.count("LAMBDA"): 99 isInfo = True 100 101 # Find center info line 102 if isCenter: 103 isCenter = False 104 line_toks = line.split() 105 106 # Center in bin number: Must subtract 1 because 107 # the index starts from 1 108 center_x = float(line_toks[0]) - 1 109 center_y = float(line_toks[1]) - 1 110 111 if line.count("BCENT"): 112 isCenter = True 113 114 # Find data start 115 if line.count("***"): 116 # now have to continue to blank line 117 dataStarted = True 118 119 # Check that we have all the info 120 if (wavelength is None 121 or distance is None 122 or center_x is None 123 or center_y is None): 124 msg = "IgorReader:Missing information in data file" 125 raise ValueError(msg) 126 127 if dataStarted: 128 if len(line.rstrip()): 129 continue 130 else: 131 break 132 133 # The data is loaded in row major order (last index changing most 134 # rapidly). However, the original data is in column major order (first 135 # index changing most rapidly). The swap to column major order is done 136 # in reader2D_converter at the end of this method. 137 data = np.loadtxt(filename, skiprows=data_row) 138 size_x = size_y = int(np.rint(np.sqrt(data.size))) 139 output.data = np.reshape(data, (size_x, size_y)) 140 output.err_data = np.zeros_like(output.data) 141 142 # Det 640 x 640 mm 143 # Q = 4 * pi/lambda * sin(theta/2) 144 # Bin size is 0.5 cm 145 # Removed +1 from theta = (i_x - center_x + 1)*0.5 / distance 146 # / 100.0 and 147 # Removed +1 from theta = (i_y - center_y + 1)*0.5 / 148 # distance / 100.0 149 # ToDo: Need complete check if the following 150 # convert process is consistent with fitting.py. 151 152 # calculate qx, qy bin centers of each pixel in the image 153 theta = (np.arange(size_x) - center_x) * 0.5 / distance / 100. 154 qx = 4 * np.pi / wavelength * np.sin(theta/2) 155 156 theta = (np.arange(size_y) - center_y) * 0.5 / distance / 100. 
157 qy = 4 * np.pi / wavelength * np.sin(theta/2) 158 159 if has_converter and output.Q_unit != '1/A': 160 qx = data_conv_q(qx, units=output.Q_unit) 161 qy = data_conv_q(qx, units=output.Q_unit) 162 163 xmax = np.max(qx) 164 xmin = np.min(qx) 165 ymax = np.max(qy) 166 ymin = np.min(qy) 167 168 # calculate edge offset in q. 234 169 theta = 0.25 / distance / 100.0 235 xstep = 4.0 * math.pi / wavelength * math.sin(theta / 2.0)170 xstep = 4.0 * np.pi / wavelength * np.sin(theta / 2.0) 236 171 237 172 theta = 0.25 / distance / 100.0 238 ystep = 4.0 * math.pi/ wavelength * math.sin(theta / 2.0)173 ystep = 4.0 * np.pi/ wavelength * np.sin(theta / 2.0) 239 174 240 175 # Store all data ###################################### 241 176 # Store wavelength 242 if has_converter == Trueand output.source.wavelength_unit != 'A':177 if has_converter and output.source.wavelength_unit != 'A': 243 178 conv = Converter('A') 244 179 wavelength = conv(wavelength, units=output.source.wavelength_unit) … … 246 181 247 182 # Store distance 248 if has_converter == Trueand detector.distance_unit != 'm':183 if has_converter and detector.distance_unit != 'm': 249 184 conv = Converter('m') 250 185 distance = conv(distance, units=detector.distance_unit) … … 254 189 output.sample.transmission = transmission 255 190 256 # Store pixel size 191 # Store pixel size (mm) 257 192 pixel = 5.0 258 if has_converter == Trueand detector.pixel_size_unit != 'mm':193 if has_converter and detector.pixel_size_unit != 'mm': 259 194 conv = Converter('mm') 260 195 pixel = conv(pixel, units=detector.pixel_size_unit) … … 267 202 268 203 # Store limits of the image (2D array) 269 xmin = xmin -xstep / 2.0270 xmax = xmax +xstep / 2.0271 ymin = ymin -ystep / 2.0272 ymax = ymax +ystep / 2.0273 if has_converter == Trueand output.Q_unit != '1/A':204 xmin -= xstep / 2.0 205 xmax += xstep / 2.0 206 ymin -= ystep / 2.0 207 ymax += ystep / 2.0 208 if has_converter and output.Q_unit != '1/A': 274 209 xmin = data_conv_q(xmin, units=output.Q_unit) 275 210 xmax = data_conv_q(xmax, units=output.Q_unit) … … 282 217 283 218 # Store x and y axis bin centers 284 output.x_bins = x285 output.y_bins = y219 output.x_bins = qx.tolist() 220 output.y_bins = qy.tolist() 286 221 287 222 # Units -
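Note: the rewritten IgorReader loads the whole numeric block with `np.loadtxt` and, per the comment in the hunk, relies on a reshape plus the final `reader2D_converter` call to undo the column-major pixel order. A small self-contained illustration of that reshape/transpose step, assuming the swap amounts to a transpose of the intensity array (which is what the comment implies):

{{{#!python
import numpy as np

# Nine counts streamed with the FIRST index changing fastest (column major)
raw = np.arange(9)

# np.reshape fills in C (row-major) order, so the detector axes come out
# swapped relative to the instrument's ordering...
row_major = np.reshape(raw, (3, 3))

# ...and a transpose restores the intended (i, j) layout.
col_major = row_major.T
assert col_major[2, 0] == raw[2]   # pixel i=2, j=0 is the third value read
}}}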
src/sas/sascalc/dataloader/readers/abs_reader.py
rb699768 r959eb01 9 9 ###################################################################### 10 10 11 import numpy 11 import numpy as np 12 12 import os 13 13 from sas.sascalc.dataloader.data_info import Data1D … … 53 53 buff = input_f.read() 54 54 lines = buff.split('\n') 55 x = n umpy.zeros(0)56 y = n umpy.zeros(0)57 dy = n umpy.zeros(0)58 dx = n umpy.zeros(0)55 x = np.zeros(0) 56 y = np.zeros(0) 57 dy = np.zeros(0) 58 dx = np.zeros(0) 59 59 output = Data1D(x, y, dy=dy, dx=dx) 60 60 detector = Detector() … … 204 204 _dy = data_conv_i(_dy, units=output.y_unit) 205 205 206 x = n umpy.append(x, _x)207 y = n umpy.append(y, _y)208 dy = n umpy.append(dy, _dy)209 dx = n umpy.append(dx, _dx)206 x = np.append(x, _x) 207 y = np.append(y, _y) 208 dy = np.append(dy, _dy) 209 dx = np.append(dx, _dx) 210 210 211 211 except: -
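Note: this hunk only renames `numpy` to `np`, but the surrounding pattern (growing `x`, `y`, `dy`, `dx` with `np.append` once per parsed line) copies each array on every call. A sketch of the cheaper idiom, should this loop ever become a bottleneck:

{{{#!python
import numpy as np

x_list, y_list = [], []
for _x, _y in [(0.01, 5.2), (0.02, 4.8)]:   # stand-in for the parse loop
    x_list.append(_x)                        # O(1) amortized list growth
    y_list.append(_y)

# Convert once after parsing instead of copying per point.
x = np.asarray(x_list, dtype=float)
y = np.asarray(y_list, dtype=float)
}}}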
src/sas/sascalc/dataloader/readers/ascii_reader.py
rd2471870 r235f514 14 14 15 15 16 import numpy 16 import numpy as np 17 17 import os 18 18 from sas.sascalc.dataloader.data_info import Data1D … … 69 69 70 70 # Arrays for data storage 71 tx = n umpy.zeros(0)72 ty = n umpy.zeros(0)73 tdy = n umpy.zeros(0)74 tdx = n umpy.zeros(0)71 tx = np.zeros(0) 72 ty = np.zeros(0) 73 tdy = np.zeros(0) 74 tdx = np.zeros(0) 75 75 76 76 # The first good line of data will define whether … … 128 128 if new_lentoks > 2: 129 129 _dy = float(toks[2]) 130 has_error_dy = False if _dy ==None else True130 has_error_dy = False if _dy is None else True 131 131 132 132 # If a 4th row is present, consider it dx 133 133 if new_lentoks > 3: 134 134 _dx = float(toks[3]) 135 has_error_dx = False if _dx ==None else True135 has_error_dx = False if _dx is None else True 136 136 137 137 # Delete the previously stored lines of data candidates if … … 140 140 is_data == False: 141 141 try: 142 tx = n umpy.zeros(0)143 ty = n umpy.zeros(0)144 tdy = n umpy.zeros(0)145 tdx = n umpy.zeros(0)142 tx = np.zeros(0) 143 ty = np.zeros(0) 144 tdy = np.zeros(0) 145 tdx = np.zeros(0) 146 146 except: 147 147 pass 148 148 149 149 if has_error_dy == True: 150 tdy = n umpy.append(tdy, _dy)150 tdy = np.append(tdy, _dy) 151 151 if has_error_dx == True: 152 tdx = n umpy.append(tdx, _dx)153 tx = n umpy.append(tx, _x)154 ty = n umpy.append(ty, _y)152 tdx = np.append(tdx, _dx) 153 tx = np.append(tx, _x) 154 ty = np.append(ty, _y) 155 155 156 156 #To remember the # of columns on the current line … … 188 188 #Let's re-order the data to make cal. 189 189 # curve look better some cases 190 ind = n umpy.lexsort((ty, tx))191 x = n umpy.zeros(len(tx))192 y = n umpy.zeros(len(ty))193 dy = n umpy.zeros(len(tdy))194 dx = n umpy.zeros(len(tdx))190 ind = np.lexsort((ty, tx)) 191 x = np.zeros(len(tx)) 192 y = np.zeros(len(ty)) 193 dy = np.zeros(len(tdy)) 194 dx = np.zeros(len(tdx)) 195 195 output = Data1D(x, y, dy=dy, dx=dx) 196 196 self.filename = output.filename = basename … … 212 212 output.y = y[x != 0] 213 213 output.dy = dy[x != 0] if has_error_dy == True\ 214 else n umpy.zeros(len(output.y))214 else np.zeros(len(output.y)) 215 215 output.dx = dx[x != 0] if has_error_dx == True\ 216 else n umpy.zeros(len(output.x))216 else np.zeros(len(output.x)) 217 217 218 218 output.xaxis("\\rm{Q}", 'A^{-1}') -
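Note: besides the `np` rename, this hunk switches `== None` comparisons to `is None`. With numpy values that is a correctness issue, not just style, on recent numpy versions:

{{{#!python
import numpy as np

dy = np.zeros(3)

print(dy is None)    # False -- identity test, always a single bool

# Equality is broadcast elementwise, so the result is an array
# ([False False False] on newer numpy, with a FutureWarning), and a
# bare `if dy == None:` raises "truth value ... is ambiguous".
print(dy == None)
}}}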
src/sas/sascalc/dataloader/readers/associations.py
re5c09cf ra1b8fee 14 14 #copyright 2009, University of Tennessee 15 15 ############################################################################# 16 from __future__ import print_function 17 16 18 import os 17 19 import sys 18 20 import logging 19 21 import json 22 23 logger = logging.getLogger(__name__) 20 24 21 25 FILE_NAME = 'defaults.json' … … 67 71 msg = "read_associations: skipping association" 68 72 msg += " for %s\n %s" % (ext.lower(), sys.exc_value) 69 logg ing.error(msg)73 logger.error(msg) 70 74 else: 71 print "Could not find reader association settings\n %s [%s]" % (__file__, os.getcwd())75 print("Could not find reader association settings\n %s [%s]" % (__file__, os.getcwd())) 72 76 73 77 … … 81 85 :param registry_function: function to be called to register each reader 82 86 """ 83 logg ing.info("register_readers is now obsolete: use read_associations()")87 logger.info("register_readers is now obsolete: use read_associations()") 84 88 import abs_reader 85 89 import ascii_reader -
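Note: `logger = logging.getLogger(__name__)` is the per-module logger idiom this changeset applies across the readers, replacing calls on the root `logging` module. A minimal sketch of the pattern, with a hypothetical `load_defaults` helper standing in for the association-reading code:

{{{#!python
import logging

# One logger per module, named after the module, so output and filtering
# identify the source file without extra bookkeeping.
logger = logging.getLogger(__name__)

def load_defaults(path):
    try:
        with open(path) as handle:
            return handle.read()
    except IOError:
        logger.error("could not read defaults from %s", path)
        return None
}}}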
src/sas/sascalc/dataloader/readers/cansas_constants.py
rad4632c r63d773c 135 135 "Sesans": {"storeas": "content"}, 136 136 "zacceptance": {"storeas": "float"}, 137 "yacceptance": {"storeas": "float"}, 137 138 "<any>" : ANY 138 139 } -
src/sas/sascalc/dataloader/readers/cansas_reader.py
r527a190 r6a455cd3 33 33 import xml.dom.minidom 34 34 from xml.dom.minidom import parseString 35 36 logger = logging.getLogger(__name__) 35 37 36 38 PREPROCESS = "xmlpreprocess" … … 285 287 self.current_dataset.yaxis(attr.get('y_axis'), 286 288 attr.get('y_unit')) 289 elif tagname == 'yacceptance': 290 self.current_datainfo.sample.yacceptance = (data_point, unit) 287 291 elif tagname == 'zacceptance': 288 292 self.current_datainfo.sample.zacceptance = (data_point, unit) … … 801 805 :param data1d: presumably a Data1D object 802 806 """ 803 if self.current_dataset ==None:807 if self.current_dataset is None: 804 808 x_vals = np.empty(0) 805 809 y_vals = np.empty(0) … … 889 893 # Write the file 890 894 file_ref = open(filename, 'w') 891 if self.encoding ==None:895 if self.encoding is None: 892 896 self.encoding = "UTF-8" 893 897 doc.write(file_ref, encoding=self.encoding, … … 1009 1013 :param entry_node: lxml node ElementTree object to be appended to 1010 1014 """ 1011 if datainfo.run ==None or datainfo.run == []:1015 if datainfo.run is None or datainfo.run == []: 1012 1016 datainfo.run.append(RUN_NAME_DEFAULT) 1013 1017 datainfo.run_name[RUN_NAME_DEFAULT] = RUN_NAME_DEFAULT … … 1057 1061 sesans.text = str(datainfo.isSesans) 1058 1062 entry_node.append(sesans) 1063 self.write_node(entry_node, "yacceptance", datainfo.sample.yacceptance[0], 1064 {'unit': datainfo.sample.yacceptance[1]}) 1059 1065 self.write_node(entry_node, "zacceptance", datainfo.sample.zacceptance[0], 1060 1066 {'unit': datainfo.sample.zacceptance[1]}) … … 1129 1135 self.write_node(point, "T", spectrum.transmission[i], 1130 1136 {'unit': spectrum.transmission_unit}) 1131 if spectrum.transmission_deviation !=None \1137 if spectrum.transmission_deviation is not None \ 1132 1138 and len(spectrum.transmission_deviation) >= i: 1133 1139 self.write_node(point, "Tdev", … … 1209 1215 str(datainfo.source.name)) 1210 1216 self.append(source, instr) 1211 if datainfo.source.radiation ==None or datainfo.source.radiation == '':1217 if datainfo.source.radiation is None or datainfo.source.radiation == '': 1212 1218 datainfo.source.radiation = "neutron" 1213 1219 self.write_node(source, "radiation", datainfo.source.radiation) … … 1250 1256 :param instr: lxml node ElementTree object to be appended to 1251 1257 """ 1252 if datainfo.collimation == [] or datainfo.collimation ==None:1258 if datainfo.collimation == [] or datainfo.collimation is None: 1253 1259 coll = Collimation() 1254 1260 datainfo.collimation.append(coll) … … 1295 1301 :param inst: lxml instrument node to be appended to 1296 1302 """ 1297 if datainfo.detector ==None or datainfo.detector == []:1303 if datainfo.detector is None or datainfo.detector == []: 1298 1304 det = Detector() 1299 1305 det.name = "" … … 1460 1466 local_unit = None 1461 1467 exec "local_unit = storage.%s_unit" % toks[0] 1462 if local_unit !=None and units.lower() != local_unit.lower():1468 if local_unit is not None and units.lower() != local_unit.lower(): 1463 1469 if HAS_CONVERTER == True: 1464 1470 try: … … 1473 1479 self.errors.add(err_mess) 1474 1480 if optional: 1475 logg ing.info(err_mess)1481 logger.info(err_mess) 1476 1482 else: 1477 1483 raise ValueError, err_mess … … 1482 1488 self.errors.add(err_mess) 1483 1489 if optional: 1484 logg ing.info(err_mess)1490 logger.info(err_mess) 1485 1491 else: 1486 1492 raise ValueError, err_mess -
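Note: `yacceptance` is read and written symmetrically with `zacceptance`, both traveling as `(magnitude, unit)` pairs on the sample. A toy round trip with made-up values (the `Sample` class here is a hypothetical stand-in):

{{{#!python
class Sample(object):
    """Stand-in for the sasview sample object (hypothetical, trimmed down)."""
    pass

sample = Sample()
# Both acceptances travel as (magnitude, unit) pairs:
sample.yacceptance = (0.5, "1/A")   # hypothetical values
sample.zacceptance = (0.5, "1/A")
# The cansas writer then emits one node per axis:
#   <yacceptance unit="1/A">0.5</yacceptance>
#   <zacceptance unit="1/A">0.5</zacceptance>
}}}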
src/sas/sascalc/dataloader/readers/danse_reader.py
rb699768 r235f514 15 15 import os 16 16 import sys 17 import numpy 17 import numpy as np 18 18 import logging 19 19 from sas.sascalc.dataloader.data_info import Data2D, Detector 20 20 from sas.sascalc.dataloader.manipulations import reader2D_converter 21 22 logger = logging.getLogger(__name__) 21 23 22 24 # Look for unit converter … … 79 81 output.detector.append(detector) 80 82 81 output.data = n umpy.zeros([size_x,size_y])82 output.err_data = n umpy.zeros([size_x, size_y])83 output.data = np.zeros([size_x,size_y]) 84 output.err_data = np.zeros([size_x, size_y]) 83 85 84 86 data_conv_q = None … … 142 144 error.append(err) 143 145 except: 144 logg ing.info("Skipping line:%s,%s" %(data_str,146 logger.info("Skipping line:%s,%s" %(data_str, 145 147 sys.exc_value)) 146 148 … … 164 166 165 167 x_vals.append(qx) 166 if xmin ==None or qx < xmin:168 if xmin is None or qx < xmin: 167 169 xmin = qx 168 if xmax ==None or qx > xmax:170 if xmax is None or qx > xmax: 169 171 xmax = qx 170 172 … … 179 181 180 182 y_vals.append(qy) 181 if ymin ==None or qy < ymin:183 if ymin is None or qy < ymin: 182 184 ymin = qy 183 if ymax ==None or qy > ymax:185 if ymax is None or qy > ymax: 184 186 ymax = qy 185 187 … … 196 198 msg = "Skipping entry (v1.0):%s,%s" % (str(data[i_pt]), 197 199 sys.exc_value) 198 logg ing.info(msg)200 logger.info(msg) 199 201 200 202 # Get bin number … … 271 273 raise ValueError, msg 272 274 else: 273 logg ing.info("Danse_reader Reading %s \n" % filename)275 logger.info("Danse_reader Reading %s \n" % filename) 274 276 275 277 # Store loading process information -
src/sas/sascalc/dataloader/readers/hfir1d_reader.py
rb699768 r959eb01 9 9 #copyright 2008, University of Tennessee 10 10 ###################################################################### 11 import numpy 11 import numpy as np 12 12 import os 13 13 from sas.sascalc.dataloader.data_info import Data1D … … 52 52 buff = input_f.read() 53 53 lines = buff.split('\n') 54 x = n umpy.zeros(0)55 y = n umpy.zeros(0)56 dx = n umpy.zeros(0)57 dy = n umpy.zeros(0)54 x = np.zeros(0) 55 y = np.zeros(0) 56 dx = np.zeros(0) 57 dy = np.zeros(0) 58 58 output = Data1D(x, y, dx=dx, dy=dy) 59 59 self.filename = output.filename = basename … … 88 88 _dy = data_conv_i(_dy, units=output.y_unit) 89 89 90 x = n umpy.append(x, _x)91 y = n umpy.append(y, _y)92 dx = n umpy.append(dx, _dx)93 dy = n umpy.append(dy, _dy)90 x = np.append(x, _x) 91 y = np.append(y, _y) 92 dx = np.append(dx, _dx) 93 dy = np.append(dy, _dy) 94 94 except: 95 95 # Couldn't parse this line, skip it -
src/sas/sascalc/dataloader/readers/red2d_reader.py
rb699768 ra1b8fee 9 9 #copyright 2008, University of Tennessee 10 10 ###################################################################### 11 from __future__ import print_function 12 11 13 import os 12 import numpy 14 import numpy as np 13 15 import math 14 16 from sas.sascalc.dataloader.data_info import Data2D, Detector … … 82 84 detector = Detector() 83 85 if len(output.detector) > 0: 84 print str(output.detector[0])86 print(str(output.detector[0])) 85 87 output.detector.append(detector) 86 88 … … 198 200 break 199 201 # Make numpy array to remove header lines using index 200 lines_array = n umpy.array(lines)202 lines_array = np.array(lines) 201 203 202 204 # index for lines_array 203 lines_index = n umpy.arange(len(lines))205 lines_index = np.arange(len(lines)) 204 206 205 207 # get the data lines … … 225 227 226 228 # numpy array form 227 data_array = n umpy.array(data_list1)229 data_array = np.array(data_list1) 228 230 # Redimesion based on the row_num and col_num, 229 231 #otherwise raise an error. … … 235 237 ## Get the all data: Let's HARDcoding; Todo find better way 236 238 # Defaults 237 dqx_data = n umpy.zeros(0)238 dqy_data = n umpy.zeros(0)239 err_data = n umpy.ones(row_num)240 qz_data = n umpy.zeros(row_num)241 mask = n umpy.ones(row_num, dtype=bool)239 dqx_data = np.zeros(0) 240 dqy_data = np.zeros(0) 241 err_data = np.ones(row_num) 242 qz_data = np.zeros(row_num) 243 mask = np.ones(row_num, dtype=bool) 242 244 # Get from the array 243 245 qx_data = data_point[0] … … 254 256 dqy_data = data_point[(5 + ver)] 255 257 #if col_num > (6 + ver): mask[data_point[(6 + ver)] < 1] = False 256 q_data = n umpy.sqrt(qx_data*qx_data+qy_data*qy_data+qz_data*qz_data)258 q_data = np.sqrt(qx_data*qx_data+qy_data*qy_data+qz_data*qz_data) 257 259 258 260 # Extra protection(it is needed for some data files): … … 262 264 263 265 # Store limits of the image in q space 264 xmin = n umpy.min(qx_data)265 xmax = n umpy.max(qx_data)266 ymin = n umpy.min(qy_data)267 ymax = n umpy.max(qy_data)266 xmin = np.min(qx_data) 267 xmax = np.max(qx_data) 268 ymin = np.min(qy_data) 269 ymax = np.max(qy_data) 268 270 269 271 # units … … 287 289 288 290 # store x and y axis bin centers in q space 289 x_bins = n umpy.arange(xmin, xmax + xstep, xstep)290 y_bins = n umpy.arange(ymin, ymax + ystep, ystep)291 x_bins = np.arange(xmin, xmax + xstep, xstep) 292 y_bins = np.arange(ymin, ymax + ystep, ystep) 291 293 292 294 # get the limits of q values … … 300 302 output.data = data 301 303 if (err_data == 1).all(): 302 output.err_data = n umpy.sqrt(numpy.abs(data))304 output.err_data = np.sqrt(np.abs(data)) 303 305 output.err_data[output.err_data == 0.0] = 1.0 304 306 else: … … 335 337 # tranfer the comp. to cartesian coord. for newer version. 336 338 if ver != 1: 337 diag = n umpy.sqrt(qx_data * qx_data + qy_data * qy_data)339 diag = np.sqrt(qx_data * qx_data + qy_data * qy_data) 338 340 cos_th = qx_data / diag 339 341 sin_th = qy_data / diag 340 output.dqx_data = n umpy.sqrt((dqx_data * cos_th) * \342 output.dqx_data = np.sqrt((dqx_data * cos_th) * \ 341 343 (dqx_data * cos_th) \ 342 344 + (dqy_data * sin_th) * \ 343 345 (dqy_data * sin_th)) 344 output.dqy_data = n umpy.sqrt((dqx_data * sin_th) * \346 output.dqy_data = np.sqrt((dqx_data * sin_th) * \ 345 347 (dqx_data * sin_th) \ 346 348 + (dqy_data * cos_th) * \ -
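Note: this file only gets the `np` rename, but the hunk passes through the polar-to-cartesian resolution projection, where `dqx_data`/`dqy_data` hold the radial and azimuthal components (dq_r, dq_phi) for newer file versions. A one-pixel numeric check of that projection:

{{{#!python
import numpy as np

qx, qy = 0.1, 0.0            # pixel sitting on the qx axis
dq_r, dq_phi = 0.01, 0.02    # radial / azimuthal resolution components

diag = np.sqrt(qx**2 + qy**2)
cos_th, sin_th = qx / diag, qy / diag
dqx = np.sqrt((dq_r * cos_th)**2 + (dq_phi * sin_th)**2)
dqy = np.sqrt((dq_r * sin_th)**2 + (dq_phi * cos_th)**2)

# On the qx axis the radial error lands entirely on x, the azimuthal on y:
print(dqx, dqy)              # 0.01 0.02
}}}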
src/sas/sascalc/dataloader/readers/sesans_reader.py
r7caf3e5 r149b8f6 1 1 """ 2 2 SESANS reader (based on ASCII reader) 3 3 4 4 Reader for .ses or .sesans file format 5 6 Jurrian Bakker 5 6 Jurrian Bakker 7 7 """ 8 import numpy 8 import numpy as np 9 9 import os 10 10 from sas.sascalc.dataloader.data_info import Data1D … … 18 18 _ZERO = 1e-16 19 19 20 20 21 class Reader: 21 22 """ 22 23 Class to load sesans files (6 columns). 23 24 """ 24 # #File type25 # File type 25 26 type_name = "SESANS" 26 27 # #Wildcards27 28 # Wildcards 28 29 type = ["SESANS files (*.ses)|*.ses", 29 30 "SESANS files (*..sesans)|*.sesans"] 30 # #List of allowed extensions31 # List of allowed extensions 31 32 ext = ['.ses', '.SES', '.sesans', '.SESANS'] 32 33 # #Flag to bypass extension check33 34 # Flag to bypass extension check 34 35 allow_all = True 35 36 36 37 def read(self, path): 37 38 # print "reader triggered"39 40 38 """ 41 39 Load data file 42 40 43 41 :param path: file path 44 42 45 43 :return: SESANSData1D object, or None 46 44 47 45 :raise RuntimeError: when the file can't be opened 48 46 :raise ValueError: when the length of the data vectors are inconsistent … … 51 49 basename = os.path.basename(path) 52 50 _, extension = os.path.splitext(basename) 53 if self.allow_all or extension.lower() in self.ext: 54 try: 55 # Read in binary mode since GRASP frequently has no-ascii 56 # characters that brakes the open operation 57 input_f = open(path,'rb') 58 except: 59 raise RuntimeError, "sesans_reader: cannot open %s" % path 60 buff = input_f.read() 61 lines = buff.splitlines() 62 x = numpy.zeros(0) 63 y = numpy.zeros(0) 64 dy = numpy.zeros(0) 65 lam = numpy.zeros(0) 66 dlam = numpy.zeros(0) 67 dx = numpy.zeros(0) 68 69 #temp. space to sort data 70 tx = numpy.zeros(0) 71 ty = numpy.zeros(0) 72 tdy = numpy.zeros(0) 73 tlam = numpy.zeros(0) 74 tdlam = numpy.zeros(0) 75 tdx = numpy.zeros(0) 76 output = Data1D(x=x, y=y, lam=lam, dy=dy, dx=dx, dlam=dlam, isSesans=True) 77 self.filename = output.filename = basename 51 if not (self.allow_all or extension.lower() in self.ext): 52 raise RuntimeError( 53 "{} has an unrecognized file extension".format(path)) 54 else: 55 raise RuntimeError("{} is not a file".format(path)) 56 with open(path, 'r') as input_f: 57 line = input_f.readline() 58 params = {} 59 while not line.startswith("BEGIN_DATA"): 60 terms = line.split() 61 if len(terms) >= 2: 62 params[terms[0]] = " ".join(terms[1:]) 63 line = input_f.readline() 64 self.params = params 78 65 79 paramnames=[] 80 paramvals=[] 81 zvals=[] 82 dzvals=[] 83 lamvals=[] 84 dlamvals=[] 85 Pvals=[] 86 dPvals=[] 66 if "FileFormatVersion" not in self.params: 67 raise RuntimeError("SES file missing FileFormatVersion") 68 if float(self.params["FileFormatVersion"]) >= 2.0: 69 raise RuntimeError("SASView only supports SES version 1") 87 70 88 for line in lines: 89 # Initial try for CSV (split on ,) 90 line=line.strip() 91 toks = line.split('\t') 92 if len(toks)==2: 93 paramnames.append(toks[0]) 94 paramvals.append(toks[1]) 95 if len(toks)>5: 96 zvals.append(toks[0]) 97 dzvals.append(toks[3]) 98 lamvals.append(toks[4]) 99 dlamvals.append(toks[5]) 100 Pvals.append(toks[1]) 101 dPvals.append(toks[2]) 102 else: 103 continue 71 if "SpinEchoLength_unit" not in self.params: 72 raise RuntimeError("SpinEchoLength has no units") 73 if "Wavelength_unit" not in self.params: 74 raise RuntimeError("Wavelength has no units") 75 if params["SpinEchoLength_unit"] != params["Wavelength_unit"]: 76 raise RuntimeError("The spin echo data has rudely used " 77 "different units for the spin echo length " 78 "and the 
wavelength. While sasview could " 79 "handle this instance, it is a violation " 80 "of the file format and will not be " 81 "handled by other software.") 104 82 105 x=[] 106 y=[] 107 lam=[] 108 dx=[] 109 dy=[] 110 dlam=[] 111 lam_header = lamvals[0].split() 112 data_conv_z = None 113 default_z_unit = "A" 114 data_conv_P = None 115 default_p_unit = " " # Adjust unit for axis (L^-3) 116 lam_unit = lam_header[1].replace("[","").replace("]","") 117 if lam_unit == 'AA': 118 lam_unit = 'A' 119 varheader=[zvals[0],dzvals[0],lamvals[0],dlamvals[0],Pvals[0],dPvals[0]] 120 valrange=range(1, len(zvals)) 121 for i in valrange: 122 x.append(float(zvals[i])) 123 y.append(float(Pvals[i])) 124 lam.append(float(lamvals[i])) 125 dy.append(float(dPvals[i])) 126 dx.append(float(dzvals[i])) 127 dlam.append(float(dlamvals[i])) 83 headers = input_f.readline().split() 128 84 129 x,y,lam,dy,dx,dlam = [130 numpy.asarray(v, 'double')131 for v in (x,y,lam,dy,dx,dlam)132 ]85 self._insist_header(headers, "SpinEchoLength") 86 self._insist_header(headers, "Depolarisation") 87 self._insist_header(headers, "Depolarisation_error") 88 self._insist_header(headers, "Wavelength") 133 89 134 input_f.close()90 data = np.loadtxt(input_f) 135 91 136 output.x, output.x_unit = self._unit_conversion(x, lam_unit, default_z_unit) 137 output.y = y 138 output.y_unit = r'\AA^{-2} cm^{-1}' # output y_unit added 139 output.dx, output.dx_unit = self._unit_conversion(dx, lam_unit, default_z_unit) 140 output.dy = dy 141 output.lam, output.lam_unit = self._unit_conversion(lam, lam_unit, default_z_unit) 142 output.dlam, output.dlam_unit = self._unit_conversion(dlam, lam_unit, default_z_unit) 143 144 output.xaxis(r"\rm{z}", output.x_unit) 145 output.yaxis(r"\rm{ln(P)/(t \lambda^2)}", output.y_unit) # Adjust label to ln P/(lam^2 t), remove lam column refs 92 if data.shape[1] != len(headers): 93 raise RuntimeError( 94 "File has {} headers, but {} columns".format( 95 len(headers), 96 data.shape[1])) 146 97 147 # Store loading process information 148 output.meta_data['loader'] = self.type_name 149 #output.sample.thickness = float(paramvals[6]) 150 output.sample.name = paramvals[1] 151 output.sample.ID = paramvals[0] 152 zaccept_unit_split = paramnames[7].split("[") 153 zaccept_unit = zaccept_unit_split[1].replace("]","") 154 if zaccept_unit.strip() == r'\AA^-1' or zaccept_unit.strip() == r'\A^-1': 155 zaccept_unit = "1/A" 156 output.sample.zacceptance=(float(paramvals[7]),zaccept_unit) 157 output.vars = varheader 98 if not data.size: 99 raise RuntimeError("{} is empty".format(path)) 100 x = data[:, headers.index("SpinEchoLength")] 101 if "SpinEchoLength_error" in headers: 102 dx = data[:, headers.index("SpinEchoLength_error")] 103 else: 104 dx = x * 0.05 105 lam = data[:, headers.index("Wavelength")] 106 if "Wavelength_error" in headers: 107 dlam = data[:, headers.index("Wavelength_error")] 108 else: 109 dlam = lam * 0.05 110 y = data[:, headers.index("Depolarisation")] 111 dy = data[:, headers.index("Depolarisation_error")] 158 112 159 if len(output.x) < 1: 160 raise RuntimeError, "%s is empty" % path 161 return output 113 lam_unit = self._unit_fetch("Wavelength") 114 x, x_unit = self._unit_conversion(x, "A", 115 self._unit_fetch( 116 "SpinEchoLength")) 117 dx, dx_unit = self._unit_conversion( 118 dx, lam_unit, 119 self._unit_fetch("SpinEchoLength")) 120 dlam, dlam_unit = self._unit_conversion( 121 dlam, lam_unit, 122 self._unit_fetch("Wavelength")) 123 y_unit = self._unit_fetch("Depolarisation") 162 124 163 else: 164 raise RuntimeError, "%s is not a 
file" % path 165 return None 125 output = Data1D(x=x, y=y, lam=lam, dy=dy, dx=dx, dlam=dlam, 126 isSesans=True) 166 127 167 def _unit_conversion(self, value, value_unit, default_unit): 168 if has_converter == True and value_unit != default_unit: 169 data_conv_q = Converter(value_unit) 170 value = data_conv_q(value, units=default_unit) 128 output.y_unit = y_unit 129 output.x_unit = x_unit 130 output.source.wavelength_unit = lam_unit 131 output.source.wavelength = lam 132 self.filename = output.filename = basename 133 output.xaxis(r"\rm{z}", x_unit) 134 # Adjust label to ln P/(lam^2 t), remove lam column refs 135 output.yaxis(r"\rm{ln(P)/(t \lambda^2)}", y_unit) 136 # Store loading process information 137 output.meta_data['loader'] = self.type_name 138 output.sample.name = params["Sample"] 139 output.sample.ID = params["DataFileTitle"] 140 output.sample.thickness = self._unit_conversion( 141 float(params["Thickness"]), "cm", 142 self._unit_fetch("Thickness"))[0] 143 144 output.sample.zacceptance = ( 145 float(params["Theta_zmax"]), 146 self._unit_fetch("Theta_zmax")) 147 148 output.sample.yacceptance = ( 149 float(params["Theta_ymax"]), 150 self._unit_fetch("Theta_ymax")) 151 return output 152 153 @staticmethod 154 def _insist_header(headers, name): 155 if name not in headers: 156 raise RuntimeError( 157 "Missing {} column in spin echo data".format(name)) 158 159 @staticmethod 160 def _unit_conversion(value, value_unit, default_unit): 161 """ 162 Performs unit conversion on a measurement. 163 164 :param value: The magnitude of the measurement 165 :param value_unit: a string containing the final desired unit 166 :param default_unit: string with the units of the original measurement 167 :return: The magnitude of the measurement in the new units 168 """ 169 # (float, string, string) -> float 170 if has_converter and value_unit != default_unit: 171 data_conv_q = Converter(default_unit) 172 value = data_conv_q(value, units=value_unit) 171 173 new_unit = default_unit 172 174 else: 173 175 new_unit = value_unit 174 176 return value, new_unit 177 178 def _unit_fetch(self, unit): 179 return self.params[unit+"_unit"] -
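Note: the rewritten SESANS reader splits the file into a `key value` header (collected into `self.params` until `BEGIN_DATA`), a line of column names, and a numeric block handed to `np.loadtxt`, with columns looked up by name rather than position. A self-contained sketch of that scheme on a made-up two-point file:

{{{#!python
import io
import numpy as np

ses = io.StringIO(u"""FileFormatVersion 1.0
SpinEchoLength_unit A
Wavelength_unit A
BEGIN_DATA
SpinEchoLength Depolarisation Depolarisation_error Wavelength
100.0 -0.01 0.001 2.0
200.0 -0.03 0.001 2.0
""")

# Header: whitespace-separated key/value pairs up to BEGIN_DATA.
params = {}
line = ses.readline()
while not line.startswith("BEGIN_DATA"):
    terms = line.split()
    if len(terms) >= 2:
        params[terms[0]] = " ".join(terms[1:])
    line = ses.readline()

headers = ses.readline().split()          # column names
data = np.loadtxt(ses)                    # numeric block
z = data[:, headers.index("SpinEchoLength")]
print(params["SpinEchoLength_unit"], z)   # A [ 100.  200.]
}}}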
src/sas/sascalc/dataloader/readers/tiff_reader.py
rb699768 r959eb01 13 13 import logging 14 14 import os 15 import numpy 15 import numpy as np 16 16 from sas.sascalc.dataloader.data_info import Data2D 17 17 from sas.sascalc.dataloader.manipulations import reader2D_converter 18 18 19 logger = logging.getLogger(__name__) 20 19 21 class Reader: 20 22 """ … … 56 58 57 59 # Initiazed the output data object 58 output.data = n umpy.zeros([im.size[0], im.size[1]])59 output.err_data = n umpy.zeros([im.size[0], im.size[1]])60 output.mask = n umpy.ones([im.size[0], im.size[1]], dtype=bool)60 output.data = np.zeros([im.size[0], im.size[1]]) 61 output.err_data = np.zeros([im.size[0], im.size[1]]) 62 output.mask = np.ones([im.size[0], im.size[1]], dtype=bool) 61 63 62 64 # Initialize … … 76 78 value = float(val) 77 79 except: 78 logg ing.error("tiff_reader: had to skip a non-float point")80 logger.error("tiff_reader: had to skip a non-float point") 79 81 continue 80 82 … … 94 96 output.x_bins = x_vals 95 97 output.y_bins = y_vals 96 output.qx_data = n umpy.array(x_vals)97 output.qy_data = n umpy.array(y_vals)98 output.qx_data = np.array(x_vals) 99 output.qy_data = np.array(y_vals) 98 100 output.xmin = 0 99 101 output.xmax = im.size[0] - 1 -
src/sas/sascalc/dataloader/readers/xml_reader.py
r527a190 r6a455cd3 18 18 from lxml import etree 19 19 from lxml.builder import E 20 21 logger = logging.getLogger(__name__) 20 22 21 23 PARSER = etree.ETCompatXMLParser(remove_comments=True, remove_pis=False) … … 71 73 self.xmlroot = self.xmldoc.getroot() 72 74 except etree.XMLSyntaxError as xml_error: 73 logg ing.info(xml_error)75 logger.info(xml_error) 74 76 except Exception: 75 77 self.xml = None … … 88 90 self.xmlroot = etree.fromstring(tag_soup) 89 91 except etree.XMLSyntaxError as xml_error: 90 logg ing.info(xml_error)92 logger.info(xml_error) 91 93 except Exception: 92 94 self.xml = None … … 102 104 self.schemadoc = etree.parse(self.schema, parser=PARSER) 103 105 except etree.XMLSyntaxError as xml_error: 104 logg ing.info(xml_error)106 logger.info(xml_error) 105 107 except Exception: 106 108 self.schema = None … … 241 243 :param name: The name of the element to be created 242 244 """ 243 if attrib ==None:245 if attrib is None: 244 246 attrib = {} 245 247 return etree.Element(name, attrib, nsmap) … … 300 302 """ 301 303 text = str(text) 302 if attrib ==None:304 if attrib is None: 303 305 attrib = {} 304 306 elem = E(elementname, attrib, text) -
src/sas/sascalc/dataloader/readers/cansas_reader_HDF5.py
rc94280c r7c24685 126 126 127 127 if isinstance(value, h5py.Group): 128 parent_class = class_name 128 129 self.parent_class = class_name 129 130 parent_list.append(key) … … 136 137 # Recursion step to access data within the group 137 138 self.read_children(value, parent_list) 139 self.parent_class = parent_class 138 140 self.add_intermediate() 139 141 parent_list.remove(key)
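Note: the cansas_reader_HDF5 fix saves `self.parent_class` before recursing into a subgroup and restores it afterwards, so siblings processed after a nested group see the correct parent again. A generic sketch of that save/restore pattern, using plain dicts in place of h5py groups:

{{{#!python
def walk(node, state):
    """Stash the mutable per-walk state before descending, restore it
    when the child group returns (the pattern the fix applies)."""
    for key, value in node.items():
        if isinstance(value, dict):            # stand-in for an h5py.Group
            saved = state["parent_class"]      # save...
            state["parent_class"] = key
            walk(value, state)                 # ...recurse...
            state["parent_class"] = saved      # ...restore for the siblings
        else:
            print(state["parent_class"], key, value)

walk({"SASdata": {"I": 1.0, "SASnote": {"note": "hi"}, "Q": 0.1}},
     {"parent_class": ""})
}}}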