Changeset fd1aec6f in sasview for sansmodels/src/sans/models
- Timestamp:
- Jul 27, 2012 11:18:40 AM (12 years ago)
- Branches:
- master, ESS_GUI, ESS_GUI_Docs, ESS_GUI_batch_fitting, ESS_GUI_bumps_abstraction, ESS_GUI_iss1116, ESS_GUI_iss879, ESS_GUI_iss959, ESS_GUI_opencl, ESS_GUI_ordering, ESS_GUI_sync_sascalc, costrafo411, magnetic_scatt, release-4.1.1, release-4.1.2, release-4.2.2, release_4.0.1, ticket-1009, ticket-1094-headless, ticket-1242-2d-resolution, ticket-1243, ticket-1249, ticket885, unittest-saveload
- Children:
- 69ebe91
- Parents:
- 082c565
- Files:
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
sansmodels/src/sans/models/smearing_2d.py
r1aa8084 rfd1aec6f 1 ##################################################################### 1 """ 2 2 #This software was developed by the University of Tennessee as part of the 3 3 #Distributed Data Analysis of Neutron Scattering Experiments (DANSE) 4 4 #project funded by the US National Science Foundation. 5 5 #See the license text in license.txt 6 #copyright 2008, University of Tennessee 7 ###################################################################### 8 9 ## TODO: Need test,and check Gaussian averaging 6 """ 10 7 import numpy 11 8 import math … … 17 14 LIMIT = 3.0 18 15 ## Defaults 19 R_BIN = {'Xhigh':10, 'High':5, 'Med':5,'Low':3}20 PHI_BIN ={'Xhigh':20, 'High':12,'Med':6,'Low':4}16 R_BIN = {'Xhigh':10, 'High':5, 'Med':5, 'Low':3} 17 PHI_BIN ={'Xhigh':20, 'High':12, 'Med':6, 'Low':4} 21 18 22 19 class Smearer2D: … … 32 29 :param data: 2d data used to set the smearing parameters 33 30 :param model: model function 34 :param index: 1d array with len(data) to define the range of the calculation: elements are given as True or False 31 :param index: 1d array with len(data) to define the range 32 of the calculation: elements are given as True or False 35 33 :param nr: number of bins in dq_r-axis 36 34 :param nphi: number of bins in dq_phi-axis … … 42 40 ## model 43 41 self.model = model 44 ## Accuracy: Higher stands for more sampling points in both directions of r and phi. 42 ## Accuracy: Higher stands for more sampling points in both directions 43 ## of r and phi. 
45 44 self.accuracy = accuracy 46 45 ## number of bins in r axis for over-sampling … … 54 53 self.smearer = True 55 54 self._engine = engine 56 55 self.qx_data = None 56 self.qy_data = None 57 self.q_data = None 58 # dqx and dqy mean dq_parr and dq_perp 59 self.dqx_data = None 60 self.dqy_data = None 61 self.phi_data = None 57 62 58 63 def get_data(self): 59 64 """ 60 get qx_data, qy_data, dqx_data,dqy_data,and calculate phi_data=arctan(qx_data/qy_data) 65 Get qx_data, qy_data, dqx_data,dqy_data, 66 and calculate phi_data=arctan(qx_data/qy_data) 61 67 """ 62 68 if self.data == None or self.data.__class__.__name__ == 'Data1D': … … 70 76 self.dqx_data = self.data.dqx_data[self.index] 71 77 self.dqy_data = self.data.dqy_data[self.index] 72 self.phi_data = numpy.arctan(self.qx_data /self.qy_data)78 self.phi_data = numpy.arctan(self.qx_data / self.qy_data) 73 79 ## Remove singular points if exists 74 self.dqx_data[self.dqx_data <SIGMA_ZERO]=SIGMA_ZERO75 self.dqy_data[self.dqy_data <SIGMA_ZERO]=SIGMA_ZERO80 self.dqx_data[self.dqx_data < SIGMA_ZERO] = SIGMA_ZERO 81 self.dqy_data[self.dqy_data < SIGMA_ZERO] = SIGMA_ZERO 76 82 return True 77 83 … … 143 149 # data length in the range of self.index 144 150 len_data = len(self.qx_data) 145 len_datay = len(self.qy_data) 146 151 #len_datay = len(self.qy_data) 147 152 if self._engine == 'c' and self.coords == 'polar': 148 153 try: 149 154 import sans.models.sans_extension.smearer2d_helper as smearer2dc 150 smearc = smearer2dc.new_Smearer_helper(self.qx_data, self.qy_data, 155 smearc = smearer2dc.new_Smearer_helper(self.qx_data, 156 self.qy_data, 151 157 self.dqx_data, self.dqy_data, 152 self.limit, nr, nphi, int(len_data)) 158 self.limit, nr, nphi, 159 int(len_data)) 153 160 weight_res = numpy.zeros(nr * nphi ) 154 161 qx_res = numpy.zeros(nr * nphi * int(len_data)) … … 158 165 raise 159 166 else: 160 # Mean values of dqr at each bins ,starting from the half of bin size 167 # Mean values of dqr at each bins 168 # starting from 
the half of bin size 161 169 r = bin_size / 2.0 + numpy.arange(nr) * bin_size 162 170 # mean values of qphi at each bines … … 170 178 q_phi = self.qy_data / self.qx_data 171 179 172 # Starting angle is different between polar and cartesian coordinates. 180 # Starting angle is different between polar 181 # and cartesian coordinates. 173 182 #if self.coords != 'polar': 174 183 # dphi += numpy.arctan( q_phi * self.dqx_data/ \ 175 # 176 # 184 # self.dqy_data).repeat(n_bins).reshape(len_data,\ 185 # n_bins).transpose().flatten() 177 186 178 187 # The angle (phi) of the original q point 179 188 q_phi = numpy.arctan(q_phi).repeat(n_bins).reshape(len_data,\ 180 189 n_bins).transpose().flatten() 181 190 ## Find Gaussian weight for each dq bins: The weight depends only 182 191 # on r-direction (The integration may not need) 183 192 weight_res = numpy.exp(-0.5 * ((r - bin_size / 2.0) * \ 184 185 186 193 (r - bin_size / 2.0)))- \ 194 numpy.exp(-0.5 * ((r + bin_size / 2.0 ) *\ 195 (r + bin_size / 2.0))) 187 196 # No needs of normalization here. 
188 197 #weight_res /= numpy.sum(weight_res) … … 192 201 193 202 ## Set dr for all dq bins for averaging 194 dr = r.repeat(nphi).reshape(nr, nphi).transpose().flatten()203 dr = r.repeat(nphi).reshape(nr, nphi).transpose().flatten() 195 204 ## Set dqr for all data points 196 dqx = numpy.outer(dr, self.dqx_data).flatten()197 dqy = numpy.outer(dr, self.dqy_data).flatten()198 199 qx = self.qx_data.repeat(n_bins).reshape(len_data, \205 dqx = numpy.outer(dr, self.dqx_data).flatten() 206 dqy = numpy.outer(dr, self.dqy_data).flatten() 207 208 qx = self.qx_data.repeat(n_bins).reshape(len_data, \ 200 209 n_bins).transpose().flatten() 201 qy = self.qy_data.repeat(n_bins).reshape(len_data, \210 qy = self.qy_data.repeat(n_bins).reshape(len_data, \ 202 211 n_bins).transpose().flatten() 203 212 … … 216 225 val = self.model.evalDistribution([qx_res, qy_res]) 217 226 ## Reshape into 2d array to use numpy weighted averaging 218 value_res= val.reshape(n_bins, len(self.qx_data))227 value_res= val.reshape(n_bins, len(self.qx_data)) 219 228 ## Averaging with Gaussian weighting: normalization included. 220 229 value =numpy.average(value_res,axis=0, weights=weight_res) 221 230 ## Return the smeared values in the range of self.index 222 231 return value 223 232 """ 224 233 if __name__ == '__main__': 225 234 ## Test w/ 2D linear function 226 x = 0.001*numpy.arange(1, 11)235 x = 0.001*numpy.arange(1, 11) 227 236 dx = numpy.ones(len(x))*0.0003 228 y = 0.001*numpy.arange(1, 11)237 y = 0.001*numpy.arange(1, 11) 229 238 dy = numpy.ones(len(x))*0.001 230 239 z = numpy.ones(10) … … 247 256 model.setParam("A", 0) 248 257 249 smear = Smearer2D(out, model,index)258 smear = Smearer2D(out, model, index) 250 259 #smear.set_accuracy('Xhigh') 251 260 value = smear.get_value() 252 261 ## All data are ones, so the smeared should also be ones. 
253 print "Data length =", len(value)262 print "Data length =", len(value) 254 263 print " 2D linear function, I = 0 + 1*qy" 255 264 text = " Gaussian weighted averaging on a 2D linear function will " … … 258 267 print "qx_data", "qy_data", "I_nonsmear", "I_smeared" 259 268 for ind in range(len(value)): 260 print x[ind],y[ind],model.evalDistribution([x,y])[ind], value[ind] 261 262 """ 263 for i in range(len(qx_res)/(128*128)): 264 k = i * 128*128 +64 265 266 print qx_res[k]-qqx[k], qy_res[k]-qqy[k] 267 print qqx[64],qqy[64] 268 """ 269 """ 269 print x[ind], y[ind], model.evalDistribution([x, y])[ind], value[ind] 270 271 270 272 if __name__ == '__main__': 271 273 ## Another Test w/ constant function
Note: See TracChangeset for help on using the changeset viewer.