Changeset 63b32bb in sasmodels
- Timestamp: Mar 11, 2015 12:05:10 PM (10 years ago)
- Branches: master, core_shell_microgels, costrafo411, magnetic_model, release_v0.94, release_v0.95, ticket-1257-vesicle-product, ticket_1156, ticket_1265_superball, ticket_822_more_unit_tests
- Children: 49d1d42f
- Parents: a217f7d
- Files: 7 edited
extra/pylint_numpy.py
(r3c56da87 → r63b32bb; whitespace-only change)

    #print("processing",module.name)
    if module.name.startswith('numpy'):
        if module.name == 'numpy': import numpy
        elif module.name == 'numpy.random': import numpy.random
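For context, the hunk above belongs to a pylint/astroid plugin that pre-imports numpy submodules so pylint can resolve their members. Below is a minimal sketch of how such a plugin is commonly wired up; the register/transform structure is an assumption, only the lines shown in the hunk come from the actual file.

    # Sketch of an astroid module transform, loaded via pylint --load-plugins.
    from astroid import MANAGER, scoped_nodes

    def transform(module):
        #print("processing",module.name)
        if module.name.startswith('numpy'):
            if module.name == 'numpy': import numpy
            elif module.name == 'numpy.random': import numpy.random
            # the real plugin presumably goes on to expose the imported
            # module's attributes to astroid (not shown in the hunk)

    def register(linter):
        # pylint calls register() for each plugin; hook the transform in here.
        MANAGER.register_transform(scoped_nodes.Module, transform)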
sasmodels/bumps_model.py
(r750ffa5 → r63b32bb; whitespace-only change)

        if 'theory' not in self._cache:
            if self._fn is None:
                q_input = self.model.make_input(self._fn_inputs)
                self._fn = self.model(q_input)
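The hunk above sits inside a lazily evaluated theory calculation: the kernel input and kernel function are built on first use, and the computed curve is cached until parameters change. A stand-alone sketch of that pattern, with hypothetical names (make_input, the parameter handling) standing in for the real API:

    class LazyTheory:
        # Hypothetical stripped-down version of the caching pattern above.
        def __init__(self, model, fn_inputs):
            self.model = model
            self._fn_inputs = fn_inputs
            self._fn = None        # compiled kernel, built on first use
            self._cache = {}       # cleared whenever parameters change

        def theory(self, pars):
            if 'theory' not in self._cache:
                if self._fn is None:
                    q_input = self.model.make_input(self._fn_inputs)
                    self._fn = self.model(q_input)
                self._cache['theory'] = self._fn(pars)
            return self._cache['theory']

        def update(self):
            # Invalidate the cached curve when a parameter changes.
            self._cache = {}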
sasmodels/kernelcl.py
(r750ffa5 → r63b32bb; layout change to the two continuation lines of the return statement)

    """
    return kernel.get_work_group_info(
        cl.kernel_work_group_info.PREFERRED_WORK_GROUP_SIZE_MULTIPLE,
        queue.device)

def _stretch_input(vector, dtype, extra=1e-3, boundary=32):
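The function touched above asks pyopencl for the device's PREFERRED_WORK_GROUP_SIZE_MULTIPLE, a value typically used to pad work sizes before launching the kernel. The padding helper below is an illustration, not code from kernelcl.py:

    def round_up_to_multiple(n, multiple):
        # Pad a work size up to a whole number of the device's preferred
        # work-group size multiple so the kernel launch stays efficient.
        return ((n + multiple - 1) // multiple) * multiple

    # e.g. with 1000 q points and a preferred multiple of 32:
    # round_up_to_multiple(1000, 32) == 1024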
sasmodels/kerneldll.py
(r750ffa5 → r63b32bb)

 from .kernelpy import PyInput, PyModel

-from .generate import F32, F64
 # Compiler platform details
 if sys.platform == 'darwin':
…
     def __call__(self, fixed_pars, pd_pars, cutoff):
-        real = np.float32 if self.q_input.dtype == F32 else np.float64
+        real = np.float32 if self.q_input.dtype == generate.F32 else np.float64

         nq = c_int(self.q_input.nq)
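The second hunk selects single or double precision from the kernel input's dtype before the ctypes call. A small sketch of the idea, assuming generate.F32 is simply numpy's float32 dtype (the helper name is hypothetical):

    import numpy as np

    F32 = np.dtype('float32')   # assumption: what generate.F32 stands for

    def real_type(dtype):
        # Choose the numpy scalar type matching the compiled kernel's
        # precision, so parameters are converted before the ctypes call.
        return np.float32 if np.dtype(dtype) == F32 else np.float64

    # real_type('float32') is np.float32; real_type('float64') is np.float64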
sasmodels/models/HayterMSAsq.py
(r5959da2 → r63b32bb)

 # dp[5] = dielectconst();

-from numpy import pi,inf
+from numpy import inf

 source = ["HayterMSAsq_kernel.c"]
sasmodels/resolution.py
(r3fdb4b6 → r63b32bb)

 import numpy as np

-def pinhole_resolution(q_calc, q, dq):
+SLIT_SMEAR_POINTS = 500
+
+def pinhole_resolution(q_calc, q, q_width):
     """
     Compute the convolution matrix *W* for pinhole resolution 1-D data.
…
     edges = bin_edges(q_calc)
     edges[edges<0.] = 0. # clip edges below zero
-    G = erf( (edges[:,None] - q[None,:]) / (sqrt(2.0)*dq)[None,:] )
-    weights = G[1:,:] - G[:-1,:]
+    G = erf( (edges[:,None] - q[None,:]) / (sqrt(2.0)*q_width)[None,:] )
+    weights = G[1:] - G[:-1]
     weights /= sum(weights, axis=1)
     return weights
…
 def slit_resolution(q_calc, q, qx_width, qy_width):
     edges = bin_edges(q_calc) # Note: requires q > 0
-    edges[edges<0.] = 0. # clip edges below zero
+    edges[edges<0.] = 0.0 # clip edges below zero
+    qy_min, qy_max = 0.0, edges[-1]

     weights = np.zeros((len(q),len(q_calc)),'d')
-    # Loop for width ( ;Height is analytical.)
+    # Loop for width (height is analytical).
     # Condition: height >>> width, otherwise, below is not accurate enough.
-    # Smear weight numerical iteration for width >0 when the height (>0) presents.
+    # Smear weight numerical iteration for width>0 when height>0.
     # When width = 0, the numerical iteration will be skipped.
     # The resolution calculation for the height is done by direct integration,
-    # assuming the I(q'=sqrt(q_j^2-(q+shift_w)^2)) is constant within a q' bin, [q_high, q_low].
-    # In general, this weight numerical iteration for width >0 might be a rough approximation,
-    # but it must be good enough when height >>> width.
+    # assuming the I(q'=sqrt(q_j^2-(q+shift_w)^2)) is constant within
+    # a q' bin, [q_high, q_low].
+    # In general, this weight numerical iteration for width>0 might be a rough
+    # approximation, but it must be good enough when height >>> width.
     E_sq = edges**2[:,None]
-    y_pts = 500 if np.any(qy_width>0) else 1
-    for k in range(-y_pts+1,y_pts):
-        qy = q if y_pts == 1 else q + qy_width/(y_pts-1)*k
-        qy = np.clip(qy, 0.0, edges[-1])
+    y_points = SLIT_SMEAR_POINTS if np.any(qy_width>0) else 1
+    qy_step = 0 if y_points == 1 else qy_width/(y_points-1)
+    for k in range(-y_points+1,y_points):
+        qy = np.clip(q + qy_step*k, qy_min, qy_max)
         qx_low = qy
         qx_high = sqrt(qx_low**2 + qx_width**2)
         in_x = (q_calc[:,None]>=qx_low[None,:])*(q_calc[:,None]<=qx_high[None,:])
-        weights += (sqrt(E_sq[1:]-qy[None,:]**2)-sqrt(E_sq[:-1]-qy[None,:]**2))*in_x
+        qy_sq = qy**2[None,:]
+        weights += (sqrt(E_sq[1:]-qy_sq) - sqrt(E_sq[:-1]-qy_sq))*in_x
     weights /= sum(weights, axis=1)
-    # Condition: zero slit smear.
-    if (npts_w == 1 and npts_h == 1):
-        if(q_j == q) :
-            weights[i,j] = 1.0
-    #Condition:Smear weight integration for width >0 when the height (=0) does not present.
-    #Or height << width.
-    elif (npts_w!=1 and npts_h==1) or (npts_w!=1 and npts_h != 1 and width/height > 100.0):
-        shift_w = width
-        #del_w = width/((double)npts_w-1.0);
-        q_shifted_low = q - shift_w
-        # High limit of the resolution range
-        q_shifted_high = q + shift_w
-        # Go through all the q_js for weighting those points
-        if(q_j >= q_shifted_low and q_j <= q_shifted_high):
-            # The weighting factor comes,
-            # Give some weight (delq_bin) for the q_j within the resolution range
-            # Weight should be same for all qs except
-            # for the q bin size at j.
-            # Note that the division by q_0 is only due to the precision problem
-            # where q_high - q_low gets to very small.
-            # Later, it will be normalized again.
-            weights[i,j] += (q_high - q_low)/q_0
-    else:
-        # Loop for width (;Height is analytical.)
-        # Condition: height >>> width, otherwise, below is not accurate enough.
-        # Smear weight numerical iteration for width >0 when the height (>0) presents.
-        # When width = 0, the numerical iteration will be skipped.
-        # The resolution calculation for the height is done by direct integration,
-        # assuming the I(q'=sqrt(q_j^2-(q+shift_w)^2)) is constant within a q' bin, [q_high, q_low].
-        # In general, this weight numerical iteration for width >0 might be a rough approximation,
-        # but it must be good enough when height >>> width.
-        for k in range(-npts_w + 1, npts_w+1):
-            if(npts_w!=1):
-                shift_w = width/(npts_w-1.0)*k
-            # For each q-value, compute the weight of each other q-bin
-            # in the I(q) array
-            # Low limit of the resolution range
-            q_shift = q + shift_w
-            if (q_shift < 0.0):
-                q_shift = 0.0
-            q_shifted_low = q_shift
-            # High limit of the resolution range
-            q_shifted_high = sqrt(q_shift * q_shift + shift_h * shift_h)
-
-            # Go through all the q_js for weighting those points
-            if(q_j >= q_shifted_low and q_j <= q_shifted_high) :
-                # The weighting factor comes,
-                # Give some weight (delq_bin) for the q_j within the resolution range
-                # Weight should be same for all qs except
-                # for the q bin size at j.
-                # Note that the division by q_0 is only due to the precision problem
-                # where q_high - q_low gets to very small.
-                # Later, it will be normalized again.
-
-                # The fabs below are not necessary but in case: the weight should never be imaginary.
-                # At the edge of each sub_width, weight += u(at q_high bin) - u(0), where u(0) = 0,
-                # and weighted by (2.0*npts_w - 1.0) once for each q.
-                #if (q == q_j) {
-                if (q_low <= q_shift and q_high > q_shift):
-                    #if (k==0)
-                    weights[i,j] += (sqrt(abs((q_high)*(q_high) - q_shift * q_shift)))/q_0  # * (2.0*double(npts_w)-1.0);
-                # For the rest of sub_width, weight += u(at q_high bin) - u(at q_low bin)
-                else:  # if (u > 0.0){
-                    weights[i,j] += (sqrt(abs((q_high)*(q_high) - q_shift * q_shift)) - sqrt(abs((q_low)*(q_low) - q_shift * q_shift)))/q_0
     return weights

 def bin_edges(x):
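As the first hunk shows, pinhole_resolution builds a matrix W whose column j integrates a Gaussian of width q_width[j], centred on q[j], over the bins of the calculation grid; smearing a theory curve is then a single matrix product. The self-contained sketch below illustrates that idea on a linear grid; the bin_edges helper here is a simplified stand-in, not necessarily the one in resolution.py, and column-wise normalization is used for clarity.

    import numpy as np
    from scipy.special import erf

    def simple_bin_edges(x):
        # Midpoints between samples, extended by half a step at each end
        # (simplified stand-in for resolution.bin_edges).
        mid = 0.5*(x[1:] + x[:-1])
        return np.concatenate([[2*x[0] - mid[0]], mid, [2*x[-1] - mid[-1]]])

    def pinhole_weights(q_calc, q, q_width):
        # Column j is the Gaussian of width q_width[j] centred on q[j],
        # integrated over each q_calc bin and normalized to sum to 1.
        edges = simple_bin_edges(q_calc)
        edges[edges < 0.] = 0.   # clip edges below zero
        G = erf((edges[:, None] - q[None, :])/(np.sqrt(2.0)*q_width)[None, :])
        W = G[1:] - G[:-1]
        return W/W.sum(axis=0)[None, :]

    # Usage: smear a curve computed on q_calc onto the measured q points.
    q_calc = np.linspace(0.001, 0.5, 200)
    q = np.linspace(0.01, 0.4, 20)
    W = pinhole_weights(q_calc, q, np.full_like(q, 0.01))
    I_calc = 1.0/(1.0 + (q_calc/0.1)**4)      # any smooth curve will do
    I_smeared = np.dot(I_calc, W)             # shape (len(q),)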
sasmodels/sasview_model.py
(r3c56da87 → r63b32bb)

                 return

-        raise ValueError, "Model does not contain parameter %s" % name
+        raise ValueError("Model does not contain parameter %s" % name)

     def getParam(self, name):
…
             return self.params[item]

-        raise ValueError, "Model does not contain parameter %s" % name
+        raise ValueError("Model does not contain parameter %s" % name)

     def getParamList(self):