Changeset 3c56da87 in sasmodels
- Timestamp: Mar 5, 2015 12:55:38 AM (10 years ago)
- Branches: master, core_shell_microgels, costrafo411, magnetic_model, release_v0.94, release_v0.95, ticket-1257-vesicle-product, ticket_1156, ticket_1265_superball, ticket_822_more_unit_tests
- Children: 3a45c2c
- Parents: b89f519
- Files: 2 added, 30 edited
extra/pylint.rc
r53d0e24 r3c56da87 20 20 # List of plugins (as comma separated values of python modules names) to load, 21 21 # usually to register additional checkers. 22 load-plugins= 22 load-plugins=pylint_numpy,pylint_pyopencl 23 23 24 24 # Use multiple processes to speed up Pylint. 25 jobs=425 #jobs=4 26 26 27 27 # Allow loading of arbitrary C extensions. Extensions are imported into the … … 57 57 58 58 # Disable the message(s) with the given id(s). 59 disable=W0702,W0613,W0703,W0142 59 # http://pylint-messages.wikidot.com/all-codes 60 disable= 61 multiple-statements, 62 global-statement, 63 bare-except, 64 broad-except, 65 bad-whitespace, 66 bad-continuation, 67 too-many-statements, 68 too-many-branches, 69 too-many-locals, 70 too-many-instance-attributes, 71 too-many-arguments, 72 star-args, 73 unbalanced-tuple-unpacking, 74 locally-disabled, 75 old-style-class, 60 76 61 77 [REPORTS] … … 98 114 99 115 # Good variable names which should always be accepted, separated by a comma 100 good-names=i,j,k,ex,Run,_,x,y,z,qx,qy,qz,n,q,dx,dy,dz,id,Iq,dIq,Qx,Qy,Qz 116 good-names=_, 117 input, 118 i,j,k,n,x,y,z, 119 q,qx,qy,qz, 120 dt,dx,dy,dz,id, 121 Iq,dIq,Qx,Qy,Qz, 122 p, 123 ER, call_ER, VR, call_VR, 101 124 102 125 # Bad variable names which should always be refused, separated by a comma … … 111 134 112 135 # Regular expression matching correct function names 113 function-rgx=[a-z_][a-z0-9_]{2,30} $136 function-rgx=[a-z_][a-z0-9_]{2,30}([123]D)?$ 114 137 115 138 # Naming hint for function names … … 172 195 # Regular expression which should only match function or class names that do 173 196 # not require a docstring. 174 no-docstring-rgx=__.*__ 197 #no-docstring-rgx=__.*__ 198 no-docstring-rgx=_.* 175 199 176 200 # Minimum line length for functions/classes that require docstrings, shorter … … 181 205 182 206 # Maximum number of characters on a single line. 183 max-line-length=100 207 #max-line-length=100 208 max-line-length=80 184 209 185 210 # Regexp for a line that is allowed to be longer than the limit. 186 ignore-long-lines=^\s*(# )?<?https?://\S+>?$ 211 #ignore-long-lines=^\s*(# )?<?https?://\S+>?$ 212 # No comma in the last forty characters 213 ignore-long-lines=([^-,+*/]{40},?|[^"]{40}")$ 187 214 188 215 # Allow the body of an if to be on the same line as the test if there is no -
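The load-plugins change above tells pylint to import two project-specific plugin modules; the run-pylint.py change below puts the extra/ directory on PYTHONPATH so they can be found. As a rough sketch only (the plugin sources themselves are not part of this excerpt), a pylint plugin is just an importable module exposing a register(linter) hook:

    # Minimal sketch of a plugin module such as the "pylint_numpy" named in
    # load-plugins above.  The real plugins in extra/ may differ; this only
    # shows the register() entry point that pylint calls when it loads a plugin.

    def register(linter):
        """Called by pylint for each module listed in load-plugins."""
        # A real plugin would register checkers or AST transforms here, for
        # example to describe members that numpy or pyopencl create at runtime.
        # linter.register_checker(SomeChecker(linter))  # hypothetical checker
        pass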
extra/run-pylint.py
(r6c2eb83 -> r3c56da87)

    @@ -4,7 +4,11 @@

     def main():
    +    envpath = os.environ.get('PYTHONPATH',None)
    +    path = [envpath] if envpath else []
    +    path.append(abspath(dirname(__file__))) # so we can find the plugins
    +    os.environ['PYTHONPATH'] = ':'.join(path)
         root = abspath(joinpath(dirname(__file__), '..'))
         os.chdir(root)
    -    cmd = "pylint --rcfile extra/pylint.rc -f parseable sasmodels > pylint_violations.txt"
    +    cmd = "pylint --rcfile extra/pylint.rc -f parseable sasmodels"
         status = os.system(cmd)
         sys.exit(status)
sasmodels/alignment.py
(rff7119b -> r3c56da87)

    @@ -34,11 +34,11 @@
         return view

    -def align_data(v, dtype, alignment=128):
    +def align_data(x, dtype, alignment=128):
         """
         Return a copy of an array on the alignment boundary.
         """
    -    # if v is contiguous, aligned, and of the correct type then just return v
    -    view = align_empty(v.shape, dtype, alignment=alignment)
    -    view[:] = v
    +    # if x is contiguous, aligned, and of the correct type then just return x
    +    view = align_empty(x.shape, dtype, alignment=alignment)
    +    view[:] = x
         return view
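A usage sketch for the renamed helper, assuming sasmodels is importable and that, as in the hunk above, align_data simply copies its argument into a freshly aligned buffer:

    import numpy as np
    from sasmodels.alignment import align_data

    q = np.linspace(0.001, 0.5, 200)
    q32 = align_data(q, np.dtype('float32'), alignment=128)
    assert q32.shape == q.shape and np.allclose(q32, q)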
sasmodels/bumps_model.py
rb89f519 r3c56da87 8 8 # CRUFT python 2.6 9 9 if not hasattr(datetime.timedelta, 'total_seconds'): 10 def delay(dt): return dt.days * 86400 + dt.seconds + 1e-6 * dt.microseconds 10 def delay(dt): 11 """Return number date-time delta as number seconds""" 12 return dt.days * 86400 + dt.seconds + 1e-6 * dt.microseconds 11 13 else: 12 def delay(dt): return dt.total_seconds() 14 def delay(dt): 15 """Return number date-time delta as number seconds""" 16 return dt.total_seconds() 13 17 14 18 import numpy as np … … 141 145 from sas.dataloader.manipulations import Boxcut 142 146 if half == 'right': 143 data.mask += Boxcut(x_min=-np.inf, x_max=0.0, y_min=-np.inf, y_max=np.inf)(data) 147 data.mask += \ 148 Boxcut(x_min=-np.inf, x_max=0.0, y_min=-np.inf, y_max=np.inf)(data) 144 149 if half == 'left': 145 data.mask += Boxcut(x_min=0.0, x_max=np.inf, y_min=-np.inf, y_max=np.inf)(data) 146 147 148 def set_top(data, max): 149 """ 150 Chop the top off the data, above *max*. 150 data.mask += \ 151 Boxcut(x_min=0.0, x_max=np.inf, y_min=-np.inf, y_max=np.inf)(data) 152 153 154 def set_top(data, cutoff): 155 """ 156 Chop the top off the data, above *cutoff*. 151 157 """ 152 158 from sas.dataloader.manipulations import Boxcut 153 data.mask += Boxcut(x_min=-np.inf, x_max=np.inf, y_min=-np.inf, y_max=max)(data) 154 155 156 def plot_data(data, iq, vmin=None, vmax=None, view='log'): 159 data.mask += \ 160 Boxcut(x_min=-np.inf, x_max=np.inf, y_min=-np.inf, y_max=cutoff)(data) 161 162 163 def plot_data(data, Iq, vmin=None, vmax=None, view='log'): 157 164 """ 158 165 Plot the target value for the data. This could be the data itself, … … 161 168 *scale* can be 'log' for log scale data, or 'linear'. 162 169 """ 163 from numpy.ma import masked_array , masked170 from numpy.ma import masked_array 164 171 import matplotlib.pyplot as plt 165 172 if hasattr(data, 'qx_data'): 166 iq = iq + 0167 valid = np.isfinite( iq)173 Iq = Iq + 0 174 valid = np.isfinite(Iq) 168 175 if view == 'log': 169 valid[valid] = ( iq[valid] > 0)170 iq[valid] = np.log10(iq[valid])176 valid[valid] = (Iq[valid] > 0) 177 Iq[valid] = np.log10(Iq[valid]) 171 178 elif view == 'q4': 172 iq[valid] = iq*(data.qx_data[valid]**2+data.qy_data[valid]**2)**2173 iq[~valid | data.mask] = 0174 #plottable = iq175 plottable = masked_array( iq, ~valid | data.mask)179 Iq[valid] = Iq*(data.qx_data[valid]**2+data.qy_data[valid]**2)**2 180 Iq[~valid | data.mask] = 0 181 #plottable = Iq 182 plottable = masked_array(Iq, ~valid | data.mask) 176 183 xmin, xmax = min(data.qx_data), max(data.qx_data) 177 184 ymin, ymax = min(data.qy_data), max(data.qy_data) 178 185 try: 179 if vmin is None: vmin = iq[valid & ~data.mask].min()180 if vmax is None: vmax = iq[valid & ~data.mask].max()186 if vmin is None: vmin = Iq[valid & ~data.mask].min() 187 if vmax is None: vmax = Iq[valid & ~data.mask].max() 181 188 except: 182 189 vmin, vmax = 0, 1 … … 186 193 else: # 1D data 187 194 if view == 'linear' or view == 'q4': 188 #idx = np.isfinite( iq)195 #idx = np.isfinite(Iq) 189 196 scale = data.x**4 if view == 'q4' else 1.0 190 plt.plot(data.x, scale* iq) #, '.')197 plt.plot(data.x, scale*Iq) #, '.') 191 198 else: 192 199 # Find the values that are finite and positive 193 idx = np.isfinite( iq)194 idx[idx] = iq[idx]>0195 iq[~idx] = np.nan196 plt.loglog(data.x, iq)200 idx = np.isfinite(Iq) 201 idx[idx] = (Iq[idx] > 0) 202 Iq[~idx] = np.nan 203 plt.loglog(data.x, Iq) 197 204 198 205 … … 220 227 plt.plot(data.x, mresid, 'x') 221 228 229 # pylint: disable=unused-argument 222 230 def _plot_sesans(data, 
theory, view): 223 231 import matplotlib.pyplot as plt … … 286 294 q = sesans.make_q(data.sample.zacceptance, data.Rmax) 287 295 self.index = slice(None, None) 288 self. iq = data.y289 self.d iq = data.dy296 self.Iq = data.y 297 self.dIq = data.dy 290 298 self._theory = np.zeros_like(q) 291 299 q_vectors = [q] 292 300 elif self.data_type == 'Iqxy': 293 301 self.index = (data.mask == 0) & (~np.isnan(data.data)) 294 self. iq = data.data[self.index]295 self.d iq = data.err_data[self.index]302 self.Iq = data.data[self.index] 303 self.dIq = data.err_data[self.index] 296 304 self._theory = np.zeros_like(data.data) 297 305 if not partype['orientation'] and not partype['magnetic']: … … 301 309 elif self.data_type == 'Iq': 302 310 self.index = (data.x >= data.qmin) & (data.x <= data.qmax) & ~np.isnan(data.y) 303 self. iq = data.y[self.index]304 self.d iq = data.dy[self.index]311 self.Iq = data.y[self.index] 312 self.dIq = data.dy[self.index] 305 313 self._theory = np.zeros_like(data.y) 306 314 q_vectors = [data.x] … … 316 324 pars = [] 317 325 for p in model.info['parameters']: 318 name, default, limits , ptype = p[0], p[2], p[3], p[4]326 name, default, limits = p[0], p[2], p[3] 319 327 value = kw.pop(name, default) 320 328 setattr(self, name, Parameter.default(value, name=name, limits=limits)) … … 335 343 self._parameter_names = pars 336 344 if kw: 337 raise TypeError("unexpected parameters: %s" % (", ".join(sorted(kw.keys())))) 345 raise TypeError("unexpected parameters: %s" 346 % (", ".join(sorted(kw.keys())))) 338 347 self.update() 339 348 … … 345 354 Return the number of points 346 355 """ 347 return len(self. iq)356 return len(self.Iq) 348 357 349 358 def parameters(self): … … 365 374 #self._theory[:] = self._fn.eval(pars, pd_pars) 366 375 if self.data_type == 'sesans': 367 P= sesans.hankel(self.data.x, self.data.lam * 1e-9,368 self.data.sample.thickness / 10, self._fn_inputs[0],369 self._theory)370 self._cache['theory'] = P376 result = sesans.hankel(self.data.x, self.data.lam * 1e-9, 377 self.data.sample.thickness / 10, 378 self._fn_inputs[0], self._theory) 379 self._cache['theory'] = result 371 380 else: 372 381 self._cache['theory'] = self._theory … … 375 384 def residuals(self): 376 385 #if np.any(self.err ==0): print "zeros in err" 377 return (self.theory()[self.index] - self. 
iq) / self.diq386 return (self.theory()[self.index] - self.Iq) / self.dIq 378 387 379 388 def nllf(self): 380 R= self.residuals()389 delta = self.residuals() 381 390 #if np.any(np.isnan(R)): print "NaN in residuals" 382 return 0.5 * np.sum( R** 2)383 384 def __call__(self):385 return 2 * self.nllf() / self.dof391 return 0.5 * np.sum(delta ** 2) 392 393 #def __call__(self): 394 # return 2 * self.nllf() / self.dof 386 395 387 396 def plot(self, view='log'): … … 402 411 print "noise", noise 403 412 if noise is None: 404 noise = self.d iq[self.index]413 noise = self.dIq[self.index] 405 414 else: 406 415 noise = 0.01 * noise 407 self.d iq[self.index] = noise416 self.dIq[self.index] = noise 408 417 y = self.theory() 409 418 y += y * np.random.randn(*y.shape) * noise … … 428 437 relative = self.model.info['partype']['pd-rel'] 429 438 limits = self.model.info['limits'] 430 disperser, value, npts, width, nsigma = \ 431 [getattr(self, par + ext) for ext in ('_pd_type', '', '_pd_n', '_pd', '_pd_nsigma')] 432 v, w = weights.get_weights( 439 disperser, value, npts, width, nsigma = [ 440 getattr(self, par + ext) 441 for ext in ('_pd_type', '', '_pd_n', '_pd', '_pd_nsigma')] 442 value, weight = weights.get_weights( 433 443 disperser, int(npts.value), width.value, nsigma.value, 434 444 value.value, limits[par], par in relative) 435 return v , w / w.max()445 return value, weight / np.sum(weight) 436 446 437 447 def __getstate__(self): … … 442 452 443 453 def __setstate__(self, state): 454 # pylint: disable=attribute-defined-outside-init 444 455 self.__dict__ = state 445 456 -
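One behavioural change in this file is easy to miss among the renames: the dispersion-weight helper now returns weights normalized by their sum rather than by their maximum (the w / w.max() to weight / np.sum(weight) change above). A small standalone illustration of why that is convenient; the Gaussian radius distribution is only an example, not taken from the changeset:

    import numpy as np

    radius = np.linspace(40.0, 60.0, 21)                   # candidate radii
    weight = np.exp(-0.5 * ((radius - 50.0) / 5.0) ** 2)   # any unnormalized weights

    w_sum = weight / np.sum(weight)    # new behaviour: weights sum to 1
    w_max = weight / weight.max()      # old behaviour: peak weight is 1

    volume = radius ** 3
    print(np.dot(w_sum, volume))                     # already a weighted mean
    print(np.dot(w_max, volume) / np.sum(w_max))     # needs an extra normalization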
sasmodels/convert.py
(r2a74b99 -> r3c56da87)

    @@ -37,4 +37,5 @@
         Convert model from old style parameter names to new style.
         """
    +    _,_ = name,pars # lint
         raise NotImplementedError
         # need to load all new models in order to determine old=>new
sasmodels/core.py
r9890053 r3c56da87 12 12 try: 13 13 from .kernelcl import load_model as load_model_cl 14 except Exception,exc: 14 except: 15 # pylint: disable=invalid-name 15 16 load_model_cl = None 16 17 from .kerneldll import load_model as load_model_dll … … 31 32 Return a computation kernel from the model definition and the q input. 32 33 """ 33 input = model.make_input(q_vectors)34 return model( input)34 model_input = model.make_input(q_vectors) 35 return model(model_input) 35 36 36 def get_weights( kernel, pars, name):37 def get_weights(info, pars, name): 37 38 """ 38 39 Generate the distribution for parameter *name* given the parameter values … … 41 42 Searches for "name", "name_pd", "name_pd_type", "name_pd_n", "name_pd_sigma" 42 43 """ 43 relative = name in kernel.info['partype']['pd-rel']44 limits = kernel.info['limits']44 relative = name in info['partype']['pd-rel'] 45 limits = info['limits'] 45 46 disperser = pars.get(name+'_pd_type', 'gaussian') 46 value = pars.get(name, kernel.info['defaults'][name])47 value = pars.get(name, info['defaults'][name]) 47 48 npts = pars.get(name+'_pd_n', 0) 48 49 width = pars.get(name+'_pd', 0.0) 49 50 nsigma = pars.get(name+'_pd_nsigma', 3.0) 50 v ,w= weights.get_weights(51 value,weight = weights.get_weights( 51 52 disperser, npts, width, nsigma, 52 53 value, limits[name], relative) 53 return v ,w/np.sum(w)54 return value,weight/np.sum(weight) 54 55 55 56 def dispersion_mesh(pars): … … 61 62 parameter set in the vector. 62 63 """ 63 value s, weights= zip(*pars)64 if len(value s) > 1:65 value s = [v.flatten() for v in np.meshgrid(*values)]66 weight s = np.vstack([v.flatten() for v in np.meshgrid(*weights)])67 weight s = np.prod(weights, axis=0)68 return value s, weights64 value, weight = zip(*pars) 65 if len(value) > 1: 66 value = [v.flatten() for v in np.meshgrid(*value)] 67 weight = np.vstack([v.flatten() for v in np.meshgrid(*weight)]) 68 weight = np.prod(weight, axis=0) 69 return value, weight 69 70 70 71 def call_kernel(kernel, pars, cutoff=1e-5): 71 72 fixed_pars = [pars.get(name, kernel.info['defaults'][name]) 72 73 for name in kernel.fixed_pars] 73 pd_pars = [get_weights(kernel , pars, name) for name in kernel.pd_pars]74 pd_pars = [get_weights(kernel.info, pars, name) for name in kernel.pd_pars] 74 75 return kernel(fixed_pars, pd_pars, cutoff=cutoff) 75 76 76 def call_ER( kernel, pars):77 ER = kernel.info.get('ER', None)77 def call_ER(info, pars): 78 ER = info.get('ER', None) 78 79 if ER is None: 79 80 return 1.0 80 81 else: 81 vol_pars = [get_weights( kernel, pars, name)82 for name in kernel.info['partype']['volume']]83 value s, weights= dispersion_mesh(vol_pars)84 fv = ER(*values)82 vol_pars = [get_weights(info, pars, name) 83 for name in info['partype']['volume']] 84 value, weight = dispersion_mesh(vol_pars) 85 individual_radii = ER(*value) 85 86 #print values[0].shape, weights.shape, fv.shape 86 return np.sum(weight s*fv) / np.sum(weights)87 return np.sum(weight*individual_radii) / np.sum(weight) 87 88 88 def call_VR( kernel, pars):89 VR = kernel.info.get('VR', None)89 def call_VR(info, pars): 90 VR = info.get('VR', None) 90 91 if VR is None: 91 92 return 1.0 92 93 else: 93 vol_pars = [get_weights( kernel, pars, name)94 for name in kernel.info['partype']['volume']]95 value s, weights= dispersion_mesh(vol_pars)96 whole,part = VR(*value s)97 return np.sum(weight s*part)/np.sum(weights*whole)94 vol_pars = [get_weights(info, pars, name) 95 for name in info['partype']['volume']] 96 value, weight = dispersion_mesh(vol_pars) 97 whole,part = VR(*value) 98 
return np.sum(weight*part)/np.sum(weight*whole) 98 99 -
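The dispersion_mesh function above (with values/weights renamed to value/weight) builds the outer product of several one-dimensional dispersion distributions so that call_ER and call_VR, which now take the model info rather than the kernel, can average over every combination of the volume parameters. The same logic, shown standalone with made-up radius and length distributions:

    import numpy as np

    def dispersion_mesh(pars):
        value, weight = zip(*pars)
        if len(value) > 1:
            value = [v.flatten() for v in np.meshgrid(*value)]
            weight = np.vstack([v.flatten() for v in np.meshgrid(*weight)])
            weight = np.prod(weight, axis=0)
        return value, weight

    radius = (np.array([40., 50., 60.]), np.array([0.25, 0.5, 0.25]))
    length = (np.array([390., 400., 410.]), np.array([0.3, 0.4, 0.3]))
    (r_mesh, l_mesh), w = dispersion_mesh([radius, length])
    print(r_mesh.shape, l_mesh.shape, w.shape)   # three flat arrays of 3*3 = 9 points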
sasmodels/generate.py
(r33e91b1 -> r3c56da87)

    @@ -566,10 +566,10 @@

     def demo_time():
    +    from .models import cylinder
         import datetime
    -    from .models import cylinder
         tic = datetime.datetime.now()
    -    toc = lambda: (datetime.datetime.now() - tic).total_seconds()
         make(cylinder)
    -    print "time:", toc()
    +    toc = (datetime.datetime.now() - tic).total_seconds()
    +    print "time:", toc

     def main():
sasmodels/kernelcl.py
rc85db69 r3c56da87 30 30 try: 31 31 import pyopencl as cl 32 context = cl.create_some_context(interactive=False)33 del context32 # Ask OpenCL for the default context so that we know that one exists 33 cl.create_some_context(interactive=False) 34 34 except Exception, exc: 35 35 warnings.warn(str(exc)) … … 160 160 161 161 if not self.context: 162 self.context = self._find_context()162 self.context = _get_default_context() 163 163 164 164 # Byte boundary for data alignment … … 178 178 warnings.warn("the environment variable 'PYOPENCL_CTX' might not be set correctly") 179 179 180 def _find_context(self):181 default = None182 for platform in cl.get_platforms():183 for device in platform.get_devices():184 if device.type == cl.device_type.GPU:185 return cl.Context([device])186 if default is None:187 default = device188 189 if not default:190 raise RuntimeError("OpenCL device not found")191 192 return cl.Context([default])193 194 180 def compile_program(self, name, source, dtype): 195 181 if name not in self.compiled: … … 203 189 del self.compiled[name] 204 190 191 def _get_default_context(): 192 default = None 193 for platform in cl.get_platforms(): 194 for device in platform.get_devices(): 195 if device.type == cl.device_type.GPU: 196 return cl.Context([device]) 197 if default is None: 198 default = device 199 200 if not default: 201 raise RuntimeError("OpenCL device not found") 202 203 return cl.Context([default]) 204 205 205 206 206 class GpuModel(object): … … 234 234 raise TypeError("data and kernel have different types") 235 235 if self.program is None: 236 self.program = environment().compile_program(self.info['name'], self.source, self.dtype) 236 compiler = environment().compile_program 237 self.program = compiler(self.info['name'], self.source, self.dtype) 237 238 kernel_name = generate.kernel_name(self.info, input_value.is_2D) 238 239 kernel = getattr(self.program, kernel_name) … … 303 304 *info* is the module information 304 305 305 * input* is the DllInput q vectors at which the kernel should be306 *q_input* is the DllInput q vectors at which the kernel should be 306 307 evaluated. 307 308 … … 314 315 Call :meth:`release` when done with the kernel instance. 315 316 """ 316 def __init__(self, kernel, info, input):317 self. input =input317 def __init__(self, kernel, info, q_input): 318 self.q_input = q_input 318 319 self.kernel = kernel 319 320 self.info = info 320 self.res = np.empty( input.nq,input.dtype)321 dim = '2d' if input.is_2D else '1d'321 self.res = np.empty(q_input.nq, q_input.dtype) 322 dim = '2d' if q_input.is_2D else '1d' 322 323 self.fixed_pars = info['partype']['fixed-' + dim] 323 324 self.pd_pars = info['partype']['pd-' + dim] … … 327 328 env = environment() 328 329 self.loops_b = [cl.Buffer(env.context, mf.READ_WRITE, 329 2 * MAX_LOOPS * input.dtype.itemsize)330 2 * MAX_LOOPS * q_input.dtype.itemsize) 330 331 for _ in env.queues] 331 332 self.res_b = [cl.Buffer(env.context, mf.READ_WRITE, 332 input.global_size[0] *input.dtype.itemsize)333 q_input.global_size[0] * q_input.dtype.itemsize) 333 334 for _ in env.queues] 334 335 335 336 336 337 def __call__(self, fixed_pars, pd_pars, cutoff=1e-5): 337 real = np.float32 if self. input.dtype == generate.F32 else np.float64338 real = np.float32 if self.q_input.dtype == generate.F32 else np.float64 338 339 339 340 device_num = 0 340 341 queuei = environment().queues[device_num] 341 342 res_bi = self.res_b[device_num] 342 nq = np.uint32(self. 
input.nq)343 nq = np.uint32(self.q_input.nq) 343 344 if pd_pars: 344 345 cutoff = real(cutoff) 345 346 loops_N = [np.uint32(len(p[0])) for p in pd_pars] 346 loops = np.hstack(pd_pars) if pd_pars else np.empty(0, dtype=self.input.dtype) 347 loops = np.ascontiguousarray(loops.T, self.input.dtype).flatten() 347 loops = np.hstack(pd_pars) \ 348 if pd_pars else np.empty(0, dtype=self.q_input.dtype) 349 loops = np.ascontiguousarray(loops.T, self.q_input.dtype).flatten() 348 350 #print "loops",Nloops, loops 349 351 … … 357 359 loops_l = cl.LocalMemory(len(loops.data)) 358 360 #ctx = environment().context 359 #loops_bi = cl.Buffer(ctx, mf.READ_ONLY |mf.COPY_HOST_PTR, hostbuf=loops)361 #loops_bi = cl.Buffer(ctx, mf.READ_ONLY|mf.COPY_HOST_PTR, hostbuf=loops) 360 362 dispersed = [loops_bi, loops_l, cutoff] + loops_N 361 363 else: 362 364 dispersed = [] 363 365 fixed = [real(p) for p in fixed_pars] 364 args = self. input.q_buffers + [res_bi, nq] + dispersed + fixed365 self.kernel(queuei, self. input.global_size, None, *args)366 args = self.q_input.q_buffers + [res_bi, nq] + dispersed + fixed 367 self.kernel(queuei, self.q_input.global_size, None, *args) 366 368 cl.enqueue_copy(queuei, self.res, res_bi) 367 369 -
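The context lookup above moved from a GpuEnvironment method to the module-level _get_default_context helper, which prefers a GPU device and otherwise falls back to the first device found. The same logic is shown standalone below for reference; it requires pyopencl and at least one OpenCL platform to actually run:

    import pyopencl as cl

    def get_default_context():
        default = None
        for platform in cl.get_platforms():
            for device in platform.get_devices():
                if device.type == cl.device_type.GPU:
                    return cl.Context([device])
                if default is None:
                    default = device
        if not default:
            raise RuntimeError("OpenCL device not found")
        return cl.Context([default])

    print(get_default_context().devices)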
sasmodels/kerneldll.py
rf734e7d r3c56da87 19 19 COMPILE = "gcc -shared -fPIC -std=c99 -O2 -Wall %(source)s -o %(output)s -lm" 20 20 elif os.name == 'nt': 21 # make sure vcvarsall.bat is called first in order to set compiler, headers, lib paths, etc.21 # call vcvarsall.bat before compiling to set path, headers, libs, etc. 22 22 if "VCINSTALLDIR" in os.environ: 23 23 # MSVC compiler is available, so use it. … … 25 25 # TODO: maybe don't use randomized name for the c file 26 26 COMPILE = "cl /nologo /Ox /MD /W3 /GS- /DNDEBUG /Tp%(source)s /openmp /link /DLL /INCREMENTAL:NO /MANIFEST /OUT:%(output)s" 27 # Can't find VCOMP90.DLL (don't know why), so remove openmp support from windows compiler build 27 # Can't find VCOMP90.DLL (don't know why), so remove openmp support 28 # from windows compiler build 28 29 #COMPILE = "cl /nologo /Ox /MD /W3 /GS- /DNDEBUG /Tp%(source)s /link /DLL /INCREMENTAL:NO /MANIFEST /OUT:%(output)s" 29 30 else: … … 123 124 self.__dict__ = state 124 125 125 def __call__(self, input):126 def __call__(self, q_input): 126 127 if self.dll is None: self._load_dll() 127 kernel = self.Iqxy if input.is_2D else self.Iq128 return DllKernel(kernel, self.info, input)128 kernel = self.Iqxy if q_input.is_2D else self.Iq 129 return DllKernel(kernel, self.info, q_input) 129 130 131 # pylint: disable=no-self-use 130 132 def make_input(self, q_vectors): 131 133 """ … … 150 152 *info* is the module information 151 153 152 * input* is the DllInput q vectors at which the kernel should be154 *q_input* is the DllInput q vectors at which the kernel should be 153 155 evaluated. 154 156 … … 161 163 Call :meth:`release` when done with the kernel instance. 162 164 """ 163 def __init__(self, kernel, info, input):165 def __init__(self, kernel, info, q_input): 164 166 self.info = info 165 self. input =input167 self.q_input = q_input 166 168 self.kernel = kernel 167 self.res = np.empty( input.nq,input.dtype)168 dim = '2d' if input.is_2D else '1d'169 self.res = np.empty(q_input.nq, q_input.dtype) 170 dim = '2d' if q_input.is_2D else '1d' 169 171 self.fixed_pars = info['partype']['fixed-'+dim] 170 172 self.pd_pars = info['partype']['pd-'+dim] … … 174 176 175 177 def __call__(self, fixed_pars, pd_pars, cutoff): 176 real = np.float32 if self. input.dtype == F32 else np.float64178 real = np.float32 if self.q_input.dtype == F32 else np.float64 177 179 178 nq = c_int(self. input.nq)180 nq = c_int(self.q_input.nq) 179 181 if pd_pars: 180 182 cutoff = real(cutoff) 181 183 loops_N = [np.uint32(len(p[0])) for p in pd_pars] 182 184 loops = np.hstack(pd_pars) 183 loops = np.ascontiguousarray(loops.T, self. input.dtype).flatten()185 loops = np.ascontiguousarray(loops.T, self.q_input.dtype).flatten() 184 186 p_loops = loops.ctypes.data 185 187 dispersed = [p_loops, cutoff] + loops_N … … 187 189 dispersed = [] 188 190 fixed = [real(p) for p in fixed_pars] 189 args = self. input.q_pointers + [self.p_res, nq] + dispersed + fixed191 args = self.q_input.q_pointers + [self.p_res, nq] + dispersed + fixed 190 192 #print pars 191 193 self.kernel(*args) -
sasmodels/kernelpy.py
rc85db69 r3c56da87 7 7 def __init__(self, info): 8 8 self.info = info 9 def __call__(self, input_value): 10 kernel = self.info['Iqxy'] if input_value.is_2D else self.info['Iq'] 11 return PyKernel(kernel, self.info, input_value) 9 10 def __call__(self, q_input): 11 kernel = self.info['Iqxy'] if q_input.is_2D else self.info['Iq'] 12 return PyKernel(kernel, self.info, q_input) 13 14 # pylint: disable=no-self-use 12 15 def make_input(self, q_vectors): 13 16 return PyInput(q_vectors, dtype=F64) 17 14 18 def release(self): 15 19 pass … … 52 56 *info* is the module information 53 57 54 * input* is the DllInput q vectors at which the kernel should be58 *q_input* is the DllInput q vectors at which the kernel should be 55 59 evaluated. 56 60 … … 63 67 Call :meth:`release` when done with the kernel instance. 64 68 """ 65 def __init__(self, kernel, info, input):69 def __init__(self, kernel, info, q_input): 66 70 self.info = info 67 self. input =input68 self.res = np.empty( input.nq,input.dtype)69 dim = '2d' if input.is_2D else '1d'71 self.q_input = q_input 72 self.res = np.empty(q_input.nq, q_input.dtype) 73 dim = '2d' if q_input.is_2D else '1d' 70 74 # Loop over q unless user promises that the kernel is vectorized by 71 75 # taggining it with vectorized=True … … 73 77 if dim == '2d': 74 78 def vector_kernel(qx, qy, *args): 75 return np.array([kernel(qxi, qyi, *args) for qxi, qyi in zip(qx, qy)]) 79 return np.array([kernel(qxi, qyi, *args) 80 for qxi, qyi in zip(qx, qy)]) 76 81 else: 77 82 def vector_kernel(q, *args): 78 return np.array([kernel(qi, *args) for qi in q]) 83 return np.array([kernel(qi, *args) 84 for qi in q]) 79 85 self.kernel = vector_kernel 80 86 else: … … 86 92 # First two fixed pars are scale and background 87 93 pars = [p[0] for p in info['parameters'][2:]] 88 offset = len(self.input.q_vectors) 89 self.args = self.input.q_vectors + [None] * len(pars) 90 self.fixed_index = np.array([pars.index(p) + offset for p in fixed_pars[2:]]) 91 self.pd_index = np.array([pars.index(p) + offset for p in pd_pars]) 92 self.vol_index = np.array([pars.index(p) + offset for p in vol_pars]) 94 offset = len(self.q_input.q_vectors) 95 self.args = self.q_input.q_vectors + [None] * len(pars) 96 self.fixed_index = np.array([pars.index(p) + offset 97 for p in fixed_pars[2:]]) 98 self.pd_index = np.array([pars.index(p) + offset 99 for p in pd_pars]) 100 self.vol_index = np.array([pars.index(p) + offset 101 for p in vol_pars]) 93 102 try: self.theta_index = pars.index('theta') + offset 94 103 except ValueError: self.theta_index = -1 … … 113 122 114 123 def release(self): 115 self. input = None124 self.q_input = None 116 125 117 126 def _loops(form, form_volume, cutoff, scale, background, … … 194 203 args[pd_index[0]] = fast_value[fast_index] 195 204 weight[0] = fast_weight[fast_index] 196 # This computes the weight, and if it is sufficient, calls the scattering197 # function and adds it to the total. If there is a volume normalization,198 # it will also be added here.205 # This computes the weight, and if it is sufficient, calls the 206 # scattering function and adds it to the total. If there is a volume 207 # normalization, it will also be added here. 199 208 # Note: make sure this is consistent with the code in PY_LOOP_BODY!! 
200 209 # Note: can precompute w1*w2*...*wn … … 204 213 positive = (I >= 0.0) 205 214 206 # Note: can precompute spherical correction if theta_index is not the fast index 207 # Correction factor for spherical integration p(theta) I(q) sin(theta) dtheta 208 #spherical_correction = abs(sin(pi*args[theta_index])) if theta_index>=0 else 1.0 209 spherical_correction = abs(cos(pi * args[theta_index])) * pi / 2 if theta_index >= 0 else 1.0 215 # Note: can precompute spherical correction if theta_index is not 216 # the fast index. Correction factor for spherical integration 217 #spherical_correction = abs(cos(pi*args[phi_index])) if phi_index>=0 else 1.0 218 spherical_correction = (abs(cos(pi * args[theta_index])) * pi / 2 219 if theta_index >= 0 else 1.0) 210 220 #spherical_correction = 1.0 211 221 ret += w * I * spherical_correction * positive … … 213 223 214 224 # Volume normalization. 215 # If there are "volume" polydispersity parameters, then these will be used216 # to call the form_volume function from the user supplied kernel, and accumulate217 # a normalized weight.218 # Note: can precompute volume norm if the fast index is not a volume index225 # If there are "volume" polydispersity parameters, then these 226 # will be used to call the form_volume function from the user 227 # supplied kernel, and accumulate a normalized weight. 228 # Note: can precompute volume norm if fast index is not a volume 219 229 if form_volume: 220 230 vol_args = [args[index] for index in vol_index] -
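Elsewhere in this file, PyKernel wraps scalar Iq functions that are not flagged with Iq.vectorized = True in a simple loop over q. A self-contained illustration with a stand-in scalar kernel (not one of the sasmodels models):

    import numpy as np

    def scalar_iq(q, radius):
        # stand-in scalar form factor; any f(q, pars) -> float would do
        return np.sinc(q * radius / np.pi) ** 2

    def vector_kernel(q, *args):
        # same looping wrapper that PyKernel builds for unvectorized models
        return np.array([scalar_iq(qi, *args) for qi in q])

    q = np.logspace(-3, -1, 5)
    print(vector_kernel(q, 50.0))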
sasmodels/model_test.py
rd6adfbe r3c56da87 5 5 Usage:: 6 6 7 8 9 7 python -m sasmodels.model_test [opencl|dll|opencl_and_dll] model1 model2 ... 8 9 if model1 is 'all', then all except the remaining models will be tested 10 10 11 11 Each model is tested using the default parameters at q=0.1, (qx,qy)=(0.1,0.1), … … 60 60 Example:: 61 61 >>> D = {} 62 >>> try: 62 >>> try: 63 63 ... print D['hello'] 64 ... except Exception,exc: 64 ... except Exception,exc: 65 65 ... annotate_exception(exc, "while accessing 'D'") 66 66 ... raise … … 78 78 except: 79 79 exc.args = (" ".join((str(exc),msg)),) 80 81 def suite(loaders, models):80 81 def make_suite(loaders, models): 82 82 83 83 ModelTestCase = _hide_model_case_from_nosetests() … … 100 100 ] 101 101 tests = smoke_tests + getattr(model_definition, 'tests', []) 102 102 103 103 if tests: # in case there are no smoke tests... 104 104 #print '------' … … 143 143 model = self.loader(self.definition) 144 144 for test in self.tests: 145 pars, Q, I = test 146 147 if not isinstance(I, list): 148 I = [I] 149 if not isinstance(Q, list): 150 Q = [Q] 151 152 self.assertEqual(len(I), len(Q)) 153 154 if Q[0] == 'ER': 155 Iq = [call_ER(kernel, pars)] 156 elif Q[0] == 'VR': 157 Iq = [call_VR(kernel, pars)] 158 elif isinstance(Q[0], tuple): 159 Qx,Qy = zip(*Q) 160 Q_vectors = [np.array(Qx), np.array(Qy)] 161 kernel = make_kernel(model, Q_vectors) 162 Iq = call_kernel(kernel, pars) 163 else: 164 Q_vectors = [np.array(Q)] 165 kernel = make_kernel(model, Q_vectors) 166 Iq = call_kernel(kernel, pars) 167 168 self.assertGreater(len(Iq), 0) 169 self.assertEqual(len(I), len(Iq)) 170 171 for q, i, iq in zip(Q, I, Iq): 172 if i is None: 173 # smoke test --- make sure it runs and produces a value 174 self.assertTrue(np.isfinite(iq), 'q:%s; not finite; actual:%s' % (q, iq)) 175 else: 176 err = abs(i - iq) 177 nrm = abs(i) 178 self.assertLess(err * 10**5, nrm, 'q:%s; expected:%s; actual:%s' % (q, i, iq)) 145 self._run_one_test(model, test) 179 146 180 147 except Exception,exc: … … 182 149 raise 183 150 151 def _run_one_test(self, model, test): 152 pars, x, y = test 153 154 if not isinstance(y, list): 155 y = [y] 156 if not isinstance(x, list): 157 x = [x] 158 159 self.assertEqual(len(y), len(x)) 160 161 if x[0] == 'ER': 162 actual = [call_ER(model.info, pars)] 163 elif x[0] == 'VR': 164 actual = [call_VR(model.info, pars)] 165 elif isinstance(x[0], tuple): 166 Qx,Qy = zip(*x) 167 q_vectors = [np.array(Qx), np.array(Qy)] 168 kernel = make_kernel(model, q_vectors) 169 actual = call_kernel(kernel, pars) 170 else: 171 q_vectors = [np.array(x)] 172 kernel = make_kernel(model, q_vectors) 173 actual = call_kernel(kernel, pars) 174 175 self.assertGreater(len(actual), 0) 176 self.assertEqual(len(y), len(actual)) 177 178 for xi, yi, actual_yi in zip(x, y, actual): 179 if yi is None: 180 # smoke test --- make sure it runs and produces a value 181 self.assertTrue(np.isfinite(actual_yi), 182 'invalid f(%s): %s' % (xi, actual_yi)) 183 else: 184 err = abs(yi - actual_yi) 185 nrm = abs(yi) 186 self.assertLess(err * 10**5, nrm, 187 'f(%s); expected:%s; actual:%s' % (xi, yi, actual_yi)) 188 184 189 return ModelTestCase 185 190 … … 187 192 # let nosetests sniff out the tests 188 193 def model_tests(): 189 tests = suite(['opencl','dll'],['all'])194 tests = make_suite(['opencl','dll'],['all']) 190 195 for test_i in tests: 191 196 yield test_i.runTest … … 218 223 #run_tests(loaders, models) 219 224 runner = unittest.TextTestRunner() 220 result = runner.run( suite(loaders, models))225 result = runner.run(make_suite(loaders, 
models)) 221 226 return 1 if result.failures or result.errors else 0 222 227 -
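For reference, the (pars, x, y) triples consumed by _run_one_test look like the sketch below when attached to a model file as a tests attribute. The parameter names and numbers are placeholders, not validated reference values; a y of None marks a smoke test, and 'ER'/'VR' select the effective-radius and volume-ratio checks:

    tests = [
        [{}, 0.1, None],                         # smoke test at q = 0.1 with defaults
        [{'radius': 30.0}, [0.01, 0.1], [None, None]],
        [{'radius': 30.0}, (0.1, 0.1), None],    # 2-D point (qx, qy)
        [{'radius': 30.0}, 'ER', None],          # effective radius check
    ]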
sasmodels/models/barbell.py
r9890053 r3c56da87 4 4 r""" 5 5 6 Calculates the scattering from a barbell-shaped cylinder (This model simply becomes the DumBellModel when the length of 7 the cylinder, *L*, is set to zero). That is, a sphereocylinder with spherical end caps that have a radius larger than 8 that of the cylinder and the center of the end cap radius lies outside of the cylinder. All dimensions of the BarBell 9 are considered to be monodisperse. See the diagram for the details of the geometry and restrictions on parameter values. 6 Calculates the scattering from a barbell-shaped cylinder (This model simply 7 becomes the DumBellModel when the length of the cylinder, *L*, is set to zero). 8 That is, a sphereocylinder with spherical end caps that have a radius larger 9 than that of the cylinder and the center of the end cap radius lies outside 10 of the cylinder. All dimensions of the BarBell are considered to be 11 monodisperse. See the diagram for the details of the geometry and restrictions 12 on parameter values. 10 13 11 14 Definition … … 18 21 .. image:: img/barbell_geometry.jpg 19 22 20 where *r* is the radius of the cylinder. All other parameters are as defined in the diagram. 23 where *r* is the radius of the cylinder. All other parameters are as defined 24 in the diagram. 21 25 22 26 Since the end cap radius 23 *R* >= *r* and by definition for this geometry *h* < 0, *h* is then defined by *r* and *R* as 27 *R* >= *r* and by definition for this geometry *h* < 0, *h* is then 28 defined by *r* and *R* as 24 29 25 30 *h* = -1 \* sqrt(*R*\ :sup:`2` - *r*\ :sup:`2`) … … 46 51 \over QR\sin\theta \left(1-t^2\right)^{1/2}} 47 52 48 The < > brackets denote an average of the structure over all orientations. <*A* :sup:`2`\ *(q)*> is then the form 49 factor, *P(q)*. The scale factor is equivalent to the volume fraction of cylinders, each of volume, *V*. Contrast is 50 the difference of scattering length densities of the cylinder and the surrounding solvent. 53 The < > brackets denote an average of the structure over all orientations. 54 <*A* :sup:`2`\ *(q)*> is then the form factor, *P(q)*. The scale factor is 55 equivalent to the volume fraction of cylinders, each of volume, *V*. Contrast 56 is the difference of scattering length densities of the cylinder and the 57 surrounding solvent. 51 58 52 59 The volume of the barbell is … … 69 76 \left( 4R^3 6R^2h - 2h^3 + 3r^2L \right)^{-1} 70 77 71 **The requirement that** *R* >= *r* **is not enforced in the model!** It is up to you to restrict this during analysis. 78 **The requirement that** *R* >= *r* **is not enforced in the model!** It is 79 up to you to restrict this during analysis. 72 80 73 81 This example dataset is produced by running the Macro PlotBarbell(), … … 79 87 *Figure. 1D plot using the default values (w/256 data point).* 80 88 81 For 2D data: The 2D scattering intensity is calculated similar to the 2D cylinder model. For example, for 82 |theta| = 45 deg and |phi| = 0 deg with default values for other parameters 89 For 2D data: The 2D scattering intensity is calculated similar to the 2D 90 cylinder model. For example, for |theta| = 45 deg and |phi| = 0 deg with 91 default values for other parameters 83 92 84 93 .. image:: img/barbell_2d.jpg … … 102 111 103 112 """ 104 from numpy import pi,inf113 from numpy import inf 105 114 106 115 name = "barbell" -
sasmodels/models/bcc.py
r9890053 r3c56da87 80 80 .. image:: img/bcc_1d.jpg 81 81 82 *Figure. 1D plot in the linear scale using the default values (w/200 data point).* 82 *Figure. 1D plot in the linear scale using the default values 83 (w/200 data point).* 83 84 84 85 The 2D (Anisotropic model) is based on the reference below where $I(q)$ is … … 103 104 """ 104 105 105 from numpy import pi,inf106 from numpy import inf 106 107 107 108 name = "bcc_paracrystal" … … 120 121 [ "radius", "Ang", 40, [0, inf], "volume","Particle radius" ], 121 122 [ "sld", "1e-6/Ang^2", 4, [-inf,inf], "", "Particle scattering length density" ], 122 [ "solvent_sld", "1e-6/Ang^2", 1, [-inf,inf], "", "Solvent scattering length density" ],123 [ "solvent_sld", "1e-6/Ang^2", 1, [-inf,inf], "", "Solvent scattering length density" ], 123 124 [ "theta", "degrees", 60, [-inf, inf], "orientation","In plane angle" ], 124 125 [ "phi", "degrees", 60, [-inf, inf], "orientation","Out of plane angle" ], -
sasmodels/models/broad_peak.py
ra5d0d00 r3c56da87 6 6 layered structures, etc. 7 7 8 The d-spacing corresponding to the broad peak is a characteristic distance 9 between the scattering inhomogeneities (such as in lamellar, cylindrical, or 8 The d-spacing corresponding to the broad peak is a characteristic distance 9 between the scattering inhomogeneities (such as in lamellar, cylindrical, or 10 10 spherical morphologies, or for bicontinuous structures). 11 11 … … 44 44 """ 45 45 46 import numpy as np 47 from numpy import pi, inf, sin, cos, sqrt, exp, log, fabs 46 from numpy import inf, sqrt 48 47 49 48 name = "broad_peak" … … 82 81 83 82 84 #def form_volume():85 # return 186 87 83 def Iq(q, porod_scale, porod_exp, lorentz_scale, lorentz_length, peak_pos, lorentz_exp): 88 inten = porod_scale/pow(q,porod_exp) + lorentz_scale/(1.0 \ 89 + pow((fabs(q-peak_pos)*lorentz_length),lorentz_exp)) 90 return inten 91 92 # FOR VECTORIZED VERSION, UNCOMMENT THE NEXT LINE 93 Iq.vectorized = True 84 inten = (porod_scale/q**porod_exp + lorentz_scale 85 / (1.0 + (abs(q-peak_pos)*lorentz_length)**lorentz_exp)) 86 return inten 87 Iq.vectorized = True # Iq accepts an array of Q values 94 88 95 89 def Iqxy(qx, qy, *args): 96 90 return Iq(sqrt(qx**2 + qy**2), *args) 97 98 # FOR VECTORIZED VERSION, UNCOMMENT THE NEXT LINE 99 Iqxy.vectorized = True 91 Iqxy.vectorized = True # Iqxy accepts an array of Qx, Qy values 100 92 101 93 … … 105 97 lorentz_scale=10,lorentz_length=50, peak_pos=0.1, lorentz_exp=2, 106 98 ) 99 107 100 oldname = "BroadPeakModel" 108 oldpars = dict(porod_scale='scale_p', porod_exp='exponent_p', 109 lorentz_scale='scale_l', lorentz_length='length_l', peak_pos='q_peak', 101 oldpars = dict(porod_scale='scale_p', porod_exp='exponent_p', 102 lorentz_scale='scale_l', lorentz_length='length_l', peak_pos='q_peak', 110 103 lorentz_exp='exponent_l', scale=None) -
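Since the rewritten Iq above is now genuinely vectorized, it can be reproduced in a few lines of numpy. The Lorentzian parameters below follow the demo values in the hunk; the Porod values are placeholders:

    import numpy as np

    def broad_peak_iq(q, porod_scale, porod_exp,
                      lorentz_scale, lorentz_length, peak_pos, lorentz_exp):
        return (porod_scale / q**porod_exp
                + lorentz_scale
                / (1.0 + (np.abs(q - peak_pos) * lorentz_length)**lorentz_exp))

    q = np.logspace(-3, 0, 200)
    iq = broad_peak_iq(q, porod_scale=1e-5, porod_exp=3,
                       lorentz_scale=10, lorentz_length=50,
                       peak_pos=0.1, lorentz_exp=2)
    print(iq[:3])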
sasmodels/models/ellipsoid.py
(ra5d0d00 -> r3c56da87)

    @@ -116,5 +116,5 @@
     """

    -from numpy import pi,inf
    +from numpy import inf

     name = "ellipsoid"
sasmodels/models/fcc.py
r9890053 r3c56da87 3 3 #note - calculation requires double precision 4 4 r""" 5 Calculates the scattering from a **face-centered cubic lattice** with paracrystalline distortion. Thermal vibrations 6 are considered to be negligible, and the size of the paracrystal is infinitely large. Paracrystalline distortion is 7 assumed to be isotropic and characterized by a Gaussian distribution. 5 Calculates the scattering from a **face-centered cubic lattice** with 6 paracrystalline distortion. Thermal vibrations are considered to be 7 negligible, and the size of the paracrystal is infinitely large. 8 Paracrystalline distortion is assumed to be isotropic and characterized by 9 a Gaussian distribution. 8 10 9 11 The returned value is scaled to units of |cm^-1|\ |sr^-1|, absolute scale. … … 16 18 .. image:: img/image158.jpg 17 19 18 where *scale* is the volume fraction of spheres, *Vp* is the volume of the primary particle, *V(lattice)* is a volume 19 correction for the crystal structure, *P(q)* is the form factor of the sphere (normalized), and *Z(q)* is the 20 paracrystalline structure factor for a face-centered cubic structure. 20 where *scale* is the volume fraction of spheres, *Vp* is the volume of 21 the primary particle, *V(lattice)* is a volume correction for the crystal 22 structure, *P(q)* is the form factor of the sphere (normalized), and *Z(q)* 23 is the paracrystalline structure factor for a face-centered cubic structure. 21 24 22 Equation (1) of the 1990 reference is used to calculate *Z(q)*, using equations (23)-(25) from the 1987 paper for23 *Z1*\ , *Z2*\ , and *Z3*\ .25 Equation (1) of the 1990 reference is used to calculate *Z(q)*, using 26 equations (23)-(25) from the 1987 paper for *Z1*\ , *Z2*\ , and *Z3*\ . 24 27 25 The lattice correction (the occupied volume of the lattice) for a face-centered cubic structure of particles of radius 26 *R* and nearest neighbor separation *D* is 28 The lattice correction (the occupied volume of the lattice) for a 29 face-centered cubic structure of particles of radius *R* and nearest 30 neighbor separation *D* is 27 31 28 32 .. image:: img/image159.jpg 29 33 30 The distortion factor (one standard deviation) of the paracrystal is included in the calculation of *Z(q)* 34 The distortion factor (one standard deviation) of the paracrystal is 35 included in the calculation of *Z(q)* 31 36 32 37 .. image:: img/image160.jpg … … 42 47 .. image:: img/image162.jpg 43 48 44 where for a face-centered cubic lattice *h*\ , *k*\ , *l* all odd or all even are allowed and reflections where 45 *h*\ , *k*\ , *l* are mixed odd/even are forbidden. Thus the peak positions correspond to (just the first 5) 49 where for a face-centered cubic lattice *h*\ , *k*\ , *l* all odd or all 50 even are allowed and reflections where *h*\ , *k*\ , *l* are mixed odd/even 51 are forbidden. Thus the peak positions correspond to (just the first 5) 46 52 47 53 .. image:: img/image163.jpg 48 54 49 **NB: The calculation of** *Z(q)* **is a double numerical integral that must be carried out with a high density of** 50 **points to properly capture the sharp peaks of the paracrystalline scattering.** So be warned that the calculation is 51 SLOW. Go get some coffee. Fitting of any experimental data must be resolution smeared for any meaningful fit. This 52 makes a triple integral. Very, very slow. Go get lunch! 
55 **NB: The calculation of** *Z(q)* **is a double numerical integral that 56 must be carried out with a high density of** **points to properly capture 57 the sharp peaks of the paracrystalline scattering.** So be warned that the 58 calculation is SLOW. Go get some coffee. Fitting of any experimental data 59 must be resolution smeared for any meaningful fit. This makes a triple 60 integral. Very, very slow. Go get lunch! 53 61 54 This example dataset is produced using 200 data points, *qmin* = 0.01 |Ang^-1|, *qmax* = 0.1 |Ang^-1| and the above55 default values.62 This example dataset is produced using 200 data points, *qmin* = 0.01 |Ang^-1|, 63 *qmax* = 0.1 |Ang^-1| and the above default values. 56 64 57 65 .. image:: img/image164.jpg … … 59 67 *Figure. 1D plot in the linear scale using the default values (w/200 data point).* 60 68 61 The 2D (Anisotropic model) is based on the reference below where *I(q)* is approximated for 1d scattering. Thus the 62 scattering pattern for 2D may not be accurate. Note that we are not responsible for any incorrectness of the 2D model 63 computation. 69 The 2D (Anisotropic model) is based on the reference below where *I(q)* is 70 approximated for 1d scattering. Thus the scattering pattern for 2D may not 71 be accurate. Note that we are not responsible for any incorrectness of the 72 2D model computation. 64 73 65 74 .. image:: img/image165.gif … … 78 87 """ 79 88 80 from numpy import pi,inf89 from numpy import inf 81 90 82 91 name = "fcc_paracrystal" … … 118 127 # names and the target sasview model name. 119 128 oldname='FCCrystalModel' 120 oldpars=dict(sld='sldSph', 121 solvent_sld='sldSolv') 129 oldpars=dict(sld='sldSph', solvent_sld='sldSolv') -
sasmodels/models/gaussian_peak.py
(ra5d0d00 -> r3c56da87)

    @@ -21,5 +21,5 @@
     """

    -from numpy import pi,inf
    +from numpy import inf

     name = "gaussian_peak"
sasmodels/models/hardsphere.py
(ra5d0d00 -> r3c56da87)

    @@ -32,5 +32,5 @@
     """

    -from numpy import pi,inf
    +from numpy import inf

     name = "hardsphere"
sasmodels/models/lamellar.py
(rbfb195e -> r3c56da87)

    @@ -47,5 +47,5 @@
     """

    -from numpy import pi,inf
    +from numpy import inf

     name = "lamellar"
sasmodels/models/lamellarCaille.py
(rbfb195e -> r3c56da87)

    @@ -70,5 +70,5 @@
     also in J. Phys. Chem. B, 105, (2001) 11081-11088
     """
    -from numpy import pi,inf
    +from numpy import inf

     name = "lamellarPS"
sasmodels/models/lamellarCailleHG.py
(r12c810f -> r3c56da87)

    @@ -74,5 +74,5 @@
     also in J. Phys. Chem. B, 105, (2001) 11081-11088
     """
    -from numpy import pi,inf
    +from numpy import inf

     name = "lamellarCailleHG"
sasmodels/models/lamellarFFHG.py
rbfb195e r3c56da87 1 1 # Note: model title and parameter table are inserted automatically 2 2 r""" 3 This model provides the scattering intensity, *I(q)*, for a lyotropic lamellar phase where a random distribution in 4 solution are assumed. The SLD of the head region is taken to be different from the SLD of the tail region. 3 This model provides the scattering intensity, *I(q)*, for a lyotropic lamellar 4 phase where a random distribution in solution are assumed. The SLD of the head 5 region is taken to be different from the SLD of the tail region. 5 6 6 7 *2.1.31.1. Definition* … … 16 17 .. image:: img/lamellarFFHG_.jpg 17 18 18 where |delta|\ T = tail length (or *tail_length*), |delta|\ H = head thickness (or *h_thickness*), 19 |drho|\ H = SLD(headgroup) - SLD(solvent), and |drho|\ T = SLD(tail) - SLD(solvent). 19 where |delta|\ T = tail length (or *tail_length*), |delta|\ H = head thickness 20 (or *h_thickness*), |drho|\ H = SLD(headgroup) - SLD(solvent), 21 and |drho|\ T = SLD(tail) - SLD(solvent). 20 22 21 The 2D scattering intensity is calculated in the same way as 1D, where the *q* vector is defined as 23 The 2D scattering intensity is calculated in the same way as 1D, where 24 the *q* vector is defined as 22 25 23 26 .. math:: … … 25 28 Q = \sqrt{Q_x^2 + Q_y^2} 26 29 27 The returned value is in units of |cm^-1|, on absolute scale. In the parameters, *sld_tail* = SLD of the tail group, 28 and *sld_head* = SLD of the head group. 29 30 ============== ======== ============= 31 Parameter name Units Default value 32 ============== ======== ============= 33 background |cm^-1| 0.0 34 head_sld |Ang^-2| 3e-06 35 scale None 1 36 solvent_sld |Ang^-2| 6e-06 37 head_length |Ang| 10 38 tail_length |Ang| 15 39 sld (tail) |Ang^-2| 0 40 ============== ======== ============= 30 The returned value is in units of |cm^-1|, on absolute scale. In the 31 parameters, *sld_tail* = SLD of the tail group, and *sld_head* = SLD 32 of the head group. 41 33 42 34 .. image:: img/lamellarFFHG_138.jpg … … 44 36 *Figure. 1D plot using the default values (w/1000 data point).* 45 37 46 Our model uses the form factor calculations implemented in a c-library provided by the NIST Center for Neutron Research47 (Kline, 2006).38 Our model uses the form factor calculations implemented in a C library 39 provided by the NIST Center for Neutron Research (Kline, 2006). 
48 40 49 41 REFERENCE … … 56 48 """ 57 49 58 from numpy import pi,inf50 from numpy import inf 59 51 60 52 name = "lamellar_FFHG" … … 96 88 Iq = """ 97 89 const double qsq = q*q; 98 const double drh = head_sld - solvent_sld; 99 const double drt = sld - solvent_sld; //correction 13FEB06 by L.Porcar 100 const double qT = q*tail_length; 101 double Pq, inten; 102 Pq = drh*(sin(q*(head_length+tail_length))-sin(qT)) + drt*sin(qT); 103 Pq *= Pq; 104 Pq *= 4.0/(qsq); 105 106 inten = 2.0e-4*M_PI*Pq/qsq; 107 108 return inten /= 2.0*(head_length+tail_length); //normalize by the bilayer thickness 109 90 const double drh = head_sld - solvent_sld; 91 const double drt = sld - solvent_sld; //correction 13FEB06 by L.Porcar 92 const double qT = q*tail_length; 93 double Pq, inten; 94 Pq = drh*(sin(q*(head_length+tail_length))-sin(qT)) + drt*sin(qT); 95 Pq *= Pq; 96 Pq *= 4.0/(qsq); 97 98 inten = 2.0e-4*M_PI*Pq/qsq; 99 100 // normalize by the bilayer thickness 101 inten /= 2.0*(head_length+tail_length); 102 103 return inten; 110 104 """ 111 105 112 106 Iqxy = """ 113 107 return Iq(sqrt(qx*qx+qy*qy), IQ_PARAMETERS); … … 118 112 119 113 demo = dict( 120 scale=1, background=0, 121 tail_length=15,head_length=10, 122 sld=0.4, head_sld=3.0, solvent_sld=6.0, 123 tail_length_pd= 0.2, tail_length_pd_n=40, 124 head_length_pd= 0.01, head_length_pd_n=40, 125 ) 114 scale=1, background=0, 115 tail_length=15,head_length=10, 116 sld=0.4, head_sld=3.0, solvent_sld=6.0, 117 tail_length_pd= 0.2, tail_length_pd_n=40, 118 head_length_pd= 0.01, head_length_pd_n=40, 119 ) 120 126 121 oldname = 'LamellarFFHGModel' 127 oldpars = dict(head_length='h_thickness', sld='sld_tail', head_sld='sld_head', solvent_sld='sld_solvent') 122 oldpars = dict(head_length='h_thickness', sld='sld_tail', 123 head_sld='sld_head', solvent_sld='sld_solvent') 128 124 -
sasmodels/models/lamellarPC.py
ra5d0d00 r3c56da87 1 1 # Note: model title and parameter table are inserted automatically 2 2 r""" 3 This model calculates the scattering from a stack of repeating lamellar structures. The stacks of lamellae (infinite 4 in lateral dimension) are treated as a paracrystal to account for the repeating spacing. The repeat distance is further 5 characterized by a Gaussian polydispersity. **This model can be used for large multilamellar vesicles.** 3 This model calculates the scattering from a stack of repeating lamellar 4 structures. The stacks of lamellae (infinite in lateral dimension) are 5 treated as a paracrystal to account for the repeating spacing. The repeat 6 distance is further characterized by a Gaussian polydispersity. **This model 7 can be used for large multilamellar vesicles.** 6 8 7 9 *2.1.33.1. Definition* … … 11 13 .. image:: img/image145.jpg 12 14 13 The form factor of the bilayer is approximated as the cross section of an infinite, planar bilayer of thickness *t* 15 The form factor of the bilayer is approximated as the cross section of an 16 infinite, planar bilayer of thickness *t* 14 17 15 18 .. image:: img/image146.jpg 16 19 17 Here, the scale factor is used instead of the mass per area of the bilayer (*G*). The scale factor is the volume 18 fraction of the material in the bilayer, *not* the total excluded volume of the paracrystal. *Z*\ :sub:`N`\ *(q)* 19 describes the interference effects for aggregates consisting of more than one bilayer. The equations used are (3-5) 20 Here, the scale factor is used instead of the mass per area of the 21 bilayer (*G*). The scale factor is the volume fraction of the material in 22 the bilayer, *not* the total excluded volume of the paracrystal. 23 *Z*\ :sub:`N`\ *(q)* describes the interference effects for aggregates 24 consisting of more than one bilayer. The equations used are (3-5) 20 25 from the Bergstrom reference below. 21 26 22 Non-integer numbers of stacks are calculated as a linear combination of the lower and higher values 27 Non-integer numbers of stacks are calculated as a linear combination of 28 the lower and higher values 23 29 24 30 .. image:: img/image147.jpg 25 31 26 The 2D scattering intensity is the same as 1D, regardless of the orientation of the *q* vector which is defined as 32 The 2D scattering intensity is the same as 1D, regardless of the orientation 33 of the *q* vector which is defined as 27 34 28 35 .. math:: … … 30 37 Q = \sqrt{Q_x^2 + Q_y^2} 31 38 32 The parameters of the model are *Nlayers* = no. of layers, and *pd_spacing* = polydispersity of spacing. 39 The parameters of the model are *Nlayers* = no. of layers, and 40 *pd_spacing* = polydispersity of spacing. 33 41 34 42 ============== ======== ============= … … 49 57 *Figure. 1D plot using the default values above (w/20000 data point).* 50 58 51 Our model uses the form factor calculations implemented in a c-library provided by the NIST Center for Neutron Research52 (Kline, 2006).59 Our model uses the form factor calculations implemented in a C library 60 provided by the NIST Center for Neutron Research (Kline, 2006). 53 61 54 62 REFERENCE 55 63 56 M Bergstrom, J S Pedersen, P Schurtenberger, S U Egelhaaf, *J. Phys. Chem. B*, 103 (1999) 9888-9897 64 M Bergstrom, J S Pedersen, P Schurtenberger, S U Egelhaaf, 65 *J. Phys. Chem. 
B*, 103 (1999) 9888-9897 57 66 58 67 """ 59 68 60 from numpy import pi,inf69 from numpy import inf 61 70 62 71 name = "lamellarPC" … … 90 99 ] 91 100 92 101 93 102 source = [ "lamellarPC_kernel.c"] 94 103 … … 105 114 106 115 demo = dict( 107 scale=1, background=0, 108 thickness=33,Nlayers=20,spacing=250,spacing_polydisp=0.2, 109 sld=1.0, solvent_sld=6.34, 110 thickness_pd= 0.2, thickness_pd_n=40 111 ) 116 scale=1, background=0, 117 thickness=33, Nlayers=20, spacing=250, spacing_polydisp=0.2, 118 sld=1.0, solvent_sld=6.34, 119 thickness_pd= 0.2, thickness_pd_n=40 120 ) 121 112 122 oldname = 'LamellarPCrystalModel' 113 oldpars = dict(spacing_polydisp='pd_spacing', sld='sld_layer',solvent_sld='sld_solvent') 123 oldpars = dict( 124 spacing_polydisp='pd_spacing', sld='sld_layer', 125 solvent_sld='sld_solvent' 126 ) 114 127 115 128 -
sasmodels/models/sphere.py
(ra5d0d00 -> r3c56da87)

    @@ -58,5 +58,5 @@
     """

    -from numpy import pi,inf
    +from numpy import inf

     name = "sphere"
sasmodels/models/spherepy.py
(ra5d0d00 -> r3c56da87)

    @@ -59,5 +59,5 @@

     import numpy as np
    -from numpy import pi, inf, sin, cos, sqrt, exp, log
    +from numpy import pi, inf, sin, cos, sqrt, log

     name = "sphere"
sasmodels/models/stickyhardsphere.py
r1353f60 r3c56da87 1 1 # Note: model title and parameter table are inserted automatically 2 r"""This calculates the interparticle structure factor for a hard sphere fluid with 3 a narrow attractive well. A perturbative solution of the Percus-Yevick closure is used. 4 The strength of the attractive well is described in terms of "stickiness" 5 as defined below. The returned value is a dimensionless structure factor, *S(q)*. 2 r""" 3 This calculates the interparticle structure factor for a hard sphere fluid 4 with a narrow attractive well. A perturbative solution of the Percus-Yevick 5 closure is used. The strength of the attractive well is described in terms 6 of "stickiness" as defined below. The returned value is a dimensionless 7 structure factor, *S(q)*. 6 8 7 The perturb (perturbation parameter), |epsilon|, should be held between 0.01 and 0.1. 8 It is best to hold the perturbation parameter fixed and let the "stickiness" vary to 9 adjust the interaction strength. The stickiness, |tau|, is defined in the equation 10 below and is a function of both the perturbation parameter and the interaction strength. 11 |tau| and |epsilon| are defined in terms of the hard sphere diameter (|sigma| = 2\*\ *R*\ ), 12 the width of the square well, |bigdelta| (same units as *R*), and the depth of the well, 13 *Uo*, in units of kT. From the definition, it is clear that smaller |tau| means stronger 14 attraction. 9 The perturb (perturbation parameter), |epsilon|, should be held between 0.01 10 and 0.1. It is best to hold the perturbation parameter fixed and let 11 the "stickiness" vary to adjust the interaction strength. The stickiness, 12 |tau|, is defined in the equation below and is a function of both the 13 perturbation parameter and the interaction strength. |tau| and |epsilon| 14 are defined in terms of the hard sphere diameter (|sigma| = 2\*\ *R*\ ), the 15 width of the square well, |bigdelta| (same units as *R*), and the depth of 16 the well, *Uo*, in units of kT. From the definition, it is clear that 17 smaller |tau| means stronger attraction. 15 18 16 19 .. image:: img/stickyhardsphere_228.PNG … … 20 23 .. image:: img/stickyhardsphere_229.PNG 21 24 22 The Percus-Yevick (PY) closure was used for this calculation, and is an adequate closure 23 for an attractive interparticle potential. This solution has been compared to Monte Carlo 24 simulations for a square well fluid, with good agreement. 25 The Percus-Yevick (PY) closure was used for this calculation, and is an 26 adequate closure for an attractive interparticle potential. This solution 27 has been compared to Monte Carlo simulations for a square well fluid, with 28 good agreement. 25 29 26 The true particle volume fraction, |phi|, is not equal to *h*, which appears in most of 27 the reference. The two are related in equation (24) of the reference. The reference also 28 describes the relationship between this perturbation solution and the original sticky hard 29 sphere (or adhesive sphere) model by Baxter. 30 The true particle volume fraction, |phi|, is not equal to *h*, which appears 31 in most of the reference. The two are related in equation (24) of the 32 reference. The reference also describes the relationship between this 33 perturbation solution and the original sticky hard sphere (or adhesive 34 sphere) model by Baxter. 
30 35 31 NB: The calculation can go haywire for certain combinations of the input parameters, 32 producing unphysical solutions - in this case errors are reported to the command window and 33 the *S(q)* is set to -1 (so it will disappear on a log-log plot). Use tight bounds to keep 34 the parameters to values that you know are physical (test them) and keep nudging them until 36 NB: The calculation can go haywire for certain combinations of the input 37 parameters, producing unphysical solutions - in this case errors are 38 reported to the command window and the *S(q)* is set to -1 (so it will 39 disappear on a log-log plot). Use tight bounds to keep the parameters to 40 values that you know are physical (test them) and keep nudging them until 35 41 the optimization does not hit the constraints. 36 42 37 In sasview the effective radius will be calculated from the parameters used in the38 form factor P(Q) that this S(Q) is combined with.43 In sasview the effective radius will be calculated from the parameters 44 used in the form factor P(Q) that this S(Q) is combined with. 39 45 40 For 2D data: The 2D scattering intensity is calculated in the same way as 1D, where41 the *q* vector is defined as46 For 2D data: The 2D scattering intensity is calculated in the same way 47 as 1D, where the *q* vector is defined as 42 48 43 49 .. math:: … … 99 105 100 106 Iq = """ 101 102 103 104 107 double onemineps,eta; 108 double sig,aa,etam1,etam1sq,qa,qb,qc,radic; 109 double lam,lam2,test,mu,alpha,beta; 110 double kk,k2,k3,ds,dc,aq1,aq2,aq3,aq,bq1,bq2,bq3,bq,sq; 105 111 106 107 112 onemineps = 1.0-perturb; 113 eta = volfraction/onemineps/onemineps/onemineps; 108 114 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 115 sig = 2.0 * effect_radius; 116 aa = sig/onemineps; 117 etam1 = 1.0 - eta; 118 etam1sq=etam1*etam1; 119 //C 120 //C SOLVE QUADRATIC FOR LAMBDA 121 //C 122 qa = eta/12.0; 123 qb = -1.0*(stickiness + eta/etam1); 124 qc = (1.0 + eta/2.0)/etam1sq; 125 radic = qb*qb - 4.0*qa*qc; 126 if(radic<0) { 127 //if(x>0.01 && x<0.015) 128 // Print "Lambda unphysical - both roots imaginary" 129 //endif 130 return(-1.0); 131 } 132 //C KEEP THE SMALLER ROOT, THE LARGER ONE IS UNPHYSICAL 133 lam = (-1.0*qb-sqrt(radic))/(2.0*qa); 134 lam2 = (-1.0*qb+sqrt(radic))/(2.0*qa); 135 if(lam2<lam) { 136 lam = lam2; 137 } 138 test = 1.0 + 2.0*eta; 139 mu = lam*eta*etam1; 140 if(mu>test) { 141 //if(x>0.01 && x<0.015) 142 // Print "Lambda unphysical mu>test" 143 //endif 144 return(-1.0); 145 } 146 alpha = (1.0 + 2.0*eta - mu)/etam1sq; 147 beta = (mu - 3.0*eta)/(2.0*etam1sq); 148 //C 149 //C CALCULATE THE STRUCTURE FACTOR 150 //C 151 kk = q*aa; 152 k2 = kk*kk; 153 k3 = kk*k2; 154 SINCOS(kk,ds,dc); 155 //ds = sin(kk); 156 //dc = cos(kk); 157 aq1 = ((ds - kk*dc)*alpha)/k3; 158 aq2 = (beta*(1.0-dc))/k2; 159 aq3 = (lam*ds)/(12.0*kk); 160 aq = 1.0 + 12.0*eta*(aq1+aq2-aq3); 161 // 162 bq1 = alpha*(0.5/kk - ds/k2 + (1.0 - dc)/k3); 163 bq2 = beta*(1.0/kk - ds/k2); 164 bq3 = (lam/12.0)*((1.0 - dc)/kk); 165 bq = 12.0*eta*(bq1+bq2-bq3); 166 // 167 sq = 1.0/(aq*aq +bq*bq); 162 168 163 169 return(sq); 164 170 """ 165 171 … … 176 182 stickiness=0.2, effect_radius_pd=0.1, effect_radius_pd_n=40) 177 183 178 179 -
sasmodels/models/triaxial_ellipsoid.py
(ra5d0d00 -> r3c56da87)

    @@ -90,5 +90,5 @@
     """

    -from numpy import pi,inf
    +from numpy import inf

     name = "triaxial_ellipsoid"
sasmodels/sasview_model.py
rde0c4ba r3c56da87 69 69 self.dispersion = dict() 70 70 partype = model.info['partype'] 71 for name, units, default, limits, ptype, descriptionin model.info['parameters']:71 for name, units, default, limits, _, _ in model.info['parameters']: 72 72 self.params[name] = default 73 73 self.details[name] = [units] + limits … … 120 120 121 121 122 # pylint: disable=no-self-use 122 123 def getProfile(self): 123 124 """ … … 213 214 """ 214 215 if isinstance(x, (list, tuple)): 216 # pylint: disable=unpacking-non-sequence 215 217 q, phi = x 216 218 return self.calculate_Iq([q * math.cos(phi)], … … 263 265 264 266 265 :param qdist: ndarray of scalar q-values or list [qx,qy] where qx,qy are 1D ndarrays 267 :param qdist: ndarray of scalar q-values or list [qx,qy] 268 where qx,qy are 1D ndarrays 266 269 """ 267 270 if isinstance(qdist, (list, tuple)): … … 279 282 280 283 else: 281 raise TypeError("evalDistribution expects q or [qx, qy], not %r" % type(qdist)) 284 raise TypeError("evalDistribution expects q or [qx, qy], not %r" 285 % type(qdist)) 282 286 283 287 def calculate_Iq(self, *args): … … 382 386 limits = self._model.info['limits'] 383 387 dis = self.dispersion[par] 384 v , w= weights.get_weights(388 value, weight = weights.get_weights( 385 389 dis['type'], dis['npts'], dis['width'], dis['nsigmas'], 386 390 self.params[par], limits[par], par in relative) 387 return v , w / w.max()388 391 return value, weight / np.sum(weight) 392 -
sasmodels/sesans.py
(rc97724e -> r3c56da87)

    @@ -22,13 +22,13 @@
     # TODO: dead code; for now the call to the hankel transform happens in BumpsModel
     class SesansCalculator:
    -    def __init__(self, sans_kernel, q_zmax, Rmax, SElength, wavelength, thickness):
    -        self._set_kernel(sans_kernel, q_zmax, Rmax)
    +    def __init__(self, kernel, q_zmax, Rmax, SElength, wavelength, thickness):
    +        self._set_kernel(kernel, q_zmax, Rmax)
             self.SElength = SElength
             self.wavelength = wavelength
             self.thickness = thickness

    -    def _set_kernel(self, sans_kernel, q_zmax, Rmax):
    -        input = sans_kernel.make_input([make_q(q_zmax, Rmax)])
    -        self.sans_calculator = sans_kernel(input)
    +    def _set_kernel(self, kernel, q_zmax, Rmax):
    +        kernel_input = kernel.make_input([make_q(q_zmax, Rmax)])
    +        self.sans_calculator = kernel(kernel_input)

         def __call__(self, pars, pd_pars, cutoff=1e-5):
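The calculator above feeds I(q) on the grid from make_q into sesans.hankel, which bumps_model calls with the spin-echo lengths, wavelength, sample thickness, q and the theory curve. As a rough numerical picture of that operation only, with the true routine's normalization and the wavelength/thickness factors omitted, a zeroth-order Hankel transform of a placeholder I(q) looks like:

    import numpy as np
    from scipy.special import j0

    def hankel0(z, q, Iq):
        # crude rectangle-rule quadrature of the J0 transform, illustration only
        dq = np.diff(q, prepend=q[0])
        return np.array([np.sum(j0(q * zi) * Iq * q * dq) for zi in z])

    q = np.linspace(1e-3, 0.5, 500)        # 1/Angstrom
    Iq = 1.0 / (1.0 + (q * 50.0) ** 2)     # placeholder scattering curve
    z = np.linspace(10.0, 2000.0, 5)       # spin-echo lengths, Angstrom
    print(hankel0(z, q, Iq))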
sasmodels/weights.py
(r5d4777d -> r3c56da87)

    @@ -26,4 +26,5 @@
         return pars

    +    # pylint: disable=no-self-use
         def set_weights(self, values, weights):
             raise RuntimeError("set_weights is only available for ArrayDispersion")
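A usage sketch for weights.get_weights matching the positional call made from core.get_weights above (disperser, npts, width, nsigma, value, limits, relative); it assumes sasmodels is importable, and the argument values are illustrative:

    import numpy as np
    from sasmodels import weights

    value, weight = weights.get_weights(
        'gaussian',        # disperser type (the default in core.get_weights)
        35,                # number of points in the distribution
        0.1,               # width (10% polydispersity)
        3.0,               # number of sigmas to sample
        50.0,              # nominal parameter value, e.g. a radius in Angstrom
        [0.0, np.inf],     # parameter limits
        True)              # width is relative to the nominal value
    weight = weight / np.sum(weight)    # callers renormalize by the sum
    print(len(value), len(weight))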