Changeset c2e4be1 in sasmodels
- Timestamp: Mar 21, 2016 2:05:59 AM (9 years ago)
- Branches: master, core_shell_microgels, costrafo411, magnetic_model, release_v0.94, release_v0.95, ticket-1257-vesicle-product, ticket_1156, ticket_1265_superball, ticket_822_more_unit_tests
- Children: 2e613f3
- Parents: 31d22de (diff), 303d8d6 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
- Files:
- 2 added
- 14 edited
doc/developer/calculator.rst
rd5ac45f r03cac08 7 7 - KERNEL declares a function to be available externally 8 8 - KERNEL_NAME is the name of the function being declared 9 - MAX_PD is the maximum depth of the polydispersity loop 9 10 - NPARS is the number of parameters in the kernel 10 11 - PARAMETER_TABLE is the declaration of the parameters to the kernel:: … … 29 30 double sld_solvent 30 31 31 - CALL_IQ(q, nq,i, pars) is the declaration of a call to the kernel::32 - CALL_IQ(q, i, pars) is the declaration of a call to the kernel:: 32 33 33 34 Cylinder: 34 35 35 #define CALL_IQ(q, nq, i, var) \ 36 Iq(q[i], \ 36 #define CALL_IQ(q, i, var) Iq(q[i], \ 37 37 var.length, \ 38 38 var.radius, \ … … 42 42 Multi-shell cylinder: 43 43 44 #define CALL_IQ(q, nq, i, var) \ 45 Iq(q[i], \ 44 #define CALL_IQ(q, i, var) Iq(q[i], \ 46 45 var.num_shells, \ 47 46 var.length, \ … … 50 49 var.sld_solvent) 51 50 51 Cylinder2D: 52 53 #define CALL_IQ(q, i, var) Iqxy(q[2*i], q[2*i+1], \ 54 var.length, \ 55 var.radius, \ 56 var.sld, \ 57 var.sld_solvent, \ 58 var.theta, \ 59 var.phi) 60 52 61 - CALL_VOLUME(var) is similar, but for calling the form volume:: 53 62 … … 69 78 inline bool constrained(p1, p2, p3) { return expression; } 70 79 #define INVALID(var) constrained(var.p1, var.p2, var.p3) 71 72 - IQ_FUNC could be Iq or Iqxy73 - IQ_PARS could be q[i] or q[2*i],q[2*i+1]74 80 75 81 Our design supports a limited number of polydispersity loops, wherein … … 200 206 201 207 TODO: cutoff 208 209 For accuracy we may want to introduce Kahan summation into the integration:: 210 211 212 double accumulated_error = 0.0; 213 ... 214 #if USE_KAHAN_SUMMATION 215 const double y = next - accumulated_error; 216 const double t = ret + y; 217 accumulated_error = (t - ret) - y; 218 ret = t; 219 #else 220 ret += next; 221 #endif -
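The Kahan summation proposed above is easy to prototype outside the kernel. A minimal Python sketch of the same compensation step (the helper name and test data are illustrative, not part of sasmodels)::

    import math
    import random

    def kahan_sum(values):
        """Compensated (Kahan) summation: track the low-order bits that are
        lost when a small term is added to a large running total."""
        total = 0.0
        accumulated_error = 0.0
        for next_value in values:
            y = next_value - accumulated_error   # re-inject previously lost bits
            t = total + y                        # low-order bits of y may be lost here
            accumulated_error = (t - total) - y  # recover what was lost this round
            total = t
        return total

    # Terms spanning many magnitudes: the compensated total stays closer to
    # math.fsum (correctly rounded) than the naive built-in sum does.
    terms = [random.uniform(0.0, 1.0) * 10.0**random.randint(-8, 8)
             for _ in range(100000)]
    exact = math.fsum(terms)
    print(abs(kahan_sum(terms) - exact), abs(sum(terms) - exact))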
sasmodels/bumps_model.py
ra84a0ca r303d8d6 81 81 from bumps.names import Parameter 82 82 83 pars = {} 83 pars = {} # => floating point parameters 84 84 for p in model_info['parameters']: 85 85 value = kwargs.pop(p.name, p.default) 86 86 pars[p.name] = Parameter.default(value, name=p.name, limits=p.limits) 87 for name in model_info['par type']['pd-2d']:87 for name in model_info['par_type']['pd']: 88 88 for xpart, xdefault, xlimits in [ 89 89 ('_pd', 0., pars[name].limits), … … 95 95 pars[xname] = Parameter.default(xvalue, name=xname, limits=xlimits) 96 96 97 pd_types = {} 98 for name in model_info['par type']['pd-2d']:97 pd_types = {} # => distribution names 98 for name in model_info['par_type']['pd']: 99 99 xname = name + '_pd_type' 100 100 xvalue = kwargs.pop(xname, 'gaussian') -
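For reference, the pattern behind create_parameters can be sketched without the bumps dependency: each polydisperse parameter <name> gains <name>_pd, <name>_pd_n and <name>_pd_nsigma entries plus a <name>_pd_type distribution name. The defaults below are illustrative only::

    def expand_pd_parameters(pd_names, base_values):
        pars = dict(base_values)      # floating point parameters
        pd_types = {}                 # distribution names
        for name in pd_names:
            pars.setdefault(name + "_pd", 0.0)         # distribution width
            pars.setdefault(name + "_pd_n", 0)         # number of mesh points
            pars.setdefault(name + "_pd_nsigma", 3.0)  # mesh extent in sigmas
            pd_types[name + "_pd_type"] = "gaussian"   # default distribution
        return pars, pd_types

    pars, pd_types = expand_pd_parameters(
        ["radius", "length"], {"radius": 20.0, "length": 400.0})
    print(sorted(pars), pd_types)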
sasmodels/compare.py
r72a081d r303d8d6 308 308 exclude = lambda n: False 309 309 else: 310 partype = model_info['partype'] 311 par1d = set(partype['fixed-1d']+partype['pd-1d']) 310 par1d = model_info['par_type']['1d'] 312 311 exclude = lambda n: n not in par1d 313 312 lines = [] … … 870 869 pars, pd_types = bumps_model.create_parameters(model_info, **opts['pars']) 871 870 if not opts['is2d']: 872 active = [base + ext 873 for base in model_info['partype']['pd-1d'] 874 for ext in ['', '_pd', '_pd_n', '_pd_nsigma']] 875 active.extend(model_info['partype']['fixed-1d']) 876 for k in active: 877 v = pars[k] 878 v.range(*parameter_range(k, v.value)) 871 for name in model_info['par_type']['1d']: 872 for ext in ['', '_pd', '_pd_n', '_pd_nsigma']: 873 k = name+ext 874 v = pars.get(k, None) 875 if v is not None: 876 v.range(*parameter_range(k, v.value)) 879 877 else: 880 878 for k, v in pars.items(): -
sasmodels/core.py
rcf52f9c r303d8d6 173 173 return model(q_vectors) 174 174 175 def get_weights( model_info, pars, name):175 def get_weights(parameter, values): 176 176 """ 177 177 Generate the distribution for parameter *name* given the parameter values … … 181 181 from the *pars* dictionary for parameter value and parameter dispersion. 182 182 """ 183 relative = name in model_info['partype']['pd-rel'] 184 limits = model_info['limits'][name] 185 disperser = pars.get(name+'_pd_type', 'gaussian') 186 value = pars.get(name, model_info['defaults'][name]) 187 npts = pars.get(name+'_pd_n', 0) 188 width = pars.get(name+'_pd', 0.0) 189 nsigma = pars.get(name+'_pd_nsigma', 3.0) 183 value = values.get(parameter.name, parameter.default) 184 if parameter.type not in ('volume', 'orientation'): 185 return [value], [] 186 relative = parameter.type == 'volume' 187 limits = parameter.limits 188 disperser = values.get(parameter.name+'_pd_type', 'gaussian') 189 npts = values.get(parameter.name+'_pd_n', 0) 190 width = values.get(parameter.name+'_pd', 0.0) 191 nsigma = values.get(parameter.name+'_pd_nsigma', 3.0) 190 192 value, weight = weights.get_weights( 191 193 disperser, npts, width, nsigma, value, limits, relative) … … 206 208 return value, weight 207 209 208 def call_kernel(kernel, pars, cutoff=0, mono=False):210 def call_kernel(kernel, values, cutoff=0, mono=False): 209 211 """ 210 212 Call *kernel* returned from :func:`make_kernel` with parameters *pars*. … … 219 221 *mono* is True if polydispersity should be set to none on all parameters. 220 222 """ 221 fixed_pars = [pars.get(name, kernel.info['defaults'][name]) 222 for name in kernel.fixed_pars] 223 if mono: 224 pd_pars = [( np.array([pars[name]]), np.array([1.0]) ) 225 for name in kernel.pd_pars] 226 else: 227 pd_pars = [get_weights(kernel.info, pars, name) for name in kernel.pd_pars] 228 return kernel(fixed_pars, pd_pars, cutoff=cutoff) 223 if mono or True: 224 pars = np.array([values.get(p.name, p.default) 225 for p in kernel.info['parameters']]) 226 weights = np.array([1.0]) 227 details = kernel.info['mono_details'] 228 return kernel(pars, weights, details, cutoff) 229 else: 230 pairs = [get_weights(p, values) for p in kernel.info['parameters']] 231 weights, pars = [v for v in zip(*pairs)] 232 details = generate.poly_details(kernel.info, weights, pars) 233 weights, pars = [np.hstack(v) for v in (weights, pars)] 234 return kernel(pars, weights, details, cutoff) 229 235 230 236 def call_ER_VR(model_info, vol_pars): … … 249 255 250 256 251 def call_ER( info, pars):252 """ 253 Call the model ER function using * pars*.254 * info* is either *model.info* if you have a loaded model, or *kernel.info*255 if youhave a model kernel prepared for evaluation.256 """ 257 ER = info.get('ER', None)257 def call_ER(model_info, values): 258 """ 259 Call the model ER function using *values*. *model_info* is either 260 *model.info* if you have a loaded model, or *kernel.info* if you 261 have a model kernel prepared for evaluation. 
262 """ 263 ER = model_info.get('ER', None) 258 264 if ER is None: 259 265 return 1.0 260 266 else: 261 vol_pars = [get_weights(info, pars, name) 262 for name in info['partype']['volume']] 267 vol_pars = [get_weights(parameter, values) 268 for parameter in model_info['parameters'] 269 if parameter.type == 'volume'] 263 270 value, weight = dispersion_mesh(vol_pars) 264 271 individual_radii = ER(*value) … … 266 273 return np.sum(weight*individual_radii) / np.sum(weight) 267 274 268 def call_VR( info, pars):275 def call_VR(model_info, values): 269 276 """ 270 277 Call the model VR function using *pars*. … … 272 279 if you have a model kernel prepared for evaluation. 273 280 """ 274 VR = info.get('VR', None)281 VR = model_info.get('VR', None) 275 282 if VR is None: 276 283 return 1.0 277 284 else: 278 vol_pars = [get_weights(info, pars, name) 279 for name in info['partype']['volume']] 285 vol_pars = [get_weights(parameter, values) 286 for parameter in model_info['parameters'] 287 if parameter.type == 'volume'] 280 288 value, weight = dispersion_mesh(vol_pars) 281 289 whole, part = VR(*value) -
sasmodels/direct_model.py
r02e70ff r303d8d6 66 66 self.data_type = 'Iq' 67 67 68 partype = model.info['partype']69 70 68 if self.data_type == 'sesans': 71 69 … … 81 79 q_mono = sesans.make_all_q(data) 82 80 elif self.data_type == 'Iqxy': 81 partype = model.info['par_type'] 83 82 if not partype['orientation'] and not partype['magnetic']: 84 83 raise ValueError("not 2D without orientation or magnetic parameters") -
sasmodels/generate.py
rd5ac45f r303d8d6 236 236 237 237 TEMPLATE_ROOT = dirname(__file__) 238 239 MAX_PD = 4 238 240 239 241 F16 = np.dtype('float16') … … 352 354 return [_search(search_path, f) for f in model_info['source']] 353 355 354 355 356 def convert_type(source, dtype): 356 357 """ … … 417 418 if filename not in _template_cache or mtime > _template_cache[filename][0]: 418 419 with open(path) as fid: 419 _template_cache[filename] = (mtime, fid.read() )420 _template_cache[filename] = (mtime, fid.read(), path) 420 421 return _template_cache[filename][1] 422 423 def model_templates(): 424 # TODO: fails DRY; templates are listed in two places. 425 # should instead have model_info contain a list of paths 426 return [joinpath(TEMPLATE_ROOT, filename) 427 for filename in ('kernel_header.c', 'kernel_iq.c')] 428 429 430 _FN_TEMPLATE = """\ 431 double %(name)s(%(pars)s); 432 double %(name)s(%(pars)s) { 433 %(body)s 434 } 435 436 437 """ 421 438 422 439 def _gen_fn(name, pars, body): … … 431 448 } 432 449 """ 433 template = """\434 double %(name)s(%(pars)s);435 double %(name)s(%(pars)s) {436 %(body)s437 }438 439 440 """441 450 par_decl = ', '.join('double ' + p for p in pars) if pars else 'void' 442 return template % {'name': name, 'body': body, 'pars': par_decl} 443 444 def _gen_call_pars(name, pars): 445 name += "." 446 return ",".join(name+p for p in pars) 451 return _FN_TEMPLATE % {'name': name, 'body': body, 'pars': par_decl} 452 453 def _call_pars(prefix, pars): 454 """ 455 Return a list of *prefix.parameter* from parameter items. 456 """ 457 prefix += "." 458 return [prefix+p for p in pars] 459 460 _IQXY_PATTERN = re.compile("^((inline|static) )? *(double )? *Iqxy *([(]|$)", 461 flags=re.MULTILINE) 462 def _have_Iqxy(sources): 463 """ 464 Return true if any file defines Iqxy. 465 466 Note this is not a C parser, and so can be easily confused by 467 non-standard syntax. Also, it will incorrectly identify the following 468 as having Iqxy:: 469 470 /* 471 double Iqxy(qx, qy, ...) { ... fill this in later ... } 472 */ 473 474 If you want to comment out an Iqxy function, use // on the front of the 475 line instead. 476 """ 477 for code in sources: 478 if _IQXY_PATTERN.search(code): 479 return True 480 else: 481 return False 447 482 448 483 def make_source(model_info): … … 464 499 # for computing volume even if we allow non-disperse volume parameters. 465 500 466 # Load template 467 source = [load_template('kernel_header.c')] 468 469 # Load additional sources 470 source += [open(f).read() for f in model_sources(model_info)] 471 472 # Prepare defines 473 defines = [] 474 475 iq_parameters = [p.name 476 for p in model_info['parameters'][2:] # skip scale, background 477 if p.name in model_info['par_set']['1d']] 478 iqxy_parameters = [p.name 479 for p in model_info['parameters'][2:] # skip scale, background 480 if p.name in model_info['par_set']['2d']] 481 volume_parameters = model_info['par_type']['volume'] 501 # kernel_iq assumes scale and background are the first parameters; 502 # they should be first for 1d and 2d parameter lists as well. 
503 assert model_info['parameters'][0].name == 'scale' 504 assert model_info['parameters'][1].name == 'background' 505 506 # Identify parameter types 507 iq_parameters = model_info['par_type']['1d'][2:] 508 iqxy_parameters = model_info['par_type']['2d'][2:] 509 vol_parameters = model_info['par_type']['volume'] 510 511 # Load templates and user code 512 kernel_header = load_template('kernel_header.c') 513 kernel_code = load_template('kernel_iq.c') 514 user_code = [open(f).read() for f in model_sources(model_info)] 515 516 # Build initial sources 517 source = [kernel_header] + user_code 482 518 483 519 # Generate form_volume function, etc. from body only 484 520 if model_info['form_volume'] is not None: 485 pnames = [p .name for p in volume_parameters]521 pnames = [p for p in vol_parameters] 486 522 source.append(_gen_fn('form_volume', pnames, model_info['form_volume'])) 487 523 if model_info['Iq'] is not None: 488 pnames = ['q'] + [p .namefor p in iq_parameters]524 pnames = ['q'] + [p for p in iq_parameters] 489 525 source.append(_gen_fn('Iq', pnames, model_info['Iq'])) 490 526 if model_info['Iqxy'] is not None: 491 pnames = ['qx', 'qy'] + [p .namefor p in iqxy_parameters]527 pnames = ['qx', 'qy'] + [p for p in iqxy_parameters] 492 528 source.append(_gen_fn('Iqxy', pnames, model_info['Iqxy'])) 493 529 494 # Fill in definitions for volume parameters 495 if volume_parameters: 496 deref_vol = ",".join("v."+p.name for p in volume_parameters) 497 defines.append(('CALL_VOLUME(v)', 'form_volume(%s)\n'%deref_vol)) 530 # Define the parameter table 531 source.append("#define PARAMETER_TABLE \\") 532 source.append("\\\n ".join("double %s;"%p.name 533 for p in model_info['parameters'][2:])) 534 535 # Define the function calls 536 if vol_parameters: 537 refs = ",".join(_call_pars("v", vol_parameters)) 538 call_volume = "#define CALL_VOLUME(v) form_volume(%s)"%refs 498 539 else: 499 540 # Model doesn't have volume. We could make the kernel run a little 500 541 # faster by not using/transferring the volume normalizations, but 501 542 # the ifdef's reduce readability more than is worthwhile. 502 defines.append(('CALL_VOLUME(v)', '0.0')) 503 504 # Fill in definitions for Iq parameters 505 defines.append(('KERNEL_NAME', model_info['name'])) 506 defines.append(('IQ_PARAMETERS', ', '.join(iq_parameters))) 507 if fixed_1d: 508 defines.append(('IQ_FIXED_PARAMETER_DECLARATIONS', 509 ', \\\n '.join('const double %s' % p for p in fixed_1d))) 510 # Fill in definitions for Iqxy parameters 511 defines.append(('IQXY_KERNEL_NAME', model_info['name'] + '_Iqxy')) 512 defines.append(('IQXY_PARAMETERS', ', '.join(iqxy_parameters))) 513 if fixed_2d: 514 defines.append(('IQXY_FIXED_PARAMETER_DECLARATIONS', 515 ', \\\n '.join('const double %s' % p for p in fixed_2d))) 516 if pd_2d: 517 defines.append(('IQXY_WEIGHT_PRODUCT', 518 '*'.join(p + '_w' for p in pd_2d))) 519 defines.append(('IQXY_DISPERSION_LENGTH_DECLARATIONS', 520 ', \\\n '.join('const int N%s' % p for p in pd_2d))) 521 defines.append(('IQXY_DISPERSION_LENGTH_SUM', 522 '+'.join('N' + p for p in pd_2d))) 523 open_loops, close_loops = build_polydispersity_loops(pd_2d) 524 defines.append(('IQXY_OPEN_LOOPS', 525 open_loops.replace('\n', ' \\\n'))) 526 defines.append(('IQXY_CLOSE_LOOPS', 527 close_loops.replace('\n', ' \\\n'))) 528 # Need to know if we have a theta parameter for Iqxy; it is not there 529 # for the magnetic sphere model, for example, which has a magnetic 530 # orientation but no shape orientation. 
531 if 'theta' in pd_2d: 532 defines.append(('IQXY_HAS_THETA', '1')) 533 534 #for d in defines: print(d) 535 defines = '\n'.join('#define %s %s' % (k, v) for k, v in defines) 536 sources = '\n\n'.join(source) 537 return C_KERNEL_TEMPLATE % { 538 'DEFINES': defines, 539 'SOURCES': sources, 540 } 543 call_volume = "#define CALL_VOLUME(v) 0.0" 544 source.append(call_volume) 545 546 refs = ["q[i]"] + _call_pars("v", iq_parameters) 547 call_iq = "#define CALL_IQ(q,i,v) Iq(%s)" % (",".join(refs)) 548 if _have_Iqxy(user_code): 549 # Call 2D model 550 refs = ["q[2*i]", "q[2*i+1]"] + _call_pars("v", iqxy_parameters) 551 call_iqxy = "#define CALL_IQ(q,i,v) Iqxy(%s)" % (",".join(refs)) 552 else: 553 # Call 1D model with sqrt(qx^2 + qy^2) 554 warnings.warn("Creating Iqxy = Iq(sqrt(qx^2 + qy^2))") 555 # still defined:: refs = ["q[i]"] + _call_pars("v", iq_parameters) 556 pars_sqrt = ["sqrt(q[2*i]*q[2*i]+q[2*i+1]*q[2*i+1])"] + refs[1:] 557 call_iqxy = "#define CALL_IQ(q,i,v) Iq(%s)" % (",".join(pars_sqrt)) 558 559 # Fill in definitions for numbers of parameters 560 source.append("#define MAX_PD %s"%model_info['max_pd']) 561 source.append("#define NPARS %d"%(len(model_info['parameters'])-2)) 562 563 # TODO: allow mixed python/opencl kernels? 564 565 # define the Iq kernel 566 source.append("#define KERNEL_NAME %s_Iq"%model_info['name']) 567 source.append(call_iq) 568 source.append(kernel_code) 569 source.append("#undef CALL_IQ") 570 source.append("#undef KERNEL_NAME") 571 572 # define the Iqxy kernel from the same source with different #defines 573 source.append("#define KERNEL_NAME %s_Iqxy"%model_info['name']) 574 source.append(call_iqxy) 575 source.append(kernel_code) 576 source.append("#undef CALL_IQ") 577 source.append("#undef KERNEL_NAME") 578 579 return '\n'.join(source) 541 580 542 581 def categorize_parameters(pars): … … 551 590 * *magnetic* set of parameters that are used to compute magnetic 552 591 patterns (which includes all 1D and 2D parameters) 553 * *sesans* set of parameters that are used to compute sesans patterns 554 (which is just 1D without background) 555 * *pd-relative* is the set of parameters with relative distribution 592 * *pd_relative* is the set of parameters with relative distribution 556 593 width (e.g., radius +/- 10%) rather than absolute distribution 557 594 width (e.g., theta +/- 6 degrees). 
558 595 """ 559 596 par_set = {} 560 par_set['1d'] = [p for p in pars if p.type not in ('orientation', 'magnetic')]561 par_set['2d'] = [p for p in pars if p.type != 'magnetic']562 par_set['magnetic'] = [p for p in pars]563 par_set['pd'] = [p for p in pars if p.type in ('volume', 'orientation')]564 par_set['pd_relative'] = [p for p in pars if p.type == 'volume']597 par_set['1d'] = [p.name for p in pars if p.type not in ('orientation', 'magnetic')] 598 par_set['2d'] = [p.name for p in pars if p.type != 'magnetic'] 599 par_set['magnetic'] = [p.name for p in pars] 600 par_set['pd'] = [p.name for p in pars if p.type in ('volume', 'orientation')] 601 par_set['pd_relative'] = [p.name for p in pars if p.type == 'volume'] 565 602 return par_set 566 603 … … 605 642 pars = [Parameter(*p) for p in model_info['parameters']] 606 643 # Fill in the derived attributes 644 par_type = collect_types(pars) 645 par_type.update(categorize_parameters(pars)) 607 646 model_info['parameters'] = pars 608 partype = categorize_parameters(pars) 609 model_info['limits'] = dict((p.name, p.limits) for p in pars) 610 model_info['par_type'] = collect_types(pars) 611 model_info['par_set'] = categorize_parameters(pars) 612 model_info['defaults'] = dict((p.name, p.default) for p in pars) 647 model_info['par_type'] = par_type 613 648 if model_info.get('demo', None) is None: 614 model_info['demo'] = model_info['defaults'] 615 model_info['has_2d'] = partype['orientation'] or partype['magnetic'] 649 model_info['demo'] = dict((p.name, p.default) for p in pars) 650 model_info['has_2d'] = par_type['orientation'] or par_type['magnetic'] 651 652 def mono_details(max_pd, npars): 653 par_offset = 5*max_pd 654 const_offset = par_offset + 3*npars 655 656 mono = np.zeros(const_offset + 3, 'i') 657 mono[0] = 0 # pd_par: arbitrary order; use first 658 mono[1*max_pd] = 1 # pd_length: only one element 659 mono[2*max_pd] = 2 # pd_offset: skip scale and background 660 mono[3*max_pd] = 1 # pd_stride: vectors of length 1 661 mono[4*max_pd-1] = 1 # pd_stride[-1]: only one element in set 662 mono[4*max_pd] = 0 # pd_isvol: doens't matter if no norm 663 mono[par_offset:par_offset+npars] = np.arange(2, npars+2, dtype='i') 664 # par_offset: copied in order 665 mono[par_offset+npars:par_offset+2*npars] = 0 666 # par_coord: no coordinated parameters 667 mono[par_offset+npars] = 1 # par_coord[0]: except par 0 668 mono[par_offset+2*npars:par_offset+3*npars] = 0 669 # fast coord with 0 670 mono[const_offset] = 1 # fast_coord_count: one fast index 671 mono[const_offset+1] = -1 # theta_var: None 672 mono[const_offset+2] = 0 # fast_theta: False 673 return mono 674 675 def poly_details(model_info, weights, pars, constraints=None): 676 if constraints is not None: 677 # Need to find the independently varying pars and sort them 678 # Need to build a coordination list for the dependent variables 679 # Need to generate a constraints function which takes values 680 # and weights, returning par blocks 681 raise ValueError("Can't handle constraints yet") 682 683 raise ValueError("polydispersity not supported") 684 685 686 def create_default_functions(model_info): 687 """ 688 Autogenerate missing functions, such as Iqxy from Iq. 689 690 This only works for Iqxy when Iq is written in python. :func:`make_source` 691 performs a similar role for Iq written in C. 
692 """ 693 if model_info['Iq'] is not None and model_info['Iqxy'] is None: 694 if model_info['par_type']['1d'] != model_info['par_type']['2d']: 695 raise ValueError("Iqxy model is missing") 696 Iq = model_info['Iq'] 697 def Iqxy(qx, qy, **kw): 698 return Iq(np.sqrt(qx**2 + qy**2), **kw) 699 model_info['Iqxy'] = Iqxy 616 700 617 701 def make_model_info(kernel_module): … … 640 724 model variants (e.g., the list of cases) or is None if there are no 641 725 model variants 642 * *defaults* is the *{parameter: value}* table built from the parameter 643 description table. 644 * *limits* is the *{parameter: [min, max]}* table built from the 645 parameter description table. 646 * *partypes* categorizes the model parameters. See 726 * *par_type* categorizes the model parameters. See 647 727 :func:`categorize_parameters` for details. 648 728 * *demo* contains the *{parameter: value}* map used in compare (and maybe … … 661 741 *model_info* blocks for the composition objects. This allows us to 662 742 build complete product and mixture models from just the info. 743 * *max_pd* is the max polydispersity dimension. This is constant and 744 should not be reset. You may be able to change it when the program 745 starts by setting *sasmodels.generate.MAX_PD*. 663 746 664 747 """ … … 671 754 name = " ".join(w.capitalize() for w in kernel_id.split('_')) 672 755 model_info = dict( 756 max_pd=MAX_PD, 673 757 id=kernel_id, # string used to load the kernel 674 758 filename=abspath(kernel_module.__file__), … … 688 772 oldpars=getattr(kernel_module, 'oldpars', {}), 689 773 tests=getattr(kernel_module, 'tests', []), 774 mono_details = mono_details(MAX_PD, len(kernel_module.parameters)) 690 775 ) 691 776 process_parameters(model_info) … … 693 778 functions = "ER VR form_volume Iq Iqxy shape sesans".split() 694 779 model_info.update((k, getattr(kernel_module, k, None)) for k in functions) 780 create_default_functions(model_info) 695 781 return model_info 696 782 -
sasmodels/kernel_header.c
r2e44ac7 r03cac08 1 1 #ifdef __OPENCL_VERSION__ 2 2 # define USE_OPENCL 3 #elif defined(_OPENMP) 4 # define USE_OPENMP 3 5 #endif 4 5 #define USE_KAHAN_SUMMATION 06 6 7 7 // If opencl is not available, then we are compiling a C function … … 16 16 #include <float.h> 17 17 #define kernel extern "C" __declspec( dllexport ) 18 inlinedouble trunc(double x) { return x>=0?floor(x):-floor(-x); }19 inlinedouble fmin(double x, double y) { return x>y ? y : x; }20 inlinedouble fmax(double x, double y) { return x<y ? y : x; }21 inlinedouble isnan(double x) { return _isnan(x); }18 static double trunc(double x) { return x>=0?floor(x):-floor(-x); } 19 static double fmin(double x, double y) { return x>y ? y : x; } 20 static double fmax(double x, double y) { return x<y ? y : x; } 21 static double isnan(double x) { return _isnan(x); } 22 22 #define NAN (std::numeric_limits<double>::quiet_NaN()) // non-signalling NaN 23 23 static double cephes_expm1(double x) { … … 48 48 #define kernel extern "C" 49 49 #endif 50 inlinevoid SINCOS(double angle, double &svar, double &cvar) { svar=sin(angle); cvar=cos(angle); }50 static void SINCOS(double angle, double &svar, double &cvar) { svar=sin(angle); cvar=cos(angle); } 51 51 # else 52 52 #include <stdio.h> … … 99 99 # define M_4PI_3 4.18879020478639 100 100 #endif 101 //inline double square(double x) { return pow(x,2.0); } 102 //inline double square(double x) { return pown(x,2); } 103 inline double square(double x) { return x*x; } 104 inline double cube(double x) { return x*x*x; } 105 inline double sinc(double x) { return x==0 ? 1.0 : sin(x)/x; } 101 static double square(double x) { return x*x; } 102 static double cube(double x) { return x*x*x; } 103 static double sinc(double x) { return x==0 ? 1.0 : sin(x)/x; } 106 104 -
sasmodels/kernel_iq.c
rd5ac45f r303d8d6 12 12 */ 13 13 14 #ifndef _PAR_BLOCK_ // protected block so we can include this code twice. 15 #define _PAR_BLOCK_ 14 16 15 17 #define MAX_PD 4 // MAX_PD is the max number of polydisperse parameters 16 #define PD_2N 16 // PD_2N is the size of the coordination step table17 18 18 19 typedef struct { … … 31 32 32 33 typedef struct { 33 PARAMETER_ DECL;34 PARAMETER_TABLE; 34 35 } ParameterBlock; 35 36 #define FULL_KERNEL_NAME KERNEL_NAME ## _ ## IQ_FUNC 37 KERNEL 38 void FULL_KERNEL_NAME( 36 #endif 37 38 39 kernel 40 void KERNEL_NAME( 39 41 int nq, // number of q values 42 const int pd_start, // where we are in the polydispersity loop 43 const int pd_stop, // where we are stopping in the polydispersity loop 40 44 global const ProblemDetails *problem, 41 45 global const double *weights, 42 46 global const double *pars, 43 47 global const double *q, // nq q values, with padding to boundary 44 global double *result, // nq return values, again with padding 45 const double cutoff, // cutoff in the polydispersity weight product 46 const int pd_start, // where we are in the polydispersity loop 47 const int pd_stop, // where we are stopping in the polydispersity loop 48 global double *result, // nq+3 return values, again with padding 49 const double cutoff // cutoff in the polydispersity weight product 48 50 ) 49 51 { 50 52 printf("hello\n"); 51 53 // Storage for the current parameter values. These will be updated as we 52 54 // walk the polydispersity cube. 53 55 local ParameterBlock local_pars; // current parameter values 54 const double *parvec = &local_pars; // Alias named parameters with a vector56 double *pvec = (double *)(&local_pars); // Alias named parameters with a vector 55 57 56 58 local int offset[NPARS-2]; … … 60 62 // Shouldn't need to copy!! 61 63 for (int k=0; k < NPARS; k++) { 62 p arvec[k] = pars[k+2]; // skip scale and background64 pvec[k] = pars[k+2]; // skip scale and background 63 65 } 64 66 … … 68 70 for (int i=0; i < nq; i++) { 69 71 { 70 const double scattering = IQ_FUNC(IQ_PARS, IQ_PARAMETERS);72 const double scattering = CALL_IQ(q, i, local_pars); 71 73 result[i] += pars[0]*scattering + pars[1]; 72 74 } … … 99 101 norm = result[nq]; 100 102 vol = result[nq+1]; 101 norm_vol = result s[nq+2];103 norm_vol = result[nq+2]; 102 104 } 103 105 104 106 // Location in the polydispersity hypercube, one index per dimension. 105 local int pd_index[ PD_MAX];107 local int pd_index[MAX_PD]; 106 108 107 109 // Trigger the reset behaviour that happens at the end the fast loop 108 110 // by setting the initial index >= weight vector length. 
109 pd_index[0] = pd_length[0]; 111 pd_index[0] = problem->pd_length[0]; 112 110 113 111 114 // need product of weights at every Iq calc, so keep product of … … 120 123 for (int loop_index=pd_start; loop_index < pd_stop; loop_index++) { 121 124 // check if indices need to be updated 122 if (pd_index[0] >= p d_length[0]) {125 if (pd_index[0] >= problem->pd_length[0]) { 123 126 124 127 // RESET INDICES 125 pd_index[0] = loop_index%p d_length[0];128 pd_index[0] = loop_index%problem->pd_length[0]; 126 129 partial_weight = 1.0; 127 130 partial_volweight = 1.0; 128 131 for (int k=1; k < MAX_PD; k++) { 129 pd_index[k] = (loop_index%p d_length[k])/pd_stride[k];130 const double wi = weights[p d_offset[0]+pd_index[0]];132 pd_index[k] = (loop_index%problem->pd_length[k])/problem->pd_stride[k]; 133 const double wi = weights[problem->pd_offset[0]+pd_index[0]]; 131 134 partial_weight *= wi; 132 if (p d_isvol[k]) partial_volweight *= wi;135 if (problem->pd_isvol[k]) partial_volweight *= wi; 133 136 } 134 137 for (int k=0; k < NPARS; k++) { 135 int coord = p ar_coord[k];136 int this_offset = p ar_offset[k];138 int coord = problem->par_coord[k]; 139 int this_offset = problem->par_offset[k]; 137 140 int block_size = 1; 138 141 for (int bit=0; bit < MAX_PD && coord != 0; bit++) { 139 142 if (coord&1) { 140 143 this_offset += block_size * pd_index[bit]; 141 block_size *= p d_length[bit];144 block_size *= problem->pd_length[bit]; 142 145 } 143 146 coord /= 2; 144 147 } 145 148 offset[k] = this_offset; 146 parvec[k] = pars[this_offset]; 147 } 148 weight = partial_weight * weights[pd_offset[0]+pd_index[0]] 149 if (theta_var >= 0) { 150 spherical_correction = fabs(cos(M_PI_180*parvec[theta_var])); 151 } 152 if (!fast_theta) weight *= spherical_correction; 149 pvec[k] = pars[this_offset]; 150 } 151 weight = partial_weight * weights[problem->pd_offset[0]+pd_index[0]]; 152 if (problem->theta_var >= 0) { 153 spherical_correction = fabs(cos(M_PI_180*pvec[problem->theta_var])); 154 } 155 if (!problem->fast_theta) { 156 weight *= spherical_correction; 157 } 153 158 154 159 } else { … … 156 161 // INCREMENT INDICES 157 162 pd_index[0] += 1; 158 const double wi = weights[p d_offset[0]+pd_index[0]];163 const double wi = weights[problem->pd_offset[0]+pd_index[0]]; 159 164 weight = partial_weight*wi; 160 if (p d_isvol[0]) vol_weight *= wi;165 if (problem->pd_isvol[0]) vol_weight *= wi; 161 166 for (int k=0; k < problem->fast_coord_count; k++) { 162 parvec[ fast_coord_index[k]] 163 = pars[offset[fast_coord_index[k]] + pd_index[0]]; 164 } 165 if (fast_theta) weight *= fabs(cos(M_PI_180*parvec[theta_var])); 166 167 pvec[problem->fast_coord_index[k]] 168 = pars[offset[problem->fast_coord_index[k]]++]; 169 } 170 if (problem->fast_theta) { 171 weight *= fabs(cos(M_PI_180*pvec[problem->theta_var])); 172 } 167 173 } 168 174 … … 171 177 #endif 172 178 179 // Accumulate I(q) 180 // Note: weight==0 must always be excluded 173 181 if (weight > cutoff) { 174 182 norm += weight; … … 180 188 #endif 181 189 for (int i=0; i < nq; i++) { 182 { 183 const double scattering = CALL_IQ(q, nq, i, local_pars); 190 const double scattering = CALL_IQ(q, i, local_pars); 184 191 result[i] += weight*scattering; 185 192 } 193 } 186 194 } 187 195 //Makes a normalization avialable for the next round … … 191 199 192 200 //End of the PD loop we can normalize 193 if (pd_stop == pd_stride[MAX_PD-1]) {}201 if (pd_stop >= problem->pd_stride[MAX_PD-1]) { 194 202 #ifdef USE_OPENMP 195 203 #pragma omp parallel for -
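The loop above can be prototyped in pure Python to check the bookkeeping: walk the polydispersity hypercube with one flat index, form the weight as a product of per-dimension weights, skip points at or below the cutoff, and normalize at the end. Volume normalization and the spherical correction are omitted here; the kernel and parameters are illustrative::

    import numpy as np

    def pd_loop(Iq, q, pd_values, pd_weights, cutoff=0.0):
        lengths = [len(w) for w in pd_weights]
        result = np.zeros_like(q)
        norm = 0.0
        for flat_index in range(int(np.prod(lengths))):
            pd_index = np.unravel_index(flat_index, lengths)
            weight = np.prod([w[i] for w, i in zip(pd_weights, pd_index)])
            if weight <= cutoff:          # weight == 0 must always be excluded
                continue
            pars = [v[i] for v, i in zip(pd_values, pd_index)]
            result += weight * Iq(q, *pars)
            norm += weight
        return result / norm if norm > 0.0 else result

    # toy kernel with a radius distribution
    q = np.linspace(0.001, 0.5, 5)
    values, weights = np.array([18.0, 20.0, 22.0]), np.array([0.2, 0.6, 0.2])
    print(pd_loop(lambda q, r: 1.0 / (1.0 + (q * r)**2), q, [values], [weights]))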
sasmodels/kerneldll.py
r17bbadd r303d8d6 61 61 if sys.platform == 'darwin': 62 62 #COMPILE = "gcc-mp-4.7 -shared -fPIC -std=c99 -fopenmp -O2 -Wall %s -o %s -lm -lgomp" 63 COMPILE = "gcc -shared -fPIC -std=c99 -O2 -Wall %(source)s -o %(output)s -lm "63 COMPILE = "gcc -shared -fPIC -std=c99 -O2 -Wall %(source)s -o %(output)s -lm -fno-unknown-pragmas" 64 64 elif os.name == 'nt': 65 65 # call vcvarsall.bat before compiling to set path, headers, libs, etc. … … 80 80 COMPILE = "gcc -shared -fPIC -std=c99 -O2 -Wall %(source)s -o %(output)s -lm" 81 81 if "SAS_OPENMP" in os.environ: 82 COMPILE = COMPILE + " -fopenmp" 82 COMPILE += " -fopenmp" 83 else: 84 COMPILE += " -fWno-unknown-pragmas" 83 85 else: 84 86 COMPILE = "cc -shared -fPIC -fopenmp -std=c99 -O2 -Wall %(source)s -o %(output)s -lm" … … 140 142 141 143 source = generate.convert_type(source, dtype) 142 source_files = generate.model_sources(model_info) + [model_info['filename']] 144 source_files = (generate.model_sources(model_info) 145 + [model_info['filename']] 146 + generate.model_templates()) 143 147 dll = dll_path(model_info, dtype) 144 148 newest = max(os.path.getmtime(f) for f in source_files) … … 170 174 return DllModel(filename, model_info, dtype=dtype) 171 175 172 173 IQ_ARGS = [c_void_p, c_void_p, c_int]174 IQXY_ARGS = [c_void_p, c_void_p, c_void_p, c_int]175 176 176 class DllModel(object): 177 177 """ … … 195 195 196 196 def _load_dll(self): 197 Nfixed1d = len(self.info['partype']['fixed-1d'])198 Nfixed2d = len(self.info['partype']['fixed-2d'])199 Npd1d = len(self.info['partype']['pd-1d'])200 Npd2d = len(self.info['partype']['pd-2d'])201 202 197 #print("dll", self.dllpath) 203 198 try: … … 210 205 else c_double if self.dtype == generate.F64 211 206 else c_longdouble) 212 pd_args_1d = [c_void_p, fp] + [c_int]*Npd1d if Npd1d else [] 213 pd_args_2d = [c_void_p, fp] + [c_int]*Npd2d if Npd2d else [] 207 208 # int, int, int, int*, double*, double*, double*, double*, double*, double 209 argtypes = [c_int]*3 + [c_void_p]*5 + [fp] 214 210 self.Iq = self.dll[generate.kernel_name(self.info, False)] 215 self.Iq.argtypes = IQ_ARGS + pd_args_1d + [fp]*Nfixed1d216 217 211 self.Iqxy = self.dll[generate.kernel_name(self.info, True)] 218 self.Iqxy.argtypes = IQXY_ARGS + pd_args_2d + [fp]*Nfixed2d 212 self.Iq.argtypes = argtypes 213 self.Iqxy.argtypes = argtypes 219 214 220 215 def __getstate__(self): … … 261 256 self.q_input = q_input 262 257 self.kernel = kernel 263 self.res = np.empty(q_input.nq , q_input.dtype)258 self.res = np.empty(q_input.nq+3, q_input.dtype) 264 259 dim = '2d' if q_input.is_2d else '1d' 265 self.fixed_pars = model_info['partype']['fixed-' + dim] 266 self.pd_pars = model_info['partype']['pd-' + dim] 260 self.parameters = model_info['par_type'][dim] 267 261 268 262 # In dll kernel, but not in opencl kernel 269 263 self.p_res = self.res.ctypes.data 270 264 271 def __call__(self, fixed_pars, pd_pars, cutoff):265 def __call__(self, details, values, weights, cutoff): 272 266 real = (np.float32 if self.q_input.dtype == generate.F32 273 267 else np.float64 if self.q_input.dtype == generate.F64 274 268 else np.float128) 275 276 nq = c_int(self.q_input.nq) 277 if pd_pars: 278 cutoff = real(cutoff) 279 loops_N = [np.uint32(len(p[0])) for p in pd_pars] 280 loops = np.hstack(pd_pars) 281 loops = np.ascontiguousarray(loops.T, self.q_input.dtype).flatten() 282 p_loops = loops.ctypes.data 283 dispersed = [p_loops, cutoff] + loops_N 284 else: 285 dispersed = [] 286 fixed = [real(p) for p in fixed_pars] 287 args = self.q_input.q_pointers + [self.p_res, nq] + 
dispersed + fixed 288 #print(pars) 269 args = [ 270 self.q_input.nq, # nq 271 0, # pd_start 272 1, # pd_stop 273 details.ctypes.data, # problem 274 weights.ctypes.data, # weights 275 values.ctypes.data, #pars 276 self.q_input.q_pointers[0], #q 277 self.p_res, # results 278 real(cutoff), # cutoff 279 ] 289 280 self.kernel(*args) 290 281 291 return self.res 282 return self.res[:-3] 292 283 293 284 def release(self): -
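The flattened calling convention shown above — (nq, pd_start, pd_stop, problem, weights, values, q, result, cutoff) — is easy to bind with ctypes. A hedged sketch; the library path and symbol name are placeholders, and the details array is assumed to match the kernel's ProblemDetails layout::

    from ctypes import CDLL, c_double, c_int, c_void_p
    import numpy as np

    def bind_kernel(dll_path, symbol):
        kernel = CDLL(dll_path)[symbol]
        # int, int, int, int*, double*, double*, double*, double*, double
        kernel.argtypes = [c_int]*3 + [c_void_p]*5 + [c_double]
        kernel.restype = None
        return kernel

    def run_kernel(kernel, details, weights, values, q, cutoff=0.0):
        # nq I(q) values plus three accumulators (norm, vol, norm_vol)
        result = np.empty(len(q) + 3, dtype=np.float64)
        kernel(len(q), 0, 1,                   # nq, pd_start, pd_stop
               details.ctypes.data, weights.ctypes.data, values.ctypes.data,
               q.ctypes.data, result.ctypes.data,
               cutoff)
        return result[:-3]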
sasmodels/models/cylinder.c
r50e1e40 r03cac08 3 3 double Iqxy(double qx, double qy, double sld, double solvent_sld, 4 4 double radius, double length, double theta, double phi); 5 6 #define INVALID(v) (v.radius<0 || v.length<0) 5 7 6 8 double form_volume(double radius, double length) … … 15 17 double length) 16 18 { 17 // TODO: return NaN if radius<0 or length<0?18 19 // precompute qr and qh to save time in the loop 19 20 const double qr = q*radius; … … 47 48 double phi) 48 49 { 49 // TODO: return NaN if radius<0 or length<0?50 50 double sn, cn; // slots to hold sincos function output 51 51 -
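A Python analogue of the new INVALID(v) guard, following the TODO comments it replaces (return NaN rather than evaluating a non-physical parameter set); the wrapper is illustrative::

    import numpy as np

    def cylinder_invalid(radius, length):
        return radius < 0 or length < 0

    def guarded_Iq(Iq, q, radius, length, **pars):
        if cylinder_invalid(radius, length):
            return np.full_like(q, np.nan)   # flag the evaluation as invalid
        return Iq(q, radius=radius, length=length, **pars)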
sasmodels/models/gel_fit.c
r30b4ddf r03cac08 1 double form_volume(void); 2 3 double Iq(double q, 4 double guinier_scale, 5 double lorentzian_scale, 6 double gyration_radius, 7 double fractal_exp, 8 double cor_length); 9 10 double Iqxy(double qx, double qy, 11 double guinier_scale, 12 double lorentzian_scale, 13 double gyration_radius, 14 double fractal_exp, 15 double cor_length); 16 17 static double _gel_fit_kernel(double q, 1 static double Iq(double q, 18 2 double guinier_scale, 19 3 double lorentzian_scale, … … 24 8 // Lorentzian Term 25 9 ////////////////////////double a(x[i]*x[i]*zeta*zeta); 26 double lorentzian_term = q*q*cor_length*cor_length;10 double lorentzian_term = square(q*cor_length); 27 11 lorentzian_term = 1.0 + ((fractal_exp + 1.0)/3.0)*lorentzian_term; 28 12 lorentzian_term = pow(lorentzian_term, (fractal_exp/2.0) ); … … 30 14 // Exponential Term 31 15 ////////////////////////double d(x[i]*x[i]*rg*rg); 32 double exp_term = q*q*gyration_radius*gyration_radius;16 double exp_term = square(q*gyration_radius); 33 17 exp_term = exp(-1.0*(exp_term/3.0)); 34 18 … … 37 21 return result; 38 22 } 39 double form_volume(void){40 // Unused, so free to return garbage.41 return NAN;42 }43 44 double Iq(double q,45 double guinier_scale,46 double lorentzian_scale,47 double gyration_radius,48 double fractal_exp,49 double cor_length)50 {51 return _gel_fit_kernel(q,52 guinier_scale,53 lorentzian_scale,54 gyration_radius,55 fractal_exp,56 cor_length);57 }58 59 // Iqxy is never called since no orientation or magnetic parameters.60 double Iqxy(double qx, double qy,61 double guinier_scale,62 double lorentzian_scale,63 double gyration_radius,64 double fractal_exp,65 double cor_length)66 {67 double q = sqrt(qx*qx + qy*qy);68 return _gel_fit_kernel(q,69 guinier_scale,70 lorentzian_scale,71 gyration_radius,72 fractal_exp,73 cor_length);74 }75 -
sasmodels/product.py
r72a081d r303d8d6 99 99 # a parameter map. 100 100 par_map = {} 101 p_info = p_kernel.info['par type']102 s_info = s_kernel.info['par type']101 p_info = p_kernel.info['par_type'] 102 s_info = s_kernel.info['par_type'] 103 103 vol_pars = set(p_info['volume']) 104 104 if dim == '2d': -
sasmodels/resolution.py
ra146eaa r303d8d6 504 504 from scipy.integrate import romberg 505 505 506 if any(k not in form.info['defaults'] for k in pars.keys()):507 keys = set(form.info['defaults'].keys())508 extra = set(pars.keys()) - keys506 par_set = set([p.name for p in form.info['parameters']]) 507 if any(k not in par_set for k in pars.keys()): 508 extra = set(pars.keys()) - par_set 509 509 raise ValueError("bad parameters: [%s] not in [%s]"% 510 510 (", ".join(sorted(extra)), ", ".join(sorted(keys)))) … … 558 558 from scipy.integrate import romberg 559 559 560 if any(k not in form.info['defaults'] for k in pars.keys()):561 keys = set(form.info['defaults'].keys())562 extra = set(pars.keys()) - keys560 par_set = set([p.name for p in form.info['parameters']]) 561 if any(k not in par_set for k in pars.keys()): 562 extra = set(pars.keys()) - par_set 563 563 raise ValueError("bad parameters: [%s] not in [%s]"% 564 564 (", ".join(sorted(extra)), ", ".join(sorted(keys)))) -
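The validation added here is a simple set difference over the parameter names. A self-contained sketch (Parameter is a stand-in for the model's parameter objects); the sketch reports the valid names from the same set it checks against::

    from collections import namedtuple

    Parameter = namedtuple("Parameter", "name")

    def check_pars(parameters, pars):
        par_set = set(p.name for p in parameters)
        extra = set(pars.keys()) - par_set
        if extra:
            raise ValueError("bad parameters: [%s] not in [%s]"
                             % (", ".join(sorted(extra)),
                                ", ".join(sorted(par_set))))

    try:
        check_pars([Parameter("radius"), Parameter("length")],
                   {"radius": 20.0, "radim": 1.0})
    except ValueError as exc:
        print(exc)   # bad parameters: [radim] not in [length, radius]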
sasmodels/kernelpy.py
rd5ac45f r31d22de 128 128 129 129 def __call__(self, fixed, pd, cutoff=1e-5): 130 print("fixed",fixed)131 print("pd", pd)130 #print("fixed",fixed) 131 #print("pd", pd) 132 132 args = self.args[:] # grab a copy of the args 133 133 form, form_volume = self.kernel, self.info['form_volume'] … … 187 187 ################################################################ 188 188 189 #TODO: Wojtek's notes190 #TODO: The goal is to restructure polydispersity loop191 #so it allows fitting arbitrary polydispersity parameters192 #and it accepts cases like coupled parameters193 189 weight = np.empty(len(pd), 'd') 194 190 if weight.size > 0: … … 264 260 result = scale * ret / norm + background 265 261 return result 266 267 """268 Python driver for python kernels269 270 Calls the kernel with a vector of $q$ values for a single parameter set.271 Polydispersity is supported by looping over different parameter sets and272 summing the results. The interface to :class:`PyModel` matches those for273 :class:`kernelcl.GpuModel` and :class:`kerneldll.DllModel`.274 """275 import numpy as np276 from numpy import pi, cos277 278 from .generate import F64279 280 class PyModel(object):281 """282 Wrapper for pure python models.283 """284 def __init__(self, model_info):285 self.info = model_info286 287 def __call__(self, q_vectors):288 q_input = PyInput(q_vectors, dtype=F64)289 kernel = self.info['Iqxy'] if q_input.is_2d else self.info['Iq']290 return PyKernel(kernel, self.info, q_input)291 292 def release(self):293 """294 Free resources associated with the model.295 """296 pass297 298 class PyInput(object):299 """300 Make q data available to the gpu.301 302 *q_vectors* is a list of q vectors, which will be *[q]* for 1-D data,303 and *[qx, qy]* for 2-D data. Internally, the vectors will be reallocated304 to get the best performance on OpenCL, which may involve shifting and305 stretching the array to better match the memory architecture. Additional306 points will be evaluated with *q=1e-3*.307 308 *dtype* is the data type for the q vectors. The data type should be309 set to match that of the kernel, which is an attribute of310 :class:`GpuProgram`. Note that not all kernels support double311 precision, so even if the program was created for double precision,312 the *GpuProgram.dtype* may be single precision.313 314 Call :meth:`release` when complete. Even if not called directly, the315 buffer will be released when the data object is freed.316 """317 def __init__(self, q_vectors, dtype):318 self.nq = q_vectors[0].size319 self.dtype = dtype320 self.is_2d = (len(q_vectors) == 2)321 self.q_vectors = [np.ascontiguousarray(q, self.dtype) for q in q_vectors]322 self.q_pointers = [q.ctypes.data for q in self.q_vectors]323 324 def release(self):325 """326 Free resources associated with the model inputs.327 """328 self.q_vectors = []329 330 class PyKernel(object):331 """332 Callable SAS kernel.333 334 *kernel* is the DllKernel object to call.335 336 *model_info* is the module information337 338 *q_input* is the DllInput q vectors at which the kernel should be339 evaluated.340 341 The resulting call method takes the *pars*, a list of values for342 the fixed parameters to the kernel, and *pd_pars*, a list of (value,weight)343 vectors for the polydisperse parameters. 
*cutoff* determines the344 integration limits: any points with combined weight less than *cutoff*345 will not be calculated.346 347 Call :meth:`release` when done with the kernel instance.348 """349 def __init__(self, kernel, model_info, q_input):350 self.info = model_info351 self.q_input = q_input352 self.res = np.empty(q_input.nq, q_input.dtype)353 dim = '2d' if q_input.is_2d else '1d'354 # Loop over q unless user promises that the kernel is vectorized by355 # taggining it with vectorized=True356 if not getattr(kernel, 'vectorized', False):357 if dim == '2d':358 def vector_kernel(qx, qy, *args):359 """360 Vectorized 2D kernel.361 """362 return np.array([kernel(qxi, qyi, *args)363 for qxi, qyi in zip(qx, qy)])364 else:365 def vector_kernel(q, *args):366 """367 Vectorized 1D kernel.368 """369 return np.array([kernel(qi, *args)370 for qi in q])371 self.kernel = vector_kernel372 else:373 self.kernel = kernel374 fixed_pars = model_info['partype']['fixed-' + dim]375 pd_pars = model_info['partype']['pd-' + dim]376 vol_pars = model_info['partype']['volume']377 378 # First two fixed pars are scale and background379 pars = [p.name for p in model_info['parameters'][2:]]380 offset = len(self.q_input.q_vectors)381 self.args = self.q_input.q_vectors + [None] * len(pars)382 self.fixed_index = np.array([pars.index(p) + offset383 for p in fixed_pars[2:]])384 self.pd_index = np.array([pars.index(p) + offset385 for p in pd_pars])386 self.vol_index = np.array([pars.index(p) + offset387 for p in vol_pars])388 try: self.theta_index = pars.index('theta') + offset389 except ValueError: self.theta_index = -1390 391 # Caller needs fixed_pars and pd_pars392 self.fixed_pars = fixed_pars393 self.pd_pars = pd_pars394 395 def __call__(self, fixed, pd, cutoff=1e-5):396 #print("fixed",fixed)397 #print("pd", pd)398 args = self.args[:] # grab a copy of the args399 form, form_volume = self.kernel, self.info['form_volume']400 # First two fixed401 scale, background = fixed[:2]402 for index, value in zip(self.fixed_index, fixed[2:]):403 args[index] = float(value)404 res = _loops(form, form_volume, cutoff, scale, background, args,405 pd, self.pd_index, self.vol_index, self.theta_index)406 407 return res408 409 def release(self):410 """411 Free resources associated with the kernel.412 """413 self.q_input = None414 415 def _loops(form, form_volume, cutoff, scale, background,416 args, pd, pd_index, vol_index, theta_index):417 """418 *form* is the name of the form function, which should be vectorized over419 q, but otherwise have an interface like the opencl kernels, with the420 q parameters first followed by the individual parameters in the order421 given in model.parameters (see :mod:`sasmodels.generate`).422 423 *form_volume* calculates the volume of the shape. *vol_index* gives424 the list of volume parameters425 426 *cutoff* ignores the corners of the dispersion hypercube427 428 *scale*, *background* multiplies the resulting form and adds an offset429 430 *args* is the prepopulated set of arguments to the form function, starting431 with the q vectors, and including slots for all the parameters. 
The432 values for the parameters will be substituted with values from the433 dispersion functions.434 435 *pd* is the list of dispersion parameters436 437 *pd_index* are the indices of the dispersion parameters in *args*438 439 *vol_index* are the indices of the volume parameters in *args*440 441 *theta_index* is the index of the theta parameter for the sasview442 spherical correction, or -1 if there is no angular dispersion443 """444 445 ################################################################446 # #447 # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! #448 # !! !! #449 # !! KEEP THIS CODE CONSISTENT WITH KERNEL_TEMPLATE.C !! #450 # !! !! #451 # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! #452 # #453 ################################################################454 455 weight = np.empty(len(pd), 'd')456 if weight.size > 0:457 # weight vector, to be populated by polydispersity loops458 459 # identify which pd parameters are volume parameters460 vol_weight_index = np.array([(index in vol_index) for index in pd_index])461 462 # Sort parameters in decreasing order of pd length463 Npd = np.array([len(pdi[0]) for pdi in pd], 'i')464 order = np.argsort(Npd)[::-1]465 stride = np.cumprod(Npd[order])466 pd = [pd[index] for index in order]467 pd_index = pd_index[order]468 vol_weight_index = vol_weight_index[order]469 470 fast_value = pd[0][0]471 fast_weight = pd[0][1]472 else:473 stride = np.array([1])474 vol_weight_index = slice(None, None)475 # keep lint happy476 fast_value = [None]477 fast_weight = [None]478 479 ret = np.zeros_like(args[0])480 norm = np.zeros_like(ret)481 vol = np.zeros_like(ret)482 vol_norm = np.zeros_like(ret)483 for k in range(stride[-1]):484 # update polydispersity parameter values485 fast_index = k % stride[0]486 if fast_index == 0: # bottom loop complete ... check all other loops487 if weight.size > 0:488 for i, index, in enumerate(k % stride):489 args[pd_index[i]] = pd[i][0][index]490 weight[i] = pd[i][1][index]491 else:492 args[pd_index[0]] = fast_value[fast_index]493 weight[0] = fast_weight[fast_index]494 495 # Computes the weight, and if it is not sufficient then ignore this496 # parameter set.497 # Note: could precompute w1*...*wn so we only need to multiply by w0498 w = np.prod(weight)499 if w > cutoff:500 # Note: can precompute spherical correction if theta_index is not501 # the fast index. Correction factor for spherical integration502 #spherical_correction = abs(cos(pi*args[phi_index])) if phi_index>=0 else 1.0503 spherical_correction = (abs(cos(pi * args[theta_index])) * pi / 2504 if theta_index >= 0 else 1.0)505 #spherical_correction = 1.0506 507 # Call the scattering function and adds it to the total.508 I = form(*args)509 if np.isnan(I).any(): continue510 ret += w * I * spherical_correction511 norm += w512 513 # Volume normalization.514 # If there are "volume" polydispersity parameters, then these515 # will be used to call the form_volume function from the user516 # supplied kernel, and accumulate a normalized weight.517 # Note: can precompute volume norm if fast index is not a volume518 if form_volume:519 vol_args = [args[index] for index in vol_index]520 vol_weight = np.prod(weight[vol_weight_index])521 vol += vol_weight * form_volume(*vol_args)522 vol_norm += vol_weight523 524 positive = (vol * vol_norm != 0.0)525 ret[positive] *= vol_norm[positive] / vol[positive]526 result = scale * ret / norm + background527 return result
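The vector_kernel wrappers restored above have a compact standalone form: a scalar pure-python Iq (one without a vectorized=True attribute) is looped over the q vector, while a vectorized kernel is used directly. The toy kernel is illustrative::

    import numpy as np

    def vectorize_kernel(kernel, is_2d=False):
        if getattr(kernel, "vectorized", False):
            return kernel                      # already vectorized over q
        if is_2d:
            return lambda qx, qy, *args: np.array(
                [kernel(qxi, qyi, *args) for qxi, qyi in zip(qx, qy)])
        return lambda q, *args: np.array([kernel(qi, *args) for qi in q])

    scalar_Iq = lambda q, radius: 1.0 / (1.0 + (q * radius)**3)
    Iq = vectorize_kernel(scalar_Iq)
    print(Iq(np.linspace(0.001, 0.5, 5), 20.0))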