Changeset bf6631a in sasmodels
- Timestamp: Jan 28, 2016 8:42:56 AM
- Branches: master, core_shell_microgels, costrafo411, magnetic_model, release_v0.94, release_v0.95, ticket-1257-vesicle-product, ticket_1156, ticket_1265_superball, ticket_822_more_unit_tests
- Children: 4f2478e, 168052c
- Parents: f94d8a2 (diff), 384d114 (diff)
- Files: 5 added, 10 edited

Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
extra/pylint.rc (r5ef0633 → r37a7252)

@@ -6 +6 @@
 # Python code to execute, usually for sys.path manipulation such as
 # pygtk.require().
-#init-hook=
+#init-hook='import os, sys; sys.path.extend([os.getcwd(),os.path.join(os.getcwd(),"extra")])'
 
 # Profiled execution.
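The new `init-hook` value is a line of Python that pylint executes at startup. A minimal standalone sketch of what that hook does (assuming pylint is launched from the repository root, so that the `extra` subdirectory resolves correctly):

    import os, sys

    # Mirror of the pylint init-hook: put the working directory and its
    # "extra" subdirectory on sys.path so pylint can import project code
    # and local plugins during analysis.
    sys.path.extend([os.getcwd(), os.path.join(os.getcwd(), "extra")])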
sasmodels/__init__.py (r9890053 → r37a7252)

@@ -1 +1 @@
+"""
+sasmodels
+=========
+
+**sasmodels** is a package containing models for small angle neutron and xray
+scattering. Models supported are the one dimensional circular average and
+two dimensional oriented patterns. As well as the form factor calculations
+for the individual shapes **sasmodels** also provides automatic shape
+polydispersity, angular dispersion and resolution convolution. SESANS
+patterns can be computed for any model.
+
+Models can be written in python or in C. C models can run on the GPU if
+OpenCL drivers are available. See :mod:`generate` for details on
+defining new models.
+"""
+
 __version__ = "0.9"
sasmodels/bumps_model.py (rec7e360 → r37a7252)

@@ -6 +6 @@
 arguments to set the initial value for each parameter.
 
-:class:`Experiment` combines the *Model* function with a data file loaded by the
-sasview data loader. *Experiment* takes a *cutoff* parameter controlling
+:class:`Experiment` combines the *Model* function with a data file loaded by
+the sasview data loader. *Experiment* takes a *cutoff* parameter controlling
 how far the polydispersity integral extends.
 
 """
+
+__all__ = [
+    "Model", "Experiment",
+    ]
 
 import warnings

@@ -20 +24 @@
 
 # CRUFT: old style bumps wrapper which doesn't separate data and model
+# pylint: disable=invalid-name
 def BumpsModel(data, model, cutoff=1e-5, **kw):
+    r"""
+    Bind a model to data, along with a polydispersity cutoff.
+
+    *data* is a :class:`data.Data1D`, :class:`data.Data2D` or
+    :class:`data.Sesans` object.  Use :func:`data.empty_data1D` or
+    :func:`data.empty_data2D` to define $q, \Delta q$ calculation
+    points for displaying the SANS curve when there is no measured data.
+
+    *model* is a runnable module as returned from :func:`core.load_model`.
+
+    *cutoff* is the polydispersity weight cutoff.
+
+    Any additional *key=value* pairs are model dependent parameters.
+
+    Returns an :class:`Experiment` object.
+
+    Note that the usual Bumps semantics is not fully supported, since
+    assigning *M.name = parameter* on the returned experiment object
+    does not set that parameter in the model.  Range setting will still
+    work as expected though.
+
+    .. deprecated:: 0.1
+        Use :class:`Experiment` instead.
+    """
     warnings.warn("Use of BumpsModel is deprecated. Use bumps_model.Experiment instead.")
+
+    # Create the model and experiment
     model = Model(model, **kw)
     experiment = Experiment(data=data, model=model, cutoff=cutoff)
-    for k in model._parameter_names:
-        setattr(experiment, k, getattr(model, k))
+
+    # Copy the model parameters up to the experiment object.
+    for k, v in model.parameters().items():
+        setattr(experiment, k, v)
     return experiment
 
+
 def create_parameters(model_info, **kwargs):
+    """
+    Generate Bumps parameters from the model info.
+
+    *model_info* is returned from :func:`generate.model_info` on the
+    model definition module.
+
+    Any additional *key=value* pairs are initial values for the parameters
+    to the models.  Uninitialized parameters will use the model default
+    value.
+
+    Returns a dictionary of *{name: Parameter}* containing the bumps
+    parameters for each model parameter, and a dictionary of
+    *{name: str}* containing the polydispersity distribution types.
+    """
     # lazy import; this allows the doc builder and nosetests to run even
     # when bumps is not on the path.
     from bumps.names import Parameter
-
-    partype = model_info['partype']
 
     pars = {}

@@ -40 +86 @@
         value = kwargs.pop(name, default)
         pars[name] = Parameter.default(value, name=name, limits=limits)
-    for name in partype['pd-2d']:
+    for name in model_info['partype']['pd-2d']:
         for xpart, xdefault, xlimits in [
-                ('_pd', 0., limits),
-                ('_pd_n', 35., (0, 1000)),
-                ('_pd_nsigma', 3., (0, 10)),
-                ]:
+                ('_pd', 0., limits),
+                ('_pd_n', 35., (0, 1000)),
+                ('_pd_nsigma', 3., (0, 10)),
+                ]:
             xname = name + xpart
             xvalue = kwargs.pop(xname, xdefault)

@@ -51 +97 @@
 
     pd_types = {}
-    for name in partype['pd-2d']:
+    for name in model_info['partype']['pd-2d']:
         xname = name + '_pd_type'
         xvalue = kwargs.pop(xname, 'gaussian')

@@ -63 +109 @@
 
 class Model(object):
+    """
+    Bumps wrapper for a SAS model.
+
+    *model* is a runnable module as returned from :func:`core.load_model`.
+
+    *cutoff* is the polydispersity weight cutoff.
+
+    Any additional *key=value* pairs are model dependent parameters.
+    """
     def __init__(self, model, **kwargs):
         self._sasmodel = model
         pars, pd_types = create_parameters(model.info, **kwargs)
-        for k, v in pars.items():
+        for k, v in pars.items():
             setattr(self, k, v)
-        for k, v in pd_types.items():
+        for k, v in pd_types.items():
             setattr(self, k, v)
         self._parameter_names = list(pars.keys())

@@ -75 +130 @@
     def parameters(self):
         """
-        Return a dictionary of parameters
+        Return a dictionary of parameters objects for the parameters,
+        excluding polydispersity distribution type.
         """
         return dict((k, getattr(self, k)) for k in self._parameter_names)
 
     def state(self):
+        """
+        Return a dictionary of current values for all the parameters,
+        including polydispersity distribution type.
+        """
         pars = dict((k, getattr(self, k).value) for k in self._parameter_names)
         pars.update((k, getattr(self, k)) for k in self._pd_type_names)

@@ -85 +145 @@
 
 class Experiment(DataMixin):
-    """
-    Return a bumps wrapper for a SAS model.
-
-    *data* is the data to be fitted.
-
-    *model* is the SAS model from :func:`core.load_model`.
+    r"""
+    Bumps wrapper for a SAS experiment.
+
+    *data* is a :class:`data.Data1D`, :class:`data.Data2D` or
+    :class:`data.Sesans` object.  Use :func:`data.empty_data1D` or
+    :func:`data.empty_data2D` to define $q, \Delta q$ calculation
+    points for displaying the SANS curve when there is no measured data.
+
+    *model* is a :class:`Model` object.
 
     *cutoff* is the integration cutoff, which avoids computing the
     the SAS model where the polydispersity weight is low.
 
-    Model parameters can be initialized with additional keyword
-    arguments, or by assigning to model.parameter_name.value.
-
-    The resulting bumps model can be used directly in a FitProblem call.
+    The resulting model can be used directly in a Bumps FitProblem call.
     """
     def __init__(self, data, model, cutoff=1e-5):

@@ -109 +169 @@
 
     def update(self):
+        """
+        Call when model parameters have changed and theory needs to be
+        recalculated.
+        """
         self._cache = {}
 
     def numpoints(self):
         """
-        Return the number of points
+        Return the number of data points
         """
         return len(self.Iq)

@@ -124 +188 @@
 
     def theory(self):
+        """
+        Return the theory corresponding to the model parameters.
+
+        This method uses lazy evaluation, and requires model.update() to be
+        called when the parameters have changed.
+        """
         if 'theory' not in self._cache:
             pars = self.model.state()

@@ -130 +200 @@
 
     def residuals(self):
+        """
+        Return theory minus data normalized by uncertainty.
+        """
         #if np.any(self.err ==0): print("zeros in err")
         return (self.theory() - self.Iq) / self.dIq
 
     def nllf(self):
+        """
+        Return the negative log likelihood of seeing data given the model
+        parameters, up to a normalizing constant which depends on the data
+        uncertainty.
+        """
         delta = self.residuals()
         #if np.any(np.isnan(R)): print("NaN in residuals")

@@ -149 +227 @@
 
     def simulate_data(self, noise=None):
+        """
+        Generate simulated data.
+        """
         Iq = self.theory()
         self._set_data(Iq, noise)
 
     def save(self, basename):
+        """
+        Save the model parameters and data into a file.
+
+        Not Implemented.
+        """
         pass
 
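The new docstrings describe a two-step wrapping: `Model` turns model parameters into Bumps `Parameter` objects, and `Experiment` binds the model to a dataset for fitting. A hypothetical usage sketch assembled from those docstrings; the model name `cylinder`, the parameter values, and the fit range are illustrative only, not part of the changeset:

    import numpy as np
    from bumps.names import FitProblem
    from sasmodels import core
    from sasmodels.bumps_model import Model, Experiment
    from sasmodels.data import empty_data1D

    # Calculation points for displaying the curve when there is no
    # measured data, as suggested by the Experiment docstring.
    data = empty_data1D(np.logspace(-3, -1, 200))

    # Load a runnable model as described in the Model docstring.
    definition = core.load_model_definition('cylinder')
    kernel = core.load_model(definition)

    model = Model(kernel, radius=20, length=400)   # initial parameter values
    model.radius.range(10, 50)                     # Bumps range setting

    experiment = Experiment(data=data, model=model, cutoff=1e-5)
    problem = FitProblem(experiment)               # usable directly in a Bumps fit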
sasmodels/compare.py (r608e31e → rd15a908)

@@ -72 +72 @@
 # doc string so that we can display it at run time if there is an error.
 # lin
-__doc__ = __doc__ + """
+__doc__ = (__doc__  # pylint: disable=redefined-builtin
+           + """
 Program description
 -------------------
 
-""" + USAGE
+"""
+           + USAGE)
 
 

@@ -281 +283 @@
             theory = lambda: smearer.get_value()
         else:
-            theory = lambda: model.evalDistribution([data.qx_data[index], data.qy_data[index]])
+            theory = lambda: model.evalDistribution([data.qx_data[index],
+                                                     data.qy_data[index]])
     elif smearer is not None:
         theory = lambda: smearer(model.evalDistribution(data.x))

@@ -416 +419 @@
     try:
         base_value, base_time = time_calculation(base, pars, Nbase)
-        print("%s t=%.1f ms, intensity=%.0f"%(base.engine, base_time, sum(base_value)))
+        print("%s t=%.1f ms, intensity=%.0f"
+              % (base.engine, base_time, sum(base_value)))
     except ImportError:
         traceback.print_exc()

@@ -426 +430 @@
     try:
         comp_value, comp_time = time_calculation(comp, pars, Ncomp)
-        print("%s t=%.1f ms, intensity=%.0f"%(comp.engine, comp_time, sum(comp_value)))
+        print("%s t=%.1f ms, intensity=%.0f"
+              % (comp.engine, comp_time, sum(comp_value)))
     except ImportError:
         traceback.print_exc()

@@ -433 +438 @@
     # Compare, but only if computing both forms
     if Nbase > 0 and Ncomp > 0:
-        #print("speedup %.2g"%(comp_time/base_time))
-        #print("max |base/comp|", max(abs(base_value/comp_value)), "%.15g"%max(abs(base_value)), "%.15g"%max(abs(comp_value)))
-        #comp *= max(base_value/comp_value)
         resid = (base_value - comp_value)
         relerr = resid/comp_value
-        _print_stats("|%s-%s|"%(base.engine, comp.engine) + (" "*(3+len(comp.engine))),
+        _print_stats("|%s-%s|"
+                     % (base.engine, comp.engine) + (" "*(3+len(comp.engine))),
                      resid)
-        _print_stats("|(%s-%s)/%s|"%(base.engine, comp.engine, comp.engine),
+        _print_stats("|(%s-%s)/%s|"
+                     % (base.engine, comp.engine, comp.engine),
                      relerr)

@@ -591 +595 @@
     invalid = [o[1:] for o in flags
               if o[1:] not in NAME_OPTIONS
               and not any(o.startswith('-%s='%t) for t in VALUE_OPTIONS)]
     if invalid:
         print("Invalid options: %s"%(", ".join(invalid)))
 
 
+    # pylint: disable=bad-whitespace
     # Interpret the flags
     opts = {

@@ -651 +656 @@
         elif arg == '-sasview': engines.append(arg[1:])
         elif arg == '-edit': opts['explore'] = True
+    # pylint: enable=bad-whitespace
 
     if len(engines) == 0:

@@ -675 +681 @@
     presets = {}
     for arg in values:
-        k, v = arg.split('=',1)
+        k, v = arg.split('=', 1)
         if k not in pars:
             # extract base name without polydispersity info
             s = set(p.split('_pd')[0] for p in pars)
-            print("%r invalid; parameters are: %s"%(k, ", ".join(sorted(s))))
+            print("%r invalid; parameters are: %s"%(k, ", ".join(sorted(s))))
             sys.exit(1)
         presets[k] = float(v) if not k.endswith('type') else v

@@ -697 +703 @@
 
     # Create the computational engines
-    data, _index = make_data(opts)
+    data, _ = make_data(opts)
     if n1:

@@ -707 +713 @@
         comp = None
 
+    # pylint: disable=bad-whitespace
     # Remember it all
     opts.update({

@@ -718 +725 @@
         'engines' : [base, comp],
         })
+    # pylint: enable=bad-whitespace
 
     return opts
 
-def main():
-    opts = parse_opts()
-    if opts['explore']:
-        explore(opts)
-    else:
-        compare(opts)
 
 def explore(opts):
+    """
+    Explore the model using the Bumps GUI.
+    """
     import wx

@@ -734 +738 @@
 
     problem = FitProblem(Explore(opts))
-    isMac = "cocoa" in wx.version()
+    is_mac = "cocoa" in wx.version()
     app = wx.App()
     frame = AppFrame(parent=None, title="explore")
-    if not isMac: frame.Show()
+    if not is_mac: frame.Show()
     frame.panel.set_model(model=problem)
     frame.panel.Layout()
     frame.panel.aui.Split(0, wx.TOP)
-    if isMac: frame.Show()
+    if is_mac: frame.Show()
     app.MainLoop()
 
 class Explore(object):
     """
-    Return a bumps wrapper for a SAS model comparison.
+    Bumps wrapper for a SAS model comparison.
+
+    The resulting object can be used as a Bumps fit problem so that
+    parameters can be adjusted in the GUI, with plots updated on the fly.
     """
     def __init__(self, opts):

@@ -787 +794 @@
         Return cost.
         """
+        # pylint: disable=no-self-use
         return 0.  # No nllf
 

@@ -804 +812 @@
 
 
+def main():
+    """
+    Main program.
+    """
+    opts = parse_opts()
+    if opts['explore']:
+        explore(opts)
+    else:
+        compare(opts)
+
 if __name__ == "__main__":
     main()
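The comparison core of this module reduces to differencing two engine outputs and normalizing by the comparison engine, as in the `resid`/`relerr` hunk above. A tiny self-contained illustration with invented values:

    import numpy as np

    base_value = np.array([1.00, 2.00, 3.00])   # e.g. single-precision engine
    comp_value = np.array([1.01, 1.98, 3.00])   # e.g. double-precision engine

    resid = base_value - comp_value             # absolute difference
    relerr = resid/comp_value                   # difference relative to comp
    print("max |rel err| = %.3g" % np.max(np.abs(relerr)))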
sasmodels/compare_many.py (r6458608 → rd15a908)

@@ -1 +1 @@
 #!/usr/bin/env python
+"""
+Program to compare results from many random parameter sets for a given model.
+
+The result is a comma separated value (CSV) table that can be redirected
+from standard output into a file and loaded into a spreadsheet.
+
+The models are compared for each parameter set and if the difference is
+greater than expected for that precision, the parameter set is labeled
+as bad and written to the output, along with the random seed used to
+generate that parameter value.  This seed can be used with :mod:`compare`
+to reload and display the details of the model.
+"""
 from __future__ import print_function
 

@@ -14 +26 @@
 
 def calc_stats(target, value, index):
+    """
+    Calculate statistics between the target value and the computed value.
+
+    *target* and *value* are the vectors being compared, with the
+    difference normalized by *target* to get relative error.  Only
+    the elements listed in *index* are used, though index may be
+    and empty slice defined by *slice(None, None)*.
+
+    Returns:
+
+        *maxrel* the maximum relative difference
+
+        *rel95* the relative difference with the 5% biggest differences ignored
+
+        *maxabs* the maximum absolute difference for the 5% biggest differences
+
+        *maxval* the maximum value for the 5% biggest differences
+    """
     resid = abs(value-target)[index]
     relerr = resid/target[index]
-    srel = np.argsort(relerr)
+    sorted_rel_index = np.argsort(relerr)
     #p90 = int(len(relerr)*0.90)
     p95 = int(len(relerr)*0.95)
     maxrel = np.max(relerr)
-    rel95 = relerr[srel[p95]]
-    maxabs = np.max(resid[srel[p95:]])
-    maxval = np.max(value[srel[p95:]])
-    return maxrel, rel95,maxabs,maxval
+    rel95 = relerr[sorted_rel_index[p95]]
+    maxabs = np.max(resid[sorted_rel_index[p95:]])
+    maxval = np.max(value[sorted_rel_index[p95:]])
+    return maxrel, rel95, maxabs, maxval

@@ -27 +57 @@
 def print_column_headers(pars, parts):
+    """
+    Generate column headers for the differences and for the parameters,
+    and print them to standard output.
+    """
     stats = list('Max rel err|95% rel err|Max abs err above 90% rel|Max value above 90% rel'.split('|'))
     groups = ['']

@@ -36 +70 @@
     print(','.join('"%s"'%c for c in columns))
 
+# Target 'good' value for various precision levels.
 PRECISION = {
     'fast': 1e-3,

@@ -48 +83 @@
 def compare_instance(name, data, index, N=1, mono=True, cutoff=1e-5,
                      base='sasview', comp='double'):
-    is2D = hasattr(data, 'qx_data')
+    r"""
+    Compare the model under different calculation engines.
+
+    *name* is the name of the model.
+
+    *data* is the data object giving $q, \Delta q$ calculation points.
+
+    *index* is the active set of points.
+
+    *N* is the number of comparisons to make.
+
+    *cutoff* is the polydispersity weight cutoff to make the calculation
+    a little bit faster.
+
+    *base* and *comp* are the names of the calculation engines to compare.
+    """
+
+    is_2d = hasattr(data, 'qx_data')
     model_definition = core.load_model_definition(name)
     pars = get_demo_pars(model_definition)
     header = ('\n"Model","%s","Count","%d","Dimension","%s"'
-              % (name, N, "2D" if is2D else "1D"))
+              % (name, N, "2D" if is_2d else "1D"))
     if not mono: header += ',"Cutoff",%g'%(cutoff,)
     print(header)
 
-    if is2D:
+    if is_2d:
         info = generate.make_info(model_definition)
         partype = info['partype']

@@ -69 +121 @@
     # declarations are not available in python 2.7.
     def try_model(fn, pars):
+        """
+        Return the model evaluated at *pars*.  If there is an exception,
+        print it and return NaN of the right shape.
+        """
         try:
             result = fn(**pars)

@@ -82 +138 @@
         return result
     def check_model(pars):
+        """
+        Run the two calculators against *pars*, returning statistics
+        on the differences.  See :func:`calc_stats` for the list of stats.
+        """
         base_value = try_model(calc_base, pars)
         comp_value = try_model(calc_comp, pars)

@@ -108 +168 @@
             good = [True]
             columns = check_model(pars_i)
-            columns += [v for _, v in sorted(pars_i.items())]
+            columns += [v for _, v in sorted(pars_i.items())]
             if first:
                 labels = [" vs. ".join((calc_base.engine, calc_comp.engine))]

@@ -121 +181 @@
 
 def print_usage():
+    """
+    Print the command usage string.
+    """
     print("usage: compare_many.py MODEL COUNT (1dNQ|2dNQ) (CUTOFF|mono) (single|double|quad)")
 
 
 def print_models():
+    """
+    Print the list of available models in columns.
+    """
     print(columnize(MODELS, indent=" "))
 
 
 def print_help():
+    """
+    Print usage string, the option description and the list of available models.
+    """
     print_usage()
     print("""\

@@ -158 +227 @@
 
 def main():
-    if len(sys.argv) not in (6,7):
+    """
+    Main program.
+    """
+    if len(sys.argv) not in (6, 7):
         print_help()
         sys.exit(1)

@@ -182 +254 @@
 
     data, index = make_data({'qmax':1.0, 'is2d':is2D, 'nq':Nq, 'res':0.,
-                             'accuracy': 'Low', 'view':'log'})
+                             'accuracy': 'Low', 'view':'log'})
     model_list = [model] if model != "all" else MODELS
     for model in model_list:
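A quick illustration of what the four `calc_stats` statistics report, using the function as defined in the hunk above (the arrays are invented; assumes `compare_many` is importable from the package):

    import numpy as np
    from sasmodels.compare_many import calc_stats  # as defined above

    target = np.array([1.0, 2.0, 4.0, 8.0, 16.0])
    value = target*(1 + np.array([1e-4, -2e-3, 5e-5, 3e-2, 0.0]))

    # Use all points: an "empty" slice as described in the docstring.
    maxrel, rel95, maxabs, maxval = calc_stats(target, value, slice(None, None))
    print(maxrel, rel95, maxabs, maxval)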
sasmodels/convert.py (r29da213 → rd15a908)

@@ -49 +49 @@
     """
     return dict((p, (v*1e6 if p.endswith('sld') else v))
-                for p, v in pars.items())
+                for p, v in pars.items())
 
 def convert_model(name, pars):

@@ -55 +55 @@
     Convert model from old style parameter names to new style.
     """
-    _, _ = name,pars # lint
+    _, _ = name, pars # lint
     raise NotImplementedError
     # need to load all new models in order to determine old=>new

@@ -67 +67 @@
     """
     return dict((p, (v*1e-6 if p.endswith('sld') else v))
-                for p, v in pars.items())
+                for p, v in pars.items())
 
 def _remove_pd(pars, key, name):
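Both helpers rescale only the parameters whose names end in `sld`, converting between the 1e-6 units used by the old-style models and the plain units used by the new ones. A round-trip illustration with an invented parameter set:

    # Hypothetical parameter set with one SLD and one non-SLD parameter.
    pars = {'sld': 1e-6, 'radius': 20.0}

    scaled = dict((p, (v*1e6 if p.endswith('sld') else v))
                  for p, v in pars.items())
    # scaled['sld'] is now ~1.0; 'radius' is untouched.

    restored = dict((p, (v*1e-6 if p.endswith('sld') else v))
                    for p, v in scaled.items())
    print(restored)  # ~ the original pars, up to floating point rounding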
sasmodels/core.py (rcde11f0f → rd15a908)

@@ -2 +2 @@
 Core model handling routines.
 """
-__all__ = ["list_models", "load_model_definition", "precompile_dll",
-           "load_model", "make_kernel", "call_kernel", "call_ER", "call_VR" ]
+__all__ = [
+    "list_models", "load_model_definition", "precompile_dll",
+    "load_model", "make_kernel", "call_kernel", "call_ER", "call_VR",
+    ]
 
 from os.path import basename, dirname, join as joinpath

@@ -61 +63 @@
 
 def isstr(s):
+    """
+    Return True if *s* is a string-like object.
+    """
     try: s + ''
     except: return False

@@ -99 +104 @@
     #    source = open(info['name']+'.cl','r').read()
 
-    if (platform =="dll"
+    if (platform == "dll"
             or not HAVE_OPENCL
             or not kernelcl.environment().has_type(dtype)):

@@ -128 +133 @@
         width = pars.get(name+'_pd', 0.0)
         nsigma = pars.get(name+'_pd_nsigma', 3.0)
-        value, weight = weights.get_weights(
+        value, weight = weights.get_weights(
             disperser, npts, width, nsigma, value, limits, relative)
     return value, weight / np.sum(weight)

@@ -195 +200 @@
                 for name in info['partype']['volume']]
     value, weight = dispersion_mesh(vol_pars)
-    whole, part = VR(*value)
+    whole, part = VR(*value)
     return np.sum(weight*part)/np.sum(weight*whole)
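The `call_VR` hunk computes the volume ratio as a weighted average over the polydispersity mesh. A schematic illustration of that final line with made-up weights and volumes:

    import numpy as np

    # Made-up polydispersity weights and (whole, part) volumes for three
    # points in the dispersion mesh.
    weight = np.array([0.2, 0.5, 0.3])
    whole = np.array([10.0, 12.0, 14.0])
    part = np.array([4.0, 5.0, 6.0])

    # Volume ratio as computed in call_VR: weighted part over weighted whole.
    volume_ratio = np.sum(weight*part)/np.sum(weight*whole)
    print(volume_ratio)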
sasmodels/data.py (r013adb7 → rd15a908)

@@ -284 +284 @@
             mdata[mdata <= 0] = masked
         plt.errorbar(data.x/10, scale*mdata, yerr=data.dy, fmt='.')
-        all_positive = all_positive and (mdata >0).all()
+        all_positive = all_positive and (mdata > 0).all()
         some_present = some_present or (mdata.count() > 0)
 

@@ -292 +292 @@
         mtheory[~np.isfinite(mtheory)] = masked
         if view is 'log':
-            mtheory[mtheory <=0] = masked
+            mtheory[mtheory <= 0] = masked
         plt.plot(data.x/10, scale*mtheory, '-', hold=True)
-        all_positive = all_positive and (mtheory >0).all()
+        all_positive = all_positive and (mtheory > 0).all()
         some_present = some_present or (mtheory.count() > 0)
 

@@ -396 +396 @@
         plt.title('theory')
         h = plt.colorbar()
-        h.set_label(r'$\log_{10}I(q)$' if view =='log'
+        h.set_label(r'$\log_{10}I(q)$' if view == 'log'
                     else r'$q^4 I(q)$' if view == 'q4'
                     else '$I(q)$')

@@ -411 +411 @@
         plt.title('residuals')
         h = plt.colorbar()
-        h.set_label('$\Delta I(q)$')
+        h.set_label(r'$\Delta I(q)$')
 
 
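The plotting hunks consistently mask non-positive intensities before log-scale plotting and then test whether anything remains to plot. A minimal sketch of that masking pattern with invented data:

    import numpy as np
    from numpy.ma import masked_array, masked

    # Invented intensity data containing non-positive values.
    data = np.array([1.0, 0.0, -0.5, 2.0])
    mdata = masked_array(data, mask=np.isnan(data))
    mdata[mdata <= 0] = masked        # points that can't go on a log axis
    some_present = mdata.count() > 0  # any points left to plot?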
sasmodels/resolution.py (r5258859 → rfdc538a)

@@ -135 +135 @@
     """
     #print("apply shapes", theory.shape, weight_matrix.shape)
-    Iq = np.dot(theory[None, :], weight_matrix)
+    Iq = np.dot(theory[None, :], weight_matrix)
     #print("result shape",Iq.shape)
     return Iq.flatten()

@@ -153 +153 @@
     # neither trapezoid nor Simpson's rule improved the accuracy.
     edges = bin_edges(q_calc)
-    edges[edges <0.0] = 0.0 # clip edges below zero
-    G = erf( (edges[:,None] - q[None,:]) / (sqrt(2.0)*q_width)[None,:])
+    edges[edges < 0.0] = 0.0  # clip edges below zero
+    G = erf((edges[:, None] - q[None, :]) / (sqrt(2.0)*q_width)[None, :])
     weights = G[1:] - G[:-1]
-    weights /= np.sum(weights, axis=0)[None, :]
+    weights /= np.sum(weights, axis=0)[None, :]
     return weights

@@ -287 +287 @@
     # The current algorithm is a midpoint rectangle rule.
     q_edges = bin_edges(q_calc) # Note: requires q > 0
-    q_edges[q_edges <0.0] = 0.0 # clip edges below zero
+    q_edges[q_edges < 0.0] = 0.0  # clip edges below zero
     weights = np.zeros((len(q), len(q_calc)), 'd')

@@ -306 +306 @@
             #print(qi - h, qi + h)
             #print(in_x + abs_x)
-            weights[i, :] = (in_x + abs_x) * np.diff(q_edges) / (2*h)
+            weights[i, :] = (in_x + abs_x) * np.diff(q_edges) / (2*h)
         else:
             L = n_height
             for k in range(-L, L+1):
-                weights[i, :] += _q_perp_weights(q_edges, qi+k*h/L, w)
-            weights[i, :] /= 2*L + 1
+                weights[i, :] += _q_perp_weights(q_edges, qi+k*h/L, w)
+            weights[i, :] /= 2*L + 1
 
     return weights.T

@@ -358 +358 @@
     log-scaled data.
     """
-    if len(x) < 2 or (np.diff(x) <0).any():
+    if len(x) < 2 or (np.diff(x) < 0).any():
         raise ValueError("Expected bins to be an increasing set")
     edges = np.hstack([

@@ -373 +373 @@
     """
     step = np.diff(q)
-    index = step >max_step
+    index = step > max_step
     if np.any(index):
         inserts = []
-        for q_i, step_i in zip(q[:-1][index],step[index]):
+        for q_i, step_i in zip(q[:-1][index], step[index]):
             n = np.ceil(step_i/max_step)
-            inserts.extend(q_i + np.arange(1, n)*(step_i/n))
+            inserts.extend(q_i + np.arange(1, n)*(step_i/n))
         # Extend a couple of fringes beyond the end of the data
-        inserts.extend(q[-1] + np.arange(1, 8)*max_step)
-        q_calc = np.sort(np.hstack((q, inserts)))
+        inserts.extend(q[-1] + np.arange(1, 8)*max_step)
+        q_calc = np.sort(np.hstack((q, inserts)))
     else:
         q_calc = q

@@ -399 +399 @@
     if q_min < q[0]:
         if q_min <= 0: q_min = q_min*MIN_Q_SCALE_FOR_NEGATIVE_Q_EXTRAPOLATION
-        n_low = np.ceil((q[0]-q_min) / (q[1]-q[0])) if q[1] >q[0] else 15
+        n_low = np.ceil((q[0]-q_min) / (q[1]-q[0])) if q[1] > q[0] else 15
         q_low = np.linspace(q_min, q[0], n_low+1)[:-1]
     else:
         q_low = []
     if q_max > q[-1]:
-        n_high = np.ceil((q_max-q[-1]) / (q[-1]-q[-2])) if q[-1] >q[-2] else 15
+        n_high = np.ceil((q_max-q[-1]) / (q[-1]-q[-2])) if q[-1] > q[-2] else 15
         q_high = np.linspace(q[-1], q_max, n_high+1)[1:]
     else:

@@ -452 +452 @@
         if q_min < 0: q_min = q[0]*MIN_Q_SCALE_FOR_NEGATIVE_Q_EXTRAPOLATION
         n_low = log_delta_q * (log(q[0])-log(q_min))
-        q_low  = np.logspace(log10(q_min), log10(q[0]), np.ceil(n_low)+1)[:-1]
+        q_low = np.logspace(log10(q_min), log10(q[0]), np.ceil(n_low)+1)[:-1]
     else:
         q_low = []

@@ -489 +489 @@
     that make it slow to evaluate but give it good accuracy.
     """
-    from scipy.integrate import romberg, dblquad
+    from scipy.integrate import romberg
 
     if any(k not in form.info['defaults'] for k in pars.keys()):

@@ -520 +520 @@
             result[i] = r/(2*h)
         else:
-            w_grid = np.linspace(0, w, 21)[None, :]
-            h_grid = np.linspace(-h, h, 23)[:, None]
+            w_grid = np.linspace(0, w, 21)[None, :]
+            h_grid = np.linspace(-h, h, 23)[:, None]
             u = sqrt((qi+h_grid)**2 + w_grid**2)
             Iu = np.interp(u, q_calc, Iq)
             #print(np.trapz(Iu, w_grid, axis=1))
-            Is = np.trapz(np.trapz(Iu, w_grid, axis=1), h_grid[:, 0])
+            Is = np.trapz(np.trapz(Iu, w_grid, axis=1), h_grid[:, 0])
             result[i] = Is / (2*h*w)
-            """
-            r, err = dblquad(_int_wh, -h, h, lambda h: 0., lambda h: w,
-                             args=(qi,))
-            result[i] = r/(w*2*h)
-            """
+            # from scipy.integrate import dblquad
+            # r, err = dblquad(_int_wh, -h, h, lambda h: 0., lambda h: w,
+            #                  args=(qi,))
+            # result[i] = r/(w*2*h)
 

@@ -553 +552 @@
 
     _fn = lambda q, q0, dq: eval_form(q, form, pars)*gaussian(q, q0, dq)
-    r = [romberg(_fn, max(qi-nsigma*dqi, 1e-10*q[0]), qi+nsigma*dqi, args=(qi, dqi),
-                 divmax=100, vec_func=True, tol=0, rtol=1e-8)
-         for qi, dqi in zip(q,q_width)]
+    r = [romberg(_fn, max(qi-nsigma*dqi, 1e-10*q[0]), qi+nsigma*dqi,
+                 args=(qi, dqi), divmax=100, vec_func=True, tol=0, rtol=1e-8)
+         for qi, dqi in zip(q, q_width)]
     return np.asarray(r).flatten()

@@ -595 +594 @@
         theory = self.Iq(resolution.q_calc)
         output = resolution.apply(theory)
-        answer = [ 9.0618, 8.6402, 8.1187, 7.1392, 6.1528,
-                   5.5555, 4.5584, 3.5606, 2.5623, 2.0000 ]
+        answer = [
+            9.0618, 8.6402, 8.1187, 7.1392, 6.1528,
+            5.5555, 4.5584, 3.5606, 2.5623, 2.0000,
+            ]
         np.testing.assert_allclose(output, answer, atol=1e-4)

@@ -608 +609 @@
         theory = 1000*self.Iq(resolution.q_calc**4)
         output = resolution.apply(theory)
-        answer = [ 8.85785, 8.43012, 7.92687, 6.94566, 6.03660,
-                   5.40363, 4.40655, 3.40880, 2.41058, 2.00000 ]
+        answer = [
+            8.85785, 8.43012, 7.92687, 6.94566, 6.03660,
+            5.40363, 4.40655, 3.40880, 2.41058, 2.00000,
+            ]
         np.testing.assert_allclose(output, answer, atol=1e-4)

@@ -620 +623 @@
         theory = self.Iq(resolution.q_calc)
         output = resolution.apply(theory)
-        answer = [ 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0 ]
+        answer = [
+            11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0,
+            ]
         np.testing.assert_allclose(output, answer, atol=1e-4)

@@ -632 +637 @@
         theory = self.Iq(resolution.q_calc)
         output = resolution.apply(theory)
-        answer = [ 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0 ]
+        answer = [
+            11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0,
+            ]
         np.testing.assert_allclose(output, answer, atol=1e-4)

@@ -652 +659 @@
         theory = 12.0-1000.0*resolution.q_calc
         output = resolution.apply(theory)
-        answer = [ 10.44785079, 9.84991299, 8.98101708,
-                   7.99906585, 6.99998311, 6.00001689,
-                   5.00093415, 4.01898292, 3.15008701, 2.55214921]
+        answer = [
+            10.44785079, 9.84991299, 8.98101708,
+            7.99906585, 6.99998311, 6.00001689,
+            5.00093415, 4.01898292, 3.15008701, 2.55214921,
+            ]
         np.testing.assert_allclose(output, answer, atol=1e-8)

@@ -783 +792 @@
         }
     form = load_model('ellipsoid', dtype='double')
-    q = np.logspace(log10(4e-5), log10(2.5e-2), 68)
+    q = np.logspace(log10(4e-5), log10(2.5e-2), 68)
     width, height = 0.117, 0.
     resolution = Slit1D(q, width=width, height=height)

@@ -1071 +1080 @@
     #h = 0.0277790
     resolution = Slit1D(q, w, h)
-    _eval_demo_1d(resolution, title="(%g,%g) Slit Resolution"%(w, h))
+    _eval_demo_1d(resolution, title="(%g,%g) Slit Resolution"%(w, h))
 
 def demo():
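The Gaussian (pinhole) resolution hunk at line 153 builds its weight matrix from differences of the Gaussian CDF (via `erf`) evaluated at the bin edges of the calculation grid, and `apply` reduces to a matrix product. A condensed, self-contained sketch of that computation; the q grids and the stand-in theory curve are invented:

    import numpy as np
    from scipy.special import erf

    # Invented measurement grid q, resolution widths, and calculation grid.
    q = np.linspace(0.01, 0.1, 10)
    q_width = 0.05*q
    q_calc = np.linspace(0.005, 0.12, 50)

    # Bin edges: midpoints of q_calc, extended past both ends.
    edges = np.hstack([q_calc[0] - (q_calc[1]-q_calc[0])/2,
                       (q_calc[1:] + q_calc[:-1])/2,
                       q_calc[-1] + (q_calc[-1]-q_calc[-2])/2])
    edges[edges < 0.0] = 0.0  # clip edges below zero, as in the changeset

    # Gaussian CDF at the edges; adjacent differences give bin weights.
    G = erf((edges[:, None] - q[None, :]) / (np.sqrt(2.0)*q_width)[None, :])
    weights = G[1:] - G[:-1]
    weights /= np.sum(weights, axis=0)[None, :]

    # Apply: each measured point is a weighted sum over the theory grid.
    theory = 1.0/q_calc**2   # stand-in for a model evaluation
    Iq = np.dot(theory[None, :], weights).flatten()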
sasmodels/sesans.py (r3c56da87 → r384d114)

@@ -12 +12 @@
 import numpy as np
 from numpy import pi, exp
-
 from scipy.special import jv as besselj
 
-def make_q(q_zmax, Rmax):
+def make_q(q_max, Rmax):
+    r"""
+    Return a $q$ vector suitable for SESANS covering from $2\pi/ (10 R_{\max})$
+    to $q_max$.
+    """
     q_min = dq = 0.1 * 2*pi / Rmax
-    #q_min = 0.00003
-    return np.arange(q_min, q_zmax, dq)
-
-# TODO: dead code; for now the call to the hankel transform happens in BumpsModel
-class SesansCalculator:
-    def __init__(self, kernel, q_zmax, Rmax, SElength, wavelength, thickness):
-        self._set_kernel(kernel, q_zmax, Rmax)
-        self.SElength = SElength
-        self.wavelength = wavelength
-        self.thickness = thickness
-
-    def _set_kernel(self, kernel, q_zmax, Rmax):
-        kernel_input = kernel.make_input([make_q(q_zmax, Rmax)])
-        self.sans_calculator = kernel(kernel_input)
-
-    def __call__(self, pars, pd_pars, cutoff=1e-5):
-        Iq = self.sans_calculator(pars, pd_pars, cutoff)
-        P = hankel(self.SElength, self.wavelength, self.thickness, self.q, Iq)
-        self.Iq = Iq
-        return P
+    return np.arange(q_min, q_max, dq)
 

@@ -40 +24 @@
 def hankel(SElength, wavelength, thickness, q, Iq):
-    """
+    r"""
     Compute the expected SESANS polarization for a given SANS pattern.
 
-    Uses the hankel transform followed by the exponential.  The values
-    for zz (or spin echo length, or delta), wavelength and sample thickness
-    information should come from the dataset.  *q* should be chosen such
-    that the oscillations in *I(q)* are well sampled (e.g., 5*2*pi/d_max).
+    Uses the hankel transform followed by the exponential.  The values for *zz*
+    (or spin echo length, or delta), wavelength and sample thickness should
+    come from the dataset.  $q$ should be chosen such that the oscillations
+    in $I(q)$ are well sampled (e.g., $5 \cdot 2 \pi/d_{\max}$).
 
-    *SElength* [A] is the set of z points at which to compute the hankel transform
+    *SElength* [A] is the set of $z$ points at which to compute the
+    Hankel transform
 
     *wavelength* [m] is the wavelength of each individual point *zz*

@@ -53 +38 @@
     *thickness* [cm] is the sample thickness.
 
-    *q* [A^{-1}] is the set of q points at which the model has been computed.
-    These should be equally spaced.
+    *q* [A$^{-1}$] is the set of $q$ points at which the model has been
+    computed. These should be equally spaced.
 
-    *I* [cm^{-1}] is the value of the SANS model at *q*
+    *I* [cm$^{-1}$] is the value of the SANS model at *q*
     """
     G = np.zeros(len(SElength), 'd')
     for i in range(len(SElength)):
-        integr = besselj(0, q*SElength[i])*Iq*q
+        integr = besselj(0, q*SElength[i])*Iq*q
         G[i] = np.sum(integr)
-    dq=(q[1]-q[0])*1e10   # [m^-1] step size in q, needed for integration
-    G *= dq*1e10*2*pi # integr step, conver q into [m**-1] and 2 pi circle integr
+
+    # [m^-1] step size in q, needed for integration
+    dq=(q[1]-q[0])*1e10
+
+    # integration step, convert q into [m**-1] and 2 pi circle integration
+    G *= dq*1e10*2*pi
+
     P = exp(thickness*wavelength**2/(4*pi**2)*(G-G[0]))
 
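The rewritten `hankel` docstring pins down the discrete transform: a zeroth-order Bessel sum over $q$ for each spin-echo length, a unit conversion into m$^{-1}$, and a final exponential. A compact sketch using the same formulas as the hunk above; the spin-echo lengths, wavelength, thickness, and the placeholder $I(q)$ are invented:

    import numpy as np
    from numpy import pi, exp
    from scipy.special import jv as besselj

    # Invented inputs: spin-echo lengths [A], wavelength [m], thickness [cm],
    # an equally spaced q grid [1/A] and a stand-in I(q) [1/cm].
    SElength = np.linspace(100, 2000, 20)
    wavelength = 2e-10
    thickness = 0.1
    q = np.arange(1e-4, 0.1, 1e-4)
    Iq = 1.0/(1.0 + (q*500)**2)   # placeholder SANS curve

    # Discrete zeroth-order Hankel transform, as in the function body.
    G = np.zeros(len(SElength), 'd')
    for i in range(len(SElength)):
        integr = besselj(0, q*SElength[i])*Iq*q
        G[i] = np.sum(integr)

    dq = (q[1]-q[0])*1e10         # step size in q, converted to [1/m]
    G *= dq*1e10*2*pi             # 2 pi from the circular integration

    # SESANS polarization relative to zero spin-echo length.
    P = exp(thickness*wavelength**2/(4*pi**2)*(G - G[0]))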