Changes in / [bf6631a:f94d8a2] in sasmodels

Files: 10 edited

Each file below is shown as a unified diff between the two revisions named
in parentheses, using these markers:

Legend:
      Unmodified
  +   Added
  -   Removed
  …   Unmodified lines elided between hunks
extra/pylint.rc
(r37a7252 → r5ef0633)

  # Python code to execute, usually for sys.path manipulation such as
  # pygtk.require().
- #init-hook='import os, sys; sys.path.extend([os.getcwd(),os.path.join(os.getcwd(),"extra")])'
+ #init-hook=

  # Profiled execution.
sasmodels/__init__.py
(r37a7252 → r9890053)

- """
- sasmodels
- =========
-
- **sasmodels** is a package containing models for small angle neutron and xray
- scattering. Models supported are the one dimensional circular average and
- two dimensional oriented patterns. As well as the form factor calculations
- for the individual shapes **sasmodels** also provides automatic shape
- polydispersity, angular dispersion and resolution convolution. SESANS
- patterns can be computed for any model.
-
- Models can be written in python or in C. C models can run on the GPU if
- OpenCL drivers are available. See :mod:`generate` for details on
- defining new models.
- """
-
  __version__ = "0.9"
sasmodels/bumps_model.py
(r37a7252 → rec7e360)

  arguments to set the initial value for each parameter.

- :class:`Experiment` combines the *Model* function with a data file loaded by
- the sasview data loader. *Experiment* takes a *cutoff* parameter controlling
+ :class:`Experiment` combines the *Model* function with a data file loaded by the
+ sasview data loader. *Experiment* takes a *cutoff* parameter controlling
  how far the polydispersity integral extends.

  """
-
- __all__ = [
-     "Model", "Experiment",
-     ]

  import warnings
…

  # CRUFT: old style bumps wrapper which doesn't separate data and model
- # pylint: disable=invalid-name
  def BumpsModel(data, model, cutoff=1e-5, **kw):
-     r"""
-     Bind a model to data, along with a polydispersity cutoff.
-
-     *data* is a :class:`data.Data1D`, :class:`data.Data2D` or
-     :class:`data.Sesans` object.  Use :func:`data.empty_data1D` or
-     :func:`data.empty_data2D` to define $q, \Delta q$ calculation
-     points for displaying the SANS curve when there is no measured data.
-
-     *model* is a runnable module as returned from :func:`core.load_model`.
-
-     *cutoff* is the polydispersity weight cutoff.
-
-     Any additional *key=value* pairs are model dependent parameters.
-
-     Returns an :class:`Experiment` object.
-
-     Note that the usual Bumps semantics is not fully supported, since
-     assigning *M.name = parameter* on the returned experiment object
-     does not set that parameter in the model.  Range setting will still
-     work as expected though.
-
-     .. deprecated:: 0.1
-         Use :class:`Experiment` instead.
-     """
      warnings.warn("Use of BumpsModel is deprecated. Use bumps_model.Experiment instead.")
-
-     # Create the model and experiment
      model = Model(model, **kw)
      experiment = Experiment(data=data, model=model, cutoff=cutoff)
-
-     # Copy the model parameters up to the experiment object.
-     for k, v in model.parameters().items():
-         setattr(experiment, k, v)
+     for k in model._parameter_names:
+         setattr(experiment, k, getattr(model, k))
      return experiment

-
  def create_parameters(model_info, **kwargs):
-     """
-     Generate Bumps parameters from the model info.
-
-     *model_info* is returned from :func:`generate.model_info` on the
-     model definition module.
-
-     Any additional *key=value* pairs are initial values for the parameters
-     to the models.  Uninitialized parameters will use the model default
-     value.
-
-     Returns a dictionary of *{name: Parameter}* containing the bumps
-     parameters for each model parameter, and a dictionary of
-     *{name: str}* containing the polydispersity distribution types.
-     """
      # lazy import; this allows the doc builder and nosetests to run even
      # when bumps is not on the path.
      from bumps.names import Parameter
+
+     partype = model_info['partype']

      pars = {}
…
          value = kwargs.pop(name, default)
          pars[name] = Parameter.default(value, name=name, limits=limits)
-     for name in model_info['partype']['pd-2d']:
+     for name in partype['pd-2d']:
          for xpart, xdefault, xlimits in [
-                 ('_pd', 0., limits),
-                 ('_pd_n', 35., (0, 1000)),
-                 ('_pd_nsigma', 3., (0, 10)),
-                 ]:
+             ('_pd', 0., limits),
+             ('_pd_n', 35., (0, 1000)),
+             ('_pd_nsigma', 3., (0, 10)),
+             ]:
              xname = name + xpart
              xvalue = kwargs.pop(xname, xdefault)
…
      pd_types = {}
-     for name in model_info['partype']['pd-2d']:
+     for name in partype['pd-2d']:
          xname = name + '_pd_type'
          xvalue = kwargs.pop(xname, 'gaussian')
…

  class Model(object):
-     """
-     Bumps wrapper for a SAS model.
-
-     *model* is a runnable module as returned from :func:`core.load_model`.
-
-     *cutoff* is the polydispersity weight cutoff.
-
-     Any additional *key=value* pairs are model dependent parameters.
-     """
      def __init__(self, model, **kwargs):
          self._sasmodel = model
          pars, pd_types = create_parameters(model.info, **kwargs)
-         for k, v in pars.items():
+         for k,v in pars.items():
              setattr(self, k, v)
-         for k, v in pd_types.items():
+         for k,v in pd_types.items():
              setattr(self, k, v)
          self._parameter_names = list(pars.keys())
…
      def parameters(self):
          """
-         Return a dictionary of parameters objects for the parameters,
-         excluding polydispersity distribution type.
+         Return a dictionary of parameters
          """
          return dict((k, getattr(self, k)) for k in self._parameter_names)

      def state(self):
-         """
-         Return a dictionary of current values for all the parameters,
-         including polydispersity distribution type.
-         """
          pars = dict((k, getattr(self, k).value) for k in self._parameter_names)
          pars.update((k, getattr(self, k)) for k in self._pd_type_names)
…

  class Experiment(DataMixin):
-     r"""
-     Bumps wrapper for a SAS experiment.
+     """
+     Return a bumps wrapper for a SAS model.

-     *data* is a :class:`data.Data1D`, :class:`data.Data2D` or
-     :class:`data.Sesans` object.  Use :func:`data.empty_data1D` or
-     :func:`data.empty_data2D` to define $q, \Delta q$ calculation
-     points for displaying the SANS curve when there is no measured data.
+     *data* is the data to be fitted.

-     *model* is a :class:`Model` object.
+     *model* is the SAS model from :func:`core.load_model`.

      *cutoff* is the integration cutoff, which avoids computing the
      the SAS model where the polydispersity weight is low.

-     The resulting model can be used directly in a Bumps FitProblem call.
+     Model parameters can be initialized with additional keyword
+     arguments, or by assigning to model.parameter_name.value.
+
+     The resulting bumps model can be used directly in a FitProblem call.
      """
      def __init__(self, data, model, cutoff=1e-5):
…
      def update(self):
-         """
-         Call when model parameters have changed and theory needs to be
-         recalculated.
-         """
          self._cache = {}

      def numpoints(self):
          """
-         Return the number of data points
+         Return the number of points
          """
          return len(self.Iq)
…
      def theory(self):
-         """
-         Return the theory corresponding to the model parameters.
-
-         This method uses lazy evaluation, and requires model.update() to be
-         called when the parameters have changed.
-         """
          if 'theory' not in self._cache:
              pars = self.model.state()
…
      def residuals(self):
-         """
-         Return theory minus data normalized by uncertainty.
-         """
          #if np.any(self.err ==0): print("zeros in err")
          return (self.theory() - self.Iq) / self.dIq

      def nllf(self):
-         """
-         Return the negative log likelihood of seeing data given the model
-         parameters, up to a normalizing constant which depends on the data
-         uncertainty.
-         """
          delta = self.residuals()
          #if np.any(np.isnan(R)): print("NaN in residuals")
…
      def simulate_data(self, noise=None):
-         """
-         Generate simulated data.
-         """
          Iq = self.theory()
          self._set_data(Iq, noise)

      def save(self, basename):
-         """
-         Save the model parameters and data into a file.
-
-         Not Implemented.
-         """
          pass
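For orientation, here is a minimal usage sketch of the wrapper defined in this
file. It is illustrative only: the 'sphere' model name, the q grid, and all
parameter values are assumptions, and the load_model/load_model_definition
signatures are as referenced elsewhere in this changeset, which may differ by
revision.

import numpy as np
from bumps.names import FitProblem
from sasmodels import core
from sasmodels.bumps_model import Model, Experiment
from sasmodels.data import empty_data1D

# q grid for displaying the theory curve when there is no measured data
data = empty_data1D(np.logspace(-3, -1, 200))

# a runnable model as returned from core.load_model (signature assumed)
kernel = core.load_model(core.load_model_definition('sphere'))

# keyword arguments set initial parameter values, including polydispersity
model = Model(kernel, radius=60, radius_pd=0.1, radius_pd_n=35)
experiment = Experiment(data=data, model=model, cutoff=1e-5)
problem = FitProblem(experiment)   # ready for a bumps fit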
sasmodels/compare.py
(rd15a908 → r608e31e)

  # doc string so that we can display it at run time if there is an error.
  # lin
- __doc__ = (__doc__ # pylint: disable=redefined-builtin
-            + """
+ __doc__ = __doc__ + """
  Program description
  -------------------

- """
-            + USAGE)
+ """ + USAGE

…
              theory = lambda: smearer.get_value()
          else:
-             theory = lambda: model.evalDistribution([data.qx_data[index],
-                                                      data.qy_data[index]])
+             theory = lambda: model.evalDistribution([data.qx_data[index], data.qy_data[index]])
      elif smearer is not None:
          theory = lambda: smearer(model.evalDistribution(data.x))
…
      try:
          base_value, base_time = time_calculation(base, pars, Nbase)
-         print("%s t=%.1f ms, intensity=%.0f"
-               % (base.engine, base_time, sum(base_value)))
+         print("%s t=%.1f ms, intensity=%.0f"%(base.engine, base_time, sum(base_value)))
      except ImportError:
          traceback.print_exc()
…
      try:
          comp_value, comp_time = time_calculation(comp, pars, Ncomp)
-         print("%s t=%.1f ms, intensity=%.0f"
-               % (comp.engine, comp_time, sum(comp_value)))
+         print("%s t=%.1f ms, intensity=%.0f"%(comp.engine, comp_time, sum(comp_value)))
      except ImportError:
          traceback.print_exc()
…
      # Compare, but only if computing both forms
      if Nbase > 0 and Ncomp > 0:
+         #print("speedup %.2g"%(comp_time/base_time))
+         #print("max |base/comp|", max(abs(base_value/comp_value)), "%.15g"%max(abs(base_value)), "%.15g"%max(abs(comp_value)))
+         #comp *= max(base_value/comp_value)
          resid = (base_value - comp_value)
          relerr = resid/comp_value
-         _print_stats("|%s-%s|"
-                      % (base.engine, comp.engine) + (" "*(3+len(comp.engine))),
+         _print_stats("|%s-%s|"%(base.engine, comp.engine) + (" "*(3+len(comp.engine))),
                      resid)
-         _print_stats("|(%s-%s)/%s|"
-                      % (base.engine, comp.engine, comp.engine),
+         _print_stats("|(%s-%s)/%s|"%(base.engine, comp.engine, comp.engine),
                      relerr)
…
      invalid = [o[1:] for o in flags
                 if o[1:] not in NAME_OPTIONS
-                and not any(o.startswith('-%s='%t) for t in VALUE_OPTIONS)]
+                   and not any(o.startswith('-%s='%t) for t in VALUE_OPTIONS)]
      if invalid:
          print("Invalid options: %s"%(", ".join(invalid)))
…

- # pylint: disable=bad-whitespace
  # Interpret the flags
  opts = {
…
          elif arg == '-sasview': engines.append(arg[1:])
          elif arg == '-edit': opts['explore'] = True
- # pylint: enable=bad-whitespace

      if len(engines) == 0:
…
      presets = {}
      for arg in values:
-         k, v = arg.split('=',1)
+         k,v = arg.split('=',1)
          if k not in pars:
              # extract base name without polydispersity info
              s = set(p.split('_pd')[0] for p in pars)
-             print("%r invalid; parameters are: %s"%(k, ", ".join(sorted(s))))
+             print("%r invalid; parameters are: %s"%(k,", ".join(sorted(s))))
              sys.exit(1)
          presets[k] = float(v) if not k.endswith('type') else v
…

      # Create the computational engines
-     data, _ = make_data(opts)
+     data, _index = make_data(opts)
      if n1:
          base = make_engine(model_definition, data, engines[0], opts['cutoff'])
…
          comp = None

-     # pylint: disable=bad-whitespace
      # Remember it all
      opts.update({
…
          'engines'   : [base, comp],
          })
-     # pylint: enable=bad-whitespace

      return opts

+ def main():
+     opts = parse_opts()
+     if opts['explore']:
+         explore(opts)
+     else:
+         compare(opts)
+
  def explore(opts):
-     """
-     Explore the model using the Bumps GUI.
-     """
      import wx
      from bumps.names import FitProblem
…
      problem = FitProblem(Explore(opts))
-     is_mac = "cocoa" in wx.version()
+     isMac = "cocoa" in wx.version()
      app = wx.App()
      frame = AppFrame(parent=None, title="explore")
-     if not is_mac: frame.Show()
+     if not isMac: frame.Show()
      frame.panel.set_model(model=problem)
      frame.panel.Layout()
      frame.panel.aui.Split(0, wx.TOP)
-     if is_mac: frame.Show()
+     if isMac: frame.Show()
      app.MainLoop()

  class Explore(object):
      """
-     Bumps wrapper for a SAS model comparison.
-
-     The resulting object can be used as a Bumps fit problem so that
-     parameters can be adjusted in the GUI, with plots updated on the fly.
+     Return a bumps wrapper for a SAS model comparison.
      """
      def __init__(self, opts):
…
          Return cost.
          """
-         # pylint: disable=no-self-use
          return 0.  # No nllf
…

- def main():
-     """
-     Main program.
-     """
-     opts = parse_opts()
-     if opts['explore']:
-         explore(opts)
-     else:
-         compare(opts)
-
  if __name__ == "__main__":
      main()
sasmodels/compare_many.py
(rd15a908 → r6458608)

  #!/usr/bin/env python
- """
- Program to compare results from many random parameter sets for a given model.
-
- The result is a comma separated value (CSV) table that can be redirected
- from standard output into a file and loaded into a spreadsheet.
-
- The models are compared for each parameter set and if the difference is
- greater than expected for that precision, the parameter set is labeled
- as bad and written to the output, along with the random seed used to
- generate that parameter value. This seed can be used with :mod:`compare`
- to reload and display the details of the model.
- """
  from __future__ import print_function

…

  def calc_stats(target, value, index):
-     """
-     Calculate statistics between the target value and the computed value.
-
-     *target* and *value* are the vectors being compared, with the
-     difference normalized by *target* to get relative error.  Only
-     the elements listed in *index* are used, though index may be
-     and empty slice defined by *slice(None, None)*.
-
-     Returns:
-
-     *maxrel* the maximum relative difference
-
-     *rel95* the relative difference with the 5% biggest differences ignored
-
-     *maxabs* the maximum absolute difference for the 5% biggest differences
-
-     *maxval* the maximum value for the 5% biggest differences
-     """
      resid = abs(value-target)[index]
      relerr = resid/target[index]
-     sorted_rel_index = np.argsort(relerr)
+     srel = np.argsort(relerr)
      #p90 = int(len(relerr)*0.90)
      p95 = int(len(relerr)*0.95)
      maxrel = np.max(relerr)
-     rel95 = relerr[sorted_rel_index[p95]]
-     maxabs = np.max(resid[sorted_rel_index[p95:]])
-     maxval = np.max(value[sorted_rel_index[p95:]])
-     return maxrel, rel95, maxabs, maxval
+     rel95 = relerr[srel[p95]]
+     maxabs = np.max(resid[srel[p95:]])
+     maxval = np.max(value[srel[p95:]])
+     return maxrel,rel95,maxabs,maxval

  def print_column_headers(pars, parts):
-     """
-     Generate column headers for the differences and for the parameters,
-     and print them to standard output.
-     """
      stats = list('Max rel err|95% rel err|Max abs err above 90% rel|Max value above 90% rel'.split('|'))
      groups = ['']
…
      print(','.join('"%s"'%c for c in columns))

- # Target 'good' value for various precision levels.
  PRECISION = {
      'fast': 1e-3,
…
  def compare_instance(name, data, index, N=1, mono=True, cutoff=1e-5,
                       base='sasview', comp='double'):
-     r"""
-     Compare the model under different calculation engines.
-
-     *name* is the name of the model.
-
-     *data* is the data object giving $q, \Delta q$ calculation points.
-
-     *index* is the active set of points.
-
-     *N* is the number of comparisons to make.
-
-     *cutoff* is the polydispersity weight cutoff to make the calculation
-     a little bit faster.
-
-     *base* and *comp* are the names of the calculation engines to compare.
-     """
-
-     is_2d = hasattr(data, 'qx_data')
+     is2D = hasattr(data, 'qx_data')
      model_definition = core.load_model_definition(name)
      pars = get_demo_pars(model_definition)
      header = ('\n"Model","%s","Count","%d","Dimension","%s"'
-               % (name, N, "2D" if is_2d else "1D"))
+               % (name, N, "2D" if is2D else "1D"))
      if not mono: header += ',"Cutoff",%g'%(cutoff,)
      print(header)

-     if is_2d:
+     if is2D:
          info = generate.make_info(model_definition)
          partype = info['partype']
…
      # declarations are not available in python 2.7.
      def try_model(fn, pars):
-         """
-         Return the model evaluated at *pars*.  If there is an exception,
-         print it and return NaN of the right shape.
-         """
          try:
              result = fn(**pars)
…
          return result
      def check_model(pars):
-         """
-         Run the two calculators against *pars*, returning statistics
-         on the differences.  See :func:`calc_stats` for the list of stats.
-         """
          base_value = try_model(calc_base, pars)
          comp_value = try_model(calc_comp, pars)
…
              good = [True]
          columns = check_model(pars_i)
-         columns += [v for _, v in sorted(pars_i.items())]
+         columns += [v for _,v in sorted(pars_i.items())]
          if first:
              labels = [" vs. ".join((calc_base.engine, calc_comp.engine))]
…

  def print_usage():
-     """
-     Print the command usage string.
-     """
      print("usage: compare_many.py MODEL COUNT (1dNQ|2dNQ) (CUTOFF|mono) (single|double|quad)")


  def print_models():
-     """
-     Print the list of available models in columns.
-     """
      print(columnize(MODELS, indent="  "))


  def print_help():
-     """
-     Print usage string, the option description and the list of available models.
-     """
      print_usage()
      print("""\
…

  def main():
-     """
-     Main program.
-     """
-     if len(sys.argv) not in (6, 7):
+     if len(sys.argv) not in (6,7):
          print_help()
          sys.exit(1)
…

      data, index = make_data({'qmax':1.0, 'is2d':is2D, 'nq':Nq, 'res':0.,
-                              'accuracy': 'Low', 'view':'log'})
+                             'accuracy': 'Low', 'view':'log'})
      model_list = [model] if model != "all" else MODELS
      for model in model_list:
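As a reading aid for calc_stats above, here is a self-contained sketch of the
same arithmetic with made-up vectors (the arrays are illustrative, not from
the changeset):

import numpy as np

target = np.linspace(1., 10., 100)            # reference engine output
rng = np.random.RandomState(0)
value = target * (1. + 1e-5*rng.randn(100))   # comparison engine output
index = slice(None, None)                     # compare all points

resid = abs(value - target)[index]
relerr = resid/target[index]
srel = np.argsort(relerr)                     # order points by relative error
p95 = int(len(relerr)*0.95)
maxrel = np.max(relerr)                       # worst relative error overall
rel95 = relerr[srel[p95]]                     # relative error ignoring the 5% worst points
maxabs = np.max(resid[srel[p95:]])            # absolute error among the 5% worst
maxval = np.max(value[srel[p95:]])            # value at those worst points
print(maxrel, rel95, maxabs, maxval)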
sasmodels/convert.py
(rd15a908 → r29da213)

      """
      return dict((p, (v*1e6 if p.endswith('sld') else v))
-                 for p, v in pars.items())
+                 for p,v in pars.items())

  def convert_model(name, pars):
…
      Convert model from old style parameter names to new style.
      """
-     _, _ = name,pars # lint
+     _,_ = name,pars # lint
      raise NotImplementedError
      # need to load all new models in order to determine old=>new
…
      """
      return dict((p, (v*1e-6 if p.endswith('sld') else v))
-                 for p, v in pars.items())
+                 for p,v in pars.items())

  def _remove_pd(pars, key, name):
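The two dict comprehensions above rescale only parameters whose names end in
'sld', leaving all other parameters untouched. A tiny sketch with made-up
parameter values (the enclosing function names are not visible in this hunk,
so the conversion direction shown is illustrative):

pars = {'sld': 1e-6, 'solvent_sld': 6.3e-6, 'radius': 60.0}

# multiply sld-like parameters by 1e6; everything else passes through
scaled = dict((p, (v*1e6 if p.endswith('sld') else v))
              for p, v in pars.items())
# scaled == {'sld': 1.0, 'solvent_sld': 6.3, 'radius': 60.0}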
sasmodels/core.py
(rd15a908 → rcde11f0f)

  Core model handling routines.
  """
- __all__ = [
-     "list_models", "load_model_definition", "precompile_dll",
-     "load_model", "make_kernel", "call_kernel", "call_ER", "call_VR",
-     ]
+ __all__ = ["list_models", "load_model_definition", "precompile_dll",
+            "load_model", "make_kernel", "call_kernel", "call_ER", "call_VR" ]

  from os.path import basename, dirname, join as joinpath
…

  def isstr(s):
-     """
-     Return True if *s* is a string-like object.
-     """
      try: s + ''
      except: return False
…
  #    source = open(info['name']+'.cl','r').read()

-     if (platform == "dll"
+     if (platform=="dll"
              or not HAVE_OPENCL
              or not kernelcl.environment().has_type(dtype)):
…
          width = pars.get(name+'_pd', 0.0)
          nsigma = pars.get(name+'_pd_nsigma', 3.0)
-         value, weight = weights.get_weights(
+         value,weight = weights.get_weights(
              disperser, npts, width, nsigma, value, limits, relative)
      return value, weight / np.sum(weight)
…
                  for name in info['partype']['volume']]
      value, weight = dispersion_mesh(vol_pars)
-     whole, part = VR(*value)
+     whole,part = VR(*value)
      return np.sum(weight*part)/np.sum(weight*whole)
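The end of the hunk above averages a volume ratio over the polydispersity
mesh. A minimal sketch of that weighted average, with a toy VR function and
made-up mesh values and weights standing in for a real model:

import numpy as np

def VR(radius, thickness):
    # toy (whole, part) volumes for each mesh point; real models supply VR
    whole = (radius + thickness)**3
    part = radius**3
    return whole, part

radius = np.array([40., 50., 60.])       # dispersion mesh values
thickness = np.array([5., 5., 5.])
weight = np.array([0.25, 0.5, 0.25])     # polydispersity weights

whole, part = VR(radius, thickness)
volume_ratio = np.sum(weight*part)/np.sum(weight*whole)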
sasmodels/data.py
(rd15a908 → r013adb7)

          mdata[mdata <= 0] = masked
          plt.errorbar(data.x/10, scale*mdata, yerr=data.dy, fmt='.')
-         all_positive = all_positive and (mdata > 0).all()
+         all_positive = all_positive and (mdata>0).all()
          some_present = some_present or (mdata.count() > 0)
…
          mtheory[~np.isfinite(mtheory)] = masked
          if view is 'log':
-             mtheory[mtheory <= 0] = masked
+             mtheory[mtheory<=0] = masked
          plt.plot(data.x/10, scale*mtheory, '-', hold=True)
-         all_positive = all_positive and (mtheory > 0).all()
+         all_positive = all_positive and (mtheory>0).all()
          some_present = some_present or (mtheory.count() > 0)
…
          plt.title('theory')
          h = plt.colorbar()
-         h.set_label(r'$\log_{10}I(q)$' if view == 'log'
+         h.set_label(r'$\log_{10}I(q)$' if view=='log'
                      else r'$q^4 I(q)$' if view == 'q4'
                      else '$I(q)$')
…
          plt.title('residuals')
          h = plt.colorbar()
-         h.set_label(r'$\Delta I(q)$')
+         h.set_label('$\Delta I(q)$')
sasmodels/resolution.py
(rfdc538a → r5258859)

      """
      #print("apply shapes", theory.shape, weight_matrix.shape)
-     Iq = np.dot(theory[None, :], weight_matrix)
+     Iq = np.dot(theory[None,:], weight_matrix)
      #print("result shape",Iq.shape)
      return Iq.flatten()
…
      # neither trapezoid nor Simpson's rule improved the accuracy.
      edges = bin_edges(q_calc)
-     edges[edges < 0.0] = 0.0  # clip edges below zero
-     G = erf((edges[:, None] - q[None, :]) / (sqrt(2.0)*q_width)[None, :])
+     edges[edges<0.0] = 0.0 # clip edges below zero
+     G = erf( (edges[:,None] - q[None,:]) / (sqrt(2.0)*q_width)[None,:] )
      weights = G[1:] - G[:-1]
-     weights /= np.sum(weights, axis=0)[None, :]
+     weights /= np.sum(weights, axis=0)[None,:]
      return weights

…
      # The current algorithm is a midpoint rectangle rule.
      q_edges = bin_edges(q_calc) # Note: requires q > 0
-     q_edges[q_edges < 0.0] = 0.0  # clip edges below zero
+     q_edges[q_edges<0.0] = 0.0 # clip edges below zero
      weights = np.zeros((len(q), len(q_calc)), 'd')

…
              #print(qi - h, qi + h)
              #print(in_x + abs_x)
-             weights[i, :] = (in_x + abs_x) * np.diff(q_edges) / (2*h)
+             weights[i,:] = (in_x + abs_x) * np.diff(q_edges) / (2*h)
          else:
              L = n_height
              for k in range(-L, L+1):
-                 weights[i, :] += _q_perp_weights(q_edges, qi+k*h/L, w)
-             weights[i, :] /= 2*L + 1
+                 weights[i,:] += _q_perp_weights(q_edges, qi+k*h/L, w)
+                 weights[i,:] /= 2*L + 1

      return weights.T
…
      log-scaled data.
      """
-     if len(x) < 2 or (np.diff(x) < 0).any():
+     if len(x) < 2 or (np.diff(x)<0).any():
          raise ValueError("Expected bins to be an increasing set")
      edges = np.hstack([
…
      """
      step = np.diff(q)
-     index = step > max_step
+     index = step>max_step
      if np.any(index):
          inserts = []
-         for q_i, step_i in zip(q[:-1][index], step[index]):
+         for q_i,step_i in zip(q[:-1][index],step[index]):
              n = np.ceil(step_i/max_step)
-             inserts.extend(q_i + np.arange(1, n)*(step_i/n))
+             inserts.extend(q_i + np.arange(1,n)*(step_i/n))
          # Extend a couple of fringes beyond the end of the data
-         inserts.extend(q[-1] + np.arange(1, 8)*max_step)
-         q_calc = np.sort(np.hstack((q, inserts)))
+         inserts.extend(q[-1] + np.arange(1,8)*max_step)
+         q_calc = np.sort(np.hstack((q,inserts)))
      else:
          q_calc = q
…
      if q_min < q[0]:
          if q_min <= 0: q_min = q_min*MIN_Q_SCALE_FOR_NEGATIVE_Q_EXTRAPOLATION
-         n_low = np.ceil((q[0]-q_min) / (q[1]-q[0])) if q[1] > q[0] else 15
+         n_low = np.ceil((q[0]-q_min) / (q[1]-q[0])) if q[1]>q[0] else 15
          q_low = np.linspace(q_min, q[0], n_low+1)[:-1]
      else:
          q_low = []
      if q_max > q[-1]:
-         n_high = np.ceil((q_max-q[-1]) / (q[-1]-q[-2])) if q[-1] > q[-2] else 15
+         n_high = np.ceil((q_max-q[-1]) / (q[-1]-q[-2])) if q[-1]>q[-2] else 15
          q_high = np.linspace(q[-1], q_max, n_high+1)[1:]
      else:
…
          if q_min < 0: q_min = q[0]*MIN_Q_SCALE_FOR_NEGATIVE_Q_EXTRAPOLATION
          n_low = log_delta_q * (log(q[0])-log(q_min))
-         q_low = np.logspace(log10(q_min), log10(q[0]), np.ceil(n_low)+1)[:-1]
+         q_low  = np.logspace(log10(q_min), log10(q[0]), np.ceil(n_low)+1)[:-1]
      else:
          q_low = []
…
      that make it slow to evaluate but give it good accuracy.
      """
-     from scipy.integrate import romberg
+     from scipy.integrate import romberg, dblquad

      if any(k not in form.info['defaults'] for k in pars.keys()):
…
              result[i] = r/(2*h)
          else:
-             w_grid = np.linspace(0, w, 21)[None, :]
-             h_grid = np.linspace(-h, h, 23)[:, None]
+             w_grid = np.linspace(0, w, 21)[None,:]
+             h_grid = np.linspace(-h, h, 23)[:,None]
              u = sqrt((qi+h_grid)**2 + w_grid**2)
              Iu = np.interp(u, q_calc, Iq)
              #print(np.trapz(Iu, w_grid, axis=1))
-             Is = np.trapz(np.trapz(Iu, w_grid, axis=1), h_grid[:, 0])
+             Is = np.trapz(np.trapz(Iu, w_grid, axis=1), h_grid[:,0])
              result[i] = Is / (2*h*w)
-             # from scipy.integrate import dblquad
-             # r, err = dblquad(_int_wh, -h, h, lambda h: 0., lambda h: w,
-             #                  args=(qi,))
-             # result[i] = r/(w*2*h)
+             """
+             r, err = dblquad(_int_wh, -h, h, lambda h: 0., lambda h: w,
+                              args=(qi,))
+             result[i] = r/(w*2*h)
+             """

      # r should be [float, ...], but it is [array([float]), array([float]),...]
…

      _fn = lambda q, q0, dq: eval_form(q, form, pars)*gaussian(q, q0, dq)
-     r = [romberg(_fn, max(qi-nsigma*dqi, 1e-10*q[0]), qi+nsigma*dqi,
-                  args=(qi, dqi), divmax=100, vec_func=True, tol=0, rtol=1e-8)
-          for qi, dqi in zip(q, q_width)]
+     r = [romberg(_fn, max(qi-nsigma*dqi,1e-10*q[0]), qi+nsigma*dqi, args=(qi, dqi),
+                  divmax=100, vec_func=True, tol=0, rtol=1e-8)
+          for qi,dqi in zip(q,q_width)]
      return np.asarray(r).flatten()
…
          theory = self.Iq(resolution.q_calc)
          output = resolution.apply(theory)
-         answer = [
-             9.0618, 8.6402, 8.1187, 7.1392, 6.1528,
-             5.5555, 4.5584, 3.5606, 2.5623, 2.0000,
-             ]
+         answer = [ 9.0618, 8.6402, 8.1187, 7.1392, 6.1528,
+                    5.5555, 4.5584, 3.5606, 2.5623, 2.0000 ]
          np.testing.assert_allclose(output, answer, atol=1e-4)
…
          theory = 1000*self.Iq(resolution.q_calc**4)
          output = resolution.apply(theory)
-         answer = [
-             8.85785, 8.43012, 7.92687, 6.94566, 6.03660,
-             5.40363, 4.40655, 3.40880, 2.41058, 2.00000,
-             ]
+         answer = [ 8.85785, 8.43012, 7.92687, 6.94566, 6.03660,
+                    5.40363, 4.40655, 3.40880, 2.41058, 2.00000 ]
          np.testing.assert_allclose(output, answer, atol=1e-4)
…
          theory = self.Iq(resolution.q_calc)
          output = resolution.apply(theory)
-         answer = [
-             11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0,
-             ]
+         answer = [ 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0 ]
          np.testing.assert_allclose(output, answer, atol=1e-4)
…
          theory = self.Iq(resolution.q_calc)
          output = resolution.apply(theory)
-         answer = [
-             11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0,
-             ]
+         answer = [ 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0 ]
          np.testing.assert_allclose(output, answer, atol=1e-4)
…
          theory = 12.0-1000.0*resolution.q_calc
          output = resolution.apply(theory)
-         answer = [
-             10.44785079, 9.84991299, 8.98101708,
-             7.99906585, 6.99998311, 6.00001689,
-             5.00093415, 4.01898292, 3.15008701, 2.55214921,
-             ]
+         answer = [ 10.44785079, 9.84991299, 8.98101708,
+                    7.99906585, 6.99998311, 6.00001689,
+                    5.00093415, 4.01898292, 3.15008701, 2.55214921]
          np.testing.assert_allclose(output, answer, atol=1e-8)
…
          }
      form = load_model('ellipsoid', dtype='double')
-     q = np.logspace(log10(4e-5), log10(2.5e-2), 68)
+     q = np.logspace(log10(4e-5),log10(2.5e-2), 68)
      width, height = 0.117, 0.
      resolution = Slit1D(q, width=width, height=height)
…
      #h = 0.0277790
      resolution = Slit1D(q, w, h)
-     _eval_demo_1d(resolution, title="(%g,%g) Slit Resolution"%(w, h))
+     _eval_demo_1d(resolution, title="(%g,%g) Slit Resolution"%(w,h))

  def demo():
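The erf-based pinhole weights near the top of this diff build a matrix whose
columns are normalized Gaussian windows, which apply() then contracts against
the theory curve. A standalone sketch of that construction, with an
illustrative bin_edges helper and made-up q grids and dq values:

import numpy as np
from scipy.special import erf

def bin_edges(q_calc):
    # midpoint edges around each calculation point (illustrative helper)
    mid = (q_calc[1:] + q_calc[:-1])/2
    return np.hstack([q_calc[0] - (mid[0] - q_calc[0]),
                      mid,
                      q_calc[-1] + (q_calc[-1] - mid[-1])])

q_calc = np.linspace(0.001, 0.5, 400)        # theory evaluation points
q = np.linspace(0.01, 0.4, 50)               # measurement points
q_width = np.full_like(q, 0.005)             # dq resolution at each point

edges = bin_edges(q_calc)
edges[edges < 0.0] = 0.0                     # clip edges below zero
G = erf((edges[:, None] - q[None, :]) / (np.sqrt(2.0)*q_width)[None, :])
weights = G[1:] - G[:-1]
weights /= np.sum(weights, axis=0)[None, :]  # each column integrates to one

theory = 1.0/(1.0 + (q_calc/0.05)**4)        # toy I(q)
Iq_smeared = np.dot(theory[None, :], weights).flatten()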
sasmodels/sesans.py
(r384d114 → r3c56da87)

  import numpy as np
  from numpy import pi, exp
+
  from scipy.special import jv as besselj

- def make_q(q_max, Rmax):
-     r"""
-     Return a $q$ vector suitable for SESANS covering from $2\pi/(10 R_{\max})$
-     to $q_max$.
-     """
+ def make_q(q_zmax, Rmax):
      q_min = dq = 0.1 * 2*pi / Rmax
-     return np.arange(q_min, q_max, dq)
+     #q_min = 0.00003
+     return np.arange(q_min, q_zmax, dq)
+
+ # TODO: dead code; for now the call to the hankel transform happens in BumpsModel
+ class SesansCalculator:
+     def __init__(self, kernel, q_zmax, Rmax, SElength, wavelength, thickness):
+         self._set_kernel(kernel, q_zmax, Rmax)
+         self.SElength = SElength
+         self.wavelength = wavelength
+         self.thickness = thickness
+
+     def _set_kernel(self, kernel, q_zmax, Rmax):
+         kernel_input = kernel.make_input([make_q(q_zmax, Rmax)])
+         self.sans_calculator = kernel(kernel_input)
+
+     def __call__(self, pars, pd_pars, cutoff=1e-5):
+         Iq = self.sans_calculator(pars, pd_pars, cutoff)
+         P = hankel(self.SElength, self.wavelength, self.thickness, self.q, Iq)
+         self.Iq = Iq
+         return P

  def hankel(SElength, wavelength, thickness, q, Iq):
-     r"""
+     """
      Compute the expected SESANS polarization for a given SANS pattern.

-     Uses the hankel transform followed by the exponential.  The values for *zz*
-     (or spin echo length, or delta), wavelength and sample thickness should
-     come from the dataset.  $q$ should be chosen such that the oscillations
-     in $I(q)$ are well sampled (e.g., $5 \cdot 2 \pi/d_{\max}$).
+     Uses the hankel transform followed by the exponential.  The values
+     for zz (or spin echo length, or delta), wavelength and sample thickness
+     information should come from the dataset.  *q* should be chosen such
+     that the oscillations in *I(q)* are well sampled (e.g., 5*2*pi/d_max).

-     *SElength* [A] is the set of $z$ points at which to compute the
-     Hankel transform
+     *SElength* [A] is the set of z points at which to compute the hankel transform

      *wavelength* [m] is the wavelength of each individual point *zz*
…
      *thickness* [cm] is the sample thickness.

-     *q* [A$^{-1}$] is the set of $q$ points at which the model has been
-     computed.  These should be equally spaced.
+     *q* [A^{-1}] is the set of q points at which the model has been computed.
+     These should be equally spaced.

-     *I* [cm$^{-1}$] is the value of the SANS model at *q*
+     *I* [cm^{-1}] is the value of the SANS model at *q*
      """
      G = np.zeros(len(SElength), 'd')
      for i in range(len(SElength)):
-         integr = besselj(0, q*SElength[i])*Iq*q
+         integr = besselj(0,q*SElength[i])*Iq*q
          G[i] = np.sum(integr)
-
-     # [m^-1] step size in q, needed for integration
-     dq=(q[1]-q[0])*1e10
-
-     # integration step, convert q into [m**-1] and 2 pi circle integration
-     G *= dq*1e10*2*pi
-
+     dq=(q[1]-q[0])*1e10 # [m^-1] step size in q, needed for integration
+     G *= dq*1e10*2*pi # integr step, conver q into [m**-1] and 2 pi circle integr
      P = exp(thickness*wavelength**2/(4*pi**2)*(G-G[0]))
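Read off the code above, the transform that hankel implements is, in
continuous form (here $\delta$ is the spin echo length SElength, $t$ the
sample thickness, $\lambda$ the wavelength, and the integral is approximated
by a rectangle-rule sum over the equally spaced q grid with step dq):

G(\delta) = 2\pi \int_0^\infty J_0(q\delta)\, I(q)\, q \,\mathrm{d}q,
\qquad
P(\delta) = \exp\!\left[\frac{t\,\lambda^2}{4\pi^2}\,\bigl(G(\delta) - G(0)\bigr)\right]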