Changeset 639c4e3 in sasmodels
- Timestamp: Apr 26, 2016 9:43:53 AM
- Branches: master, core_shell_microgels, costrafo411, magnetic_model, release_v0.94, release_v0.95, ticket-1257-vesicle-product, ticket_1156, ticket_1265_superball, ticket_822_more_unit_tests
- Children: ed246ab
- Parents: cebbb5a (diff), fa227d3 (diff)
- Files: 12 added, 54 edited

Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
sasmodels/kerneldll.py
r8d62008 → r639c4e3

@@ -87,5 +87,7 @@
         COMPILE = " ".join((CC, LN))
     else:
-        COMPILE = "gcc -shared -fPIC -std=c99 -O2 -Wall %(source)s -o %(output)s -lm"
+        # fPIC is unused on windows
+        # COMPILE = "gcc -shared -fPIC -std=c99 -O2 -Wall %(source)s -o %(output)s -lm"
+        COMPILE = "gcc -shared -std=c99 -O2 -Wall %(source)s -o %(output)s -lm"
         if "SAS_OPENMP" in os.environ:
             COMPILE += " -fopenmp"
@@ -93,5 +95,6 @@
     COMPILE = "cc -shared -fPIC -fopenmp -std=c99 -O2 -Wall %(source)s -o %(output)s -lm"
 
-DLL_PATH = tempfile.gettempdir()
+# Assume the default location of module DLLs is within the sasmodel directory.
+DLL_PATH = os.path.join(os.path.split(os.path.realpath(__file__))[0], "models", "dll")
 
 ALLOW_SINGLE_PRECISION_DLLS = True
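The second hunk moves the compiled-model cache out of the system temp directory and into the package's models/dll directory. For context only, here is a minimal sketch of how a %-style template like COMPILE is typically expanded and invoked; the compile_model helper below is hypothetical (in sasmodels the equivalent logic lives in kerneldll.make_dll) and the error handling is illustrative.

    import os

    COMPILE = "gcc -shared -std=c99 -O2 -Wall %(source)s -o %(output)s -lm"
    DLL_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models", "dll")

    def compile_model(source_path):
        # Hypothetical helper: compile one model source file into DLL_PATH.
        if not os.path.exists(DLL_PATH):
            os.makedirs(DLL_PATH)
        base = os.path.splitext(os.path.basename(source_path))[0]
        output_path = os.path.join(DLL_PATH, base + ".so")
        command = COMPILE % {"source": source_path, "output": output_path}
        if os.system(command) != 0 or not os.path.exists(output_path):
            raise RuntimeError("compile failed: " + command)
        return output_path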
sasmodels/model_test.py
r38a9b07 → r639c4e3

@@ -288,5 +288,10 @@
     Returns 0 if success or 1 if any tests fail.
     """
-    import xmlrunner
+    try:
+        from xmlrunner import XMLTestRunner as TestRunner
+        test_args = { 'output': 'logs' }
+    except ImportError:
+        from unittest import TextTestRunner as TestRunner
+        test_args = { }
 
     models = sys.argv[1:]
@@ -327,6 +332,5 @@
         return 1
 
-    #runner = unittest.TextTestRunner()
-    runner = xmlrunner.XMLTestRunner(output='logs', verbosity=verbosity)
+    runner = TestRunner(verbosity=verbosity, **test_args)
     result = runner.run(make_suite(loaders, models))
     return 1 if result.failures or result.errors else 0
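The pattern above lets the test driver prefer xmlrunner (which writes JUnit-style XML reports into logs/, presumably for CI consumption) while still running with only the standard library. A self-contained sketch of the same fallback, with a hypothetical run_suite wrapper:

    import unittest

    try:
        from xmlrunner import XMLTestRunner as TestRunner   # optional dependency
        test_args = {'output': 'logs'}
    except ImportError:
        from unittest import TextTestRunner as TestRunner   # stdlib fallback
        test_args = {}

    def run_suite(suite, verbosity=1):
        # Hypothetical wrapper mirroring the changed code: 0 on success, 1 on failure.
        runner = TestRunner(verbosity=verbosity, **test_args)
        result = runner.run(suite)
        return 1 if result.failures or result.errors else 0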
sasmodels/models/core_multi_shell.c
rf7930be → rabdd01c

@@ -182,8 +182,8 @@
     if (r == r0) {
         // no thickness, so nothing to add
-    } else if (fabs(A[i]) < 1e-16 || sld_out[i] == sld_in[i]) {
+    } else if (fabs(A[i]) < 1.0e-16 || sld_out[i] == sld_in[i]) {
         f -= f_constant(q, r0, sld_in[i]);
         f += f_constant(q, r, sld_in[i]);
-    } else if (fabs(A[i]) < 1e-4) {
+    } else if (fabs(A[i]) < 1.0e-4) {
         const double slope = (sld_out[i] - sld_in[i])/thickness[i];
         f -= f_linear(q, r0, sld_in[i], slope);
sasmodels/models/core_shell_parallelepiped.c
r44bd2be → rabdd01c

@@ -48,7 +48,5 @@
     double a_scaled = a_side / b_side;
     double c_scaled = c_side / b_side;
-    double arim_scaled = arim_thickness / b_side;
-    double brim_scaled = brim_thickness / b_side;
-
 
+    // DelRho values (note that drC is not used later)
     double dr0 = core_sld-solvent_sld;
sasmodels/models/elliptical_cylinder.c
r43b7eea → rabdd01c

@@ -87,5 +87,5 @@
 
     const double vol = form_volume(r_minor, r_ratio, length);
-    return answer*vol*vol*1e-4;
+    return answer*vol*vol*1.0e-4;
 }
 
sasmodels/models/onion.c
rce896fd → r639c4e3

@@ -80,8 +80,8 @@
     if (r == r0) {
         // no thickness, so nothing to add
-    } else if (fabs(A[i]) < 1e-16 || sld_out[i] == sld_in[i]) {
+    } else if (fabs(A[i]) < 1.0e-16 || sld_out[i] == sld_in[i]) {
         f -= f_constant(q, r0, sld_in[i]);
         f += f_constant(q, r, sld_in[i]);
-    } else if (fabs(A[i]) < 1e-4) {
+    } else if (fabs(A[i]) < 1.0e-4) {
         const double slope = (sld_out[i] - sld_in[i])/thickness[i];
         f -= f_linear(q, r0, sld_in[i], slope);
sasmodels/models/rpa.c
rd2bb604 → r639c4e3

@@ -17,13 +17,13 @@
     Phi[0]=Phi[1]=0.0000001;
     Kab=Kac=Kad=Kbc=Kbd=-0.0004;
-    L[0]=L[1]=1e-12;
-    v[0]=v[1]=100.0;
-    b[0]=b[1]=5.0;
+    La=Lb=1.0e-12;
+    va=vb=100.0;
+    ba=bb=5.0;
   } else if (icase <= 4) {
     Phi[0]=0.0000001;
     Kab=Kac=Kad=-0.0004;
-    L[0]=1e-12;
-    v[0]=100.0;
-    b[0]=5.0;
+    La=1.0e-12;
+    va=100.0;
+    ba=5.0;
   }
 #else
setup.py
rf903f0a → re1454ab

@@ -1,3 +1,7 @@
+import os
 from setuptools import setup,find_packages
+
+# Create the model .so's
+os.system("python gen_so.py")
 
 packages = find_packages(exclude=['contrib', 'docs', 'tests*'])
@@ -5,4 +9,5 @@
     'sasmodels.models': ['*.c','lib/*.c'],
     'sasmodels': ['*.c'],
+    'sasmodels.models.dll': ['*.so'],
 }
 required = []
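gen_so.py itself is not shown in this changeset (it is presumably among the 12 added files), so the following is only a guess at its shape: a script that precompiles every model to a shared library so that the .so files can be bundled by the new 'sasmodels.models.dll' package_data entry. It assumes sasmodels is importable and uses list_models() and precompile_dll() from sasmodels.core.

    # Hypothetical sketch of a gen_so.py-style precompile step; not the real file.
    from sasmodels.core import list_models, precompile_dll

    def build_all_dlls(dtype="double"):
        for name in list_models():
            try:
                print("built", precompile_dll(name, dtype=dtype))
            except Exception as exc:
                print("skipped %s: %s" % (name, exc))

    if __name__ == "__main__":
        build_all_dlls()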
.gitignore
r4a82d4d → rf2f67a6

@@ -4,5 +4,4 @@
 /*.csv
 *.pyc
-*.cl
 *.so
 *.obj
doc/developer/index.rst
r56fc97a → rb85be2d

@@ -3,3 +3,7 @@
 ***************************
 
+.. toctree::
+   :numbered: 4
+   :maxdepth: 4
 
+   calculator.rst
doc/genapi.py
r3d5c6f8 → ra5b8477

@@ -59,19 +59,28 @@
     #('alignment', 'GPU data alignment [unused]'),
     ('bumps_model', 'Bumps interface'),
+    ('compare', 'Compare models on different compute engines'),
     ('convert', 'Sasview to sasmodel converter'),
     ('core', 'Model access'),
+    ('data', 'Data layout and plotting routines'),
+    ('details', 'Parameter packing for kernel calls'),
     ('direct_model', 'Simple interface'),
     ('exception', 'Annotate exceptions'),
+    #('frozendict', 'Freeze a dictionary to make it immutable'),
     ('generate', 'Model parser'),
+    ('kernel', 'Evaluator type definitions'),
     ('kernelcl', 'OpenCL model evaluator'),
     ('kerneldll', 'Ctypes model evaluator'),
     ('kernelpy', 'Python model evaluator'),
+    ('list_pars', 'Identify all parameters in all models'),
+    ('mixture', 'Mixture model evaluator'),
     ('model_test', 'Unit test support'),
+    ('modelinfo', 'Parameter and model definitions'),
+    ('product', 'Product model evaluator'),
     ('resolution', '1-D resolution functions'),
     ('resolution2d', '2-D resolution functions'),
     ('sasview_model', 'Sasview interface'),
-    ('sesans', 'SESANS model evaluator'),
+    ('sesans', 'SESANS calculation routines'),
+    #('transition', 'Model stepper for automatic model selection'),
     ('weights', 'Distribution functions'),
-    #('transition', 'Model stepper for automatic model selection'),
 ]
 package='sasmodels'
doc/genmodel.py
r89f4163 ra5b8477 1 from __future__ import print_function 2 1 3 import sys, os, math, re 2 4 import numpy as np 3 5 import matplotlib.pyplot as plt 4 import pylab5 6 sys.path.insert(0, os.path.abspath('..')) 6 7 from sasmodels import generate, core … … 8 9 from sasmodels.data import empty_data1D, empty_data2D 9 10 10 11 # Convert ../sasmodels/models/name.py to name 12 model_name = os.path.basename(sys.argv[1])[:-3] 13 model_info = core.load_model_info(model_name) 14 model = core.build_model(model_info) 15 16 # Load the doc string from the module definition file and store it in rst 17 docstr = generate.make_doc(model_info) 18 19 20 # Calculate 1D curve for default parameters 21 pars = dict((p.name, p.default) for p in model_info['parameters']) 22 23 # Plotting ranges and options 24 opts = { 25 'xscale' : 'log', 26 'yscale' : 'log' if not model_info['structure_factor'] else 'linear', 27 'zscale' : 'log' if not model_info['structure_factor'] else 'linear', 28 'q_min' : 0.001, 29 'q_max' : 1.0, 30 'nq' : 1000, 31 'nq2d' : 1000, 32 'vmin' : 1e-3, # floor for the 2D data results 33 'qx_max' : 0.5, 34 #'colormap' : 'gist_ncar', 35 'colormap' : 'nipy_spectral', 36 #'colormap' : 'jet', 37 } 38 11 try: 12 from typing import Dict, Any 13 except ImportError: 14 pass 15 else: 16 from matplotlib.axes import Axes 17 from sasmodels.kernel import KernelModel 18 from sasmodels.modelinfo import ModelInfo 39 19 40 20 def plot_1d(model, opts, ax): 21 # type: (KernelModel, Dict[str, Any], Axes) -> None 22 """ 23 Create a 1-D image. 24 """ 41 25 q_min, q_max, nq = opts['q_min'], opts['q_max'], opts['nq'] 42 26 q_min = math.log10(q_min) … … 47 31 Iq1D = calculator() 48 32 49 ax.plot(q, Iq1D, color='blue', lw=2, label=model _info['name'])33 ax.plot(q, Iq1D, color='blue', lw=2, label=model.info.name) 50 34 ax.set_xlabel(r'$Q \/(\AA^{-1})$') 51 35 ax.set_ylabel(r'$I(Q) \/(\mathrm{cm}^{-1})$') … … 55 39 56 40 def plot_2d(model, opts, ax): 41 # type: (KernelModel, Dict[str, Any], Axes) -> None 42 """ 43 Create a 2-D image. 
44 """ 57 45 qx_max, nq2d = opts['qx_max'], opts['nq2d'] 58 q = np.linspace(-qx_max, qx_max, nq2d) 46 q = np.linspace(-qx_max, qx_max, nq2d) # type: np.ndarray 59 47 data2d = empty_data2D(q, resolution=0.0) 60 48 calculator = DirectModel(data2d, model) … … 64 52 Iq2D = np.log(np.clip(Iq2D, opts['vmin'], np.inf)) 65 53 ax.imshow(Iq2D, interpolation='nearest', aspect=1, origin='lower', 66 extent=[-qx_max, qx_max, -qx_max, qx_max], cmap=opts['colormap'])54 extent=[-qx_max, qx_max, -qx_max, qx_max], cmap=opts['colormap']) 67 55 ax.set_xlabel(r'$Q_x \/(\AA^{-1})$') 68 56 ax.set_ylabel(r'$Q_y \/(\AA^{-1})$') 69 57 70 # Generate image 71 fig_height = 3.0 # in 72 fig_left = 0.6 # in 73 fig_right = 0.5 # in 74 fig_top = 0.6*0.25 # in 75 fig_bottom = 0.6*0.75 76 if model_info['has_2d']: 77 plot_height = fig_height - (fig_top+fig_bottom) 78 plot_width = plot_height 79 fig_width = 2*(plot_width + fig_left + fig_right) 80 aspect = (fig_width, fig_height) 81 ratio = aspect[0]/aspect[1] 82 ax_left = fig_left/fig_width 83 ax_bottom = fig_bottom/fig_height 84 ax_height = plot_height/fig_height 85 ax_width = ax_height/ratio # square axes 86 fig = plt.figure(figsize=aspect) 87 ax2d = fig.add_axes([0.5+ax_left, ax_bottom, ax_width, ax_height]) 88 plot_2d(model, opts, ax2d) 89 ax1d = fig.add_axes([ax_left, ax_bottom, ax_width, ax_height]) 90 plot_1d(model, opts, ax1d) 91 #ax.set_aspect('square') 92 else: 93 plot_height = fig_height - (fig_top+fig_bottom) 94 plot_width = (1+np.sqrt(5))/2*fig_height 95 fig_width = plot_width + fig_left + fig_right 96 ax_left = fig_left/fig_width 97 ax_bottom = fig_bottom/fig_height 98 ax_width = plot_width/fig_width 99 ax_height = plot_height/fig_height 100 aspect = (fig_width, fig_height) 101 fig = plt.figure(figsize=aspect) 102 ax1d = fig.add_axes([ax_left, ax_bottom, ax_width, ax_height]) 103 plot_1d(model, opts, ax1d) 58 def figfile(model_info): 59 # type: (ModelInfo) -> str 60 return model_info.id + '_autogenfig.png' 104 61 105 # Save image in model/img 106 figname = model_name + '_autogenfig.png' 107 filename = os.path.join('model', 'img', figname) 108 plt.savefig(filename, bbox_inches='tight') 109 #print "figure saved in",filename 62 def make_figure(model_info, opts): 63 # type: (ModelInfo, Dict[str, Any]) -> None 64 """ 65 Generate the figure file to include in the docs. 66 """ 67 model = core.build_model(model_info) 110 68 111 # Auto caption for figure 112 captionstr = '\n' 113 captionstr += '.. 
figure:: img/' + model_info['id'] + '_autogenfig.png\n' 114 captionstr += '\n' 115 if model_info['has_2d']: 116 captionstr += ' 1D and 2D plots corresponding to the default parameters of the model.\n' 117 else: 118 captionstr += ' 1D plot corresponding to the default parameters of the model.\n' 119 captionstr += '\n' 69 fig_height = 3.0 # in 70 fig_left = 0.6 # in 71 fig_right = 0.5 # in 72 fig_top = 0.6*0.25 # in 73 fig_bottom = 0.6*0.75 74 if model_info.parameters.has_2d: 75 plot_height = fig_height - (fig_top+fig_bottom) 76 plot_width = plot_height 77 fig_width = 2*(plot_width + fig_left + fig_right) 78 aspect = (fig_width, fig_height) 79 ratio = aspect[0]/aspect[1] 80 ax_left = fig_left/fig_width 81 ax_bottom = fig_bottom/fig_height 82 ax_height = plot_height/fig_height 83 ax_width = ax_height/ratio # square axes 84 fig = plt.figure(figsize=aspect) 85 ax2d = fig.add_axes([0.5+ax_left, ax_bottom, ax_width, ax_height]) 86 plot_2d(model, opts, ax2d) 87 ax1d = fig.add_axes([ax_left, ax_bottom, ax_width, ax_height]) 88 plot_1d(model, opts, ax1d) 89 #ax.set_aspect('square') 90 else: 91 plot_height = fig_height - (fig_top+fig_bottom) 92 plot_width = (1+np.sqrt(5))/2*fig_height 93 fig_width = plot_width + fig_left + fig_right 94 ax_left = fig_left/fig_width 95 ax_bottom = fig_bottom/fig_height 96 ax_width = plot_width/fig_width 97 ax_height = plot_height/fig_height 98 aspect = (fig_width, fig_height) 99 fig = plt.figure(figsize=aspect) 100 ax1d = fig.add_axes([ax_left, ax_bottom, ax_width, ax_height]) 101 plot_1d(model, opts, ax1d) 120 102 121 # Add figure reference and caption to documentation (at end, before References) 122 pattern = '\*\*REFERENCE' 123 m = re.search(pattern, docstr.upper()) 103 # Save image in model/img 104 path = os.path.join('model', 'img', figfile(model_info)) 105 plt.savefig(path, bbox_inches='tight') 106 #print("figure saved in",path) 124 107 125 if m: 126 docstr1 = docstr[:m.start()] 127 docstr2 = docstr[m.start():] 128 docstr = docstr1 + captionstr + docstr2 129 else: 130 print '------------------------------------------------------------------' 131 print 'References NOT FOUND for model: ', model_info['id'] 132 print '------------------------------------------------------------------' 133 docstr = docstr + captionstr 108 def gen_docs(model_info): 109 # type: (ModelInfo) -> None 110 """ 111 Generate the doc string with the figure inserted before the references. 112 """ 134 113 135 open(sys.argv[2],'w').write(docstr) 114 # Load the doc string from the module definition file and store it in rst 115 docstr = generate.make_doc(model_info) 116 117 # Auto caption for figure 118 captionstr = '\n' 119 captionstr += '.. 
figure:: img/' + figfile(model_info) + '\n' 120 captionstr += '\n' 121 if model_info.parameters.has_2d: 122 captionstr += ' 1D and 2D plots corresponding to the default parameters of the model.\n' 123 else: 124 captionstr += ' 1D plot corresponding to the default parameters of the model.\n' 125 captionstr += '\n' 126 127 # Add figure reference and caption to documentation (at end, before References) 128 pattern = '\*\*REFERENCE' 129 match = re.search(pattern, docstr.upper()) 130 131 if match: 132 docstr1 = docstr[:match.start()] 133 docstr2 = docstr[match.start():] 134 docstr = docstr1 + captionstr + docstr2 135 else: 136 print('------------------------------------------------------------------') 137 print('References NOT FOUND for model: ', model_info.id) 138 print('------------------------------------------------------------------') 139 docstr += captionstr 140 141 open(sys.argv[2],'w').write(docstr) 142 143 def process_model(path): 144 # type: (str) -> None 145 """ 146 Generate doc file and image file for the given model definition file. 147 """ 148 149 # Load the model file 150 model_name = os.path.basename(path)[:-3] 151 model_info = core.load_model_info(model_name) 152 153 # Plotting ranges and options 154 opts = { 155 'xscale' : 'log', 156 'yscale' : 'log' if not model_info.structure_factor else 'linear', 157 'zscale' : 'log' if not model_info.structure_factor else 'linear', 158 'q_min' : 0.001, 159 'q_max' : 1.0, 160 'nq' : 1000, 161 'nq2d' : 1000, 162 'vmin' : 1e-3, # floor for the 2D data results 163 'qx_max' : 0.5, 164 #'colormap' : 'gist_ncar', 165 'colormap' : 'nipy_spectral', 166 #'colormap' : 'jet', 167 } 168 169 # Generate the RST file and the figure. Order doesn't matter. 170 gen_docs(model_info) 171 make_figure(model_info, opts) 172 173 if __name__ == "__main__": 174 process_model(sys.argv[1]) -
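One step worth calling out in gen_docs() above is how the auto-generated figure caption is spliced into the model docstring just ahead of its **References** section. A stripped-down sketch of that splice, using a stand-in docstring rather than a real model's documentation:

    import re

    docstr = "Model description...\n\n**References**\n\nSome paper, 1995.\n"
    caption = ("\n.. figure:: img/sphere_autogenfig.png\n\n"
               "    1D plot corresponding to the default parameters of the model.\n\n")
    match = re.search(r'\*\*REFERENCE', docstr.upper())
    if match:
        docstr = docstr[:match.start()] + caption + docstr[match.start():]
    else:
        docstr += caption   # no references section; append the figure at the end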
doc/gentoc.py
r5041682 → ra5b8477

@@ -9,4 +9,10 @@
 from sasmodels.core import load_model_info
 
+try:
+    from typing import Optional, BinaryIO, List, Dict
+except ImportError:
+    pass
+else:
+    from sasmodels.modelinfo import ModelInfo
 
 TEMPLATE="""\
@@ -27,4 +33,5 @@
 
 def _make_category(category_name, label, title, parent=None):
+    # type: (str, str, str, Optional[BinaryIO]) -> BinaryIO
     file = open(joinpath(MODEL_TOC_PATH, category_name+".rst"), "w")
     file.write(TEMPLATE%{'label':label, 'title':title, 'bar':'*'*len(title)})
@@ -34,10 +41,13 @@
 
 def _add_subcategory(category_name, parent):
+    # type: (str, BinaryIO) -> None
     parent.write(" %s.rst\n"%category_name)
 
 def _add_model(file, model_name):
+    # type: (IO[str], str) -> None
     file.write(" ../../model/%s.rst\n"%model_name)
 
 def _maybe_make_category(category, models, cat_files, model_toc):
+    # type: (str, List[str], Dict[str, BinaryIO], BinaryIO) -> None
     if category not in cat_files:
         print("Unexpected category %s containing"%category, models, file=sys.stderr)
@@ -46,9 +56,11 @@
 
 def generate_toc(model_files):
+    # type: (List[str]) -> None
     if not model_files:
         print("gentoc needs a list of model files", file=sys.stderr)
 
     # find all categories
-    category = {}
+    category = {} # type: Dict[str, List[str]]
     for item in model_files:
         # assume model is in sasmodels/models/name.py, and ignore the full path
@@ -56,8 +67,8 @@
         if model_name.startswith('_'): continue
         model_info = load_model_info(model_name)
-        if model_info['category'] is None:
+        if model_info.category is None:
             print("Missing category for", item, file=sys.stderr)
         else:
-            category.setdefault(model_info['category'],[]).append(model_name)
+            category.setdefault(model_info.category,[]).append(model_name)
 
     # Check category names
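The guarded typing import above is the convention used throughout this merge: type information is written as comments so the modules still run on Python 2, while mypy (or any PEP 484 checker) can read the annotations when typing is available. A minimal self-contained example of the idiom; first_or_default is a made-up function, not part of sasmodels:

    try:
        from typing import List, Optional
    except ImportError:
        pass

    def first_or_default(items, default=None):
        # type: (List[str], Optional[str]) -> Optional[str]
        """Return the first item, or *default* when *items* is empty."""
        return items[0] if items else default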
sasmodels/alignment.py
r3c56da87 → r7ae2b7f

@@ -18,5 +18,5 @@
 to decide whether it is really required.
 """
-import numpy as np
+import numpy as np # type: ignore
 
 def align_empty(shape, dtype, alignment=128):
sasmodels/bumps_model.py
rea75043 r04dc697 11 11 12 12 """ 13 14 import warnings 15 16 import numpy as np 13 from __future__ import print_function 14 15 __all__ = [ "Model", "Experiment" ] 16 17 import numpy as np # type: ignore 17 18 18 19 from .data import plot_theory 19 20 from .direct_model import DataMixin 20 21 21 __all__ = [ 22 "Model", "Experiment", 23 ] 24 25 # CRUFT: old style bumps wrapper which doesn't separate data and model 26 # pylint: disable=invalid-name 27 def BumpsModel(data, model, cutoff=1e-5, **kw): 28 r""" 29 Bind a model to data, along with a polydispersity cutoff. 30 31 *data* is a :class:`data.Data1D`, :class:`data.Data2D` or 32 :class:`data.Sesans` object. Use :func:`data.empty_data1D` or 33 :func:`data.empty_data2D` to define $q, \Delta q$ calculation 34 points for displaying the SANS curve when there is no measured data. 35 36 *model* is a runnable module as returned from :func:`core.load_model`. 37 38 *cutoff* is the polydispersity weight cutoff. 39 40 Any additional *key=value* pairs are model dependent parameters. 41 42 Returns an :class:`Experiment` object. 43 44 Note that the usual Bumps semantics is not fully supported, since 45 assigning *M.name = parameter* on the returned experiment object 46 does not set that parameter in the model. Range setting will still 47 work as expected though. 48 49 .. deprecated:: 0.1 50 Use :class:`Experiment` instead. 51 """ 52 warnings.warn("Use of BumpsModel is deprecated. Use bumps_model.Experiment instead.") 53 54 # Create the model and experiment 55 model = Model(model, **kw) 56 experiment = Experiment(data=data, model=model, cutoff=cutoff) 57 58 # Copy the model parameters up to the experiment object. 59 for k, v in model.parameters().items(): 60 setattr(experiment, k, v) 61 return experiment 22 try: 23 from typing import Dict, Union, Tuple, Any 24 from .data import Data1D, Data2D 25 from .kernel import KernelModel 26 from .modelinfo import ModelInfo 27 Data = Union[Data1D, Data2D] 28 except ImportError: 29 pass 30 31 try: 32 # Optional import. This allows the doc builder and nosetests to run even 33 # when bumps is not on the path. 34 from bumps.names import Parameter # type: ignore 35 except ImportError: 36 pass 62 37 63 38 64 39 def create_parameters(model_info, **kwargs): 40 # type: (ModelInfo, **Union[float, str, Parameter]) -> Tuple[Dict[str, Parameter], Dict[str, str]] 65 41 """ 66 42 Generate Bumps parameters from the model info. … … 71 47 Any additional *key=value* pairs are initial values for the parameters 72 48 to the models. Uninitialized parameters will use the model default 73 value. 49 value. The value can be a float, a bumps parameter, or in the case 50 of the distribution type parameter, a string. 74 51 75 52 Returns a dictionary of *{name: Parameter}* containing the bumps … … 77 54 *{name: str}* containing the polydispersity distribution types. 78 55 """ 79 # lazy import; this allows the doc builder and nosetests to run even 80 # when bumps is not on the path. 
81 from bumps.names import Parameter 82 83 pars = {} 84 for p in model_info['parameters']: 56 pars = {} # type: Dict[str, Parameter] 57 pd_types = {} # type: Dict[str, str] 58 for p in model_info.parameters.call_parameters: 85 59 value = kwargs.pop(p.name, p.default) 86 60 pars[p.name] = Parameter.default(value, name=p.name, limits=p.limits) 87 for name in model_info['partype']['pd-2d']: 88 for xpart, xdefault, xlimits in [ 89 ('_pd', 0., pars[name].limits), 90 ('_pd_n', 35., (0, 1000)), 91 ('_pd_nsigma', 3., (0, 10)), 92 ]: 93 xname = name + xpart 94 xvalue = kwargs.pop(xname, xdefault) 95 pars[xname] = Parameter.default(xvalue, name=xname, limits=xlimits) 96 97 pd_types = {} 98 for name in model_info['partype']['pd-2d']: 99 xname = name + '_pd_type' 100 xvalue = kwargs.pop(xname, 'gaussian') 101 pd_types[xname] = xvalue 61 if p.polydisperse: 62 for part, default, limits in [ 63 ('_pd', 0., pars[p.name].limits), 64 ('_pd_n', 35., (0, 1000)), 65 ('_pd_nsigma', 3., (0, 10)), 66 ]: 67 name = p.name + part 68 value = kwargs.pop(name, default) 69 pars[name] = Parameter.default(value, name=name, limits=limits) 70 name = p.name + '_pd_type' 71 pd_types[name] = str(kwargs.pop(name, 'gaussian')) 102 72 103 73 if kwargs: # args not corresponding to parameters … … 118 88 """ 119 89 def __init__(self, model, **kwargs): 120 self._sasmodel = model 90 # type: (KernelModel, **Dict[str, Union[float, Parameter]]) -> None 91 self.sasmodel = model 121 92 pars, pd_types = create_parameters(model.info, **kwargs) 122 93 for k, v in pars.items(): … … 128 99 129 100 def parameters(self): 101 # type: () -> Dict[str, Parameter] 130 102 """ 131 103 Return a dictionary of parameters objects for the parameters, … … 135 107 136 108 def state(self): 109 # type: () -> Dict[str, Union[Parameter, str]] 137 110 """ 138 111 Return a dictionary of current values for all the parameters, … … 159 132 The resulting model can be used directly in a Bumps FitProblem call. 160 133 """ 134 _cache = None # type: Dict[str, np.ndarray] 161 135 def __init__(self, data, model, cutoff=1e-5): 162 136 # type: (Data, Model, float) -> None 163 137 # remember inputs so we can inspect from outside 164 138 self.model = model 165 139 self.cutoff = cutoff 166 self._interpret_data(data, model. _sasmodel)167 self. update()140 self._interpret_data(data, model.sasmodel) 141 self._cache = {} 168 142 169 143 def update(self): 144 # type: () -> None 170 145 """ 171 146 Call when model parameters have changed and theory needs to be 172 147 recalculated. 173 148 """ 174 self._cache = {}149 self._cache.clear() 175 150 176 151 def numpoints(self): 152 # type: () -> float 177 153 """ 178 154 Return the number of data points … … 181 157 182 158 def parameters(self): 159 # type: () -> Dict[str, Parameter] 183 160 """ 184 161 Return a dictionary of parameters … … 187 164 188 165 def theory(self): 166 # type: () -> np.ndarray 189 167 """ 190 168 Return the theory corresponding to the model parameters. … … 199 177 200 178 def residuals(self): 179 # type: () -> np.ndarray 201 180 """ 202 181 Return theory minus data normalized by uncertainty. 
… … 206 185 207 186 def nllf(self): 187 # type: () -> float 208 188 """ 209 189 Return the negative log likelihood of seeing data given the model … … 213 193 delta = self.residuals() 214 194 #if np.any(np.isnan(R)): print("NaN in residuals") 215 return 0.5 * np.sum(delta **2)195 return 0.5 * np.sum(delta**2) 216 196 217 197 #def __call__(self): … … 219 199 220 200 def plot(self, view='log'): 201 # type: (str) -> None 221 202 """ 222 203 Plot the data and residuals. … … 226 207 227 208 def simulate_data(self, noise=None): 209 # type: (float) -> None 228 210 """ 229 211 Generate simulated data. … … 233 215 234 216 def save(self, basename): 217 # type: (str) -> None 235 218 """ 236 219 Save the model parameters and data into a file. … … 243 226 244 227 def __getstate__(self): 228 # type: () -> Dict[str, Any] 245 229 # Can't pickle gpu functions, so instead make them lazy 246 230 state = self.__dict__.copy() … … 249 233 250 234 def __setstate__(self, state): 235 # type: (Dict[str, Any]) -> None 251 236 # pylint: disable=attribute-defined-outside-init 252 237 self.__dict__ = state 238 -
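Putting the reworked bumps_model API together, a hedged usage sketch: the data file name and parameter values below are invented, but the calls (load_model, Model, Experiment, FitProblem) follow the interfaces shown in this diff, and attributes such as model.radius are the bumps Parameters produced by create_parameters().

    from bumps.names import FitProblem
    from sasmodels.core import load_model
    from sasmodels.data import load_data
    from sasmodels.bumps_model import Model, Experiment

    kernel = load_model("cylinder")
    model = Model(kernel, radius=20, length=300, sld=4, sld_solvent=1)
    model.radius.range(1, 50)            # bumps Parameter created by create_parameters()
    data = load_data("cylinder_data.txt")          # made-up file name
    problem = FitProblem(Experiment(data=data, model=model))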
sasmodels/compare.py
r8749d5b9 rf2f67a6 34 34 import traceback 35 35 36 import numpy as np 36 import numpy as np # type: ignore 37 37 38 38 from . import core 39 39 from . import kerneldll 40 from . import product41 40 from .data import plot_theory, empty_data1D, empty_data2D 42 41 from .direct_model import DirectModel 43 42 from .convert import revert_name, revert_pars, constrain_new_to_old 43 44 try: 45 from typing import Optional, Dict, Any, Callable, Tuple 46 except: 47 pass 48 else: 49 from .modelinfo import ModelInfo, Parameter, ParameterSet 50 from .data import Data 51 Calculator = Callable[[float], np.ndarray] 44 52 45 53 USAGE = """ … … 98 106 kerneldll.ALLOW_SINGLE_PRECISION_DLLS = True 99 107 100 MODELS = core.list_models()101 102 108 # CRUFT python 2.6 103 109 if not hasattr(datetime.timedelta, 'total_seconds'): … … 161 167 ... print(randint(0,1000000,3)) 162 168 ... raise Exception() 163 ... except :169 ... except Exception: 164 170 ... print("Exception raised") 165 171 ... print(randint(0,1000000)) … … 170 176 """ 171 177 def __init__(self, seed=None): 178 # type: (Optional[int]) -> None 172 179 self._state = np.random.get_state() 173 180 np.random.seed(seed) 174 181 175 182 def __enter__(self): 176 return None 177 178 def __exit__(self, *args): 183 # type: () -> None 184 pass 185 186 def __exit__(self, type, value, traceback): 187 # type: (Any, BaseException, Any) -> None 188 # TODO: better typing for __exit__ method 179 189 np.random.set_state(self._state) 180 190 181 191 def tic(): 192 # type: () -> Callable[[], float] 182 193 """ 183 194 Timer function. … … 191 202 192 203 def set_beam_stop(data, radius, outer=None): 204 # type: (Data, float, float) -> None 193 205 """ 194 206 Add a beam stop of the given *radius*. If *outer*, make an annulus. 195 207 196 Note: this function does not use the sasview package208 Note: this function does not require sasview 197 209 """ 198 210 if hasattr(data, 'qx_data'): … … 208 220 209 221 def parameter_range(p, v): 222 # type: (str, float) -> Tuple[float, float] 210 223 """ 211 224 Choose a parameter range based on parameter name and initial value. 212 225 """ 226 # process the polydispersity options 213 227 if p.endswith('_pd_n'): 214 return [0, 100]228 return 0., 100. 215 229 elif p.endswith('_pd_nsigma'): 216 return [0, 5]230 return 0., 5. 217 231 elif p.endswith('_pd_type'): 218 r eturn v232 raise ValueError("Cannot return a range for a string value") 219 233 elif any(s in p for s in ('theta', 'phi', 'psi')): 220 234 # orientation in [-180,180], orientation pd in [0,45] 221 235 if p.endswith('_pd'): 222 return [0, 45]236 return 0., 45. 223 237 else: 224 return [-180, 180] 238 return -180., 180. 239 elif p.endswith('_pd'): 240 return 0., 1. 225 241 elif 'sld' in p: 226 return [-0.5, 10] 227 elif p.endswith('_pd'): 228 return [0, 1] 242 return -0.5, 10. 229 243 elif p == 'background': 230 return [0, 10]244 return 0., 10. 231 245 elif p == 'scale': 232 return [0, 1e3] 233 elif p == 'case_num': 234 # RPA hack 235 return [0, 10] 236 elif v < 0: 237 # Kxy parameters in rpa model can be negative 238 return [2*v, -2*v] 246 return 0., 1.e3 247 elif v < 0.: 248 return 2.*v, -2.*v 239 249 else: 240 return [0, (2*v if v > 0 else 1)] 241 242 243 def _randomize_one(p, v): 250 return 0., (2.*v if v > 0. else 1.) 251 252 253 def _randomize_one(model_info, p, v): 254 # type: (ModelInfo, str, float) -> float 255 # type: (ModelInfo, str, str) -> str 244 256 """ 245 257 Randomize a single parameter. 
… … 247 259 if any(p.endswith(s) for s in ('_pd_n', '_pd_nsigma', '_pd_type')): 248 260 return v 261 262 # Find the parameter definition 263 for par in model_info.parameters.call_parameters: 264 if par.name == p: 265 break 249 266 else: 250 return np.random.uniform(*parameter_range(p, v)) 251 252 253 def randomize_pars(pars, seed=None): 267 raise ValueError("unknown parameter %r"%p) 268 269 if len(par.limits) > 2: # choice list 270 return np.random.randint(len(par.limits)) 271 272 limits = par.limits 273 if not np.isfinite(limits).all(): 274 low, high = parameter_range(p, v) 275 limits = (max(limits[0], low), min(limits[1], high)) 276 277 return np.random.uniform(*limits) 278 279 280 def randomize_pars(model_info, pars, seed=None): 281 # type: (ModelInfo, ParameterSet, int) -> ParameterSet 254 282 """ 255 283 Generate random values for all of the parameters. … … 262 290 with push_seed(seed): 263 291 # Note: the sort guarantees order `of calls to random number generator 264 pars = dict((p, _randomize_one(p, v))265 for p, v in sorted(pars.items()))266 return pars292 random_pars = dict((p, _randomize_one(model_info, p, v)) 293 for p, v in sorted(pars.items())) 294 return random_pars 267 295 268 296 def constrain_pars(model_info, pars): 297 # type: (ModelInfo, ParameterSet) -> None 269 298 """ 270 299 Restrict parameters to valid values. … … 273 302 which need to support within model constraints (cap radius more than 274 303 cylinder radius in this case). 275 """ 276 name = model_info['id'] 304 305 Warning: this updates the *pars* dictionary in place. 306 """ 307 name = model_info.id 277 308 # if it is a product model, then just look at the form factor since 278 309 # none of the structure factors need any constraints. … … 295 326 # Make sure phi sums to 1.0 296 327 if pars['case_num'] < 2: 297 pars['Phi a'] = 0.298 pars['Phi b'] = 0.328 pars['Phi1'] = 0. 329 pars['Phi2'] = 0. 299 330 elif pars['case_num'] < 5: 300 pars['Phi a'] = 0.301 total = sum(pars['Phi'+c] for c in ' abcd')302 for c in ' abcd':331 pars['Phi1'] = 0. 332 total = sum(pars['Phi'+c] for c in '1234') 333 for c in '1234': 303 334 pars['Phi'+c] /= total 304 335 305 336 def parlist(model_info, pars, is2d): 337 # type: (ModelInfo, ParameterSet, bool) -> str 306 338 """ 307 339 Format the parameter list for printing. 
308 340 """ 309 if is2d:310 exclude = lambda n: False311 else:312 partype = model_info['partype']313 par1d = set(partype['fixed-1d']+partype['pd-1d'])314 exclude = lambda n: n not in par1d315 341 lines = [] 316 for p in model_info['parameters']:317 if exclude(p.name): continue342 parameters = model_info.parameters 343 for p in parameters.user_parameters(pars, is2d): 318 344 fields = dict( 319 value=pars.get(p.name, p.default), 320 pd=pars.get(p.name+"_pd", 0.), 321 n=int(pars.get(p.name+"_pd_n", 0)), 322 nsigma=pars.get(p.name+"_pd_nsgima", 3.), 323 type=pars.get(p.name+"_pd_type", 'gaussian')) 345 value=pars.get(p.id, p.default), 346 pd=pars.get(p.id+"_pd", 0.), 347 n=int(pars.get(p.id+"_pd_n", 0)), 348 nsigma=pars.get(p.id+"_pd_nsgima", 3.), 349 pdtype=pars.get(p.id+"_pd_type", 'gaussian'), 350 ) 324 351 lines.append(_format_par(p.name, **fields)) 325 352 return "\n".join(lines) … … 327 354 #return "\n".join("%s: %s"%(p, v) for p, v in sorted(pars.items())) 328 355 329 def _format_par(name, value=0., pd=0., n=0, nsigma=3., type='gaussian'): 356 def _format_par(name, value=0., pd=0., n=0, nsigma=3., pdtype='gaussian'): 357 # type: (str, float, float, int, float, str) -> str 330 358 line = "%s: %g"%(name, value) 331 359 if pd != 0. and n != 0: 332 360 line += " +/- %g (%d points in [-%g,%g] sigma %s)"\ 333 % (pd, n, nsigma, nsigma, type)361 % (pd, n, nsigma, nsigma, pdtype) 334 362 return line 335 363 336 364 def suppress_pd(pars): 365 # type: (ParameterSet) -> ParameterSet 337 366 """ 338 367 Suppress theta_pd for now until the normalization is resolved. … … 347 376 348 377 def eval_sasview(model_info, data): 378 # type: (Modelinfo, Data) -> Calculator 349 379 """ 350 380 Return a model calculator using the pre-4.0 SasView models. … … 353 383 # import rather than the more obscure smear_selection not imported error 354 384 import sas 385 import sas.models 355 386 from sas.models.qsmearing import smear_selection 387 from sas.models.MultiplicationModel import MultiplicationModel 356 388 357 389 def get_model(name): 390 # type: (str) -> "sas.models.BaseComponent" 358 391 #print("new",sorted(_pars.items())) 359 sas =__import__('sas.models.' + name)392 __import__('sas.models.' + name) 360 393 ModelClass = getattr(getattr(sas.models, name, None), name, None) 361 394 if ModelClass is None: … … 364 397 365 398 # grab the sasview model, or create it if it is a product model 366 if model_info ['composition']:367 composition_type, parts = model_info ['composition']399 if model_info.composition: 400 composition_type, parts = model_info.composition 368 401 if composition_type == 'product': 369 from sas.models.MultiplicationModel import MultiplicationModel370 402 P, S = [get_model(revert_name(p)) for p in parts] 371 403 model = MultiplicationModel(P, S) … … 395 427 396 428 def calculator(**pars): 429 # type: (float, ...) -> np.ndarray 397 430 """ 398 431 Sasview calculator for model. … … 401 434 pars = revert_pars(model_info, pars) 402 435 for k, v in pars.items(): 403 parts= k.split('.') # polydispersity components404 if len( parts) == 2:405 model.dispersion[ parts[0]][parts[1]] = v436 name_attr = k.split('.') # polydispersity components 437 if len(name_attr) == 2: 438 model.dispersion[name_attr[0]][name_attr[1]] = v 406 439 else: 407 440 model.setParam(k, v) … … 423 456 } 424 457 def eval_opencl(model_info, data, dtype='single', cutoff=0.): 458 # type: (ModelInfo, Data, str, float) -> Calculator 425 459 """ 426 460 Return a model calculator using the OpenCL calculation engine. 
… … 437 471 438 472 def eval_ctypes(model_info, data, dtype='double', cutoff=0.): 473 # type: (ModelInfo, Data, str, float) -> Calculator 439 474 """ 440 475 Return a model calculator using the DLL calculation engine. 441 476 """ 442 if dtype == 'quad':443 dtype = 'longdouble'444 477 model = core.build_model(model_info, dtype=dtype, platform="dll") 445 478 calculator = DirectModel(data, model, cutoff=cutoff) … … 448 481 449 482 def time_calculation(calculator, pars, Nevals=1): 483 # type: (Calculator, ParameterSet, int) -> Tuple[np.ndarray, float] 450 484 """ 451 485 Compute the average calculation time over N evaluations. … … 455 489 """ 456 490 # initialize the code so time is more accurate 457 value = calculator(**suppress_pd(pars)) 491 if Nevals > 1: 492 calculator(**suppress_pd(pars)) 458 493 toc = tic() 459 for _ in range(max(Nevals, 1)): # make sure there is at least one eval 494 # make sure there is at least one eval 495 value = calculator(**pars) 496 for _ in range(Nevals-1): 460 497 value = calculator(**pars) 461 498 average_time = toc()*1000./Nevals 499 #print("I(q)",value) 462 500 return value, average_time 463 501 464 502 def make_data(opts): 503 # type: (Dict[str, Any]) -> Tuple[Data, np.ndarray] 465 504 """ 466 505 Generate an empty dataset, used with the model to set Q points … … 472 511 qmax, nq, res = opts['qmax'], opts['nq'], opts['res'] 473 512 if opts['is2d']: 474 data = empty_data2D(np.linspace(-qmax, qmax, nq), resolution=res) 513 q = np.linspace(-qmax, qmax, nq) # type: np.ndarray 514 data = empty_data2D(q, resolution=res) 475 515 data.accuracy = opts['accuracy'] 476 516 set_beam_stop(data, 0.0004) … … 489 529 490 530 def make_engine(model_info, data, dtype, cutoff): 531 # type: (ModelInfo, Data, str, float) -> Calculator 491 532 """ 492 533 Generate the appropriate calculation engine for the given datatype. … … 503 544 504 545 def _show_invalid(data, theory): 546 # type: (Data, np.ma.ndarray) -> None 547 """ 548 Display a list of the non-finite values in theory. 549 """ 505 550 if not theory.mask.any(): 506 551 return … … 508 553 if hasattr(data, 'x'): 509 554 bad = zip(data.x[theory.mask], theory[theory.mask]) 510 print(" *** ", ", ".join("I(%g)=%g"%(x, y) for x, y in bad))555 print(" *** ", ", ".join("I(%g)=%g"%(x, y) for x, y in bad)) 511 556 512 557 513 558 def compare(opts, limits=None): 559 # type: (Dict[str, Any], Optional[Tuple[float, float]]) -> Tuple[float, float] 514 560 """ 515 561 Preform a comparison using options from the command line. 
… … 526 572 data = opts['data'] 527 573 574 # silence the linter 575 base = opts['engines'][0] if Nbase else None 576 comp = opts['engines'][1] if Ncomp else None 577 base_time = comp_time = None 578 base_value = comp_value = resid = relerr = None 579 528 580 # Base calculation 529 581 if Nbase > 0: 530 base = opts['engines'][0]531 582 try: 532 base_ value, base_time = time_calculation(base, pars, Nbase)533 base_value = np.ma.masked_invalid(base_ value)583 base_raw, base_time = time_calculation(base, pars, Nbase) 584 base_value = np.ma.masked_invalid(base_raw) 534 585 print("%s t=%.2f ms, intensity=%.0f" 535 586 % (base.engine, base_time, base_value.sum())) … … 541 592 # Comparison calculation 542 593 if Ncomp > 0: 543 comp = opts['engines'][1]544 594 try: 545 comp_ value, comp_time = time_calculation(comp, pars, Ncomp)546 comp_value = np.ma.masked_invalid(comp_ value)595 comp_raw, comp_time = time_calculation(comp, pars, Ncomp) 596 comp_value = np.ma.masked_invalid(comp_raw) 547 597 print("%s t=%.2f ms, intensity=%.0f" 548 598 % (comp.engine, comp_time, comp_value.sum())) … … 555 605 if Nbase > 0 and Ncomp > 0: 556 606 resid = (base_value - comp_value) 557 relerr = resid/ comp_value607 relerr = resid/np.where(comp_value!=0., abs(comp_value), 1.0) 558 608 _print_stats("|%s-%s|" 559 609 % (base.engine, comp.engine) + (" "*(3+len(comp.engine))), … … 619 669 620 670 def _print_stats(label, err): 671 # type: (str, np.ma.ndarray) -> None 672 # work with trimmed data, not the full set 621 673 sorted_err = np.sort(abs(err.compressed())) 622 p50 = int((len( err)-1)*0.50)623 p98 = int((len( err)-1)*0.98)674 p50 = int((len(sorted_err)-1)*0.50) 675 p98 = int((len(sorted_err)-1)*0.98) 624 676 data = [ 625 677 "max:%.3e"%sorted_err[-1], 626 678 "median:%.3e"%sorted_err[p50], 627 679 "98%%:%.3e"%sorted_err[p98], 628 "rms:%.3e"%np.sqrt(np.mean( err**2)),629 "zero-offset:%+.3e"%np.mean( err),680 "rms:%.3e"%np.sqrt(np.mean(sorted_err**2)), 681 "zero-offset:%+.3e"%np.mean(sorted_err), 630 682 ] 631 683 print(label+" "+" ".join(data)) … … 656 708 657 709 def columnize(L, indent="", width=79): 710 # type: (List[str], str, int) -> str 658 711 """ 659 712 Format a list of strings into columns. … … 673 726 674 727 def get_pars(model_info, use_demo=False): 728 # type: (ModelInfo, bool) -> ParameterSet 675 729 """ 676 730 Extract demo parameters from the model definition. 677 731 """ 678 732 # Get the default values for the parameters 679 pars = dict((p.name, p.default) for p in model_info['parameters']) 680 681 # Fill in default values for the polydispersity parameters 682 for p in model_info['parameters']: 683 if p.type in ('volume', 'orientation'): 684 pars[p.name+'_pd'] = 0.0 685 pars[p.name+'_pd_n'] = 0 686 pars[p.name+'_pd_nsigma'] = 3.0 687 pars[p.name+'_pd_type'] = "gaussian" 733 pars = {} 734 for p in model_info.parameters.call_parameters: 735 parts = [('', p.default)] 736 if p.polydisperse: 737 parts.append(('_pd', 0.0)) 738 parts.append(('_pd_n', 0)) 739 parts.append(('_pd_nsigma', 3.0)) 740 parts.append(('_pd_type', "gaussian")) 741 for ext, val in parts: 742 if p.length > 1: 743 dict(("%s%d%s"%(p.id,k,ext), val) for k in range(p.length)) 744 else: 745 pars[p.id+ext] = val 688 746 689 747 # Plug in values given in demo 690 748 if use_demo: 691 pars.update(model_info ['demo'])749 pars.update(model_info.demo) 692 750 return pars 693 751 694 752 695 753 def parse_opts(): 754 # type: () -> Dict[str, Any] 696 755 """ 697 756 Parse command line options. 
… … 768 827 elif arg.startswith('-cutoff='): opts['cutoff'] = float(arg[8:]) 769 828 elif arg.startswith('-random='): opts['seed'] = int(arg[8:]) 770 elif arg == '-random': opts['seed'] = np.random.randint(1 e6)829 elif arg == '-random': opts['seed'] = np.random.randint(1000000) 771 830 elif arg == '-preset': opts['seed'] = -1 772 831 elif arg == '-mono': opts['mono'] = True … … 792 851 793 852 if len(engines) == 0: 794 engines.extend(['single', ' sasview'])853 engines.extend(['single', 'double']) 795 854 elif len(engines) == 1: 796 if engines[0][0] != ' sasview':797 engines.append(' sasview')855 if engines[0][0] != 'double': 856 engines.append('double') 798 857 else: 799 858 engines.append('single') … … 825 884 #pars.update(set_pars) # set value before random to control range 826 885 if opts['seed'] > -1: 827 pars = randomize_pars( pars, seed=opts['seed'])886 pars = randomize_pars(model_info, pars, seed=opts['seed']) 828 887 print("Randomize using -random=%i"%opts['seed']) 829 888 if opts['mono']: … … 865 924 866 925 def explore(opts): 926 # type: (Dict[str, Any]) -> None 867 927 """ 868 928 Explore the model using the Bumps GUI. 869 929 """ 870 import wx 871 from bumps.names import FitProblem 872 from bumps.gui.app_frame import AppFrame 930 import wx # type: ignore 931 from bumps.names import FitProblem # type: ignore 932 from bumps.gui.app_frame import AppFrame # type: ignore 873 933 874 934 problem = FitProblem(Explore(opts)) … … 891 951 """ 892 952 def __init__(self, opts): 893 from bumps.cli import config_matplotlib 953 # type: (Dict[str, Any]) -> None 954 from bumps.cli import config_matplotlib # type: ignore 894 955 from . import bumps_model 895 956 config_matplotlib() … … 897 958 model_info = opts['def'] 898 959 pars, pd_types = bumps_model.create_parameters(model_info, **opts['pars']) 960 # Initialize parameter ranges, fixing the 2D parameters for 1D data. 899 961 if not opts['is2d']: 900 active = [base + ext 901 for base in model_info['partype']['pd-1d'] 902 for ext in ['', '_pd', '_pd_n', '_pd_nsigma']] 903 active.extend(model_info['partype']['fixed-1d']) 904 for k in active: 905 v = pars[k] 906 v.range(*parameter_range(k, v.value)) 962 for p in model_info.parameters.user_parameters(is2d=False): 963 for ext in ['', '_pd', '_pd_n', '_pd_nsigma']: 964 k = p.name+ext 965 v = pars.get(k, None) 966 if v is not None: 967 v.range(*parameter_range(k, v.value)) 907 968 else: 908 969 for k, v in pars.items(): … … 914 975 915 976 def numpoints(self): 977 # type: () -> int 916 978 """ 917 979 Return the number of points. … … 920 982 921 983 def parameters(self): 984 # type: () -> Any # Dict/List hierarchy of parameters 922 985 """ 923 986 Return a dictionary of parameters. … … 926 989 927 990 def nllf(self): 991 # type: () -> float 928 992 """ 929 993 Return cost. … … 933 997 934 998 def plot(self, view='log'): 999 # type: (str) -> None 935 1000 """ 936 1001 Plot the data and residuals. … … 942 1007 if self.limits is None: 943 1008 vmin, vmax = limits 944 vmax = 1.3*vmax 945 vmin = vmax*1e-7 946 self.limits = vmin, vmax 1009 self.limits = vmax*1e-7, 1.3*vmax 947 1010 948 1011 949 1012 def main(): 1013 # type: () -> None 950 1014 """ 951 1015 Main program. -
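A usage note on push_seed, whose __exit__ signature is tightened above: it saves the global numpy RNG state on entry and restores it on exit, so seeding inside the block gives reproducible draws without disturbing later random numbers. A small sketch (the seed value is arbitrary):

    import numpy as np
    from sasmodels.compare import push_seed

    with push_seed(1):
        print(np.random.randint(0, 1000000, 3))   # same three values every run
    # outside the block the previous global RNG state is back in effect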
sasmodels/compare_many.py
rce346b6 → r7ae2b7f

@@ -17,8 +17,7 @@
 import traceback
 
-import numpy as np
+import numpy as np # type: ignore
 
 from . import core
-from . import generate
 from .compare import (MODELS, randomize_pars, suppress_pd, make_data,
                       make_engine, get_pars, columnize,
@@ -109,5 +108,5 @@
 
     if is_2d:
-        if not model_info['has_2d']:
+        if not model_info['parameters'].has_2d:
             print(',"1-D only"')
             return
@@ -125,7 +124,5 @@
     try:
         result = fn(**pars)
-    except KeyboardInterrupt:
-        raise
-    except:
+    except Exception:
         traceback.print_exc()
         print("when comparing %s for %d"%(name, seed))
@@ -246,5 +243,5 @@
         base = sys.argv[5]
         comp = sys.argv[6] if len(sys.argv) > 6 else "sasview"
-    except:
+    except Exception:
         traceback.print_exc()
         print_usage()
sasmodels/convert.json
rec45c4f → r6e7ff6d

@@ -602,4 +602,9 @@
         "RPAModel",
         {
+            "N1": "Na", "N2": "Nb", "N3": "Nc", "N4": "Nd",
+            "L1": "La", "L2": "Lb", "L3": "Lc", "L4": "Ld",
+            "v1": "va", "v2": "vb", "v3": "vc", "v4": "vd",
+            "b1": "ba", "b2": "bb", "b3": "bc", "b4": "bd",
+            "Phi1": "Phia", "Phi2": "Phib", "Phi3": "Phic", "Phi4": "Phid",
             "case_num": "lcase_n"
         }
@@ -620,7 +625,16 @@
         }
     ],
+    "_spherepy": [
+        "SphereModel",
+        {
+            "sld": "sldSph",
+            "radius": "radius",
+            "sld_solvent": "sldSolv"
+        }
+    ],
     "spherical_sld": [
         "SphereSLDModel",
         {
+            "n": "n_shells",
             "radius_core": "rad_core",
             "sld_solvent": "sld_solv"
sasmodels/convert.py
rf247314 r7ae2b7f 62 62 # but only if we haven't already byteified it 63 63 if isinstance(data, dict) and not ignore_dicts: 64 return { 65 _byteify(key, ignore_dicts=True): _byteify(value, ignore_dicts=True) 66 for key, value in data.iteritems() 67 } 64 return dict((_byteify(key, ignore_dicts=True), 65 _byteify(value, ignore_dicts=True)) 66 for key, value in data.items()) 68 67 # if it's anything else, return it in its original form 69 68 return data … … 153 152 def revert_name(model_info): 154 153 _read_conversion_table() 155 oldname, oldpars = CONVERSION_TABLE.get(model_info ['id'], [None, {}])154 oldname, oldpars = CONVERSION_TABLE.get(model_info.id, [None, {}]) 156 155 return oldname 157 156 158 157 def _get_old_pars(model_info): 159 158 _read_conversion_table() 160 oldname, oldpars = CONVERSION_TABLE.get(model_info ['id'], [None, {}])159 oldname, oldpars = CONVERSION_TABLE.get(model_info.id, [None, {}]) 161 160 return oldpars 162 161 … … 165 164 Convert model from new style parameter names to old style. 166 165 """ 167 if model_info ['composition']is not None:168 composition_type, parts = model_info ['composition']166 if model_info.composition is not None: 167 composition_type, parts = model_info.composition 169 168 if composition_type == 'product': 170 169 P, S = parts … … 180 179 181 180 # Note: update compare.constrain_pars to match 182 name = model_info ['id']183 if name in MODELS_WITHOUT_SCALE or model_info ['structure_factor']:181 name = model_info.id 182 if name in MODELS_WITHOUT_SCALE or model_info.structure_factor: 184 183 if oldpars.pop('scale', 1.0) != 1.0: 185 184 warnings.warn("parameter scale not used in sasview %s"%name) 186 if name in MODELS_WITHOUT_BACKGROUND or model_info ['structure_factor']:185 if name in MODELS_WITHOUT_BACKGROUND or model_info.structure_factor: 187 186 if oldpars.pop('background', 0.0) != 0.0: 188 187 warnings.warn("parameter background not used in sasview %s"%name) … … 206 205 elif name == 'rpa': 207 206 # convert scattering lengths from femtometers to centimeters 208 for p in "L a", "Lb", "Lc", "Ld":207 for p in "L1", "L2", "L3", "L4": 209 208 if p in oldpars: oldpars[p] *= 1e-13 210 209 elif name == 'core_shell_parallelepiped': … … 225 224 Restrict parameter values to those that will match sasview. 226 225 """ 227 name = model_info ['id']226 name = model_info.id 228 227 # Note: update convert.revert_model to match 229 if name in MODELS_WITHOUT_SCALE or model_info ['structure_factor']:228 if name in MODELS_WITHOUT_SCALE or model_info.structure_factor: 230 229 pars['scale'] = 1 231 if name in MODELS_WITHOUT_BACKGROUND or model_info ['structure_factor']:230 if name in MODELS_WITHOUT_BACKGROUND or model_info.structure_factor: 232 231 pars['background'] = 0 233 232 # sasview multiplies background by structure factor -
sasmodels/core.py
r4d76711 rcebbb5a 2 2 Core model handling routines. 3 3 """ 4 from __future__ import print_function 5 4 6 __all__ = [ 5 "list_models", "load_model _info", "precompile_dll",6 "build_model", " make_kernel", "call_kernel", "call_ER_VR",7 "list_models", "load_model", "load_model_info", 8 "build_model", "precompile_dll", 7 9 ] 8 10 9 from os.path import basename, dirname, join as joinpath , splitext11 from os.path import basename, dirname, join as joinpath 10 12 from glob import glob 11 13 12 import numpy as np 14 import numpy as np # type: ignore 13 15 14 from . import models15 from . import weights16 16 from . import generate 17 # TODO: remove circular references between product and core 18 # product uses call_ER/call_VR, core uses make_product_info/ProductModel 19 #from . import product 17 from . import modelinfo 18 from . import product 20 19 from . import mixture 21 20 from . import kernelpy … … 24 23 from . import kernelcl 25 24 HAVE_OPENCL = True 26 except :25 except Exception: 27 26 HAVE_OPENCL = False 28 27 29 28 try: 30 np.meshgrid([]) 31 meshgrid = np.meshgrid 32 except ValueError: 33 # CRUFT: np.meshgrid requires multiple vectors 34 def meshgrid(*args): 35 if len(args) > 1: 36 return np.meshgrid(*args) 37 else: 38 return [np.asarray(v) for v in args] 29 from typing import List, Union, Optional, Any 30 from .kernel import KernelModel 31 from .modelinfo import ModelInfo 32 except ImportError: 33 pass 34 39 35 40 36 # TODO: refactor composite model support … … 51 47 52 48 def list_models(): 49 # type: () -> List[str] 53 50 """ 54 51 Return the list of available models on the model path. … … 59 56 return available_models 60 57 61 def isstr(s): 62 """ 63 Return True if *s* is a string-like object. 64 """ 65 try: s + '' 66 except: return False 67 return True 68 69 def load_model(model_name, **kw): 58 def load_model(model_name, dtype=None, platform='ocl'): 59 # type: (str, str, str) -> KernelModel 70 60 """ 71 61 Load model info and build model. 62 63 *model_name* is the name of the model as used by :func:`load_model_info`. 64 Additional keyword arguments are passed directly to :func:`build_model`. 72 65 """ 73 return build_model(load_model_info(model_name), **kw) 66 return build_model(load_model_info(model_name), 67 dtype=dtype, platform=platform) 74 68 75 69 76 70 def load_model_info(model_name): 71 # type: (str) -> modelinfo.ModelInfo 77 72 """ 78 73 Load a model definition given the model name. … … 88 83 parts = model_name.split('*') 89 84 if len(parts) > 1: 90 from . import product91 # Note: currently have circular reference92 85 if len(parts) > 2: 93 86 raise ValueError("use P*S to apply structure factor S to model P") … … 96 89 97 90 kernel_module = generate.load_kernel_module(model_name) 98 return generate.make_model_info(kernel_module)91 return modelinfo.make_model_info(kernel_module) 99 92 100 93 101 94 def build_model(model_info, dtype=None, platform="ocl"): 95 # type: (modelinfo.ModelInfo, str, str) -> KernelModel 102 96 """ 103 97 Prepare the model for the default execution platform. … … 110 104 111 105 *dtype* indicates whether the model should use single or double precision 112 for the calculation. Any valid numpy single or double precision identifier 113 is valid, such as 'single', 'f', 'f32', or np.float32 for single, or 114 'double', 'd', 'f64' and np.float64 for double. If *None*, then use 115 'single' unless the model defines single=False. 106 for the calculation. Choices are 'single', 'double', 'quad', 'half', 107 or 'fast'. 
If *dtype* ends with '!', then force the use of the DLL rather 108 than OpenCL for the calculation. 116 109 117 110 *platform* should be "dll" to force the dll to be used for C models, 118 111 otherwise it uses the default "ocl". 119 112 """ 120 composition = model_info. get('composition', None)113 composition = model_info.composition 121 114 if composition is not None: 122 115 composition_type, parts = composition … … 131 124 raise ValueError('unknown mixture type %s'%composition_type) 132 125 126 # If it is a python model, return it immediately 127 if callable(model_info.Iq): 128 return kernelpy.PyModel(model_info) 129 133 130 ## for debugging: 134 131 ## 1. uncomment open().write so that the source will be saved next time … … 137 134 ## 4. rerun "python -m sasmodels.direct_model $MODELNAME" 138 135 ## 5. uncomment open().read() so that source will be regenerated from model 139 # open(model_info ['name']+'.c','w').write(source)140 # source = open(model_info ['name']+'.cl','r').read()136 # open(model_info.name+'.c','w').write(source) 137 # source = open(model_info.name+'.cl','r').read() 141 138 source = generate.make_source(model_info) 142 if dtype is None: 143 dtype = 'single' if model_info['single'] else 'double' 144 if callable(model_info.get('Iq', None)): 145 return kernelpy.PyModel(model_info) 139 numpy_dtype, fast = parse_dtype(model_info, dtype) 146 140 if (platform == "dll" 141 or (dtype is not None and dtype.endswith('!')) 147 142 or not HAVE_OPENCL 148 or not kernelcl.environment().has_type(dtype)): 149 return kerneldll.load_dll(source, model_info, dtype) 143 or not kernelcl.environment().has_type(numpy_dtype)): 144 #print("building dll", numpy_dtype) 145 return kerneldll.load_dll(source, model_info, numpy_dtype) 150 146 else: 151 return kernelcl.GpuModel(source, model_info, dtype) 147 #print("building ocl", numpy_dtype) 148 return kernelcl.GpuModel(source, model_info, numpy_dtype, fast=fast) 152 149 153 150 def precompile_dll(model_name, dtype="double"): 151 # type: (str, str) -> Optional[str] 154 152 """ 155 153 Precompile the dll for a model. … … 166 164 """ 167 165 model_info = load_model_info(model_name) 166 numpy_dtype, fast = parse_dtype(model_info, dtype) 168 167 source = generate.make_source(model_info) 169 return kerneldll.make_dll(source, model_info, dtype= dtype) if source else None168 return kerneldll.make_dll(source, model_info, dtype=numpy_dtype) if source else None 170 169 170 def parse_dtype(model_info, dtype): 171 # type: (ModelInfo, str) -> Tuple[np.dtype, bool] 172 """ 173 Interpret dtype string, returning np.dtype and fast flag. 171 174 172 def get_weights(model_info, pars, name): 175 Possible types include 'half', 'single', 'double' and 'quad'. If the 176 type is 'fast', then this is equivalent to dtype 'single' with the 177 fast flag set to True. 173 178 """ 174 Generate the distribution for parameter *name* given the parameter values 175 in *pars*. 179 # Fill in default type based on required precision in the model 180 if dtype is None: 181 dtype = 'single' if model_info.single else 'double' 176 182 177 Uses "name", "name_pd", "name_pd_type", "name_pd_n", "name_pd_sigma" 178 from the *pars* dictionary for parameter value and parameter dispersion. 
179 """ 180 relative = name in model_info['partype']['pd-rel'] 181 limits = model_info['limits'][name] 182 disperser = pars.get(name+'_pd_type', 'gaussian') 183 value = pars.get(name, model_info['defaults'][name]) 184 npts = pars.get(name+'_pd_n', 0) 185 width = pars.get(name+'_pd', 0.0) 186 nsigma = pars.get(name+'_pd_nsigma', 3.0) 187 value, weight = weights.get_weights( 188 disperser, npts, width, nsigma, value, limits, relative) 189 return value, weight / np.sum(weight) 183 # Ignore platform indicator 184 if dtype.endswith('!'): 185 dtype = dtype[:-1] 190 186 191 def dispersion_mesh(pars): 192 """ 193 Create a mesh grid of dispersion parameters and weights. 187 # Convert type string to type 188 if dtype == 'quad': 189 return generate.F128, False 190 elif dtype == 'half': 191 return generate.F16, False 192 elif dtype == 'fast': 193 return generate.F32, True 194 else: 195 return np.dtype(dtype), False 194 196 195 Returns [p1,p2,...],w where pj is a vector of values for parameter j196 and w is a vector containing the products for weights for each197 parameter set in the vector.198 """199 value, weight = zip(*pars)200 value = [v.flatten() for v in meshgrid(*value)]201 weight = np.vstack([v.flatten() for v in meshgrid(*weight)])202 weight = np.prod(weight, axis=0)203 return value, weight204 205 def call_kernel(kernel, pars, cutoff=0, mono=False):206 """207 Call *kernel* returned from *model.make_kernel* with parameters *pars*.208 209 *cutoff* is the limiting value for the product of dispersion weights used210 to perform the multidimensional dispersion calculation more quickly at a211 slight cost to accuracy. The default value of *cutoff=0* integrates over212 the entire dispersion cube. Using *cutoff=1e-5* can be 50% faster, but213 with an error of about 1%, which is usually less than the measurement214 uncertainty.215 216 *mono* is True if polydispersity should be set to none on all parameters.217 """218 fixed_pars = [pars.get(name, kernel.info['defaults'][name])219 for name in kernel.fixed_pars]220 if mono:221 pd_pars = [( np.array([pars[name]]), np.array([1.0]) )222 for name in kernel.pd_pars]223 else:224 pd_pars = [get_weights(kernel.info, pars, name) for name in kernel.pd_pars]225 return kernel(fixed_pars, pd_pars, cutoff=cutoff)226 227 def call_ER_VR(model_info, vol_pars):228 """229 Return effect radius and volume ratio for the model.230 231 *info* is either *kernel.info* for *kernel=make_kernel(model,q)*232 or *model.info*.233 234 *pars* are the parameters as expected by :func:`call_kernel`.235 """236 ER = model_info.get('ER', None)237 VR = model_info.get('VR', None)238 value, weight = dispersion_mesh(vol_pars)239 240 individual_radii = ER(*value) if ER else 1.0241 whole, part = VR(*value) if VR else (1.0, 1.0)242 243 effect_radius = np.sum(weight*individual_radii) / np.sum(weight)244 volume_ratio = np.sum(weight*part)/np.sum(weight*whole)245 return effect_radius, volume_ratio246 247 248 def call_ER(model_info, values):249 """250 Call the model ER function using *values*. 
*model_info* is either251 *model.info* if you have a loaded model, or *kernel.info* if you252 have a model kernel prepared for evaluation.253 """254 ER = model_info.get('ER', None)255 if ER is None:256 return 1.0257 else:258 vol_pars = [get_weights(model_info, values, name)259 for name in model_info['partype']['volume']]260 value, weight = dispersion_mesh(vol_pars)261 individual_radii = ER(*value)262 #print(values[0].shape, weights.shape, fv.shape)263 return np.sum(weight*individual_radii) / np.sum(weight)264 265 def call_VR(model_info, values):266 """267 Call the model VR function using *pars*.268 *info* is either *model.info* if you have a loaded model, or *kernel.info*269 if you have a model kernel prepared for evaluation.270 """271 VR = model_info.get('VR', None)272 if VR is None:273 return 1.0274 else:275 vol_pars = [get_weights(model_info, values, name)276 for name in model_info['partype']['volume']]277 value, weight = dispersion_mesh(vol_pars)278 whole, part = VR(*value)279 return np.sum(weight*part)/np.sum(weight*whole)280 281 # TODO: remove call_ER, call_VR282 -
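The dtype handling introduced above is easier to see outside the diff. The following sketch restates the convention from core.py: a trailing '!' only forces the DLL path, 'fast' means single precision with the relaxed build options, and the model's own *single* flag supplies the default. The function name and the *model_requires_double* argument are illustrative, not part of the sasmodels API, and the sketch folds the platform flag into the return value whereas *parse_dtype* itself merely strips it::

    import numpy as np

    def parse_dtype_sketch(dtype, model_requires_double=False):
        # Default precision comes from the model definition when none is given.
        if dtype is None:
            dtype = 'double' if model_requires_double else 'single'
        # A trailing '!' selects the DLL platform; it does not change precision.
        force_dll = dtype.endswith('!')
        if force_dll:
            dtype = dtype[:-1]
        # 'fast' is single precision with the fast/inaccurate compile options.
        if dtype == 'fast':
            return np.dtype('float32'), True, force_dll
        if dtype == 'quad':
            # float128 is unavailable on some platforms; the changeset itself
            # guards its F128 definition with a try/except for this reason.
            return np.dtype('float128'), False, force_dll
        names = {'half': 'float16', 'single': 'float32', 'double': 'float64'}
        return np.dtype(names.get(dtype, dtype)), False, force_dll

    print(parse_dtype_sketch('single!'))   # (dtype('float32'), False, True)
    print(parse_dtype_sketch('fast'))      # (dtype('float32'), True, False)
    print(parse_dtype_sketch(None, True))  # (dtype('float64'), False, False)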
sasmodels/custom/__init__.py
rcd0a808 r7ae2b7f
14 14 try:
15 15     # Python 3.5 and up
16        from importlib.util import spec_from_file_location, module_from_spec
   16     from importlib.util import spec_from_file_location, module_from_spec  # type: ignore
17 17     def load_module_from_path(fullname, path):
18 18         spec = spec_from_file_location(fullname, path)
-
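Only the first line of the Python 3.5 loader is visible in this hunk; the remainder of *load_module_from_path* presumably follows the standard importlib recipe. A self-contained sketch of that recipe (the body below is reconstructed from the importlib documentation, not copied from sasmodels, and the usage path is hypothetical)::

    from importlib.util import spec_from_file_location, module_from_spec

    def load_module_from_path(fullname, path):
        # Build a module spec from an arbitrary .py file, create the module
        # object, execute the file in its namespace, and return it.
        spec = spec_from_file_location(fullname, path)
        module = module_from_spec(spec)
        spec.loader.exec_module(module)
        return module

    # custom = load_module_from_path('sasmodels_custom_model', '/path/to/mymodel.py')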
sasmodels/data.py
re78edc4 rb151003 35 35 import traceback 36 36 37 import numpy as np 37 import numpy as np # type: ignore 38 39 try: 40 from typing import Union, Dict, List, Optional 41 except ImportError: 42 pass 43 else: 44 Data = Union["Data1D", "Data2D", "SesansData"] 38 45 39 46 def load_data(filename): 47 # type: (str) -> Data 40 48 """ 41 49 Load data using a sasview loader. 42 50 """ 43 from sas.sascalc.dataloader.loader import Loader 51 from sas.sascalc.dataloader.loader import Loader # type: ignore 44 52 loader = Loader() 45 53 data = loader.load(filename) … … 50 58 51 59 def set_beam_stop(data, radius, outer=None): 60 # type: (Data, float, Optional[float]) -> None 52 61 """ 53 62 Add a beam stop of the given *radius*. If *outer*, make an annulus. 54 63 """ 55 from sas.dataloader.manipulations import Ringcut 64 from sas.dataloader.manipulations import Ringcut # type: ignore 56 65 if hasattr(data, 'qx_data'): 57 66 data.mask = Ringcut(0, radius)(data) … … 65 74 66 75 def set_half(data, half): 76 # type: (Data, str) -> None 67 77 """ 68 78 Select half of the data, either "right" or "left". 69 79 """ 70 from sas.dataloader.manipulations import Boxcut 80 from sas.dataloader.manipulations import Boxcut # type: ignore 71 81 if half == 'right': 72 82 data.mask += \ … … 78 88 79 89 def set_top(data, cutoff): 90 # type: (Data, float) -> None 80 91 """ 81 92 Chop the top off the data, above *cutoff*. 82 93 """ 83 from sas.dataloader.manipulations import Boxcut 94 from sas.dataloader.manipulations import Boxcut # type: ignore 84 95 data.mask += \ 85 96 Boxcut(x_min=-np.inf, x_max=np.inf, y_min=-np.inf, y_max=cutoff)(data) … … 114 125 """ 115 126 def __init__(self, x=None, y=None, dx=None, dy=None): 127 # type: (Optional[np.ndarray], Optional[np.ndarray], Optional[np.ndarray], Optional[np.ndarray]) -> None 116 128 self.x, self.y, self.dx, self.dy = x, y, dx, dy 117 129 self.dxl = None … … 127 139 128 140 def xaxis(self, label, unit): 141 # type: (str, str) -> None 129 142 """ 130 143 set the x axis label and unit … … 134 147 135 148 def yaxis(self, label, unit): 149 # type: (str, str) -> None 136 150 """ 137 151 set the y axis label and unit … … 140 154 self._yunit = unit 141 155 142 156 class SesansData(Data1D): 157 def __init__(self, **kw): 158 Data1D.__init__(self, **kw) 159 self.lam = None # type: Optional[np.ndarray] 143 160 144 161 class Data2D(object): … … 175 192 """ 176 193 def __init__(self, x=None, y=None, z=None, dx=None, dy=None, dz=None): 194 # type: (Optional[np.ndarray], Optional[np.ndarray], Optional[np.ndarray], Optional[np.ndarray], Optional[np.ndarray], Optional[np.ndarray]) -> None 177 195 self.qx_data, self.dqx_data = x, dx 178 196 self.qy_data, self.dqy_data = y, dy … … 197 215 198 216 def xaxis(self, label, unit): 217 # type: (str, str) -> None 199 218 """ 200 219 set the x axis label and unit … … 204 223 205 224 def yaxis(self, label, unit): 225 # type: (str, str) -> None 206 226 """ 207 227 set the y axis label and unit … … 211 231 212 232 def zaxis(self, label, unit): 233 # type: (str, str) -> None 213 234 """ 214 235 set the y axis label and unit … … 223 244 """ 224 245 def __init__(self, x=None, y=None, z=None): 246 # type: (float, float, Optional[float]) -> None 225 247 self.x, self.y, self.z = x, y, z 226 248 … … 230 252 """ 231 253 def __init__(self, pixel_size=(None, None), distance=None): 254 # type: (Tuple[float, float], float) -> None 232 255 self.pixel_size = Vector(*pixel_size) 233 256 self.distance = distance … … 238 261 """ 239 262 def __init__(self): 263 # type: () 
-> None 240 264 self.wavelength = np.NaN 241 265 self.wavelength_unit = "A" … … 243 267 244 268 def empty_data1D(q, resolution=0.0): 269 # type: (np.ndarray, float) -> Data1D 245 270 """ 246 271 Create empty 1D data using the given *q* as the x value. … … 259 284 260 285 def empty_data2D(qx, qy=None, resolution=0.0): 286 # type: (np.ndarray, Optional[np.ndarray], float) -> Data2D 261 287 """ 262 288 Create empty 2D data using the given mesh. … … 272 298 Qx, Qy = np.meshgrid(qx, qy) 273 299 Qx, Qy = Qx.flatten(), Qy.flatten() 274 Iq = 100 * np.ones_like(Qx) 300 Iq = 100 * np.ones_like(Qx) # type: np.ndarray 275 301 dIq = np.sqrt(Iq) 276 302 if resolution != 0: … … 300 326 301 327 def plot_data(data, view='log', limits=None): 328 # type: (Data, str, Optional[Tuple[float, float]]) -> None 302 329 """ 303 330 Plot data loaded by the sasview loader. … … 323 350 def plot_theory(data, theory, resid=None, view='log', 324 351 use_data=True, limits=None, Iq_calc=None): 352 # type: (Data, Optional[np.ndarray], Optional[np.ndarray], str, bool, Optional[Tuple[float,float]], Optional[np.ndarray]) -> None 325 353 """ 326 354 Plot theory calculation. … … 337 365 *limits* sets the intensity limits on the plot; if None then the limits 338 366 are inferred from the data. 367 368 *Iq_calc* is the raw theory values without resolution smearing 339 369 """ 340 370 if hasattr(data, 'lam'): … … 348 378 349 379 def protect(fn): 380 # type: (Callable) -> Callable 350 381 """ 351 382 Decorator to wrap calls in an exception trapper which prints the … … 358 389 try: 359 390 return fn(*args, **kw) 360 except KeyboardInterrupt: 361 raise 362 except: 391 except Exception: 363 392 traceback.print_exc() 364 393 … … 369 398 def _plot_result1D(data, theory, resid, view, use_data, 370 399 limits=None, Iq_calc=None): 400 # type: (Data1D, Optional[np.ndarray], Optional[np.ndarray], str, bool, Optional[Tuple[float, float]], Optional[np.ndarray]) -> None 371 401 """ 372 402 Plot the data and residuals for 1D data. 373 403 """ 374 import matplotlib.pyplot as plt 375 from numpy.ma import masked_array, masked 404 import matplotlib.pyplot as plt # type: ignore 405 from numpy.ma import masked_array, masked # type: ignore 376 406 377 407 use_data = use_data and data.y is not None … … 448 478 @protect 449 479 def _plot_result_sesans(data, theory, resid, use_data, limits=None): 480 # type: (SesansData, Optional[np.ndarray], Optional[np.ndarray], bool, Optional[Tuple[float, float]]) -> None 450 481 """ 451 482 Plot SESANS results. 452 483 """ 453 import matplotlib.pyplot as plt 484 import matplotlib.pyplot as plt # type: ignore 454 485 use_data = use_data and data.y is not None 455 486 use_theory = theory is not None … … 458 489 459 490 if use_data or use_theory: 460 is_tof = np.any(data.lam!=data.lam[0])491 is_tof = (data.lam != data.lam[0]).any() 461 492 if num_plots > 1: 462 493 plt.subplot(1, num_plots, 1) 463 494 if use_data: 464 495 if is_tof: 465 plt.errorbar(data.x, np.log(data.y)/(data.lam*data.lam), yerr=data.dy/data.y/(data.lam*data.lam)) 496 plt.errorbar(data.x, np.log(data.y)/(data.lam*data.lam), 497 yerr=data.dy/data.y/(data.lam*data.lam)) 466 498 else: 467 499 plt.errorbar(data.x, data.y, yerr=data.dy) … … 491 523 @protect 492 524 def _plot_result2D(data, theory, resid, view, use_data, limits=None): 525 # type: (Data2D, Optional[np.ndarray], Optional[np.ndarray], str, bool, Optional[Tuple[float,float]]) -> None 493 526 """ 494 527 Plot the data and residuals for 2D data. 
495 528 """ 496 import matplotlib.pyplot as plt 529 import matplotlib.pyplot as plt # type: ignore 497 530 use_data = use_data and data.data is not None 498 531 use_theory = theory is not None … … 502 535 # Put theory and data on a common colormap scale 503 536 vmin, vmax = np.inf, -np.inf 537 target = None # type: Optional[np.ndarray] 504 538 if use_data: 505 539 target = data.data[~data.mask] … … 550 584 @protect 551 585 def _plot_2d_signal(data, signal, vmin=None, vmax=None, view='log'): 586 # type: (Data2D, np.ndarray, Optional[float], Optional[float], str) -> Tuple[float, float] 552 587 """ 553 588 Plot the target value for the data. This could be the data itself, … … 556 591 *scale* can be 'log' for log scale data, or 'linear'. 557 592 """ 558 import matplotlib.pyplot as plt 559 from numpy.ma import masked_array 593 import matplotlib.pyplot as plt # type: ignore 594 from numpy.ma import masked_array # type: ignore 560 595 561 596 image = np.zeros_like(data.qx_data) … … 591 626 592 627 def demo(): 628 # type: () -> None 593 629 """ 594 630 Load and plot a SAS dataset. … … 597 633 set_beam_stop(data, 0.004) 598 634 plot_data(data) 599 import matplotlib.pyplot as plt; plt.show() 635 import matplotlib.pyplot as plt # type: ignore 636 plt.show() 600 637 601 638 -
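Most of the data.py changes are comment-style type annotations, but the module also provides the *empty_data1D* and *empty_data2D* constructors whose signatures appear above. A minimal usage sketch, assuming sasmodels is importable and leaving the interpretation of *resolution* to the library::

    import numpy as np
    from sasmodels.data import empty_data1D, empty_data2D

    # 1-D data on 100 logarithmically spaced q points (1/Ang).
    data1d = empty_data1D(np.logspace(-3, -1, 100), resolution=0.05)

    # 2-D data on a 128 x 128 grid covering +/- 0.1 1/Ang in qx and qy.
    data2d = empty_data2D(np.linspace(-0.1, 0.1, 128))

    print(data1d.x.shape, data2d.qx_data.shape)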
sasmodels/direct_model.py
r4d76711 r0ff62d4 23 23 from __future__ import print_function 24 24 25 import numpy as np 26 27 from .core import call_kernel, call_ER_VR 28 from . import sesans 25 import numpy as np # type: ignore 26 27 # TODO: fix sesans module 28 from . import sesans # type: ignore 29 from . import weights 29 30 from . import resolution 30 31 from . import resolution2d 32 from . import kernel 33 34 try: 35 from typing import Optional, Dict, Tuple 36 except ImportError: 37 pass 38 else: 39 from .data import Data 40 from .kernel import Kernel, KernelModel 41 from .modelinfo import Parameter, ParameterSet 42 43 def call_kernel(calculator, pars, cutoff=0., mono=False): 44 # type: (Kernel, ParameterSet, float, bool) -> np.ndarray 45 """ 46 Call *kernel* returned from *model.make_kernel* with parameters *pars*. 47 48 *cutoff* is the limiting value for the product of dispersion weights used 49 to perform the multidimensional dispersion calculation more quickly at a 50 slight cost to accuracy. The default value of *cutoff=0* integrates over 51 the entire dispersion cube. Using *cutoff=1e-5* can be 50% faster, but 52 with an error of about 1%, which is usually less than the measurement 53 uncertainty. 54 55 *mono* is True if polydispersity should be set to none on all parameters. 56 """ 57 parameters = calculator.info.parameters 58 if mono: 59 active = lambda name: False 60 elif calculator.dim == '1d': 61 active = lambda name: name in parameters.pd_1d 62 elif calculator.dim == '2d': 63 active = lambda name: name in parameters.pd_2d 64 else: 65 active = lambda name: True 66 67 vw_pairs = [(get_weights(p, pars) if active(p.name) 68 else ([pars.get(p.name, p.default)], [])) 69 for p in parameters.call_parameters] 70 71 call_details, weights, values = kernel.build_details(calculator, vw_pairs) 72 return calculator(call_details, weights, values, cutoff) 73 74 def get_weights(parameter, values): 75 # type: (Parameter, Dict[str, float]) -> Tuple[np.ndarray, np.ndarray] 76 """ 77 Generate the distribution for parameter *name* given the parameter values 78 in *pars*. 79 80 Uses "name", "name_pd", "name_pd_type", "name_pd_n", "name_pd_sigma" 81 from the *pars* dictionary for parameter value and parameter dispersion. 
82 """ 83 value = float(values.get(parameter.name, parameter.default)) 84 relative = parameter.relative_pd 85 limits = parameter.limits 86 disperser = values.get(parameter.name+'_pd_type', 'gaussian') 87 npts = values.get(parameter.name+'_pd_n', 0) 88 width = values.get(parameter.name+'_pd', 0.0) 89 nsigma = values.get(parameter.name+'_pd_nsigma', 3.0) 90 if npts == 0 or width == 0: 91 return [value], [] 92 value, weight = weights.get_weights( 93 disperser, npts, width, nsigma, value, limits, relative) 94 return value, weight / np.sum(weight) 31 95 32 96 class DataMixin(object): … … 52 116 """ 53 117 def _interpret_data(self, data, model): 118 # type: (Data, KernelModel) -> None 54 119 # pylint: disable=attribute-defined-outside-init 55 120 … … 67 132 self.data_type = 'Iq' 68 133 69 partype = model.info['partype']70 71 134 if self.data_type == 'sesans': 72 135 … … 82 145 q_mono = sesans.make_all_q(data) 83 146 elif self.data_type == 'Iqxy': 84 if not partype['orientation'] and not partype['magnetic']:85 raise ValueError("not 2D without orientation or magnetic parameters")147 #if not model.info.parameters.has_2d: 148 # raise ValueError("not 2D without orientation or magnetic parameters") 86 149 q = np.sqrt(data.qx_data**2 + data.qy_data**2) 87 150 qmin = getattr(data, 'qmin', 1e-16) … … 154 217 155 218 def _set_data(self, Iq, noise=None): 219 # type: (np.ndarray, Optional[float]) -> None 156 220 # pylint: disable=attribute-defined-outside-init 157 221 if noise is not None: … … 171 235 172 236 def _calc_theory(self, pars, cutoff=0.0): 237 # type: (ParameterSet, float) -> np.ndarray 173 238 if self._kernel is None: 174 self._kernel = self._model.make_kernel(self._kernel_inputs) # pylint: disable=attribute-dedata_type239 self._kernel = self._model.make_kernel(self._kernel_inputs) 175 240 self._kernel_mono = (self._model.make_kernel(self._kernel_mono_inputs) 176 241 if self._kernel_mono_inputs else None) … … 206 271 """ 207 272 def __init__(self, data, model, cutoff=1e-5): 273 # type: (Data, KernelModel, float) -> None 208 274 self.model = model 209 275 self.cutoff = cutoff … … 212 278 213 279 def __call__(self, **pars): 280 # type: (**float) -> np.ndarray 214 281 return self._calc_theory(pars, cutoff=self.cutoff) 215 282 216 def ER_VR(self, **pars):217 """218 Compute the equivalent radius and volume ratio for the model.219 """220 return call_ER_VR(self.model.info, pars)221 222 283 def simulate_data(self, noise=None, **pars): 284 # type: (Optional[float], **float) -> None 223 285 """ 224 286 Generate simulated data for the model. … … 228 290 229 291 def main(): 292 # type: () -> None 230 293 """ 231 294 Program to evaluate a particular model at a set of q values. … … 243 306 try: 244 307 values = [float(v) for v in call.split(',')] 245 except :308 except Exception: 246 309 values = [] 247 310 if len(values) == 1: -
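The polydispersity keys documented in *get_weights* are easiest to see in an example. The sketch below builds a parameter set using that naming convention and evaluates it through the relocated *call_kernel*. The 'sphere' model, its parameter names, and the *load_model* entry point in sasmodels.core are assumptions not shown in this hunk::

    import numpy as np
    from sasmodels.core import load_model
    from sasmodels.direct_model import call_kernel

    pars = {
        'radius': 60.0,           # central value
        'radius_pd': 0.1,         # width (relative for size-type parameters)
        'radius_pd_n': 35,        # number of points in the dispersion integral
        'radius_pd_nsigma': 3.0,  # integration range in units of the width
        'radius_pd_type': 'gaussian',
        'sld': 1.0,
        'sld_solvent': 6.0,
    }

    model = load_model('sphere')
    kernel = model.make_kernel([np.logspace(-3, -1, 200)])
    Iq = call_kernel(kernel, pars, cutoff=1e-5)
    print(Iq.shape)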
sasmodels/generate.py
r81ec7c8 rae2b6b5 21 21 22 22 *VR(p1, p2, ...)* returns the volume ratio for core-shell style forms. 23 24 #define INVALID(v) (expr) returns False if v.parameter is invalid 25 for some parameter or other (e.g., v.bell_radius < v.radius). If 26 necessary, the expression can call a function. 23 27 24 28 These functions are defined in a kernel module .py script and an associated … … 65 69 The constructor code will generate the necessary vectors for computing 66 70 them with the desired polydispersity. 67 68 The available kernel parameters are defined as a list, with each parameter69 defined as a sublist with the following elements:70 71 *name* is the name that will be used in the call to the kernel72 function and the name that will be displayed to the user. Names73 should be lower case, with words separated by underscore. If74 acronyms are used, the whole acronym should be upper case.75 76 *units* should be one of *degrees* for angles, *Ang* for lengths,77 *1e-6/Ang^2* for SLDs.78 79 *default value* will be the initial value for the model when it80 is selected, or when an initial value is not otherwise specified.81 82 *limits = [lb, ub]* are the hard limits on the parameter value, used to83 limit the polydispersity density function. In the fit, the parameter limits84 given to the fit are the limits on the central value of the parameter.85 If there is polydispersity, it will evaluate parameter values outside86 the fit limits, but not outside the hard limits specified in the model.87 If there are no limits, use +/-inf imported from numpy.88 89 *type* indicates how the parameter will be used. "volume" parameters90 will be used in all functions. "orientation" parameters will be used91 in *Iqxy* and *Imagnetic*. "magnetic* parameters will be used in92 *Imagnetic* only. If *type* is the empty string, the parameter will93 be used in all of *Iq*, *Iqxy* and *Imagnetic*. "sld" parameters94 can automatically be promoted to magnetic parameters, each of which95 will have a magnitude and a direction, which may be different from96 other sld parameters.97 98 *description* is a short description of the parameter. This will99 be displayed in the parameter table and used as a tool tip for the100 parameter value in the user interface.101 102 71 The kernel module must set variables defining the kernel meta data: 103 72 … … 154 123 polydispersity values for tests. 155 124 156 An *model_info* dictionary is constructed from the kernel meta data and 157 returned to the caller. 158 159 The model evaluator, function call sequence consists of q inputs and the return vector, 160 followed by the loop value/weight vector, followed by the values for 161 the non-polydisperse parameters, followed by the lengths of the 162 polydispersity loops. To construct the call for 1D models, the 163 categories *fixed-1d* and *pd-1d* list the names of the parameters 164 of the non-polydisperse and the polydisperse parameters respectively. 165 Similarly, *fixed-2d* and *pd-2d* provide parameter names for 2D models. 166 The *pd-rel* category is a set of those parameters which give 167 polydispersitiy as a portion of the value (so a 10% length dispersity 168 would use a polydispersity value of 0.1) rather than absolute 169 dispersity such as an angle plus or minus 15 degrees. 170 171 The *volume* category lists the volume parameters in order for calls 172 to volume within the kernel (used for volume normalization) and for 173 calls to ER and VR for effective radius and volume ratio respectively. 
174 175 The *orientation* and *magnetic* categories list the orientation and 176 magnetic parameters. These are used by the sasview interface. The 177 blank category is for parameters such as scale which don't have any 178 other marking. 125 A :class:`modelinfo.ModelInfo` structure is constructed from the kernel meta 126 data and returned to the caller. 179 127 180 128 The doc string at the start of the kernel module will be used to … … 184 132 file systems are case-sensitive, so only use lower case characters for 185 133 file names and extensions. 186 187 188 The function :func:`make` loads the metadata from the module and returns189 the kernel source. The function :func:`make_doc` extracts the doc string190 and adds the parameter table to the top. The function :func:`model_sources`191 returns a list of files required by the model.192 134 193 135 Code follows the C99 standard with the following extensions and conditions:: … … 201 143 all double declarations may be converted to half, float, or long double 202 144 FLOAT_SIZE is the number of bytes in the converted variables 145 146 :func:`load_kernel_module` loads the model definition file and 147 :modelinfo:`make_model_info` parses it. :func:`make_source` 148 converts C-based model definitions to C source code, including the 149 polydispersity integral. :func:`model_sources` returns the list of 150 source files the model depends on, and :func:`timestamp` returns 151 the latest time stamp amongst the source files (so you can check if 152 the model needs to be rebuilt). 153 154 The function :func:`make_doc` extracts the doc string and adds the 155 parameter table to the top. *make_figure* in *sasmodels/doc/genmodel* 156 creates the default figure for the model. [These two sets of code 157 should mignrate into docs.py so docs can be updated in one place]. 203 158 """ 204 159 from __future__ import print_function 205 160 206 #TODO: identify model files which have changed since loading and reload them.207 161 #TODO: determine which functions are useful outside of generate 208 162 #__all__ = ["model_info", "make_doc", "make_source", "convert_type"] 209 163 210 import sys 211 from os.path import abspath, dirname, join as joinpath, exists, basename, \ 212 splitext 164 from os.path import abspath, dirname, join as joinpath, exists, getmtime 213 165 import re 214 166 import string 215 167 import warnings 216 from collections import namedtuple 217 218 import numpy as np 219 168 169 import numpy as np # type: ignore 170 171 from .modelinfo import Parameter 220 172 from .custom import load_custom_kernel_module 221 173 222 PARAMETER_FIELDS = ['name', 'units', 'default', 'limits', 'type', 'description'] 223 Parameter = namedtuple('Parameter', PARAMETER_FIELDS) 224 225 C_KERNEL_TEMPLATE_PATH = joinpath(dirname(__file__), 'kernel_template.c') 174 try: 175 from typing import Tuple, Sequence, Iterator, Dict 176 from .modelinfo import ModelInfo 177 except ImportError: 178 pass 179 180 TEMPLATE_ROOT = dirname(__file__) 226 181 227 182 F16 = np.dtype('float16') … … 232 187 except TypeError: 233 188 F128 = None 234 235 # Scale and background, which are parameters common to every form factor236 COMMON_PARAMETERS = [237 ["scale", "", 1, [0, np.inf], "", "Source intensity"],238 ["background", "1/cm", 1e-3, [0, np.inf], "", "Source background"],239 ]240 189 241 190 # Conversion from units defined in the parameter table for each model … … 284 233 285 234 def format_units(units): 235 # type: (str) -> str 286 236 """ 287 237 Convert units into ReStructured Text format. 
… … 290 240 291 241 def make_partable(pars): 242 # type: (List[Parameter]) -> str 292 243 """ 293 244 Generate the parameter table to include in the sphinx documentation. … … 320 271 321 272 def _search(search_path, filename): 273 # type: (List[str], str) -> str 322 274 """ 323 275 Find *filename* in *search_path*. … … 331 283 raise ValueError("%r not found in %s" % (filename, search_path)) 332 284 285 333 286 def model_sources(model_info): 287 # type: (ModelInfo) -> List[str] 334 288 """ 335 289 Return a list of the sources file paths for the module. 336 290 """ 337 search_path = [dirname(model_info ['filename']),291 search_path = [dirname(model_info.filename), 338 292 abspath(joinpath(dirname(__file__), 'models'))] 339 return [_search(search_path, f) for f in model_info['source']] 340 341 # Pragmas for enable OpenCL features. Be sure to protect them so that they 342 # still compile even if OpenCL is not present. 343 _F16_PRAGMA = """\ 344 #if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp16) 345 # pragma OPENCL EXTENSION cl_khr_fp16: enable 346 #endif 347 """ 348 349 _F64_PRAGMA = """\ 350 #if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp64) 351 # pragma OPENCL EXTENSION cl_khr_fp64: enable 352 #endif 353 """ 293 return [_search(search_path, f) for f in model_info.source] 294 295 def timestamp(model_info): 296 # type: (ModelInfo) -> int 297 """ 298 Return a timestamp for the model corresponding to the most recently 299 changed file or dependency. 300 301 Note that this does not look at the time stamps for the OpenCL header 302 information since that need not trigger a recompile of the DLL. 303 """ 304 source_files = (model_sources(model_info) 305 + model_templates() 306 + [model_info.filename]) 307 newest = max(getmtime(f) for f in source_files) 308 return newest 309 310 def model_templates(): 311 # type: () -> List[str] 312 # TODO: fails DRY; templates appear two places. 313 # should instead have model_info contain a list of paths 314 # Note: kernel_iq.cl is not on this list because changing it need not 315 # trigger a recompile of the dll. 316 return [joinpath(TEMPLATE_ROOT, filename) 317 for filename in ('kernel_header.c', 'kernel_iq.c')] 354 318 355 319 def convert_type(source, dtype): 320 # type: (str, np.dtype) -> str 356 321 """ 357 322 Convert code from double precision to the desired type. … … 362 327 if dtype == F16: 363 328 fbytes = 2 364 source = _ F16_PRAGMA + _convert_type(source, "half", "f")329 source = _convert_type(source, "half", "f") 365 330 elif dtype == F32: 366 331 fbytes = 4 … … 368 333 elif dtype == F64: 369 334 fbytes = 8 370 source = _F64_PRAGMA + source # Sourceis already double335 # no need to convert if it is already double 371 336 elif dtype == F128: 372 337 fbytes = 16 … … 378 343 379 344 def _convert_type(source, type_name, constant_flag): 345 # type: (str, str, str) -> str 380 346 """ 381 347 Replace 'double' with *type_name* in *source*, tagging floating point … … 396 362 397 363 def kernel_name(model_info, is_2d): 364 # type: (ModelInfo, bool) -> str 398 365 """ 399 366 Name of the exported kernel symbol. 400 367 """ 401 return model_info ['name']+ "_" + ("Iqxy" if is_2d else "Iq")368 return model_info.name + "_" + ("Iqxy" if is_2d else "Iq") 402 369 403 370 404 371 def indent(s, depth): 372 # type: (str, int) -> str 405 373 """ 406 374 Indent a string of text with *depth* additional spaces on each line. 
… … 411 379 412 380 413 LOOP_OPEN = """\ 414 for (int %(name)s_i=0; %(name)s_i < N%(name)s; %(name)s_i++) { 415 const double %(name)s = loops[2*(%(name)s_i%(offset)s)]; 416 const double %(name)s_w = loops[2*(%(name)s_i%(offset)s)+1];\ 381 _template_cache = {} # type: Dict[str, Tuple[int, str, str]] 382 def load_template(filename): 383 # type: (str) -> str 384 path = joinpath(TEMPLATE_ROOT, filename) 385 mtime = getmtime(path) 386 if filename not in _template_cache or mtime > _template_cache[filename][0]: 387 with open(path) as fid: 388 _template_cache[filename] = (mtime, fid.read(), path) 389 return _template_cache[filename][1] 390 391 392 _FN_TEMPLATE = """\ 393 double %(name)s(%(pars)s); 394 double %(name)s(%(pars)s) { 395 %(body)s 396 } 397 417 398 """ 418 def build_polydispersity_loops(pd_pars): 419 """ 420 Build polydispersity loops 421 422 Returns loop opening and loop closing 423 """ 424 depth = 4 425 offset = "" 426 loop_head = [] 427 loop_end = [] 428 for name in pd_pars: 429 subst = {'name': name, 'offset': offset} 430 loop_head.append(indent(LOOP_OPEN % subst, depth)) 431 loop_end.insert(0, (" "*depth) + "}") 432 offset += '+N' + name 433 depth += 2 434 return "\n".join(loop_head), "\n".join(loop_end) 435 436 C_KERNEL_TEMPLATE = None 399 400 def _gen_fn(name, pars, body): 401 # type: (str, List[Parameter], str) -> str 402 """ 403 Generate a function given pars and body. 404 405 Returns the following string:: 406 407 double fn(double a, double b, ...); 408 double fn(double a, double b, ...) { 409 .... 410 } 411 """ 412 par_decl = ', '.join(p.as_function_argument() for p in pars) if pars else 'void' 413 return _FN_TEMPLATE % {'name': name, 'body': body, 'pars': par_decl} 414 415 def _call_pars(prefix, pars): 416 # type: (str, List[Parameter]) -> List[str] 417 """ 418 Return a list of *prefix.parameter* from parameter items. 419 """ 420 return [p.as_call_reference(prefix) for p in pars] 421 422 _IQXY_PATTERN = re.compile("^((inline|static) )? *(double )? *Iqxy *([(]|$)", 423 flags=re.MULTILINE) 424 def _have_Iqxy(sources): 425 # type: (List[str]) -> bool 426 """ 427 Return true if any file defines Iqxy. 428 429 Note this is not a C parser, and so can be easily confused by 430 non-standard syntax. Also, it will incorrectly identify the following 431 as having Iqxy:: 432 433 /* 434 double Iqxy(qx, qy, ...) { ... fill this in later ... } 435 */ 436 437 If you want to comment out an Iqxy function, use // on the front of the 438 line instead. 439 """ 440 for code in sources: 441 if _IQXY_PATTERN.search(code): 442 return True 443 else: 444 return False 445 437 446 def make_source(model_info): 447 # type: (ModelInfo) -> str 438 448 """ 439 449 Generate the OpenCL/ctypes kernel from the module info. 440 450 441 Uses source files found in the given search path. 442 """ 443 if callable(model_info['Iq']): 444 return None 451 Uses source files found in the given search path. Returns None if this 452 is a pure python model, with no C source components. 453 """ 454 if callable(model_info.Iq): 455 raise ValueError("can't compile python model") 445 456 446 457 # TODO: need something other than volume to indicate dispersion parameters … … 453 464 # for computing volume even if we allow non-disperse volume parameters. 
454 465 455 # Load template 456 global C_KERNEL_TEMPLATE 457 if C_KERNEL_TEMPLATE is None: 458 with open(C_KERNEL_TEMPLATE_PATH) as fid: 459 C_KERNEL_TEMPLATE = fid.read() 460 461 # Load additional sources 462 source = [open(f).read() for f in model_sources(model_info)] 463 464 # Prepare defines 465 defines = [] 466 partype = model_info['partype'] 467 pd_1d = partype['pd-1d'] 468 pd_2d = partype['pd-2d'] 469 fixed_1d = partype['fixed-1d'] 470 fixed_2d = partype['fixed-1d'] 471 472 iq_parameters = [p.name 473 for p in model_info['parameters'][2:] # skip scale, background 474 if p.name in set(fixed_1d + pd_1d)] 475 iqxy_parameters = [p.name 476 for p in model_info['parameters'][2:] # skip scale, background 477 if p.name in set(fixed_2d + pd_2d)] 478 volume_parameters = [p.name 479 for p in model_info['parameters'] 480 if p.type == 'volume'] 481 482 # Fill in defintions for volume parameters 483 if volume_parameters: 484 defines.append(('VOLUME_PARAMETERS', 485 ','.join(volume_parameters))) 486 defines.append(('VOLUME_WEIGHT_PRODUCT', 487 '*'.join(p + '_w' for p in volume_parameters))) 488 489 # Generate form_volume function from body only 490 if model_info['form_volume'] is not None: 491 if volume_parameters: 492 vol_par_decl = ', '.join('double ' + p for p in volume_parameters) 493 else: 494 vol_par_decl = 'void' 495 defines.append(('VOLUME_PARAMETER_DECLARATIONS', 496 vol_par_decl)) 497 fn = """\ 498 double form_volume(VOLUME_PARAMETER_DECLARATIONS); 499 double form_volume(VOLUME_PARAMETER_DECLARATIONS) { 500 %(body)s 501 } 502 """ % {'body':model_info['form_volume']} 503 source.append(fn) 504 505 # Fill in definitions for Iq parameters 506 defines.append(('IQ_KERNEL_NAME', model_info['name'] + '_Iq')) 507 defines.append(('IQ_PARAMETERS', ', '.join(iq_parameters))) 508 if fixed_1d: 509 defines.append(('IQ_FIXED_PARAMETER_DECLARATIONS', 510 ', \\\n '.join('const double %s' % p for p in fixed_1d))) 511 if pd_1d: 512 defines.append(('IQ_WEIGHT_PRODUCT', 513 '*'.join(p + '_w' for p in pd_1d))) 514 defines.append(('IQ_DISPERSION_LENGTH_DECLARATIONS', 515 ', \\\n '.join('const int N%s' % p for p in pd_1d))) 516 defines.append(('IQ_DISPERSION_LENGTH_SUM', 517 '+'.join('N' + p for p in pd_1d))) 518 open_loops, close_loops = build_polydispersity_loops(pd_1d) 519 defines.append(('IQ_OPEN_LOOPS', 520 open_loops.replace('\n', ' \\\n'))) 521 defines.append(('IQ_CLOSE_LOOPS', 522 close_loops.replace('\n', ' \\\n'))) 523 if model_info['Iq'] is not None: 524 defines.append(('IQ_PARAMETER_DECLARATIONS', 525 ', '.join('double ' + p for p in iq_parameters))) 526 fn = """\ 527 double Iq(double q, IQ_PARAMETER_DECLARATIONS); 528 double Iq(double q, IQ_PARAMETER_DECLARATIONS) { 529 %(body)s 530 } 531 """ % {'body':model_info['Iq']} 532 source.append(fn) 533 534 # Fill in definitions for Iqxy parameters 535 defines.append(('IQXY_KERNEL_NAME', model_info['name'] + '_Iqxy')) 536 defines.append(('IQXY_PARAMETERS', ', '.join(iqxy_parameters))) 537 if fixed_2d: 538 defines.append(('IQXY_FIXED_PARAMETER_DECLARATIONS', 539 ', \\\n '.join('const double %s' % p for p in fixed_2d))) 540 if pd_2d: 541 defines.append(('IQXY_WEIGHT_PRODUCT', 542 '*'.join(p + '_w' for p in pd_2d))) 543 defines.append(('IQXY_DISPERSION_LENGTH_DECLARATIONS', 544 ', \\\n '.join('const int N%s' % p for p in pd_2d))) 545 defines.append(('IQXY_DISPERSION_LENGTH_SUM', 546 '+'.join('N' + p for p in pd_2d))) 547 open_loops, close_loops = build_polydispersity_loops(pd_2d) 548 defines.append(('IQXY_OPEN_LOOPS', 549 open_loops.replace('\n', ' \\\n'))) 
550 defines.append(('IQXY_CLOSE_LOOPS', 551 close_loops.replace('\n', ' \\\n'))) 552 if model_info['Iqxy'] is not None: 553 defines.append(('IQXY_PARAMETER_DECLARATIONS', 554 ', '.join('double ' + p for p in iqxy_parameters))) 555 fn = """\ 556 double Iqxy(double qx, double qy, IQXY_PARAMETER_DECLARATIONS); 557 double Iqxy(double qx, double qy, IQXY_PARAMETER_DECLARATIONS) { 558 %(body)s 559 } 560 """ % {'body':model_info['Iqxy']} 561 source.append(fn) 562 563 # Need to know if we have a theta parameter for Iqxy; it is not there 564 # for the magnetic sphere model, for example, which has a magnetic 565 # orientation but no shape orientation. 566 if 'theta' in pd_2d: 567 defines.append(('IQXY_HAS_THETA', '1')) 568 569 #for d in defines: print(d) 570 defines = '\n'.join('#define %s %s' % (k, v) for k, v in defines) 571 sources = '\n\n'.join(source) 572 return C_KERNEL_TEMPLATE % { 573 'DEFINES': defines, 574 'SOURCES': sources, 575 } 576 577 def categorize_parameters(pars): 578 """ 579 Build parameter categories out of the the parameter definitions. 580 581 Returns a dictionary of categories. 582 583 Note: these categories are subject to change, depending on the needs of 584 the UI and the needs of the kernel calling function. 585 586 The categories are as follows: 587 588 * *volume* list of volume parameter names 589 * *orientation* list of orientation parameters 590 * *magnetic* list of magnetic parameters 591 * *<empty string>* list of parameters that have no type info 592 593 Each parameter is in one and only one category. 594 595 The following derived categories are created: 596 597 * *fixed-1d* list of non-polydisperse parameters for 1D models 598 * *pd-1d* list of polydisperse parameters for 1D models 599 * *fixed-2d* list of non-polydisperse parameters for 2D models 600 * *pd-d2* list of polydisperse parameters for 2D models 601 """ 602 partype = { 603 'volume': [], 'orientation': [], 'magnetic': [], 'sld': [], '': [], 604 'fixed-1d': [], 'fixed-2d': [], 'pd-1d': [], 'pd-2d': [], 605 'pd-rel': set(), 606 } 607 608 for p in pars: 609 if p.type == 'volume': 610 partype['pd-1d'].append(p.name) 611 partype['pd-2d'].append(p.name) 612 partype['pd-rel'].add(p.name) 613 elif p.type == 'magnetic': 614 partype['fixed-2d'].append(p.name) 615 elif p.type == 'orientation': 616 partype['pd-2d'].append(p.name) 617 elif p.type in ('', 'sld'): 618 partype['fixed-1d'].append(p.name) 619 partype['fixed-2d'].append(p.name) 620 else: 621 raise ValueError("unknown parameter type %r" % p.type) 622 partype[p.type].append(p.name) 623 624 return partype 625 626 def process_parameters(model_info): 627 """ 628 Process parameter block, precalculating parameter details. 
629 """ 630 # convert parameters into named tuples 631 for p in model_info['parameters']: 632 if p[4] == '' and (p[0].startswith('sld') or p[0].endswith('sld')): 633 p[4] = 'sld' 634 # TODO: make sure all models explicitly label their sld parameters 635 #raise ValueError("%s.%s needs to be explicitly set to type 'sld'" %(model_info['id'], p[0])) 636 637 pars = [Parameter(*p) for p in model_info['parameters']] 638 # Fill in the derived attributes 639 model_info['parameters'] = pars 640 partype = categorize_parameters(pars) 641 model_info['limits'] = dict((p.name, p.limits) for p in pars) 642 model_info['partype'] = partype 643 model_info['defaults'] = dict((p.name, p.default) for p in pars) 644 if model_info.get('demo', None) is None: 645 model_info['demo'] = model_info['defaults'] 646 model_info['has_2d'] = partype['orientation'] or partype['magnetic'] 647 466 partable = model_info.parameters 467 468 # Identify parameters for Iq, Iqxy, Iq_magnetic and form_volume. 469 # Note that scale and volume are not possible types. 470 471 # Load templates and user code 472 kernel_header = load_template('kernel_header.c') 473 dll_code = load_template('kernel_iq.c') 474 ocl_code = load_template('kernel_iq.cl') 475 #ocl_code = load_template('kernel_iq_local.cl') 476 user_code = [open(f).read() for f in model_sources(model_info)] 477 478 # Build initial sources 479 source = [kernel_header] + user_code 480 481 # Make parameters for q, qx, qy so that we can use them in declarations 482 q, qx, qy = [Parameter(name=v) for v in ('q', 'qx', 'qy')] 483 # Generate form_volume function, etc. from body only 484 if isinstance(model_info.form_volume, str): 485 pars = partable.form_volume_parameters 486 source.append(_gen_fn('form_volume', pars, model_info.form_volume)) 487 if isinstance(model_info.Iq, str): 488 pars = [q] + partable.iq_parameters 489 source.append(_gen_fn('Iq', pars, model_info.Iq)) 490 if isinstance(model_info.Iqxy, str): 491 pars = [qx, qy] + partable.iqxy_parameters 492 source.append(_gen_fn('Iqxy', pars, model_info.Iqxy)) 493 494 # Define the parameter table 495 source.append("#define PARAMETER_TABLE \\") 496 source.append("\\\n".join(p.as_definition() 497 for p in partable.kernel_parameters)) 498 499 # Define the function calls 500 if partable.form_volume_parameters: 501 refs = _call_pars("_v.", partable.form_volume_parameters) 502 call_volume = "#define CALL_VOLUME(_v) form_volume(%s)" % (",".join(refs)) 503 else: 504 # Model doesn't have volume. We could make the kernel run a little 505 # faster by not using/transferring the volume normalizations, but 506 # the ifdef's reduce readability more than is worthwhile. 
507 call_volume = "#define CALL_VOLUME(v) 1.0" 508 source.append(call_volume) 509 510 refs = ["_q[_i]"] + _call_pars("_v.", partable.iq_parameters) 511 call_iq = "#define CALL_IQ(_q,_i,_v) Iq(%s)" % (",".join(refs)) 512 if _have_Iqxy(user_code): 513 # Call 2D model 514 refs = ["q[2*_i]", "q[2*_i+1]"] + _call_pars("_v.", partable.iqxy_parameters) 515 call_iqxy = "#define CALL_IQ(_q,_i,_v) Iqxy(%s)" % (",".join(refs)) 516 else: 517 # Call 1D model with sqrt(qx^2 + qy^2) 518 warnings.warn("Creating Iqxy = Iq(sqrt(qx^2 + qy^2))") 519 # still defined:: refs = ["q[i]"] + _call_pars("v", iq_parameters) 520 pars_sqrt = ["sqrt(_q[2*_i]*_q[2*_i]+_q[2*_i+1]*_q[2*_i+1])"] + refs[1:] 521 call_iqxy = "#define CALL_IQ(_q,_i,_v) Iq(%s)" % (",".join(pars_sqrt)) 522 523 # Fill in definitions for numbers of parameters 524 source.append("#define MAX_PD %s"%partable.max_pd) 525 source.append("#define NPARS %d"%partable.npars) 526 527 # TODO: allow mixed python/opencl kernels? 528 529 source.append("#if defined(USE_OPENCL)") 530 source.extend(_add_kernels(ocl_code, call_iq, call_iqxy, model_info.name)) 531 source.append("#else /* !USE_OPENCL */") 532 source.extend(_add_kernels(dll_code, call_iq, call_iqxy, model_info.name)) 533 source.append("#endif /* !USE_OPENCL */") 534 return '\n'.join(source) 535 536 def _add_kernels(kernel_code, call_iq, call_iqxy, name): 537 # type: (str, str, str, str) -> List[str] 538 source = [ 539 # define the Iq kernel 540 "#define KERNEL_NAME %s_Iq"%name, 541 call_iq, 542 kernel_code, 543 "#undef CALL_IQ", 544 "#undef KERNEL_NAME", 545 546 # define the Iqxy kernel from the same source with different #defines 547 "#define KERNEL_NAME %s_Iqxy"%name, 548 call_iqxy, 549 kernel_code, 550 "#undef CALL_IQ", 551 "#undef KERNEL_NAME", 552 ] 553 return source 648 554 649 555 def load_kernel_module(model_name): 556 # type: (str) -> module 557 """ 558 Return the kernel module named in *model_name*. 559 560 If the name ends in *.py* then load it as a custom model using 561 :func:`sasmodels.custom.load_custom_kernel_module`, otherwise 562 load it from :mod:`sasmodels.models`. 563 """ 650 564 if model_name.endswith('.py'): 651 565 kernel_module = load_custom_kernel_module(model_name) … … 657 571 658 572 659 def make_model_info(kernel_module):660 """661 Interpret the model definition file, categorizing the parameters.662 663 The module can be loaded with a normal python import statement if you664 know which module you need, or with __import__('sasmodels.model.'+name)665 if the name is in a string.666 667 The *model_info* structure contains the following fields:668 669 * *id* is the id of the kernel670 * *name* is the display name of the kernel671 * *filename* is the full path to the module defining the file (if any)672 * *title* is a short description of the kernel673 * *description* is a long description of the kernel (this doesn't seem674 very useful since the Help button on the model page brings you directly675 to the documentation page)676 * *docs* is the docstring from the module. Use :func:`make_doc` to677 * *category* specifies the model location in the docs678 * *parameters* is the model parameter table679 * *single* is True if the model allows single precision680 * *structure_factor* is True if the model is useable in a product681 * *defaults* is the *{parameter: value}* table built from the parameter682 description table.683 * *limits* is the *{parameter: [min, max]}* table built from the684 parameter description table.685 * *partypes* categorizes the model parameters. 
See686 :func:`categorize_parameters` for details.687 * *demo* contains the *{parameter: value}* map used in compare (and maybe688 for the demo plot, if plots aren't set up to use the default values).689 If *demo* is not given in the file, then the default values will be used.690 * *tests* is a set of tests that must pass691 * *source* is the list of library files to include in the C model build692 * *Iq*, *Iqxy*, *form_volume*, *ER*, *VR* and *sesans* are python functions693 implementing the kernel for the module, or None if they are not694 defined in python695 * *composition* is None if the model is independent, otherwise it is a696 tuple with composition type ('product' or 'mixture') and a list of697 *model_info* blocks for the composition objects. This allows us to698 build complete product and mixture models from just the info.699 700 """701 parameters = COMMON_PARAMETERS + kernel_module.parameters702 filename = abspath(kernel_module.__file__)703 kernel_id = splitext(basename(filename))[0]704 name = getattr(kernel_module, 'name', None)705 if name is None:706 name = " ".join(w.capitalize() for w in kernel_id.split('_'))707 model_info = dict(708 id=kernel_id, # string used to load the kernel709 filename=abspath(kernel_module.__file__),710 name=name,711 title=kernel_module.title,712 description=kernel_module.description,713 parameters=parameters,714 composition=None,715 docs=kernel_module.__doc__,716 category=getattr(kernel_module, 'category', None),717 single=getattr(kernel_module, 'single', True),718 structure_factor=getattr(kernel_module, 'structure_factor', False),719 control=getattr(kernel_module, 'control', None),720 demo=getattr(kernel_module, 'demo', None),721 source=getattr(kernel_module, 'source', []),722 tests=getattr(kernel_module, 'tests', []),723 )724 process_parameters(model_info)725 # Check for optional functions726 functions = "ER VR form_volume Iq Iqxy shape sesans".split()727 model_info.update((k, getattr(kernel_module, k, None)) for k in functions)728 return model_info729 573 730 574 section_marker = re.compile(r'\A(?P<first>[%s])(?P=first)*\Z' 731 575 %re.escape(string.punctuation)) 732 576 def _convert_section_titles_to_boldface(lines): 577 # type: (Sequence[str]) -> Iterator[str] 733 578 """ 734 579 Do the actual work of identifying and converting section headings. … … 752 597 753 598 def convert_section_titles_to_boldface(s): 599 # type: (str) -> str 754 600 """ 755 601 Use explicit bold-face rather than section headings so that the table of … … 762 608 763 609 def make_doc(model_info): 610 # type: (ModelInfo) -> str 764 611 """ 765 612 Return the documentation for the model. … … 767 614 Iq_units = "The returned value is scaled to units of |cm^-1| |sr^-1|, absolute scale." 768 615 Sq_units = "The returned value is a dimensionless structure factor, $S(q)$." 
769 docs = convert_section_titles_to_boldface(model_info['docs']) 770 subst = dict(id=model_info['id'].replace('_', '-'), 771 name=model_info['name'], 772 title=model_info['title'], 773 parameters=make_partable(model_info['parameters']), 774 returns=Sq_units if model_info['structure_factor'] else Iq_units, 616 docs = convert_section_titles_to_boldface(model_info.docs) 617 pars = make_partable(model_info.parameters.COMMON 618 + model_info.parameters.kernel_parameters) 619 subst = dict(id=model_info.id.replace('_', '-'), 620 name=model_info.name, 621 title=model_info.title, 622 parameters=pars, 623 returns=Sq_units if model_info.structure_factor else Iq_units, 775 624 docs=docs) 776 625 return DOC_HEADER % subst … … 778 627 779 628 def demo_time(): 629 # type: () -> None 780 630 """ 781 631 Show how long it takes to process a model. 782 632 """ 633 import datetime 634 from .modelinfo import make_model_info 783 635 from .models import cylinder 784 import datetime 636 785 637 tic = datetime.datetime.now() 786 638 make_source(make_model_info(cylinder)) … … 789 641 790 642 def main(): 643 # type: () -> None 791 644 """ 792 645 Program which prints the source produced by the model. 793 646 """ 647 import sys 648 from .modelinfo import make_model_info 649 794 650 if len(sys.argv) <= 1: 795 651 print("usage: python -m sasmodels.generate modelname") -
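The new source-generation path can be exercised directly, mirroring the *demo_time* and *main* helpers above; note that *make_model_info* now lives in sasmodels.modelinfo::

    from sasmodels.generate import make_source, make_doc
    from sasmodels.modelinfo import make_model_info
    from sasmodels.models import cylinder   # any built-in C model will do

    model_info = make_model_info(cylinder)
    print(make_source(model_info))   # kernel_header + user code + Iq/Iqxy kernels
    print(make_doc(model_info))      # reST docs with the generated parameter table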
sasmodels/kernelcl.py
r4d76711 rae2b6b5 48 48 harmless, albeit annoying. 49 49 """ 50 from __future__ import print_function 51 50 52 import os 51 53 import warnings 52 54 53 import numpy as np 55 import numpy as np # type: ignore 54 56 55 57 try: 56 import pyopencl as cl 58 #raise NotImplementedError("OpenCL not yet implemented for new kernel template") 59 import pyopencl as cl # type: ignore 57 60 # Ask OpenCL for the default context so that we know that one exists 58 61 cl.create_some_context(interactive=False) 59 except Exception as exc: 60 warnings.warn(str(exc)) 62 except Exception as ocl_exc: 63 warnings.warn(str(ocl_exc)) 64 del ocl_exc 61 65 raise RuntimeError("OpenCL not available") 62 66 63 from pyopencl import mem_flags as mf 64 from pyopencl.characterize import get_fast_inaccurate_build_options 67 from pyopencl import mem_flags as mf # type: ignore 68 from pyopencl.characterize import get_fast_inaccurate_build_options # type: ignore 65 69 66 70 from . import generate 71 from .kernel import KernelModel, Kernel 72 73 try: 74 from typing import Tuple, Callable, Any 75 from .modelinfo import ModelInfo 76 from .details import CallDetails 77 except ImportError: 78 pass 67 79 68 80 # The max loops number is limited by the amount of local memory available … … 75 87 76 88 89 # Pragmas for enable OpenCL features. Be sure to protect them so that they 90 # still compile even if OpenCL is not present. 91 _F16_PRAGMA = """\ 92 #if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp16) 93 # pragma OPENCL EXTENSION cl_khr_fp16: enable 94 #endif 95 """ 96 97 _F64_PRAGMA = """\ 98 #if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp64) 99 # pragma OPENCL EXTENSION cl_khr_fp64: enable 100 #endif 101 """ 102 103 77 104 ENV = None 78 105 def environment(): 106 # type: () -> "GpuEnvironment" 79 107 """ 80 108 Returns a singleton :class:`GpuEnvironment`. … … 88 116 89 117 def has_type(device, dtype): 118 # type: (cl.Device, np.dtype) -> bool 90 119 """ 91 120 Return true if device supports the requested precision. … … 101 130 102 131 def get_warp(kernel, queue): 132 # type: (cl.Kernel, cl.CommandQueue) -> int 103 133 """ 104 134 Return the size of an execution batch for *kernel* running on *queue*. … … 109 139 110 140 def _stretch_input(vector, dtype, extra=1e-3, boundary=32): 141 # type: (np.ndarray, np.dtype, float, int) -> np.ndarray 111 142 """ 112 143 Stretch an input vector to the correct boundary. … … 131 162 132 163 def compile_model(context, source, dtype, fast=False): 164 # type: (cl.Context, str, np.dtype, bool) -> cl.Program 133 165 """ 134 166 Build a model to run on the gpu. 
… … 142 174 raise RuntimeError("%s not supported for devices"%dtype) 143 175 144 source = generate.convert_type(source, dtype) 176 source_list = [generate.convert_type(source, dtype)] 177 178 if dtype == generate.F16: 179 source_list.insert(0, _F16_PRAGMA) 180 elif dtype == generate.F64: 181 source_list.insert(0, _F64_PRAGMA) 182 145 183 # Note: USE_SINCOS makes the intel cpu slower under opencl 146 184 if context.devices[0].type == cl.device_type.GPU: 147 source = "#define USE_SINCOS\n" + source185 source_list.insert(0, "#define USE_SINCOS\n") 148 186 options = (get_fast_inaccurate_build_options(context.devices[0]) 149 187 if fast else []) 188 source = "\n".join(source_list) 150 189 program = cl.Program(context, source).build(options=options) 151 190 return program … … 159 198 """ 160 199 def __init__(self): 200 # type: () -> None 161 201 # find gpu context 162 202 #self.context = cl.create_some_context() … … 177 217 178 218 def has_type(self, dtype): 219 # type: (np.dtype) -> bool 179 220 """ 180 221 Return True if all devices support a given type. 181 222 """ 182 dtype = generate.F32 if dtype == 'fast' else np.dtype(dtype)183 223 return any(has_type(d, dtype) 184 224 for context in self.context … … 186 226 187 227 def get_queue(self, dtype): 228 # type: (np.dtype) -> cl.CommandQueue 188 229 """ 189 230 Return a command queue for the kernels of type dtype. … … 194 235 195 236 def get_context(self, dtype): 237 # type: (np.dtype) -> cl.Context 196 238 """ 197 239 Return a OpenCL context for the kernels of type dtype. … … 202 244 203 245 def _create_some_context(self): 246 # type: () -> cl.Context 204 247 """ 205 248 Protected call to cl.create_some_context without interactivity. Use … … 215 258 216 259 def compile_program(self, name, source, dtype, fast=False): 260 # type: (str, str, np.dtype, bool) -> cl.Program 217 261 """ 218 262 Compile the program for the device in the given context. … … 220 264 key = "%s-%s-%s"%(name, dtype, fast) 221 265 if key not in self.compiled: 222 #print(" compiling",name)266 #print("OpenCL compile",name) 223 267 dtype = np.dtype(dtype) 224 program = compile_model(self.get_context(dtype), source, dtype, fast) 268 program = compile_model(self.get_context(dtype), 269 str(source), dtype, fast) 225 270 self.compiled[key] = program 226 271 return self.compiled[key] 227 272 228 273 def release_program(self, name): 274 # type: (str) -> None 229 275 """ 230 276 Free memory associated with the program on the device. … … 235 281 236 282 def _get_default_context(): 283 # type: () -> cl.Context 237 284 """ 238 285 Get an OpenCL context, preferring GPU over CPU, and preferring Intel … … 285 332 286 333 287 class GpuModel( object):334 class GpuModel(KernelModel): 288 335 """ 289 336 GPU wrapper for a single model. … … 300 347 that the compiler is allowed to take shortcuts. 
301 348 """ 302 def __init__(self, source, model_info, dtype=generate.F32): 349 def __init__(self, source, model_info, dtype=generate.F32, fast=False): 350 # type: (str, ModelInfo, np.dtype, bool) -> None 303 351 self.info = model_info 304 352 self.source = source 305 self.dtype = generate.F32 if dtype == 'fast' else np.dtype(dtype)306 self.fast = (dtype == 'fast')353 self.dtype = dtype 354 self.fast = fast 307 355 self.program = None # delay program creation 308 356 309 357 def __getstate__(self): 358 # type: () -> Tuple[ModelInfo, str, np.dtype, bool] 310 359 return self.info, self.source, self.dtype, self.fast 311 360 312 361 def __setstate__(self, state): 362 # type: (Tuple[ModelInfo, str, np.dtype, bool]) -> None 313 363 self.info, self.source, self.dtype, self.fast = state 314 364 self.program = None 315 365 316 366 def make_kernel(self, q_vectors): 367 # type: (List[np.ndarray]) -> "GpuKernel" 317 368 if self.program is None: 318 369 compiler = environment().compile_program 319 self.program = compiler(self.info ['name'], self.source, self.dtype,320 self. fast)370 self.program = compiler(self.info.name, self.source, 371 self.dtype, self.fast) 321 372 is_2d = len(q_vectors) == 2 322 373 kernel_name = generate.kernel_name(self.info, is_2d) 323 374 kernel = getattr(self.program, kernel_name) 324 return GpuKernel(kernel, self. info, q_vectors, self.dtype)375 return GpuKernel(kernel, self.dtype, self.info, q_vectors) 325 376 326 377 def release(self): 378 # type: () -> None 327 379 """ 328 380 Free the resources associated with the model. 329 381 """ 330 382 if self.program is not None: 331 environment().release_program(self.info ['name'])383 environment().release_program(self.info.name) 332 384 self.program = None 333 385 334 386 def __del__(self): 387 # type: () -> None 335 388 self.release() 336 389 … … 356 409 """ 357 410 def __init__(self, q_vectors, dtype=generate.F32): 411 # type: (List[np.ndarray], np.dtype) -> None 358 412 # TODO: do we ever need double precision q? 359 413 env = environment() … … 365 419 # at this point, so instead using 32, which is good on the set of 366 420 # architectures tested so far. 367 self.q_vectors = [_stretch_input(q, self.dtype, 32) for q in q_vectors] 421 if self.is_2d: 422 # Note: 17 rather than 15 because results is 2 elements 423 # longer than input. 424 width = ((self.nq+17)//16)*16 425 self.q = np.empty((width, 2), dtype=dtype) 426 self.q[:self.nq, 0] = q_vectors[0] 427 self.q[:self.nq, 1] = q_vectors[1] 428 else: 429 # Note: 33 rather than 31 because results is 2 elements 430 # longer than input. 431 width = ((self.nq+33)//32)*32 432 self.q = np.empty(width, dtype=dtype) 433 self.q[:self.nq] = q_vectors[0] 434 self.global_size = [self.q.shape[0]] 368 435 context = env.get_context(self.dtype) 369 self.global_size = [self.q_vectors[0].size]370 436 #print("creating inputs of size", self.global_size) 371 self.q_buffers = [ 372 cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=q) 373 for q in self.q_vectors 374 ] 437 self.q_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR, 438 hostbuf=self.q) 375 439 376 440 def release(self): 441 # type: () -> None 377 442 """ 378 443 Free the memory. 379 444 """ 380 for b in self.q_buffers:381 b.release()382 self.q_buffers = []445 if self.q_b is not None: 446 self.q_b.release() 447 self.q_b = None 383 448 384 449 def __del__(self): 450 # type: () -> None 385 451 self.release() 386 452 387 class GpuKernel( object):453 class GpuKernel(Kernel): 388 454 """ 389 455 Callable SAS kernel. 
… … 405 471 Call :meth:`release` when done with the kernel instance. 406 472 """ 407 def __init__(self, kernel, model_info, q_vectors, dtype): 473 def __init__(self, kernel, dtype, model_info, q_vectors): 474 # type: (cl.Kernel, np.dtype, ModelInfo, List[np.ndarray]) -> None 475 max_pd = model_info.parameters.max_pd 476 npars = len(model_info.parameters.kernel_parameters)-2 408 477 q_input = GpuInput(q_vectors, dtype) 409 478 self.kernel = kernel 410 479 self.info = model_info 411 self. res = np.empty(q_input.nq, q_input.dtype)412 dim = '2d' if q_input.is_2d else '1d'413 self.fixed_pars = model_info['partype']['fixed-' + dim]414 self. pd_pars = model_info['partype']['pd-' + dim]480 self.dtype = dtype 481 self.dim = '2d' if q_input.is_2d else '1d' 482 # plus three for the normalization values 483 self.result = np.empty(q_input.nq+3, dtype) 415 484 416 485 # Inputs and outputs for each kernel call … … 418 487 env = environment() 419 488 self.queue = env.get_queue(dtype) 420 self.loops_b = cl.Buffer(self.queue.context, mf.READ_WRITE, 421 2 * MAX_LOOPS * q_input.dtype.itemsize) 422 self.res_b = cl.Buffer(self.queue.context, mf.READ_WRITE, 423 q_input.global_size[0] * q_input.dtype.itemsize) 424 self.q_input = q_input 425 426 self._need_release = [self.loops_b, self.res_b, self.q_input] 427 428 def __call__(self, fixed_pars, pd_pars, cutoff): 429 real = (np.float32 if self.q_input.dtype == generate.F32 430 else np.float64 if self.q_input.dtype == generate.F64 431 else np.float16 if self.q_input.dtype == generate.F16 432 else np.float32) # will never get here, so use np.float32 433 434 #print "pars", fixed_pars, pd_pars 435 res_bi = self.res_b 436 nq = np.uint32(self.q_input.nq) 437 if pd_pars: 438 cutoff = real(cutoff) 439 loops_N = [np.uint32(len(p[0])) for p in pd_pars] 440 loops = np.hstack(pd_pars) \ 441 if pd_pars else np.empty(0, dtype=self.q_input.dtype) 442 loops = np.ascontiguousarray(loops.T, self.q_input.dtype).flatten() 443 #print("loops",Nloops, loops) 444 445 #import sys; print("opencl eval",pars) 446 #print("opencl eval",pars) 447 if len(loops) > 2 * MAX_LOOPS: 448 raise ValueError("too many polydispersity points") 449 450 loops_bi = self.loops_b 451 cl.enqueue_copy(self.queue, loops_bi, loops) 452 loops_l = cl.LocalMemory(len(loops.data)) 453 #ctx = environment().context 454 #loops_bi = cl.Buffer(ctx, mf.READ_ONLY|mf.COPY_HOST_PTR, hostbuf=loops) 455 dispersed = [loops_bi, loops_l, cutoff] + loops_N 456 else: 457 dispersed = [] 458 fixed = [real(p) for p in fixed_pars] 459 args = self.q_input.q_buffers + [res_bi, nq] + dispersed + fixed 460 self.kernel(self.queue, self.q_input.global_size, None, *args) 461 cl.enqueue_copy(self.queue, self.res, res_bi) 462 463 return self.res 489 490 # details is int32 data, padded to an 8 integer boundary 491 size = ((max_pd*5 + npars*3 + 2 + 7)//8)*8 492 self.result_b = cl.Buffer(self.queue.context, mf.READ_WRITE, 493 q_input.global_size[0] * dtype.itemsize) 494 self.q_input = q_input # allocated by GpuInput above 495 496 self._need_release = [ self.result_b, self.q_input ] 497 self.real = (np.float32 if dtype == generate.F32 498 else np.float64 if dtype == generate.F64 499 else np.float16 if dtype == generate.F16 500 else np.float32) # will never get here, so use np.float32 501 502 def __call__(self, call_details, weights, values, cutoff): 503 # type: (CallDetails, np.ndarray, np.ndarray, float) -> np.ndarray 504 context = self.queue.context 505 # Arrange data transfer to card 506 details_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR, 
507 hostbuf=call_details.buffer) 508 weights_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR, 509 hostbuf=weights) if len(weights) else None 510 values_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR, 511 hostbuf=values) 512 513 # Call kernel and retrieve results 514 step = 100 515 for start in range(0, call_details.total_pd, step): 516 stop = min(start+step, call_details.total_pd) 517 args = [ 518 np.uint32(self.q_input.nq), np.int32(start), np.int32(stop), 519 details_b, weights_b, values_b, self.q_input.q_b, self.result_b, 520 self.real(cutoff), 521 ] 522 self.kernel(self.queue, self.q_input.global_size, None, *args) 523 cl.enqueue_copy(self.queue, self.result, self.result_b) 524 525 # Free buffers 526 for v in (details_b, weights_b, values_b): 527 if v is not None: v.release() 528 529 return self.result[:self.q_input.nq] 464 530 465 531 def release(self): 532 # type: () -> None 466 533 """ 467 534 Release resources associated with the kernel. … … 472 539 473 540 def __del__(self): 541 # type: () -> None 474 542 self.release() -
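The rewritten GpuKernel.__call__ in kernelcl.py now copies the call details, weights and values to the card once and then dispatches the polydispersity loop in fixed-size blocks of 100 iterations, so no single kernel launch runs for too long. A minimal sketch of that dispatch pattern, with a hypothetical launch callable standing in for the pyopencl kernel invocation:

    def blocked_dispatch(launch, total_pd, step=100):
        """Call launch(start, stop) over [0, total_pd) in blocks of step."""
        for start in range(0, total_pd, step):
            stop = min(start + step, total_pd)
            launch(start, stop)

    # Record which ranges would be dispatched for 250 polydispersity points.
    calls = []
    blocked_dispatch(lambda start, stop: calls.append((start, stop)), 250)
    # calls == [(0, 100), (100, 200), (200, 250)]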
sasmodels/kernelpy.py
r4d76711 r7ae2b7f 7 7 :class:`kernelcl.GpuModel` and :class:`kerneldll.DllModel`. 8 8 """ 9 import numpy as np 10 from numpy import pi, cos 11 9 import numpy as np # type: ignore 10 from numpy import pi, cos #type: ignore 11 12 from . import details 12 13 from .generate import F64 13 14 class PyModel(object): 14 from .kernel import KernelModel, Kernel 15 16 try: 17 from typing import Union, Callable 18 except: 19 pass 20 else: 21 DType = Union[None, str, np.dtype] 22 23 class PyModel(KernelModel): 15 24 """ 16 25 Wrapper for pure python models. 17 26 """ 18 27 def __init__(self, model_info): 28 # Make sure Iq and Iqxy are available and vectorized 29 _create_default_functions(model_info) 19 30 self.info = model_info 20 31 21 32 def make_kernel(self, q_vectors): 22 33 q_input = PyInput(q_vectors, dtype=F64) 23 kernel = self.info ['Iqxy'] if q_input.is_2d else self.info['Iq']34 kernel = self.info.Iqxy if q_input.is_2d else self.info.Iq 24 35 return PyKernel(kernel, self.info, q_input) 25 36 … … 53 64 self.dtype = dtype 54 65 self.is_2d = (len(q_vectors) == 2) 55 self.q_vectors = [np.ascontiguousarray(q, self.dtype) for q in q_vectors] 56 self.q_pointers = [q.ctypes.data for q in self.q_vectors] 66 if self.is_2d: 67 self.q = np.empty((self.nq, 2), dtype=dtype) 68 self.q[:, 0] = q_vectors[0] 69 self.q[:, 1] = q_vectors[1] 70 else: 71 self.q = np.empty(self.nq, dtype=dtype) 72 self.q[:self.nq] = q_vectors[0] 57 73 58 74 def release(self): … … 60 76 Free resources associated with the model inputs. 61 77 """ 62 self.q _vectors = []63 64 class PyKernel( object):78 self.q = None 79 80 class PyKernel(Kernel): 65 81 """ 66 82 Callable SAS kernel. … … 82 98 """ 83 99 def __init__(self, kernel, model_info, q_input): 100 self.dtype = np.dtype('d') 84 101 self.info = model_info 85 102 self.q_input = q_input 86 103 self.res = np.empty(q_input.nq, q_input.dtype) 87 dim = '2d' if q_input.is_2d else '1d' 88 # Loop over q unless user promises that the kernel is vectorized by 89 # taggining it with vectorized=True 90 if not getattr(kernel, 'vectorized', False): 91 if dim == '2d': 92 def vector_kernel(qx, qy, *args): 93 """ 94 Vectorized 2D kernel. 95 """ 96 return np.array([kernel(qxi, qyi, *args) 97 for qxi, qyi in zip(qx, qy)]) 104 self.kernel = kernel 105 self.dim = '2d' if q_input.is_2d else '1d' 106 107 partable = model_info.parameters 108 kernel_parameters = (partable.iqxy_parameters if q_input.is_2d 109 else partable.iq_parameters) 110 volume_parameters = partable.form_volume_parameters 111 112 # Create an array to hold the parameter values. There will be a 113 # single array whose values are updated as the calculator goes 114 # through the loop. Arguments to the kernel and volume functions 115 # will use views into this vector, relying on the fact that a 116 # an array of no dimensions acts like a scalar. 117 parameter_vector = np.empty(len(partable.call_parameters)-2, 'd') 118 119 # Create views into the array to hold the arguments 120 offset = 0 121 kernel_args, volume_args = [], [] 122 for p in partable.kernel_parameters: 123 if p.length == 1: 124 # Scalar values are length 1 vectors with no dimensions. 125 v = parameter_vector[offset:offset+1].reshape(()) 98 126 else: 99 def vector_kernel(q, *args): 100 """ 101 Vectorized 1D kernel. 102 """ 103 return np.array([kernel(qi, *args) 104 for qi in q]) 105 self.kernel = vector_kernel 127 # Vector values are simple views. 
128 v = parameter_vector[offset:offset+p.length] 129 offset += p.length 130 if p in kernel_parameters: 131 kernel_args.append(v) 132 if p in volume_parameters: 133 volume_args.append(v) 134 135 # Hold on to the parameter vector so we can use it to call kernel later. 136 # This may also be required to preserve the views into the vector. 137 self._parameter_vector = parameter_vector 138 139 # Generate a closure which calls the kernel with the views into the 140 # parameter array. 141 if q_input.is_2d: 142 form = model_info.Iqxy 143 qx, qy = q_input.q[:,0], q_input.q[:,1] 144 self._form = lambda: form(qx, qy, *kernel_args) 106 145 else: 107 self.kernel = kernel 108 fixed_pars = model_info['partype']['fixed-' + dim] 109 pd_pars = model_info['partype']['pd-' + dim] 110 vol_pars = model_info['partype']['volume'] 111 112 # First two fixed pars are scale and background 113 pars = [p.name for p in model_info['parameters'][2:]] 114 offset = len(self.q_input.q_vectors) 115 self.args = self.q_input.q_vectors + [None] * len(pars) 116 self.fixed_index = np.array([pars.index(p) + offset 117 for p in fixed_pars[2:]]) 118 self.pd_index = np.array([pars.index(p) + offset 119 for p in pd_pars]) 120 self.vol_index = np.array([pars.index(p) + offset 121 for p in vol_pars]) 122 try: self.theta_index = pars.index('theta') + offset 123 except ValueError: self.theta_index = -1 124 125 # Caller needs fixed_pars and pd_pars 126 self.fixed_pars = fixed_pars 127 self.pd_pars = pd_pars 128 129 def __call__(self, fixed, pd, cutoff=1e-5): 130 #print("fixed",fixed) 131 #print("pd", pd) 132 args = self.args[:] # grab a copy of the args 133 form, form_volume = self.kernel, self.info['form_volume'] 134 # First two fixed 135 scale, background = fixed[:2] 136 for index, value in zip(self.fixed_index, fixed[2:]): 137 args[index] = float(value) 138 res = _loops(form, form_volume, cutoff, scale, background, args, 139 pd, self.pd_index, self.vol_index, self.theta_index) 140 146 form = model_info.Iq 147 q = q_input.q 148 self._form = lambda: form(q, *kernel_args) 149 150 # Generate a closure which calls the form_volume if it exists. 151 form_volume = model_info.form_volume 152 self._volume = ((lambda: form_volume(*volume_args)) if form_volume 153 else (lambda: 1.0)) 154 155 def __call__(self, call_details, weights, values, cutoff): 156 assert isinstance(call_details, details.CallDetails) 157 res = _loops(self._parameter_vector, self._form, self._volume, 158 self.q_input.nq, call_details, weights, values, cutoff) 141 159 return res 142 160 … … 145 163 Free resources associated with the kernel. 146 164 """ 165 self.q_input.release() 147 166 self.q_input = None 148 167 149 def _loops(form, form_volume, cutoff, scale, background, 150 args, pd, pd_index, vol_index, theta_index): 151 """ 152 *form* is the name of the form function, which should be vectorized over 153 q, but otherwise have an interface like the opencl kernels, with the 154 q parameters first followed by the individual parameters in the order 155 given in model.parameters (see :mod:`sasmodels.generate`). 156 157 *form_volume* calculates the volume of the shape. *vol_index* gives 158 the list of volume parameters 159 160 *cutoff* ignores the corners of the dispersion hypercube 161 162 *scale*, *background* multiplies the resulting form and adds an offset 163 164 *args* is the prepopulated set of arguments to the form function, starting 165 with the q vectors, and including slots for all the parameters. 
The 166 values for the parameters will be substituted with values from the 167 dispersion functions. 168 169 *pd* is the list of dispersion parameters 170 171 *pd_index* are the indices of the dispersion parameters in *args* 172 173 *vol_index* are the indices of the volume parameters in *args* 174 175 *theta_index* is the index of the theta parameter for the sasview 176 spherical correction, or -1 if there is no angular dispersion 177 """ 178 168 def _loops(parameters, form, form_volume, nq, call_details, 169 weights, values, cutoff): 170 # type: (np.ndarray, Callable[[], np.ndarray], Callable[[], float], int, details.CallDetails, np.ndarray, np.ndarray, float) -> None 179 171 ################################################################ 180 172 # # … … 186 178 # # 187 179 ################################################################ 188 189 weight = np.empty(len(pd), 'd') 190 if weight.size > 0: 191 # weight vector, to be populated by polydispersity loops 192 193 # identify which pd parameters are volume parameters 194 vol_weight_index = np.array([(index in vol_index) for index in pd_index]) 195 196 # Sort parameters in decreasing order of pd length 197 Npd = np.array([len(pdi[0]) for pdi in pd], 'i') 198 order = np.argsort(Npd)[::-1] 199 stride = np.cumprod(Npd[order]) 200 pd = [pd[index] for index in order] 201 pd_index = pd_index[order] 202 vol_weight_index = vol_weight_index[order] 203 204 fast_value = pd[0][0] 205 fast_weight = pd[0][1] 180 parameters[:] = values[call_details.par_offset] 181 scale, background = values[0], values[1] 182 if call_details.num_active == 0: 183 norm = float(form_volume()) 184 if norm > 0.0: 185 return (scale/norm)*form() + background 186 else: 187 return np.ones(nq, 'd')*background 188 189 partial_weight = np.NaN 190 spherical_correction = 1.0 191 pd_stride = call_details.pd_stride[:call_details.num_active] 192 pd_length = call_details.pd_length[:call_details.num_active] 193 pd_offset = call_details.pd_offset[:call_details.num_active] 194 pd_index = np.empty_like(pd_offset) 195 offset = np.empty_like(call_details.par_offset) 196 theta = call_details.theta_par 197 fast_length = pd_length[0] 198 pd_index[0] = fast_length 199 total = np.zeros(nq, 'd') 200 norm = 0.0 201 for loop_index in range(call_details.total_pd): 202 # update polydispersity parameter values 203 if pd_index[0] == fast_length: 204 pd_index[:] = (loop_index/pd_stride)%pd_length 205 partial_weight = np.prod(weights[pd_offset+pd_index][1:]) 206 for k in range(call_details.num_coord): 207 par = call_details.par_coord[k] 208 coord = call_details.pd_coord[k] 209 this_offset = call_details.par_offset[par] 210 block_size = 1 211 for bit in range(len(pd_offset)): 212 if coord&1: 213 this_offset += block_size * pd_index[bit] 214 block_size *= pd_length[bit] 215 coord >>= 1 216 if coord == 0: break 217 offset[par] = this_offset 218 parameters[par] = values[this_offset] 219 if par == theta and not (call_details.par_coord[k]&1): 220 spherical_correction = max(abs(cos(pi/180 * parameters[theta])), 1e-6) 221 for k in range(call_details.num_coord): 222 if call_details.pd_coord[k]&1: 223 #par = call_details.par_coord[k] 224 parameters[par] = values[offset[par]] 225 #print "par",par,offset[par],parameters[par+2] 226 offset[par] += 1 227 if par == theta: 228 spherical_correction = max(abs(cos(pi/180 * parameters[theta])), 1e-6) 229 230 weight = partial_weight * weights[pd_offset[0] + pd_index[0]] 231 pd_index[0] += 1 232 if weight > cutoff: 233 # Call the scattering function 234 # Assume that NaNs are 
only generated if the parameters are bad; 235 # exclude all q for that NaN. Even better would be to have an 236 # INVALID expression like the C models, but that is too expensive. 237 I = form() 238 if np.isnan(I).any(): continue 239 240 # update value and norm 241 weight *= spherical_correction 242 total += weight * I 243 norm += weight * form_volume() 244 245 if norm > 0.0: 246 return (scale/norm)*total + background 206 247 else: 207 stride = np.array([1]) 208 vol_weight_index = slice(None, None) 209 # keep lint happy 210 fast_value = [None] 211 fast_weight = [None] 212 213 ret = np.zeros_like(args[0]) 214 norm = np.zeros_like(ret) 215 vol = np.zeros_like(ret) 216 vol_norm = np.zeros_like(ret) 217 for k in range(stride[-1]): 218 # update polydispersity parameter values 219 fast_index = k % stride[0] 220 if fast_index == 0: # bottom loop complete ... check all other loops 221 if weight.size > 0: 222 for i, index, in enumerate(k % stride): 223 args[pd_index[i]] = pd[i][0][index] 224 weight[i] = pd[i][1][index] 225 else: 226 args[pd_index[0]] = fast_value[fast_index] 227 weight[0] = fast_weight[fast_index] 228 229 # Computes the weight, and if it is not sufficient then ignore this 230 # parameter set. 231 # Note: could precompute w1*...*wn so we only need to multiply by w0 232 w = np.prod(weight) 233 if w > cutoff: 234 # Note: can precompute spherical correction if theta_index is not 235 # the fast index. Correction factor for spherical integration 236 #spherical_correction = abs(cos(pi*args[phi_index])) if phi_index>=0 else 1.0 237 spherical_correction = (abs(cos(pi * args[theta_index])) * pi / 2 238 if theta_index >= 0 else 1.0) 239 #spherical_correction = 1.0 240 241 # Call the scattering function and adds it to the total. 242 I = form(*args) 243 if np.isnan(I).any(): continue 244 ret += w * I * spherical_correction 245 norm += w 246 247 # Volume normalization. 248 # If there are "volume" polydispersity parameters, then these 249 # will be used to call the form_volume function from the user 250 # supplied kernel, and accumulate a normalized weight. 251 # Note: can precompute volume norm if fast index is not a volume 252 if form_volume: 253 vol_args = [args[index] for index in vol_index] 254 vol_weight = np.prod(weight[vol_weight_index]) 255 vol += vol_weight * form_volume(*vol_args) 256 vol_norm += vol_weight 257 258 positive = (vol * vol_norm != 0.0) 259 ret[positive] *= vol_norm[positive] / vol[positive] 260 result = scale * ret / norm + background 261 return result 248 return np.ones(nq, 'd')*background 249 250 251 def _create_default_functions(model_info): 252 """ 253 Autogenerate missing functions, such as Iqxy from Iq. 254 255 This only works for Iqxy when Iq is written in python. :func:`make_source` 256 performs a similar role for Iq written in C. This also vectorizes 257 any functions that are not already marked as vectorized. 258 """ 259 _create_vector_Iq(model_info) 260 _create_vector_Iqxy(model_info) # call create_vector_Iq() first 261 262 263 def _create_vector_Iq(model_info): 264 """ 265 Define Iq as a vector function if it exists. 266 """ 267 Iq = model_info.Iq 268 if callable(Iq) and not getattr(Iq, 'vectorized', False): 269 #print("vectorizing Iq") 270 def vector_Iq(q, *args): 271 """ 272 Vectorized 1D kernel. 273 """ 274 return np.array([Iq(qi, *args) for qi in q]) 275 vector_Iq.vectorized = True 276 model_info.Iq = vector_Iq 277 278 def _create_vector_Iqxy(model_info): 279 """ 280 Define Iqxy as a vector function if it exists, or default it from Iq(). 
281 """ 282 Iq, Iqxy = model_info.Iq, model_info.Iqxy 283 if callable(Iqxy): 284 if not getattr(Iqxy, 'vectorized', False): 285 #print("vectorizing Iqxy") 286 def vector_Iqxy(qx, qy, *args): 287 """ 288 Vectorized 2D kernel. 289 """ 290 return np.array([Iqxy(qxi, qyi, *args) for qxi, qyi in zip(qx, qy)]) 291 vector_Iqxy.vectorized = True 292 model_info.Iqxy = vector_Iqxy 293 elif callable(Iq): 294 #print("defaulting Iqxy") 295 # Iq is vectorized because create_vector_Iq was already called. 296 def default_Iqxy(qx, qy, *args): 297 """ 298 Default 2D kernel. 299 """ 300 return Iq(np.sqrt(qx**2 + qy**2), *args) 301 default_Iqxy.vectorized = True 302 model_info.Iqxy = default_Iqxy 303 -
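The new _create_default_functions machinery in kernelpy.py wraps a scalar python Iq in a loop over q and, when no Iqxy is supplied, builds the 2D kernel from the 1D one via |q| = sqrt(qx^2 + qy^2). A small self-contained sketch of the same pattern, using a hypothetical toy form factor rather than a sasmodels kernel:

    import numpy as np

    def Iq(q, radius):                      # scalar kernel, one q at a time (toy model)
        return np.sinc(q * radius) ** 2

    def vector_Iq(q, *args):                # loop over q, as _create_vector_Iq does
        return np.array([Iq(qi, *args) for qi in q])
    vector_Iq.vectorized = True

    def default_Iqxy(qx, qy, *args):        # 2D default built from the 1D kernel
        return vector_Iq(np.sqrt(qx**2 + qy**2), *args)
    default_Iqxy.vectorized = True

    q = np.linspace(0.001, 0.5, 5)
    print(vector_Iq(q, 50.0))
    print(default_Iqxy(q, q, 50.0))

As the docstring notes, make_source plays the same role for models written in C, which is why hardsphere, hayter_msa and the lamellar family can drop their hand-written Iqxy strings elsewhere in this changeset.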
sasmodels/list_pars.py
ra84a0ca r6d6508e 25 25 for name in sorted(MODELS): 26 26 model_info = load_model_info(name) 27 for p in model_info ['parameters']:27 for p in model_info.parameters.kernel_parameters: 28 28 partable.setdefault(p.name, []) 29 29 partable[p.name].append(name) -
sasmodels/mixture.py
r72a081d r7ae2b7f 12 12 """ 13 13 from copy import copy 14 import numpy as np 14 import numpy as np # type: ignore 15 15 16 from .generate import process_parameters, COMMON_PARAMETERS, Parameter 16 from .modelinfo import Parameter, ParameterTable, ModelInfo 17 from .kernel import KernelModel, Kernel 17 18 18 SCALE=0 19 BACKGROUND=1 20 EFFECT_RADIUS=2 21 VOLFRACTION=3 19 try: 20 from typing import List 21 from .details import CallDetails 22 except ImportError: 23 pass 22 24 23 25 def make_mixture_info(parts): 26 # type: (List[ModelInfo]) -> ModelInfo 24 27 """ 25 28 Create info block for product model. … … 27 30 flatten = [] 28 31 for part in parts: 29 if part ['composition'] and part['composition'][0] == 'mixture':30 flatten.extend(part ['compostion'][1])32 if part.composition and part.composition[0] == 'mixture': 33 flatten.extend(part.composition[1]) 31 34 else: 32 35 flatten.append(part) … … 34 37 35 38 # Build new parameter list 36 pars = COMMON_PARAMETERS +[]39 combined_pars = [] 37 40 for k, part in enumerate(parts): 38 41 # Parameter prefix per model, A_, B_, ... 42 # Note that prefix must also be applied to id and length_control 43 # to support vector parameters 39 44 prefix = chr(ord('A')+k) + '_' 40 for p in part['parameters']: 41 # No background on the individual mixture elements 42 if p.name == 'background': 43 continue 44 # TODO: promote Parameter to a full class 45 # this code knows too much about the implementation! 46 p = list(p) 47 if p[0] == 'scale': # allow model subtraction 48 p[3] = [-np.inf, np.inf] # limits 49 p[0] = prefix+p[0] # relabel parameters with A_, ... 50 pars.append(p) 45 combined_pars.append(Parameter(prefix+'scale')) 46 for p in part.parameters.kernel_parameters: 47 p = copy(p) 48 p.name = prefix + p.name 49 p.id = prefix + p.id 50 if p.length_control is not None: 51 p.length_control = prefix + p.length_control 52 combined_pars.append(p) 53 parameters = ParameterTable(combined_pars) 51 54 52 model_info = {}53 model_info ['id'] = '+'.join(part['id'])54 model_info ['name'] = ' + '.join(part['name'])55 model_info ['filename']= None56 model_info ['title'] = 'Mixture model with ' + model_info['name']57 model_info ['description'] = model_info['title']58 model_info ['docs'] = model_info['title']59 model_info ['category']= "custom"60 model_info ['parameters'] = pars61 #model_info ['single']= any(part['single'] for part in parts)62 model_info ['structure_factor']= False63 model_info ['variant_info']= None64 #model_info ['tests']= []65 #model_info ['source']= []55 model_info = ModelInfo() 56 model_info.id = '+'.join(part.id for part in parts) 57 model_info.name = ' + '.join(part.name for part in parts) 58 model_info.filename = None 59 model_info.title = 'Mixture model with ' + model_info.name 60 model_info.description = model_info.title 61 model_info.docs = model_info.title 62 model_info.category = "custom" 63 model_info.parameters = parameters 64 #model_info.single = any(part['single'] for part in parts) 65 model_info.structure_factor = False 66 model_info.variant_info = None 67 #model_info.tests = [] 68 #model_info.source = [] 66 69 # Iq, Iqxy, form_volume, ER, VR and sesans 67 70 # Remember the component info blocks so we can build the model 68 model_info['composition'] = ('mixture', parts) 69 process_parameters(model_info) 70 return model_info 71 model_info.composition = ('mixture', parts) 71 72 72 73 73 class MixtureModel( object):74 class MixtureModel(KernelModel): 74 75 def __init__(self, model_info, parts): 76 # type: (ModelInfo, List[KernelModel]) -> None 
75 77 self.info = model_info 76 78 self.parts = parts 77 79 78 80 def __call__(self, q_vectors): 81 # type: (List[np.ndarray]) -> MixtureKernel 79 82 # Note: may be sending the q_vectors to the n times even though they 80 83 # are only needed once. It would mess up modularity quite a bit to … … 83 86 # in opencl; or both in opencl, but one in single precision and the 84 87 # other in double precision). 85 kernels = [part (q_vectors) for part in self.parts]88 kernels = [part.make_kernel(q_vectors) for part in self.parts] 86 89 return MixtureKernel(self.info, kernels) 87 90 88 91 def release(self): 92 # type: () -> None 89 93 """ 90 94 Free resources associated with the model. … … 94 98 95 99 96 class MixtureKernel( object):100 class MixtureKernel(Kernel): 97 101 def __init__(self, model_info, kernels): 98 dim = '2d' if kernels[0].q_input.is_2d else '1d' 102 # type: (ModelInfo, List[Kernel]) -> None 103 self.dim = kernels[0].dim 104 self.info = model_info 105 self.kernels = kernels 99 106 100 # fixed offsets starts at 2 for scale and background 101 fixed_pars, pd_pars = [], [] 102 offsets = [[2, 0]] 103 #vol_index = [] 104 def accumulate(fixed, pd, volume): 105 # subtract 1 from fixed since we are removing background 106 fixed_offset, pd_offset = offsets[-1] 107 #vol_index.extend(k+pd_offset for k,v in pd if v in volume) 108 offsets.append([fixed_offset + len(fixed) - 1, pd_offset + len(pd)]) 109 pd_pars.append(pd) 110 if dim == '2d': 111 for p in kernels: 112 partype = p.info['partype'] 113 accumulate(partype['fixed-2d'], partype['pd-2d'], partype['volume']) 114 else: 115 for p in kernels: 116 partype = p.info['partype'] 117 accumulate(partype['fixed-1d'], partype['pd-1d'], partype['volume']) 118 119 #self.vol_index = vol_index 120 self.offsets = offsets 121 self.fixed_pars = fixed_pars 122 self.pd_pars = pd_pars 123 self.info = model_info 124 self.kernels = kernels 125 self.results = None 126 127 def __call__(self, fixed_pars, pd_pars, cutoff=1e-5): 128 scale, background = fixed_pars[0:2] 107 def __call__(self, call_details, value, weight, cutoff): 108 # type: (CallDetails, np.ndarray, np.ndarry, float) -> np.ndarray 109 scale, background = value[0:2] 129 110 total = 0.0 130 self.results = [] # remember the parts for plotting later 131 for k in range(len(self.offsets)-1): 132 start_fixed, start_pd = self.offsets[k] 133 end_fixed, end_pd = self.offsets[k+1] 134 part_fixed = [fixed_pars[start_fixed], 0.0] + fixed_pars[start_fixed+1:end_fixed] 135 part_pd = [pd_pars[start_pd], 0.0] + pd_pars[start_pd+1:end_pd] 136 part_result = self.kernels[k](part_fixed, part_pd) 111 # remember the parts for plotting later 112 self.results = [] 113 for kernel, kernel_details in zip(self.kernels, call_details.parts): 114 part_result = kernel(kernel_details, value, weight, cutoff) 137 115 total += part_result 138 self.results.append( scale*sum+background)116 self.results.append(part_result) 139 117 140 118 return scale*total + background 141 119 142 120 def release(self): 143 self.p_kernel.release() 144 self.q_kernel.release() 121 # type: () -> None 122 for k in self.kernels: 123 k.release() 145 124 -
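make_mixture_info in mixture.py now tags each component's parameters with the prefixes A_, B_, ... (applied to the name, the id and, for vector parameters, the length_control) and adds a per-component scale so components can be weighted or subtracted. A sketch of that renaming, with plain string lists standing in for ParameterTable entries (the parameter names below are hypothetical):

    def prefix_parameters(parts):
        combined = []
        for k, part_pars in enumerate(parts):
            prefix = chr(ord('A') + k) + '_'    # A_, B_, C_, ...
            combined.append(prefix + 'scale')   # per-component scale
            combined.extend(prefix + name for name in part_pars)
        return combined

    print(prefix_parameters([["radius", "sld"], ["rg", "porod_exp"]]))
    # ['A_scale', 'A_radius', 'A_sld', 'B_scale', 'B_rg', 'B_porod_exp']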
sasmodels/models/cylinder.c
r26141cb re9b1663d 3 3 double Iqxy(double qx, double qy, double sld, double solvent_sld, 4 4 double radius, double length, double theta, double phi); 5 6 #define INVALID(v) (v.radius<0 || v.length<0) 5 7 6 8 double form_volume(double radius, double length) … … 15 17 double length) 16 18 { 17 // TODO: return NaN if radius<0 or length<0?18 19 // precompute qr and qh to save time in the loop 19 20 const double qr = q*radius; … … 47 48 double phi) 48 49 { 49 // TODO: return NaN if radius<0 or length<0?50 50 double sn, cn; // slots to hold sincos function output 51 51 -
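cylinder.c replaces the old "return NaN if radius<0" TODO comments with an INVALID(v) macro that rejects a bad parameter set before the kernel runs. Pure-python models have no such macro; as the comment in kernelpy.py's _loops notes, they signal a bad parameter set by returning NaN, which drops that point from the polydispersity sum. A hypothetical python Iq with the equivalent guard:

    def Iq(q, radius, length):
        if radius < 0 or length < 0:    # same condition as INVALID(v) above
            return float('nan')
        return 1.0                      # placeholder for the real form factor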
sasmodels/models/cylinder.py
rf247314 r7ae2b7f 82 82 """ 83 83 84 import numpy as np 85 from numpy import pi, inf 84 import numpy as np # type: ignore 85 from numpy import pi, inf # type: ignore 86 86 87 87 name = "cylinder" -
sasmodels/models/flexible_cylinder.c
re6408d0 r4937980 1 double form_volume(double length, double kuhn_length, double radius); 2 double Iq(double q, double length, double kuhn_length, double radius, 3 double sld, double solvent_sld); 4 double Iqxy(double qx, double qy, double length, double kuhn_length, 5 double radius, double sld, double solvent_sld); 6 double flexible_cylinder_kernel(double q, double length, double kuhn_length, 7 double radius, double sld, double solvent_sld); 8 9 10 double form_volume(double length, double kuhn_length, double radius) 1 static double 2 form_volume(length, kuhn_length, radius) 11 3 { 12 4 return 1.0; 13 5 } 14 6 15 double flexible_cylinder_kernel(double q, 16 double length, 17 double kuhn_length, 18 double radius, 19 double sld, 20 double solvent_sld) 7 static double 8 Iq(double q, 9 double length, 10 double kuhn_length, 11 double radius, 12 double sld, 13 double solvent_sld) 21 14 { 22 23 const double cont = sld-solvent_sld; 24 const double qr = q*radius; 25 //const double crossSect = (2.0*J1(qr)/qr)*(2.0*J1(qr)/qr); 26 const double crossSect = sas_J1c(qr); 27 double flex = Sk_WR(q,length,kuhn_length); 28 flex *= crossSect*crossSect; 29 flex *= M_PI*radius*radius*length; 30 flex *= cont*cont; 31 flex *= 1.0e-4; 32 return flex; 15 const double contrast = sld - solvent_sld; 16 const double cross_section = sas_J1c(q*radius); 17 const double volume = M_PI*radius*radius*length; 18 const double flex = Sk_WR(q, length, kuhn_length); 19 return 1.0e-4 * volume * square(contrast*cross_section) * flex; 33 20 } 34 35 double Iq(double q,36 double length,37 double kuhn_length,38 double radius,39 double sld,40 double solvent_sld)41 {42 43 double result = flexible_cylinder_kernel(q, length, kuhn_length, radius, sld, solvent_sld);44 return result;45 }46 47 double Iqxy(double qx, double qy,48 double length,49 double kuhn_length,50 double radius,51 double sld,52 double solvent_sld)53 {54 double q;55 q = sqrt(qx*qx+qy*qy);56 double result = flexible_cylinder_kernel(q, length, kuhn_length, radius, sld, solvent_sld);57 58 return result;59 } -
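The simplified flexible_cylinder kernel now computes I(q) = 1e-4 * V * (contrast * cross_section)^2 * Sk_WR(q, L, b), where the cross-section term sas_J1c(qr) is 2*J1(qr)/qr (see the comment removed in the diff) and Sk_WR comes from lib/wrc_cyl.c. A sketch of how the pieces combine, with Sk_WR passed in as a callable because the chain structure factor is not reproduced here; this is an illustration, not a working model:

    import numpy as np
    from scipy.special import j1        # cylindrical Bessel function J1

    def flexible_cylinder_Iq(q, length, kuhn_length, radius, sld, solvent_sld, Sk_WR):
        contrast = sld - solvent_sld
        qr = q * radius
        cross_section = 2.0 * j1(qr) / qr          # sas_J1c(qr)
        volume = np.pi * radius**2 * length
        flex = Sk_WR(q, length, kuhn_length)
        return 1.0e-4 * volume * (contrast * cross_section)**2 * flex

    # Stand-in structure factor (constant 1) just to exercise the code path.
    q = np.linspace(0.001, 0.5, 5)
    print(flexible_cylinder_Iq(q, 1000.0, 100.0, 20.0, 1.0, 6.3,
                               Sk_WR=lambda q, L, b: np.ones_like(q)))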
sasmodels/models/gel_fit.c
r30b4ddf r03cac08 1 double form_volume(void); 2 3 double Iq(double q, 4 double guinier_scale, 5 double lorentzian_scale, 6 double gyration_radius, 7 double fractal_exp, 8 double cor_length); 9 10 double Iqxy(double qx, double qy, 11 double guinier_scale, 12 double lorentzian_scale, 13 double gyration_radius, 14 double fractal_exp, 15 double cor_length); 16 17 static double _gel_fit_kernel(double q, 1 static double Iq(double q, 18 2 double guinier_scale, 19 3 double lorentzian_scale, … … 24 8 // Lorentzian Term 25 9 ////////////////////////double a(x[i]*x[i]*zeta*zeta); 26 double lorentzian_term = q*q*cor_length*cor_length;10 double lorentzian_term = square(q*cor_length); 27 11 lorentzian_term = 1.0 + ((fractal_exp + 1.0)/3.0)*lorentzian_term; 28 12 lorentzian_term = pow(lorentzian_term, (fractal_exp/2.0) ); … … 30 14 // Exponential Term 31 15 ////////////////////////double d(x[i]*x[i]*rg*rg); 32 double exp_term = q*q*gyration_radius*gyration_radius;16 double exp_term = square(q*gyration_radius); 33 17 exp_term = exp(-1.0*(exp_term/3.0)); 34 18 … … 37 21 return result; 38 22 } 39 double form_volume(void){40 // Unused, so free to return garbage.41 return NAN;42 }43 44 double Iq(double q,45 double guinier_scale,46 double lorentzian_scale,47 double gyration_radius,48 double fractal_exp,49 double cor_length)50 {51 return _gel_fit_kernel(q,52 guinier_scale,53 lorentzian_scale,54 gyration_radius,55 fractal_exp,56 cor_length);57 }58 59 // Iqxy is never called since no orientation or magnetic parameters.60 double Iqxy(double qx, double qy,61 double guinier_scale,62 double lorentzian_scale,63 double gyration_radius,64 double fractal_exp,65 double cor_length)66 {67 double q = sqrt(qx*qx + qy*qy);68 return _gel_fit_kernel(q,69 guinier_scale,70 lorentzian_scale,71 gyration_radius,72 fractal_exp,73 cor_length);74 }75 -
sasmodels/models/hardsphere.py
rec45c4f rd2bb604 149 149 """ 150 150 151 Iqxy = """152 // never called since no orientation or magnetic parameters.153 return Iq(sqrt(qx*qx+qy*qy), IQ_PARAMETERS);154 """155 156 151 # ER defaults to 0.0 157 152 # VR defaults to 1.0 -
sasmodels/models/hayter_msa.py
rec45c4f rd2bb604 87 87 return 1.0; 88 88 """ 89 Iqxy = """90 // never called since no orientation or magnetic parameters.91 return Iq(sqrt(qx*qx+qy*qy), IQ_PARAMETERS);92 """93 89 # ER defaults to 0.0 94 90 # VR defaults to 1.0 -
sasmodels/models/lamellar.py
rec45c4f rd2bb604 82 82 """ 83 83 84 Iqxy = """85 return Iq(sqrt(qx*qx+qy*qy), IQ_PARAMETERS);86 """87 88 84 # ER defaults to 0.0 89 85 # VR defaults to 1.0 -
sasmodels/models/lamellar_hg.py
rec45c4f rd2bb604 101 101 """ 102 102 103 Iqxy = """104 return Iq(sqrt(qx*qx+qy*qy), IQ_PARAMETERS);105 """106 107 103 # ER defaults to 0.0 108 104 # VR defaults to 1.0 -
sasmodels/models/lamellar_hg_stack_caille.py
rec45c4f rd2bb604 120 120 """ 121 121 122 Iqxy = """123 return Iq(sqrt(qx*qx+qy*qy), IQ_PARAMETERS);124 """125 126 122 # ER defaults to 0.0 127 123 # VR defaults to 1.0 -
sasmodels/models/lamellar_stack_caille.py
rec45c4f rd2bb604 104 104 """ 105 105 106 Iqxy = """107 return Iq(sqrt(qx*qx+qy*qy), IQ_PARAMETERS);108 """109 110 106 # ER defaults to 0.0 111 107 # VR defaults to 1.0 -
sasmodels/models/lamellar_stack_paracrystal.py
rec45c4f rd2bb604 132 132 """ 133 133 134 Iqxy = """135 return Iq(sqrt(qx*qx+qy*qy), IQ_PARAMETERS);136 """137 138 134 # ER defaults to 0.0 139 135 # VR defaults to 1.0 -
sasmodels/models/lib/sas_JN.c
re6408d0 r4937980 48 48 */ 49 49 50 static double 51 sas_JN( int n, double x ) { 50 double sas_JN( int n, double x ); 51 52 double sas_JN( int n, double x ) { 52 53 53 54 const double MACHEP = 1.11022302462515654042E-16; -
sasmodels/models/lib/sph_j1c.c
re6f1410 rba32cdd 7 7 * using double precision that are the source. 8 8 */ 9 double sph_j1c(double q); 9 10 10 11 // The choice of the number of terms in the series and the cutoff value for … … 43 44 #endif 44 45 45 double sph_j1c(double q);46 46 double sph_j1c(double q) 47 47 { -
sasmodels/models/lib/sphere_form.c
rad90df9 rba32cdd 1 inline double 2 sphere_volume(double radius) 1 double sphere_volume(double radius); 2 double sphere_form(double q, double radius, double sld, double solvent_sld); 3 4 double sphere_volume(double radius) 3 5 { 4 6 return M_4PI_3*cube(radius); 5 7 } 6 8 7 inline double 8 sphere_form(double q, double radius, double sld, double solvent_sld) 9 double sphere_form(double q, double radius, double sld, double solvent_sld) 9 10 { 10 11 const double fq = sphere_volume(radius) * sph_j1c(q*radius); -
sasmodels/models/lib/wrc_cyl.c
re7678b2 rba32cdd 2 2 Functions for WRC implementation of flexible cylinders 3 3 */ 4 double Sk_WR(double q, double L, double b); 5 6 4 7 static double 5 8 AlphaSquare(double x) … … 363 366 } 364 367 365 double Sk_WR(double q, double L, double b);366 368 double Sk_WR(double q, double L, double b) 367 369 { -
sasmodels/models/onion.py
r416609b rfa5fd8d 293 293 294 294 # ["name", "units", default, [lower, upper], "type","description"], 295 parameters = [[" core_sld", "1e-6/Ang^2", 1.0, [-inf, inf], "",295 parameters = [["sld_core", "1e-6/Ang^2", 1.0, [-inf, inf], "", 296 296 "Core scattering length density"], 297 297 ["core_radius", "Ang", 200., [0, inf], "volume", 298 298 "Radius of the core"], 299 ["s olvent_sld", "1e-6/Ang^2", 6.4, [-inf, inf], "",299 ["sld_solvent", "1e-6/Ang^2", 6.4, [-inf, inf], "", 300 300 "Solvent scattering length density"], 301 ["n ", "", 1, [0, 10], "volume",301 ["n_shells", "", 1, [0, 10], "volume", 302 302 "number of shells"], 303 [" in_sld[n]", "1e-6/Ang^2", 1.7, [-inf, inf], "",303 ["sld_in[n_shells]", "1e-6/Ang^2", 1.7, [-inf, inf], "", 304 304 "scattering length density at the inner radius of shell k"], 305 [" out_sld[n]", "1e-6/Ang^2", 2.0, [-inf, inf], "",305 ["sld_out[n_shells]", "1e-6/Ang^2", 2.0, [-inf, inf], "", 306 306 "scattering length density at the outer radius of shell k"], 307 ["thickness[n ]", "Ang", 40., [0, inf], "volume",307 ["thickness[n_shells]", "Ang", 40., [0, inf], "volume", 308 308 "Thickness of shell k"], 309 ["A[n ]", "", 1.0, [-inf, inf], "",309 ["A[n_shells]", "", 1.0, [-inf, inf], "", 310 310 "Decay rate of shell k"], 311 311 ] 312 312 313 #source = ["lib/sph_j1c.c", "onion.c"] 314 315 def Iq(q, *args, **kw): 316 return q 317 318 def Iqxy(qx, *args, **kw): 319 return qx 320 321 322 def shape(core_sld, core_radius, solvent_sld, n, in_sld, out_sld, thickness, A): 313 source = ["lib/sph_j1c.c", "onion.c"] 314 315 #def Iq(q, *args, **kw): 316 # return q 317 318 profile_axes = ['Radius (A)', 'SLD (1e-6/A^2)'] 319 def profile(core_sld, core_radius, solvent_sld, n_shells, 320 in_sld, out_sld, thickness, A): 323 321 """ 324 SLD profile322 Returns shape profile with x=radius, y=SLD. 325 323 """ 326 324 327 total_radius = 1.25*(sum(thickness[:n ]) + core_radius + 1)325 total_radius = 1.25*(sum(thickness[:n_shells]) + core_radius + 1) 328 326 dr = total_radius/400 # 400 points for a smooth plot 329 327 … … 338 336 339 337 # add in the shells 340 for k in range(n ):338 for k in range(n_shells): 341 339 # Left side of each shells 342 340 r0 = r[-1] … … 365 363 beta.append(solvent_sld) 366 364 367 return np.asarray(r), np.asarray(beta) 365 return np.asarray(r), np.asarray(beta)*1e-6 368 366 369 367 def ER(core_radius, n, thickness): … … 374 372 375 373 demo = { 376 "s olvent_sld": 2.2,377 " core_sld": 1.0,374 "sld_solvent": 2.2, 375 "sld_core": 1.0, 378 376 "core_radius": 100, 379 377 "n": 4, 380 " in_sld": [0.5, 1.5, 0.9, 2.0],381 " out_sld": [nan, 0.9, 1.2, 1.6],378 "sld_in": [0.5, 1.5, 0.9, 2.0], 379 "sld_out": [nan, 0.9, 1.2, 1.6], 382 380 "thickness": [50, 75, 150, 75], 383 381 "A": [0, -1, 1e-4, 1], -
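onion.py now exposes a profile() hook that returns (radius, SLD) arrays for plotting against the labels in profile_axes. A usage sketch; the arrays below are hypothetical stand-ins for the output of onion.profile(...):

    import numpy as np
    import matplotlib.pyplot as plt

    r = np.array([0.0, 100.0, 100.0, 175.0, 175.0, 250.0])   # radius in Ang
    sld = np.array([1.0, 1.0, 1.7, 2.0, 6.4, 6.4])           # step-like SLD profile

    plt.plot(r, sld)
    plt.xlabel('Radius (A)')
    plt.ylabel('SLD (1e-6/A^2)')
    plt.show()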
sasmodels/models/rpa.py
rec45c4f ra5b8477 86 86 # ["name", "units", default, [lower, upper], "type","description"], 87 87 parameters = [ 88 ["case_num", CASES, 0, [0, 10], "", "Component organization"],88 ["case_num", "", 1, [CASES], "", "Component organization"], 89 89 90 ["Na", "", 1000.0, [1, inf], "", "Degree of polymerization"], 91 ["Phia", "", 0.25, [0, 1], "", "volume fraction"], 92 ["va", "mL/mol", 100.0, [0, inf], "", "specific volume"], 93 ["La", "fm", 10.0, [-inf, inf], "", "scattering length"], 94 ["ba", "Ang", 5.0, [0, inf], "", "segment length"], 95 96 ["Nb", "", 1000.0, [1, inf], "", "Degree of polymerization"], 97 ["Phib", "", 0.25, [0, 1], "", "volume fraction"], 98 ["vb", "mL/mol", 100.0, [0, inf], "", "specific volume"], 99 ["Lb", "fm", 10.0, [-inf, inf], "", "scattering length"], 100 ["bb", "Ang", 5.0, [0, inf], "", "segment length"], 101 102 ["Nc", "", 1000.0, [1, inf], "", "Degree of polymerization"], 103 ["Phic", "", 0.25, [0, 1], "", "volume fraction"], 104 ["vc", "mL/mol", 100.0, [0, inf], "", "specific volume"], 105 ["Lc", "fm", 10.0, [-inf, inf], "", "scattering length"], 106 ["bc", "Ang", 5.0, [0, inf], "", "segment length"], 107 108 ["Nd", "", 1000.0, [1, inf], "", "Degree of polymerization"], 109 ["Phid", "", 0.25, [0, 1], "", "volume fraction"], 110 ["vd", "mL/mol", 100.0, [0, inf], "", "specific volume"], 111 ["Ld", "fm", 10.0, [-inf, inf], "", "scattering length"], 112 ["bd", "Ang", 5.0, [0, inf], "", "segment length"], 90 ["N[4]", "", 1000.0, [1, inf], "", "Degree of polymerization"], 91 ["Phi[4]", "", 0.25, [0, 1], "", "volume fraction"], 92 ["v[4]", "mL/mol", 100.0, [0, inf], "", "specific volume"], 93 ["L[4]", "fm", 10.0, [-inf, inf], "", "scattering length"], 94 ["b[4]", "Ang", 5.0, [0, inf], "", "segment length"], 113 95 114 96 ["Kab", "", -0.0004, [-inf, inf], "", "Interaction parameter"], -
sasmodels/models/spherical_sld.py
rec45c4f rdd71228 1 1 r""" 2 This model calculates an empirical functional form for SAS data using SpericalSLD profile 3 4 Similarly to the OnionExpShellModel, this model provides the form factor, P(q), for a multi-shell sphere, 5 where the interface between the each neighboring shells can be described by one of a number of functions 6 including error, power-law, and exponential functions. This model is to calculate the scattering intensity 7 by building a continuous custom SLD profile against the radius of the particle. The SLD profile is composed 8 of a flat core, a flat solvent, a number (up to 9 ) flat shells, and the interfacial layers between 9 the adjacent flat shells (or core, and solvent) (see below). 2 This model calculates an empirical functional form for SAS data using 3 SpericalSLD profile 4 5 Similarly to the OnionExpShellModel, this model provides the form factor, 6 P(q), for a multi-shell sphere, where the interface between the each neighboring 7 shells can be described by one of a number of functions including error, 8 power-law, and exponential functions. 9 This model is to calculate the scattering intensity by building a continuous 10 custom SLD profile against the radius of the particle. 11 The SLD profile is composed of a flat core, a flat solvent, a number (up to 9 ) 12 flat shells, and the interfacial layers between the adjacent flat shells 13 (or core, and solvent) (see below). 10 14 11 15 .. figure:: img/spherical_sld_profile.gif … … 13 17 Exemplary SLD profile 14 18 15 Unlike the <onion> model (using an analytical integration), 16 the interfacial layers here are sub-divided and numerically integrated assuming each of the sub-layers are described 17 by a line function. The number of the sub-layer can be given by users by setting the integer values of npts_inter. 18 The form factor is normalized by the total volume of the sphere. 19 Unlike the <onion> model (using an analytical integration), the interfacial 20 layers here are sub-divided and numerically integrated assuming each of the 21 sub-layers are described by a line function. 22 The number of the sub-layer can be given by users by setting the integer values 23 of npts_inter. The form factor is normalized by the total volume of the sphere. 19 24 20 25 Definition … … 29 34 \sum_{\text{flat}_i=0}^N f_{\text{flat}_i} +f_\text{solvent} 30 35 31 For a spherically symmetric particle with a particle density $\rho_x(r)$ the sld function can be defined as: 36 For a spherically symmetric particle with a particle density $\rho_x(r)$ 37 the sld function can be defined as: 32 38 33 39 .. math:: … … 39 45 40 46 .. 
math:: 41 f_\text{core} = 4 \pi \int_{0}^{r_\text{core}} \rho_\text{core} \frac{\sin(qr)} {qr} r^2 dr = 47 f_\text{core} = 4 \pi \int_{0}^{r_\text{core}} \rho_\text{core} 48 \frac{\sin(qr)} {qr} r^2 dr = 42 49 3 \rho_\text{core} V(r_\text{core}) 43 \Big[ \frac{\sin(qr_\text{core}) - qr_\text{core} \cos(qr_\text{core})} {qr_\text{core}^3} \Big] 44 45 f_{\text{inter}_i} = 4 \pi \int_{\Delta t_{ \text{inter}_i } } \rho_{ \text{inter}_i } \frac{\sin(qr)} {qr} r^2 dr 46 47 f_{\text{shell}_i} = 4 \pi \int_{\Delta t_{ \text{inter}_i } } \rho_{ \text{flat}_i } \frac{\sin(qr)} {qr} r^2 dr = 48 3 \rho_{ \text{flat}_i } V ( r_{ \text{inter}_i } + \Delta t_{ \text{inter}_i } ) 49 \Big[ \frac{\sin(qr_{\text{inter}_i} + \Delta t_{ \text{inter}_i } ) - q (r_{\text{inter}_i} + 50 \Delta t_{ \text{inter}_i }) \cos(q( r_{\text{inter}_i} + \Delta t_{ \text{inter}_i } ) ) } 50 \Big[ \frac{\sin(qr_\text{core}) - qr_\text{core} \cos(qr_\text{core})} 51 {qr_\text{core}^3} \Big] 52 53 f_{\text{inter}_i} = 4 \pi \int_{\Delta t_{ \text{inter}_i } } 54 \rho_{ \text{inter}_i } \frac{\sin(qr)} {qr} r^2 dr 55 56 f_{\text{shell}_i} = 4 \pi \int_{\Delta t_{ \text{inter}_i } } 57 \rho_{ \text{flat}_i } \frac{\sin(qr)} {qr} r^2 dr = 58 3 \rho_{ \text{flat}_i } V ( r_{ \text{inter}_i } + 59 \Delta t_{ \text{inter}_i } ) 60 \Big[ \frac{\sin(qr_{\text{inter}_i} + \Delta t_{ \text{inter}_i } ) 61 - q (r_{\text{inter}_i} + \Delta t_{ \text{inter}_i }) 62 \cos(q( r_{\text{inter}_i} + \Delta t_{ \text{inter}_i } ) ) } 51 63 {q ( r_{\text{inter}_i} + \Delta t_{ \text{inter}_i } )^3 } \Big] 52 64 -3 \rho_{ \text{flat}_i } V(r_{ \text{inter}_i }) 53 \Big[ \frac{\sin(qr_{\text{inter}_i}) - qr_{\text{flat}_i} \cos(qr_{\text{inter}_i}) } {qr_{\text{inter}_i}^3} \Big] 54 55 f_\text{solvent} = 4 \pi \int_{r_N}^{\infty} \rho_\text{solvent} \frac{\sin(qr)} {qr} r^2 dr = 65 \Big[ \frac{\sin(qr_{\text{inter}_i}) - qr_{\text{flat}_i} 66 \cos(qr_{\text{inter}_i}) } {qr_{\text{inter}_i}^3} \Big] 67 68 f_\text{solvent} = 4 \pi \int_{r_N}^{\infty} \rho_\text{solvent} 69 \frac{\sin(qr)} {qr} r^2 dr = 56 70 3 \rho_\text{solvent} V(r_N) 57 71 \Big[ \frac{\sin(qr_N) - qr_N \cos(qr_N)} {qr_N^3} \Big] … … 66 80 .. math:: 67 81 \rho_{{inter}_i} (r) = \begin{cases} 68 B \exp\Big( \frac {\pm A(r - r_{\text{flat}_i})} {\Delta t_{ \text{inter}_i }} \Big) +C & \text{for} A \neq 0 \\ 69 B \Big( \frac {(r - r_{\text{flat}_i})} {\Delta t_{ \text{inter}_i }} \Big) +C & \text{for} A = 0 \\ 82 B \exp\Big( \frac {\pm A(r - r_{\text{flat}_i})} 83 {\Delta t_{ \text{inter}_i }} \Big) +C & \text{for} A \neq 0 \\ 84 B \Big( \frac {(r - r_{\text{flat}_i})} 85 {\Delta t_{ \text{inter}_i }} \Big) +C & \text{for} A = 0 \\ 70 86 \end{cases} 71 87 … … 74 90 .. math:: 75 91 \rho_{{inter}_i} (r) = \begin{cases} 76 \pm B \Big( \frac {(r - r_{\text{flat}_i} )} {\Delta t_{ \text{inter}_i }} \Big) ^A +C & \text{for} A \neq 0 \\ 92 \pm B \Big( \frac {(r - r_{\text{flat}_i} )} {\Delta t_{ \text{inter}_i }} 93 \Big) ^A +C & \text{for} A \neq 0 \\ 77 94 \rho_{\text{flat}_{i+1}} & \text{for} A = 0 \\ 78 95 \end{cases} … … 82 99 .. 
math:: 83 100 \rho_{{inter}_i} (r) = \begin{cases} 84 B \text{erf} \Big( \frac { A(r - r_{\text{flat}_i})} {\sqrt{2} \Delta t_{ \text{inter}_i }} \Big) +C & \text{for} A \neq 0 \\ 85 B \Big( \frac {(r - r_{\text{flat}_i} )} {\Delta t_{ \text{inter}_i }} \Big) +C & \text{for} A = 0 \\ 101 B \text{erf} \Big( \frac { A(r - r_{\text{flat}_i})} 102 {\sqrt{2} \Delta t_{ \text{inter}_i }} \Big) +C & \text{for} A \neq 0 \\ 103 B \Big( \frac {(r - r_{\text{flat}_i} )} {\Delta t_{ \text{inter}_i }} 104 \Big) +C & \text{for} A = 0 \\ 86 105 \end{cases} 87 106 88 The functions are normalized so that they vary between 0 and 1, and they are constrained such that the SLD 89 is continuous at the boundaries of the interface as well as each sub-layers. Thus B and C are determined. 90 91 Once $\rho_{\text{inter}_i}$ is found at the boundary of the sub-layer of the interface, we can find its contribution 92 to the form factor $P(q)$ 93 94 .. math:: 95 f_{\text{inter}_i} = 4 \pi \int_{\Delta t_{ \text{inter}_i } } \rho_{ \text{inter}_i } \frac{\sin(qr)} {qr} r^2 dr = 107 The functions are normalized so that they vary between 0 and 1, and they are 108 constrained such that the SLD is continuous at the boundaries of the interface 109 as well as each sub-layers. Thus B and C are determined. 110 111 Once $\rho_{\text{inter}_i}$ is found at the boundary of the sub-layer of the 112 interface, we can find its contribution to the form factor $P(q)$ 113 114 .. math:: 115 f_{\text{inter}_i} = 4 \pi \int_{\Delta t_{ \text{inter}_i } } 116 \rho_{ \text{inter}_i } \frac{\sin(qr)} {qr} r^2 dr = 96 117 4 \pi \sum_{j=0}^{npts_{\text{inter}_i} -1 } 97 \int_{r_j}^{r_{j+1}} \rho_{ \text{inter}_i } (r_j) \frac{\sin(qr)} {qr} r^2 dr \approx 118 \int_{r_j}^{r_{j+1}} \rho_{ \text{inter}_i } (r_j) 119 \frac{\sin(qr)} {qr} r^2 dr \approx 98 120 99 121 4 \pi \sum_{j=0}^{npts_{\text{inter}_i} -1 } \Big[ 100 3 ( \rho_{ \text{inter}_i } ( r_{j+1} ) - \rho_{ \text{inter}_i } ( r_{j} ) V ( r_{ \text{sublayer}_j } ) 101 \Big[ \frac {r_j^2 \beta_\text{out}^2 \sin(\beta_\text{out}) - (\beta_\text{out}^2-2) \cos(\beta_\text{out}) } 122 3 ( \rho_{ \text{inter}_i } ( r_{j+1} ) - \rho_{ \text{inter}_i } 123 ( r_{j} ) V ( r_{ \text{sublayer}_j } ) 124 \Big[ \frac {r_j^2 \beta_\text{out}^2 \sin(\beta_\text{out}) 125 - (\beta_\text{out}^2-2) \cos(\beta_\text{out}) } 102 126 {\beta_\text{out}^4 } \Big] 103 127 104 - 3 ( \rho_{ \text{inter}_i } ( r_{j+1} ) - \rho_{ \text{inter}_i } ( r_{j} ) V ( r_{ \text{sublayer}_j-1 } ) 105 \Big[ \frac {r_{j-1}^2 \sin(\beta_\text{in}) - (\beta_\text{in}^2-2) \cos(\beta_\text{in}) } 128 - 3 ( \rho_{ \text{inter}_i } ( r_{j+1} ) - \rho_{ \text{inter}_i } 129 ( r_{j} ) V ( r_{ \text{sublayer}_j-1 } ) 130 \Big[ \frac {r_{j-1}^2 \sin(\beta_\text{in}) 131 - (\beta_\text{in}^2-2) \cos(\beta_\text{in}) } 106 132 {\beta_\text{in}^4 } \Big] 107 133 … … 120 146 V(a) = \frac {4\pi}{3}a^3 121 147 122 a_\text{in} ~ \frac{r_j}{r_{j+1} -r_j} \text{, } a_\text{out} ~ \frac{r_{j+1}}{r_{j+1} -r_j} 148 a_\text{in} ~ \frac{r_j}{r_{j+1} -r_j} \text{, } a_\text{out} 149 ~ \frac{r_{j+1}}{r_{j+1} -r_j} 123 150 124 151 \beta_\text{in} = qr_j \text{, } \beta_\text{out} = qr_{j+1} 125 152 126 153 127 We assume the $\rho_{\text{inter}_i} (r)$ can be approximately linear within a sub-layer $j$ 154 We assume the $\rho_{\text{inter}_i} (r)$ can be approximately linear 155 within a sub-layer $j$ 128 156 129 157 Finally form factor can be calculated by … … 131 159 .. 
math:: 132 160 133 P(q) = \frac{[f]^2} {V_\text{particle}} \text{where} V_\text{particle} = V(r_{\text{shell}_N}) 161 P(q) = \frac{[f]^2} {V_\text{particle}} \text{where} V_\text{particle} 162 = V(r_{\text{shell}_N}) 134 163 135 164 For 2D data the scattering intensity is calculated in the same way as 1D, … … 150 179 151 180 .. note:: 152 The outer most radius is used as the effective radius for S(Q) when $P(Q) * S(Q)$ is applied. 181 The outer most radius is used as the effective radius for S(Q) 182 when $P(Q) * S(Q)$ is applied. 153 183 154 184 References 155 185 ---------- 156 L A Feigin and D I Svergun, Structure Analysis by Small-Angle X-Ray and Neutron Scattering, Plenum Press, New York, (1987) 186 L A Feigin and D I Svergun, Structure Analysis by Small-Angle X-Ray 187 and Neutron Scattering, Plenum Press, New York, (1987) 157 188 158 189 """ … … 170 201 # pylint: disable=bad-whitespace, line-too-long 171 202 # ["name", "units", default, [lower, upper], "type", "description"], 172 parameters = [["n_shells", "", 1, [0, 9], "", "number of shells"], 173 ["radius_core", "Ang", 50.0, [0, inf], "", "intern layer thickness"], 174 ["sld_core", "1e-6/Ang^2", 2.07, [-inf, inf], "", "sld function flat"], 175 ["sld_flat[n]", "1e-6/Ang^2", 4.06, [-inf, inf], "", "sld function flat"], 176 ["thick_flat[n]", "Ang", 100.0, [0, inf], "", "flat layer_thickness"], 177 ["func_inter[n]", "", 0, [0, 4], "", "Erf:0, RPower:1, LPower:2, RExp:3, LExp:4"], 178 ["thick_inter[n]", "Ang", 50.0, [0, inf], "", "intern layer thickness"], 179 ["inter_nu[n]", "", 2.5, [-inf, inf], "", "steepness parameter"], 180 ["npts_inter", "", 35, [0, 35], "", "number of points in each sublayer Must be odd number"], 181 ["sld_solvent", "1e-6/Ang^2", 1.0, [-inf, inf], "", "sld function solvent"], 203 parameters = [["n_shells", "", 1, [0, 9], "volume", "number of shells"], 204 ["npts_inter", "", 35, [0, inf], "", "number of points in each sublayer Must be odd number"], 205 ["radius_core", "Ang", 50.0, [0, inf], "volume", "intern layer thickness"], 206 ["sld_core", "1e-6/Ang^2", 2.07, [-inf, inf], "", "sld function flat"], 207 ["sld_solvent", "1e-6/Ang^2", 1.0, [-inf, inf], "", "sld function solvent"], 208 ["func_inter0", "", 0, [0, 4], "", "Erf:0, RPower:1, LPower:2, RExp:3, LExp:4"], 209 ["thick_inter0", "Ang", 50.0, [0, inf], "volume", "intern layer thickness for core layer"], 210 ["nu_inter0", "", 2.5, [-inf, inf], "", "steepness parameter for core layer"], 211 ["sld_flat[n_shells]", "1e-6/Ang^2", 4.06, [-inf, inf], "", "sld function flat"], 212 ["thick_flat[n_shells]", "Ang", 100.0, [0, inf], "volume", "flat layer_thickness"], 213 ["func_inter[n_shells]", "", 0, [0, 4], "", "Erf:0, RPower:1, LPower:2, RExp:3, LExp:4"], 214 ["thick_inter[n_shells]", "Ang", 50.0, [0, inf], "volume", "intern layer thickness"], 215 ["nu_inter[n_shells]", "", 2.5, [-inf, inf], "", "steepness parameter"], 182 216 ] 183 217 # pylint: enable=bad-whitespace, line-too-long 184 #source = ["lib/librefl.c", "lib/sph_j1c.c", "spherical_sld.c"] 185 186 def Iq(q, *args, **kw): 187 return q 188 189 def Iqxy(qx, *args, **kw): 190 return qx 191 192 193 demo = dict( 194 n_shells=4, 195 scale=1.0, 196 solvent_sld=1.0, 197 background=0.0, 198 npts_inter=35.0, 199 ) 218 source = ["lib/librefl.c", "lib/sph_j1c.c", "spherical_sld.c"] 219 220 profile_axes = ['Radius (A)', 'SLD (1e-6/A^2)'] 221 def profile(n_shells, radius_core, sld_core, sld_solvent, sld_flat, 222 thick_flat, func_inter, thick_inter, nu_inter, npts_inter): 223 """ 224 Returns shape profile with x=radius, 
y=SLD. 225 """ 226 227 z = [] 228 beta = [] 229 z0 = 0 230 # two sld points for core 231 z.append(0) 232 beta.append(sld_core) 233 z.append(radius_core) 234 beta.append(sld_core) 235 z0 += radius_core 236 237 for i in range(1, n_shells+2): 238 dz = thick_inter[i-1]/npts_inter 239 # j=0 for interface, j=1 for flat layer 240 for j in range(0, 2): 241 # interation for sub-layers 242 for n_s in range(0, npts_inter+1): 243 if j == 1: 244 if i == n_shells+1: 245 break 246 # shift half sub thickness for the first point 247 z0 -= dz#/2.0 248 z.append(z0) 249 #z0 -= dz/2.0 250 z0 += thick_flat[i] 251 sld_i = sld_flat[i] 252 beta.append(sld_flat[i]) 253 dz = 0 254 else: 255 nu = nu_inter[i-1] 256 # decide which sld is which, sld_r or sld_l 257 if i == 1: 258 sld_l = sld_core 259 else: 260 sld_l = sld_flat[i-1] 261 if i == n_shells+1: 262 sld_r = sld_solvent 263 else: 264 sld_r = sld_flat[i] 265 # get function type 266 func_idx = func_inter[i-1] 267 # calculate the sld 268 sld_i = intersldfunc(func_idx, npts_inter, n_s, nu, 269 sld_l, sld_r) 270 # append to the list 271 z.append(z0) 272 beta.append(sld_i) 273 z0 += dz 274 if j == 1: 275 break 276 z.append(z0) 277 beta.append(sld_solvent) 278 z_ext = z0/5.0 279 z.append(z0+z_ext) 280 beta.append(sld_solvent) 281 # return sld profile (r, beta) 282 return np.asarray(z), np.asarray(beta)*1e-6 283 284 def ER(n_shells, radius_core, thick_inter0, thick_inter, thick_flat): 285 total_thickness = thick_inter0 286 total_thickness += np.sum(thick_inter[:n_shells], axis=0) 287 total_thickness += np.sum(thick_flat[:n_shells], axis=0) 288 return total_thickness + radius_core 289 290 291 demo = { 292 "n_shells": 4, 293 "npts_inter": 35.0, 294 "radius_core": 50.0, 295 "sld_core": 2.07, 296 "sld_solvent": 1.0, 297 "thick_inter0": 50.0, 298 "func_inter0": 0, 299 "nu_inter0": 2.5, 300 "sld_flat":[4.0,3.5,4.0,3.5], 301 "thick_flat":[100.0,100.0,100.0,100.0], 302 "func_inter":[0,0,0,0], 303 "thick_inter":[50.0,50.0,50.0,50.0], 304 "nu_inter":[2.5,2.5,2.5,2.5], 305 } 200 306 201 307 #TODO: Not working yet 202 308 tests = [ 203 309 # Accuracy tests based on content in test/utest_extra_models.py 204 [{'npts_iter':35, 205 'sld_solv':1, 206 'radius_core':50.0, 207 'sld_core':2.07, 208 'func_inter2':0.0, 209 'thick_inter2':50, 210 'nu_inter2':2.5, 211 'sld_flat2':4, 212 'thick_flat2':100, 213 'func_inter1':0.0, 214 'thick_inter1':50, 215 'nu_inter1':2.5, 216 'background': 0.0, 310 [{"n_shells":4, 311 'npts_inter':35, 312 "radius_core":50.0, 313 "sld_core":2.07, 314 "sld_solvent": 1.0, 315 "sld_flat":[4.0,3.5,4.0,3.5], 316 "thick_flat":[100.0,100.0,100.0,100.0], 317 "func_inter":[0,0,0,0], 318 "thick_inter":[50.0,50.0,50.0,50.0], 319 "nu_inter":[2.5,2.5,2.5,2.5] 217 320 }, 0.001, 0.001], 218 321 ] -
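The new ER() in spherical_sld.py reports the effective radius as the core radius plus the core interface thickness plus every flat-layer and interface thickness. Evaluating a copy of it with the demo values from the diff (radius_core=50, thick_inter0=50, four shells with thick_flat=100 and thick_inter=50) gives 50 + 50 + 4*100 + 4*50 = 700:

    import numpy as np

    def ER(n_shells, radius_core, thick_inter0, thick_inter, thick_flat):
        total_thickness = thick_inter0
        total_thickness += np.sum(thick_inter[:n_shells], axis=0)
        total_thickness += np.sum(thick_flat[:n_shells], axis=0)
        return total_thickness + radius_core

    print(ER(4, 50.0, 50.0, np.array([50.0]*4), np.array([100.0]*4)))   # 700.0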
sasmodels/models/squarewell.py
rec45c4f rd2bb604 123 123 """ 124 124 125 Iqxy = """126 return Iq(sqrt(qx*qx+qy*qy), IQ_PARAMETERS);127 """128 129 125 # ER defaults to 0.0 130 126 # VR defaults to 1.0 -
sasmodels/models/stickyhardsphere.py
rec45c4f rd2bb604 171 171 """ 172 172 173 Iqxy = """174 return Iq(sqrt(qx*qx+qy*qy), IQ_PARAMETERS);175 """176 177 173 # ER defaults to 0.0 178 174 # VR defaults to 1.0 -
sasmodels/product.py
rf247314 r0ff62d4 11 11 *ProductModel(P, S)*. 12 12 """ 13 import numpy as np 13 import numpy as np # type: ignore 14 14 15 from . core import call_ER_VR16 from . generate import process_parameters15 from .modelinfo import suffix_parameter, ParameterTable, ModelInfo 16 from .kernel import KernelModel, Kernel, dispersion_mesh 17 17 18 SCALE=0 19 BACKGROUND=1 20 RADIUS_EFFECTIVE=2 21 VOLFRACTION=3 18 try: 19 from typing import Tuple 20 from .modelinfo import ParameterSet 21 from .details import CallDetails 22 except ImportError: 23 pass 24 25 # TODO: make estimates available to constraints 26 #ESTIMATED_PARAMETERS = [ 27 # ["est_effect_radius", "A", 0.0, [0, np.inf], "", "Estimated effective radius"], 28 # ["est_volume_ratio", "", 1.0, [0, np.inf], "", "Estimated volume ratio"], 29 #] 22 30 23 31 # TODO: core_shell_sphere model has suppressed the volume ratio calculation 24 32 # revert it after making VR and ER available at run time as constraints. 25 33 def make_product_info(p_info, s_info): 34 # type: (ModelInfo, ModelInfo) -> ModelInfo 26 35 """ 27 36 Create info block for product model. 28 37 """ 29 p_id, p_name, p_pars = p_info ['id'], p_info['name'], p_info['parameters']30 s_id, s_name, s_pars = s_info ['id'], s_info['name'], s_info['parameters']31 # We require models to start with scale and background32 assert s_pars[SCALE].name == 'scale'33 assert s_pars[BACKGROUND].name == 'background' 34 # We require structure factors to start with effect radius and volfraction35 assert s_pars[RADIUS_EFFECTIVE].name == 'radius_effective'36 assert s_pars[VOLFRACTION].name == 'volfraction'37 # Combine the parameter sets. We are skipping the first three38 # parameters of S since scale, background are defined in P and39 # effect_radius is set from P.ER().40 pars = p_pars + s_pars[3:]41 # check for duplicates; can't use assertion since they may never be checked42 if len(set(p.name for p in pars)) != len(pars):43 raise ValueError("Duplicate parameters in %s and %s"%(p_id)) 44 model_info = {}45 model_info ['id']= '*'.join((p_id, s_id))46 model_info ['name']= ' X '.join((p_name, s_name))47 model_info ['filename']= None48 model_info ['title'] = 'Product of %s and structure factor%s'%(p_name, s_name)49 model_info ['description'] = model_info['title']50 model_info ['docs'] = model_info['title']51 model_info ['category']= "custom"52 model_info ['parameters'] = pars53 #model_info ['single'] = p_info['single'] and s_info['single']54 model_info ['structure_factor']= False55 model_info ['variant_info']= None56 #model_info ['tests']= []57 #model_info ['source']= []38 p_id, p_name, p_pars = p_info.id, p_info.name, p_info.parameters 39 s_id, s_name, s_pars = s_info.id, s_info.name, s_info.parameters 40 p_set = set(p.id for p in p_pars.call_parameters) 41 s_set = set(p.id for p in s_pars.call_parameters) 42 43 if p_set & s_set: 44 # there is some overlap between the parameter names; tag the 45 # overlapping S parameters with name_S 46 s_list = [(suffix_parameter(par, "_S") if par.id in p_set else par) 47 for par in s_pars.kernel_parameters] 48 combined_pars = p_pars.kernel_parameters + s_list 49 else: 50 combined_pars = p_pars.kernel_parameters + s_pars.kernel_parameters 51 parameters = ParameterTable(combined_pars) 52 53 model_info = ModelInfo() 54 model_info.id = '*'.join((p_id, s_id)) 55 model_info.name = ' X '.join((p_name, s_name)) 56 model_info.filename = None 57 model_info.title = 'Product of %s and %s'%(p_name, s_name) 58 model_info.description = model_info.title 59 model_info.docs = model_info.title 60 
model_info.category = "custom" 61 model_info.parameters = parameters 62 #model_info.single = p_info.single and s_info.single 63 model_info.structure_factor = False 64 model_info.variant_info = None 65 #model_info.tests = [] 66 #model_info.source = [] 58 67 # Iq, Iqxy, form_volume, ER, VR and sesans 59 model_info['composition'] = ('product', [p_info, s_info]) 60 process_parameters(model_info) 68 model_info.composition = ('product', [p_info, s_info]) 61 69 return model_info 62 70 63 class ProductModel( object):71 class ProductModel(KernelModel): 64 72 def __init__(self, model_info, P, S): 73 # type: (ModelInfo, KernelModel, KernelModel) -> None 65 74 self.info = model_info 66 75 self.P = P … … 68 77 69 78 def __call__(self, q_vectors): 79 # type: (List[np.ndarray]) -> Kernel 70 80 # Note: may be sending the q_vectors to the GPU twice even though they 71 81 # are only needed once. It would mess up modularity quite a bit to … … 74 84 # in opencl; or both in opencl, but one in single precision and the 75 85 # other in double precision). 76 p_kernel = self.P (q_vectors)77 s_kernel = self.S (q_vectors)86 p_kernel = self.P.make_kernel(q_vectors) 87 s_kernel = self.S.make_kernel(q_vectors) 78 88 return ProductKernel(self.info, p_kernel, s_kernel) 79 89 80 90 def release(self): 91 # type: (None) -> None 81 92 """ 82 93 Free resources associated with the model. … … 86 97 87 98 88 class ProductKernel( object):99 class ProductKernel(Kernel): 89 100 def __init__(self, model_info, p_kernel, s_kernel): 90 dim = '2d' if p_kernel.q_input.is_2d else '1d' 91 92 # Need to know if we want 2D and magnetic parameters when constructing 93 # a parameter map. 94 par_map = {} 95 p_info = p_kernel.info['partype'] 96 s_info = s_kernel.info['partype'] 97 vol_pars = set(p_info['volume']) 98 if dim == '2d': 99 num_p_fixed = len(p_info['fixed-2d']) 100 num_p_pd = len(p_info['pd-2d']) 101 num_s_fixed = len(s_info['fixed-2d']) 102 num_s_pd = len(s_info['pd-2d']) - 1 # exclude effect_radius 103 # volume parameters are amongst the pd pars for P, not S 104 vol_par_idx = [k for k,v in enumerate(p_info['pd-2d']) 105 if v in vol_pars] 106 else: 107 num_p_fixed = len(p_info['fixed-1d']) 108 num_p_pd = len(p_info['pd-1d']) 109 num_s_fixed = len(s_info['fixed-1d']) 110 num_s_pd = len(s_info['pd-1d']) - 1 # exclude effect_radius 111 # volume parameters are amongst the pd pars for P, not S 112 vol_par_idx = [k for k,v in enumerate(p_info['pd-1d']) 113 if v in vol_pars] 114 115 start = 0 116 par_map['p_fixed'] = np.arange(start, start+num_p_fixed) 117 # User doesn't set scale, background or effect_radius for S in P*S, 118 # so borrow values from end of p_fixed. This makes volfraction the 119 # first S parameter. 
120 start += num_p_fixed 121 par_map['s_fixed'] = np.hstack(([start,start], 122 np.arange(start, start+num_s_fixed-2))) 123 par_map['volfraction'] = num_p_fixed 124 start += num_s_fixed-2 125 # vol pars offset from the start of pd pars 126 par_map['vol_pars'] = [start+k for k in vol_par_idx] 127 par_map['p_pd'] = np.arange(start, start+num_p_pd) 128 start += num_p_pd-1 129 par_map['s_pd'] = np.hstack((start, 130 np.arange(start, start+num_s_pd-1))) 131 132 self.fixed_pars = model_info['partype']['fixed-' + dim] 133 self.pd_pars = model_info['partype']['pd-' + dim] 101 # type: (ModelInfo, Kernel, Kernel) -> None 134 102 self.info = model_info 135 103 self.p_kernel = p_kernel 136 104 self.s_kernel = s_kernel 137 self.par_map = par_map138 105 139 def __call__(self, fixed_pars, pd_pars, cutoff=1e-5): 140 pars = fixed_pars + pd_pars 141 scale = pars[SCALE] 142 background = pars[BACKGROUND] 143 s_volfraction = pars[self.par_map['volfraction']] 144 p_fixed = [pars[k] for k in self.par_map['p_fixed']] 145 s_fixed = [pars[k] for k in self.par_map['s_fixed']] 146 p_pd = [pars[k] for k in self.par_map['p_pd']] 147 s_pd = [pars[k] for k in self.par_map['s_pd']] 148 vol_pars = [pars[k] for k in self.par_map['vol_pars']] 149 106 def __call__(self, details, weights, values, cutoff): 107 # type: (CallDetails, np.ndarray, np.ndarray, float) -> np.ndarray 150 108 effect_radius, vol_ratio = call_ER_VR(self.p_kernel.info, vol_pars) 151 109 152 p_fixed[SCALE] = s_volfraction 153 p_fixed[BACKGROUND] = 0.0 154 s_fixed[SCALE] = scale 155 s_fixed[BACKGROUND] = 0.0 156 s_fixed[2] = s_volfraction/vol_ratio 157 s_pd[0] = [effect_radius], [1.0] 158 159 p_res = self.p_kernel(p_fixed, p_pd, cutoff) 160 s_res = self.s_kernel(s_fixed, s_pd, cutoff) 110 p_details, s_details = details.parts 111 p_res = self.p_kernel(p_details, weights, values, cutoff) 112 s_res = self.s_kernel(s_details, weights, values, cutoff) 161 113 #print s_fixed, s_pd, p_fixed, p_pd 162 114 163 return p_res*s_res + background115 return values[0]*(p_res*s_res) + values[1] 164 116 165 117 def release(self): 118 # type: () -> None 166 119 self.p_kernel.release() 167 self. q_kernel.release()120 self.s_kernel.release() 168 121 122 def call_ER_VR(model_info, pars): 123 """ 124 Return effect radius and volume ratio for the model. 125 """ 126 if model_info.ER is None and model_info.VR is None: 127 return 1.0, 1.0 128 129 value, weight = _vol_pars(model_info, pars) 130 131 if model_info.ER is not None: 132 individual_radii = model_info.ER(*value) 133 effect_radius = np.sum(weight*individual_radii) / np.sum(weight) 134 else: 135 effect_radius = 1.0 136 137 if model_info.VR is not None: 138 whole, part = model_info.VR(*value) 139 volume_ratio = np.sum(weight*part)/np.sum(weight*whole) 140 else: 141 volume_ratio = 1.0 142 143 return effect_radius, volume_ratio 144 145 def _vol_pars(model_info, pars): 146 # type: (ModelInfo, ParameterSet) -> Tuple[np.ndarray, np.ndarray] 147 vol_pars = [get_weights(p, pars) 148 for p in model_info.parameters.call_parameters 149 if p.type == 'volume'] 150 value, weight = dispersion_mesh(model_info, vol_pars) 151 return value, weight 152 -
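
The reworked ProductKernel above passes a single (details, weights, values) set to both the P and S kernels and combines the results as values[0]*(p_res*s_res) + values[1], so scale and background occupy the first two slots of the value vector, while call_ER_VR reduces the volume-parameter dispersion mesh to one effective radius by a weighted average. The numpy sketch below illustrates only that averaging step; the radius samples, weights and the sphere_ER helper are made up for illustration and are not part of the changeset.

    import numpy as np

    # Hypothetical dispersion mesh for a single volume parameter (a sphere
    # radius): three sampled values with their polydispersity weights.
    radius_values = np.array([40.0, 50.0, 60.0])   # illustrative, in Angstrom
    radius_weights = np.array([0.25, 0.50, 0.25])

    def sphere_ER(radius):
        # For a bare sphere the effective radius is the radius itself; real
        # models supply their own ER(...) in the model definition.
        return radius

    # Weighted average over the mesh, mirroring the
    # sum(weight*individual_radii)/sum(weight) expression in call_ER_VR.
    individual_radii = sphere_ER(radius_values)
    effect_radius = np.sum(radius_weights*individual_radii) / np.sum(radius_weights)
    print(effect_radius)   # 50.0 for this symmetric weight set
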
sasmodels/resolution.py
r4d76711 r7ae2b7f 6 6 from __future__ import division 7 7 8 from scipy.special import erf 9 from numpy import sqrt, log, log10 10 import numpy as np 8 from scipy.special import erf # type: ignore 9 from numpy import sqrt, log, log10, exp, pi # type: ignore 10 import numpy as np # type: ignore 11 11 12 12 __all__ = ["Resolution", "Perfect1D", "Pinhole1D", "Slit1D", … … 35 35 smeared theory I(q). 36 36 """ 37 q = None 38 q_calc = None 37 q = None # type: np.ndarray 38 q_calc = None # type: np.ndarray 39 39 def apply(self, theory): 40 40 """ … … 476 476 *pars* are the parameter values to use when evaluating. 477 477 """ 478 from sasmodels import core478 from sasmodels import direct_model 479 479 kernel = form.make_kernel([q]) 480 theory = core.call_kernel(kernel, pars)480 theory = direct_model.call_kernel(kernel, pars) 481 481 kernel.release() 482 482 return theory … … 489 489 *q0* is the center, *dq* is the width and *q* are the points to evaluate. 490 490 """ 491 from numpy import exp, pi492 491 return exp(-0.5*((q-q0)/dq)**2)/(sqrt(2*pi)*dq) 493 492 … … 500 499 that make it slow to evaluate but give it good accuracy. 501 500 """ 502 from scipy.integrate import romberg 503 504 if any(k not in form.info['defaults'] for k in pars.keys()): 505 keys = set(form.info['defaults'].keys()) 506 extra = set(pars.keys()) - keys 507 raise ValueError("bad parameters: [%s] not in [%s]"% 508 (", ".join(sorted(extra)), ", ".join(sorted(keys)))) 501 from scipy.integrate import romberg # type: ignore 502 503 par_set = set([p.name for p in form.info.parameters.call_parameters]) 504 if any(k not in par_set for k in pars.keys()): 505 extra = set(pars.keys()) - par_set 506 raise ValueError("bad parameters: [%s] not in [%s]" 507 % (", ".join(sorted(extra)), 508 ", ".join(sorted(pars.keys())))) 509 509 510 510 if np.isscalar(width): … … 554 554 that make it slow to evaluate but give it good accuracy. 
555 555 """ 556 from scipy.integrate import romberg 557 558 if any(k not in form.info['defaults'] for k in pars.keys()): 559 keys = set(form.info['defaults'].keys()) 560 extra = set(pars.keys()) - keys 561 raise ValueError("bad parameters: [%s] not in [%s]"% 562 (", ".join(sorted(extra)), ", ".join(sorted(keys)))) 556 from scipy.integrate import romberg # type: ignore 557 558 par_set = set([p.name for p in form.info.parameters.call_parameters]) 559 if any(k not in par_set for k in pars.keys()): 560 extra = set(pars.keys()) - par_set 561 raise ValueError("bad parameters: [%s] not in [%s]" 562 % (", ".join(sorted(extra)), 563 ", ".join(sorted(pars.keys())))) 563 564 564 565 _fn = lambda q, q0, dq: eval_form(q, form, pars)*gaussian(q, q0, dq) … … 692 693 693 694 def _eval_sphere(self, pars, resolution): 694 from sasmodels import core695 from sasmodels import direct_model 695 696 kernel = self.model.make_kernel([resolution.q_calc]) 696 theory = core.call_kernel(kernel, pars)697 theory = direct_model.call_kernel(kernel, pars) 697 698 result = resolution.apply(theory) 698 699 kernel.release() … … 750 751 #tol = 0.001 751 752 ## The default 3 sigma and no extra points gets 1% 752 q_calc, tol = None, 0.01 753 q_calc = None # type: np.ndarray 754 tol = 0.01 753 755 resolution = Pinhole1D(q, q_width, q_calc=q_calc) 754 756 output = self._eval_sphere(pars, resolution) 755 757 if 0: # debug plot 756 import matplotlib.pyplot as plt 758 import matplotlib.pyplot as plt # type: ignore 757 759 resolution = Perfect1D(q) 758 760 source = self._eval_sphere(pars, resolution) … … 1026 1028 """ 1027 1029 import sys 1028 import xmlrunner 1030 import xmlrunner # type: ignore 1029 1031 1030 1032 suite = unittest.TestSuite() … … 1043 1045 import sys 1044 1046 from sasmodels import core 1047 from sasmodels import direct_model 1045 1048 name = sys.argv[1] if len(sys.argv) > 1 else 'cylinder' 1046 1049 … … 1063 1066 1064 1067 kernel = model.make_kernel([resolution.q_calc]) 1065 theory = core.call_kernel(kernel, pars)1068 theory = direct_model.call_kernel(kernel, pars) 1066 1069 Iq = resolution.apply(theory) 1067 1070 … … 1073 1076 Iq_romb = romberg_pinhole_1d(resolution.q, dq, model, pars) 1074 1077 1075 import matplotlib.pyplot as plt 1078 import matplotlib.pyplot as plt # type: ignore 1076 1079 plt.loglog(resolution.q_calc, theory, label='unsmeared') 1077 1080 plt.loglog(resolution.q, Iq, label='smeared', hold=True) … … 1108 1111 Run the resolution demos. 1109 1112 """ 1110 import matplotlib.pyplot as plt 1113 import matplotlib.pyplot as plt # type: ignore 1111 1114 plt.subplot(121) 1112 1115 demo_pinhole_1d() -
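
The gaussian() helper touched above is the resolution kernel used by the Romberg cross-checks: pinhole smearing evaluates the smeared intensity at q0 as the integral of the unsmeared theory against a Gaussian of width dq centered at q0. The sketch below performs the same integral with a simple trapezoid rule on a dense q grid; smear_pinhole and the power-law "theory" are illustrative stand-ins and are not the library's Pinhole1D or romberg_pinhole_1d implementations.

    import numpy as np

    def gaussian(q, q0, dq):
        # Same Gaussian resolution kernel as gaussian() in resolution.py.
        return np.exp(-0.5*((q - q0)/dq)**2) / (np.sqrt(2*np.pi)*dq)

    def smear_pinhole(q, dq, q_calc, theory):
        # Trapezoid-rule version of the smearing integral that the Romberg
        # checks validate: I_s(q0) = integral of I(q) * G(q; q0, dq) dq.
        smeared = np.empty_like(q)
        for i, (q0, width) in enumerate(zip(q, dq)):
            smeared[i] = np.trapz(theory * gaussian(q_calc, q0, width), q_calc)
        return smeared

    # Illustrative use with a made-up power-law "theory" on a dense grid.
    q_calc = np.linspace(0.001, 0.5, 2000)
    theory = q_calc**-2.0
    q = np.array([0.01, 0.05, 0.10])
    print(smear_pinhole(q, 0.05*q, q_calc, theory))
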
sasmodels/resolution2d.py
rd6f5da6 r7ae2b7f 7 7 from __future__ import division 8 8 9 import numpy as np 10 from numpy import pi, cos, sin, sqrt 9 import numpy as np # type: ignore 10 from numpy import pi, cos, sin, sqrt # type: ignore 11 11 12 12 from . import resolution -
sasmodels/sasview_model.py
rf36c1b7 r0ff62d4 21 21 import logging 22 22 23 import numpy as np 23 import numpy as np # type: ignore 24 24 25 25 from . import core 26 26 from . import custom 27 27 from . import generate 28 from . import weights 29 from . import modelinfo 30 from . import kernel 28 31 29 32 try: 30 from typing import Dict, Mapping, Any, Sequence, Tuple, NamedTuple, List, Optional 33 from typing import Dict, Mapping, Any, Sequence, Tuple, NamedTuple, List, Optional, Union, Callable 34 from .modelinfo import ModelInfo, Parameter 31 35 from .kernel import KernelModel 32 36 MultiplicityInfoType = NamedTuple( … … 34 38 [("number", int), ("control", str), ("choices", List[str]), 35 39 ("x_axis_label", str)]) 40 SasviewModelType = Callable[[int], "SasviewModel"] 36 41 except ImportError: 37 42 pass 38 43 39 44 # TODO: separate x_axis_label from multiplicity info 40 # The x-axis label belongs with the profile generating function45 # The profile x-axis label belongs with the profile generating function 41 46 MultiplicityInfo = collections.namedtuple( 42 47 'MultiplicityInfo', … … 44 49 ) 45 50 51 # TODO: figure out how to say that the return type is a subclass 46 52 def load_standard_models(): 53 # type: () -> List[SasviewModelType] 47 54 """ 48 55 Load and return the list of predefined models. … … 55 62 try: 56 63 models.append(_make_standard_model(name)) 57 except :64 except Exception: 58 65 logging.error(traceback.format_exc()) 59 66 return models … … 61 68 62 69 def load_custom_model(path): 70 # type: (str) -> SasviewModelType 63 71 """ 64 72 Load a custom model given the model path. 65 73 """ 66 74 kernel_module = custom.load_custom_kernel_module(path) 67 model_info = generate.make_model_info(kernel_module)75 model_info = modelinfo.make_model_info(kernel_module) 68 76 return _make_model_from_info(model_info) 69 77 70 78 71 79 def _make_standard_model(name): 80 # type: (str) -> SasviewModelType 72 81 """ 73 82 Load the sasview model defined by *name*. … … 78 87 """ 79 88 kernel_module = generate.load_kernel_module(name) 80 model_info = generate.make_model_info(kernel_module)89 model_info = modelinfo.make_model_info(kernel_module) 81 90 return _make_model_from_info(model_info) 82 91 83 92 84 93 def _make_model_from_info(model_info): 94 # type: (ModelInfo) -> SasviewModelType 85 95 """ 86 96 Convert *model_info* into a SasView model wrapper. 87 97 """ 88 model_info['variant_info'] = None # temporary hack for older sasview 89 def __init__(self, multiplicity=1): 98 def __init__(self, multiplicity=None): 90 99 SasviewModel.__init__(self, multiplicity=multiplicity) 91 100 attrs = _generate_model_attributes(model_info) 92 101 attrs['__init__'] = __init__ 93 ConstructedModel = type(model_info ['id'], (SasviewModel,), attrs)102 ConstructedModel = type(model_info.name, (SasviewModel,), attrs) # type: SasviewModelType 94 103 return ConstructedModel 95 104 … … 104 113 All the attributes should be immutable to avoid accidents. 
105 114 """ 115 116 # TODO: allow model to override axis labels input/output name/unit 117 118 # Process multiplicity 119 non_fittable = [] # type: List[str] 120 xlabel = model_info.profile_axes[0] if model_info.profile is not None else "" 121 variants = MultiplicityInfo(0, "", [], xlabel) 122 for p in model_info.parameters.kernel_parameters: 123 if p.name == model_info.control: 124 non_fittable.append(p.name) 125 variants = MultiplicityInfo( 126 len(p.choices), p.name, p.choices, xlabel 127 ) 128 break 129 elif p.is_control: 130 non_fittable.append(p.name) 131 variants = MultiplicityInfo( 132 int(p.limits[1]), p.name, p.choices, xlabel 133 ) 134 break 135 136 # Organize parameter sets 137 orientation_params = [] 138 magnetic_params = [] 139 fixed = [] 140 for p in model_info.parameters.user_parameters(): 141 if p.type == 'orientation': 142 orientation_params.append(p.name) 143 orientation_params.append(p.name+".width") 144 fixed.append(p.name+".width") 145 if p.type == 'magnetic': 146 orientation_params.append(p.name) 147 magnetic_params.append(p.name) 148 fixed.append(p.name+".width") 149 150 # Build class dictionary 106 151 attrs = {} # type: Dict[str, Any] 107 152 attrs['_model_info'] = model_info 108 attrs['name'] = model_info['name'] 109 attrs['id'] = model_info['id'] 110 attrs['description'] = model_info['description'] 111 attrs['category'] = model_info['category'] 112 113 # TODO: allow model to override axis labels input/output name/unit 114 115 #self.is_multifunc = False 116 non_fittable = [] # type: List[str] 117 variants = MultiplicityInfo(0, "", [], "") 118 attrs['is_structure_factor'] = model_info['structure_factor'] 119 attrs['is_form_factor'] = model_info['ER'] is not None 153 attrs['name'] = model_info.name 154 attrs['id'] = model_info.id 155 attrs['description'] = model_info.description 156 attrs['category'] = model_info.category 157 attrs['is_structure_factor'] = model_info.structure_factor 158 attrs['is_form_factor'] = model_info.ER is not None 120 159 attrs['is_multiplicity_model'] = variants[0] > 1 121 160 attrs['multiplicity_info'] = variants 122 123 partype = model_info['partype']124 orientation_params = (125 partype['orientation']126 + [n + '.width' for n in partype['orientation']]127 + partype['magnetic'])128 magnetic_params = partype['magnetic']129 fixed = [n + '.width' for n in partype['pd-2d']]130 131 161 attrs['orientation_params'] = tuple(orientation_params) 132 162 attrs['magnetic_params'] = tuple(magnetic_params) 133 163 attrs['fixed'] = tuple(fixed) 134 135 164 attrs['non_fittable'] = tuple(non_fittable) 136 165 … … 192 221 dispersion = None # type: Dict[str, Any] 193 222 #: units and limits for each parameter 194 details = None # type: Mapping[str, Tuple(str, float, float)] 195 #: multiplicity used, or None if no multiplicity controls 223 details = None # type: Dict[str, Sequence[Any]] 224 # # actual type is Dict[str, List[str, float, float]] 225 #: multiplicity value, or None if no multiplicity on the model 196 226 multiplicity = None # type: Optional[int] 197 198 def __init__(self, multiplicity): 199 # type: () -> None 200 #print("initializing", self.name) 201 #raise Exception("first initialization") 202 self._model = None 203 204 ## _persistency_dict is used by sas.perspectives.fitting.basepage 205 ## to store dispersity reference. 227 #: memory for polydispersity array if using ArrayDispersion (used by sasview). 
228 _persistency_dict = None # type: Dict[str, Tuple[np.ndarray, np.ndarray]] 229 230 def __init__(self, multiplicity=None): 231 # type: (Optional[int]) -> None 232 233 # TODO: _persistency_dict to persistency_dict throughout sasview 234 # TODO: refactor multiplicity to encompass variants 235 # TODO: dispersion should be a class 236 # TODO: refactor multiplicity info 237 # TODO: separate profile view from multiplicity 238 # The button label, x and y axis labels and scale need to be under 239 # the control of the model, not the fit page. Maximum flexibility, 240 # the fit page would supply the canvas and the profile could plot 241 # how it wants, but this assumes matplotlib. Next level is that 242 # we provide some sort of data description including title, labels 243 # and lines to plot. 244 245 # Get the list of hidden parameters given the mulitplicity 246 # Don't include multiplicity in the list of parameters 247 self.multiplicity = multiplicity 248 if multiplicity is not None: 249 hidden = self._model_info.get_hidden_parameters(multiplicity) 250 hidden |= set([self.multiplicity_info.control]) 251 else: 252 hidden = set() 253 206 254 self._persistency_dict = {} 207 208 self.multiplicity = multiplicity209 210 255 self.params = collections.OrderedDict() 211 256 self.dispersion = {} 212 257 self.details = {} 213 214 for p in self._model_info['parameters']: 258 for p in self._model_info.parameters.user_parameters(): 259 if p.name in hidden: 260 continue 215 261 self.params[p.name] = p.default 216 self.details[p.name] = [p.units] + p.limits 217 218 for name in self._model_info['partype']['pd-2d']: 219 self.dispersion[name] = { 220 'width': 0, 221 'npts': 35, 222 'nsigmas': 3, 223 'type': 'gaussian', 224 } 262 self.details[p.id] = [p.units, p.limits[0], p.limits[1]] 263 if p.polydisperse: 264 self.details[p.id+".width"] = [ 265 "", 0.0, 1.0 if p.relative_pd else np.inf 266 ] 267 self.dispersion[p.name] = { 268 'width': 0, 269 'npts': 35, 270 'nsigmas': 3, 271 'type': 'gaussian', 272 } 225 273 226 274 def __get_state__(self): 275 # type: () -> Dict[str, Any] 227 276 state = self.__dict__.copy() 228 277 state.pop('_model') … … 233 282 234 283 def __set_state__(self, state): 284 # type: (Dict[str, Any]) -> None 235 285 self.__dict__ = state 236 286 self._model = None 237 287 238 288 def __str__(self): 289 # type: () -> str 239 290 """ 240 291 :return: string representation … … 243 294 244 295 def is_fittable(self, par_name): 296 # type: (str) -> bool 245 297 """ 246 298 Check if a given parameter is fittable or not … … 253 305 254 306 255 # pylint: disable=no-self-use256 307 def getProfile(self): 308 # type: () -> (np.ndarray, np.ndarray) 257 309 """ 258 310 Get SLD profile … … 261 313 beta is a list of the corresponding SLD values 262 314 """ 263 return None, None 315 args = [] # type: List[Union[float, np.ndarray]] 316 for p in self._model_info.parameters.kernel_parameters: 317 if p.id == self.multiplicity_info.control: 318 args.append(float(self.multiplicity)) 319 elif p.length == 1: 320 args.append(self.params.get(p.id, np.NaN)) 321 else: 322 args.append([self.params.get(p.id+str(k), np.NaN) 323 for k in range(1,p.length+1)]) 324 return self._model_info.profile(*args) 264 325 265 326 def setParam(self, name, value): 327 # type: (str, float) -> None 266 328 """ 267 329 Set the value of a model parameter … … 290 352 291 353 def getParam(self, name): 354 # type: (str) -> float 292 355 """ 293 356 Set the value of a model parameter … … 313 376 314 377 def getParamList(self): 378 # type: () -> 
Sequence[str] 315 379 """ 316 380 Return a list of all available parameters for the model 317 381 """ 318 param_list = self.params.keys()382 param_list = list(self.params.keys()) 319 383 # WARNING: Extending the list with the dispersion parameters 320 384 param_list.extend(self.getDispParamList()) … … 322 386 323 387 def getDispParamList(self): 388 # type: () -> Sequence[str] 324 389 """ 325 390 Return a list of polydispersity parameters for the model 326 391 """ 327 392 # TODO: fix test so that parameter order doesn't matter 328 ret = ['%s.%s' % (d.lower(), p) 329 for d in self._model_info['partype']['pd-2d'] 330 for p in ('npts', 'nsigmas', 'width')] 393 ret = ['%s.%s' % (p.name.lower(), ext) 394 for p in self._model_info.parameters.user_parameters() 395 for ext in ('npts', 'nsigmas', 'width') 396 if p.polydisperse] 331 397 #print(ret) 332 398 return ret 333 399 334 400 def clone(self): 401 # type: () -> "SasviewModel" 335 402 """ Return a identical copy of self """ 336 403 return deepcopy(self) 337 404 338 405 def run(self, x=0.0): 406 # type: (Union[float, (float, float), List[float]]) -> float 339 407 """ 340 408 Evaluate the model … … 349 417 # pylint: disable=unpacking-non-sequence 350 418 q, phi = x 351 return self.calculate_Iq([q * math.cos(phi)], 352 [q * math.sin(phi)])[0] 353 else: 354 return self.calculate_Iq([float(x)])[0] 419 return self.calculate_Iq([q*math.cos(phi)], [q*math.sin(phi)])[0] 420 else: 421 return self.calculate_Iq([x])[0] 355 422 356 423 357 424 def runXY(self, x=0.0): 425 # type: (Union[float, (float, float), List[float]]) -> float 358 426 """ 359 427 Evaluate the model in cartesian coordinates … … 366 434 """ 367 435 if isinstance(x, (list, tuple)): 368 return self.calculate_Iq([ float(x[0])], [float(x[1])])[0]369 else: 370 return self.calculate_Iq([ float(x)])[0]436 return self.calculate_Iq([x[0]], [x[1]])[0] 437 else: 438 return self.calculate_Iq([x])[0] 371 439 372 440 def evalDistribution(self, qdist): 441 # type: (Union[np.ndarray, Tuple[np.ndarray, np.ndarray], List[np.ndarray]]) -> np.ndarray 373 442 r""" 374 443 Evaluate a distribution of q-values. … … 401 470 # Check whether we have a list of ndarrays [qx,qy] 402 471 qx, qy = qdist 403 partype = self._model_info['partype'] 404 if not partype['orientation'] and not partype['magnetic']: 472 if not self._model_info.parameters.has_2d: 405 473 return self.calculate_Iq(np.sqrt(qx ** 2 + qy ** 2)) 406 474 else: … … 415 483 % type(qdist)) 416 484 417 def calculate_Iq(self, *args): 485 def calculate_Iq(self, qx, qy=None): 486 # type: (Sequence[float], Optional[Sequence[float]]) -> np.ndarray 418 487 """ 419 488 Calculate Iq for one set of q with the current parameters. 
… … 426 495 if self._model is None: 427 496 self._model = core.build_model(self._model_info) 428 q_vectors = [np.asarray(q) for q in args] 429 fn = self._model.make_kernel(q_vectors) 430 pars = [self.params[v] for v in fn.fixed_pars] 431 pd_pars = [self._get_weights(p) for p in fn.pd_pars] 432 result = fn(pars, pd_pars, self.cutoff) 433 fn.q_input.release() 434 fn.release() 497 if qy is not None: 498 q_vectors = [np.asarray(qx), np.asarray(qy)] 499 else: 500 q_vectors = [np.asarray(qx)] 501 kernel = self._model.make_kernel(q_vectors) 502 pairs = [self._get_weights(p) 503 for p in self._model_info.parameters.call_parameters] 504 call_details, weight, value = kernel.build_details(kernel, pairs) 505 result = kernel(call_details, weight, value, cutoff=self.cutoff) 506 kernel.release() 435 507 return result 436 508 437 509 def calculate_ER(self): 510 # type: () -> float 438 511 """ 439 512 Calculate the effective radius for P(q)*S(q) … … 441 514 :return: the value of the effective radius 442 515 """ 443 ER = self._model_info.get('ER', None) 444 if ER is None: 516 if self._model_info.ER is None: 445 517 return 1.0 446 518 else: 447 value s, weights= self._dispersion_mesh()448 fv = ER(*values)519 value, weight = self._dispersion_mesh() 520 fv = self._model_info.ER(*value) 449 521 #print(values[0].shape, weights.shape, fv.shape) 450 return np.sum(weight s * fv) / np.sum(weights)522 return np.sum(weight * fv) / np.sum(weight) 451 523 452 524 def calculate_VR(self): 525 # type: () -> float 453 526 """ 454 527 Calculate the volf ratio for P(q)*S(q) … … 456 529 :return: the value of the volf ratio 457 530 """ 458 VR = self._model_info.get('VR', None) 459 if VR is None: 531 if self._model_info.VR is None: 460 532 return 1.0 461 533 else: 462 value s, weights= self._dispersion_mesh()463 whole, part = VR(*values)464 return np.sum(weight s * part) / np.sum(weights* whole)534 value, weight = self._dispersion_mesh() 535 whole, part = self._model_info.VR(*value) 536 return np.sum(weight * part) / np.sum(weight * whole) 465 537 466 538 def set_dispersion(self, parameter, dispersion): 539 # type: (str, weights.Dispersion) -> Dict[str, Any] 467 540 """ 468 541 Set the dispersion object for a model parameter … … 487 560 from . import weights 488 561 disperser = weights.dispersers[dispersion.__class__.__name__] 489 dispersion = weights. models[disperser]()562 dispersion = weights.MODELS[disperser]() 490 563 self.dispersion[parameter] = dispersion.get_pars() 491 564 else: … … 493 566 494 567 def _dispersion_mesh(self): 568 # type: () -> List[Tuple[np.ndarray, np.ndarray]] 495 569 """ 496 570 Create a mesh grid of dispersion parameters and weights. … … 500 574 parameter set in the vector. 501 575 """ 502 pars = self._model_info['partype']['volume'] 503 return core.dispersion_mesh([self._get_weights(p) for p in pars]) 576 pars = [self._get_weights(p) 577 for p in self._model_info.parameters.call_parameters 578 if p.type == 'volume'] 579 return kernel.dispersion_mesh(self._model_info, pars) 504 580 505 581 def _get_weights(self, par): 582 # type: (Parameter) -> Tuple[np.ndarray, np.ndarray] 506 583 """ 507 584 Return dispersion weights for parameter 508 585 """ 509 from . 
import weights 510 relative = self._model_info['partype']['pd-rel'] 511 limits = self._model_info['limits'] 512 dis = self.dispersion[par] 513 value, weight = weights.get_weights( 514 dis['type'], dis['npts'], dis['width'], dis['nsigmas'], 515 self.params[par], limits[par], par in relative) 516 return value, weight / np.sum(weight) 517 586 if par.name not in self.params: 587 if par.name == self.multiplicity_info.control: 588 return [self.multiplicity], [] 589 else: 590 return [np.NaN], [] 591 elif par.polydisperse: 592 dis = self.dispersion[par.name] 593 value, weight = weights.get_weights( 594 dis['type'], dis['npts'], dis['width'], dis['nsigmas'], 595 self.params[par.name], par.limits, par.relative_pd) 596 return value, weight / np.sum(weight) 597 else: 598 return [self.params[par.name]], [] 518 599 519 600 def test_model(): 601 # type: () -> float 520 602 """ 521 603 Test that a sasview model (cylinder) can be run. … … 525 607 return cylinder.evalDistribution([0.1,0.1]) 526 608 609 def test_rpa(): 610 # type: () -> float 611 """ 612 Test that a sasview model (cylinder) can be run. 613 """ 614 RPA = _make_standard_model('rpa') 615 rpa = RPA(3) 616 return rpa.evalDistribution([0.1,0.1]) 617 527 618 528 619 def test_model_list(): 620 # type: () -> None 529 621 """ 530 622 Make sure that all models build as sasview models. -
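
The SasviewModel wrapper now builds its kernel call from (value, weight) pairs for every call parameter and dispatches through kernel.build_details, but the public surface stays as shown: construct the generated class, set parameters, and call evalDistribution. The usage sketch below follows test_model above; it assumes a working sasmodels checkout, and the cylinder parameter names are illustrative rather than taken from this changeset.

    # Assumes a working sasmodels install; parameter names are illustrative.
    import numpy as np
    from sasmodels.sasview_model import _make_standard_model

    # Build the generated wrapper class for a standard model (as test_model
    # does) and instantiate it.
    CylinderModel = _make_standard_model('cylinder')
    cylinder = CylinderModel()

    # Plain parameters live in .params; ".width" entries control the
    # polydispersity set up in __init__ for polydisperse parameters.
    cylinder.setParam('radius', 30.0)
    cylinder.setParam('radius.width', 0.1)

    # 1D evaluation over |q|, 2D evaluation over [qx, qy] arrays.
    q = np.linspace(0.005, 0.5, 50)
    iq = cylinder.evalDistribution(q)
    iqxy = cylinder.evalDistribution([q, q])

    # Effective radius used when combining with a structure factor.
    print(cylinder.calculate_ER(), iq.shape, iqxy.shape)
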
sasmodels/sesans.py
r2684d45 r7ae2b7f 12 12 from __future__ import division 13 13 14 import numpy as np 15 from numpy import pi, exp 14 import numpy as np # type: ignore 15 from numpy import pi, exp # type: ignore 16 16 from scipy.special import jv as besselj 17 17 #import direct_model.DataMixin as model -
sasmodels/weights.py
ra936688 rfa5fd8d 3 3 """ 4 4 # TODO: include dispersion docs with the disperser models 5 from math import sqrt 6 import numpy as np 7 from scipy.special import gammaln 5 from math import sqrt # type: ignore 6 import numpy as np # type: ignore 7 from scipy.special import gammaln # type: ignore 8 8 9 9 class Dispersion(object):
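
The weights.py hunk above only annotates imports, but _get_weights in sasview_model.py shows the contract it serves: a disperser returns (value, weight) samples for a parameter, which the wrapper then normalizes by the weight sum. The gaussian_dispersion function below is a numpy-only illustrative stand-in with a similar shape of interface, not the Dispersion class actually defined in weights.py.

    import numpy as np

    def gaussian_dispersion(center, width, npts=35, nsigmas=3, relative=True):
        # Illustrative stand-in for a Gaussian disperser: sample npts points
        # within +/- nsigmas standard deviations of the center and weight
        # them with the normal density.  relative=True treats width as a
        # fraction of the center value, like the relative_pd flag above.
        sigma = width*center if relative else width
        if sigma <= 0 or npts < 2:
            return np.array([center]), np.array([1.0])
        value = center + sigma*np.linspace(-nsigmas, nsigmas, npts)
        weight = np.exp(-0.5*((value - center)/sigma)**2)
        return value, weight/np.sum(weight)

    # A 10% relative polydispersity on a 50 A radius.
    value, weight = gaussian_dispersion(50.0, 0.10)
    print(len(value), weight.sum())   # 35 points, weights normalized to 1
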