source: sasmodels/sasmodels/direct_model.py @ c1799d3

1"""
2Class interface to the model calculator.
3
4Calling a model is somewhat non-trivial since the functions called depend
5on the data type.  For 1D data the *Iq* kernel needs to be called, for
62D data the *Iqxy* kernel needs to be called, and for SESANS data the
7*Iq* kernel needs to be called followed by a Hankel transform.  Before
8the kernel is called an appropriate *q* calculation vector needs to be
9constructed.  This is not the simple *q* vector where you have measured
10the data since the resolution calculation will require values beyond the
11range of the measured data.  After the calculation the resolution calculator
12must be called to return the predicted value for each measured data point.
13
14:class:`DirectModel` is a callable object that takes *parameter=value*
15keyword arguments and returns the appropriate theory values for the data.
16
17:class:`DataMixin` does the real work of interpreting the data and calling
18the model calculator.  This is used by :class:`DirectModel`, which uses
19direct parameter values and by :class:`bumps_model.Experiment` which wraps
20the parameter values in boxes so that the user can set fitting ranges, etc.
21on the individual parameters and send the model to the Bumps optimizers.
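
For example, a minimal sketch of the intended call sequence (the model name,
q values and parameter values below are illustrative only)::

    from sasmodels.core import load_model
    from sasmodels.data import empty_data1D
    from sasmodels.direct_model import DirectModel

    data = empty_data1D([0.001, 0.01, 0.1])   # q in 1/A
    model = load_model("sphere")              # any available model name
    calculator = DirectModel(data, model)
    Iq = calculator(radius=60, sld=1, sld_solvent=6)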
22"""
23from __future__ import print_function
24
25import numpy as np  # type: ignore
26
27# TODO: fix sesans module
28from . import sesans  # type: ignore
29from . import weights
30from . import resolution
31from . import resolution2d
32from .details import make_kernel_args, dispersion_mesh
33from .modelinfo import DEFAULT_BACKGROUND
34from .product import RADIUS_MODE_ID
35
36# pylint: disable=unused-import
37try:
38    from typing import Optional, Dict, Tuple
39except ImportError:
40    pass
41else:
42    from .data import Data
43    from .kernel import Kernel, KernelModel
44    from .modelinfo import Parameter, ParameterSet
45# pylint: enable=unused-import
46
47def call_kernel(calculator, pars, cutoff=0., mono=False):
    # type: (Kernel, ParameterSet, float, bool) -> np.ndarray
    """
    Call *calculator* (the kernel returned from *model.make_kernel*) with
    parameters *pars*.

    *cutoff* is the limiting value for the product of dispersion weights used
    to perform the multidimensional dispersion calculation more quickly at a
    slight cost to accuracy. The default value of *cutoff=0* integrates over
    the entire dispersion cube.  Using *cutoff=1e-5* can be 50% faster, but
    with an error of about 1%, which is usually less than the measurement
    uncertainty.

    *mono* is True if polydispersity should be turned off for all parameters.
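
    For example, a minimal sketch (the parameter names are illustrative and
    depend on the model)::

        kernel = model.make_kernel([q])   # q is a 1D numpy array
        pars = {'radius': 60.0, 'radius_pd': 0.2, 'radius_pd_n': 35}
        Iq = call_kernel(kernel, pars)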
60    """
61    mesh = get_mesh(calculator.info, pars, dim=calculator.dim, mono=mono)
62    #print("pars", list(zip(*mesh))[0])
63    call_details, values, is_magnetic = make_kernel_args(calculator, mesh)
64    #print("values:", values)
65    return calculator(call_details, values, cutoff, is_magnetic)
66
67def call_Fq(calculator, pars, cutoff=0., mono=False):
68    # type: (Kernel, ParameterSet, float, bool) -> np.ndarray
69    """
70    Like :func:`call_kernel`, but returning F, F^2, R_eff, V_shell, V_form/V_shell.
71
72    For solid objects V_shell is equal to V_form and the volume ratio is 1.
73
74    Use parameter *radius_effective_mode* to select the effective radius
75    calculation.
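
    For example, a sketch only (the radius parameter is illustrative)::

        kernel = model.make_kernel([q])
        F, Fsq, R_eff, V_shell, V_ratio = call_Fq(kernel, {'radius': 60.0})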
76    """
77    R_eff_type = int(pars.pop(RADIUS_MODE_ID, 1.0))
78    mesh = get_mesh(calculator.info, pars, dim=calculator.dim, mono=mono)
79    #print("pars", list(zip(*mesh))[0])
80    call_details, values, is_magnetic = make_kernel_args(calculator, mesh)
81    #print("values:", values)
82    return calculator.Fq(call_details, values, cutoff, is_magnetic, R_eff_type)
83
84def call_profile(model_info, pars=None):
85    # type: (ModelInfo, ParameterSet) -> Tuple[np.ndarray, np.ndarray, Tuple[str, str]]
86    """
87    Returns the profile *x, y, (xlabel, ylabel)* representing the model.
88    """
89    if pars is None:
90        pars = {}
91    args = {}
92    for p in model_info.parameters.kernel_parameters:
93        if p.length > 1:
94            value = np.array([pars.get(p.id+str(j), p.default)
95                              for j in range(1, p.length+1)])
96        else:
97            value = pars.get(p.id, p.default)
98        args[p.id] = value
99    x, y = model_info.profile(**args)
100    return x, y, model_info.profile_axes
101
102def get_mesh(model_info, values, dim='1d', mono=False):
103    # type: (ModelInfo, Dict[str, float], str, bool) -> List[Tuple[float, np.ndarray, np.ndarry]]
104    """
105    Retrieve the dispersity mesh described by the parameter set.
106
107    Returns a list of *(value, dispersity, weights)* with one tuple for each
108    parameter in the model call parameters.  Inactive parameters return the
109    default value with a weight of 1.0.
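
    For example, a sketch of the returned structure (the numbers are
    illustrative; monodisperse parameters carry a single point with
    weight 1.0)::

        [(120.0, [120.0], [1.0]),                       # monodisperse value
         (60.0, [54.0, 60.0, 66.0], [0.2, 0.6, 0.2]),   # dispersed value
         ...]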
110    """
111    parameters = model_info.parameters
112    if mono:
113        active = lambda name: False
114    elif dim == '1d':
115        active = lambda name: name in parameters.pd_1d
116    elif dim == '2d':
117        active = lambda name: name in parameters.pd_2d
118    else:
119        active = lambda name: True
120
121    #print("pars",[p.id for p in parameters.call_parameters])
122    mesh = [_get_par_weights(p, values, active(p.name))
123            for p in parameters.call_parameters]
124    return mesh
125
126
127def _get_par_weights(parameter, values, active=True):
    # type: (Parameter, Dict[str, float], bool) -> Tuple[float, np.ndarray, np.ndarray]
    """
    Generate the distribution for *parameter* given the parameter values
    in *values*.

    Uses "name", "name_pd", "name_pd_type", "name_pd_n", "name_pd_nsigma"
    from the *values* dictionary for the parameter value and its dispersion.
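
    For example, a sketch of the expected keys for a dispersed radius
    (the values are illustrative)::

        values = {
            'radius': 60.0,
            'radius_pd': 0.2, 'radius_pd_n': 35,
            'radius_pd_type': 'gaussian', 'radius_pd_nsigma': 3.0,
        }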
135    """
136    value = float(values.get(parameter.name, parameter.default))
137    npts = values.get(parameter.name+'_pd_n', 0)
138    width = values.get(parameter.name+'_pd', 0.0)
139    relative = parameter.relative_pd
140    if npts == 0 or width == 0.0 or not active:
141        # Note: orientation parameters have the viewing angle as the parameter
142        # value and the jitter in the distribution, so be sure to set the
143        # empty pd for orientation parameters to 0.
144        pd = [value if relative or not parameter.polydisperse else 0.0], [1.0]
145    else:
146        limits = parameter.limits
147        disperser = values.get(parameter.name+'_pd_type', 'gaussian')
148        nsigma = values.get(parameter.name+'_pd_nsigma', 3.0)
149        pd = weights.get_weights(disperser, npts, width, nsigma,
150                                 value, limits, relative)
151    return value, pd[0], pd[1]
152
153
154def _vol_pars(model_info, values):
155    # type: (ModelInfo, ParameterSet) -> Tuple[np.ndarray, np.ndarray]
156    vol_pars = [_get_par_weights(p, values)
157                for p in model_info.parameters.call_parameters
158                if p.type == 'volume']
159    #import pylab; pylab.plot(vol_pars[0][0],vol_pars[0][1]); pylab.show()
160    dispersity, weight = dispersion_mesh(model_info, vol_pars)
161    return dispersity, weight
162
163
164def _make_sesans_transform(data):
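    """
    Build the precomputed SESANS Hankel transform for *data*: convert the
    spin-echo lengths to Angstroms, derive the q acceptance from the sample
    *zacceptance* and the source wavelength, and return the
    :class:`sesans.SesansTransform` operator used as the SESANS resolution.
    """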
    from sas.sascalc.data_util.nxsunit import Converter

    # Pre-compute the Hankel matrix (H)
    SElength = Converter(data._xunit)(data.x, "A")

    theta_max = Converter("radians")(data.sample.zacceptance)[0]
    q_max = 2 * np.pi / np.max(data.source.wavelength) * np.sin(theta_max)
    zaccept = Converter("1/A")(q_max, "1/" + data.source.wavelength_unit)

    Rmax = 10000000
    hankel = sesans.SesansTransform(data.x, SElength,
                                    data.source.wavelength,
                                    zaccept, Rmax)
    return hankel


class DataMixin(object):
182    """
183    DataMixin captures the common aspects of evaluating a SAS model for a
184    particular data set, including calculating Iq and evaluating the
185    resolution function.  It is used in particular by :class:`DirectModel`,
186    which evaluates a SAS model parameters as key word arguments to the
187    calculator method, and by :class:`bumps_model.Experiment`, which wraps the
188    model and data for use with the Bumps fitting engine.  It is not
189    currently used by :class:`sasview_model.SasviewModel` since this will
190    require a number of changes to SasView before we can do it.
191
192    :meth:`_interpret_data` initializes the data structures necessary
193    to manage the calculations.  This sets attributes in the child class
194    such as *data_type* and *resolution*.
195
196    :meth:`_calc_theory` evaluates the model at the given control values.
197
198    :meth:`_set_data` sets the intensity data in the data object,
199    possibly with random noise added.  This is useful for simulating a
200    dataset with the results from :meth:`_calc_theory`.
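
    For example, a minimal sketch of how a subclass is expected to wire these
    methods together (the class name is hypothetical)::

        class MyExperiment(DataMixin):
            def __init__(self, data, model):
                self._interpret_data(data, model)
            def theory(self, **pars):
                return self._calc_theory(pars, cutoff=1e-5)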
201    """
202    def _interpret_data(self, data, model):
203        # type: (Data, KernelModel) -> None
204        # pylint: disable=attribute-defined-outside-init
205
206        self._data = data
207        self._model = model
208
209        # interpret data
210        if hasattr(data, 'isSesans') and data.isSesans:
211            self.data_type = 'sesans'
212        elif hasattr(data, 'qx_data'):
213            self.data_type = 'Iqxy'
214        elif getattr(data, 'oriented', False):
215            self.data_type = 'Iq-oriented'
216        else:
217            self.data_type = 'Iq'
218
219        if self.data_type == 'sesans':
220            res = _make_sesans_transform(data)
221            index = slice(None, None)
222            if data.y is not None:
223                Iq, dIq = data.y, data.dy
224            else:
225                Iq, dIq = None, None
226            #self._theory = np.zeros_like(q)
227            q_vectors = [res.q_calc]
228        elif self.data_type == 'Iqxy':
229            #if not model.info.parameters.has_2d:
230            #    raise ValueError("not 2D without orientation or magnetic parameters")
231            q = np.sqrt(data.qx_data**2 + data.qy_data**2)
232            qmin = getattr(data, 'qmin', 1e-16)
233            qmax = getattr(data, 'qmax', np.inf)
234            accuracy = getattr(data, 'accuracy', 'Low')
235            index = (data.mask == 0) & (q >= qmin) & (q <= qmax)
236            if data.data is not None:
237                index &= ~np.isnan(data.data)
238                Iq = data.data[index]
239                dIq = data.err_data[index]
240            else:
241                Iq, dIq = None, None
242            res = resolution2d.Pinhole2D(data=data, index=index,
243                                         nsigma=3.0, accuracy=accuracy)
244            #self._theory = np.zeros_like(self.Iq)
245            q_vectors = res.q_calc
246        elif self.data_type == 'Iq':
247            index = (data.x >= data.qmin) & (data.x <= data.qmax)
248            mask = getattr(data, 'mask', None)
249            if mask is not None:
250                index &= (mask == 0)
251            if data.y is not None:
252                index &= ~np.isnan(data.y)
253                Iq = data.y[index]
254                dIq = data.dy[index]
255            else:
256                Iq, dIq = None, None
257            if getattr(data, 'dx', None) is not None:
258                q, dq = data.x[index], data.dx[index]
259                if (dq > 0).any():
260                    res = resolution.Pinhole1D(q, dq)
261                else:
262                    res = resolution.Perfect1D(q)
263            elif (getattr(data, 'dxl', None) is not None
264                  and getattr(data, 'dxw', None) is not None):
265                res = resolution.Slit1D(data.x[index],
266                                        qx_width=data.dxl[index],
267                                        qy_width=data.dxw[index])
268            else:
269                res = resolution.Perfect1D(data.x[index])
270
271            #self._theory = np.zeros_like(self.Iq)
272            q_vectors = [res.q_calc]
273        elif self.data_type == 'Iq-oriented':
274            index = (data.x >= data.qmin) & (data.x <= data.qmax)
275            if data.y is not None:
276                index &= ~np.isnan(data.y)
277                Iq = data.y[index]
278                dIq = data.dy[index]
279            else:
280                Iq, dIq = None, None
281            if (getattr(data, 'dxl', None) is None
282                    or getattr(data, 'dxw', None) is None):
283                raise ValueError("oriented sample with 1D data needs slit resolution")
284
285            res = resolution2d.Slit2D(data.x[index],
286                                      qx_width=data.dxw[index],
287                                      qy_width=data.dxl[index])
288            q_vectors = res.q_calc
289        else:
290            raise ValueError("Unknown data type") # never gets here
291
292        # Remember function inputs so we can delay loading the function and
293        # so we can save/restore state
294        self._kernel_inputs = q_vectors
295        self._kernel = None
296        self.Iq, self.dIq, self.index = Iq, dIq, index
297        self.resolution = res
298
299    def _set_data(self, Iq, noise=None):
300        # type: (np.ndarray, Optional[float]) -> None
301        # pylint: disable=attribute-defined-outside-init
302        if noise is not None:
303            self.dIq = Iq*noise*0.01
304        dy = self.dIq
305        y = Iq + np.random.randn(*dy.shape) * dy
306        self.Iq = y
307        if self.data_type in ('Iq', 'Iq-oriented'):
308            if self._data.y is None:
309                self._data.y = np.empty(len(self._data.x), 'd')
310            if self._data.dy is None:
311                self._data.dy = np.empty(len(self._data.x), 'd')
312            self._data.dy[self.index] = dy
313            self._data.y[self.index] = y
314        elif self.data_type == 'Iqxy':
315            if self._data.data is None:
316                self._data.data = np.empty_like(self._data.qx_data, 'd')
317            if self._data.err_data is None:
318                self._data.err_data = np.empty_like(self._data.qx_data, 'd')
319            self._data.data[self.index] = y
320            self._data.err_data[self.index] = dy
321        elif self.data_type == 'sesans':
322            if self._data.y is None:
323                self._data.y = np.empty(len(self._data.x), 'd')
324            self._data.y[self.index] = y
325        else:
326            raise ValueError("Unknown model")
327
328    def _calc_theory(self, pars, cutoff=0.0):
        # type: (ParameterSet, float) -> np.ndarray
        if self._kernel is None:
            self._kernel = self._model.make_kernel(self._kernel_inputs)

        # Need to pull background out of resolution for multiple scattering
        default_background = self._model.info.parameters.common_parameters[1].default
        background = pars.get('background', default_background)
        pars = pars.copy()
        pars['background'] = 0.

        Iq_calc = call_kernel(self._kernel, pars, cutoff=cutoff)
        # Storing the calculated Iq values so that they can be plotted.
        # Only applies to oriented USANS data for now.
        # TODO: extend plotting of calculated Iq to other measurement types
        # TODO: refactor so we don't store the result in the model
        self.Iq_calc = Iq_calc
        result = self.resolution.apply(Iq_calc)
        if hasattr(self.resolution, 'nx'):
            self.Iq_calc = (
                self.resolution.qx_calc, self.resolution.qy_calc,
                np.reshape(Iq_calc, (self.resolution.ny, self.resolution.nx))
            )
        return result + background


class DirectModel(DataMixin):
355    """
356    Create a calculator object for a model.
357
358    *data* is 1D SAS, 2D SAS or SESANS data
359
360    *model* is a model calculator return from :func:`generate.load_model`
361
362    *cutoff* is the polydispersity weight cutoff.
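
    For example, a minimal sketch assuming *data* and *model* have already
    been loaded (the parameter names are illustrative and depend on the
    chosen model)::

        calculator = DirectModel(data, model)
        Iq = calculator(radius=60, radius_pd=0.2, radius_pd_n=35)
        calculator.simulate_data(noise=5, radius=60)   # noise in percent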
363    """
364    def __init__(self, data, model, cutoff=1e-5):
365        # type: (Data, KernelModel, float) -> None
366        self.model = model
367        self.cutoff = cutoff
368        # Note: _interpret_data defines the model attributes
369        self._interpret_data(data, model)
370
371    def __call__(self, **pars):
372        # type: (**float) -> np.ndarray
373        return self._calc_theory(pars, cutoff=self.cutoff)
374
375    def simulate_data(self, noise=None, **pars):
376        # type: (Optional[float], **float) -> None
377        """
378        Generate simulated data for the model.
379        """
380        Iq = self.__call__(**pars)
381        self._set_data(Iq, noise=noise)
382
383    def profile(self, **pars):
384        # type: (**float) -> None
385        """
386        Generate a plottable profile.
387        """
388        return call_profile(self.model.info, pars)
389
390def main():
    # type: () -> None
    """
    Program to evaluate a particular model at a set of q values.
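
    For example, an illustrative invocation (the model name and parameter
    values are examples only)::

        python -m sasmodels.direct_model cylinder 0.1 radius=20 length=200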
394    """
395    import sys
396    from .data import empty_data1D, empty_data2D
397    from .core import load_model_info, build_model
398
399    if len(sys.argv) < 3:
400        print("usage: python -m sasmodels.direct_model modelname (q|qx,qy) par=val ...")
401        sys.exit(1)
402    model_name = sys.argv[1]
403    call = sys.argv[2].upper()
404    try:
405        values = [float(v) for v in call.split(',')]
406    except ValueError:
407        values = []
408    if len(values) == 1:
409        q, = values
410        data = empty_data1D([q])
411    elif len(values) == 2:
412        qx, qy = values
413        data = empty_data2D([qx], [qy])
414    else:
415        print("use q or qx,qy")
416        sys.exit(1)
417
418    model_info = load_model_info(model_name)
419    model = build_model(model_info)
420    calculator = DirectModel(data, model)
421    pars = dict((k, (float(v) if not k.endswith("_pd_type") else v))
422                for pair in sys.argv[3:]
423                for k, v in [pair.split('=')])
424    Iq = calculator(**pars)
425    print(Iq[0])
426
427if __name__ == "__main__":
428    main()