source: sasmodels/sasmodels/direct_model.py @ a7db39fa

Last change on this file was a7db39fa, checked in by ajj, 5 years ago

Fixing model tests to properly handle P*S tests

1"""
2Class interface to the model calculator.
3
4Calling a model is somewhat non-trivial since the functions called depend
5on the data type.  For 1D data the *Iq* kernel needs to be called, for
62D data the *Iqxy* kernel needs to be called, and for SESANS data the
7*Iq* kernel needs to be called followed by a Hankel transform.  Before
8the kernel is called an appropriate *q* calculation vector needs to be
9constructed.  This is not the simple *q* vector where you have measured
10the data since the resolution calculation will require values beyond the
11range of the measured data.  After the calculation the resolution calculator
12must be called to return the predicted value for each measured data point.
13
14:class:`DirectModel` is a callable object that takes *parameter=value*
15keyword arguments and returns the appropriate theory values for the data.
16
17:class:`DataMixin` does the real work of interpreting the data and calling
18the model calculator.  This is used by :class:`DirectModel`, which uses
19direct parameter values and by :class:`bumps_model.Experiment` which wraps
20the parameter values in boxes so that the user can set fitting ranges, etc.
21on the individual parameters and send the model to the Bumps optimizers.
22"""
23from __future__ import print_function
24
25import numpy as np  # type: ignore
26
27# TODO: fix sesans module
28from . import sesans  # type: ignore
29from . import weights
30from . import resolution
31from . import resolution2d
32from .details import make_kernel_args, dispersion_mesh
33from .product import RADIUS_MODE_ID
34
35# pylint: disable=unused-import
36try:
37    from typing import Optional, Dict, Tuple
38except ImportError:
39    pass
40else:
41    from .data import Data
42    from .kernel import Kernel, KernelModel
43    from .modelinfo import Parameter, ParameterSet
44# pylint: enable=unused-import
45
def call_kernel(calculator, pars, cutoff=0., mono=False):
    # type: (Kernel, ParameterSet, float, bool) -> np.ndarray
    """
    Call the *calculator* kernel returned from *model.make_kernel* with
    parameters *pars*.

    *cutoff* is the limiting value for the product of dispersion weights used
    to perform the multidimensional dispersion calculation more quickly at a
    slight cost to accuracy. The default value of *cutoff=0* integrates over
    the entire dispersion cube.  Using *cutoff=1e-5* can be 50% faster, but
    with an error of about 1%, which is usually less than the measurement
    uncertainty.

    *mono* is True if polydispersity should be ignored for all parameters.
    """
    mesh = get_mesh(calculator.info, pars, dim=calculator.dim, mono=mono)
    #print("in call_kernel: pars:", list(zip(*mesh))[0])
    call_details, values, is_magnetic = make_kernel_args(calculator, mesh)
    #print("in call_kernel: values:", values)
    return calculator(call_details, values, cutoff, is_magnetic)

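# Example use of call_kernel (a minimal sketch, assuming the built-in "sphere"
# model is available and a usable OpenCL or DLL kernel backend can be built):
#
#     import numpy as np
#     from sasmodels.core import load_model
#     from sasmodels.direct_model import call_kernel
#     model = load_model("sphere")
#     q = np.logspace(-3, -1, 200)
#     kernel = model.make_kernel([q])
#     Iq = call_kernel(kernel, {"radius": 200, "radius_pd": 0.1, "radius_pd_n": 35})
#     kernel.release()
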
def call_Fq(calculator, pars, cutoff=0., mono=False):
    # type: (Kernel, ParameterSet, float, bool) -> np.ndarray
    """
    Like :func:`call_kernel`, but returning F, F^2, R_eff, V_shell, V_form/V_shell.

    For solid objects V_shell is equal to V_form and the volume ratio is 1.

    Use the effective radius mode parameter (*RADIUS_MODE_ID*, imported from
    :mod:`product`) to select the effective radius calculation.
    """
    R_eff_type = int(pars.pop(RADIUS_MODE_ID, 1.0))
    mesh = get_mesh(calculator.info, pars, dim=calculator.dim, mono=mono)
    #print("in call_Fq: pars", list(zip(*mesh))[0])
    call_details, values, is_magnetic = make_kernel_args(calculator, mesh)
    #print("in call_Fq: values:", values)
    return calculator.Fq(call_details, values, cutoff, is_magnetic, R_eff_type)

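# Example use of call_Fq (a sketch under the same assumptions as the
# call_kernel example above, for a model such as "sphere" that provides F(q)):
#
#     F, Fsq, R_eff, V_shell, V_ratio = call_Fq(kernel, {"radius": 60})
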
def call_profile(model_info, pars=None):
    # type: (ModelInfo, ParameterSet) -> Tuple[np.ndarray, np.ndarray, Tuple[str, str]]
    """
    Returns the profile *x, y, (xlabel, ylabel)* representing the model.
    """
    if pars is None:
        pars = {}
    args = {}
    for p in model_info.parameters.kernel_parameters:
        if p.length > 1:
            value = np.array([pars.get(p.id+str(j), p.default)
                              for j in range(1, p.length+1)])
        else:
            value = pars.get(p.id, p.default)
        args[p.id] = value
    x, y = model_info.profile(**args)
    return x, y, model_info.profile_axes

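# Example use of call_profile (a sketch; assumes a model such as "onion" whose
# model_info defines a *profile* function):
#
#     from sasmodels.core import load_model_info
#     info = load_model_info("onion")
#     if info.profile is not None:
#         x, y, (xlabel, ylabel) = call_profile(info)
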
def get_mesh(model_info, values, dim='1d', mono=False):
    # type: (ModelInfo, Dict[str, float], str, bool) -> List[Tuple[float, np.ndarray, np.ndarray]]
    """
    Retrieve the dispersity mesh described by the parameter set.

    Returns a list of *(value, dispersity, weights)* with one tuple for each
    parameter in the model call parameters.  Inactive parameters return the
    default value with a weight of 1.0.
    """
    parameters = model_info.parameters
    if mono:
        active = lambda name: False
    elif dim == '1d':
        active = lambda name: name in parameters.pd_1d
    elif dim == '2d':
        active = lambda name: name in parameters.pd_2d
    else:
        active = lambda name: True

    #print("in get_mesh: pars:",[p.id for p in parameters.call_parameters])
    mesh = [_get_par_weights(p, values, active(p.name))
            for p in parameters.call_parameters]
    return mesh


def _get_par_weights(parameter, values, active=True):
    # type: (Parameter, Dict[str, float], bool) -> Tuple[float, np.ndarray, np.ndarray]
    """
    Generate the distribution for *parameter* given the parameter values
    in *values*.

    Uses "name", "name_pd", "name_pd_type", "name_pd_n" and "name_pd_nsigma"
    from the *values* dictionary for the parameter value and its dispersion.
    """
    value = float(values.get(parameter.name, parameter.default))
    npts = values.get(parameter.name+'_pd_n', 0)
    width = values.get(parameter.name+'_pd', 0.0)
    relative = parameter.relative_pd
    if npts == 0 or width == 0.0 or not active:
        # Note: orientation parameters have the viewing angle as the parameter
        # value and the jitter in the distribution, so be sure to set the
        # empty pd for orientation parameters to 0.
        pd = [value if relative or not parameter.polydisperse else 0.0], [1.0]
    else:
        limits = parameter.limits
        disperser = values.get(parameter.name+'_pd_type', 'gaussian')
        nsigma = values.get(parameter.name+'_pd_nsigma', 3.0)
        pd = weights.get_weights(disperser, npts, width, nsigma,
                                 value, limits, relative)
    return value, pd[0], pd[1]

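# For example, a polydisperse radius would be described in the values
# dictionary by entries such as (a sketch of the naming convention above):
#
#     values = {"radius": 200.0, "radius_pd": 0.1, "radius_pd_n": 35,
#               "radius_pd_type": "gaussian", "radius_pd_nsigma": 3.0}
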

def _vol_pars(model_info, values):
    # type: (ModelInfo, ParameterSet) -> Tuple[np.ndarray, np.ndarray]
    vol_pars = [_get_par_weights(p, values)
                for p in model_info.parameters.call_parameters
                if p.type == 'volume']
    #import pylab; pylab.plot(vol_pars[0][0],vol_pars[0][1]); pylab.show()
    dispersity, weight = dispersion_mesh(model_info, vol_pars)
    return dispersity, weight


def _make_sesans_transform(data):
    from sas.sascalc.data_util.nxsunit import Converter

    # Pre-compute the Hankel matrix (H)
    SElength = Converter(data._xunit)(data.x, "A")

    theta_max = Converter("radians")(data.sample.zacceptance)[0]
    q_max = 2 * np.pi / np.max(data.source.wavelength) * np.sin(theta_max)
    zaccept = Converter("1/A")(q_max, "1/" + data.source.wavelength_unit)

    Rmax = 10000000
    hankel = sesans.SesansTransform(data.x, SElength,
                                    data.source.wavelength,
                                    zaccept, Rmax)
    return hankel

class DataMixin(object):
    """
    DataMixin captures the common aspects of evaluating a SAS model for a
    particular data set, including calculating Iq and evaluating the
    resolution function.  It is used in particular by :class:`DirectModel`,
    which evaluates SAS model parameters passed as keyword arguments to the
    calculator method, and by :class:`bumps_model.Experiment`, which wraps the
    model and data for use with the Bumps fitting engine.  It is not
    currently used by :class:`sasview_model.SasviewModel` since this will
    require a number of changes to SasView before we can do it.

    :meth:`_interpret_data` initializes the data structures necessary
    to manage the calculations.  This sets attributes in the child class
    such as *data_type* and *resolution*.

    :meth:`_calc_theory` evaluates the model at the given control values.

    :meth:`_set_data` sets the intensity data in the data object,
    possibly with random noise added.  This is useful for simulating a
    dataset with the results from :meth:`_calc_theory`.
    """
    def _interpret_data(self, data, model):
        # type: (Data, KernelModel) -> None
        # pylint: disable=attribute-defined-outside-init

        self._data = data
        self._model = model

        # interpret data
        if hasattr(data, 'isSesans') and data.isSesans:
            self.data_type = 'sesans'
        elif hasattr(data, 'qx_data'):
            self.data_type = 'Iqxy'
        elif getattr(data, 'oriented', False):
            self.data_type = 'Iq-oriented'
        else:
            self.data_type = 'Iq'

        if self.data_type == 'sesans':
            res = _make_sesans_transform(data)
            index = slice(None, None)
            if data.y is not None:
                Iq, dIq = data.y, data.dy
            else:
                Iq, dIq = None, None
        elif self.data_type == 'Iqxy':
            #if not model.info.parameters.has_2d:
            #    raise ValueError("not 2D without orientation or magnetic parameters")
            q = np.sqrt(data.qx_data**2 + data.qy_data**2)
            qmin = getattr(data, 'qmin', 1e-16)
            qmax = getattr(data, 'qmax', np.inf)
            accuracy = getattr(data, 'accuracy', 'Low')
            index = (data.mask == 0) & (q >= qmin) & (q <= qmax)
            if data.data is not None:
                index &= ~np.isnan(data.data)
                Iq = data.data[index]
                dIq = data.err_data[index]
            else:
                Iq, dIq = None, None
            res = resolution2d.Pinhole2D(data=data, index=index,
                                         nsigma=3.0, accuracy=accuracy)
        elif self.data_type == 'Iq':
            index = (data.x >= data.qmin) & (data.x <= data.qmax)
            mask = getattr(data, 'mask', None)
            if mask is not None:
                index &= (mask == 0)
            if data.y is not None:
                index &= ~np.isnan(data.y)
                Iq = data.y[index]
                dIq = data.dy[index]
            else:
                Iq, dIq = None, None
            if getattr(data, 'dx', None) is not None:
                q, dq = data.x[index], data.dx[index]
                if (dq > 0).any():
                    res = resolution.Pinhole1D(q, dq)
                else:
                    res = resolution.Perfect1D(q)
            elif (getattr(data, 'dxl', None) is not None
                  and getattr(data, 'dxw', None) is not None):
                res = resolution.Slit1D(data.x[index],
                                        qx_width=data.dxl[index],
                                        qy_width=data.dxw[index])
            else:
                res = resolution.Perfect1D(data.x[index])
        elif self.data_type == 'Iq-oriented':
            index = (data.x >= data.qmin) & (data.x <= data.qmax)
            if data.y is not None:
                index &= ~np.isnan(data.y)
                Iq = data.y[index]
                dIq = data.dy[index]
            else:
                Iq, dIq = None, None
            if (getattr(data, 'dxl', None) is None
                    or getattr(data, 'dxw', None) is None):
                raise ValueError("oriented sample with 1D data needs slit resolution")

            res = resolution2d.Slit2D(data.x[index],
                                      qx_width=data.dxw[index],
                                      qy_width=data.dxl[index])
        else:
            raise ValueError("Unknown data type") # never gets here

        # Remember function inputs so we can delay loading the function and
        # so we can save/restore state
        self._kernel = None
        self.Iq, self.dIq, self.index = Iq, dIq, index
        self.resolution = res

    def _set_data(self, Iq, noise=None):
        # type: (np.ndarray, Optional[float]) -> None
        # pylint: disable=attribute-defined-outside-init
        if noise is not None:
            self.dIq = Iq*noise*0.01
        dy = self.dIq
        y = Iq + np.random.randn(*dy.shape) * dy
        self.Iq = y
        if self.data_type in ('Iq', 'Iq-oriented'):
            if self._data.y is None:
                self._data.y = np.empty(len(self._data.x), 'd')
            if self._data.dy is None:
                self._data.dy = np.empty(len(self._data.x), 'd')
            self._data.dy[self.index] = dy
            self._data.y[self.index] = y
        elif self.data_type == 'Iqxy':
            if self._data.data is None:
                self._data.data = np.empty_like(self._data.qx_data, 'd')
            if self._data.err_data is None:
                self._data.err_data = np.empty_like(self._data.qx_data, 'd')
            self._data.data[self.index] = y
            self._data.err_data[self.index] = dy
        elif self.data_type == 'sesans':
            if self._data.y is None:
                self._data.y = np.empty(len(self._data.x), 'd')
            self._data.y[self.index] = y
        else:
            raise ValueError("Unknown data type")

    def _calc_theory(self, pars, cutoff=0.0):
        # type: (ParameterSet, float) -> np.ndarray
        if self._kernel is None:
            # TODO: change interfaces so that resolution returns kernel inputs
            # Maybe have resolution always return a tuple, or maybe have
            # make_kernel accept either an ndarray or a pair of ndarrays.
            kernel_inputs = self.resolution.q_calc
            if isinstance(kernel_inputs, np.ndarray):
                kernel_inputs = (kernel_inputs,)
            self._kernel = self._model.make_kernel(kernel_inputs)

        # Need to pull background out of resolution for multiple scattering
        default_background = self._model.info.parameters.common_parameters[1].default
        background = pars.get('background', default_background)
        pars = pars.copy()
        pars['background'] = 0.

        Iq_calc = call_kernel(self._kernel, pars, cutoff=cutoff)
        # Storing the calculated Iq values so that they can be plotted.
        # Only applies to oriented USANS data for now.
        # TODO: extend plotting of calculated Iq to other measurement types
        # TODO: refactor so we don't store the result in the model
        self.Iq_calc = Iq_calc
        result = self.resolution.apply(Iq_calc)
        if hasattr(self.resolution, 'nx'):
            self.Iq_calc = (
                self.resolution.qx_calc, self.resolution.qy_calc,
                np.reshape(Iq_calc, (self.resolution.ny, self.resolution.nx))
            )
        return result + background


class DirectModel(DataMixin):
    """
    Create a calculator object for a model.

    *data* is 1D SAS, 2D SAS or SESANS data.

    *model* is a model calculator returned from :func:`core.load_model`.

    *cutoff* is the polydispersity weight cutoff.
    """
    def __init__(self, data, model, cutoff=1e-5):
        # type: (Data, KernelModel, float) -> None
        self.model = model
        self.cutoff = cutoff
        # Note: _interpret_data defines the model attributes
        self._interpret_data(data, model)

    def __call__(self, **pars):
        # type: (**float) -> np.ndarray
        return self._calc_theory(pars, cutoff=self.cutoff)

    def simulate_data(self, noise=None, **pars):
        # type: (Optional[float], **float) -> None
        """
        Generate simulated data for the model.
        """
        Iq = self.__call__(**pars)
        self._set_data(Iq, noise=noise)

    def profile(self, **pars):
        # type: (**float) -> Tuple[np.ndarray, np.ndarray, Tuple[str, str]]
        """
        Generate a plottable profile.
        """
        return call_profile(self.model.info, pars)

def main():
    # type: () -> None
    """
    Program to evaluate a particular model at a set of q values.
    """
    import sys
    from .data import empty_data1D, empty_data2D
    from .core import load_model_info, build_model

    if len(sys.argv) < 3:
        print("usage: python -m sasmodels.direct_model modelname (q|qx,qy) par=val ...")
        sys.exit(1)
    model_name = sys.argv[1]
    call = sys.argv[2].upper()
    try:
        values = [float(v) for v in call.split(',')]
    except ValueError:
        values = []
    if len(values) == 1:
        q, = values
        data = empty_data1D([q])
    elif len(values) == 2:
        qx, qy = values
        data = empty_data2D([qx], [qy])
    else:
        print("use q or qx,qy")
        sys.exit(1)

    model_info = load_model_info(model_name)
    model = build_model(model_info)
    calculator = DirectModel(data, model)
    pars = dict((k, (float(v) if not k.endswith("_pd_type") else v))
                for pair in sys.argv[3:]
                for k, v in [pair.split('=')])
    Iq = calculator(**pars)
    print(Iq[0])

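# Example command line (a sketch; the "cylinder" model ships with sasmodels):
#
#     python -m sasmodels.direct_model cylinder 0.1 radius=20 length=400
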
if __name__ == "__main__":
    main()