source: sasmodels/sasmodels/direct_model.py @ 0444c02

Last change on this file since 0444c02 was 0444c02, checked in by jhbakker, 8 years ago

Changes for SESANS integration, next is merge with ajj_sesans

1"""
2Class interface to the model calculator.
3
4Calling a model is somewhat non-trivial since the functions called depend
5on the data type.  For 1D data the *Iq* kernel needs to be called, for
62D data the *Iqxy* kernel needs to be called, and for SESANS data the
7*Iq* kernel needs to be called followed by a Hankel transform.  Before
8the kernel is called an appropriate *q* calculation vector needs to be
9constructed.  This is not the simple *q* vector where you have measured
10the data since the resolution calculation will require values beyond the
11range of the measured data.  After the calculation the resolution calculator
12must be called to return the predicted value for each measured data point.
13
14:class:`DirectModel` is a callable object that takes *parameter=value*
15keyword arguments and returns the appropriate theory values for the data.
16
17:class:`DataMixin` does the real work of interpreting the data and calling
18the model calculator.  This is used by :class:`DirectModel`, which uses
19direct parameter values and by :class:`bumps_model.Experiment` which wraps
20the parameter values in boxes so that the user can set fitting ranges, etc.
21on the individual parameters and send the model to the Bumps optimizers.
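
For example, a minimal sketch of the intended usage (assuming the *cylinder*
model and the :func:`data.empty_data1D` helper; parameter names are
model-specific)::

    import numpy as np
    from sasmodels.core import load_model
    from sasmodels.data import empty_data1D
    from sasmodels.direct_model import DirectModel

    q = np.logspace(-3, -1, 200)            # q values in 1/Ang
    data = empty_data1D(q)                  # empty data set at those q
    model = load_model("cylinder")          # compile the model kernel
    calculator = DirectModel(data, model)
    Iq = calculator(radius=20, length=400)  # theory at the data q values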
22"""
from __future__ import print_function

import numpy as np  # type: ignore

# TODO: fix sesans module
from . import sesans  # type: ignore
from . import weights
from . import resolution
from . import resolution2d
from .details import make_kernel_args, dispersion_mesh
# Note: the FitPage import from the SasView GUI is only needed by the
# disabled no_transform check in _interpret_data below; importing the GUI
# here would make sasmodels depend on sasview, so it is left disabled.
#from sas.sasgui.perspectives.fitting.fitpage import FitPage

try:
    from typing import Optional, Dict, Tuple
except ImportError:
    pass
else:
    from .data import Data
    from .kernel import Kernel, KernelModel
    # ModelInfo is referenced in the type comments below
    from .modelinfo import ModelInfo, Parameter, ParameterSet

def call_kernel(calculator, pars, cutoff=0., mono=False):
    # type: (Kernel, ParameterSet, float, bool) -> np.ndarray
    """
    Call *calculator*, the kernel returned from *model.make_kernel*, with
    parameters *pars*.

    *cutoff* is the limiting value for the product of dispersion weights used
    to perform the multidimensional dispersion calculation more quickly at a
    slight cost to accuracy. The default value of *cutoff=0* integrates over
    the entire dispersion cube.  Using *cutoff=1e-5* can be 50% faster, but
    with an error of about 1%, which is usually less than the measurement
    uncertainty.

    *mono* is True if polydispersity should be ignored, treating all
    parameters as monodisperse.
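
    For example, a sketch of direct kernel evaluation (assuming a compiled
    *cylinder* model; polydispersity keys not given fall back to their
    defaults)::

        kernel = model.make_kernel([q])   # q is a 1D array of q values
        pars = {'radius': 20, 'length': 400,
                'radius_pd': 0.1, 'radius_pd_n': 35}
        Iq = call_kernel(kernel, pars)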
57    """
58    parameters = calculator.info.parameters
59    if mono:
60        active = lambda name: False
61    elif calculator.dim == '1d':
62        active = lambda name: name in parameters.pd_1d
63    elif calculator.dim == '2d':
64        active = lambda name: name in parameters.pd_2d
65    else:
66        active = lambda name: True
67
68    #print("pars",[p.id for p in parameters.call_parameters])
69    vw_pairs = [(get_weights(p, pars) if active(p.name)
70                 else ([pars.get(p.name, p.default)], [1.0]))
71                for p in parameters.call_parameters]
72
73    call_details, values, is_magnetic = make_kernel_args(calculator, vw_pairs)
74    #print("values:", values)
75    return calculator(call_details, values, cutoff, is_magnetic)
76
77
def call_ER(model_info, pars):
    # type: (ModelInfo, ParameterSet) -> float
    """
    Call the model ER function using *pars*.

    *model_info* is either *model.info* if you have a loaded model,
    or *kernel.info* if you have a model kernel prepared for evaluation.
    """
    if model_info.ER is None:
        return 1.0
    elif not model_info.parameters.form_volume_parameters:
        # handle the case where ER is provided but the model is not polydisperse
        return model_info.ER()
    else:
        value, weight = _vol_pars(model_info, pars)
        individual_radii = model_info.ER(*value)
        return np.sum(weight*individual_radii) / np.sum(weight)


def call_VR(model_info, pars):
    # type: (ModelInfo, ParameterSet) -> float
    """
    Call the model VR function using *pars*.

    *model_info* is either *model.info* if you have a loaded model,
    or *kernel.info* if you have a model kernel prepared for evaluation.
    """
    if model_info.VR is None:
        return 1.0
    elif not model_info.parameters.form_volume_parameters:
        # handle the case where VR is provided but the model is not polydisperse
        return model_info.VR()
    else:
        value, weight = _vol_pars(model_info, pars)
        whole, part = model_info.VR(*value)
        return np.sum(weight*part)/np.sum(weight*whole)


def call_profile(model_info, **pars):
    # type: (ModelInfo, ...) -> Tuple[np.ndarray, np.ndarray, Tuple[str, str]]
    """
    Returns the profile *x, y, (xlabel, ylabel)* representing the model.
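
    For example, a sketch for a model that defines a radial *profile*
    function, such as *onion* (the parameter name here is illustrative)::

        x, y, (xlabel, ylabel) = call_profile(model.info, thickness1=50)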
120    """
121    args = {}
122    for p in model_info.parameters.kernel_parameters:
123        if p.length > 1:
124            value = np.array([pars.get(p.id+str(j), p.default)
125                              for j in range(1, p.length+1)])
126        else:
127            value = pars.get(p.id, p.default)
128        args[p.id] = value
129    x, y = model_info.profile(**args)
130    return x, y, model_info.profile_axes
131
132
def get_weights(parameter, values):
    # type: (Parameter, Dict[str, float]) -> Tuple[np.ndarray, np.ndarray]
    """
    Generate the distribution for *parameter* given the parameter values
    in *values*.

    Uses "name", "name_pd", "name_pd_type", "name_pd_n" and "name_pd_nsigma"
    from the *values* dictionary for the parameter value and its dispersion.
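
    For example, a sketch for a *radius* parameter with 10% gaussian
    polydispersity, where *radius_parameter* stands for the corresponding
    :class:`modelinfo.Parameter` object (omitted keys use their defaults)::

        pars = {'radius': 50, 'radius_pd': 0.1, 'radius_pd_n': 35,
                'radius_pd_type': 'gaussian', 'radius_pd_nsigma': 3}
        value, weight = get_weights(radius_parameter, pars)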
141    """
142    value = float(values.get(parameter.name, parameter.default))
143    relative = parameter.relative_pd
144    limits = parameter.limits
145    disperser = values.get(parameter.name+'_pd_type', 'gaussian')
146    npts = values.get(parameter.name+'_pd_n', 0)
147    width = values.get(parameter.name+'_pd', 0.0)
148    nsigma = values.get(parameter.name+'_pd_nsigma', 3.0)
149    if npts == 0 or width == 0:
150        return [value], [1.0]
151    value, weight = weights.get_weights(
152        disperser, npts, width, nsigma, value, limits, relative)
153    return value, weight / np.sum(weight)
154
155
def _vol_pars(model_info, pars):
    # type: (ModelInfo, ParameterSet) -> Tuple[np.ndarray, np.ndarray]
    vol_pars = [get_weights(p, pars)
                for p in model_info.parameters.call_parameters
                if p.type == 'volume']
    #import pylab; pylab.plot(vol_pars[0][0],vol_pars[0][1]); pylab.show()
    value, weight = dispersion_mesh(model_info, vol_pars)
    return value, weight


class DataMixin(object):
    """
    DataMixin captures the common aspects of evaluating a SAS model for a
    particular data set, including calculating Iq and evaluating the
    resolution function.  It is used in particular by :class:`DirectModel`,
    which evaluates a SAS model with parameters given as keyword arguments
    to the calculator method, and by :class:`bumps_model.Experiment`, which
    wraps the model and data for use with the Bumps fitting engine.  It is
    not currently used by :class:`sasview_model.SasviewModel` since this
    will require a number of changes to SasView before we can do it.

    :meth:`_interpret_data` initializes the data structures necessary
    to manage the calculations.  This sets attributes in the child class
    such as *data_type* and *resolution*.

    :meth:`_calc_theory` evaluates the model at the given control values.

    :meth:`_set_data` sets the intensity data in the data object,
    possibly with random noise added.  This is useful for simulating a
    dataset with the results from :meth:`_calc_theory`.
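
    A minimal sketch of the expected usage pattern (the class name here is
    illustrative; :class:`DirectModel` below is the real example)::

        class MyCalculator(DataMixin):
            def __init__(self, data, model):
                self._interpret_data(data, model)
            def __call__(self, **pars):
                return self._calc_theory(pars, cutoff=0.)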
186    """
187    def _interpret_data(self, data, model):
188        # type: (Data, KernelModel) -> None
189        # pylint: disable=attribute-defined-outside-init
190
191        self._data = data
192        self._model = model
193
194        # interpret data
195        if hasattr(data, 'lam'):
        #if not FitPage.no_transform.GetValue():  # check whether the no_transform radio button is inactive; DOES NOT WORK since the button is not active before fitting
            self.data_type = 'sesans'
        elif hasattr(data, 'qx_data'):
            self.data_type = 'Iqxy'
        elif getattr(data, 'oriented', False):
            self.data_type = 'Iq-oriented'
        else:
            self.data_type = 'Iq'

        if self.data_type == 'sesans':
            q = sesans.make_q(data.sample.zacceptance, data.Rmax)
            index = slice(None, None)
            res = None
            if data.y is not None:
                Iq, dIq = data.y, data.dy
            else:
                Iq, dIq = None, None
            #self._theory = np.zeros_like(q)
            q_vectors = [q]
            q_mono = sesans.make_all_q(data)
        elif self.data_type == 'Iqxy':
            #if not model.info.parameters.has_2d:
            #    raise ValueError("not 2D without orientation or magnetic parameters")
            q = np.sqrt(data.qx_data**2 + data.qy_data**2)
            qmin = getattr(data, 'qmin', 1e-16)
            qmax = getattr(data, 'qmax', np.inf)
            accuracy = getattr(data, 'accuracy', 'Low')
            index = ~data.mask & (q >= qmin) & (q <= qmax)
            if data.data is not None:
                index &= ~np.isnan(data.data)
                Iq = data.data[index]
                dIq = data.err_data[index]
            else:
                Iq, dIq = None, None
            res = resolution2d.Pinhole2D(data=data, index=index,
                                         nsigma=3.0, accuracy=accuracy)
            #self._theory = np.zeros_like(self.Iq)
            q_vectors = res.q_calc
            q_mono = []
        elif self.data_type == 'Iq':
            index = (data.x >= data.qmin) & (data.x <= data.qmax)
            if data.y is not None:
                index &= ~np.isnan(data.y)
                Iq = data.y[index]
                dIq = data.dy[index]
            else:
                Iq, dIq = None, None
            if getattr(data, 'dx', None) is not None:
                q, dq = data.x[index], data.dx[index]
                if (dq > 0).any():
                    res = resolution.Pinhole1D(q, dq)
                else:
                    res = resolution.Perfect1D(q)
            elif (getattr(data, 'dxl', None) is not None
                  and getattr(data, 'dxw', None) is not None):
                res = resolution.Slit1D(data.x[index],
                                        qx_width=data.dxl[index],
                                        qy_width=data.dxw[index])
            else:
                res = resolution.Perfect1D(data.x[index])

            #self._theory = np.zeros_like(self.Iq)
            q_vectors = [res.q_calc]
            q_mono = []
        elif self.data_type == 'Iq-oriented':
            index = (data.x >= data.qmin) & (data.x <= data.qmax)
            if data.y is not None:
                index &= ~np.isnan(data.y)
                Iq = data.y[index]
                dIq = data.dy[index]
            else:
                Iq, dIq = None, None
            if (getattr(data, 'dxl', None) is None
                    or getattr(data, 'dxw', None) is None):
                raise ValueError("oriented sample with 1D data needs slit resolution")

            res = resolution2d.Slit2D(data.x[index],
                                      qx_width=data.dxw[index],
                                      qy_width=data.dxl[index])
            q_vectors = res.q_calc
            q_mono = []
        else:
            raise ValueError("Unknown data type") # never gets here

        # Remember function inputs so we can delay loading the function and
        # so we can save/restore state
        self._kernel_inputs = q_vectors
        self._kernel_mono_inputs = q_mono
        self._kernel = None
        self.Iq, self.dIq, self.index = Iq, dIq, index
        self.resolution = res

    def _set_data(self, Iq, noise=None):
        # type: (np.ndarray, Optional[float]) -> None
        # pylint: disable=attribute-defined-outside-init
        if noise is not None:
            # noise is given as a percentage of the signal
            self.dIq = Iq*noise*0.01
        dy = self.dIq
        y = Iq + np.random.randn(*dy.shape) * dy
        self.Iq = y
        if self.data_type in ('Iq', 'Iq-oriented'):
            self._data.dy[self.index] = dy
            self._data.y[self.index] = y
        elif self.data_type == 'Iqxy':
            self._data.data[self.index] = y
        elif self.data_type == 'sesans':
            self._data.y[self.index] = y
        else:
            raise ValueError("Unknown data type")  # never gets here

    def _calc_theory(self, pars, cutoff=0.0):
        # type: (ParameterSet, float) -> np.ndarray
        if self._kernel is None:
            self._kernel = self._model.make_kernel(self._kernel_inputs)
            self._kernel_mono = (
                self._model.make_kernel(self._kernel_mono_inputs)
                if self._kernel_mono_inputs else None)

        Iq_calc = call_kernel(self._kernel, pars, cutoff=cutoff)
        # Store the calculated Iq values so that they can be plotted.
        # Only applies to oriented USANS data for now.
        # TODO: extend plotting of calculated Iq to other measurement types
        # TODO: refactor so we don't store the result in the model
        self.Iq_calc = None
        if self.data_type == 'sesans':
            Iq_mono = (call_kernel(self._kernel_mono, pars, mono=True)
                       if self._kernel_mono_inputs else None)
            result = sesans.transform(self._data,
                                      self._kernel_inputs[0], Iq_calc,
                                      self._kernel_mono_inputs, Iq_mono)
        else:
            result = self.resolution.apply(Iq_calc)
            if hasattr(self.resolution, 'nx'):
                self.Iq_calc = (
                    self.resolution.qx_calc, self.resolution.qy_calc,
                    np.reshape(Iq_calc, (self.resolution.ny, self.resolution.nx))
                )
        return result


class DirectModel(DataMixin):
    """
    Create a calculator object for a model.

    *data* is 1D SAS, 2D SAS or SESANS data

    *model* is a model calculator returned from :func:`core.load_model`

    *cutoff* is the polydispersity weight cutoff.
    """
    def __init__(self, data, model, cutoff=1e-5):
        # type: (Data, KernelModel, float) -> None
        self.model = model
        self.cutoff = cutoff
        # Note: _interpret_data defines the model attributes
        self._interpret_data(data, model)

    def __call__(self, **pars):
        # type: (**float) -> np.ndarray
        return self._calc_theory(pars, cutoff=self.cutoff)

    def simulate_data(self, noise=None, **pars):
        # type: (Optional[float], **float) -> None
        """
        Generate simulated data for the model.  If *noise* is given, it is
        the one-sigma uncertainty as a percentage of the signal.
        """
        Iq = self.__call__(**pars)
        self._set_data(Iq, noise=noise)

    def profile(self, **pars):
        # type: (**float) -> Tuple[np.ndarray, np.ndarray, Tuple[str, str]]
        """
        Generate a plottable profile.
        """
        return call_profile(self.model.info, **pars)

def main():
    # type: () -> None
    """
    Program to evaluate a particular model at a set of q values.
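
    For example, a sketch of the expected invocation (model and parameter
    names here are illustrative)::

        python -m sasmodels.direct_model cylinder 0.1 radius=200 length=500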
376    """
377    import sys
378    from .data import empty_data1D, empty_data2D
379    from .core import load_model_info, build_model
380
381    if len(sys.argv) < 3:
382        print("usage: python -m sasmodels.direct_model modelname (q|qx,qy) par=val ...")
383        sys.exit(1)
384    model_name = sys.argv[1]
385    call = sys.argv[2].upper()
386    if call != "ER_VR":
387        try:
388            values = [float(v) for v in call.split(',')]
389        except Exception:
390            values = []
391        if len(values) == 1:
392            q, = values
393            data = empty_data1D([q])
394        elif len(values) == 2:
395            qx, qy = values
396            data = empty_data2D([qx], [qy])
397        else:
398            print("use q or qx,qy or ER or VR")
399            sys.exit(1)
400    else:
401        data = empty_data1D([0.001])  # Data not used in ER/VR
402
403    model_info = load_model_info(model_name)
404    model = build_model(model_info)
405    calculator = DirectModel(data, model)
406    pars = dict((k, (float(v) if not k.endswith("_pd_type") else v))
407                for pair in sys.argv[3:]
408                for k, v in [pair.split('=')])
409    if call == "ER_VR":
410        ER = call_ER(model_info, pars)
411        VR = call_VR(model_info, pars)
412        print(ER, VR)
413    else:
414        Iq = calculator(**pars)
415        print(Iq[0])
416
417if __name__ == "__main__":
418    main()