source: sasmodels/sasmodels/resolution.py @ 87964ac

Last change on this file was 87964ac, checked in by Paul Kienzle <pkienzle@…>, 6 years ago: correct pinhole demo for truncated gaussian weight

[d9633b1]1"""
2Define the resolution functions for the data.
3
4This defines classes for 1D and 2D resolution calculations.
5"""
[7954f5c]6from __future__ import division
[d138d43]7
[2d81cfe]8import unittest
9
[7ae2b7f]10from scipy.special import erf  # type: ignore
11from numpy import sqrt, log, log10, exp, pi  # type: ignore
12import numpy as np  # type: ignore
[190fc2b]13
[d138d43]14__all__ = ["Resolution", "Perfect1D", "Pinhole1D", "Slit1D",
15           "apply_resolution_matrix", "pinhole_resolution", "slit_resolution",
16           "pinhole_extend_q", "slit_extend_q", "bin_edges",
17           "interpolate", "linear_extrapolation", "geometric_extrapolation",
[823e620]18          ]
[d138d43]19
[d9633b1]20MINIMUM_RESOLUTION = 1e-8
[355ee8b]21MINIMUM_ABSOLUTE_Q = 0.02  # relative to the minimum q in the data
[1198f90]22PINHOLE_N_SIGMA = 2.5 # From: Barker & Pedersen 1995 JAC
[eb588ef]23
[346bc88]24class Resolution(object):
[d9633b1]25    """
26    Abstract base class defining a 1D resolution function.
27
28    *q* is the set of q values at which the data is measured.
29
30    *q_calc* is the set of q values at which the theory needs to be evaluated.
31    This may extend and interpolate the q values.
32
33    *apply* is the method to call with I(q_calc) to compute the resolution
34    smeared theory I(q).
35    """
[7ae2b7f]36    q = None  # type: np.ndarray
37    q_calc = None  # type: np.ndarray
[33c8d73]38    def apply(self, theory):
[d9633b1]39        """
[33c8d73]40        Smear *theory* by the resolution function, returning *Iq*.
[d9633b1]41        """
42        raise NotImplementedError("Subclass does not define the apply function")
43
[b397165]44
[346bc88]45class Perfect1D(Resolution):
[d9633b1]46    """
47    Resolution function to use when there is no actual resolution smearing
48    to be applied.  It has the same interface as the other resolution
49    functions, but returns the identity function.
50    """
51    def __init__(self, q):
52        self.q_calc = self.q = q
53
[33c8d73]54    def apply(self, theory):
55        return theory
[d9633b1]56

class Pinhole1D(Resolution):
    r"""
    Pinhole aperture with q-dependent gaussian resolution.

    *q* points at which the data is measured.

    *q_width* gaussian 1-sigma resolution at each data point.

    *q_calc* is the list of points to calculate, or None if this should
    be estimated from the *q* and *q_width*.

    *nsigma* is the width of the resolution window in standard deviations;
    it should be 2.5.  See :func:`pinhole_resolution` for details.
    """
    def __init__(self, q, q_width, q_calc=None, nsigma=PINHOLE_N_SIGMA):
        #*min_step* is the minimum point spacing to use when computing the
        #underlying model.  It should be on the order of
        #$\tfrac{1}{10}\tfrac{2\pi}{d_\text{max}}$ to make sure that fringes
        #are computed with sufficient density to avoid aliasing effects.

        # Protect against calls with q_width=0.  The extend_q function will
        # not extend the q if q_width is 0, but q_width must be non-zero when
        # constructing the weight matrix to avoid division by zero errors.
        # In practice this should never be needed, since resolution should
        # default to Perfect1D if the pinhole geometry is not defined.
        self.q, self.q_width = q, q_width
        self.q_calc = (pinhole_extend_q(q, q_width, nsigma=nsigma)
                       if q_calc is None else np.sort(q_calc))

        # Protect against models which are not defined for very low q.  Limit
        # the smallest q value evaluated (in absolute value) to 0.02*min(q)
        cutoff = MINIMUM_ABSOLUTE_Q*np.min(self.q)
        self.q_calc = self.q_calc[abs(self.q_calc) >= cutoff]

        # Build weight matrix from calculated q values
        self.weight_matrix = pinhole_resolution(
            self.q_calc, self.q, np.maximum(q_width, MINIMUM_RESOLUTION),
            nsigma=nsigma)
        self.q_calc = abs(self.q_calc)

    def apply(self, theory):
        return apply_resolution_matrix(self.weight_matrix, theory)

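# Illustrative usage sketch (an addition, not part of the original module):
# build a Pinhole1D smearer from q and dq, evaluate the theory on
# resolution.q_calc, then call apply() to get the smeared curve back on q.
# The 5% dQ/Q value and the toy power-law "theory" are assumptions chosen
# only for demonstration.
def _pinhole_usage_sketch():
    q = np.linspace(0.001, 0.5, 200)
    dq = 0.05*q  # assumed 5% dQ/Q pinhole resolution
    resolution = Pinhole1D(q, dq)
    theory = 1.0/(1.0 + (resolution.q_calc/0.1)**4)  # toy scattering curve
    return resolution.q, resolution.apply(theory)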

class Slit1D(Resolution):
    """
    Slit aperture with resolution function.

    *q* points at which the data is measured.

    *qx_width* slit width in qx

    *qy_width* slit height in qy

    *q_calc* is the list of points to calculate, or None if this should
    be estimated from *q*, *qx_width* and *qy_width*.

    The *weight_matrix* is computed by :func:`slit_resolution`.
    """
    def __init__(self, q, qx_width, qy_width=0., q_calc=None):
        # Remember what width/height was used even though we won't need them
        # after the weight matrix is constructed
        self.qx_width, self.qy_width = qx_width, qy_width

        # Allow independent resolution on each point even though it is not
        # needed in practice.
        if np.isscalar(qx_width):
            qx_width = np.ones(len(q))*qx_width
        else:
            qx_width = np.asarray(qx_width)
        if np.isscalar(qy_width):
            qy_width = np.ones(len(q))*qy_width
        else:
            qy_width = np.asarray(qy_width)

        self.q = q.flatten()
        self.q_calc = slit_extend_q(q, qx_width, qy_width) \
            if q_calc is None else np.sort(q_calc)

        # Protect against models which are not defined for very low q.  Limit
        # the smallest q value evaluated (in absolute value) to 0.02*min(q)
        cutoff = MINIMUM_ABSOLUTE_Q*np.min(self.q)
        self.q_calc = self.q_calc[abs(self.q_calc) >= cutoff]

        # Build weight matrix from calculated q values
        self.weight_matrix = \
            slit_resolution(self.q_calc, self.q, qx_width, qy_width)
        self.q_calc = abs(self.q_calc)

    def apply(self, theory):
        return apply_resolution_matrix(self.weight_matrix, theory)

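# Illustrative usage sketch (an addition, not part of the original module):
# Slit1D is driven the same way as Pinhole1D.  The 0.117 1/Ang slit width
# mirrors the delta_qv value in the test data further down; the toy theory
# curve is an assumption made only for demonstration.
def _slit_usage_sketch():
    q = np.logspace(-4, -1, 50)
    resolution = Slit1D(q, qx_width=0.117, qy_width=0.)
    theory = 1.0/(1.0 + (resolution.q_calc/0.01)**2)  # toy scattering curve
    return resolution.q, resolution.apply(theory)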

def apply_resolution_matrix(weight_matrix, theory):
    """
    Apply the resolution weight matrix to the computed theory function.
    """
    #print("apply shapes", theory.shape, weight_matrix.shape)
    Iq = np.dot(theory[None, :], weight_matrix)
    #print("result shape",Iq.shape)
    return Iq.flatten()


def pinhole_resolution(q_calc, q, q_width, nsigma=PINHOLE_N_SIGMA):
    r"""
    Compute the convolution matrix *W* for pinhole resolution 1-D data.

    Each column *W[:, i]* contains the normalized weights with which the
    *q_calc* points contribute to the resolution smeared point *q[i]*.  Given
    *W*, the resolution smearing can be computed using *dot(I(q_calc), W)*.

    Note that the resolution is limited to $\pm 2.5\sigma$ [1].  The true
    resolution function is a broadened triangle, and does not extend over the
    entire range $(-\infty, +\infty)$.  It is important to impose this
    limitation since some models fall so steeply that the weighted value in
    the gaussian tails would otherwise dominate the integral.

    *q_calc* must be increasing.  *q_width* must be greater than zero.

    [1] Barker, J. G., and J. S. Pedersen. 1995. Instrumental Smearing Effects
    in Radially Symmetric Small-Angle Neutron Scattering by Numerical and
    Analytical Methods. Journal of Applied Crystallography 28 (2): 105--14.
    https://doi.org/10.1107/S0021889894010095.
    """
    # The current algorithm is a midpoint rectangle rule.  In the test case,
    # neither trapezoid nor Simpson's rule improved the accuracy.
    edges = bin_edges(q_calc)
    #edges[edges < 0.0] = 0.0 # clip edges below zero
    cdf = erf((edges[:, None] - q[None, :]) / (sqrt(2.0)*q_width)[None, :])
    weights = cdf[1:] - cdf[:-1]
    # Limit q range to +/- 2.5 sigma
    weights[q_calc[:, None] < (q - nsigma*q_width)[None, :]] = 0.
    weights[q_calc[:, None] > (q + nsigma*q_width)[None, :]] = 0.
    weights /= np.sum(weights, axis=0)[None, :]
    return weights

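# Small self-check sketch (an addition, not part of the original module):
# the columns of the pinhole weight matrix are normalized, so the weights
# feeding each measured point sum to one.  The q grid and the 5% dQ/Q value
# are assumed for illustration.
def _check_pinhole_weights_normalized():
    q = np.linspace(0.01, 0.1, 10)
    q_width = 0.05*q
    q_calc = pinhole_extend_q(q, q_width, nsigma=PINHOLE_N_SIGMA)
    weights = pinhole_resolution(q_calc, q, q_width)
    assert np.allclose(np.sum(weights, axis=0), 1.0)
    return weights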

def slit_resolution(q_calc, q, width, height, n_height=30):
    r"""
    Build a weight matrix to compute *I_s(q)* from *I(q_calc)*, given
    $q_\perp$ = *width* and $q_\parallel$ = *height*.  *n_height* is
    the number of steps to use in the integration over $q_\parallel$
    when both $q_\perp$ and $q_\parallel$ are non-zero.

    Each $q$ can have an independent width and height value even though
    current instruments use the same slit setting for all measured points.

    If slit height is large relative to width, use:

    .. math::

        I_s(q_i) = \frac{1}{\Delta q_\perp}
            \int_0^{\Delta q_\perp}
                I\left(\sqrt{q_i^2 + q_\perp^2}\right) \,dq_\perp

    If slit width is large relative to height, use:

    .. math::

        I_s(q_i) = \frac{1}{2 \Delta q_\parallel}
            \int_{-\Delta q_\parallel}^{\Delta q_\parallel}
                I\left(|q_i + q_\parallel|\right) \,dq_\parallel

    For a mixture of slit width and height use:

    .. math::

        I_s(q_i) = \frac{1}{2 \Delta q_\parallel \Delta q_\perp}
            \int_{-\Delta q_\parallel}^{\Delta q_\parallel}
            \int_0^{\Delta q_\perp}
                I\left(\sqrt{(q_i + q_\parallel)^2 + q_\perp^2}\right)
                \,dq_\perp dq_\parallel

    **Definition**

    We are using the mid-point integration rule to assign weights to each
    element of a weight matrix $W$ so that

    .. math::

        I_s(q) = W\,I(q_\text{calc})

    If *q_calc* is at the mid-point, we can infer the bin edges from the
    pairwise averages of *q_calc*, adding the missing edges before
    *q_calc[0]* and after *q_calc[-1]*.

    For $q_\parallel = 0$, the smeared value can be computed numerically
    using the $u$ substitution

    .. math::

        u_j = \sqrt{q_j^2 - q^2}

    This gives

    .. math::

        I_s(q) \approx \sum_j I(u_j) \Delta u_j

    where $I(u_j)$ is the value at the mid-point, and $\Delta u_j$ is the
    difference between consecutive edges which have been first converted
    to $u$.  Only $u_j \in [0, \Delta q_\perp]$ are used, which corresponds
    to $q_j \in \left[q, \sqrt{q^2 + \Delta q_\perp^2}\right]$, so

    .. math::

        W_{ij} = \frac{1}{\Delta q_\perp} \Delta u_j
               = \frac{1}{\Delta q_\perp} \left(
                    \sqrt{q_{j+1}^2 - q_i^2} - \sqrt{q_j^2 - q_i^2} \right)
            \ \text{if}\  q_j \in \left[q_i, \sqrt{q_i^2 + q_\perp^2}\right]

    where $I_s(q_i)$ is the theory function being computed and $q_j$ are the
    mid-points between the calculated values in *q_calc*.  We tweak the
    edges of the initial and final intervals so that they lie on integration
    limits.

    (To be precise, the transformed midpoint $u(q_j)$ is not necessarily the
    midpoint of the edges $u((q_{j-1}+q_j)/2)$ and $u((q_j + q_{j+1})/2)$,
    but it is at least in the interval, so the approximation is going to be
    a little better than the left or right Riemann sum, and should be
    good enough for our purposes.)

    For $q_\perp = 0$, the $u$ substitution is simpler:

    .. math::

        u_j = \left|q_j - q\right|

    so

    .. math::

        W_{ij} = \frac{1}{2 \Delta q_\parallel} \Delta u_j
            = \frac{1}{2 \Delta q_\parallel} (q_{j+1} - q_j)
            \ \text{if}\ q_j \in
                \left[q-\Delta q_\parallel, q+\Delta q_\parallel\right]

    However, we need to support cases where $u_j < 0$, which means using
    $2 (q_{j+1} - q_j)$ when $q_j \in \left[0, q_\parallel-q_i\right]$.
    This is not an issue for $q_i > q_\parallel$.

    For both $q_\perp > 0$ and $q_\parallel > 0$ we perform a 2 dimensional
    integration with

    .. math::

        u_{jk} = \sqrt{q_j^2 - (q + (k\Delta q_\parallel/L))^2}
            \ \text{for}\ k = -L \ldots L

    for $L$ = *n_height*.  This gives

    .. math::

        W_{ij} = \frac{1}{2 \Delta q_\perp q_\parallel}
            \sum_{k=-L}^L \Delta u_{jk}
                \left(\frac{\Delta q_\parallel}{2 L + 1}\right)


    """
    #np.set_printoptions(precision=6, linewidth=10000)

    # The current algorithm is a midpoint rectangle rule.
    q_edges = bin_edges(q_calc) # Note: requires q > 0
    #q_edges[q_edges < 0.0] = 0.0 # clip edges below zero
    weights = np.zeros((len(q), len(q_calc)), 'd')

    #print(q_calc)
    for i, (qi, w, h) in enumerate(zip(q, width, height)):
        if w == 0. and h == 0.:
            # Perfect resolution, so return the theory value directly.
            # Note: assumes that q is a subset of q_calc.  If qi need not be
            # in q_calc, then we can do a weighted interpolation by looking
            # up qi in q_calc, then weighting the result by the relative
            # distance to the neighbouring points.
            weights[i, :] = (q_calc == qi)
        elif h == 0:
            weights[i, :] = _q_perp_weights(q_edges, qi, w)
        elif w == 0:
            in_x = 1.0 * ((q_calc >= qi-h) & (q_calc <= qi+h))
            abs_x = 1.0*(q_calc < abs(qi - h)) if qi < h else 0.
            #print(qi - h, qi + h)
            #print(in_x + abs_x)
            weights[i, :] = (in_x + abs_x) * np.diff(q_edges) / (2*h)
        else:
            for k in range(-n_height, n_height+1):
                weights[i, :] += _q_perp_weights(q_edges, qi+k*h/n_height, w)
            weights[i, :] /= 2*n_height + 1

    return weights.T


def _q_perp_weights(q_edges, qi, w):
    # Convert bin edges from q to u
    u_limit = np.sqrt(qi**2 + w**2)
    u_edges = q_edges**2 - qi**2
    u_edges[q_edges < abs(qi)] = 0.
    u_edges[q_edges > u_limit] = u_limit**2 - qi**2
    weights = np.diff(np.sqrt(u_edges))/w
    #print("i, qi",i,qi,qi+width)
    #print(q_calc)
    #print(weights)
    return weights

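# Small self-check sketch (an addition, not part of the original module):
# for width-only smearing the u-substituted edges are clipped to the
# integration limits, so the weights in each column of the slit matrix sum
# to ~1.  The q grid and the 0.05 1/Ang width are assumed for illustration.
def _check_slit_weights_normalized():
    q = np.linspace(0.01, 0.1, 10)
    width = np.full_like(q, 0.05)
    height = np.zeros_like(q)
    q_calc = slit_extend_q(q, width, height)
    weights = slit_resolution(q_calc, q, width, height)
    assert np.allclose(np.sum(weights, axis=0), 1.0)
    return weights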

def pinhole_extend_q(q, q_width, nsigma=3):
    """
    Given *q* and *q_width*, find a set of sampling points *q_calc* so
    that each point $I(q)$ has sufficient support from the underlying
    function.
    """
    q_min, q_max = np.min(q - nsigma*q_width), np.max(q + nsigma*q_width)
    return linear_extrapolation(q, q_min, q_max)


def slit_extend_q(q, width, height):
    """
    Given *q*, *width* and *height*, find a set of sampling points *q_calc* so
    that each point I(q) has sufficient support from the underlying
    function.
    """
    q_min, q_max = np.min(q-height), np.max(np.sqrt((q+height)**2 + width**2))

    return geometric_extrapolation(q, q_min, q_max)


def bin_edges(x):
    """
    Determine bin edges from bin centers, assuming that edges are centered
    between the bins.

    Note: this uses the arithmetic mean, which may not be appropriate for
    log-scaled data.
    """
    if len(x) < 2 or (np.diff(x) < 0).any():
        raise ValueError("Expected bins to be an increasing set")
    edges = np.hstack([
        x[0]  - 0.5*(x[1]  - x[0]),  # first point minus half first interval
        0.5*(x[1:] + x[:-1]),        # mid points of all central intervals
        x[-1] + 0.5*(x[-1] - x[-2]), # last point plus half last interval
        ])
    return edges

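# Worked example sketch (an addition, not part of the original module):
# bin_edges() turns bin centers into midpoint edges, extrapolating half an
# interval beyond each end.  The centers below are arbitrary.
def _bin_edges_example():
    centers = np.array([1.0, 2.0, 3.0, 4.0])
    edges = bin_edges(centers)
    assert np.allclose(edges, [0.5, 1.5, 2.5, 3.5, 4.5])
    return edges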

def interpolate(q, max_step):
    """
    Returns *q_calc* with points spaced at most max_step apart.
    """
    step = np.diff(q)
    index = step > max_step
    if np.any(index):
        inserts = []
        for q_i, step_i in zip(q[:-1][index], step[index]):
            n = np.ceil(step_i/max_step)
            inserts.extend(q_i + np.arange(1, n)*(step_i/n))
        # Extend a couple of fringes beyond the end of the data
        inserts.extend(q[-1] + np.arange(1, 8)*max_step)
        q_calc = np.sort(np.hstack((q, inserts)))
    else:
        q_calc = q
    return q_calc

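# Worked example sketch (an addition, not part of the original module):
# a sparse q vector is filled in so that no gap exceeds max_step, with a few
# extra points appended beyond the last measured q.  Values are arbitrary.
def _interpolate_example():
    q = np.array([0.001, 0.01, 0.1])
    q_calc = interpolate(q, max_step=0.02)
    # no gap in the refined grid exceeds max_step (allow float round-off)
    assert np.all(np.diff(q_calc) <= 0.02 + 1e-9)
    return q_calc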

def linear_extrapolation(q, q_min, q_max):
    """
    Extrapolate *q* out to [*q_min*, *q_max*] using the step size in *q* as
    a guide.  Extrapolation below uses about the same size as the first
    interval.  Extrapolation above uses about the same size as the final
    interval.

    Note that extrapolated values may be negative.
    """
    q = np.sort(q)
    if q_min + 2*MINIMUM_RESOLUTION < q[0]:
        # int() since np.ceil returns a float and linspace needs an integer count
        n_low = int(np.ceil((q[0]-q_min) / (q[1]-q[0]))) if q[1] > q[0] else 15
        q_low = np.linspace(q_min, q[0], n_low+1)[:-1]
    else:
        q_low = []
    if q_max - 2*MINIMUM_RESOLUTION > q[-1]:
        n_high = int(np.ceil((q_max-q[-1]) / (q[-1]-q[-2]))) if q[-1] > q[-2] else 15
        q_high = np.linspace(q[-1], q_max, n_high+1)[1:]
    else:
        q_high = []
    return np.concatenate([q_low, q, q_high])

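# Worked example sketch (an addition, not part of the original module):
# the measured q range is padded on both sides using roughly the first and
# last measured step sizes.  The grid and limits are arbitrary.
def _linear_extrapolation_example():
    q = np.linspace(0.01, 0.1, 10)  # measured points with step 0.01
    q_calc = linear_extrapolation(q, 0.005, 0.12)
    assert q_calc[0] <= 0.005 and q_calc[-1] >= 0.12
    return q_calc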

def geometric_extrapolation(q, q_min, q_max, points_per_decade=None):
    r"""
    Extrapolate *q* to [*q_min*, *q_max*] using geometric steps, with the
    average geometric step size in *q* as the step size.

    If *q_min* is zero or less then *MINIMUM_ABSOLUTE_Q* times *q[0]* is
    used instead.

    *points_per_decade* sets the ratio between consecutive steps such
    that there will be $n$ points used for every factor of 10 increase
    in *q*.

    If *points_per_decade* is not given, it will be estimated as follows.
    Starting at $q_1$ and stepping geometrically by $\Delta q$ to $q_n$
    in $n$ points gives a geometric average of:

    .. math::

         \log \Delta q = (\log q_n - \log q_1) / (n - 1)

    From this we can compute the number of steps required to extend $q$
    from $q_n$ to $q_\text{max}$ by $\Delta q$ as:

    .. math::

         n_\text{extend} = (\log q_\text{max} - \log q_n) / \log \Delta q

    Substituting:

    .. math::

         n_\text{extend} = (n-1) (\log q_\text{max} - \log q_n)
            / (\log q_n - \log q_1)
    """
    q = np.sort(q)
    if points_per_decade is None:
        log_delta_q = (len(q) - 1) / (log(q[-1]) - log(q[0]))
    else:
        log_delta_q = log(10.) / points_per_decade
    if q_min < q[0]:
        if q_min <= 0:  # as documented: zero or less falls back to q[0]*0.02
            q_min = q[0]*MINIMUM_ABSOLUTE_Q
        n_low = log_delta_q * (log(q[0])-log(q_min))
        # int() since np.ceil returns a float and logspace needs an integer count
        q_low = np.logspace(log10(q_min), log10(q[0]), int(np.ceil(n_low))+1)[:-1]
    else:
        q_low = []
    if q_max > q[-1]:
        n_high = log_delta_q * (log(q_max)-log(q[-1]))
        q_high = np.logspace(log10(q[-1]), log10(q_max), int(np.ceil(n_high))+1)[1:]
    else:
        q_high = []
    return np.concatenate([q_low, q, q_high])

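# Worked example sketch (an addition, not part of the original module):
# a logarithmic grid is extended below and above the measured range with
# roughly the same number of points per decade as the data.  The grid and
# limits are arbitrary.
def _geometric_extrapolation_example():
    q = np.logspace(-3, -1, 21)  # about 10 points per decade
    q_calc = geometric_extrapolation(q, 1e-4, 1.0)
    assert q_calc[0] <= 1.001e-4 and q_calc[-1] >= 0.999
    return q_calc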

############################################################################
# unit tests
############################################################################

def eval_form(q, form, pars):
    """
    Return the SAS model evaluated at *q*.

    *form* is the SAS model returned from :func:`core.load_model`.

    *pars* are the parameter values to use when evaluating.
    """
    from sasmodels import direct_model
    kernel = form.make_kernel([q])
    theory = direct_model.call_kernel(kernel, pars)
    kernel.release()
    return theory


def gaussian(q, q0, dq, nsigma=2.5):
    """
    Return the truncated Gaussian resolution function.

    *q0* is the center, *dq* is the width and *q* are the points to evaluate.
    """
    # Calculate the density of the tails; the resulting gaussian needs to be
    # scaled by this amount in order to integrate to 1.0
    two_tail_density = 2 * (1 + erf(-nsigma/sqrt(2)))/2
    return exp(-0.5*((q-q0)/dq)**2)/(sqrt(2*pi)*dq)/(1-two_tail_density)

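# Small self-check sketch (an addition, not part of the original module):
# the truncated gaussian is rescaled so that it integrates to ~1 over the
# window [q0 - nsigma*dq, q0 + nsigma*dq].  The center and width are
# assumed values for illustration.
def _check_truncated_gaussian_normalization():
    q0, dq, nsigma = 0.05, 0.005, 2.5
    q = np.linspace(q0 - nsigma*dq, q0 + nsigma*dq, 10001)
    total = np.trapz(gaussian(q, q0, dq, nsigma=nsigma), q)
    assert abs(total - 1.0) < 1e-4
    return total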

def romberg_slit_1d(q, width, height, form, pars):
    """
    Romberg integration for slit resolution.

    This is an adaptive integration technique.  It is called with settings
    that make it slow to evaluate but give it good accuracy.
    """
    from scipy.integrate import romberg  # type: ignore

    par_set = set([p.name for p in form.info.parameters.call_parameters])
    if any(k not in par_set for k in pars.keys()):
        extra = set(pars.keys()) - par_set
        raise ValueError("bad parameters: [%s] not in [%s]"
                         % (", ".join(sorted(extra)),
                            ", ".join(sorted(pars.keys()))))

    if np.isscalar(width):
        width = [width]*len(q)
    if np.isscalar(height):
        height = [height]*len(q)
    _int_w = lambda w, qi: eval_form(sqrt(qi**2 + w**2), form, pars)
    _int_h = lambda h, qi: eval_form(abs(qi+h), form, pars)
    # If both width and height are defined, then it is too slow to use dblquad.
    # Instead use trapz on a fixed grid, interpolated into the I(Q) for
    # the extended Q range.
    #_int_wh = lambda w, h, qi: eval_form(sqrt((qi+h)**2 + w**2), form, pars)
    q_calc = slit_extend_q(q, np.asarray(width), np.asarray(height))
    Iq = eval_form(q_calc, form, pars)
    result = np.empty(len(q))
    for i, (qi, w, h) in enumerate(zip(q, width, height)):
        if h == 0.:
            total = romberg(_int_w, 0, w, args=(qi,),
                            divmax=100, vec_func=True, tol=0, rtol=1e-8)
            result[i] = total/w
        elif w == 0.:
            total = romberg(_int_h, -h, h, args=(qi,),
                            divmax=100, vec_func=True, tol=0, rtol=1e-8)
            result[i] = total/(2*h)
        else:
            w_grid = np.linspace(0, w, 21)[None, :]
            h_grid = np.linspace(-h, h, 23)[:, None]
            u_sub = sqrt((qi+h_grid)**2 + w_grid**2)
            f_at_u = np.interp(u_sub, q_calc, Iq)
            #print(np.trapz(Iu, w_grid, axis=1))
            total = np.trapz(np.trapz(f_at_u, w_grid, axis=1), h_grid[:, 0])
            result[i] = total / (2*h*w)
            # from scipy.integrate import dblquad
            # r, err = dblquad(_int_wh, -h, h, lambda h: 0., lambda h: w,
            #                  args=(qi,))
            # result[i] = r/(w*2*h)

    # r should be [float, ...], but it is [array([float]), array([float]),...]
    return result


def romberg_pinhole_1d(q, q_width, form, pars, nsigma=2.5):
    """
    Romberg integration for pinhole resolution.

    This is an adaptive integration technique.  It is called with settings
    that make it slow to evaluate but give it good accuracy.
    """
    from scipy.integrate import romberg  # type: ignore

    par_set = set([p.name for p in form.info.parameters.call_parameters])
    if any(k not in par_set for k in pars.keys()):
        extra = set(pars.keys()) - par_set
        raise ValueError("bad parameters: [%s] not in [%s]"
                         % (", ".join(sorted(extra)),
                            ", ".join(sorted(pars.keys()))))

    func = lambda q, q0, dq: eval_form(q, form, pars)*gaussian(q, q0, dq)
    total = [romberg(func, max(qi-nsigma*dqi, 1e-10*q[0]), qi+nsigma*dqi,
                     args=(qi, dqi), divmax=100, vec_func=True,
                     tol=0, rtol=1e-8)
             for qi, dqi in zip(q, q_width)]
    return np.asarray(total).flatten()

class ResolutionTest(unittest.TestCase):
    """
    Test the resolution calculations.
    """

    def setUp(self):
        self.x = 0.001*np.arange(1, 11)
        self.y = self.Iq(self.x)

    def Iq(self, q):
        "Linear function for resolution unit test"
        return 12.0 - 1000.0*q

    def test_perfect(self):
        """
        Perfect resolution and no smearing.
        """
        resolution = Perfect1D(self.x)
        theory = self.Iq(resolution.q_calc)
        output = resolution.apply(theory)
        np.testing.assert_equal(output, self.y)

    def test_slit_zero(self):
        """
        Slit smearing with perfect resolution.
        """
        resolution = Slit1D(self.x, qx_width=0, qy_width=0, q_calc=self.x)
        theory = self.Iq(resolution.q_calc)
        output = resolution.apply(theory)
        np.testing.assert_equal(output, self.y)

    @unittest.skip("not yet supported")
    def test_slit_high(self):
        """
        Slit smearing with height 0.005
        """
        resolution = Slit1D(self.x, qx_width=0, qy_width=0.005, q_calc=self.x)
        theory = self.Iq(resolution.q_calc)
        output = resolution.apply(theory)
        answer = [
            9.0618, 8.6402, 8.1187, 7.1392, 6.1528,
            5.5555, 4.5584, 3.5606, 2.5623, 2.0000,
            ]
        np.testing.assert_allclose(output, answer, atol=1e-4)

    @unittest.skip("not yet supported")
    def test_slit_both_high(self):
        """
        Slit smearing with width < 100*height.
        """
        q = np.logspace(-4, -1, 10)
        resolution = Slit1D(q, qx_width=0.2, qy_width=np.inf)
        theory = 1000*self.Iq(resolution.q_calc**4)
        output = resolution.apply(theory)
        answer = [
            8.85785, 8.43012, 7.92687, 6.94566, 6.03660,
            5.40363, 4.40655, 3.40880, 2.41058, 2.00000,
            ]
        np.testing.assert_allclose(output, answer, atol=1e-4)

    @unittest.skip("not yet supported")
    def test_slit_wide(self):
        """
        Slit smearing with width 0.0002
        """
        resolution = Slit1D(self.x, qx_width=0.0002, qy_width=0, q_calc=self.x)
        theory = self.Iq(resolution.q_calc)
        output = resolution.apply(theory)
        answer = [
            11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0,
            ]
        np.testing.assert_allclose(output, answer, atol=1e-4)

    @unittest.skip("not yet supported")
    def test_slit_both_wide(self):
        """
        Slit smearing with width > 100*height.
        """
        resolution = Slit1D(self.x, qx_width=0.0002, qy_width=0.000001,
                            q_calc=self.x)
        theory = self.Iq(resolution.q_calc)
        output = resolution.apply(theory)
        answer = [
            11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0,
            ]
        np.testing.assert_allclose(output, answer, atol=1e-4)

    def test_pinhole_zero(self):
        """
        Pinhole smearing with perfect resolution
        """
        resolution = Pinhole1D(self.x, 0.0*self.x)
        theory = self.Iq(resolution.q_calc)
        output = resolution.apply(theory)
        np.testing.assert_equal(output, self.y)

    # TODO: turn pinhole/slit demos into tests

    def test_pinhole(self):
        """
        Pinhole smearing with dQ = 0.001 [Note: not dQ/Q = 0.001]
        """
        resolution = Pinhole1D(self.x, 0.001*np.ones_like(self.x),
                               q_calc=self.x)
        theory = 12.0-1000.0*resolution.q_calc
        output = resolution.apply(theory)
        # Note: answer came from output of previous run.  Non-integer
        # values at ends come from the fact that q_calc does not
        # extend beyond q, and so the weights don't balance.
        answer = [
            10.47037734, 9.86925860,
            9., 8., 7., 6., 5., 4.,
            3.13074140, 2.52962266,
            ]
        np.testing.assert_allclose(output, answer, atol=1e-8)


class IgorComparisonTest(unittest.TestCase):
    """
    Test resolution calculations against those returned by Igor.
    """

    def setUp(self):
        self.pars = TEST_PARS_PINHOLE_SPHERE
        from sasmodels import core
        self.model = core.load_model("sphere", dtype='double')

    def _eval_sphere(self, pars, resolution):
        from sasmodels import direct_model
        kernel = self.model.make_kernel([resolution.q_calc])
        theory = direct_model.call_kernel(kernel, pars)
        result = resolution.apply(theory)
        kernel.release()
        return result

    def _compare(self, q, output, answer, tolerance):
        #err = (output - answer)/answer
        #idx = abs(err) >= tolerance
        #problem = zip(q[idx], output[idx], answer[idx], err[idx])
        #print("\n".join(str(v) for v in problem))
        np.testing.assert_allclose(output, answer, rtol=tolerance)

    def test_perfect(self):
        """
        Compare sphere model with NIST Igor SANS, no resolution smearing.
        """
        pars = TEST_PARS_SLIT_SPHERE
        data_string = TEST_DATA_SLIT_SPHERE

        data = np.loadtxt(data_string.split('\n')).T
        q, _, answer, _ = data
        resolution = Perfect1D(q)
        output = self._eval_sphere(pars, resolution)
        self._compare(q, output, answer, 1e-6)

    @unittest.skip("suppress comparison with old version; pinhole calc changed")
    def test_pinhole(self):
        """
        Compare pinhole resolution smearing with NIST Igor SANS
        """
        pars = TEST_PARS_PINHOLE_SPHERE
        data_string = TEST_DATA_PINHOLE_SPHERE

        data = np.loadtxt(data_string.split('\n')).T
        q, q_width, answer = data
        resolution = Pinhole1D(q, q_width)
        output = self._eval_sphere(pars, resolution)
        # TODO: relative error should be lower
        self._compare(q, output, answer, 3e-4)

    @unittest.skip("suppress comparison with old version; pinhole calc changed")
    def test_pinhole_romberg(self):
        """
        Compare pinhole resolution smearing with romberg integration result.
        """
        pars = TEST_PARS_PINHOLE_SPHERE
        data_string = TEST_DATA_PINHOLE_SPHERE
        pars['radius'] *= 5

        data = np.loadtxt(data_string.split('\n')).T
        q, q_width, answer = data
        answer = romberg_pinhole_1d(q, q_width, self.model, pars)
        ## Getting 0.1% requires 5 sigma and 200 points per fringe
        #q_calc = interpolate(pinhole_extend_q(q, q_width, nsigma=5),
        #                     2*np.pi/pars['radius']/200)
        #tol = 0.001
        ## The default 2.5 sigma and no extra points gets 1%
        q_calc = None  # type: np.ndarray
        tol = 0.01
        resolution = Pinhole1D(q, q_width, q_calc=q_calc)
        output = self._eval_sphere(pars, resolution)
        if 0: # debug plot
            import matplotlib.pyplot as plt  # type: ignore
            resolution = Perfect1D(q)
            source = self._eval_sphere(pars, resolution)
            plt.loglog(q, source, '.')
            plt.loglog(q, answer, '-', hold=True)
            plt.loglog(q, output, '-', hold=True)
            plt.show()
        self._compare(q, output, answer, tol)

    def test_slit(self):
        """
        Compare slit resolution smearing with NIST Igor SANS
        """
        pars = TEST_PARS_SLIT_SPHERE
        data_string = TEST_DATA_SLIT_SPHERE

        data = np.loadtxt(data_string.split('\n')).T
        q, delta_qv, _, answer = data
        resolution = Slit1D(q, qx_width=delta_qv, qy_width=0)
        output = self._eval_sphere(pars, resolution)
        # TODO: eliminate Igor test since it is too inaccurate to be useful.
        # This means we can eliminate the test data as well, and instead
        # use a generated q vector.
        self._compare(q, output, answer, 0.5)

    def test_slit_romberg(self):
        """
        Compare slit resolution smearing with romberg integration result.
        """
        pars = TEST_PARS_SLIT_SPHERE
        data_string = TEST_DATA_SLIT_SPHERE

        data = np.loadtxt(data_string.split('\n')).T
        q, delta_qv, _, answer = data
        answer = romberg_slit_1d(q, delta_qv, 0., self.model, pars)
        q_calc = slit_extend_q(interpolate(q, 2*np.pi/pars['radius']/20),
                               delta_qv[0], 0.)
        resolution = Slit1D(q, qx_width=delta_qv, qy_width=0, q_calc=q_calc)
        output = self._eval_sphere(pars, resolution)
        # TODO: relative error should be lower
        self._compare(q, output, answer, 0.025)

    def test_ellipsoid(self):
        """
        Compare romberg integration for ellipsoid model.
        """
        from .core import load_model
        pars = {
            'scale':0.05,
            'radius_polar':500, 'radius_equatorial':15000,
            'sld':6, 'sld_solvent': 1,
            }
        form = load_model('ellipsoid', dtype='double')
        q = np.logspace(log10(4e-5), log10(2.5e-2), 68)
        width, height = 0.117, 0.
        resolution = Slit1D(q, qx_width=width, qy_width=height)
        answer = romberg_slit_1d(q, width, height, form, pars)
        output = resolution.apply(eval_form(resolution.q_calc, form, pars))
        # TODO: 10% is too much error; use better algorithm
        #print(np.max(abs(answer-output)/answer))
        self._compare(q, output, answer, 0.1)

    #TODO: can sas q spacing be too sparse for the resolution calculation?
    @unittest.skip("suppress sparse data test; not supported by current code")
    def test_pinhole_sparse(self):
        """
        Compare pinhole resolution smearing with NIST Igor SANS on sparse data
        """
        pars = TEST_PARS_PINHOLE_SPHERE
        data_string = TEST_DATA_PINHOLE_SPHERE

        data = np.loadtxt(data_string.split('\n')).T
        q, q_width, answer = data[:, ::20] # Take every nth point
        resolution = Pinhole1D(q, q_width)
        output = self._eval_sphere(pars, resolution)
        self._compare(q, output, answer, 1e-6)


# pinhole sphere parameters
TEST_PARS_PINHOLE_SPHERE = {
    'scale': 1.0, 'background': 0.01,
    'radius': 60.0, 'sld': 1, 'sld_solvent': 6.3,
    }
# Q, dQ, I(Q) calculated by NIST Igor SANS package
TEST_DATA_PINHOLE_SPHERE = """\
[d9633b1]8830.001278 0.0002847 2538.41176383
8840.001562 0.0002905 2536.91820405
8850.001846 0.0002956 2535.13182479
8860.002130 0.0003017 2533.06217813
8870.002414 0.0003087 2530.70378586
8880.002698 0.0003165 2528.05024192
8890.002982 0.0003249 2525.10408349
8900.003266 0.0003340 2521.86667499
8910.003550 0.0003437 2518.33907750
8920.003834 0.0003539 2514.52246995
8930.004118 0.0003646 2510.41798319
8940.004402 0.0003757 2506.02690988
8950.004686 0.0003872 2501.35067884
8960.004970 0.0003990 2496.38678318
8970.005253 0.0004112 2491.16237596
8980.005537 0.0004237 2485.63911673
8990.005821 0.0004365 2479.83657083
9000.006105 0.0004495 2473.75676948
9010.006389 0.0004628 2467.40145990
9020.006673 0.0004762 2460.77293372
9030.006957 0.0004899 2453.86724627
9040.007241 0.0005037 2446.69623838
9050.007525 0.0005177 2439.25775219
9060.007809 0.0005318 2431.55421398
9070.008093 0.0005461 2423.58785521
9080.008377 0.0005605 2415.36158137
9090.008661 0.0005750 2406.87009473
9100.008945 0.0005896 2398.12841186
9110.009229 0.0006044 2389.13360806
9120.009513 0.0006192 2379.88958042
9130.009797 0.0006341 2370.39776774
9140.010080 0.0006491 2360.69528793
9150.010360 0.0006641 2350.85169027
9160.010650 0.0006793 2340.42023633
9170.010930 0.0006945 2330.11206013
9180.011220 0.0007097 2319.20109972
9190.011500 0.0007251 2308.43503981
9200.011780 0.0007404 2297.44820179
9210.012070 0.0007558 2285.83853677
9220.012350 0.0007713 2274.41290746
9230.012640 0.0007868 2262.36219581
9240.012920 0.0008024 2250.51169731
9250.013200 0.0008180 2238.45596231
9260.013490 0.0008336 2225.76495666
9270.013770 0.0008493 2213.29618391
9280.014060 0.0008650 2200.19110751
9290.014340 0.0008807 2187.34050325
9300.014620 0.0008965 2174.30529864
9310.014910 0.0009123 2160.61632548
9320.015190 0.0009281 2147.21038112
9330.015470 0.0009440 2133.62023580
9340.015760 0.0009598 2119.37907426
9350.016040 0.0009757 2105.45234903
9360.016330 0.0009916 2090.86319102
9370.016610 0.0010080 2076.60576032
9380.016890 0.0010240 2062.19214565
9390.017180 0.0010390 2047.10550219
9400.017460 0.0010550 2032.38715621
9410.017740 0.0010710 2017.52560123
9420.018030 0.0010880 2001.99124318
9430.018310 0.0011040 1986.84662060
9440.018600 0.0011200 1971.03389745
9450.018880 0.0011360 1955.61395119
9460.019160 0.0011520 1940.08291563
9470.019450 0.0011680 1923.87672225
9480.019730 0.0011840 1908.10656374
9490.020020 0.0012000 1891.66297192
9500.020300 0.0012160 1875.66789021
9510.020580 0.0012320 1859.56357196
9520.020870 0.0012490 1842.79468290
9530.021150 0.0012650 1826.50064489
9540.021430 0.0012810 1810.11533702
9550.021720 0.0012970 1793.06840882
9560.022000 0.0013130 1776.51153580
9570.022280 0.0013290 1759.87201249
9580.022570 0.0013460 1742.57354412
9590.022850 0.0013620 1725.79397319
9600.023140 0.0013780 1708.35831550
9610.023420 0.0013940 1691.45256069
9620.023700 0.0014110 1674.48561783
9630.023990 0.0014270 1656.86525366
9640.024270 0.0014430 1639.79847285
9650.024550 0.0014590 1622.68887088
9660.024840 0.0014760 1604.96421100
9670.025120 0.0014920 1587.85768129
9680.025410 0.0015080 1569.99297335
9690.025690 0.0015240 1552.84580279
9700.025970 0.0015410 1535.54074115
9710.026260 0.0015570 1517.75249337
9720.026540 0.0015730 1500.40115023
9730.026820 0.0015900 1483.03632237
9740.027110 0.0016060 1465.05942429
9750.027390 0.0016220 1447.67682181
9760.027670 0.0016390 1430.46495191
9770.027960 0.0016550 1412.49232282
9780.028240 0.0016710 1395.13182318
9790.028520 0.0016880 1377.93439837
9800.028810 0.0017040 1359.99528971
9810.029090 0.0017200 1342.67274512
9820.029370 0.0017370 1325.55375609
983"""
984
[7954f5c]985# Slit sphere parameters
986TEST_PARS_SLIT_SPHERE = {
987    'scale': 0.01, 'background': 0.01,
[486fcf6]988    'radius': 60000, 'sld': 1, 'sld_solvent': 4,
[7954f5c]989    }
990# Q dQ I(Q) I_smeared(Q)
991TEST_DATA_SLIT_SPHERE = """\
9922.26097e-05 0.117 5.5781372896e+09 1.4626077708e+06
9932.53847e-05 0.117 5.0363141458e+09 1.3117318023e+06
9942.81597e-05 0.117 4.4850108103e+09 1.1594863713e+06
9953.09347e-05 0.117 3.9364658459e+09 1.0094881630e+06
9963.37097e-05 0.117 3.4019975074e+09 8.6518941303e+05
9973.92597e-05 0.117 2.4139519814e+09 6.0232158311e+05
9984.48097e-05 0.117 1.5816877820e+09 3.8739994090e+05
9995.03597e-05 0.117 9.3715407224e+08 2.2745304775e+05
10005.59097e-05 0.117 4.8387917428e+08 1.2101295768e+05
10016.14597e-05 0.117 2.0193586928e+08 6.0055107771e+04
10026.70097e-05 0.117 5.5886110911e+07 3.2749521065e+04
10037.25597e-05 0.117 3.7782348010e+06 2.6350963616e+04
10047.81097e-05 0.117 5.3407817904e+06 2.9624963314e+04
10058.36597e-05 0.117 2.7975485523e+07 3.4403962254e+04
10068.92097e-05 0.117 4.9845448282e+07 3.6130017903e+04
10079.47597e-05 0.117 6.0092588905e+07 3.3495107849e+04
10081.00310e-04 0.117 5.6823430831e+07 2.7475726279e+04
10091.05860e-04 0.117 4.3857024036e+07 2.0144282226e+04
10101.11410e-04 0.117 2.7277144760e+07 1.3647403260e+04
10111.22510e-04 0.117 3.3119334113e+06 6.6519711526e+03
10121.33610e-04 0.117 1.4412859402e+06 6.9726212813e+03
10131.44710e-04 0.117 8.5620162463e+06 8.1441335775e+03
10141.55810e-04 0.117 9.6957429033e+06 6.4559996521e+03
10151.66910e-04 0.117 4.3818341914e+06 3.6252154156e+03
10161.78010e-04 0.117 2.7448997387e+05 2.4006505342e+03
10171.89110e-04 0.117 8.0472009936e+05 2.8187789089e+03
10182.00210e-04 0.117 2.8149552834e+06 3.0915662855e+03
10192.11310e-04 0.117 2.7510907861e+06 2.3722530293e+03
10202.22410e-04 0.117 1.0053133293e+06 1.4473468311e+03
10212.33510e-04 0.117 5.8428305052e+03 1.2048540556e+03
10222.44610e-04 0.117 5.1699305004e+05 1.4625670042e+03
10232.55710e-04 0.117 1.2120227268e+06 1.5010705973e+03
10242.66810e-04 0.117 9.7896842846e+05 1.1336343426e+03
10252.77910e-04 0.117 2.5507264791e+05 8.1848818080e+02
10263.05660e-04 0.117 5.2403101181e+05 7.4913374357e+02
10273.33410e-04 0.117 5.8699343809e+04 4.4669964560e+02
10283.61160e-04 0.117 3.0844327150e+05 4.6774007542e+02
10293.88910e-04 0.117 8.3360142970e+03 2.7169550220e+02
10304.16660e-04 0.117 1.8630080583e+05 3.0710983679e+02
10314.44410e-04 0.117 3.1616804732e-01 1.7959006831e+02
10324.72160e-04 0.117 1.1299016314e+05 2.0763952339e+02
10334.99910e-04 0.117 2.9952522747e+03 1.2536542765e+02
10345.27660e-04 0.117 6.7625695649e+04 1.4013969777e+02
10355.55410e-04 0.117 7.6927460089e+03 8.2145593180e+01
10366.10910e-04 0.117 1.1229057779e+04 8.4519745643e+01
10376.66410e-04 0.117 1.3035567943e+04 8.1554625609e+01
10387.21910e-04 0.117 1.3309931343e+04 7.4437319172e+01
10397.77410e-04 0.117 1.2462626212e+04 6.4697088261e+01
10408.32910e-04 0.117 1.0912927143e+04 5.3773301044e+01
10418.88410e-04 0.117 9.0172597469e+03 4.2843375753e+01
10429.43910e-04 0.117 7.0496495917e+03 3.2771032724e+01
10439.99410e-04 0.117 5.2030483682e+03 2.4113557144e+01
10441.05491e-03 0.117 3.5988976711e+03 1.7160773658e+01
10451.11041e-03 0.117 2.2996060652e+03 1.2016626459e+01
10461.22141e-03 0.117 6.4766590598e+02 6.0373017740e+00
10471.33241e-03 0.117 4.1963483264e+01 4.5215452974e+00
10481.44341e-03 0.117 6.3370708246e+01 5.1054681903e+00
10491.55441e-03 0.117 3.0736750577e+02 5.9176165298e+00
10501.66541e-03 0.117 5.0327682399e+02 5.9815000189e+00
10511.77641e-03 0.117 5.4084331454e+02 5.1634639625e+00
10521.88741e-03 0.117 4.3488671756e+02 3.8535158148e+00
10531.99841e-03 0.117 2.6322287860e+02 2.5824997753e+00
10542.10941e-03 0.117 1.0793633150e+02 1.7315517194e+00
10552.22041e-03 0.117 1.8474448850e+01 1.4077213604e+00
10562.33141e-03 0.117 1.5864062279e+00 1.4771560682e+00
10572.44241e-03 0.117 3.2267213848e+01 1.6916253448e+00
10582.55341e-03 0.117 7.4289116207e+01 1.8274751193e+00
10592.66441e-03 0.117 9.9000521929e+01 1.7706812289e+00
1060"""

def main():
    """
    Run the tests given in sys.argv.

    Return 0 on success or 1 if any tests fail.
    """
    import sys
    import xmlrunner  # type: ignore

    suite = unittest.TestSuite()
    suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(sys.modules[__name__]))

    runner = xmlrunner.XMLTestRunner(output='logs')
    result = runner.run(suite)
    return 1 if result.failures or result.errors else 0


############################################################################
# usage demo
############################################################################

def _eval_demo_1d(resolution, title):
    import sys
    from sasmodels import core
    from sasmodels import direct_model
    name = sys.argv[1] if len(sys.argv) > 1 else 'cylinder'

    if name == 'cylinder':
        pars = {'length':210, 'radius':500, 'background': 0}
    elif name == 'teubner_strey':
        pars = {'a2':0.003, 'c1':-1e4, 'c2':1e10, 'background':0.312643}
    elif name == 'sphere' or name == 'spherepy':
        pars = TEST_PARS_SLIT_SPHERE
    elif name == 'ellipsoid':
        pars = {
            'scale':0.05, 'background': 0,
            'r_polar':500, 'r_equatorial':15000,
            'sld':6, 'sld_solvent': 1,
            }
    else:
        pars = {}
    model_info = core.load_model_info(name)
    model = core.build_model(model_info)

    kernel = model.make_kernel([resolution.q_calc])
    theory = direct_model.call_kernel(kernel, pars)
    Iq = resolution.apply(theory)

    if isinstance(resolution, Slit1D):
        width, height = resolution.qx_width, resolution.qy_width
        Iq_romb = romberg_slit_1d(resolution.q, width, height, model, pars)
    else:
        dq = resolution.q_width
        Iq_romb = romberg_pinhole_1d(resolution.q, dq, model, pars)

    import matplotlib.pyplot as plt  # type: ignore
    plt.loglog(resolution.q_calc, theory, label='unsmeared')
    plt.loglog(resolution.q, Iq, label='smeared', hold=True)
    plt.loglog(resolution.q, Iq_romb, label='romberg smeared', hold=True)
    plt.legend()
    plt.title(title)
    plt.xlabel("Q (1/Ang)")
    plt.ylabel("I(Q) (1/cm)")

def demo_pinhole_1d():
    """
    Show example of pinhole smearing.
    """
    q = np.logspace(-4, np.log10(0.2), 400)
    q_width = 0.1*q
    resolution = Pinhole1D(q, q_width)
    _eval_demo_1d(resolution, title="10% dQ/Q Pinhole Resolution")

def demo_slit_1d():
    """
    Show example of slit smearing.
    """
    q = np.logspace(-4, np.log10(0.2), 100)
    w = h = 0.
    #w = 0.000000277790
    w = 0.0277790
    #h = 0.00277790
    #h = 0.0277790
    resolution = Slit1D(q, w, h)
    _eval_demo_1d(resolution, title="(%g,%g) Slit Resolution"%(w, h))

def demo():
    """
    Run the resolution demos.
    """
    import matplotlib.pyplot as plt  # type: ignore
    plt.subplot(121)
    demo_pinhole_1d()
    #plt.yscale('linear')
    plt.subplot(122)
    demo_slit_1d()
    #plt.yscale('linear')
    plt.show()


if __name__ == "__main__":
    #demo()
    main()