source: sasmodels/sasmodels/resolution.py @ 0ae0f9a

1"""
2Define the resolution functions for the data.
3
4This defines classes for 1D and 2D resolution calculations.
5"""
6from __future__ import division
7
8from scipy.special import erf  # type: ignore
9from numpy import sqrt, log, log10, exp, pi  # type: ignore
10import numpy as np  # type: ignore
11
12__all__ = ["Resolution", "Perfect1D", "Pinhole1D", "Slit1D",
13           "apply_resolution_matrix", "pinhole_resolution", "slit_resolution",
14           "pinhole_extend_q", "slit_extend_q", "bin_edges",
15           "interpolate", "linear_extrapolation", "geometric_extrapolation",
16          ]
17
18MINIMUM_RESOLUTION = 1e-8
19
20
21# When extrapolating to -q, what is the minimum positive q relative to q_min
22# that we wish to calculate?
23MIN_Q_SCALE_FOR_NEGATIVE_Q_EXTRAPOLATION = 0.01
24
25class Resolution(object):
26    """
27    Abstract base class defining a 1D resolution function.
28
29    *q* is the set of q values at which the data is measured.
30
31    *q_calc* is the set of q values at which the theory needs to be evaluated.
32    This may extend and interpolate the q values.
33
34    *apply* is the method to call with I(q_calc) to compute the resolution
35    smeared theory I(q).
36    """
37    q = None  # type: np.ndarray
38    q_calc = None  # type: np.ndarray
39    def apply(self, theory):
40        """
41        Smear *theory* by the resolution function, returning *Iq*.
42        """
43        raise NotImplementedError("Subclass does not define the apply function")
44
45class Perfect1D(Resolution):
46    """
47    Resolution function to use when there is no actual resolution smearing
48    to be applied.  It has the same interface as the other resolution
49    functions, but returns the identity function.
50    """
51    def __init__(self, q):
52        self.q_calc = self.q = q
53
54    def apply(self, theory):
55        return theory
56
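# A minimal usage sketch of the Resolution interface (illustrative only; the
# Lorentzian-like curve below is an arbitrary stand-in for a model evaluated
# at resolution.q_calc).  Perfect1D leaves the theory unchanged.
def _example_perfect_resolution():
    q = np.linspace(0.001, 0.1, 50)
    resolution = Perfect1D(q)
    theory = 1.0/(1.0 + (50.0*resolution.q_calc)**2)  # toy I(q_calc)
    iq = resolution.apply(theory)                     # smeared I(q)
    assert np.allclose(iq, theory)
    return iq
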
57
class SESANS1D(Resolution):
    """
    SESANS transform for 1D data.

    *data* is the SESANS data set; *q_calc* is the set of q values at which
    the theory is evaluated before being transformed to spin-echo space.
    """
    def __init__(self, data, q_calc):
        self.data = data
        self.q_calc = q_calc

    def apply(self, theory):
        # Local import so the q-space resolution classes do not depend on
        # the sesans module at import time.
        from sasmodels import sesans
        return sesans.transform(self.data, self.q_calc, theory, None, None)
66
67class Pinhole1D(Resolution):
68    r"""
69    Pinhole aperture with q-dependent gaussian resolution.
70
71    *q* points at which the data is measured.
72
73    *q_width* gaussian 1-sigma resolution at each data point.
74
75    *q_calc* is the list of points to calculate, or None if this should
76    be estimated from the *q* and *q_width*.
77    """
78    def __init__(self, q, q_width, q_calc=None, nsigma=3):
79        #*min_step* is the minimum point spacing to use when computing the
80        #underlying model.  It should be on the order of
81        #$\tfrac{1}{10}\tfrac{2\pi}{d_\text{max}}$ to make sure that fringes
82        #are computed with sufficient density to avoid aliasing effects.
83
84        # Protect against calls with q_width=0.  The extend_q function will
85        # not extend the q if q_width is 0, but q_width must be non-zero when
86        # constructing the weight matrix to avoid division by zero errors.
87        # In practice this should never be needed, since resolution should
88        # default to Perfect1D if the pinhole geometry is not defined.
89        self.q, self.q_width = q, q_width
90        self.q_calc = (pinhole_extend_q(q, q_width, nsigma=nsigma)
91                       if q_calc is None else np.sort(q_calc))
92        self.weight_matrix = pinhole_resolution(self.q_calc, self.q,
93                                np.maximum(q_width, MINIMUM_RESOLUTION))
94
95    def apply(self, theory):
96        return apply_resolution_matrix(self.weight_matrix, theory)
97
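# A usage sketch for pinhole smearing (illustrative only): a 10% dQ/Q
# gaussian resolution applied to a toy exponential decay.  Any model
# evaluated on resolution.q_calc could replace the toy curve.
def _example_pinhole_smearing():
    q = np.logspace(-3, -1, 40)
    resolution = Pinhole1D(q, q_width=0.1*q)
    theory = np.exp(-resolution.q_calc/0.05)   # toy I(q_calc)
    iq = resolution.apply(theory)              # smeared I(q) on the data q
    assert iq.shape == q.shape
    return iq
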
98
99class Slit1D(Resolution):
100    """
101    Slit aperture with resolution function.
102
103    *q* points at which the data is measured.
104
105    *dqx* slit width in qx
106
107    *dqy* slit height in qy
108
109    *q_calc* is the list of points to calculate, or None if this should
110    be estimated from the *q* and *q_width*.
111
112    The *weight_matrix* is computed by :func:`slit1d_resolution`
113    """
114    def __init__(self, q, qx_width, qy_width=0., q_calc=None):
        # Remember the qx_width/qy_width that were used even though they are
        # not needed once the weight matrix has been constructed.
117        self.qx_width, self.qy_width = qx_width, qy_width
118
119        # Allow independent resolution on each point even though it is not
120        # needed in practice.
121        if np.isscalar(qx_width):
122            qx_width = np.ones(len(q))*qx_width
123        else:
124            qx_width = np.asarray(qx_width)
125        if np.isscalar(qy_width):
126            qy_width = np.ones(len(q))*qy_width
127        else:
128            qy_width = np.asarray(qy_width)
129
130        self.q = q.flatten()
131        self.q_calc = slit_extend_q(q, qx_width, qy_width) \
132            if q_calc is None else np.sort(q_calc)
133        self.weight_matrix = \
134            slit_resolution(self.q_calc, self.q, qx_width, qy_width)
135
136    def apply(self, theory):
137        return apply_resolution_matrix(self.weight_matrix, theory)
138
139
140def apply_resolution_matrix(weight_matrix, theory):
141    """
142    Apply the resolution weight matrix to the computed theory function.
143    """
144    #print("apply shapes", theory.shape, weight_matrix.shape)
145    Iq = np.dot(theory[None, :], weight_matrix)
146    #print("result shape",Iq.shape)
147    return Iq.flatten()
148
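# Shape convention sketch (illustrative only): the weight matrix has one row
# per point in *q_calc* and one column per measured *q*, so theory[None, :]
# dotted with the matrix collapses len(q_calc) values onto len(q) points.
# With uniform weights the result is just the mean of the theory.
def _example_apply_resolution_matrix():
    n_calc, n_q = 5, 3
    weight_matrix = np.full((n_calc, n_q), 1.0/n_calc)
    theory = np.arange(n_calc, dtype='d')
    iq = apply_resolution_matrix(weight_matrix, theory)
    assert iq.shape == (n_q,)
    assert np.allclose(iq, theory.mean())
    return iq
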
149
150def pinhole_resolution(q_calc, q, q_width):
151    """
152    Compute the convolution matrix *W* for pinhole resolution 1-D data.
153
154    Each row *W[i]* determines the normalized weight that the corresponding
155    points *q_calc* contribute to the resolution smeared point *q[i]*.  Given
156    *W*, the resolution smearing can be computed using *dot(W,q)*.
157
158    *q_calc* must be increasing.  *q_width* must be greater than zero.
159    """
160    # The current algorithm is a midpoint rectangle rule.  In the test case,
161    # neither trapezoid nor Simpson's rule improved the accuracy.
162    edges = bin_edges(q_calc)
163    edges[edges < 0.0] = 0.0 # clip edges below zero
164    cdf = erf((edges[:, None] - q[None, :]) / (sqrt(2.0)*q_width)[None, :])
165    weights = cdf[1:] - cdf[:-1]
166    weights /= np.sum(weights, axis=0)[None, :]
167    return weights
168
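# A small consistency check (illustrative only, with arbitrary toy values):
# each column of the pinhole weight matrix is normalized, so smearing a
# constant theory returns the same constant at every measurement point.
def _check_pinhole_normalization():
    q = np.linspace(0.01, 0.1, 10)
    dq = np.full_like(q, 0.005)
    q_calc = pinhole_extend_q(q, dq)
    weights = pinhole_resolution(q_calc, q, dq)
    smeared = apply_resolution_matrix(weights, np.ones_like(q_calc))
    assert np.allclose(smeared, 1.0)
    return weights
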
169
170def slit_resolution(q_calc, q, width, height, n_height=30):
171    r"""
172    Build a weight matrix to compute *I_s(q)* from *I(q_calc)*, given
    $q_\perp$ = *width* and $q_\parallel$ = *height*.  *n_height* is
    the number of steps to use in the integration over $q_\parallel$
175    when both $q_\perp$ and $q_\parallel$ are non-zero.
176
177    Each $q$ can have an independent width and height value even though
178    current instruments use the same slit setting for all measured points.
179
180    If slit height is large relative to width, use:
181
182    .. math::
183
184        I_s(q_i) = \frac{1}{\Delta q_\perp}
185            \int_0^{\Delta q_\perp}
186                I\left(\sqrt{q_i^2 + q_\perp^2}\right) \,dq_\perp
187
188    If slit width is large relative to height, use:
189
190    .. math::
191
192        I_s(q_i) = \frac{1}{2 \Delta q_\parallel}
193            \int_{-\Delta q_\parallel}^{\Delta q_\parallel}
194                I\left(|q_i + q_\parallel|\right) \,dq_\parallel
195
196    For a mixture of slit width and height use:
197
198    .. math::
199
200        I_s(q_i) = \frac{1}{2 \Delta q_\parallel \Delta q_\perp}
201            \int_{-\Delta q_\parallel}^{\Delta q_\parallel}
202            \int_0^{\Delta q_\perp}
203                I\left(\sqrt{(q_i + q_\parallel)^2 + q_\perp^2}\right)
204                \,dq_\perp dq_\parallel
205
206    **Definition**
207
208    We are using the mid-point integration rule to assign weights to each
209    element of a weight matrix $W$ so that
210
211    .. math::
212
213        I_s(q) = W\,I(q_\text{calc})
214
215    If *q_calc* is at the mid-point, we can infer the bin edges from the
216    pairwise averages of *q_calc*, adding the missing edges before
217    *q_calc[0]* and after *q_calc[-1]*.
218
219    For $q_\parallel = 0$, the smeared value can be computed numerically
220    using the $u$ substitution
221
222    .. math::
223
224        u_j = \sqrt{q_j^2 - q^2}
225
226    This gives
227
228    .. math::
229
230        I_s(q) \approx \sum_j I(u_j) \Delta u_j
231
232    where $I(u_j)$ is the value at the mid-point, and $\Delta u_j$ is the
233    difference between consecutive edges which have been first converted
234    to $u$.  Only $u_j \in [0, \Delta q_\perp]$ are used, which corresponds
    to $q_j \in \left[q, \sqrt{q^2 + \Delta q_\perp^2}\right]$, so
236
237    .. math::
238
239        W_{ij} = \frac{1}{\Delta q_\perp} \Delta u_j
240               = \frac{1}{\Delta q_\perp} \left(
241                    \sqrt{q_{j+1}^2 - q_i^2} - \sqrt{q_j^2 - q_i^2} \right)
            \ \text{if}\  q_j \in \left[q_i, \sqrt{q_i^2 + \Delta q_\perp^2}\right]
243
244    where $I_s(q_i)$ is the theory function being computed and $q_j$ are the
245    mid-points between the calculated values in *q_calc*.  We tweak the
246    edges of the initial and final intervals so that they lie on integration
247    limits.
248
249    (To be precise, the transformed midpoint $u(q_j)$ is not necessarily the
250    midpoint of the edges $u((q_{j-1}+q_j)/2)$ and $u((q_j + q_{j+1})/2)$,
251    but it is at least in the interval, so the approximation is going to be
252    a little better than the left or right Riemann sum, and should be
253    good enough for our purposes.)
254
255    For $q_\perp = 0$, the $u$ substitution is simpler:
256
257    .. math::
258
259        u_j = \left|q_j - q\right|
260
261    so
262
263    .. math::
264
265        W_{ij} = \frac{1}{2 \Delta q_\parallel} \Delta u_j
266            = \frac{1}{2 \Delta q_\parallel} (q_{j+1} - q_j)
267            \ \text{if}\ q_j \in
268                \left[q-\Delta q_\parallel, q+\Delta q_\parallel\right]
269
    However, we need to support cases where $u_j < 0$, which means using
    $2 (q_{j+1} - q_j)$ when $q_j \in \left[0, \Delta q_\parallel-q_i\right]$.
    This is not an issue for $q_i > \Delta q_\parallel$.
273
274    For both $q_\perp > 0$ and $q_\parallel > 0$ we perform a 2 dimensional
275    integration with
276
277    .. math::
278
279        u_{jk} = \sqrt{q_j^2 - (q + (k\Delta q_\parallel/L))^2}
280            \ \text{for}\ k = -L \ldots L
281
282    for $L$ = *n_height*.  This gives
283
284    .. math::
285
        W_{ij} = \frac{1}{2 \Delta q_\parallel \Delta q_\perp}
            \sum_{k=-L}^L \Delta u_{jk}
                \left(\frac{2 \Delta q_\parallel}{2 L + 1}\right)
               = \frac{1}{\Delta q_\perp (2 L + 1)} \sum_{k=-L}^L \Delta u_{jk}
289
290
291    """
292    #np.set_printoptions(precision=6, linewidth=10000)
293
294    # The current algorithm is a midpoint rectangle rule.
295    q_edges = bin_edges(q_calc) # Note: requires q > 0
296    q_edges[q_edges < 0.0] = 0.0 # clip edges below zero
297    weights = np.zeros((len(q), len(q_calc)), 'd')
298
299    #print(q_calc)
300    for i, (qi, w, h) in enumerate(zip(q, width, height)):
301        if w == 0. and h == 0.:
302            # Perfect resolution, so return the theory value directly.
303            # Note: assumes that q is a subset of q_calc.  If qi need not be
304            # in q_calc, then we can do a weighted interpolation by looking
305            # up qi in q_calc, then weighting the result by the relative
306            # distance to the neighbouring points.
307            weights[i, :] = (q_calc == qi)
308        elif h == 0:
309            weights[i, :] = _q_perp_weights(q_edges, qi, w)
310        elif w == 0:
311            in_x = 1.0 * ((q_calc >= qi-h) & (q_calc <= qi+h))
312            abs_x = 1.0*(q_calc < abs(qi - h)) if qi < h else 0.
313            #print(qi - h, qi + h)
314            #print(in_x + abs_x)
315            weights[i, :] = (in_x + abs_x) * np.diff(q_edges) / (2*h)
316        else:
317            for k in range(-n_height, n_height+1):
318                weights[i, :] += _q_perp_weights(q_edges, qi+k*h/n_height, w)
319            weights[i, :] /= 2*n_height + 1
320
321    return weights.T
322
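# A small consistency check (illustrative only, with arbitrary toy values):
# for slit-width smearing the weights along each row telescope to one, so a
# constant theory passes through the slit smearing unchanged.
def _check_slit_normalization():
    q = np.linspace(0.01, 0.1, 10)
    width = np.full_like(q, 0.05)
    height = np.zeros_like(q)
    q_calc = slit_extend_q(q, width, height)
    weights = slit_resolution(q_calc, q, width, height)
    smeared = apply_resolution_matrix(weights, np.ones_like(q_calc))
    assert np.allclose(smeared, 1.0)
    return weights
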
323
324def _q_perp_weights(q_edges, qi, w):
325    # Convert bin edges from q to u
326    u_limit = np.sqrt(qi**2 + w**2)
327    u_edges = q_edges**2 - qi**2
328    u_edges[q_edges < abs(qi)] = 0.
329    u_edges[q_edges > u_limit] = u_limit**2 - qi**2
330    weights = np.diff(np.sqrt(u_edges))/w
331    #print("i, qi",i,qi,qi+width)
332    #print(q_calc)
333    #print(weights)
334    return weights
335
336
337def pinhole_extend_q(q, q_width, nsigma=3):
338    """
339    Given *q* and *q_width*, find a set of sampling points *q_calc* so
340    that each point $I(q)$ has sufficient support from the underlying
341    function.
342    """
343    q_min, q_max = np.min(q - nsigma*q_width), np.max(q + nsigma*q_width)
344    return linear_extrapolation(q, q_min, q_max)
345
346
347def slit_extend_q(q, width, height):
348    """
349    Given *q*, *width* and *height*, find a set of sampling points *q_calc* so
350    that each point I(q) has sufficient support from the underlying
351    function.
352    """
353    q_min, q_max = np.min(q-height), np.max(np.sqrt((q+height)**2 + width**2))
354
355    return geometric_extrapolation(q, q_min, q_max)
356
357
358def bin_edges(x):
359    """
360    Determine bin edges from bin centers, assuming that edges are centered
361    between the bins.
362
363    Note: this uses the arithmetic mean, which may not be appropriate for
364    log-scaled data.
365    """
366    if len(x) < 2 or (np.diff(x) < 0).any():
367        raise ValueError("Expected bins to be an increasing set")
368    edges = np.hstack([
369        x[0]  - 0.5*(x[1]  - x[0]),  # first point minus half first interval
370        0.5*(x[1:] + x[:-1]),        # mid points of all central intervals
371        x[-1] + 0.5*(x[-1] - x[-2]), # last point plus half last interval
372        ])
373    return edges
374
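# Example of bin_edges (illustrative only): four centers produce five edges,
# with the outer edges extrapolated by half the adjacent interval.
def _example_bin_edges():
    centers = np.array([1.0, 2.0, 3.0, 5.0])
    edges = bin_edges(centers)
    assert np.allclose(edges, [0.5, 1.5, 2.5, 4.0, 6.0])
    return edges
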
375
376def interpolate(q, max_step):
377    """
378    Returns *q_calc* with points spaced at most max_step apart.
379    """
380    step = np.diff(q)
381    index = step > max_step
382    if np.any(index):
383        inserts = []
384        for q_i, step_i in zip(q[:-1][index], step[index]):
385            n = np.ceil(step_i/max_step)
386            inserts.extend(q_i + np.arange(1, n)*(step_i/n))
387        # Extend a couple of fringes beyond the end of the data
388        inserts.extend(q[-1] + np.arange(1, 8)*max_step)
389        q_calc = np.sort(np.hstack((q, inserts)))
390    else:
391        q_calc = q
392    return q_calc
393
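# Example of interpolate (illustrative only): a gap wider than *max_step* is
# subdivided, and a few extra points are appended beyond the last q value.
def _example_interpolate():
    q = np.array([0.01, 0.02, 0.06])
    q_calc = interpolate(q, max_step=0.02)
    assert np.all(np.diff(q_calc) <= 0.02 + 1e-12)
    assert q_calc[-1] > q[-1]   # extended past the data
    return q_calc
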
394
395def linear_extrapolation(q, q_min, q_max):
396    """
397    Extrapolate *q* out to [*q_min*, *q_max*] using the step size in *q* as
398    a guide.  Extrapolation below uses about the same size as the first
399    interval.  Extrapolation above uses about the same size as the final
400    interval.
401
    If *q_min* is zero or less then a small positive fraction of *q[0]*
    (set by *MIN_Q_SCALE_FOR_NEGATIVE_Q_EXTRAPOLATION*) is used instead.
403    """
404    q = np.sort(q)
405    if q_min + 2*MINIMUM_RESOLUTION < q[0]:
        # q_min must be positive; fall back to a small fraction of q[0].
        if q_min <= 0: q_min = q[0]*MIN_Q_SCALE_FOR_NEGATIVE_Q_EXTRAPOLATION
        n_low = int(np.ceil((q[0]-q_min) / (q[1]-q[0]))) if q[1] > q[0] else 15
408        q_low = np.linspace(q_min, q[0], n_low+1)[:-1]
409    else:
410        q_low = []
411    if q_max - 2*MINIMUM_RESOLUTION > q[-1]:
        n_high = int(np.ceil((q_max-q[-1]) / (q[-1]-q[-2]))) if q[-1] > q[-2] else 15
413        q_high = np.linspace(q[-1], q_max, n_high+1)[1:]
414    else:
415        q_high = []
416    return np.concatenate([q_low, q, q_high])
417
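# Example of linear_extrapolation (illustrative only): the q range is padded
# on both sides using roughly the first and last step sizes as a guide.
def _example_linear_extrapolation():
    q = np.linspace(0.02, 0.1, 9)          # uniform 0.01 spacing
    q_calc = linear_extrapolation(q, q_min=0.005, q_max=0.13)
    assert q_calc[0] <= 0.005 + 1e-12
    assert q_calc[-1] >= 0.13 - 1e-12
    return q_calc
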
418
419def geometric_extrapolation(q, q_min, q_max, points_per_decade=None):
420    r"""
421    Extrapolate *q* to [*q_min*, *q_max*] using geometric steps, with the
422    average geometric step size in *q* as the step size.
423
    If *q_min* is zero or less then a small positive fraction of *q[0]*
    (set by *MIN_Q_SCALE_FOR_NEGATIVE_Q_EXTRAPOLATION*) is used instead.
425
426    *points_per_decade* sets the ratio between consecutive steps such
427    that there will be $n$ points used for every factor of 10 increase
428    in *q*.
429
430    If *points_per_decade* is not given, it will be estimated as follows.
431    Starting at $q_1$ and stepping geometrically by $\Delta q$ to $q_n$
432    in $n$ points gives a geometric average of:
433
434    .. math::
435
         \log \Delta q = (\log q_n - \log q_1) / (n - 1)
437
438    From this we can compute the number of steps required to extend $q$
439    from $q_n$ to $q_\text{max}$ by $\Delta q$ as:
440
441    .. math::
442
443         n_\text{extend} = (\log q_\text{max} - \log q_n) / \log \Delta q
444
445    Substituting:
446
447    .. math::
448
         n_\text{extend} = (n-1) (\log q_\text{max} - \log q_n)
            / (\log q_n - \log q_1)
451    """
452    q = np.sort(q)
    # points_per_log is the number of points per unit change in log q
    # (natural log), either estimated from the data or derived from
    # *points_per_decade*.
    if points_per_decade is None:
        points_per_log = (len(q) - 1) / (log(q[-1]) - log(q[0]))
    else:
        points_per_log = points_per_decade / log(10.)
    if q_min < q[0]:
        if q_min <= 0: q_min = q[0]*MIN_Q_SCALE_FOR_NEGATIVE_Q_EXTRAPOLATION
        n_low = points_per_log * (log(q[0])-log(q_min))
        q_low = np.logspace(log10(q_min), log10(q[0]), int(np.ceil(n_low))+1)[:-1]
    else:
        q_low = []
    if q_max > q[-1]:
        n_high = points_per_log * (log(q_max)-log(q[-1]))
        q_high = np.logspace(log10(q[-1]), log10(q_max), int(np.ceil(n_high))+1)[1:]
    else:
        q_high = []
468    return np.concatenate([q_low, q, q_high])
469
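# Example of geometric_extrapolation (illustrative only): extending a
# logarithmic q grid adds roughly the same number of points per decade and
# keeps the grid strictly increasing.
def _example_geometric_extrapolation():
    q = np.logspace(-3, -1, 21)            # about 10 points per decade
    q_calc = geometric_extrapolation(q, q_min=1e-4, q_max=1.0)
    assert np.all(np.diff(q_calc) > 0)
    assert q_calc[0] <= 1e-4 + 1e-12 and q_calc[-1] >= 1.0 - 1e-9
    return q_calc
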
470
471############################################################################
472# unit tests
473############################################################################
474import unittest
475
476
477def eval_form(q, form, pars):
478    """
479    Return the SAS model evaluated at *q*.
480
    *form* is the SAS model returned from :func:`core.load_model`.
482
483    *pars* are the parameter values to use when evaluating.
484    """
485    from sasmodels import direct_model
486    kernel = form.make_kernel([q])
487    theory = direct_model.call_kernel(kernel, pars)
488    kernel.release()
489    return theory
490
491
492def gaussian(q, q0, dq):
493    """
494    Return the Gaussian resolution function.
495
496    *q0* is the center, *dq* is the width and *q* are the points to evaluate.
497    """
498    return exp(-0.5*((q-q0)/dq)**2)/(sqrt(2*pi)*dq)
499
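# Quick check (illustrative only) that the gaussian above is normalized to
# unit area, so the romberg reference calculations below integrate the
# theory against a proper probability weight.
def _check_gaussian_normalization():
    q0, dq = 0.05, 0.005
    q = np.linspace(q0 - 6*dq, q0 + 6*dq, 2001)
    area = np.trapz(gaussian(q, q0, dq), q)
    assert abs(area - 1.0) < 1e-6
    return area
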
500
501def romberg_slit_1d(q, width, height, form, pars):
502    """
503    Romberg integration for slit resolution.
504
505    This is an adaptive integration technique.  It is called with settings
506    that make it slow to evaluate but give it good accuracy.
507    """
508    from scipy.integrate import romberg  # type: ignore
509
510    par_set = set([p.name for p in form.info.parameters.call_parameters])
511    if any(k not in par_set for k in pars.keys()):
512        extra = set(pars.keys()) - par_set
513        raise ValueError("bad parameters: [%s] not in [%s]"
514                         % (", ".join(sorted(extra)),
515                            ", ".join(sorted(pars.keys()))))
516
517    if np.isscalar(width):
518        width = [width]*len(q)
519    if np.isscalar(height):
520        height = [height]*len(q)
521    _int_w = lambda w, qi: eval_form(sqrt(qi**2 + w**2), form, pars)
522    _int_h = lambda h, qi: eval_form(abs(qi+h), form, pars)
523    # If both width and height are defined, then it is too slow to use dblquad.
524    # Instead use trapz on a fixed grid, interpolated into the I(Q) for
525    # the extended Q range.
526    #_int_wh = lambda w, h, qi: eval_form(sqrt((qi+h)**2 + w**2), form, pars)
527    q_calc = slit_extend_q(q, np.asarray(width), np.asarray(height))
528    Iq = eval_form(q_calc, form, pars)
529    result = np.empty(len(q))
530    for i, (qi, w, h) in enumerate(zip(q, width, height)):
531        if h == 0.:
532            total = romberg(_int_w, 0, w, args=(qi,),
533                            divmax=100, vec_func=True, tol=0, rtol=1e-8)
534            result[i] = total/w
535        elif w == 0.:
536            total = romberg(_int_h, -h, h, args=(qi,),
537                            divmax=100, vec_func=True, tol=0, rtol=1e-8)
538            result[i] = total/(2*h)
539        else:
540            w_grid = np.linspace(0, w, 21)[None, :]
541            h_grid = np.linspace(-h, h, 23)[:, None]
542            u_sub = sqrt((qi+h_grid)**2 + w_grid**2)
543            f_at_u = np.interp(u_sub, q_calc, Iq)
544            #print(np.trapz(Iu, w_grid, axis=1))
545            total  = np.trapz(np.trapz(f_at_u, w_grid, axis=1), h_grid[:, 0])
546            result[i] = total / (2*h*w)
547            # from scipy.integrate import dblquad
548            # r, err = dblquad(_int_wh, -h, h, lambda h: 0., lambda h: w,
549            #                  args=(qi,))
550            # result[i] = r/(w*2*h)
551
552    # r should be [float, ...], but it is [array([float]), array([float]),...]
553    return result
554
555
556def romberg_pinhole_1d(q, q_width, form, pars, nsigma=5):
557    """
558    Romberg integration for pinhole resolution.
559
560    This is an adaptive integration technique.  It is called with settings
561    that make it slow to evaluate but give it good accuracy.
562    """
563    from scipy.integrate import romberg  # type: ignore
564
565    par_set = set([p.name for p in form.info.parameters.call_parameters])
566    if any(k not in par_set for k in pars.keys()):
567        extra = set(pars.keys()) - par_set
568        raise ValueError("bad parameters: [%s] not in [%s]"
569                         % (", ".join(sorted(extra)),
570                            ", ".join(sorted(pars.keys()))))
571
572    func = lambda q, q0, dq: eval_form(q, form, pars)*gaussian(q, q0, dq)
573    total = [romberg(func, max(qi-nsigma*dqi, 1e-10*q[0]), qi+nsigma*dqi,
574                     args=(qi, dqi), divmax=100, vec_func=True,
575                     tol=0, rtol=1e-8)
576             for qi, dqi in zip(q, q_width)]
577    return np.asarray(total).flatten()
578
579
580class ResolutionTest(unittest.TestCase):
581    """
582    Test the resolution calculations.
583    """
584
585    def setUp(self):
586        self.x = 0.001*np.arange(1, 11)
587        self.y = self.Iq(self.x)
588
589    def Iq(self, q):
590        "Linear function for resolution unit test"
591        return 12.0 - 1000.0*q
592
593    def test_perfect(self):
594        """
595        Perfect resolution and no smearing.
596        """
597        resolution = Perfect1D(self.x)
598        theory = self.Iq(resolution.q_calc)
599        output = resolution.apply(theory)
600        np.testing.assert_equal(output, self.y)
601
602    def test_slit_zero(self):
603        """
604        Slit smearing with perfect resolution.
605        """
606        resolution = Slit1D(self.x, qx_width=0, qy_width=0, q_calc=self.x)
607        theory = self.Iq(resolution.q_calc)
608        output = resolution.apply(theory)
609        np.testing.assert_equal(output, self.y)
610
611    @unittest.skip("not yet supported")
612    def test_slit_high(self):
613        """
614        Slit smearing with height 0.005
615        """
616        resolution = Slit1D(self.x, qx_width=0, qy_width=0.005, q_calc=self.x)
617        theory = self.Iq(resolution.q_calc)
618        output = resolution.apply(theory)
619        answer = [
620            9.0618, 8.6402, 8.1187, 7.1392, 6.1528,
621            5.5555, 4.5584, 3.5606, 2.5623, 2.0000,
622            ]
623        np.testing.assert_allclose(output, answer, atol=1e-4)
624
625    @unittest.skip("not yet supported")
626    def test_slit_both_high(self):
627        """
628        Slit smearing with width < 100*height.
629        """
630        q = np.logspace(-4, -1, 10)
631        resolution = Slit1D(q, qx_width=0.2, qy_width=np.inf)
632        theory = 1000*self.Iq(resolution.q_calc**4)
633        output = resolution.apply(theory)
634        answer = [
635            8.85785, 8.43012, 7.92687, 6.94566, 6.03660,
636            5.40363, 4.40655, 3.40880, 2.41058, 2.00000,
637            ]
638        np.testing.assert_allclose(output, answer, atol=1e-4)
639
640    @unittest.skip("not yet supported")
641    def test_slit_wide(self):
642        """
643        Slit smearing with width 0.0002
644        """
645        resolution = Slit1D(self.x, qx_width=0.0002, qy_width=0, q_calc=self.x)
646        theory = self.Iq(resolution.q_calc)
647        output = resolution.apply(theory)
648        answer = [
649            11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0,
650            ]
651        np.testing.assert_allclose(output, answer, atol=1e-4)
652
653    @unittest.skip("not yet supported")
654    def test_slit_both_wide(self):
655        """
656        Slit smearing with width > 100*height.
657        """
658        resolution = Slit1D(self.x, qx_width=0.0002, qy_width=0.000001,
659                            q_calc=self.x)
660        theory = self.Iq(resolution.q_calc)
661        output = resolution.apply(theory)
662        answer = [
663            11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0,
664            ]
665        np.testing.assert_allclose(output, answer, atol=1e-4)
666
667    def test_pinhole_zero(self):
668        """
669        Pinhole smearing with perfect resolution
670        """
671        resolution = Pinhole1D(self.x, 0.0*self.x)
672        theory = self.Iq(resolution.q_calc)
673        output = resolution.apply(theory)
674        np.testing.assert_equal(output, self.y)
675
676    def test_pinhole(self):
677        """
678        Pinhole smearing with dQ = 0.001 [Note: not dQ/Q = 0.001]
679        """
680        resolution = Pinhole1D(self.x, 0.001*np.ones_like(self.x),
681                               q_calc=self.x)
682        theory = 12.0-1000.0*resolution.q_calc
683        output = resolution.apply(theory)
684        answer = [
685            10.44785079, 9.84991299, 8.98101708,
686            7.99906585, 6.99998311, 6.00001689,
687            5.00093415, 4.01898292, 3.15008701, 2.55214921,
688            ]
689        np.testing.assert_allclose(output, answer, atol=1e-8)
690
691
692class IgorComparisonTest(unittest.TestCase):
693    """
694    Test resolution calculations against those returned by Igor.
695    """
696
697    def setUp(self):
698        self.pars = TEST_PARS_PINHOLE_SPHERE
699        from sasmodels import core
700        self.model = core.load_model("sphere", dtype='double')
701
702    def _eval_sphere(self, pars, resolution):
703        from sasmodels import direct_model
704        kernel = self.model.make_kernel([resolution.q_calc])
705        theory = direct_model.call_kernel(kernel, pars)
706        result = resolution.apply(theory)
707        kernel.release()
708        return result
709
710    def _compare(self, q, output, answer, tolerance):
711        #err = (output - answer)/answer
712        #idx = abs(err) >= tolerance
713        #problem = zip(q[idx], output[idx], answer[idx], err[idx])
714        #print("\n".join(str(v) for v in problem))
715        np.testing.assert_allclose(output, answer, rtol=tolerance)
716
717    def test_perfect(self):
718        """
719        Compare sphere model with NIST Igor SANS, no resolution smearing.
720        """
721        pars = TEST_PARS_SLIT_SPHERE
722        data_string = TEST_DATA_SLIT_SPHERE
723
724        data = np.loadtxt(data_string.split('\n')).T
725        q, _, answer, _ = data
726        resolution = Perfect1D(q)
727        output = self._eval_sphere(pars, resolution)
728        self._compare(q, output, answer, 1e-6)
729
730    def test_pinhole(self):
731        """
732        Compare pinhole resolution smearing with NIST Igor SANS
733        """
734        pars = TEST_PARS_PINHOLE_SPHERE
735        data_string = TEST_DATA_PINHOLE_SPHERE
736
737        data = np.loadtxt(data_string.split('\n')).T
738        q, q_width, answer = data
739        resolution = Pinhole1D(q, q_width)
740        output = self._eval_sphere(pars, resolution)
741        # TODO: relative error should be lower
742        self._compare(q, output, answer, 3e-4)
743
744    def test_pinhole_romberg(self):
745        """
746        Compare pinhole resolution smearing with romberg integration result.
747        """
748        pars = TEST_PARS_PINHOLE_SPHERE
749        data_string = TEST_DATA_PINHOLE_SPHERE
750        pars['radius'] *= 5
751
752        data = np.loadtxt(data_string.split('\n')).T
753        q, q_width, answer = data
754        answer = romberg_pinhole_1d(q, q_width, self.model, pars)
755        ## Getting 0.1% requires 5 sigma and 200 points per fringe
756        #q_calc = interpolate(pinhole_extend_q(q, q_width, nsigma=5),
757        #                     2*np.pi/pars['radius']/200)
758        #tol = 0.001
759        ## The default 3 sigma and no extra points gets 1%
760        q_calc = None  # type: np.ndarray
761        tol = 0.01
762        resolution = Pinhole1D(q, q_width, q_calc=q_calc)
763        output = self._eval_sphere(pars, resolution)
764        if 0: # debug plot
765            import matplotlib.pyplot as plt  # type: ignore
766            resolution = Perfect1D(q)
767            source = self._eval_sphere(pars, resolution)
768            plt.loglog(q, source, '.')
769            plt.loglog(q, answer, '-', hold=True)
770            plt.loglog(q, output, '-', hold=True)
771            plt.show()
772        self._compare(q, output, answer, tol)
773
774    def test_slit(self):
775        """
776        Compare slit resolution smearing with NIST Igor SANS
777        """
778        pars = TEST_PARS_SLIT_SPHERE
779        data_string = TEST_DATA_SLIT_SPHERE
780
781        data = np.loadtxt(data_string.split('\n')).T
782        q, delta_qv, _, answer = data
783        resolution = Slit1D(q, qx_width=delta_qv, qy_width=0)
784        output = self._eval_sphere(pars, resolution)
785        # TODO: eliminate Igor test since it is too inaccurate to be useful.
786        # This means we can eliminate the test data as well, and instead
787        # use a generated q vector.
788        self._compare(q, output, answer, 0.5)
789
790    def test_slit_romberg(self):
791        """
792        Compare slit resolution smearing with romberg integration result.
793        """
794        pars = TEST_PARS_SLIT_SPHERE
795        data_string = TEST_DATA_SLIT_SPHERE
796
797        data = np.loadtxt(data_string.split('\n')).T
798        q, delta_qv, _, answer = data
799        answer = romberg_slit_1d(q, delta_qv, 0., self.model, pars)
800        q_calc = slit_extend_q(interpolate(q, 2*np.pi/pars['radius']/20),
801                               delta_qv[0], 0.)
802        resolution = Slit1D(q, qx_width=delta_qv, qy_width=0, q_calc=q_calc)
803        output = self._eval_sphere(pars, resolution)
804        # TODO: relative error should be lower
805        self._compare(q, output, answer, 0.025)
806
807    def test_ellipsoid(self):
808        """
809        Compare romberg integration for ellipsoid model.
810        """
811        from .core import load_model
812        pars = {
813            'scale':0.05,
814            'radius_polar':500, 'radius_equatorial':15000,
815            'sld':6, 'sld_solvent': 1,
816            }
817        form = load_model('ellipsoid', dtype='double')
818        q = np.logspace(log10(4e-5), log10(2.5e-2), 68)
819        width, height = 0.117, 0.
820        resolution = Slit1D(q, qx_width=width, qy_width=height)
821        answer = romberg_slit_1d(q, width, height, form, pars)
822        output = resolution.apply(eval_form(resolution.q_calc, form, pars))
823        # TODO: 10% is too much error; use better algorithm
824        #print(np.max(abs(answer-output)/answer))
825        self._compare(q, output, answer, 0.1)
826
827    #TODO: can sas q spacing be too sparse for the resolution calculation?
828    @unittest.skip("suppress sparse data test; not supported by current code")
829    def test_pinhole_sparse(self):
830        """
831        Compare pinhole resolution smearing with NIST Igor SANS on sparse data
832        """
833        pars = TEST_PARS_PINHOLE_SPHERE
834        data_string = TEST_DATA_PINHOLE_SPHERE
835
836        data = np.loadtxt(data_string.split('\n')).T
837        q, q_width, answer = data[:, ::20] # Take every nth point
838        resolution = Pinhole1D(q, q_width)
839        output = self._eval_sphere(pars, resolution)
840        self._compare(q, output, answer, 1e-6)
841
842
843# pinhole sphere parameters
844TEST_PARS_PINHOLE_SPHERE = {
845    'scale': 1.0, 'background': 0.01,
846    'radius': 60.0, 'sld': 1, 'sld_solvent': 6.3,
847    }
848# Q, dQ, I(Q) calculated by NIST Igor SANS package
849TEST_DATA_PINHOLE_SPHERE = """\
8500.001278 0.0002847 2538.41176383
8510.001562 0.0002905 2536.91820405
8520.001846 0.0002956 2535.13182479
8530.002130 0.0003017 2533.06217813
8540.002414 0.0003087 2530.70378586
8550.002698 0.0003165 2528.05024192
8560.002982 0.0003249 2525.10408349
8570.003266 0.0003340 2521.86667499
8580.003550 0.0003437 2518.33907750
8590.003834 0.0003539 2514.52246995
8600.004118 0.0003646 2510.41798319
8610.004402 0.0003757 2506.02690988
8620.004686 0.0003872 2501.35067884
8630.004970 0.0003990 2496.38678318
8640.005253 0.0004112 2491.16237596
8650.005537 0.0004237 2485.63911673
8660.005821 0.0004365 2479.83657083
8670.006105 0.0004495 2473.75676948
8680.006389 0.0004628 2467.40145990
8690.006673 0.0004762 2460.77293372
8700.006957 0.0004899 2453.86724627
8710.007241 0.0005037 2446.69623838
8720.007525 0.0005177 2439.25775219
8730.007809 0.0005318 2431.55421398
8740.008093 0.0005461 2423.58785521
8750.008377 0.0005605 2415.36158137
8760.008661 0.0005750 2406.87009473
8770.008945 0.0005896 2398.12841186
8780.009229 0.0006044 2389.13360806
8790.009513 0.0006192 2379.88958042
8800.009797 0.0006341 2370.39776774
8810.010080 0.0006491 2360.69528793
8820.010360 0.0006641 2350.85169027
8830.010650 0.0006793 2340.42023633
8840.010930 0.0006945 2330.11206013
8850.011220 0.0007097 2319.20109972
8860.011500 0.0007251 2308.43503981
8870.011780 0.0007404 2297.44820179
8880.012070 0.0007558 2285.83853677
8890.012350 0.0007713 2274.41290746
8900.012640 0.0007868 2262.36219581
8910.012920 0.0008024 2250.51169731
8920.013200 0.0008180 2238.45596231
8930.013490 0.0008336 2225.76495666
8940.013770 0.0008493 2213.29618391
8950.014060 0.0008650 2200.19110751
8960.014340 0.0008807 2187.34050325
8970.014620 0.0008965 2174.30529864
8980.014910 0.0009123 2160.61632548
8990.015190 0.0009281 2147.21038112
9000.015470 0.0009440 2133.62023580
9010.015760 0.0009598 2119.37907426
9020.016040 0.0009757 2105.45234903
9030.016330 0.0009916 2090.86319102
9040.016610 0.0010080 2076.60576032
9050.016890 0.0010240 2062.19214565
9060.017180 0.0010390 2047.10550219
9070.017460 0.0010550 2032.38715621
9080.017740 0.0010710 2017.52560123
9090.018030 0.0010880 2001.99124318
9100.018310 0.0011040 1986.84662060
9110.018600 0.0011200 1971.03389745
9120.018880 0.0011360 1955.61395119
9130.019160 0.0011520 1940.08291563
9140.019450 0.0011680 1923.87672225
9150.019730 0.0011840 1908.10656374
9160.020020 0.0012000 1891.66297192
9170.020300 0.0012160 1875.66789021
9180.020580 0.0012320 1859.56357196
9190.020870 0.0012490 1842.79468290
9200.021150 0.0012650 1826.50064489
9210.021430 0.0012810 1810.11533702
9220.021720 0.0012970 1793.06840882
9230.022000 0.0013130 1776.51153580
9240.022280 0.0013290 1759.87201249
9250.022570 0.0013460 1742.57354412
9260.022850 0.0013620 1725.79397319
9270.023140 0.0013780 1708.35831550
9280.023420 0.0013940 1691.45256069
9290.023700 0.0014110 1674.48561783
9300.023990 0.0014270 1656.86525366
9310.024270 0.0014430 1639.79847285
9320.024550 0.0014590 1622.68887088
9330.024840 0.0014760 1604.96421100
9340.025120 0.0014920 1587.85768129
9350.025410 0.0015080 1569.99297335
9360.025690 0.0015240 1552.84580279
9370.025970 0.0015410 1535.54074115
9380.026260 0.0015570 1517.75249337
9390.026540 0.0015730 1500.40115023
9400.026820 0.0015900 1483.03632237
9410.027110 0.0016060 1465.05942429
9420.027390 0.0016220 1447.67682181
9430.027670 0.0016390 1430.46495191
9440.027960 0.0016550 1412.49232282
9450.028240 0.0016710 1395.13182318
9460.028520 0.0016880 1377.93439837
9470.028810 0.0017040 1359.99528971
9480.029090 0.0017200 1342.67274512
9490.029370 0.0017370 1325.55375609
950"""
951
952# Slit sphere parameters
953TEST_PARS_SLIT_SPHERE = {
954    'scale': 0.01, 'background': 0.01,
955    'radius': 60000, 'sld': 1, 'sld_solvent': 4,
956    }
957# Q dQ I(Q) I_smeared(Q)
958TEST_DATA_SLIT_SPHERE = """\
9592.26097e-05 0.117 5.5781372896e+09 1.4626077708e+06
9602.53847e-05 0.117 5.0363141458e+09 1.3117318023e+06
9612.81597e-05 0.117 4.4850108103e+09 1.1594863713e+06
9623.09347e-05 0.117 3.9364658459e+09 1.0094881630e+06
9633.37097e-05 0.117 3.4019975074e+09 8.6518941303e+05
9643.92597e-05 0.117 2.4139519814e+09 6.0232158311e+05
9654.48097e-05 0.117 1.5816877820e+09 3.8739994090e+05
9665.03597e-05 0.117 9.3715407224e+08 2.2745304775e+05
9675.59097e-05 0.117 4.8387917428e+08 1.2101295768e+05
9686.14597e-05 0.117 2.0193586928e+08 6.0055107771e+04
9696.70097e-05 0.117 5.5886110911e+07 3.2749521065e+04
9707.25597e-05 0.117 3.7782348010e+06 2.6350963616e+04
9717.81097e-05 0.117 5.3407817904e+06 2.9624963314e+04
9728.36597e-05 0.117 2.7975485523e+07 3.4403962254e+04
9738.92097e-05 0.117 4.9845448282e+07 3.6130017903e+04
9749.47597e-05 0.117 6.0092588905e+07 3.3495107849e+04
9751.00310e-04 0.117 5.6823430831e+07 2.7475726279e+04
9761.05860e-04 0.117 4.3857024036e+07 2.0144282226e+04
9771.11410e-04 0.117 2.7277144760e+07 1.3647403260e+04
9781.22510e-04 0.117 3.3119334113e+06 6.6519711526e+03
9791.33610e-04 0.117 1.4412859402e+06 6.9726212813e+03
9801.44710e-04 0.117 8.5620162463e+06 8.1441335775e+03
9811.55810e-04 0.117 9.6957429033e+06 6.4559996521e+03
9821.66910e-04 0.117 4.3818341914e+06 3.6252154156e+03
9831.78010e-04 0.117 2.7448997387e+05 2.4006505342e+03
9841.89110e-04 0.117 8.0472009936e+05 2.8187789089e+03
9852.00210e-04 0.117 2.8149552834e+06 3.0915662855e+03
9862.11310e-04 0.117 2.7510907861e+06 2.3722530293e+03
9872.22410e-04 0.117 1.0053133293e+06 1.4473468311e+03
9882.33510e-04 0.117 5.8428305052e+03 1.2048540556e+03
9892.44610e-04 0.117 5.1699305004e+05 1.4625670042e+03
9902.55710e-04 0.117 1.2120227268e+06 1.5010705973e+03
9912.66810e-04 0.117 9.7896842846e+05 1.1336343426e+03
9922.77910e-04 0.117 2.5507264791e+05 8.1848818080e+02
9933.05660e-04 0.117 5.2403101181e+05 7.4913374357e+02
9943.33410e-04 0.117 5.8699343809e+04 4.4669964560e+02
9953.61160e-04 0.117 3.0844327150e+05 4.6774007542e+02
9963.88910e-04 0.117 8.3360142970e+03 2.7169550220e+02
9974.16660e-04 0.117 1.8630080583e+05 3.0710983679e+02
9984.44410e-04 0.117 3.1616804732e-01 1.7959006831e+02
9994.72160e-04 0.117 1.1299016314e+05 2.0763952339e+02
10004.99910e-04 0.117 2.9952522747e+03 1.2536542765e+02
10015.27660e-04 0.117 6.7625695649e+04 1.4013969777e+02
10025.55410e-04 0.117 7.6927460089e+03 8.2145593180e+01
10036.10910e-04 0.117 1.1229057779e+04 8.4519745643e+01
10046.66410e-04 0.117 1.3035567943e+04 8.1554625609e+01
10057.21910e-04 0.117 1.3309931343e+04 7.4437319172e+01
10067.77410e-04 0.117 1.2462626212e+04 6.4697088261e+01
10078.32910e-04 0.117 1.0912927143e+04 5.3773301044e+01
10088.88410e-04 0.117 9.0172597469e+03 4.2843375753e+01
10099.43910e-04 0.117 7.0496495917e+03 3.2771032724e+01
10109.99410e-04 0.117 5.2030483682e+03 2.4113557144e+01
10111.05491e-03 0.117 3.5988976711e+03 1.7160773658e+01
10121.11041e-03 0.117 2.2996060652e+03 1.2016626459e+01
10131.22141e-03 0.117 6.4766590598e+02 6.0373017740e+00
10141.33241e-03 0.117 4.1963483264e+01 4.5215452974e+00
10151.44341e-03 0.117 6.3370708246e+01 5.1054681903e+00
10161.55441e-03 0.117 3.0736750577e+02 5.9176165298e+00
10171.66541e-03 0.117 5.0327682399e+02 5.9815000189e+00
10181.77641e-03 0.117 5.4084331454e+02 5.1634639625e+00
10191.88741e-03 0.117 4.3488671756e+02 3.8535158148e+00
10201.99841e-03 0.117 2.6322287860e+02 2.5824997753e+00
10212.10941e-03 0.117 1.0793633150e+02 1.7315517194e+00
10222.22041e-03 0.117 1.8474448850e+01 1.4077213604e+00
10232.33141e-03 0.117 1.5864062279e+00 1.4771560682e+00
10242.44241e-03 0.117 3.2267213848e+01 1.6916253448e+00
10252.55341e-03 0.117 7.4289116207e+01 1.8274751193e+00
10262.66441e-03 0.117 9.9000521929e+01 1.7706812289e+00
1027"""
1028
1029def main():
1030    """
    Run the unit tests defined in this module, writing XML results to
    the *logs* directory.

    Returns 0 on success or 1 if any tests fail.
1034    """
1035    import sys
1036    import xmlrunner  # type: ignore
1037
1038    suite = unittest.TestSuite()
1039    suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(sys.modules[__name__]))
1040
1041    runner = xmlrunner.XMLTestRunner(output='logs')
1042    result = runner.run(suite)
1043    return 1 if result.failures or result.errors else 0
1044
1045
1046############################################################################
1047# usage demo
1048############################################################################
1049
1050def _eval_demo_1d(resolution, title):
1051    import sys
1052    from sasmodels import core
1053    from sasmodels import direct_model
1054    name = sys.argv[1] if len(sys.argv) > 1 else 'cylinder'
1055
1056    if name == 'cylinder':
1057        pars = {'length':210, 'radius':500, 'background': 0}
1058    elif name == 'teubner_strey':
1059        pars = {'a2':0.003, 'c1':-1e4, 'c2':1e10, 'background':0.312643}
1060    elif name == 'sphere' or name == 'spherepy':
1061        pars = TEST_PARS_SLIT_SPHERE
1062    elif name == 'ellipsoid':
1063        pars = {
1064            'scale':0.05, 'background': 0,
            'radius_polar':500, 'radius_equatorial':15000,
1066            'sld':6, 'sld_solvent': 1,
1067            }
1068    else:
1069        pars = {}
1070    model_info = core.load_model_info(name)
1071    model = core.build_model(model_info)
1072
1073    kernel = model.make_kernel([resolution.q_calc])
1074    theory = direct_model.call_kernel(kernel, pars)
1075    Iq = resolution.apply(theory)
1076
1077    if isinstance(resolution, Slit1D):
        width, height = resolution.qx_width, resolution.qy_width
1079        Iq_romb = romberg_slit_1d(resolution.q, width, height, model, pars)
1080    else:
1081        dq = resolution.q_width
1082        Iq_romb = romberg_pinhole_1d(resolution.q, dq, model, pars)
1083
1084    import matplotlib.pyplot as plt  # type: ignore
1085    plt.loglog(resolution.q_calc, theory, label='unsmeared')
1086    plt.loglog(resolution.q, Iq, label='smeared', hold=True)
1087    plt.loglog(resolution.q, Iq_romb, label='romberg smeared', hold=True)
1088    plt.legend()
1089    plt.title(title)
1090    plt.xlabel("Q (1/Ang)")
1091    plt.ylabel("I(Q) (1/cm)")
1092
1093def demo_pinhole_1d():
1094    """
1095    Show example of pinhole smearing.
1096    """
1097    q = np.logspace(-4, np.log10(0.2), 400)
1098    q_width = 0.1*q
1099    resolution = Pinhole1D(q, q_width)
1100    _eval_demo_1d(resolution, title="10% dQ/Q Pinhole Resolution")
1101
1102def demo_slit_1d():
1103    """
1104    Show example of slit smearing.
1105    """
1106    q = np.logspace(-4, np.log10(0.2), 100)
1107    w = h = 0.
1108    #w = 0.000000277790
1109    w = 0.0277790
1110    #h = 0.00277790
1111    #h = 0.0277790
1112    resolution = Slit1D(q, w, h)
1113    _eval_demo_1d(resolution, title="(%g,%g) Slit Resolution"%(w, h))
1114
1115def demo():
1116    """
1117    Run the resolution demos.
1118    """
1119    import matplotlib.pyplot as plt  # type: ignore
1120    plt.subplot(121)
1121    demo_pinhole_1d()
1122    #plt.yscale('linear')
1123    plt.subplot(122)
1124    demo_slit_1d()
1125    #plt.yscale('linear')
1126    plt.show()
1127
1128
1129if __name__ == "__main__":
1130    #demo()
1131    main()