source: sasmodels/sasmodels/resolution.py @ 2ccb775

1"""
2Define the resolution functions for the data.
3
4This defines classes for 1D and 2D resolution calculations.
5"""
6from __future__ import division
7
8from scipy.special import erf  # type: ignore
9from numpy import sqrt, log, log10, exp, pi  # type: ignore
10import numpy as np  # type: ignore
11
12from sasmodels import sesans
13
14__all__ = ["Resolution", "Perfect1D", "Pinhole1D", "Slit1D",
15           "apply_resolution_matrix", "pinhole_resolution", "slit_resolution",
16           "pinhole_extend_q", "slit_extend_q", "bin_edges",
17           "interpolate", "linear_extrapolation", "geometric_extrapolation",
18          ]
19
20MINIMUM_RESOLUTION = 1e-8
21
22
23# When extrapolating to -q, what is the minimum positive q relative to q_min
24# that we wish to calculate?
25MIN_Q_SCALE_FOR_NEGATIVE_Q_EXTRAPOLATION = 0.01
26
27class Resolution(object):
28    """
29    Abstract base class defining a 1D resolution function.
30
31    *q* is the set of q values at which the data is measured.
32
33    *q_calc* is the set of q values at which the theory needs to be evaluated.
34    This may extend and interpolate the q values.
35
36    *apply* is the method to call with I(q_calc) to compute the resolution
37    smeared theory I(q).
38    """
39    q = None  # type: np.ndarray
40    q_calc = None  # type: np.ndarray
41    def apply(self, theory):
42        """
43        Smear *theory* by the resolution function, returning *Iq*.
44        """
45        raise NotImplementedError("Subclass does not define the apply function")
46
47class Perfect1D(Resolution):
48    """
49    Resolution function to use when there is no actual resolution smearing
50    to be applied.  It has the same interface as the other resolution
51    functions, but returns the identity function.
52    """
53    def __init__(self, q):
54        self.q_calc = self.q = q
55
56    def apply(self, theory):
57        return theory
58
59
class SESANS1D(Resolution):
    """
    Resolution function for SESANS data.

    *data* is the measured data, with *data.x* the points at which it is
    measured.  *H0* and *H* are the Hankel transform matrices computed for
    the data, and *q_calc* is the set of q values at which the theory is
    evaluated.
    """
    def __init__(self, data, H0, H, q_calc):
        self.q = data.x
        self.H0 = H0
        self.H = H
        self.data = data
        self.q_calc = q_calc

    def apply(self, theory):
        return sesans.hankeltrafo(self.H0, self.H, theory)

70"""
71    def __init__(self, data, q_calc):
72        self.q = data.x
73        self.data = data
74        self.q_calc = q_calc
75
76    def apply(self, theory):
77        return sesans.transform(self.data, self.q_calc, theory, None, None)
78"""
class Pinhole1D(Resolution):
    r"""
    Pinhole aperture with q-dependent gaussian resolution.

    *q* points at which the data is measured.

    *q_width* gaussian 1-sigma resolution at each data point.

    *q_calc* is the list of points to calculate, or None if this should
    be estimated from the *q* and *q_width*.
    """
    def __init__(self, q, q_width, q_calc=None, nsigma=3):
        #*min_step* is the minimum point spacing to use when computing the
        #underlying model.  It should be on the order of
        #$\tfrac{1}{10}\tfrac{2\pi}{d_\text{max}}$ to make sure that fringes
        #are computed with sufficient density to avoid aliasing effects.

        # Protect against calls with q_width=0.  The extend_q function will
        # not extend the q if q_width is 0, but q_width must be non-zero when
        # constructing the weight matrix to avoid division by zero errors.
        # In practice this should never be needed, since resolution should
        # default to Perfect1D if the pinhole geometry is not defined.
        self.q, self.q_width = q, q_width
        self.q_calc = (pinhole_extend_q(q, q_width, nsigma=nsigma)
                       if q_calc is None else np.sort(q_calc))
        self.weight_matrix = pinhole_resolution(self.q_calc, self.q,
                                np.maximum(q_width, MINIMUM_RESOLUTION))

    def apply(self, theory):
        return apply_resolution_matrix(self.weight_matrix, theory)

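# Example (sketch, not part of the library API): a typical pinhole smearing
# workflow.  `sphere_iq` stands in for any function returning I(q) for an
# array of q values; it is not defined in this module.
#
#     q = np.logspace(-3, -1, 100)      # measurement points
#     dq = 0.05*q                       # 5% dQ/Q gaussian 1-sigma resolution
#     res = Pinhole1D(q, dq)            # builds q_calc and the weight matrix
#     theory = sphere_iq(res.q_calc)    # evaluate the model on the fine grid
#     Iq_smeared = res.apply(theory)    # smeared I(q), same length as q
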
class Slit1D(Resolution):
    """
    Slit aperture with resolution function.

    *q* points at which the data is measured.

    *qx_width* slit width in qx

    *qy_width* slit height in qy

    *q_calc* is the list of points to calculate, or None if this should
    be estimated from *q*, *qx_width* and *qy_width*.

    The *weight_matrix* is computed by :func:`slit_resolution`.
    """
    def __init__(self, q, qx_width, qy_width=0., q_calc=None):
        # Remember what qx_width/qy_width was used even though we won't need
        # them after the weight matrix is constructed
        self.qx_width, self.qy_width = qx_width, qy_width

        # Allow independent resolution on each point even though it is not
        # needed in practice.
        if np.isscalar(qx_width):
            qx_width = np.ones(len(q))*qx_width
        else:
            qx_width = np.asarray(qx_width)
        if np.isscalar(qy_width):
            qy_width = np.ones(len(q))*qy_width
        else:
            qy_width = np.asarray(qy_width)

        self.q = q.flatten()
        self.q_calc = slit_extend_q(q, qx_width, qy_width) \
            if q_calc is None else np.sort(q_calc)
        self.weight_matrix = \
            slit_resolution(self.q_calc, self.q, qx_width, qy_width)

    def apply(self, theory):
        return apply_resolution_matrix(self.weight_matrix, theory)


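# Example (sketch): slit smearing with a fixed beam width, similar to the
# settings exercised in the tests below.  `model_iq` is a hypothetical I(q)
# function, not part of this module.
#
#     q = np.logspace(-4, -2, 50)
#     res = Slit1D(q, qx_width=0.117, qy_width=0.)
#     Iq_smeared = res.apply(model_iq(res.q_calc))
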
def apply_resolution_matrix(weight_matrix, theory):
    """
    Apply the resolution weight matrix to the computed theory function.
    """
    #print("apply shapes", theory.shape, weight_matrix.shape)
    Iq = np.dot(theory[None, :], weight_matrix)
    #print("result shape",Iq.shape)
    return Iq.flatten()


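# Shape convention (sketch): *theory* has length len(q_calc) and the weight
# matrix has shape (len(q_calc), len(q)), so the product is a row vector that
# flattens to the len(q) smeared values:
#
#     W = pinhole_resolution(q_calc, q, dq)        # (len(q_calc), len(q))
#     Iq = apply_resolution_matrix(W, theory)      # len(q)
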
def pinhole_resolution(q_calc, q, q_width):
    """
    Compute the convolution matrix *W* for pinhole resolution 1-D data.

    Each column *W[:,i]* determines the normalized weight that the
    corresponding points *q_calc* contribute to the resolution smeared point
    *q[i]*.  Given *W*, the resolution smearing can be computed using
    *dot(I(q_calc), W)*.

    *q_calc* must be increasing.  *q_width* must be greater than zero.
    """
    # The current algorithm is a midpoint rectangle rule.  In the test case,
    # neither trapezoid nor Simpson's rule improved the accuracy.
    edges = bin_edges(q_calc)
    edges[edges < 0.0] = 0.0 # clip edges below zero
    cdf = erf((edges[:, None] - q[None, :]) / (sqrt(2.0)*q_width)[None, :])
    weights = cdf[1:] - cdf[:-1]
    weights /= np.sum(weights, axis=0)[None, :]
    return weights


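# Sanity check (sketch, assumed values): each column of the weight matrix is
# a normalized gaussian, so smearing a constant function returns the same
# constant.
#
#     q_calc = np.linspace(0.001, 0.1, 500)
#     q, dq = np.array([0.01, 0.05]), np.array([0.002, 0.005])
#     W = pinhole_resolution(q_calc, q, dq)
#     assert np.allclose(W.sum(axis=0), 1.0)
#     assert np.allclose(apply_resolution_matrix(W, np.ones_like(q_calc)), 1.0)
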
def slit_resolution(q_calc, q, width, height, n_height=30):
    r"""
    Build a weight matrix to compute *I_s(q)* from *I(q_calc)*, given
    $q_\perp$ = *width* and $q_\parallel$ = *height*.  *n_height* is
    the number of steps to use in the integration over $q_\parallel$
    when both $q_\perp$ and $q_\parallel$ are non-zero.

    Each $q$ can have an independent width and height value even though
    current instruments use the same slit setting for all measured points.

    If slit height is large relative to width, use:

    .. math::

        I_s(q_i) = \frac{1}{\Delta q_\perp}
            \int_0^{\Delta q_\perp}
                I\left(\sqrt{q_i^2 + q_\perp^2}\right) \,dq_\perp

    If slit width is large relative to height, use:

    .. math::

        I_s(q_i) = \frac{1}{2 \Delta q_\parallel}
            \int_{-\Delta q_\parallel}^{\Delta q_\parallel}
                I\left(|q_i + q_\parallel|\right) \,dq_\parallel

    For a mixture of slit width and height use:

    .. math::

        I_s(q_i) = \frac{1}{2 \Delta q_\parallel \Delta q_\perp}
            \int_{-\Delta q_\parallel}^{\Delta q_\parallel}
            \int_0^{\Delta q_\perp}
                I\left(\sqrt{(q_i + q_\parallel)^2 + q_\perp^2}\right)
                \,dq_\perp dq_\parallel

    **Definition**

    We are using the mid-point integration rule to assign weights to each
    element of a weight matrix $W$ so that

    .. math::

        I_s(q) = W\,I(q_\text{calc})

    If *q_calc* is at the mid-point, we can infer the bin edges from the
    pairwise averages of *q_calc*, adding the missing edges before
    *q_calc[0]* and after *q_calc[-1]*.

    For $q_\parallel = 0$, the smeared value can be computed numerically
    using the $u$ substitution

    .. math::

        u_j = \sqrt{q_j^2 - q^2}

    This gives

    .. math::

        I_s(q) \approx \sum_j I(u_j) \Delta u_j

    where $I(u_j)$ is the value at the mid-point, and $\Delta u_j$ is the
    difference between consecutive edges which have been first converted
    to $u$.  Only $u_j \in [0, \Delta q_\perp]$ are used, which corresponds
    to $q_j \in \left[q, \sqrt{q^2 + \Delta q_\perp^2}\right]$, so

    .. math::

        W_{ij} = \frac{1}{\Delta q_\perp} \Delta u_j
               = \frac{1}{\Delta q_\perp} \left(
                    \sqrt{q_{j+1}^2 - q_i^2} - \sqrt{q_j^2 - q_i^2} \right)
            \ \text{if}\  q_j \in \left[q_i, \sqrt{q_i^2 + q_\perp^2}\right]

    where $I_s(q_i)$ is the theory function being computed and $q_j$ are the
    mid-points between the calculated values in *q_calc*.  We tweak the
    edges of the initial and final intervals so that they lie on the
    integration limits.

    (To be precise, the transformed midpoint $u(q_j)$ is not necessarily the
    midpoint of the edges $u((q_{j-1}+q_j)/2)$ and $u((q_j + q_{j+1})/2)$,
    but it is at least in the interval, so the approximation is going to be
    a little better than the left or right Riemann sum, and should be
    good enough for our purposes.)

    For $q_\perp = 0$, the $u$ substitution is simpler:

    .. math::

        u_j = \left|q_j - q\right|

    so

    .. math::

        W_{ij} = \frac{1}{2 \Delta q_\parallel} \Delta u_j
            = \frac{1}{2 \Delta q_\parallel} (q_{j+1} - q_j)
            \ \text{if}\ q_j \in
                \left[q-\Delta q_\parallel, q+\Delta q_\parallel\right]

    However, we need to support cases where $u_j < 0$, which means using
    $2 (q_{j+1} - q_j)$ when $q_j \in \left[0, q_\parallel-q_i\right]$.
    This is not an issue for $q_i > q_\parallel$.

    For both $q_\perp > 0$ and $q_\parallel > 0$ we perform a 2 dimensional
    integration with

    .. math::

        u_{jk} = \sqrt{q_j^2 - (q + (k\Delta q_\parallel/L))^2}
            \ \text{for}\ k = -L \ldots L

    for $L$ = *n_height*.  This gives

    .. math::

        W_{ij} = \frac{1}{2 \Delta q_\perp \Delta q_\parallel}
            \sum_{k=-L}^L \Delta u_{jk}
                \left(\frac{2 \Delta q_\parallel}{2 L + 1}\right)


    """
    #np.set_printoptions(precision=6, linewidth=10000)

    # The current algorithm is a midpoint rectangle rule.
    q_edges = bin_edges(q_calc) # Note: requires q > 0
    q_edges[q_edges < 0.0] = 0.0 # clip edges below zero
    weights = np.zeros((len(q), len(q_calc)), 'd')

    #print(q_calc)
    for i, (qi, w, h) in enumerate(zip(q, width, height)):
        if w == 0. and h == 0.:
            # Perfect resolution, so return the theory value directly.
            # Note: assumes that q is a subset of q_calc.  If qi need not be
            # in q_calc, then we can do a weighted interpolation by looking
            # up qi in q_calc, then weighting the result by the relative
            # distance to the neighbouring points.
            weights[i, :] = (q_calc == qi)
        elif h == 0:
            weights[i, :] = _q_perp_weights(q_edges, qi, w)
        elif w == 0:
            in_x = 1.0 * ((q_calc >= qi-h) & (q_calc <= qi+h))
            abs_x = 1.0*(q_calc < abs(qi - h)) if qi < h else 0.
            #print(qi - h, qi + h)
            #print(in_x + abs_x)
            weights[i, :] = (in_x + abs_x) * np.diff(q_edges) / (2*h)
        else:
            for k in range(-n_height, n_height+1):
                weights[i, :] += _q_perp_weights(q_edges, qi+k*h/n_height, w)
            weights[i, :] /= 2*n_height + 1

    return weights.T


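# Consistency check (sketch, assumed values): for a width-only slit each
# column of the weight matrix integrates the box [0, Delta q_perp] via the
# u substitution, so the column sums should be close to 1 when q_calc spans
# the full integration range.
#
#     q = np.linspace(0.01, 0.1, 10)
#     w, h = np.full_like(q, 0.117), np.zeros_like(q)
#     W = slit_resolution(slit_extend_q(q, w, h), q, w, h)
#     print(W.sum(axis=0))   # expect values near 1
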
def _q_perp_weights(q_edges, qi, w):
    # Convert bin edges from q to u
    u_limit = np.sqrt(qi**2 + w**2)
    u_edges = q_edges**2 - qi**2
    u_edges[q_edges < abs(qi)] = 0.
    u_edges[q_edges > u_limit] = u_limit**2 - qi**2
    weights = np.diff(np.sqrt(u_edges))/w
    #print("i, qi",i,qi,qi+width)
    #print(q_calc)
    #print(weights)
    return weights


def pinhole_extend_q(q, q_width, nsigma=3):
    """
    Given *q* and *q_width*, find a set of sampling points *q_calc* so
    that each point $I(q)$ has sufficient support from the underlying
    function.
    """
    q_min, q_max = np.min(q - nsigma*q_width), np.max(q + nsigma*q_width)
    return linear_extrapolation(q, q_min, q_max)


def slit_extend_q(q, width, height):
    """
    Given *q*, *width* and *height*, find a set of sampling points *q_calc* so
    that each point I(q) has sufficient support from the underlying
    function.
    """
    q_min, q_max = np.min(q-height), np.max(np.sqrt((q+height)**2 + width**2))

    return geometric_extrapolation(q, q_min, q_max)


def bin_edges(x):
    """
    Determine bin edges from bin centers, assuming that edges are centered
    between the bins.

    Note: this uses the arithmetic mean, which may not be appropriate for
    log-scaled data.
    """
    if len(x) < 2 or (np.diff(x) < 0).any():
        raise ValueError("Expected bins to be an increasing set")
    edges = np.hstack([
        x[0]  - 0.5*(x[1]  - x[0]),  # first point minus half first interval
        0.5*(x[1:] + x[:-1]),        # mid points of all central intervals
        x[-1] + 0.5*(x[-1] - x[-2]), # last point plus half last interval
        ])
    return edges


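# Example (sketch): edges fall halfway between the bin centers, with the end
# bins extended symmetrically.
#
#     bin_edges(np.array([1., 2., 3., 4.]))
#     # -> array([0.5, 1.5, 2.5, 3.5, 4.5])
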
def interpolate(q, max_step):
    """
    Returns *q_calc* with points spaced at most max_step apart.
    """
    step = np.diff(q)
    index = step > max_step
    if np.any(index):
        inserts = []
        for q_i, step_i in zip(q[:-1][index], step[index]):
            n = np.ceil(step_i/max_step)
            inserts.extend(q_i + np.arange(1, n)*(step_i/n))
        # Extend a couple of fringes beyond the end of the data
        inserts.extend(q[-1] + np.arange(1, 8)*max_step)
        q_calc = np.sort(np.hstack((q, inserts)))
    else:
        q_calc = q
    return q_calc


def linear_extrapolation(q, q_min, q_max):
    """
    Extrapolate *q* out to [*q_min*, *q_max*] using the step size in *q* as
    a guide.  Extrapolation below uses about the same size as the first
    interval.  Extrapolation above uses about the same size as the final
    interval.

    If *q_min* is zero or less, a small positive multiple of *q[0]* is used
    instead (see *MIN_Q_SCALE_FOR_NEGATIVE_Q_EXTRAPOLATION*).
    """
    q = np.sort(q)
    if q_min + 2*MINIMUM_RESOLUTION < q[0]:
        # Don't extrapolate to zero or negative q; use a small multiple of
        # q[0] instead, matching geometric_extrapolation below.
        if q_min <= 0: q_min = q[0]*MIN_Q_SCALE_FOR_NEGATIVE_Q_EXTRAPOLATION
        n_low = int(np.ceil((q[0]-q_min) / (q[1]-q[0]))) if q[1] > q[0] else 15
        q_low = np.linspace(q_min, q[0], n_low+1)[:-1]
    else:
        q_low = []
    if q_max - 2*MINIMUM_RESOLUTION > q[-1]:
        n_high = int(np.ceil((q_max-q[-1]) / (q[-1]-q[-2]))) if q[-1] > q[-2] else 15
        q_high = np.linspace(q[-1], q_max, n_high+1)[1:]
    else:
        q_high = []
    return np.concatenate([q_low, q, q_high])


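# Example (sketch): extend a coarse grid on both sides using roughly the
# spacing of the end intervals.
#
#     q = np.array([0.1, 0.2, 0.3])
#     linear_extrapolation(q, 0.05, 0.5)
#     # -> [0.05, 0.1, 0.2, 0.3, 0.4, 0.5]
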
def geometric_extrapolation(q, q_min, q_max, points_per_decade=None):
    r"""
    Extrapolate *q* to [*q_min*, *q_max*] using geometric steps, with the
    average geometric step size in *q* as the step size.

    If *q_min* is zero or less, a small positive multiple of *q[0]* is used
    instead (see *MIN_Q_SCALE_FOR_NEGATIVE_Q_EXTRAPOLATION*).

    *points_per_decade* sets the ratio between consecutive steps such
    that there will be $n$ points used for every factor of 10 increase
    in *q*.

    If *points_per_decade* is not given, it will be estimated as follows.
    Starting at $q_1$ and stepping geometrically by $\Delta q$ to $q_n$
    in $n$ points gives an average geometric step of:

    .. math::

         \log \Delta q = (\log q_n - \log q_1) / (n - 1)

    From this we can compute the number of steps required to extend $q$
    from $q_n$ to $q_\text{max}$ by $\Delta q$ as:

    .. math::

         n_\text{extend} = (\log q_\text{max} - \log q_n) / \log \Delta q

    Substituting:

    .. math::

         n_\text{extend} = (n-1) (\log q_\text{max} - \log q_n)
            / (\log q_n - \log q_1)
    """
    q = np.sort(q)
    # log_delta_q is the number of points per unit log q (i.e., 1/log(step)),
    # estimated from the data or derived from points_per_decade.
    if points_per_decade is None:
        log_delta_q = (len(q) - 1) / (log(q[-1]) - log(q[0]))
    else:
        log_delta_q = points_per_decade / log(10.)
    if q_min < q[0]:
        if q_min <= 0: q_min = q[0]*MIN_Q_SCALE_FOR_NEGATIVE_Q_EXTRAPOLATION
        n_low = log_delta_q * (log(q[0])-log(q_min))
        q_low = np.logspace(log10(q_min), log10(q[0]), int(np.ceil(n_low))+1)[:-1]
    else:
        q_low = []
    if q_max > q[-1]:
        n_high = log_delta_q * (log(q_max)-log(q[-1]))
        q_high = np.logspace(log10(q[-1]), log10(q_max), int(np.ceil(n_high))+1)[1:]
    else:
        q_high = []
    return np.concatenate([q_low, q, q_high])


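# Example (sketch): extend the grid a decade above the data using 5 points
# per decade.
#
#     q = np.array([0.01, 0.1])
#     geometric_extrapolation(q, 0.01, 1.0, points_per_decade=5)
#     # -> q followed by ~5 logarithmically spaced points up to 1.0
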
############################################################################
# unit tests
############################################################################
import unittest


def eval_form(q, form, pars):
    """
    Return the SAS model evaluated at *q*.

    *form* is the SAS model returned from :func:`core.load_model`.

    *pars* are the parameter values to use when evaluating.
    """
    from sasmodels import direct_model
    kernel = form.make_kernel([q])
    theory = direct_model.call_kernel(kernel, pars)
    kernel.release()
    return theory


def gaussian(q, q0, dq):
    """
    Return the Gaussian resolution function.

    *q0* is the center, *dq* is the width and *q* are the points to evaluate.
    """
    return exp(-0.5*((q-q0)/dq)**2)/(sqrt(2*pi)*dq)


def romberg_slit_1d(q, width, height, form, pars):
    """
    Romberg integration for slit resolution.

    This is an adaptive integration technique.  It is called with settings
    that make it slow to evaluate but give it good accuracy.
    """
    from scipy.integrate import romberg  # type: ignore

    par_set = set([p.name for p in form.info.parameters.call_parameters])
    if any(k not in par_set for k in pars.keys()):
        extra = set(pars.keys()) - par_set
        raise ValueError("bad parameters: [%s] not in [%s]"
                         % (", ".join(sorted(extra)),
                            ", ".join(sorted(pars.keys()))))

    if np.isscalar(width):
        width = [width]*len(q)
    if np.isscalar(height):
        height = [height]*len(q)
    _int_w = lambda w, qi: eval_form(sqrt(qi**2 + w**2), form, pars)
    _int_h = lambda h, qi: eval_form(abs(qi+h), form, pars)
    # If both width and height are defined, then it is too slow to use dblquad.
    # Instead use trapz on a fixed grid, interpolated into the I(Q) for
    # the extended Q range.
    #_int_wh = lambda w, h, qi: eval_form(sqrt((qi+h)**2 + w**2), form, pars)
    q_calc = slit_extend_q(q, np.asarray(width), np.asarray(height))
    Iq = eval_form(q_calc, form, pars)
    result = np.empty(len(q))
    for i, (qi, w, h) in enumerate(zip(q, width, height)):
        if h == 0.:
            total = romberg(_int_w, 0, w, args=(qi,),
                            divmax=100, vec_func=True, tol=0, rtol=1e-8)
            result[i] = total/w
        elif w == 0.:
            total = romberg(_int_h, -h, h, args=(qi,),
                            divmax=100, vec_func=True, tol=0, rtol=1e-8)
            result[i] = total/(2*h)
        else:
            w_grid = np.linspace(0, w, 21)[None, :]
            h_grid = np.linspace(-h, h, 23)[:, None]
            u_sub = sqrt((qi+h_grid)**2 + w_grid**2)
            f_at_u = np.interp(u_sub, q_calc, Iq)
            #print(np.trapz(Iu, w_grid, axis=1))
            total  = np.trapz(np.trapz(f_at_u, w_grid, axis=1), h_grid[:, 0])
            result[i] = total / (2*h*w)
            # from scipy.integrate import dblquad
            # r, err = dblquad(_int_wh, -h, h, lambda h: 0., lambda h: w,
            #                  args=(qi,))
            # result[i] = r/(w*2*h)

    # r should be [float, ...], but it is [array([float]), array([float]),...]
    return result


def romberg_pinhole_1d(q, q_width, form, pars, nsigma=5):
    """
    Romberg integration for pinhole resolution.

    This is an adaptive integration technique.  It is called with settings
    that make it slow to evaluate but give it good accuracy.
    """
    from scipy.integrate import romberg  # type: ignore

    par_set = set([p.name for p in form.info.parameters.call_parameters])
    if any(k not in par_set for k in pars.keys()):
        extra = set(pars.keys()) - par_set
        raise ValueError("bad parameters: [%s] not in [%s]"
                         % (", ".join(sorted(extra)),
                            ", ".join(sorted(pars.keys()))))

    func = lambda q, q0, dq: eval_form(q, form, pars)*gaussian(q, q0, dq)
    total = [romberg(func, max(qi-nsigma*dqi, 1e-10*q[0]), qi+nsigma*dqi,
                     args=(qi, dqi), divmax=100, vec_func=True,
                     tol=0, rtol=1e-8)
             for qi, dqi in zip(q, q_width)]
    return np.asarray(total).flatten()


class ResolutionTest(unittest.TestCase):
    """
    Test the resolution calculations.
    """

    def setUp(self):
        self.x = 0.001*np.arange(1, 11)
        self.y = self.Iq(self.x)

    def Iq(self, q):
        "Linear function for resolution unit test"
        return 12.0 - 1000.0*q

    def test_perfect(self):
        """
        Perfect resolution and no smearing.
        """
        resolution = Perfect1D(self.x)
        theory = self.Iq(resolution.q_calc)
        output = resolution.apply(theory)
        np.testing.assert_equal(output, self.y)

    def test_slit_zero(self):
        """
        Slit smearing with perfect resolution.
        """
        resolution = Slit1D(self.x, qx_width=0, qy_width=0, q_calc=self.x)
        theory = self.Iq(resolution.q_calc)
        output = resolution.apply(theory)
        np.testing.assert_equal(output, self.y)

    @unittest.skip("not yet supported")
    def test_slit_high(self):
        """
        Slit smearing with height 0.005
        """
        resolution = Slit1D(self.x, qx_width=0, qy_width=0.005, q_calc=self.x)
        theory = self.Iq(resolution.q_calc)
        output = resolution.apply(theory)
        answer = [
            9.0618, 8.6402, 8.1187, 7.1392, 6.1528,
            5.5555, 4.5584, 3.5606, 2.5623, 2.0000,
            ]
        np.testing.assert_allclose(output, answer, atol=1e-4)

    @unittest.skip("not yet supported")
    def test_slit_both_high(self):
        """
        Slit smearing with width < 100*height.
        """
        q = np.logspace(-4, -1, 10)
        resolution = Slit1D(q, qx_width=0.2, qy_width=np.inf)
        theory = 1000*self.Iq(resolution.q_calc**4)
        output = resolution.apply(theory)
        answer = [
            8.85785, 8.43012, 7.92687, 6.94566, 6.03660,
            5.40363, 4.40655, 3.40880, 2.41058, 2.00000,
            ]
        np.testing.assert_allclose(output, answer, atol=1e-4)

    @unittest.skip("not yet supported")
    def test_slit_wide(self):
        """
        Slit smearing with width 0.0002
        """
        resolution = Slit1D(self.x, qx_width=0.0002, qy_width=0, q_calc=self.x)
        theory = self.Iq(resolution.q_calc)
        output = resolution.apply(theory)
        answer = [
            11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0,
            ]
        np.testing.assert_allclose(output, answer, atol=1e-4)

    @unittest.skip("not yet supported")
    def test_slit_both_wide(self):
        """
        Slit smearing with width > 100*height.
        """
        resolution = Slit1D(self.x, qx_width=0.0002, qy_width=0.000001,
                            q_calc=self.x)
        theory = self.Iq(resolution.q_calc)
        output = resolution.apply(theory)
        answer = [
            11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0,
            ]
        np.testing.assert_allclose(output, answer, atol=1e-4)

    def test_pinhole_zero(self):
        """
        Pinhole smearing with perfect resolution
        """
        resolution = Pinhole1D(self.x, 0.0*self.x)
        theory = self.Iq(resolution.q_calc)
        output = resolution.apply(theory)
        np.testing.assert_equal(output, self.y)

    def test_pinhole(self):
        """
        Pinhole smearing with dQ = 0.001 [Note: not dQ/Q = 0.001]
        """
        resolution = Pinhole1D(self.x, 0.001*np.ones_like(self.x),
                               q_calc=self.x)
        theory = 12.0-1000.0*resolution.q_calc
        output = resolution.apply(theory)
        answer = [
            10.44785079, 9.84991299, 8.98101708,
            7.99906585, 6.99998311, 6.00001689,
            5.00093415, 4.01898292, 3.15008701, 2.55214921,
            ]
        np.testing.assert_allclose(output, answer, atol=1e-8)


class IgorComparisonTest(unittest.TestCase):
    """
    Test resolution calculations against those returned by Igor.
    """

    def setUp(self):
        self.pars = TEST_PARS_PINHOLE_SPHERE
        from sasmodels import core
        self.model = core.load_model("sphere", dtype='double')

    def _eval_sphere(self, pars, resolution):
        from sasmodels import direct_model
        kernel = self.model.make_kernel([resolution.q_calc])
        theory = direct_model.call_kernel(kernel, pars)
        result = resolution.apply(theory)
        kernel.release()
        return result

    def _compare(self, q, output, answer, tolerance):
        #err = (output - answer)/answer
        #idx = abs(err) >= tolerance
        #problem = zip(q[idx], output[idx], answer[idx], err[idx])
        #print("\n".join(str(v) for v in problem))
        np.testing.assert_allclose(output, answer, rtol=tolerance)

    def test_perfect(self):
        """
        Compare sphere model with NIST Igor SANS, no resolution smearing.
        """
        pars = TEST_PARS_SLIT_SPHERE
        data_string = TEST_DATA_SLIT_SPHERE

        data = np.loadtxt(data_string.split('\n')).T
        q, _, answer, _ = data
        resolution = Perfect1D(q)
        output = self._eval_sphere(pars, resolution)
        self._compare(q, output, answer, 1e-6)

    def test_pinhole(self):
        """
        Compare pinhole resolution smearing with NIST Igor SANS
        """
        pars = TEST_PARS_PINHOLE_SPHERE
        data_string = TEST_DATA_PINHOLE_SPHERE

        data = np.loadtxt(data_string.split('\n')).T
        q, q_width, answer = data
        resolution = Pinhole1D(q, q_width)
        output = self._eval_sphere(pars, resolution)
        # TODO: relative error should be lower
        self._compare(q, output, answer, 3e-4)

    def test_pinhole_romberg(self):
        """
        Compare pinhole resolution smearing with romberg integration result.
        """
        pars = TEST_PARS_PINHOLE_SPHERE
        data_string = TEST_DATA_PINHOLE_SPHERE
        pars['radius'] *= 5

        data = np.loadtxt(data_string.split('\n')).T
        q, q_width, answer = data
        answer = romberg_pinhole_1d(q, q_width, self.model, pars)
        ## Getting 0.1% requires 5 sigma and 200 points per fringe
        #q_calc = interpolate(pinhole_extend_q(q, q_width, nsigma=5),
        #                     2*np.pi/pars['radius']/200)
        #tol = 0.001
        ## The default 3 sigma and no extra points gets 1%
        q_calc = None  # type: np.ndarray
        tol = 0.01
        resolution = Pinhole1D(q, q_width, q_calc=q_calc)
        output = self._eval_sphere(pars, resolution)
        if 0: # debug plot
            import matplotlib.pyplot as plt  # type: ignore
            resolution = Perfect1D(q)
            source = self._eval_sphere(pars, resolution)
            plt.loglog(q, source, '.')
            plt.loglog(q, answer, '-', hold=True)
            plt.loglog(q, output, '-', hold=True)
            plt.show()
        self._compare(q, output, answer, tol)

    def test_slit(self):
        """
        Compare slit resolution smearing with NIST Igor SANS
        """
        pars = TEST_PARS_SLIT_SPHERE
        data_string = TEST_DATA_SLIT_SPHERE

        data = np.loadtxt(data_string.split('\n')).T
        q, delta_qv, _, answer = data
        resolution = Slit1D(q, qx_width=delta_qv, qy_width=0)
        output = self._eval_sphere(pars, resolution)
        # TODO: eliminate Igor test since it is too inaccurate to be useful.
        # This means we can eliminate the test data as well, and instead
        # use a generated q vector.
        self._compare(q, output, answer, 0.5)

    def test_slit_romberg(self):
        """
        Compare slit resolution smearing with romberg integration result.
        """
        pars = TEST_PARS_SLIT_SPHERE
        data_string = TEST_DATA_SLIT_SPHERE

        data = np.loadtxt(data_string.split('\n')).T
        q, delta_qv, _, answer = data
        answer = romberg_slit_1d(q, delta_qv, 0., self.model, pars)
        q_calc = slit_extend_q(interpolate(q, 2*np.pi/pars['radius']/20),
                               delta_qv[0], 0.)
        resolution = Slit1D(q, qx_width=delta_qv, qy_width=0, q_calc=q_calc)
        output = self._eval_sphere(pars, resolution)
        # TODO: relative error should be lower
        self._compare(q, output, answer, 0.025)

    def test_ellipsoid(self):
        """
        Compare romberg integration for ellipsoid model.
        """
        from .core import load_model
        pars = {
            'scale':0.05,
            'radius_polar':500, 'radius_equatorial':15000,
            'sld':6, 'sld_solvent': 1,
            }
        form = load_model('ellipsoid', dtype='double')
        q = np.logspace(log10(4e-5), log10(2.5e-2), 68)
        width, height = 0.117, 0.
        resolution = Slit1D(q, qx_width=width, qy_width=height)
        answer = romberg_slit_1d(q, width, height, form, pars)
        output = resolution.apply(eval_form(resolution.q_calc, form, pars))
        # TODO: 10% is too much error; use better algorithm
        #print(np.max(abs(answer-output)/answer))
        self._compare(q, output, answer, 0.1)

    #TODO: can sas q spacing be too sparse for the resolution calculation?
    @unittest.skip("suppress sparse data test; not supported by current code")
    def test_pinhole_sparse(self):
        """
        Compare pinhole resolution smearing with NIST Igor SANS on sparse data
        """
        pars = TEST_PARS_PINHOLE_SPHERE
        data_string = TEST_DATA_PINHOLE_SPHERE

        data = np.loadtxt(data_string.split('\n')).T
        q, q_width, answer = data[:, ::20] # Take every nth point
        resolution = Pinhole1D(q, q_width)
        output = self._eval_sphere(pars, resolution)
        self._compare(q, output, answer, 1e-6)


# pinhole sphere parameters
TEST_PARS_PINHOLE_SPHERE = {
    'scale': 1.0, 'background': 0.01,
    'radius': 60.0, 'sld': 1, 'sld_solvent': 6.3,
    }
# Q, dQ, I(Q) calculated by NIST Igor SANS package
TEST_DATA_PINHOLE_SPHERE = """\
0.001278 0.0002847 2538.41176383
0.001562 0.0002905 2536.91820405
0.001846 0.0002956 2535.13182479
0.002130 0.0003017 2533.06217813
0.002414 0.0003087 2530.70378586
0.002698 0.0003165 2528.05024192
0.002982 0.0003249 2525.10408349
0.003266 0.0003340 2521.86667499
0.003550 0.0003437 2518.33907750
0.003834 0.0003539 2514.52246995
0.004118 0.0003646 2510.41798319
0.004402 0.0003757 2506.02690988
0.004686 0.0003872 2501.35067884
0.004970 0.0003990 2496.38678318
0.005253 0.0004112 2491.16237596
0.005537 0.0004237 2485.63911673
0.005821 0.0004365 2479.83657083
0.006105 0.0004495 2473.75676948
0.006389 0.0004628 2467.40145990
0.006673 0.0004762 2460.77293372
0.006957 0.0004899 2453.86724627
0.007241 0.0005037 2446.69623838
0.007525 0.0005177 2439.25775219
0.007809 0.0005318 2431.55421398
0.008093 0.0005461 2423.58785521
0.008377 0.0005605 2415.36158137
0.008661 0.0005750 2406.87009473
0.008945 0.0005896 2398.12841186
0.009229 0.0006044 2389.13360806
0.009513 0.0006192 2379.88958042
0.009797 0.0006341 2370.39776774
0.010080 0.0006491 2360.69528793
0.010360 0.0006641 2350.85169027
0.010650 0.0006793 2340.42023633
0.010930 0.0006945 2330.11206013
0.011220 0.0007097 2319.20109972
0.011500 0.0007251 2308.43503981
0.011780 0.0007404 2297.44820179
0.012070 0.0007558 2285.83853677
0.012350 0.0007713 2274.41290746
0.012640 0.0007868 2262.36219581
0.012920 0.0008024 2250.51169731
0.013200 0.0008180 2238.45596231
0.013490 0.0008336 2225.76495666
0.013770 0.0008493 2213.29618391
0.014060 0.0008650 2200.19110751
0.014340 0.0008807 2187.34050325
0.014620 0.0008965 2174.30529864
0.014910 0.0009123 2160.61632548
0.015190 0.0009281 2147.21038112
0.015470 0.0009440 2133.62023580
0.015760 0.0009598 2119.37907426
0.016040 0.0009757 2105.45234903
0.016330 0.0009916 2090.86319102
0.016610 0.0010080 2076.60576032
0.016890 0.0010240 2062.19214565
0.017180 0.0010390 2047.10550219
0.017460 0.0010550 2032.38715621
0.017740 0.0010710 2017.52560123
0.018030 0.0010880 2001.99124318
0.018310 0.0011040 1986.84662060
0.018600 0.0011200 1971.03389745
0.018880 0.0011360 1955.61395119
0.019160 0.0011520 1940.08291563
0.019450 0.0011680 1923.87672225
0.019730 0.0011840 1908.10656374
0.020020 0.0012000 1891.66297192
0.020300 0.0012160 1875.66789021
0.020580 0.0012320 1859.56357196
0.020870 0.0012490 1842.79468290
0.021150 0.0012650 1826.50064489
0.021430 0.0012810 1810.11533702
0.021720 0.0012970 1793.06840882
0.022000 0.0013130 1776.51153580
0.022280 0.0013290 1759.87201249
0.022570 0.0013460 1742.57354412
0.022850 0.0013620 1725.79397319
0.023140 0.0013780 1708.35831550
0.023420 0.0013940 1691.45256069
0.023700 0.0014110 1674.48561783
0.023990 0.0014270 1656.86525366
0.024270 0.0014430 1639.79847285
0.024550 0.0014590 1622.68887088
0.024840 0.0014760 1604.96421100
0.025120 0.0014920 1587.85768129
0.025410 0.0015080 1569.99297335
0.025690 0.0015240 1552.84580279
0.025970 0.0015410 1535.54074115
0.026260 0.0015570 1517.75249337
0.026540 0.0015730 1500.40115023
0.026820 0.0015900 1483.03632237
0.027110 0.0016060 1465.05942429
0.027390 0.0016220 1447.67682181
0.027670 0.0016390 1430.46495191
0.027960 0.0016550 1412.49232282
0.028240 0.0016710 1395.13182318
0.028520 0.0016880 1377.93439837
0.028810 0.0017040 1359.99528971
0.029090 0.0017200 1342.67274512
0.029370 0.0017370 1325.55375609
"""

# Slit sphere parameters
TEST_PARS_SLIT_SPHERE = {
    'scale': 0.01, 'background': 0.01,
    'radius': 60000, 'sld': 1, 'sld_solvent': 4,
    }
# Q dQ I(Q) I_smeared(Q)
TEST_DATA_SLIT_SPHERE = """\
2.26097e-05 0.117 5.5781372896e+09 1.4626077708e+06
2.53847e-05 0.117 5.0363141458e+09 1.3117318023e+06
2.81597e-05 0.117 4.4850108103e+09 1.1594863713e+06
3.09347e-05 0.117 3.9364658459e+09 1.0094881630e+06
3.37097e-05 0.117 3.4019975074e+09 8.6518941303e+05
3.92597e-05 0.117 2.4139519814e+09 6.0232158311e+05
4.48097e-05 0.117 1.5816877820e+09 3.8739994090e+05
5.03597e-05 0.117 9.3715407224e+08 2.2745304775e+05
5.59097e-05 0.117 4.8387917428e+08 1.2101295768e+05
6.14597e-05 0.117 2.0193586928e+08 6.0055107771e+04
6.70097e-05 0.117 5.5886110911e+07 3.2749521065e+04
7.25597e-05 0.117 3.7782348010e+06 2.6350963616e+04
7.81097e-05 0.117 5.3407817904e+06 2.9624963314e+04
8.36597e-05 0.117 2.7975485523e+07 3.4403962254e+04
8.92097e-05 0.117 4.9845448282e+07 3.6130017903e+04
9.47597e-05 0.117 6.0092588905e+07 3.3495107849e+04
1.00310e-04 0.117 5.6823430831e+07 2.7475726279e+04
1.05860e-04 0.117 4.3857024036e+07 2.0144282226e+04
1.11410e-04 0.117 2.7277144760e+07 1.3647403260e+04
1.22510e-04 0.117 3.3119334113e+06 6.6519711526e+03
1.33610e-04 0.117 1.4412859402e+06 6.9726212813e+03
1.44710e-04 0.117 8.5620162463e+06 8.1441335775e+03
1.55810e-04 0.117 9.6957429033e+06 6.4559996521e+03
1.66910e-04 0.117 4.3818341914e+06 3.6252154156e+03
1.78010e-04 0.117 2.7448997387e+05 2.4006505342e+03
1.89110e-04 0.117 8.0472009936e+05 2.8187789089e+03
2.00210e-04 0.117 2.8149552834e+06 3.0915662855e+03
2.11310e-04 0.117 2.7510907861e+06 2.3722530293e+03
2.22410e-04 0.117 1.0053133293e+06 1.4473468311e+03
2.33510e-04 0.117 5.8428305052e+03 1.2048540556e+03
2.44610e-04 0.117 5.1699305004e+05 1.4625670042e+03
2.55710e-04 0.117 1.2120227268e+06 1.5010705973e+03
2.66810e-04 0.117 9.7896842846e+05 1.1336343426e+03
2.77910e-04 0.117 2.5507264791e+05 8.1848818080e+02
3.05660e-04 0.117 5.2403101181e+05 7.4913374357e+02
3.33410e-04 0.117 5.8699343809e+04 4.4669964560e+02
3.61160e-04 0.117 3.0844327150e+05 4.6774007542e+02
3.88910e-04 0.117 8.3360142970e+03 2.7169550220e+02
4.16660e-04 0.117 1.8630080583e+05 3.0710983679e+02
4.44410e-04 0.117 3.1616804732e-01 1.7959006831e+02
4.72160e-04 0.117 1.1299016314e+05 2.0763952339e+02
4.99910e-04 0.117 2.9952522747e+03 1.2536542765e+02
5.27660e-04 0.117 6.7625695649e+04 1.4013969777e+02
5.55410e-04 0.117 7.6927460089e+03 8.2145593180e+01
6.10910e-04 0.117 1.1229057779e+04 8.4519745643e+01
6.66410e-04 0.117 1.3035567943e+04 8.1554625609e+01
7.21910e-04 0.117 1.3309931343e+04 7.4437319172e+01
7.77410e-04 0.117 1.2462626212e+04 6.4697088261e+01
8.32910e-04 0.117 1.0912927143e+04 5.3773301044e+01
8.88410e-04 0.117 9.0172597469e+03 4.2843375753e+01
9.43910e-04 0.117 7.0496495917e+03 3.2771032724e+01
9.99410e-04 0.117 5.2030483682e+03 2.4113557144e+01
1.05491e-03 0.117 3.5988976711e+03 1.7160773658e+01
1.11041e-03 0.117 2.2996060652e+03 1.2016626459e+01
1.22141e-03 0.117 6.4766590598e+02 6.0373017740e+00
1.33241e-03 0.117 4.1963483264e+01 4.5215452974e+00
1.44341e-03 0.117 6.3370708246e+01 5.1054681903e+00
1.55441e-03 0.117 3.0736750577e+02 5.9176165298e+00
1.66541e-03 0.117 5.0327682399e+02 5.9815000189e+00
1.77641e-03 0.117 5.4084331454e+02 5.1634639625e+00
1.88741e-03 0.117 4.3488671756e+02 3.8535158148e+00
1.99841e-03 0.117 2.6322287860e+02 2.5824997753e+00
2.10941e-03 0.117 1.0793633150e+02 1.7315517194e+00
2.22041e-03 0.117 1.8474448850e+01 1.4077213604e+00
2.33141e-03 0.117 1.5864062279e+00 1.4771560682e+00
2.44241e-03 0.117 3.2267213848e+01 1.6916253448e+00
2.55341e-03 0.117 7.4289116207e+01 1.8274751193e+00
2.66441e-03 0.117 9.9000521929e+01 1.7706812289e+00
"""

def main():
    """
    Run the unit tests defined in this module.

    Returns 0 if success or 1 if any tests fail.
    """
    import sys
    import xmlrunner  # type: ignore

    suite = unittest.TestSuite()
    suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(sys.modules[__name__]))

    runner = xmlrunner.XMLTestRunner(output='logs')
    result = runner.run(suite)
    return 1 if result.failures or result.errors else 0


############################################################################
# usage demo
############################################################################

def _eval_demo_1d(resolution, title):
    import sys
    from sasmodels import core
    from sasmodels import direct_model
    name = sys.argv[1] if len(sys.argv) > 1 else 'cylinder'

    if name == 'cylinder':
        pars = {'length':210, 'radius':500, 'background': 0}
    elif name == 'teubner_strey':
        pars = {'a2':0.003, 'c1':-1e4, 'c2':1e10, 'background':0.312643}
    elif name == 'sphere' or name == 'spherepy':
        pars = TEST_PARS_SLIT_SPHERE
    elif name == 'ellipsoid':
        pars = {
            'scale':0.05, 'background': 0,
            'r_polar':500, 'r_equatorial':15000,
            'sld':6, 'sld_solvent': 1,
            }
    else:
        pars = {}
    model_info = core.load_model_info(name)
    model = core.build_model(model_info)

    kernel = model.make_kernel([resolution.q_calc])
    theory = direct_model.call_kernel(kernel, pars)
    Iq = resolution.apply(theory)

    if isinstance(resolution, Slit1D):
        width, height = resolution.qx_width, resolution.qy_width
        Iq_romb = romberg_slit_1d(resolution.q, width, height, model, pars)
    else:
        dq = resolution.q_width
        Iq_romb = romberg_pinhole_1d(resolution.q, dq, model, pars)

    import matplotlib.pyplot as plt  # type: ignore
    plt.loglog(resolution.q_calc, theory, label='unsmeared')
    plt.loglog(resolution.q, Iq, label='smeared', hold=True)
    plt.loglog(resolution.q, Iq_romb, label='romberg smeared', hold=True)
    plt.legend()
    plt.title(title)
    plt.xlabel("Q (1/Ang)")
    plt.ylabel("I(Q) (1/cm)")

def demo_pinhole_1d():
    """
    Show example of pinhole smearing.
    """
    q = np.logspace(-4, np.log10(0.2), 400)
    q_width = 0.1*q
    resolution = Pinhole1D(q, q_width)
    _eval_demo_1d(resolution, title="10% dQ/Q Pinhole Resolution")

def demo_slit_1d():
    """
    Show example of slit smearing.
    """
    q = np.logspace(-4, np.log10(0.2), 100)
    w = h = 0.
    #w = 0.000000277790
    w = 0.0277790
    #h = 0.00277790
    #h = 0.0277790
    resolution = Slit1D(q, w, h)
    _eval_demo_1d(resolution, title="(%g,%g) Slit Resolution"%(w, h))

def demo():
    """
    Run the resolution demos.
    """
    import matplotlib.pyplot as plt  # type: ignore
    plt.subplot(121)
    demo_pinhole_1d()
    #plt.yscale('linear')
    plt.subplot(122)
    demo_slit_1d()
    #plt.yscale('linear')
    plt.show()


if __name__ == "__main__":
    #demo()
    main()