source: sasmodels/sasmodels/resolution.py @ 630cdd4

Last change on this file since 630cdd4 was 630cdd4, checked in by ajj, 8 years ago

change to resolution.py to accommodate sesans transform

1"""
2Define the resolution functions for the data.
3
4This defines classes for 1D and 2D resolution calculations.
5"""
6from __future__ import division
7
8from scipy.special import erf  # type: ignore
9from numpy import sqrt, log, log10, exp, pi  # type: ignore
10import numpy as np  # type: ignore
11
12__all__ = ["Resolution", "Perfect1D", "Pinhole1D", "Slit1D",
13           "apply_resolution_matrix", "pinhole_resolution", "slit_resolution",
14           "pinhole_extend_q", "slit_extend_q", "bin_edges",
15           "interpolate", "linear_extrapolation", "geometric_extrapolation",
16          ]
17
18MINIMUM_RESOLUTION = 1e-8
19
20
21# When extrapolating to -q, what is the minimum positive q relative to q_min
22# that we wish to calculate?
23MIN_Q_SCALE_FOR_NEGATIVE_Q_EXTRAPOLATION = 0.01
24
class Resolution(object):
    """
    Abstract base class defining a 1D resolution function.

    *q* is the set of q values at which the data is measured.

    *q_calc* is the set of q values at which the theory needs to be evaluated.
    This may extend and interpolate the q values.

    *apply* is the method to call with I(q_calc) to compute the resolution
    smeared theory I(q).
    """
    q = None  # type: np.ndarray
    q_calc = None  # type: np.ndarray
    def apply(self, theory):
        """
        Smear *theory* by the resolution function, returning *Iq*.
        """
        raise NotImplementedError("Subclass does not define the apply function")


class Perfect1D(Resolution):
    """
    Resolution function to use when there is no actual resolution smearing
    to be applied.  It has the same interface as the other resolution
    functions, but returns the identity function.
    """
    def __init__(self, q):
        self.q_calc = self.q = q

    def apply(self, theory):
        return theory


class SESANS1D(Resolution):
    """
    Resolution function which transforms the calculated *I(q_calc)* into a
    SESANS signal rather than smearing it in *q*.

    *data* is the SESANS data set and *q_calc* is the set of q values at
    which the theory will be evaluated.
    """
    def __init__(self, data, q_calc):
        self.data = data
        self.q_calc = q_calc

    def apply(self, theory):
        # Import locally (as elsewhere in this module); assumes the
        # sasmodels.sesans module provides the transform() used here.
        from sasmodels import sesans  # type: ignore
        return sesans.transform(self.data, self.q_calc, theory, None, None)


class Pinhole1D(Resolution):
    r"""
    Pinhole aperture with q-dependent gaussian resolution.

    *q* points at which the data is measured.

    *q_width* gaussian 1-sigma resolution at each data point.

    *q_calc* is the list of points to calculate, or None if this should
    be estimated from the *q* and *q_width*.
    """
    def __init__(self, q, q_width, q_calc=None, nsigma=3):
        #*min_step* is the minimum point spacing to use when computing the
        #underlying model.  It should be on the order of
        #$\tfrac{1}{10}\tfrac{2\pi}{d_\text{max}}$ to make sure that fringes
        #are computed with sufficient density to avoid aliasing effects.

        # Protect against calls with q_width=0.  The extend_q function will
        # not extend the q if q_width is 0, but q_width must be non-zero when
        # constructing the weight matrix to avoid division by zero errors.
        # In practice this should never be needed, since resolution should
        # default to Perfect1D if the pinhole geometry is not defined.
        self.q, self.q_width = q, q_width
        self.q_calc = (pinhole_extend_q(q, q_width, nsigma=nsigma)
                       if q_calc is None else np.sort(q_calc))
        self.weight_matrix = pinhole_resolution(self.q_calc, self.q,
                                np.maximum(q_width, MINIMUM_RESOLUTION))

    def apply(self, theory):
        return apply_resolution_matrix(self.weight_matrix, theory)


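# Example (illustrative sketch): typical use of Pinhole1D with a simple
# analytic stand-in for the scattering model.  A real calculation would
# evaluate a sasmodels kernel at resolution.q_calc instead of q**-4.
def _example_pinhole_usage():
    q = np.logspace(-3, -1, 50)
    resolution = Pinhole1D(q, q_width=0.05*q)   # 5% dQ/Q gaussian resolution
    theory = resolution.q_calc**-4              # model evaluated on q_calc
    return resolution.apply(theory)             # smeared I(q) on the data q

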
class Slit1D(Resolution):
    """
    Slit aperture with resolution function.

    *q* points at which the data is measured.

    *qx_width* slit width in qx

    *qy_width* slit height in qy

    *q_calc* is the list of points to calculate, or None if this should
    be estimated from *q*, *qx_width* and *qy_width*.

    The *weight_matrix* is computed by :func:`slit_resolution`
    """
    def __init__(self, q, qx_width, qy_width=0., q_calc=None):
        # Remember what width/height was used even though we won't need them
        # after the weight matrix is constructed
        self.qx_width, self.qy_width = qx_width, qy_width

        # Allow independent resolution on each point even though it is not
        # needed in practice.
        if np.isscalar(qx_width):
            qx_width = np.ones(len(q))*qx_width
        else:
            qx_width = np.asarray(qx_width)
        if np.isscalar(qy_width):
            qy_width = np.ones(len(q))*qy_width
        else:
            qy_width = np.asarray(qy_width)

        self.q = q.flatten()
        self.q_calc = slit_extend_q(q, qx_width, qy_width) \
            if q_calc is None else np.sort(q_calc)
        self.weight_matrix = \
            slit_resolution(self.q_calc, self.q, qx_width, qy_width)

    def apply(self, theory):
        return apply_resolution_matrix(self.weight_matrix, theory)


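# Example (illustrative sketch): width-only slit smearing with a fixed
# q_perp width.  The Lorentzian stand-in is only for illustration; a real
# calculation would evaluate a sasmodels kernel on resolution.q_calc.
def _example_slit_usage():
    q = np.logspace(-4, -2, 50)
    resolution = Slit1D(q, qx_width=0.117, qy_width=0.)
    theory = 1.0/(1.0 + (resolution.q_calc/1e-3)**2)
    return resolution.apply(theory)

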
def apply_resolution_matrix(weight_matrix, theory):
    """
    Apply the resolution weight matrix to the computed theory function.
    """
    #print("apply shapes", theory.shape, weight_matrix.shape)
    Iq = np.dot(theory[None, :], weight_matrix)
    #print("result shape",Iq.shape)
    return Iq.flatten()


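# Example (illustrative sketch): the weight matrix has one row per q_calc
# point and one column per measured q, so a theory vector of length
# len(q_calc) comes back as a smeared vector of length len(q).  This is
# equivalent to np.dot(weight_matrix.T, theory).
def _example_apply_resolution_matrix():
    weight_matrix = np.array([[0.5, 0.0],
                              [0.5, 0.5],
                              [0.0, 0.5]])    # 3 q_calc points, 2 data points
    theory = np.array([1.0, 2.0, 3.0])        # I(q_calc)
    return apply_resolution_matrix(weight_matrix, theory)   # -> [1.5, 2.5]

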
def pinhole_resolution(q_calc, q, q_width):
    """
    Compute the convolution matrix *W* for pinhole resolution 1-D data.

    Each row *W[i]* determines the normalized weight that the corresponding
    points *q_calc* contribute to the resolution smeared point *q[i]*.  Given
    *W*, the resolution smearing can be computed using *dot(W, I(q_calc))*.

    *q_calc* must be increasing.  *q_width* must be greater than zero.
    """
    # The current algorithm is a midpoint rectangle rule.  In the test case,
    # neither trapezoid nor Simpson's rule improved the accuracy.
    edges = bin_edges(q_calc)
    edges[edges < 0.0] = 0.0 # clip edges below zero
    cdf = erf((edges[:, None] - q[None, :]) / (sqrt(2.0)*q_width)[None, :])
    weights = cdf[1:] - cdf[:-1]
    weights /= np.sum(weights, axis=0)[None, :]
    return weights


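# Example (illustrative sketch): each column of the pinhole weight matrix is
# explicitly normalized, so the column sums are exactly one and smearing a
# constant theory returns the constant.
def _example_pinhole_resolution_normalization():
    q = np.linspace(0.01, 0.1, 10)
    q_width = np.full_like(q, 0.005)
    q_calc = pinhole_extend_q(q, q_width)
    weights = pinhole_resolution(q_calc, q, q_width)
    return weights.sum(axis=0)   # -> array of ones, one entry per point in q

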
def slit_resolution(q_calc, q, width, height, n_height=30):
    r"""
    Build a weight matrix to compute *I_s(q)* from *I(q_calc)*, given
    $q_\perp$ = *width* and $q_\parallel$ = *height*.  *n_height* is
    the number of steps to use in the integration over $q_\parallel$
    when both $q_\perp$ and $q_\parallel$ are non-zero.

    Each $q$ can have an independent width and height value even though
    current instruments use the same slit setting for all measured points.

    If slit height is large relative to width, use:

    .. math::

        I_s(q_i) = \frac{1}{\Delta q_\perp}
            \int_0^{\Delta q_\perp}
                I\left(\sqrt{q_i^2 + q_\perp^2}\right) \,dq_\perp

    If slit width is large relative to height, use:

    .. math::

        I_s(q_i) = \frac{1}{2 \Delta q_\parallel}
            \int_{-\Delta q_\parallel}^{\Delta q_\parallel}
                I\left(|q_i + q_\parallel|\right) \,dq_\parallel

    For a mixture of slit width and height use:

    .. math::

        I_s(q_i) = \frac{1}{2 \Delta q_\parallel \Delta q_\perp}
            \int_{-\Delta q_\parallel}^{\Delta q_\parallel}
            \int_0^{\Delta q_\perp}
                I\left(\sqrt{(q_i + q_\parallel)^2 + q_\perp^2}\right)
                \,dq_\perp dq_\parallel

    **Definition**

    We are using the mid-point integration rule to assign weights to each
    element of a weight matrix $W$ so that

    .. math::

        I_s(q) = W\,I(q_\text{calc})

    If *q_calc* is at the mid-point, we can infer the bin edges from the
    pairwise averages of *q_calc*, adding the missing edges before
    *q_calc[0]* and after *q_calc[-1]*.

    For $q_\parallel = 0$, the smeared value can be computed numerically
    using the $u$ substitution

    .. math::

        u_j = \sqrt{q_j^2 - q^2}

    This gives

    .. math::

        I_s(q) \approx \sum_j I(u_j) \Delta u_j

    where $I(u_j)$ is the value at the mid-point, and $\Delta u_j$ is the
    difference between consecutive edges which have been first converted
    to $u$.  Only $u_j \in [0, \Delta q_\perp]$ are used, which corresponds
    to $q_j \in \left[q, \sqrt{q^2 + \Delta q_\perp^2}\right]$, so

    .. math::

        W_{ij} = \frac{1}{\Delta q_\perp} \Delta u_j
               = \frac{1}{\Delta q_\perp} \left(
                    \sqrt{q_{j+1}^2 - q_i^2} - \sqrt{q_j^2 - q_i^2} \right)
            \ \text{if}\  q_j \in \left[q_i, \sqrt{q_i^2 + q_\perp^2}\right]

    where $I_s(q_i)$ is the theory function being computed and $q_j$ are the
    mid-points between the calculated values in *q_calc*.  We tweak the
    edges of the initial and final intervals so that they lie on integration
    limits.

    (To be precise, the transformed midpoint $u(q_j)$ is not necessarily the
    midpoint of the edges $u((q_{j-1}+q_j)/2)$ and $u((q_j + q_{j+1})/2)$,
    but it is at least in the interval, so the approximation is going to be
    a little better than the left or right Riemann sum, and should be
    good enough for our purposes.)

    For $q_\perp = 0$, the $u$ substitution is simpler:

    .. math::

        u_j = \left|q_j - q\right|

    so

    .. math::

        W_{ij} = \frac{1}{2 \Delta q_\parallel} \Delta u_j
            = \frac{1}{2 \Delta q_\parallel} (q_{j+1} - q_j)
            \ \text{if}\ q_j \in
                \left[q-\Delta q_\parallel, q+\Delta q_\parallel\right]

    However, we need to support cases where $u_j < 0$, which means using
    $2 (q_{j+1} - q_j)$ when $q_j \in \left[0, q_\parallel-q_i\right]$.
    This is not an issue for $q_i > q_\parallel$.

    For both $q_\perp > 0$ and $q_\parallel > 0$ we perform a 2 dimensional
    integration with

    .. math::

        u_{jk} = \sqrt{q_j^2 - (q + (k\Delta q_\parallel/L))^2}
            \ \text{for}\ k = -L \ldots L

    for $L$ = *n_height*.  This gives

    .. math::

        W_{ij} = \frac{1}{2 \Delta q_\perp q_\parallel}
            \sum_{k=-L}^L \Delta u_{jk}
                \left(\frac{\Delta q_\parallel}{2 L + 1}\right)


    """
    #np.set_printoptions(precision=6, linewidth=10000)

    # The current algorithm is a midpoint rectangle rule.
    q_edges = bin_edges(q_calc) # Note: requires q > 0
    q_edges[q_edges < 0.0] = 0.0 # clip edges below zero
    weights = np.zeros((len(q), len(q_calc)), 'd')

    #print(q_calc)
    for i, (qi, w, h) in enumerate(zip(q, width, height)):
        if w == 0. and h == 0.:
            # Perfect resolution, so return the theory value directly.
            # Note: assumes that q is a subset of q_calc.  If qi need not be
            # in q_calc, then we can do a weighted interpolation by looking
            # up qi in q_calc, then weighting the result by the relative
            # distance to the neighbouring points.
            weights[i, :] = (q_calc == qi)
        elif h == 0:
            weights[i, :] = _q_perp_weights(q_edges, qi, w)
        elif w == 0:
            in_x = 1.0 * ((q_calc >= qi-h) & (q_calc <= qi+h))
            abs_x = 1.0*(q_calc < abs(qi - h)) if qi < h else 0.
            #print(qi - h, qi + h)
            #print(in_x + abs_x)
            weights[i, :] = (in_x + abs_x) * np.diff(q_edges) / (2*h)
        else:
            for k in range(-n_height, n_height+1):
                weights[i, :] += _q_perp_weights(q_edges, qi+k*h/n_height, w)
            weights[i, :] /= 2*n_height + 1

    return weights.T


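# Example (illustrative sketch): numerical check of the width-only case.
# For a single point q0, the weight row should reproduce the u-substitution
# integral (1/dq_perp) * integral_0^dq_perp I(sqrt(q0^2+u^2)) du.  The test
# function and tolerances are arbitrary; with this grid the two results
# should agree to well under a percent.
def _example_slit_weights_check():
    from scipy.integrate import quad  # type: ignore
    q0, dq_perp = 0.1, 0.05
    q_calc = np.linspace(0.05, 0.2, 2000)
    W = slit_resolution(q_calc, np.array([q0]),
                        np.array([dq_perp]), np.array([0.]))
    Iq_calc = 1.0/(1.0 + (q_calc/0.05)**2)
    smeared = np.dot(Iq_calc[None, :], W).flatten()[0]
    direct = quad(lambda u: 1.0/(1.0 + (np.hypot(q0, u)/0.05)**2),
                  0., dq_perp)[0]/dq_perp
    return smeared, direct

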
def _q_perp_weights(q_edges, qi, w):
    # Convert bin edges from q to u
    u_limit = np.sqrt(qi**2 + w**2)
    u_edges = q_edges**2 - qi**2
    u_edges[q_edges < abs(qi)] = 0.
    u_edges[q_edges > u_limit] = u_limit**2 - qi**2
    weights = np.diff(np.sqrt(u_edges))/w
    #print("i, qi",i,qi,qi+width)
    #print(q_calc)
    #print(weights)
    return weights


def pinhole_extend_q(q, q_width, nsigma=3):
    """
    Given *q* and *q_width*, find a set of sampling points *q_calc* so
    that each point $I(q)$ has sufficient support from the underlying
    function.
    """
    q_min, q_max = np.min(q - nsigma*q_width), np.max(q + nsigma*q_width)
    return linear_extrapolation(q, q_min, q_max)


def slit_extend_q(q, width, height):
    """
    Given *q*, *width* and *height*, find a set of sampling points *q_calc* so
    that each point I(q) has sufficient support from the underlying
    function.
    """
    q_min, q_max = np.min(q-height), np.max(np.sqrt((q+height)**2 + width**2))

    return geometric_extrapolation(q, q_min, q_max)


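# Example (illustrative sketch): the calculation range for slit smearing must
# reach down to min(q - height) and up to max(sqrt((q + height)^2 + width^2)),
# here [0.01, sqrt(0.02^2 + 0.05^2) ~= 0.054].
def _example_slit_extend_q():
    q = np.array([0.01, 0.02])
    return slit_extend_q(q, np.array([0.05, 0.05]), np.array([0.0, 0.0]))

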
def bin_edges(x):
    """
    Determine bin edges from bin centers, assuming that edges are centered
    between the bins.

    Note: this uses the arithmetic mean, which may not be appropriate for
    log-scaled data.
    """
    if len(x) < 2 or (np.diff(x) < 0).any():
        raise ValueError("Expected bins to be an increasing set")
    edges = np.hstack([
        x[0]  - 0.5*(x[1]  - x[0]),  # first point minus half first interval
        0.5*(x[1:] + x[:-1]),        # mid points of all central intervals
        x[-1] + 0.5*(x[-1] - x[-2]), # last point plus half last interval
        ])
    return edges


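# Example (illustrative sketch): centers [1, 2, 4] give edges
# [0.5, 1.5, 3.0, 5.0] -- midpoints between centers, plus half the first/last
# interval beyond the end points.
def _example_bin_edges():
    return bin_edges(np.array([1.0, 2.0, 4.0]))

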
def interpolate(q, max_step):
    """
    Returns *q_calc* with points spaced at most max_step apart.
    """
    step = np.diff(q)
    index = step > max_step
    if np.any(index):
        inserts = []
        for q_i, step_i in zip(q[:-1][index], step[index]):
            n = np.ceil(step_i/max_step)
            inserts.extend(q_i + np.arange(1, n)*(step_i/n))
        # Extend a couple of fringes beyond the end of the data
        inserts.extend(q[-1] + np.arange(1, 8)*max_step)
        q_calc = np.sort(np.hstack((q, inserts)))
    else:
        q_calc = q
    return q_calc


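# Example (illustrative sketch): the 2-to-6 gap is subdivided so that no step
# exceeds max_step, and a few extra points are appended beyond the last q.
# Here the result starts [1, 2, 3, 4, 5, 6, 7, ...].
def _example_interpolate():
    return interpolate(np.array([1.0, 2.0, 6.0]), max_step=1.0)

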
def linear_extrapolation(q, q_min, q_max):
    """
    Extrapolate *q* out to [*q_min*, *q_max*] using the step size in *q* as
    a guide.  Extrapolation below uses about the same size as the first
    interval.  Extrapolation above uses about the same size as the final
    interval.

    If *q_min* is zero or less then a small positive fraction of *q[0]* is
    used instead (see *MIN_Q_SCALE_FOR_NEGATIVE_Q_EXTRAPOLATION*).
    """
    q = np.sort(q)
    if q_min + 2*MINIMUM_RESOLUTION < q[0]:
        if q_min <= 0: q_min = q[0]*MIN_Q_SCALE_FOR_NEGATIVE_Q_EXTRAPOLATION
        n_low = int(np.ceil((q[0]-q_min) / (q[1]-q[0]))) if q[1] > q[0] else 15
        q_low = np.linspace(q_min, q[0], n_low+1)[:-1]
    else:
        q_low = []
    if q_max - 2*MINIMUM_RESOLUTION > q[-1]:
        n_high = int(np.ceil((q_max-q[-1]) / (q[-1]-q[-2]))) if q[-1] > q[-2] else 15
        q_high = np.linspace(q[-1], q_max, n_high+1)[1:]
    else:
        q_high = []
    return np.concatenate([q_low, q, q_high])


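# Example (illustrative sketch): data on [0.008, 0.012] extended to
# [0.002, 0.02], using roughly the first/last measured interval as the step
# for the extrapolated points below and above the data.
def _example_linear_extrapolation():
    return linear_extrapolation(np.array([0.008, 0.010, 0.012]), 0.002, 0.020)

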
def geometric_extrapolation(q, q_min, q_max, points_per_decade=None):
    r"""
    Extrapolate *q* to [*q_min*, *q_max*] using geometric steps, with the
    average geometric step size in *q* as the step size.

    If *q_min* is zero or less then a small positive fraction of *q[0]* is
    used instead (see *MIN_Q_SCALE_FOR_NEGATIVE_Q_EXTRAPOLATION*).

    *points_per_decade* sets the ratio between consecutive steps such
    that there will be $n$ points used for every factor of 10 increase
    in *q*.

    If *points_per_decade* is not given, it will be estimated as follows.
    Starting at $q_1$ and stepping geometrically by $\Delta q$ to $q_n$
    in $n$ points gives a geometric average of:

    .. math::

         \log \Delta q = (\log q_n - \log q_1) / (n - 1)

    From this we can compute the number of steps required to extend $q$
    from $q_n$ to $q_\text{max}$ by $\Delta q$ as:

    .. math::

         n_\text{extend} = (\log q_\text{max} - \log q_n) / \log \Delta q

    Substituting:

    .. math::

         n_\text{extend} = (n-1) (\log q_\text{max} - \log q_n)
            / (\log q_n - \log q_1)
    """
    q = np.sort(q)
    if points_per_decade is None:
        log_delta_q = (len(q) - 1) / (log(q[-1]) - log(q[0]))
    else:
        log_delta_q = log(10.) / points_per_decade
    if q_min < q[0]:
        if q_min <= 0: q_min = q[0]*MIN_Q_SCALE_FOR_NEGATIVE_Q_EXTRAPOLATION
        n_low = log_delta_q * (log(q[0])-log(q_min))
        q_low = np.logspace(log10(q_min), log10(q[0]), int(np.ceil(n_low))+1)[:-1]
    else:
        q_low = []
    if q_max > q[-1]:
        n_high = log_delta_q * (log(q_max)-log(q[-1]))
        q_high = np.logspace(log10(q[-1]), log10(q_max), int(np.ceil(n_high))+1)[1:]
    else:
        q_high = []
    return np.concatenate([q_low, q, q_high])


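# Example (illustrative sketch): 21 points spanning two decades (about 10
# points per decade) extended to [1e-4, 1.0]; the extrapolated regions keep
# roughly the same number of points per decade as the measured q.
def _example_geometric_extrapolation():
    q = np.logspace(-3, -1, 21)
    return geometric_extrapolation(q, 1e-4, 1.0)

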
############################################################################
# unit tests
############################################################################
import unittest


def eval_form(q, form, pars):
    """
    Return the SAS model evaluated at *q*.

    *form* is the SAS model returned from :func:`core.load_model`.

    *pars* are the parameter values to use when evaluating.
    """
    from sasmodels import direct_model
    kernel = form.make_kernel([q])
    theory = direct_model.call_kernel(kernel, pars)
    kernel.release()
    return theory


def gaussian(q, q0, dq):
    """
    Return the Gaussian resolution function.

    *q0* is the center, *dq* is the width and *q* are the points to evaluate.
    """
    return exp(-0.5*((q-q0)/dq)**2)/(sqrt(2*pi)*dq)


def romberg_slit_1d(q, width, height, form, pars):
    """
    Romberg integration for slit resolution.

    This is an adaptive integration technique.  It is called with settings
    that make it slow to evaluate but give it good accuracy.
    """
    from scipy.integrate import romberg  # type: ignore

    par_set = set([p.name for p in form.info.parameters.call_parameters])
    if any(k not in par_set for k in pars.keys()):
        extra = set(pars.keys()) - par_set
        raise ValueError("bad parameters: [%s] not in [%s]"
                         % (", ".join(sorted(extra)),
                            ", ".join(sorted(pars.keys()))))

    if np.isscalar(width):
        width = [width]*len(q)
    if np.isscalar(height):
        height = [height]*len(q)
    _int_w = lambda w, qi: eval_form(sqrt(qi**2 + w**2), form, pars)
    _int_h = lambda h, qi: eval_form(abs(qi+h), form, pars)
    # If both width and height are defined, then it is too slow to use dblquad.
    # Instead use trapz on a fixed grid, interpolated into the I(Q) for
    # the extended Q range.
    #_int_wh = lambda w, h, qi: eval_form(sqrt((qi+h)**2 + w**2), form, pars)
    q_calc = slit_extend_q(q, np.asarray(width), np.asarray(height))
    Iq = eval_form(q_calc, form, pars)
    result = np.empty(len(q))
    for i, (qi, w, h) in enumerate(zip(q, width, height)):
        if h == 0.:
            total = romberg(_int_w, 0, w, args=(qi,),
                            divmax=100, vec_func=True, tol=0, rtol=1e-8)
            result[i] = total/w
        elif w == 0.:
            total = romberg(_int_h, -h, h, args=(qi,),
                            divmax=100, vec_func=True, tol=0, rtol=1e-8)
            result[i] = total/(2*h)
        else:
            w_grid = np.linspace(0, w, 21)[None, :]
            h_grid = np.linspace(-h, h, 23)[:, None]
            u_sub = sqrt((qi+h_grid)**2 + w_grid**2)
            f_at_u = np.interp(u_sub, q_calc, Iq)
            #print(np.trapz(Iu, w_grid, axis=1))
            total  = np.trapz(np.trapz(f_at_u, w_grid, axis=1), h_grid[:, 0])
            result[i] = total / (2*h*w)
            # from scipy.integrate import dblquad
            # r, err = dblquad(_int_wh, -h, h, lambda h: 0., lambda h: w,
            #                  args=(qi,))
            # result[i] = r/(w*2*h)

    # r should be [float, ...], but it is [array([float]), array([float]),...]
    return result


def romberg_pinhole_1d(q, q_width, form, pars, nsigma=5):
    """
    Romberg integration for pinhole resolution.

    This is an adaptive integration technique.  It is called with settings
    that make it slow to evaluate but give it good accuracy.
    """
    from scipy.integrate import romberg  # type: ignore

    par_set = set([p.name for p in form.info.parameters.call_parameters])
    if any(k not in par_set for k in pars.keys()):
        extra = set(pars.keys()) - par_set
        raise ValueError("bad parameters: [%s] not in [%s]"
                         % (", ".join(sorted(extra)),
                            ", ".join(sorted(pars.keys()))))

    func = lambda q, q0, dq: eval_form(q, form, pars)*gaussian(q, q0, dq)
    total = [romberg(func, max(qi-nsigma*dqi, 1e-10*q[0]), qi+nsigma*dqi,
                     args=(qi, dqi), divmax=100, vec_func=True,
                     tol=0, rtol=1e-8)
             for qi, dqi in zip(q, q_width)]
    return np.asarray(total).flatten()


class ResolutionTest(unittest.TestCase):
    """
    Test the resolution calculations.
    """

    def setUp(self):
        self.x = 0.001*np.arange(1, 11)
        self.y = self.Iq(self.x)

    def Iq(self, q):
        "Linear function for resolution unit test"
        return 12.0 - 1000.0*q

    def test_perfect(self):
        """
        Perfect resolution and no smearing.
        """
        resolution = Perfect1D(self.x)
        theory = self.Iq(resolution.q_calc)
        output = resolution.apply(theory)
        np.testing.assert_equal(output, self.y)

    def test_slit_zero(self):
        """
        Slit smearing with perfect resolution.
        """
        resolution = Slit1D(self.x, qx_width=0, qy_width=0, q_calc=self.x)
        theory = self.Iq(resolution.q_calc)
        output = resolution.apply(theory)
        np.testing.assert_equal(output, self.y)

    @unittest.skip("not yet supported")
    def test_slit_high(self):
        """
        Slit smearing with height 0.005
        """
        resolution = Slit1D(self.x, qx_width=0, qy_width=0.005, q_calc=self.x)
        theory = self.Iq(resolution.q_calc)
        output = resolution.apply(theory)
        answer = [
            9.0618, 8.6402, 8.1187, 7.1392, 6.1528,
            5.5555, 4.5584, 3.5606, 2.5623, 2.0000,
            ]
        np.testing.assert_allclose(output, answer, atol=1e-4)

    @unittest.skip("not yet supported")
    def test_slit_both_high(self):
        """
        Slit smearing with width < 100*height.
        """
        q = np.logspace(-4, -1, 10)
        resolution = Slit1D(q, qx_width=0.2, qy_width=np.inf)
        theory = 1000*self.Iq(resolution.q_calc**4)
        output = resolution.apply(theory)
        answer = [
            8.85785, 8.43012, 7.92687, 6.94566, 6.03660,
            5.40363, 4.40655, 3.40880, 2.41058, 2.00000,
            ]
        np.testing.assert_allclose(output, answer, atol=1e-4)

    @unittest.skip("not yet supported")
    def test_slit_wide(self):
        """
        Slit smearing with width 0.0002
        """
        resolution = Slit1D(self.x, qx_width=0.0002, qy_width=0, q_calc=self.x)
        theory = self.Iq(resolution.q_calc)
        output = resolution.apply(theory)
        answer = [
            11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0,
            ]
        np.testing.assert_allclose(output, answer, atol=1e-4)

    @unittest.skip("not yet supported")
    def test_slit_both_wide(self):
        """
        Slit smearing with width > 100*height.
        """
        resolution = Slit1D(self.x, qx_width=0.0002, qy_width=0.000001,
                            q_calc=self.x)
        theory = self.Iq(resolution.q_calc)
        output = resolution.apply(theory)
        answer = [
            11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0,
            ]
        np.testing.assert_allclose(output, answer, atol=1e-4)

    def test_pinhole_zero(self):
        """
        Pinhole smearing with perfect resolution
        """
        resolution = Pinhole1D(self.x, 0.0*self.x)
        theory = self.Iq(resolution.q_calc)
        output = resolution.apply(theory)
        np.testing.assert_equal(output, self.y)

    def test_pinhole(self):
        """
        Pinhole smearing with dQ = 0.001 [Note: not dQ/Q = 0.001]
        """
        resolution = Pinhole1D(self.x, 0.001*np.ones_like(self.x),
                               q_calc=self.x)
        theory = 12.0-1000.0*resolution.q_calc
        output = resolution.apply(theory)
        answer = [
            10.44785079, 9.84991299, 8.98101708,
            7.99906585, 6.99998311, 6.00001689,
            5.00093415, 4.01898292, 3.15008701, 2.55214921,
            ]
        np.testing.assert_allclose(output, answer, atol=1e-8)


class IgorComparisonTest(unittest.TestCase):
    """
    Test resolution calculations against those returned by Igor.
    """

    def setUp(self):
        self.pars = TEST_PARS_PINHOLE_SPHERE
        from sasmodels import core
        self.model = core.load_model("sphere", dtype='double')

    def _eval_sphere(self, pars, resolution):
        from sasmodels import direct_model
        kernel = self.model.make_kernel([resolution.q_calc])
        theory = direct_model.call_kernel(kernel, pars)
        result = resolution.apply(theory)
        kernel.release()
        return result

    def _compare(self, q, output, answer, tolerance):
        #err = (output - answer)/answer
        #idx = abs(err) >= tolerance
        #problem = zip(q[idx], output[idx], answer[idx], err[idx])
        #print("\n".join(str(v) for v in problem))
        np.testing.assert_allclose(output, answer, rtol=tolerance)

    def test_perfect(self):
        """
        Compare sphere model with NIST Igor SANS, no resolution smearing.
        """
        pars = TEST_PARS_SLIT_SPHERE
        data_string = TEST_DATA_SLIT_SPHERE

        data = np.loadtxt(data_string.split('\n')).T
        q, _, answer, _ = data
        resolution = Perfect1D(q)
        output = self._eval_sphere(pars, resolution)
        self._compare(q, output, answer, 1e-6)

    def test_pinhole(self):
        """
        Compare pinhole resolution smearing with NIST Igor SANS
        """
        pars = TEST_PARS_PINHOLE_SPHERE
        data_string = TEST_DATA_PINHOLE_SPHERE

        data = np.loadtxt(data_string.split('\n')).T
        q, q_width, answer = data
        resolution = Pinhole1D(q, q_width)
        output = self._eval_sphere(pars, resolution)
        # TODO: relative error should be lower
        self._compare(q, output, answer, 3e-4)

    def test_pinhole_romberg(self):
        """
        Compare pinhole resolution smearing with romberg integration result.
        """
        pars = TEST_PARS_PINHOLE_SPHERE
        data_string = TEST_DATA_PINHOLE_SPHERE
        pars['radius'] *= 5

        data = np.loadtxt(data_string.split('\n')).T
        q, q_width, answer = data
        answer = romberg_pinhole_1d(q, q_width, self.model, pars)
        ## Getting 0.1% requires 5 sigma and 200 points per fringe
        #q_calc = interpolate(pinhole_extend_q(q, q_width, nsigma=5),
        #                     2*np.pi/pars['radius']/200)
        #tol = 0.001
        ## The default 3 sigma and no extra points gets 1%
        q_calc = None  # type: np.ndarray
        tol = 0.01
        resolution = Pinhole1D(q, q_width, q_calc=q_calc)
        output = self._eval_sphere(pars, resolution)
        if 0: # debug plot
            import matplotlib.pyplot as plt  # type: ignore
            resolution = Perfect1D(q)
            source = self._eval_sphere(pars, resolution)
            plt.loglog(q, source, '.')
            plt.loglog(q, answer, '-')
            plt.loglog(q, output, '-')
            plt.show()
        self._compare(q, output, answer, tol)

    def test_slit(self):
        """
        Compare slit resolution smearing with NIST Igor SANS
        """
        pars = TEST_PARS_SLIT_SPHERE
        data_string = TEST_DATA_SLIT_SPHERE

        data = np.loadtxt(data_string.split('\n')).T
        q, delta_qv, _, answer = data
        resolution = Slit1D(q, qx_width=delta_qv, qy_width=0)
        output = self._eval_sphere(pars, resolution)
        # TODO: eliminate Igor test since it is too inaccurate to be useful.
        # This means we can eliminate the test data as well, and instead
        # use a generated q vector.
        self._compare(q, output, answer, 0.5)

    def test_slit_romberg(self):
        """
        Compare slit resolution smearing with romberg integration result.
        """
        pars = TEST_PARS_SLIT_SPHERE
        data_string = TEST_DATA_SLIT_SPHERE

        data = np.loadtxt(data_string.split('\n')).T
        q, delta_qv, _, answer = data
        answer = romberg_slit_1d(q, delta_qv, 0., self.model, pars)
        q_calc = slit_extend_q(interpolate(q, 2*np.pi/pars['radius']/20),
                               delta_qv[0], 0.)
        resolution = Slit1D(q, qx_width=delta_qv, qy_width=0, q_calc=q_calc)
        output = self._eval_sphere(pars, resolution)
        # TODO: relative error should be lower
        self._compare(q, output, answer, 0.025)

    def test_ellipsoid(self):
        """
        Compare romberg integration for ellipsoid model.
        """
        from .core import load_model
        pars = {
            'scale':0.05,
            'radius_polar':500, 'radius_equatorial':15000,
            'sld':6, 'sld_solvent': 1,
            }
        form = load_model('ellipsoid', dtype='double')
        q = np.logspace(log10(4e-5), log10(2.5e-2), 68)
        width, height = 0.117, 0.
        resolution = Slit1D(q, qx_width=width, qy_width=height)
        answer = romberg_slit_1d(q, width, height, form, pars)
        output = resolution.apply(eval_form(resolution.q_calc, form, pars))
        # TODO: 10% is too much error; use better algorithm
        #print(np.max(abs(answer-output)/answer))
        self._compare(q, output, answer, 0.1)

    #TODO: can sas q spacing be too sparse for the resolution calculation?
    @unittest.skip("suppress sparse data test; not supported by current code")
    def test_pinhole_sparse(self):
        """
        Compare pinhole resolution smearing with NIST Igor SANS on sparse data
        """
        pars = TEST_PARS_PINHOLE_SPHERE
        data_string = TEST_DATA_PINHOLE_SPHERE

        data = np.loadtxt(data_string.split('\n')).T
        q, q_width, answer = data[:, ::20] # Take every 20th point
        resolution = Pinhole1D(q, q_width)
        output = self._eval_sphere(pars, resolution)
        self._compare(q, output, answer, 1e-6)


# pinhole sphere parameters
TEST_PARS_PINHOLE_SPHERE = {
    'scale': 1.0, 'background': 0.01,
    'radius': 60.0, 'sld': 1, 'sld_solvent': 6.3,
    }
# Q, dQ, I(Q) calculated by NIST Igor SANS package
TEST_DATA_PINHOLE_SPHERE = """\
0.001278 0.0002847 2538.41176383
0.001562 0.0002905 2536.91820405
0.001846 0.0002956 2535.13182479
0.002130 0.0003017 2533.06217813
0.002414 0.0003087 2530.70378586
0.002698 0.0003165 2528.05024192
0.002982 0.0003249 2525.10408349
0.003266 0.0003340 2521.86667499
0.003550 0.0003437 2518.33907750
0.003834 0.0003539 2514.52246995
0.004118 0.0003646 2510.41798319
0.004402 0.0003757 2506.02690988
0.004686 0.0003872 2501.35067884
0.004970 0.0003990 2496.38678318
0.005253 0.0004112 2491.16237596
0.005537 0.0004237 2485.63911673
0.005821 0.0004365 2479.83657083
0.006105 0.0004495 2473.75676948
0.006389 0.0004628 2467.40145990
0.006673 0.0004762 2460.77293372
0.006957 0.0004899 2453.86724627
0.007241 0.0005037 2446.69623838
0.007525 0.0005177 2439.25775219
0.007809 0.0005318 2431.55421398
0.008093 0.0005461 2423.58785521
0.008377 0.0005605 2415.36158137
0.008661 0.0005750 2406.87009473
0.008945 0.0005896 2398.12841186
0.009229 0.0006044 2389.13360806
0.009513 0.0006192 2379.88958042
0.009797 0.0006341 2370.39776774
0.010080 0.0006491 2360.69528793
0.010360 0.0006641 2350.85169027
0.010650 0.0006793 2340.42023633
0.010930 0.0006945 2330.11206013
0.011220 0.0007097 2319.20109972
0.011500 0.0007251 2308.43503981
0.011780 0.0007404 2297.44820179
0.012070 0.0007558 2285.83853677
0.012350 0.0007713 2274.41290746
0.012640 0.0007868 2262.36219581
0.012920 0.0008024 2250.51169731
0.013200 0.0008180 2238.45596231
0.013490 0.0008336 2225.76495666
0.013770 0.0008493 2213.29618391
0.014060 0.0008650 2200.19110751
0.014340 0.0008807 2187.34050325
0.014620 0.0008965 2174.30529864
0.014910 0.0009123 2160.61632548
0.015190 0.0009281 2147.21038112
0.015470 0.0009440 2133.62023580
0.015760 0.0009598 2119.37907426
0.016040 0.0009757 2105.45234903
0.016330 0.0009916 2090.86319102
0.016610 0.0010080 2076.60576032
0.016890 0.0010240 2062.19214565
0.017180 0.0010390 2047.10550219
0.017460 0.0010550 2032.38715621
0.017740 0.0010710 2017.52560123
0.018030 0.0010880 2001.99124318
0.018310 0.0011040 1986.84662060
0.018600 0.0011200 1971.03389745
0.018880 0.0011360 1955.61395119
0.019160 0.0011520 1940.08291563
0.019450 0.0011680 1923.87672225
0.019730 0.0011840 1908.10656374
0.020020 0.0012000 1891.66297192
0.020300 0.0012160 1875.66789021
0.020580 0.0012320 1859.56357196
0.020870 0.0012490 1842.79468290
0.021150 0.0012650 1826.50064489
0.021430 0.0012810 1810.11533702
0.021720 0.0012970 1793.06840882
0.022000 0.0013130 1776.51153580
0.022280 0.0013290 1759.87201249
0.022570 0.0013460 1742.57354412
0.022850 0.0013620 1725.79397319
0.023140 0.0013780 1708.35831550
0.023420 0.0013940 1691.45256069
0.023700 0.0014110 1674.48561783
0.023990 0.0014270 1656.86525366
0.024270 0.0014430 1639.79847285
0.024550 0.0014590 1622.68887088
0.024840 0.0014760 1604.96421100
0.025120 0.0014920 1587.85768129
0.025410 0.0015080 1569.99297335
0.025690 0.0015240 1552.84580279
0.025970 0.0015410 1535.54074115
0.026260 0.0015570 1517.75249337
0.026540 0.0015730 1500.40115023
0.026820 0.0015900 1483.03632237
0.027110 0.0016060 1465.05942429
0.027390 0.0016220 1447.67682181
0.027670 0.0016390 1430.46495191
0.027960 0.0016550 1412.49232282
0.028240 0.0016710 1395.13182318
0.028520 0.0016880 1377.93439837
0.028810 0.0017040 1359.99528971
0.029090 0.0017200 1342.67274512
0.029370 0.0017370 1325.55375609
"""

# Slit sphere parameters
TEST_PARS_SLIT_SPHERE = {
    'scale': 0.01, 'background': 0.01,
    'radius': 60000, 'sld': 1, 'sld_solvent': 4,
    }
# Q dQ I(Q) I_smeared(Q)
TEST_DATA_SLIT_SPHERE = """\
2.26097e-05 0.117 5.5781372896e+09 1.4626077708e+06
2.53847e-05 0.117 5.0363141458e+09 1.3117318023e+06
2.81597e-05 0.117 4.4850108103e+09 1.1594863713e+06
3.09347e-05 0.117 3.9364658459e+09 1.0094881630e+06
3.37097e-05 0.117 3.4019975074e+09 8.6518941303e+05
3.92597e-05 0.117 2.4139519814e+09 6.0232158311e+05
4.48097e-05 0.117 1.5816877820e+09 3.8739994090e+05
5.03597e-05 0.117 9.3715407224e+08 2.2745304775e+05
5.59097e-05 0.117 4.8387917428e+08 1.2101295768e+05
6.14597e-05 0.117 2.0193586928e+08 6.0055107771e+04
6.70097e-05 0.117 5.5886110911e+07 3.2749521065e+04
7.25597e-05 0.117 3.7782348010e+06 2.6350963616e+04
7.81097e-05 0.117 5.3407817904e+06 2.9624963314e+04
8.36597e-05 0.117 2.7975485523e+07 3.4403962254e+04
8.92097e-05 0.117 4.9845448282e+07 3.6130017903e+04
9.47597e-05 0.117 6.0092588905e+07 3.3495107849e+04
1.00310e-04 0.117 5.6823430831e+07 2.7475726279e+04
1.05860e-04 0.117 4.3857024036e+07 2.0144282226e+04
1.11410e-04 0.117 2.7277144760e+07 1.3647403260e+04
1.22510e-04 0.117 3.3119334113e+06 6.6519711526e+03
1.33610e-04 0.117 1.4412859402e+06 6.9726212813e+03
1.44710e-04 0.117 8.5620162463e+06 8.1441335775e+03
1.55810e-04 0.117 9.6957429033e+06 6.4559996521e+03
1.66910e-04 0.117 4.3818341914e+06 3.6252154156e+03
1.78010e-04 0.117 2.7448997387e+05 2.4006505342e+03
1.89110e-04 0.117 8.0472009936e+05 2.8187789089e+03
2.00210e-04 0.117 2.8149552834e+06 3.0915662855e+03
2.11310e-04 0.117 2.7510907861e+06 2.3722530293e+03
2.22410e-04 0.117 1.0053133293e+06 1.4473468311e+03
2.33510e-04 0.117 5.8428305052e+03 1.2048540556e+03
2.44610e-04 0.117 5.1699305004e+05 1.4625670042e+03
2.55710e-04 0.117 1.2120227268e+06 1.5010705973e+03
2.66810e-04 0.117 9.7896842846e+05 1.1336343426e+03
2.77910e-04 0.117 2.5507264791e+05 8.1848818080e+02
3.05660e-04 0.117 5.2403101181e+05 7.4913374357e+02
3.33410e-04 0.117 5.8699343809e+04 4.4669964560e+02
3.61160e-04 0.117 3.0844327150e+05 4.6774007542e+02
3.88910e-04 0.117 8.3360142970e+03 2.7169550220e+02
4.16660e-04 0.117 1.8630080583e+05 3.0710983679e+02
4.44410e-04 0.117 3.1616804732e-01 1.7959006831e+02
4.72160e-04 0.117 1.1299016314e+05 2.0763952339e+02
4.99910e-04 0.117 2.9952522747e+03 1.2536542765e+02
5.27660e-04 0.117 6.7625695649e+04 1.4013969777e+02
5.55410e-04 0.117 7.6927460089e+03 8.2145593180e+01
6.10910e-04 0.117 1.1229057779e+04 8.4519745643e+01
6.66410e-04 0.117 1.3035567943e+04 8.1554625609e+01
7.21910e-04 0.117 1.3309931343e+04 7.4437319172e+01
7.77410e-04 0.117 1.2462626212e+04 6.4697088261e+01
8.32910e-04 0.117 1.0912927143e+04 5.3773301044e+01
8.88410e-04 0.117 9.0172597469e+03 4.2843375753e+01
9.43910e-04 0.117 7.0496495917e+03 3.2771032724e+01
9.99410e-04 0.117 5.2030483682e+03 2.4113557144e+01
1.05491e-03 0.117 3.5988976711e+03 1.7160773658e+01
1.11041e-03 0.117 2.2996060652e+03 1.2016626459e+01
1.22141e-03 0.117 6.4766590598e+02 6.0373017740e+00
1.33241e-03 0.117 4.1963483264e+01 4.5215452974e+00
1.44341e-03 0.117 6.3370708246e+01 5.1054681903e+00
1.55441e-03 0.117 3.0736750577e+02 5.9176165298e+00
1.66541e-03 0.117 5.0327682399e+02 5.9815000189e+00
1.77641e-03 0.117 5.4084331454e+02 5.1634639625e+00
1.88741e-03 0.117 4.3488671756e+02 3.8535158148e+00
1.99841e-03 0.117 2.6322287860e+02 2.5824997753e+00
2.10941e-03 0.117 1.0793633150e+02 1.7315517194e+00
2.22041e-03 0.117 1.8474448850e+01 1.4077213604e+00
2.33141e-03 0.117 1.5864062279e+00 1.4771560682e+00
2.44241e-03 0.117 3.2267213848e+01 1.6916253448e+00
2.55341e-03 0.117 7.4289116207e+01 1.8274751193e+00
2.66441e-03 0.117 9.9000521929e+01 1.7706812289e+00
"""

def main():
    """
    Run the tests given in sys.argv.

    Returns 0 on success or 1 if any tests fail.
    """
    import sys
    import xmlrunner  # type: ignore

    suite = unittest.TestSuite()
    suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(sys.modules[__name__]))

    runner = xmlrunner.XMLTestRunner(output='logs')
    result = runner.run(suite)
    return 1 if result.failures or result.errors else 0


############################################################################
# usage demo
############################################################################

def _eval_demo_1d(resolution, title):
    import sys
    from sasmodels import core
    from sasmodels import direct_model
    name = sys.argv[1] if len(sys.argv) > 1 else 'cylinder'

    if name == 'cylinder':
        pars = {'length':210, 'radius':500, 'background': 0}
    elif name == 'teubner_strey':
        pars = {'a2':0.003, 'c1':-1e4, 'c2':1e10, 'background':0.312643}
    elif name == 'sphere' or name == 'spherepy':
        pars = TEST_PARS_SLIT_SPHERE
    elif name == 'ellipsoid':
        pars = {
            'scale':0.05, 'background': 0,
            'radius_polar':500, 'radius_equatorial':15000,
            'sld':6, 'sld_solvent': 1,
            }
    else:
        pars = {}
    model_info = core.load_model_info(name)
    model = core.build_model(model_info)

    kernel = model.make_kernel([resolution.q_calc])
    theory = direct_model.call_kernel(kernel, pars)
    Iq = resolution.apply(theory)

    if isinstance(resolution, Slit1D):
        width, height = resolution.qx_width, resolution.qy_width
        Iq_romb = romberg_slit_1d(resolution.q, width, height, model, pars)
    else:
        dq = resolution.q_width
        Iq_romb = romberg_pinhole_1d(resolution.q, dq, model, pars)

    import matplotlib.pyplot as plt  # type: ignore
    plt.loglog(resolution.q_calc, theory, label='unsmeared')
    plt.loglog(resolution.q, Iq, label='smeared')
    plt.loglog(resolution.q, Iq_romb, label='romberg smeared')
    plt.legend()
    plt.title(title)
    plt.xlabel("Q (1/Ang)")
    plt.ylabel("I(Q) (1/cm)")

def demo_pinhole_1d():
    """
    Show example of pinhole smearing.
    """
    q = np.logspace(-4, np.log10(0.2), 400)
    q_width = 0.1*q
    resolution = Pinhole1D(q, q_width)
    _eval_demo_1d(resolution, title="10% dQ/Q Pinhole Resolution")

def demo_slit_1d():
    """
    Show example of slit smearing.
    """
    q = np.logspace(-4, np.log10(0.2), 100)
    w = h = 0.
    #w = 0.000000277790
    w = 0.0277790
    #h = 0.00277790
    #h = 0.0277790
    resolution = Slit1D(q, w, h)
    _eval_demo_1d(resolution, title="(%g,%g) Slit Resolution"%(w, h))

def demo():
    """
    Run the resolution demos.
    """
    import matplotlib.pyplot as plt  # type: ignore
    plt.subplot(121)
    demo_pinhole_1d()
    #plt.yscale('linear')
    plt.subplot(122)
    demo_slit_1d()
    #plt.yscale('linear')
    plt.show()


if __name__ == "__main__":
    #demo()
    main()