source: sasmodels/sasmodels/resolution.py @ 589b740

Last change on this file: 589b740, checked in by jhbakker, 7 years ago

test branch for SEsans class in sesans.py

1"""
2Define the resolution functions for the data.
3
4This defines classes for 1D and 2D resolution calculations.
5"""
6from __future__ import division
7
8from scipy.special import erf  # type: ignore
9from numpy import sqrt, log, log10, exp, pi  # type: ignore
10import numpy as np  # type: ignore
11
12from sasmodels import sesans
13
14__all__ = ["Resolution", "Perfect1D", "Pinhole1D", "Slit1D",
15           "apply_resolution_matrix", "pinhole_resolution", "slit_resolution",
16           "pinhole_extend_q", "slit_extend_q", "bin_edges",
17           "interpolate", "linear_extrapolation", "geometric_extrapolation",
18          ]
19
20MINIMUM_RESOLUTION = 1e-8
21
22
23# When extrapolating to -q, what is the minimum positive q relative to q_min
24# that we wish to calculate?
25MIN_Q_SCALE_FOR_NEGATIVE_Q_EXTRAPOLATION = 0.01
26
class Resolution(object):
    """
    Abstract base class defining a 1D resolution function.

    *q* is the set of q values at which the data is measured.

    *q_calc* is the set of q values at which the theory needs to be evaluated.
    This may extend and interpolate the q values.

    *apply* is the method to call with I(q_calc) to compute the resolution
    smeared theory I(q).
    """
    q = None  # type: np.ndarray
    q_calc = None  # type: np.ndarray
    def apply(self, theory):
        """
        Smear *theory* by the resolution function, returning *Iq*.
        """
        raise NotImplementedError("Subclass does not define the apply function")

class Perfect1D(Resolution):
    """
    Resolution function to use when there is no actual resolution smearing
    to be applied.  It has the same interface as the other resolution
    functions, but returns the identity function.
    """
    def __init__(self, q):
        self.q_calc = self.q = q

    def apply(self, theory):
        return theory

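
# Illustrative sketch (not part of the original module): the Resolution
# contract is to evaluate the theory on resolution.q_calc and then smear it
# back onto the measurement points with resolution.apply().  The power-law
# "theory" below is a stand-in for a real scattering model.
def _example_resolution_interface():
    q = np.linspace(0.001, 0.1, 50)            # measurement points
    dq = 0.05 * q                              # assumed 5% dQ/Q pinhole width
    for resolution in (Perfect1D(q), Pinhole1D(q, dq)):
        theory = (resolution.q_calc + 0.01) ** -2   # I(q) on the extended grid
        Iq = resolution.apply(theory)               # smeared I(q) on q
        assert Iq.shape == q.shape
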

class SESANS1D(Resolution):
    """
    Resolution wrapper for SESANS data, which delegates the Hankel transform
    to :mod:`sesans`.  *H0* and *H* are the transform matrices and *q_calc*
    is the set of q values at which the theory is evaluated (see the
    SesansTransform class under development in sesans.py).
    """
    def __init__(self, data, H0, H, q_calc):
        self.q = data.x
        self.H0 = H0
        self.H = H
        self.data = data
        self.q_calc = q_calc

    def apply(self, theory):
        return sesans.SesansTransform.apply(theory)
        #return sesans.hankeltrafo(self.H0, self.H, theory)

# Previous SESANS resolution implementation, kept for reference:
#    def __init__(self, data, q_calc):
#        self.q = data.x
#        self.data = data
#        self.q_calc = q_calc
#
#    def apply(self, theory):
#        return sesans.transform(self.data, self.q_calc, theory, None, None)
class Pinhole1D(Resolution):
    r"""
    Pinhole aperture with q-dependent gaussian resolution.

    *q* points at which the data is measured.

    *q_width* gaussian 1-sigma resolution at each data point.

    *q_calc* is the list of points to calculate, or None if this should
    be estimated from the *q* and *q_width*.
    """
    def __init__(self, q, q_width, q_calc=None, nsigma=3):
        #*min_step* is the minimum point spacing to use when computing the
        #underlying model.  It should be on the order of
        #$\tfrac{1}{10}\tfrac{2\pi}{d_\text{max}}$ to make sure that fringes
        #are computed with sufficient density to avoid aliasing effects.

        # Protect against calls with q_width=0.  The extend_q function will
        # not extend the q if q_width is 0, but q_width must be non-zero when
        # constructing the weight matrix to avoid division by zero errors.
        # In practice this should never be needed, since resolution should
        # default to Perfect1D if the pinhole geometry is not defined.
        self.q, self.q_width = q, q_width
        self.q_calc = (pinhole_extend_q(q, q_width, nsigma=nsigma)
                       if q_calc is None else np.sort(q_calc))
        self.weight_matrix = pinhole_resolution(self.q_calc, self.q,
                                np.maximum(q_width, MINIMUM_RESOLUTION))

    def apply(self, theory):
        return apply_resolution_matrix(self.weight_matrix, theory)


class Slit1D(Resolution):
    """
    Slit aperture with resolution function.

    *q* points at which the data is measured.

    *qx_width* slit width in qx.

    *qy_width* slit height in qy.

    *q_calc* is the list of points to calculate, or None if this should
    be estimated from the *q*, *qx_width* and *qy_width*.

    The *weight_matrix* is computed by :func:`slit_resolution`.
    """
    def __init__(self, q, qx_width, qy_width=0., q_calc=None):
        # Remember what width/height was used even though we won't need them
        # after the weight matrix is constructed
        self.qx_width, self.qy_width = qx_width, qy_width

        # Allow independent resolution on each point even though it is not
        # needed in practice.
        if np.isscalar(qx_width):
            qx_width = np.ones(len(q))*qx_width
        else:
            qx_width = np.asarray(qx_width)
        if np.isscalar(qy_width):
            qy_width = np.ones(len(q))*qy_width
        else:
            qy_width = np.asarray(qy_width)

        self.q = q.flatten()
        self.q_calc = slit_extend_q(q, qx_width, qy_width) \
            if q_calc is None else np.sort(q_calc)
        self.weight_matrix = \
            slit_resolution(self.q_calc, self.q, qx_width, qy_width)

    def apply(self, theory):
        return apply_resolution_matrix(self.weight_matrix, theory)


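# Illustrative sketch (not part of the original module): slit smearing with
# the same slit setting as the Igor comparison data further down (0.117 1/Ang
# passed as qx_width, zero height).  The Porod-like theory is a stand-in for
# a real model.
def _example_slit_usage():
    q = np.logspace(-4, -2, 40)
    resolution = Slit1D(q, qx_width=0.117, qy_width=0.)
    theory = resolution.q_calc ** -4           # smooth stand-in model
    Iq = resolution.apply(theory)              # slit-smeared I(q) on q
    return Iq
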
def apply_resolution_matrix(weight_matrix, theory):
    """
    Apply the resolution weight matrix to the computed theory function.
    """
    #print("apply shapes", theory.shape, weight_matrix.shape)
    Iq = np.dot(theory[None, :], weight_matrix)
    #print("result shape",Iq.shape)
    return Iq.flatten()

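# Illustrative sketch (not part of the original module): the weight matrix is
# stored with shape (len(q_calc), len(q)), so smearing is a row vector times
# matrix product.  Smearing a constant theory returns the same constant since
# each column of weights is normalized to sum to one.
def _example_weight_matrix_orientation():
    q = np.linspace(0.01, 0.1, 10)
    q_calc = np.linspace(0.005, 0.12, 200)
    W = pinhole_resolution(q_calc, q, np.full_like(q, 0.005))
    assert W.shape == (len(q_calc), len(q))
    Iq = apply_resolution_matrix(W, np.ones_like(q_calc))
    assert np.allclose(Iq, 1.0)
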

def pinhole_resolution(q_calc, q, q_width):
    """
    Compute the convolution matrix *W* for pinhole resolution 1-D data.

    Each column *W[:, i]* gives the normalized weights with which the points
    *q_calc* contribute to the resolution smeared point *q[i]*.  Given *W*,
    the resolution smearing can be computed using *dot(I(q_calc), W)*, as in
    :func:`apply_resolution_matrix`.

    *q_calc* must be increasing.  *q_width* must be greater than zero.
    """
    # The current algorithm is a midpoint rectangle rule.  In the test case,
    # neither trapezoid nor Simpson's rule improved the accuracy.
    edges = bin_edges(q_calc)
    edges[edges < 0.0] = 0.0 # clip edges below zero
    cdf = erf((edges[:, None] - q[None, :]) / (sqrt(2.0)*q_width)[None, :])
    weights = cdf[1:] - cdf[:-1]
    weights /= np.sum(weights, axis=0)[None, :]
    return weights


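# Illustrative sketch (not part of the original module): cross-check the
# binned weights against direct numerical convolution with a Gaussian for a
# smooth test function.  The tolerance is loose since the weights use a
# midpoint rule on the q_calc bins.
def _example_pinhole_weights_check():
    q = np.array([0.05])
    dq = np.array([0.005])
    q_calc = np.linspace(0.01, 0.09, 801)
    W = pinhole_resolution(q_calc, q, dq)
    Iq_calc = np.exp(-30 * q_calc)                   # smooth test function
    smeared = apply_resolution_matrix(W, Iq_calc)[0]
    kernel = gaussian(q_calc, q[0], dq[0])           # gaussian() defined below
    direct = np.trapz(kernel * Iq_calc, q_calc) / np.trapz(kernel, q_calc)
    assert abs(smeared - direct) / direct < 1e-3
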
def slit_resolution(q_calc, q, width, height, n_height=30):
    r"""
    Build a weight matrix to compute *I_s(q)* from *I(q_calc)*, given
    $q_\perp$ = *width* and $q_\parallel$ = *height*.  *n_height* is
    the number of steps to use in the integration over $q_\parallel$
    when both $q_\perp$ and $q_\parallel$ are non-zero.

    Each $q$ can have an independent width and height value even though
    current instruments use the same slit setting for all measured points.

    If slit height is large relative to width, use:

    .. math::

        I_s(q_i) = \frac{1}{\Delta q_\perp}
            \int_0^{\Delta q_\perp}
                I\left(\sqrt{q_i^2 + q_\perp^2}\right) \,dq_\perp

    If slit width is large relative to height, use:

    .. math::

        I_s(q_i) = \frac{1}{2 \Delta q_\parallel}
            \int_{-\Delta q_\parallel}^{\Delta q_\parallel}
                I\left(|q_i + q_\parallel|\right) \,dq_\parallel

    For a mixture of slit width and height use:

    .. math::

        I_s(q_i) = \frac{1}{2 \Delta q_\parallel \Delta q_\perp}
            \int_{-\Delta q_\parallel}^{\Delta q_\parallel}
            \int_0^{\Delta q_\perp}
                I\left(\sqrt{(q_i + q_\parallel)^2 + q_\perp^2}\right)
                \,dq_\perp dq_\parallel

    **Definition**

    We are using the mid-point integration rule to assign weights to each
    element of a weight matrix $W$ so that

    .. math::

        I_s(q) = W\,I(q_\text{calc})

    If *q_calc* is at the mid-point, we can infer the bin edges from the
    pairwise averages of *q_calc*, adding the missing edges before
    *q_calc[0]* and after *q_calc[-1]*.

    For $q_\parallel = 0$, the smeared value can be computed numerically
    using the $u$ substitution

    .. math::

        u_j = \sqrt{q_j^2 - q^2}

    This gives

    .. math::

        I_s(q) \approx \sum_j I(u_j) \Delta u_j

    where $I(u_j)$ is the value at the mid-point, and $\Delta u_j$ is the
    difference between consecutive edges which have been first converted
    to $u$.  Only $u_j \in [0, \Delta q_\perp]$ are used, which corresponds
    to $q_j \in \left[q, \sqrt{q^2 + \Delta q_\perp^2}\right]$, so

    .. math::

        W_{ij} = \frac{1}{\Delta q_\perp} \Delta u_j
               = \frac{1}{\Delta q_\perp} \left(
                    \sqrt{q_{j+1}^2 - q_i^2} - \sqrt{q_j^2 - q_i^2} \right)
            \ \text{if}\  q_j \in \left[q_i, \sqrt{q_i^2 + \Delta q_\perp^2}\right]

    where $I_s(q_i)$ is the theory function being computed and $q_j$ are the
    mid-points between the calculated values in *q_calc*.  We tweak the
    edges of the initial and final intervals so that they lie on integration
    limits.

    (To be precise, the transformed midpoint $u(q_j)$ is not necessarily the
    midpoint of the edges $u((q_{j-1}+q_j)/2)$ and $u((q_j + q_{j+1})/2)$,
    but it is at least in the interval, so the approximation is going to be
    a little better than the left or right Riemann sum, and should be
    good enough for our purposes.)

    For $q_\perp = 0$, the $u$ substitution is simpler:

    .. math::

        u_j = \left|q_j - q\right|

    so

    .. math::

        W_{ij} = \frac{1}{2 \Delta q_\parallel} \Delta u_j
            = \frac{1}{2 \Delta q_\parallel} (q_{j+1} - q_j)
            \ \text{if}\ q_j \in
                \left[q-\Delta q_\parallel, q+\Delta q_\parallel\right]

    However, we need to support cases where $u_j < 0$, which means using
    $2 (q_{j+1} - q_j)$ when $q_j \in \left[0, q_\parallel-q_i\right]$.
    This is not an issue for $q_i > q_\parallel$.

    For both $q_\perp > 0$ and $q_\parallel > 0$ we perform a 2 dimensional
    integration with

    .. math::

        u_{jk} = \sqrt{q_j^2 - (q + (k\Delta q_\parallel/L))^2}
            \ \text{for}\ k = -L \ldots L

    for $L$ = *n_height*.  This gives

    .. math::

        W_{ij} = \frac{1}{2 \Delta q_\perp \Delta q_\parallel}
            \sum_{k=-L}^L \Delta u_{jk}
                \left(\frac{2 \Delta q_\parallel}{2 L + 1}\right)


    """
    #np.set_printoptions(precision=6, linewidth=10000)

    # The current algorithm is a midpoint rectangle rule.
    q_edges = bin_edges(q_calc) # Note: requires q > 0
    q_edges[q_edges < 0.0] = 0.0 # clip edges below zero
    weights = np.zeros((len(q), len(q_calc)), 'd')

    #print(q_calc)
    for i, (qi, w, h) in enumerate(zip(q, width, height)):
        if w == 0. and h == 0.:
            # Perfect resolution, so return the theory value directly.
            # Note: assumes that q is a subset of q_calc.  If qi need not be
            # in q_calc, then we can do a weighted interpolation by looking
            # up qi in q_calc, then weighting the result by the relative
            # distance to the neighbouring points.
            weights[i, :] = (q_calc == qi)
        elif h == 0:
            weights[i, :] = _q_perp_weights(q_edges, qi, w)
        elif w == 0:
            in_x = 1.0 * ((q_calc >= qi-h) & (q_calc <= qi+h))
            abs_x = 1.0*(q_calc < abs(qi - h)) if qi < h else 0.
            #print(qi - h, qi + h)
            #print(in_x + abs_x)
            weights[i, :] = (in_x + abs_x) * np.diff(q_edges) / (2*h)
        else:
            for k in range(-n_height, n_height+1):
                weights[i, :] += _q_perp_weights(q_edges, qi+k*h/n_height, w)
            weights[i, :] /= 2*n_height + 1

    return weights.T


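# Illustrative sketch (not part of the original module): for a pure q_perp
# slit (zero height) the weights should reproduce the direct integral
# (1/dq_perp) * int_0^{dq_perp} I(sqrt(q^2 + u^2)) du for a smooth theory.
# scipy.integrate.quad provides the reference value; the tolerance is loose
# since the weights use a midpoint rule on the q_calc grid.
def _example_slit_weights_check():
    from scipy.integrate import quad
    qi, w = 5e-4, 0.117
    q = np.array([qi])
    q_calc = geometric_extrapolation(np.logspace(-4, -1, 400),
                                     qi, np.sqrt(qi**2 + w**2))
    W = slit_resolution(q_calc, q, [w], [0.])
    Iq_calc = q_calc ** -2                     # smooth stand-in theory
    smeared = apply_resolution_matrix(W, Iq_calc)[0]
    direct = quad(lambda u: (qi**2 + u**2) ** -1, 0, w)[0] / w
    assert abs(smeared - direct) / direct < 0.02
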
def _q_perp_weights(q_edges, qi, w):
    # Convert bin edges from q to u
    u_limit = np.sqrt(qi**2 + w**2)
    u_edges = q_edges**2 - qi**2
    u_edges[q_edges < abs(qi)] = 0.
    u_edges[q_edges > u_limit] = u_limit**2 - qi**2
    weights = np.diff(np.sqrt(u_edges))/w
    return weights


def pinhole_extend_q(q, q_width, nsigma=3):
    """
    Given *q* and *q_width*, find a set of sampling points *q_calc* so
    that each point $I(q)$ has sufficient support from the underlying
    function.
    """
    q_min, q_max = np.min(q - nsigma*q_width), np.max(q + nsigma*q_width)
    return linear_extrapolation(q, q_min, q_max)


def slit_extend_q(q, width, height):
    """
    Given *q*, *width* and *height*, find a set of sampling points *q_calc* so
    that each point I(q) has sufficient support from the underlying
    function.
    """
    q_min, q_max = np.min(q-height), np.max(np.sqrt((q+height)**2 + width**2))

    return geometric_extrapolation(q, q_min, q_max)


def bin_edges(x):
    """
    Determine bin edges from bin centers, assuming that edges are centered
    between the bins.

    Note: this uses the arithmetic mean, which may not be appropriate for
    log-scaled data.
    """
    if len(x) < 2 or (np.diff(x) < 0).any():
        raise ValueError("Expected bins to be an increasing set")
    edges = np.hstack([
        x[0]  - 0.5*(x[1]  - x[0]),  # first point minus half first interval
        0.5*(x[1:] + x[:-1]),        # mid points of all central intervals
        x[-1] + 0.5*(x[-1] - x[-2]), # last point plus half last interval
        ])
    return edges


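# Illustrative sketch (not part of the original module): bin_edges() turns n
# centers into n+1 edges, extrapolating half an interval beyond each end.
def _example_bin_edges():
    centers = np.array([1.0, 2.0, 4.0])
    edges = bin_edges(centers)                 # -> [0.5, 1.5, 3.0, 5.0]
    assert np.allclose(edges, [0.5, 1.5, 3.0, 5.0])
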
def interpolate(q, max_step):
    """
    Returns *q_calc* with points spaced at most max_step apart.
    """
    step = np.diff(q)
    index = step > max_step
    if np.any(index):
        inserts = []
        for q_i, step_i in zip(q[:-1][index], step[index]):
            n = np.ceil(step_i/max_step)
            inserts.extend(q_i + np.arange(1, n)*(step_i/n))
        # Extend a couple of fringes beyond the end of the data
        inserts.extend(q[-1] + np.arange(1, 8)*max_step)
        q_calc = np.sort(np.hstack((q, inserts)))
    else:
        q_calc = q
    return q_calc


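# Illustrative sketch (not part of the original module): interpolate() fills
# in coarse regions of q and tacks a few extra steps past the last point.
def _example_interpolate():
    q = np.array([0.1, 0.2, 0.6])
    q_calc = interpolate(q, max_step=0.25)
    assert np.any(np.isclose(q_calc, 0.4))     # the 0.2-0.6 gap gets a midpoint
    assert q_calc[-1] > q[-1]                  # and the grid extends past q[-1]
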
def linear_extrapolation(q, q_min, q_max):
    """
    Extrapolate *q* out to [*q_min*, *q_max*] using the step size in *q* as
    a guide.  Extrapolation below uses about the same size as the first
    interval.  Extrapolation above uses about the same size as the final
    interval.

    If *q_min* is zero or less then a small fraction of *q[0]* (set by
    *MIN_Q_SCALE_FOR_NEGATIVE_Q_EXTRAPOLATION*) is used instead.
    """
    q = np.sort(q)
    if q_min + 2*MINIMUM_RESOLUTION < q[0]:
        if q_min <= 0: q_min = q[0]*MIN_Q_SCALE_FOR_NEGATIVE_Q_EXTRAPOLATION
        n_low = int(np.ceil((q[0]-q_min) / (q[1]-q[0]))) if q[1] > q[0] else 15
        q_low = np.linspace(q_min, q[0], n_low+1)[:-1]
    else:
        q_low = []
    if q_max - 2*MINIMUM_RESOLUTION > q[-1]:
        n_high = int(np.ceil((q_max-q[-1]) / (q[-1]-q[-2]))) if q[-1] > q[-2] else 15
        q_high = np.linspace(q[-1], q_max, n_high+1)[1:]
    else:
        q_high = []
    return np.concatenate([q_low, q, q_high])


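# Illustrative sketch (not part of the original module): extending a uniform
# grid keeps roughly the same step size on both sides.
def _example_linear_extrapolation():
    q = np.linspace(0.02, 0.1, 9)              # step 0.01
    q_calc = linear_extrapolation(q, 0.005, 0.13)
    assert q_calc[0] <= 0.005 + 1e-12 and q_calc[-1] >= 0.13 - 1e-12
    assert np.all(np.diff(q_calc) < 0.015)     # steps stay near 0.01
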
def geometric_extrapolation(q, q_min, q_max, points_per_decade=None):
    r"""
    Extrapolate *q* to [*q_min*, *q_max*] using geometric steps, with the
    average geometric step size in *q* as the step size.

    If *q_min* is zero or less then a small fraction of *q[0]* (set by
    *MIN_Q_SCALE_FOR_NEGATIVE_Q_EXTRAPOLATION*) is used instead.

    *points_per_decade* sets the ratio between consecutive steps such
    that there will be $n$ points used for every factor of 10 increase
    in *q*.

    If *points_per_decade* is not given, it will be estimated as follows.
    Starting at $q_1$ and stepping geometrically by $\Delta q$ to $q_n$
    in $n$ points gives a geometric average of:

    .. math::

         \log \Delta q = (\log q_n - \log q_1) / (n - 1)

    From this we can compute the number of steps required to extend $q$
    from $q_n$ to $q_\text{max}$ by $\Delta q$ as:

    .. math::

         n_\text{extend} = (\log q_\text{max} - \log q_n) / \log \Delta q

    Substituting:

    .. math::

         n_\text{extend} = (n-1) (\log q_\text{max} - \log q_n)
            / (\log q_n - \log q_1)
    """
    q = np.sort(q)
    # log_delta_q is the number of extension points per unit change in log q
    if points_per_decade is None:
        log_delta_q = (len(q) - 1) / (log(q[-1]) - log(q[0]))
    else:
        log_delta_q = points_per_decade / log(10.)
    if q_min < q[0]:
        if q_min <= 0: q_min = q[0]*MIN_Q_SCALE_FOR_NEGATIVE_Q_EXTRAPOLATION
        n_low = log_delta_q * (log(q[0])-log(q_min))
        q_low = np.logspace(log10(q_min), log10(q[0]), int(np.ceil(n_low))+1)[:-1]
    else:
        q_low = []
    if q_max > q[-1]:
        n_high = log_delta_q * (log(q_max)-log(q[-1]))
        q_high = np.logspace(log10(q[-1]), log10(q_max), int(np.ceil(n_high))+1)[1:]
    else:
        q_high = []
    return np.concatenate([q_low, q, q_high])


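# Illustrative sketch (not part of the original module): with no
# points_per_decade, the extension reuses the average log-spacing of the
# input, so data with 50 points per decade is extended at the same density.
def _example_geometric_extrapolation():
    q = np.logspace(-3, -1, 101)               # 50 points per decade
    q_calc = geometric_extrapolation(q, 1e-4, 1.0)
    low = q_calc[q_calc < q[0]]
    # One extra decade below should add roughly 50 points.
    assert 45 <= len(low) <= 55
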
############################################################################
# unit tests
############################################################################
import unittest


def eval_form(q, form, pars):
    """
    Return the SAS model evaluated at *q*.

    *form* is the SAS model returned from :func:`core.load_model`.

    *pars* are the parameter values to use when evaluating.
    """
    from sasmodels import direct_model
    kernel = form.make_kernel([q])
    theory = direct_model.call_kernel(kernel, pars)
    kernel.release()
    return theory


def gaussian(q, q0, dq):
    """
    Return the Gaussian resolution function.

    *q0* is the center, *dq* is the width and *q* are the points to evaluate.
    """
    return exp(-0.5*((q-q0)/dq)**2)/(sqrt(2*pi)*dq)


def romberg_slit_1d(q, width, height, form, pars):
    """
    Romberg integration for slit resolution.

    This is an adaptive integration technique.  It is called with settings
    that make it slow to evaluate but give it good accuracy.
    """
    from scipy.integrate import romberg  # type: ignore

    par_set = set([p.name for p in form.info.parameters.call_parameters])
    if any(k not in par_set for k in pars.keys()):
        extra = set(pars.keys()) - par_set
        raise ValueError("bad parameters: [%s] not in [%s]"
                         % (", ".join(sorted(extra)),
                            ", ".join(sorted(pars.keys()))))

    if np.isscalar(width):
        width = [width]*len(q)
    if np.isscalar(height):
        height = [height]*len(q)
    _int_w = lambda w, qi: eval_form(sqrt(qi**2 + w**2), form, pars)
    _int_h = lambda h, qi: eval_form(abs(qi+h), form, pars)
    # If both width and height are defined, then it is too slow to use dblquad.
    # Instead use trapz on a fixed grid, interpolated into the I(Q) for
    # the extended Q range.
    #_int_wh = lambda w, h, qi: eval_form(sqrt((qi+h)**2 + w**2), form, pars)
    q_calc = slit_extend_q(q, np.asarray(width), np.asarray(height))
    Iq = eval_form(q_calc, form, pars)
    result = np.empty(len(q))
    for i, (qi, w, h) in enumerate(zip(q, width, height)):
        if h == 0.:
            total = romberg(_int_w, 0, w, args=(qi,),
                            divmax=100, vec_func=True, tol=0, rtol=1e-8)
            result[i] = total/w
        elif w == 0.:
            total = romberg(_int_h, -h, h, args=(qi,),
                            divmax=100, vec_func=True, tol=0, rtol=1e-8)
            result[i] = total/(2*h)
        else:
            w_grid = np.linspace(0, w, 21)[None, :]
            h_grid = np.linspace(-h, h, 23)[:, None]
            u_sub = sqrt((qi+h_grid)**2 + w_grid**2)
            f_at_u = np.interp(u_sub, q_calc, Iq)
            #print(np.trapz(Iu, w_grid, axis=1))
            total  = np.trapz(np.trapz(f_at_u, w_grid, axis=1), h_grid[:, 0])
            result[i] = total / (2*h*w)
            # from scipy.integrate import dblquad
            # r, err = dblquad(_int_wh, -h, h, lambda h: 0., lambda h: w,
            #                  args=(qi,))
            # result[i] = r/(w*2*h)

    # r should be [float, ...], but it is [array([float]), array([float]),...]
    return result


def romberg_pinhole_1d(q, q_width, form, pars, nsigma=5):
    """
    Romberg integration for pinhole resolution.

    This is an adaptive integration technique.  It is called with settings
    that make it slow to evaluate but give it good accuracy.
    """
    from scipy.integrate import romberg  # type: ignore

    par_set = set([p.name for p in form.info.parameters.call_parameters])
    if any(k not in par_set for k in pars.keys()):
        extra = set(pars.keys()) - par_set
        raise ValueError("bad parameters: [%s] not in [%s]"
                         % (", ".join(sorted(extra)),
                            ", ".join(sorted(pars.keys()))))

    func = lambda q, q0, dq: eval_form(q, form, pars)*gaussian(q, q0, dq)
    total = [romberg(func, max(qi-nsigma*dqi, 1e-10*q[0]), qi+nsigma*dqi,
                     args=(qi, dqi), divmax=100, vec_func=True,
                     tol=0, rtol=1e-8)
             for qi, dqi in zip(q, q_width)]
    return np.asarray(total).flatten()


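# Illustrative sketch (not part of the original module): the same kind of
# convolution check as romberg_pinhole_1d, but for a plain analytic I(q)
# rather than a sasmodels kernel, so it runs without loading a model.
def _example_pinhole_vs_quadrature():
    from scipy.integrate import quad
    Iq_func = lambda q: 1.0 / (1.0 + (50.0*q)**2)    # Lorentzian stand-in
    q = np.array([0.02, 0.05])
    dq = np.array([0.002, 0.005])
    resolution = Pinhole1D(q, dq, q_calc=np.linspace(1e-4, 0.1, 2000))
    smeared = resolution.apply(Iq_func(resolution.q_calc))
    for qi, dqi, got in zip(q, dq, smeared):
        expected = quad(lambda u: Iq_func(u)*gaussian(u, qi, dqi),
                        max(qi - 5*dqi, 1e-10), qi + 5*dqi)[0]
        assert abs(got - expected) / expected < 0.01

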
class ResolutionTest(unittest.TestCase):
    """
    Test the resolution calculations.
    """

    def setUp(self):
        self.x = 0.001*np.arange(1, 11)
        self.y = self.Iq(self.x)

    def Iq(self, q):
        "Linear function for resolution unit test"
        return 12.0 - 1000.0*q

    def test_perfect(self):
        """
        Perfect resolution and no smearing.
        """
        resolution = Perfect1D(self.x)
        theory = self.Iq(resolution.q_calc)
        output = resolution.apply(theory)
        np.testing.assert_equal(output, self.y)

    def test_slit_zero(self):
        """
        Slit smearing with perfect resolution.
        """
        resolution = Slit1D(self.x, qx_width=0, qy_width=0, q_calc=self.x)
        theory = self.Iq(resolution.q_calc)
        output = resolution.apply(theory)
        np.testing.assert_equal(output, self.y)

    @unittest.skip("not yet supported")
    def test_slit_high(self):
        """
        Slit smearing with height 0.005
        """
        resolution = Slit1D(self.x, qx_width=0, qy_width=0.005, q_calc=self.x)
        theory = self.Iq(resolution.q_calc)
        output = resolution.apply(theory)
        answer = [
            9.0618, 8.6402, 8.1187, 7.1392, 6.1528,
            5.5555, 4.5584, 3.5606, 2.5623, 2.0000,
            ]
        np.testing.assert_allclose(output, answer, atol=1e-4)

    @unittest.skip("not yet supported")
    def test_slit_both_high(self):
        """
        Slit smearing with width < 100*height.
        """
        q = np.logspace(-4, -1, 10)
        resolution = Slit1D(q, qx_width=0.2, qy_width=np.inf)
        theory = 1000*self.Iq(resolution.q_calc**4)
        output = resolution.apply(theory)
        answer = [
            8.85785, 8.43012, 7.92687, 6.94566, 6.03660,
            5.40363, 4.40655, 3.40880, 2.41058, 2.00000,
            ]
        np.testing.assert_allclose(output, answer, atol=1e-4)

    @unittest.skip("not yet supported")
    def test_slit_wide(self):
        """
        Slit smearing with width 0.0002
        """
        resolution = Slit1D(self.x, qx_width=0.0002, qy_width=0, q_calc=self.x)
        theory = self.Iq(resolution.q_calc)
        output = resolution.apply(theory)
        answer = [
            11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0,
            ]
        np.testing.assert_allclose(output, answer, atol=1e-4)

    @unittest.skip("not yet supported")
    def test_slit_both_wide(self):
        """
        Slit smearing with width > 100*height.
        """
        resolution = Slit1D(self.x, qx_width=0.0002, qy_width=0.000001,
                            q_calc=self.x)
        theory = self.Iq(resolution.q_calc)
        output = resolution.apply(theory)
        answer = [
            11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0,
            ]
        np.testing.assert_allclose(output, answer, atol=1e-4)

    def test_pinhole_zero(self):
        """
        Pinhole smearing with perfect resolution
        """
        resolution = Pinhole1D(self.x, 0.0*self.x)
        theory = self.Iq(resolution.q_calc)
        output = resolution.apply(theory)
        np.testing.assert_equal(output, self.y)

    def test_pinhole(self):
        """
        Pinhole smearing with dQ = 0.001 [Note: not dQ/Q = 0.001]
        """
        resolution = Pinhole1D(self.x, 0.001*np.ones_like(self.x),
                               q_calc=self.x)
        theory = 12.0-1000.0*resolution.q_calc
        output = resolution.apply(theory)
        answer = [
            10.44785079, 9.84991299, 8.98101708,
            7.99906585, 6.99998311, 6.00001689,
            5.00093415, 4.01898292, 3.15008701, 2.55214921,
            ]
        np.testing.assert_allclose(output, answer, atol=1e-8)


class IgorComparisonTest(unittest.TestCase):
    """
    Test resolution calculations against those returned by Igor.
    """

    def setUp(self):
        self.pars = TEST_PARS_PINHOLE_SPHERE
        from sasmodels import core
        self.model = core.load_model("sphere", dtype='double')

    def _eval_sphere(self, pars, resolution):
        from sasmodels import direct_model
        kernel = self.model.make_kernel([resolution.q_calc])
        theory = direct_model.call_kernel(kernel, pars)
        result = resolution.apply(theory)
        kernel.release()
        return result

    def _compare(self, q, output, answer, tolerance):
        #err = (output - answer)/answer
        #idx = abs(err) >= tolerance
        #problem = zip(q[idx], output[idx], answer[idx], err[idx])
        #print("\n".join(str(v) for v in problem))
        np.testing.assert_allclose(output, answer, rtol=tolerance)

    def test_perfect(self):
        """
        Compare sphere model with NIST Igor SANS, no resolution smearing.
        """
        pars = TEST_PARS_SLIT_SPHERE
        data_string = TEST_DATA_SLIT_SPHERE

        data = np.loadtxt(data_string.split('\n')).T
        q, _, answer, _ = data
        resolution = Perfect1D(q)
        output = self._eval_sphere(pars, resolution)
        self._compare(q, output, answer, 1e-6)

    def test_pinhole(self):
        """
        Compare pinhole resolution smearing with NIST Igor SANS
        """
        pars = TEST_PARS_PINHOLE_SPHERE
        data_string = TEST_DATA_PINHOLE_SPHERE

        data = np.loadtxt(data_string.split('\n')).T
        q, q_width, answer = data
        resolution = Pinhole1D(q, q_width)
        output = self._eval_sphere(pars, resolution)
        # TODO: relative error should be lower
        self._compare(q, output, answer, 3e-4)

    def test_pinhole_romberg(self):
        """
        Compare pinhole resolution smearing with romberg integration result.
        """
        pars = TEST_PARS_PINHOLE_SPHERE
        data_string = TEST_DATA_PINHOLE_SPHERE
        pars['radius'] *= 5

        data = np.loadtxt(data_string.split('\n')).T
        q, q_width, answer = data
        answer = romberg_pinhole_1d(q, q_width, self.model, pars)
        ## Getting 0.1% requires 5 sigma and 200 points per fringe
        #q_calc = interpolate(pinhole_extend_q(q, q_width, nsigma=5),
        #                     2*np.pi/pars['radius']/200)
        #tol = 0.001
        ## The default 3 sigma and no extra points gets 1%
        q_calc = None  # type: np.ndarray
        tol = 0.01
        resolution = Pinhole1D(q, q_width, q_calc=q_calc)
        output = self._eval_sphere(pars, resolution)
        if 0: # debug plot
            import matplotlib.pyplot as plt  # type: ignore
            resolution = Perfect1D(q)
            source = self._eval_sphere(pars, resolution)
            plt.loglog(q, source, '.')
            plt.loglog(q, answer, '-')
            plt.loglog(q, output, '-')
            plt.show()
        self._compare(q, output, answer, tol)

    def test_slit(self):
        """
        Compare slit resolution smearing with NIST Igor SANS
        """
        pars = TEST_PARS_SLIT_SPHERE
        data_string = TEST_DATA_SLIT_SPHERE

        data = np.loadtxt(data_string.split('\n')).T
        q, delta_qv, _, answer = data
        resolution = Slit1D(q, qx_width=delta_qv, qy_width=0)
        output = self._eval_sphere(pars, resolution)
        # TODO: eliminate Igor test since it is too inaccurate to be useful.
        # This means we can eliminate the test data as well, and instead
        # use a generated q vector.
        self._compare(q, output, answer, 0.5)

    def test_slit_romberg(self):
        """
        Compare slit resolution smearing with romberg integration result.
        """
        pars = TEST_PARS_SLIT_SPHERE
        data_string = TEST_DATA_SLIT_SPHERE

        data = np.loadtxt(data_string.split('\n')).T
        q, delta_qv, _, answer = data
        answer = romberg_slit_1d(q, delta_qv, 0., self.model, pars)
        q_calc = slit_extend_q(interpolate(q, 2*np.pi/pars['radius']/20),
                               delta_qv[0], 0.)
        resolution = Slit1D(q, qx_width=delta_qv, qy_width=0, q_calc=q_calc)
        output = self._eval_sphere(pars, resolution)
        # TODO: relative error should be lower
        self._compare(q, output, answer, 0.025)

    def test_ellipsoid(self):
        """
        Compare romberg integration for ellipsoid model.
        """
        from .core import load_model
        pars = {
            'scale':0.05,
            'radius_polar':500, 'radius_equatorial':15000,
            'sld':6, 'sld_solvent': 1,
            }
        form = load_model('ellipsoid', dtype='double')
        q = np.logspace(log10(4e-5), log10(2.5e-2), 68)
        width, height = 0.117, 0.
        resolution = Slit1D(q, qx_width=width, qy_width=height)
        answer = romberg_slit_1d(q, width, height, form, pars)
        output = resolution.apply(eval_form(resolution.q_calc, form, pars))
        # TODO: 10% is too much error; use better algorithm
        #print(np.max(abs(answer-output)/answer))
        self._compare(q, output, answer, 0.1)

    #TODO: can sas q spacing be too sparse for the resolution calculation?
    @unittest.skip("suppress sparse data test; not supported by current code")
    def test_pinhole_sparse(self):
        """
        Compare pinhole resolution smearing with NIST Igor SANS on sparse data
        """
        pars = TEST_PARS_PINHOLE_SPHERE
        data_string = TEST_DATA_PINHOLE_SPHERE

        data = np.loadtxt(data_string.split('\n')).T
        q, q_width, answer = data[:, ::20] # Take every nth point
        resolution = Pinhole1D(q, q_width)
        output = self._eval_sphere(pars, resolution)
        self._compare(q, output, answer, 1e-6)


# pinhole sphere parameters
TEST_PARS_PINHOLE_SPHERE = {
    'scale': 1.0, 'background': 0.01,
    'radius': 60.0, 'sld': 1, 'sld_solvent': 6.3,
    }
# Q, dQ, I(Q) calculated by NIST Igor SANS package
TEST_DATA_PINHOLE_SPHERE = """\
0.001278 0.0002847 2538.41176383
0.001562 0.0002905 2536.91820405
0.001846 0.0002956 2535.13182479
0.002130 0.0003017 2533.06217813
0.002414 0.0003087 2530.70378586
0.002698 0.0003165 2528.05024192
0.002982 0.0003249 2525.10408349
0.003266 0.0003340 2521.86667499
0.003550 0.0003437 2518.33907750
0.003834 0.0003539 2514.52246995
0.004118 0.0003646 2510.41798319
0.004402 0.0003757 2506.02690988
0.004686 0.0003872 2501.35067884
0.004970 0.0003990 2496.38678318
0.005253 0.0004112 2491.16237596
0.005537 0.0004237 2485.63911673
0.005821 0.0004365 2479.83657083
0.006105 0.0004495 2473.75676948
0.006389 0.0004628 2467.40145990
0.006673 0.0004762 2460.77293372
0.006957 0.0004899 2453.86724627
0.007241 0.0005037 2446.69623838
0.007525 0.0005177 2439.25775219
0.007809 0.0005318 2431.55421398
0.008093 0.0005461 2423.58785521
0.008377 0.0005605 2415.36158137
0.008661 0.0005750 2406.87009473
0.008945 0.0005896 2398.12841186
0.009229 0.0006044 2389.13360806
0.009513 0.0006192 2379.88958042
0.009797 0.0006341 2370.39776774
0.010080 0.0006491 2360.69528793
0.010360 0.0006641 2350.85169027
0.010650 0.0006793 2340.42023633
0.010930 0.0006945 2330.11206013
0.011220 0.0007097 2319.20109972
0.011500 0.0007251 2308.43503981
0.011780 0.0007404 2297.44820179
0.012070 0.0007558 2285.83853677
0.012350 0.0007713 2274.41290746
0.012640 0.0007868 2262.36219581
0.012920 0.0008024 2250.51169731
0.013200 0.0008180 2238.45596231
0.013490 0.0008336 2225.76495666
0.013770 0.0008493 2213.29618391
0.014060 0.0008650 2200.19110751
0.014340 0.0008807 2187.34050325
0.014620 0.0008965 2174.30529864
0.014910 0.0009123 2160.61632548
0.015190 0.0009281 2147.21038112
0.015470 0.0009440 2133.62023580
0.015760 0.0009598 2119.37907426
0.016040 0.0009757 2105.45234903
0.016330 0.0009916 2090.86319102
0.016610 0.0010080 2076.60576032
0.016890 0.0010240 2062.19214565
0.017180 0.0010390 2047.10550219
0.017460 0.0010550 2032.38715621
0.017740 0.0010710 2017.52560123
0.018030 0.0010880 2001.99124318
0.018310 0.0011040 1986.84662060
0.018600 0.0011200 1971.03389745
0.018880 0.0011360 1955.61395119
0.019160 0.0011520 1940.08291563
0.019450 0.0011680 1923.87672225
0.019730 0.0011840 1908.10656374
0.020020 0.0012000 1891.66297192
0.020300 0.0012160 1875.66789021
0.020580 0.0012320 1859.56357196
0.020870 0.0012490 1842.79468290
0.021150 0.0012650 1826.50064489
0.021430 0.0012810 1810.11533702
0.021720 0.0012970 1793.06840882
0.022000 0.0013130 1776.51153580
0.022280 0.0013290 1759.87201249
0.022570 0.0013460 1742.57354412
0.022850 0.0013620 1725.79397319
0.023140 0.0013780 1708.35831550
0.023420 0.0013940 1691.45256069
0.023700 0.0014110 1674.48561783
0.023990 0.0014270 1656.86525366
0.024270 0.0014430 1639.79847285
0.024550 0.0014590 1622.68887088
0.024840 0.0014760 1604.96421100
0.025120 0.0014920 1587.85768129
0.025410 0.0015080 1569.99297335
0.025690 0.0015240 1552.84580279
0.025970 0.0015410 1535.54074115
0.026260 0.0015570 1517.75249337
0.026540 0.0015730 1500.40115023
0.026820 0.0015900 1483.03632237
0.027110 0.0016060 1465.05942429
0.027390 0.0016220 1447.67682181
0.027670 0.0016390 1430.46495191
0.027960 0.0016550 1412.49232282
0.028240 0.0016710 1395.13182318
0.028520 0.0016880 1377.93439837
0.028810 0.0017040 1359.99528971
0.029090 0.0017200 1342.67274512
0.029370 0.0017370 1325.55375609
"""

# Slit sphere parameters
TEST_PARS_SLIT_SPHERE = {
    'scale': 0.01, 'background': 0.01,
    'radius': 60000, 'sld': 1, 'sld_solvent': 4,
    }
# Q dQ I(Q) I_smeared(Q)
TEST_DATA_SLIT_SPHERE = """\
2.26097e-05 0.117 5.5781372896e+09 1.4626077708e+06
2.53847e-05 0.117 5.0363141458e+09 1.3117318023e+06
2.81597e-05 0.117 4.4850108103e+09 1.1594863713e+06
3.09347e-05 0.117 3.9364658459e+09 1.0094881630e+06
3.37097e-05 0.117 3.4019975074e+09 8.6518941303e+05
3.92597e-05 0.117 2.4139519814e+09 6.0232158311e+05
4.48097e-05 0.117 1.5816877820e+09 3.8739994090e+05
5.03597e-05 0.117 9.3715407224e+08 2.2745304775e+05
5.59097e-05 0.117 4.8387917428e+08 1.2101295768e+05
6.14597e-05 0.117 2.0193586928e+08 6.0055107771e+04
6.70097e-05 0.117 5.5886110911e+07 3.2749521065e+04
7.25597e-05 0.117 3.7782348010e+06 2.6350963616e+04
7.81097e-05 0.117 5.3407817904e+06 2.9624963314e+04
8.36597e-05 0.117 2.7975485523e+07 3.4403962254e+04
8.92097e-05 0.117 4.9845448282e+07 3.6130017903e+04
9.47597e-05 0.117 6.0092588905e+07 3.3495107849e+04
1.00310e-04 0.117 5.6823430831e+07 2.7475726279e+04
1.05860e-04 0.117 4.3857024036e+07 2.0144282226e+04
1.11410e-04 0.117 2.7277144760e+07 1.3647403260e+04
1.22510e-04 0.117 3.3119334113e+06 6.6519711526e+03
1.33610e-04 0.117 1.4412859402e+06 6.9726212813e+03
1.44710e-04 0.117 8.5620162463e+06 8.1441335775e+03
1.55810e-04 0.117 9.6957429033e+06 6.4559996521e+03
1.66910e-04 0.117 4.3818341914e+06 3.6252154156e+03
1.78010e-04 0.117 2.7448997387e+05 2.4006505342e+03
1.89110e-04 0.117 8.0472009936e+05 2.8187789089e+03
2.00210e-04 0.117 2.8149552834e+06 3.0915662855e+03
2.11310e-04 0.117 2.7510907861e+06 2.3722530293e+03
2.22410e-04 0.117 1.0053133293e+06 1.4473468311e+03
2.33510e-04 0.117 5.8428305052e+03 1.2048540556e+03
2.44610e-04 0.117 5.1699305004e+05 1.4625670042e+03
2.55710e-04 0.117 1.2120227268e+06 1.5010705973e+03
2.66810e-04 0.117 9.7896842846e+05 1.1336343426e+03
2.77910e-04 0.117 2.5507264791e+05 8.1848818080e+02
3.05660e-04 0.117 5.2403101181e+05 7.4913374357e+02
3.33410e-04 0.117 5.8699343809e+04 4.4669964560e+02
3.61160e-04 0.117 3.0844327150e+05 4.6774007542e+02
3.88910e-04 0.117 8.3360142970e+03 2.7169550220e+02
4.16660e-04 0.117 1.8630080583e+05 3.0710983679e+02
4.44410e-04 0.117 3.1616804732e-01 1.7959006831e+02
4.72160e-04 0.117 1.1299016314e+05 2.0763952339e+02
4.99910e-04 0.117 2.9952522747e+03 1.2536542765e+02
5.27660e-04 0.117 6.7625695649e+04 1.4013969777e+02
5.55410e-04 0.117 7.6927460089e+03 8.2145593180e+01
6.10910e-04 0.117 1.1229057779e+04 8.4519745643e+01
6.66410e-04 0.117 1.3035567943e+04 8.1554625609e+01
7.21910e-04 0.117 1.3309931343e+04 7.4437319172e+01
7.77410e-04 0.117 1.2462626212e+04 6.4697088261e+01
8.32910e-04 0.117 1.0912927143e+04 5.3773301044e+01
8.88410e-04 0.117 9.0172597469e+03 4.2843375753e+01
9.43910e-04 0.117 7.0496495917e+03 3.2771032724e+01
9.99410e-04 0.117 5.2030483682e+03 2.4113557144e+01
1.05491e-03 0.117 3.5988976711e+03 1.7160773658e+01
1.11041e-03 0.117 2.2996060652e+03 1.2016626459e+01
1.22141e-03 0.117 6.4766590598e+02 6.0373017740e+00
1.33241e-03 0.117 4.1963483264e+01 4.5215452974e+00
1.44341e-03 0.117 6.3370708246e+01 5.1054681903e+00
1.55441e-03 0.117 3.0736750577e+02 5.9176165298e+00
1.66541e-03 0.117 5.0327682399e+02 5.9815000189e+00
1.77641e-03 0.117 5.4084331454e+02 5.1634639625e+00
1.88741e-03 0.117 4.3488671756e+02 3.8535158148e+00
1.99841e-03 0.117 2.6322287860e+02 2.5824997753e+00
2.10941e-03 0.117 1.0793633150e+02 1.7315517194e+00
2.22041e-03 0.117 1.8474448850e+01 1.4077213604e+00
2.33141e-03 0.117 1.5864062279e+00 1.4771560682e+00
2.44241e-03 0.117 3.2267213848e+01 1.6916253448e+00
2.55341e-03 0.117 7.4289116207e+01 1.8274751193e+00
2.66441e-03 0.117 9.9000521929e+01 1.7706812289e+00
"""

def main():
    """
    Run tests given in sys.argv.

    Returns 0 if success or 1 if any tests fail.
    """
    import sys
    import xmlrunner  # type: ignore

    suite = unittest.TestSuite()
    suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(sys.modules[__name__]))

    runner = xmlrunner.XMLTestRunner(output='logs')
    result = runner.run(suite)
    return 1 if result.failures or result.errors else 0


############################################################################
# usage demo
############################################################################

def _eval_demo_1d(resolution, title):
    import sys
    from sasmodels import core
    from sasmodels import direct_model
    name = sys.argv[1] if len(sys.argv) > 1 else 'cylinder'

    if name == 'cylinder':
        pars = {'length':210, 'radius':500, 'background': 0}
    elif name == 'teubner_strey':
        pars = {'a2':0.003, 'c1':-1e4, 'c2':1e10, 'background':0.312643}
    elif name == 'sphere' or name == 'spherepy':
        pars = TEST_PARS_SLIT_SPHERE
    elif name == 'ellipsoid':
        pars = {
            'scale':0.05, 'background': 0,
            'radius_polar':500, 'radius_equatorial':15000,
            'sld':6, 'sld_solvent': 1,
            }
    else:
        pars = {}
    model_info = core.load_model_info(name)
    model = core.build_model(model_info)

    kernel = model.make_kernel([resolution.q_calc])
    theory = direct_model.call_kernel(kernel, pars)
    Iq = resolution.apply(theory)

    if isinstance(resolution, Slit1D):
        width, height = resolution.qx_width, resolution.qy_width
        Iq_romb = romberg_slit_1d(resolution.q, width, height, model, pars)
    else:
        dq = resolution.q_width
        Iq_romb = romberg_pinhole_1d(resolution.q, dq, model, pars)

    import matplotlib.pyplot as plt  # type: ignore
    plt.loglog(resolution.q_calc, theory, label='unsmeared')
    plt.loglog(resolution.q, Iq, label='smeared')
    plt.loglog(resolution.q, Iq_romb, label='romberg smeared')
    plt.legend()
    plt.title(title)
    plt.xlabel("Q (1/Ang)")
    plt.ylabel("I(Q) (1/cm)")

def demo_pinhole_1d():
    """
    Show example of pinhole smearing.
    """
    q = np.logspace(-4, np.log10(0.2), 400)
    q_width = 0.1*q
    resolution = Pinhole1D(q, q_width)
    _eval_demo_1d(resolution, title="10% dQ/Q Pinhole Resolution")

def demo_slit_1d():
    """
    Show example of slit smearing.
    """
    q = np.logspace(-4, np.log10(0.2), 100)
    w = h = 0.
    #w = 0.000000277790
    w = 0.0277790
    #h = 0.00277790
    #h = 0.0277790
    resolution = Slit1D(q, w, h)
    _eval_demo_1d(resolution, title="(%g,%g) Slit Resolution"%(w, h))

def demo():
    """
    Run the resolution demos.
    """
    import matplotlib.pyplot as plt  # type: ignore
    plt.subplot(121)
    demo_pinhole_1d()
    #plt.yscale('linear')
    plt.subplot(122)
    demo_slit_1d()
    #plt.yscale('linear')
    plt.show()


if __name__ == "__main__":
    #demo()
    main()