source: sasmodels/sasmodels/resolution.py @ 48fbd50

"""
Define the resolution functions for the data.

This defines classes for 1D and 2D resolution calculations.
"""
from __future__ import division

from scipy.special import erf
from numpy import sqrt, log, log10
import numpy as np

__all__ = ["Resolution", "Perfect1D", "Pinhole1D", "Slit1D",
           "apply_resolution_matrix", "pinhole_resolution", "slit_resolution",
           "pinhole_extend_q", "slit_extend_q", "bin_edges",
           "interpolate", "linear_extrapolation", "geometric_extrapolation",
          ]

MINIMUM_RESOLUTION = 1e-8


# When extrapolating to -q, what is the minimum positive q relative to q_min
# that we wish to calculate?
MIN_Q_SCALE_FOR_NEGATIVE_Q_EXTRAPOLATION = 0.01

class Resolution(object):
    """
    Abstract base class defining a 1D resolution function.

    *q* is the set of q values at which the data is measured.

    *q_calc* is the set of q values at which the theory needs to be evaluated.
    This may extend and interpolate the q values.

    *apply* is the method to call with I(q_calc) to compute the resolution
    smeared theory I(q).
    """
    q = None
    q_calc = None
    def apply(self, theory):
        """
        Smear *theory* by the resolution function, returning *Iq*.
        """
        raise NotImplementedError("Subclass does not define the apply function")


class Perfect1D(Resolution):
    """
    Resolution function to use when there is no actual resolution smearing
    to be applied.  It has the same interface as the other resolution
    functions, but returns the identity function.
    """
    def __init__(self, q):
        self.q_calc = self.q = q

    def apply(self, theory):
        return theory


class Pinhole1D(Resolution):
    r"""
    Pinhole aperture with q-dependent gaussian resolution.

    *q* points at which the data is measured.

    *q_width* gaussian 1-sigma resolution at each data point.

    *q_calc* is the list of points to calculate, or None if this should
    be estimated from the *q* and *q_width*.
    """
    def __init__(self, q, q_width, q_calc=None, nsigma=3):
        #*min_step* is the minimum point spacing to use when computing the
        #underlying model.  It should be on the order of
        #$\tfrac{1}{10}\tfrac{2\pi}{d_\text{max}}$ to make sure that fringes
        #are computed with sufficient density to avoid aliasing effects.

        # Protect against calls with q_width=0.  The extend_q function will
        # not extend the q if q_width is 0, but q_width must be non-zero when
        # constructing the weight matrix to avoid division by zero errors.
        # In practice this should never be needed, since resolution should
        # default to Perfect1D if the pinhole geometry is not defined.
        self.q, self.q_width = q, q_width
        self.q_calc = (pinhole_extend_q(q, q_width, nsigma=nsigma)
                       if q_calc is None else np.sort(q_calc))
        self.weight_matrix = pinhole_resolution(
            self.q_calc, self.q, np.maximum(q_width, MINIMUM_RESOLUTION))

    def apply(self, theory):
        return apply_resolution_matrix(self.weight_matrix, theory)


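# Example sketch (toy numbers, not a real scattering model): construct a
# pinhole resolution with 10% dQ/Q and smear a made-up power-law curve.
# Any theory evaluated on resolution.q_calc can be passed to apply().
def _example_pinhole_usage():
    q = np.linspace(0.001, 0.1, 50)
    resolution = Pinhole1D(q, q_width=0.1*q)
    theory = 1e-4*resolution.q_calc**-4   # toy curve evaluated on q_calc
    return resolution.apply(theory)       # smeared I(q) at the measured points

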
class Slit1D(Resolution):
    """
    Slit aperture with a complicated resolution function.

    *q* points at which the data is measured.

    *width* slit width

    *height* slit height

    *q_calc* is the list of points to calculate, or None if this should
    be estimated from *q*, *width* and *height*.

    The *weight_matrix* is computed by :func:`slit_resolution`
    """
    def __init__(self, q, width, height=0., q_calc=None):
        # Remember what width/height was used even though we won't need them
        # after the weight matrix is constructed
        self.width, self.height = width, height

        # Allow independent resolution on each point even though it is not
        # needed in practice.
        if np.isscalar(width):
            width = np.ones(len(q))*width
        else:
            width = np.asarray(width)
        if np.isscalar(height):
            height = np.ones(len(q))*height
        else:
            height = np.asarray(height)

        self.q = q.flatten()
        self.q_calc = slit_extend_q(q, width, height) \
            if q_calc is None else np.sort(q_calc)
        self.weight_matrix = \
            slit_resolution(self.q_calc, self.q, width, height)

    def apply(self, theory):
        return apply_resolution_matrix(self.weight_matrix, theory)


def apply_resolution_matrix(weight_matrix, theory):
    """
    Apply the resolution weight matrix to the computed theory function.
    """
    #print("apply shapes", theory.shape, weight_matrix.shape)
    Iq = np.dot(theory[None, :], weight_matrix)
    #print("result shape",Iq.shape)
    return Iq.flatten()


def pinhole_resolution(q_calc, q, q_width):
    """
    Compute the convolution matrix *W* for pinhole resolution 1-D data.

    Each column *W[:,i]* determines the normalized weight that the
    corresponding points *q_calc* contribute to the resolution smeared
    point *q[i]*.  Given *W*, the resolution smearing can be computed
    using *dot(I(q_calc), W)*.

    *q_calc* must be increasing.  *q_width* must be greater than zero.
    """
    # The current algorithm is a midpoint rectangle rule.  In the test case,
    # neither trapezoid nor Simpson's rule improved the accuracy.
    edges = bin_edges(q_calc)
    edges[edges < 0.0] = 0.0 # clip edges below zero
    G = erf((edges[:, None] - q[None, :]) / (sqrt(2.0)*q_width)[None, :])
    weights = G[1:] - G[:-1]
    weights /= np.sum(weights, axis=0)[None, :]
    return weights


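# Example of the weight matrix conventions (toy numbers): W has shape
# (len(q_calc), len(q)), each column is normalized to 1, so a flat theory
# curve smears to the same flat value.
def _example_pinhole_weights():
    q = np.array([0.01, 0.02, 0.03])
    dq = np.array([0.001, 0.001, 0.001])
    q_calc = np.linspace(0.005, 0.035, 61)
    W = pinhole_resolution(q_calc, q, dq)
    assert W.shape == (len(q_calc), len(q))
    assert np.allclose(np.sum(W, axis=0), 1.0)
    assert np.allclose(apply_resolution_matrix(W, np.ones_like(q_calc)), 1.0)

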
def slit_resolution(q_calc, q, width, height, n_height=30):
    r"""
    Build a weight matrix to compute *I_s(q)* from *I(q_calc)*, given
    $q_\perp$ = *width* and $q_\parallel$ = *height*.  *n_height* is
    the number of steps to use in the integration over $q_\parallel$
    when both $q_\perp$ and $q_\parallel$ are non-zero.

    Each $q$ can have an independent width and height value even though
    current instruments use the same slit setting for all measured points.

    If slit height is large relative to width, use:

    .. math::

        I_s(q_i) = \frac{1}{\Delta q_\perp}
            \int_0^{\Delta q_\perp}
                I\left(\sqrt{q_i^2 + q_\perp^2}\right) \,dq_\perp

    If slit width is large relative to height, use:

    .. math::

        I_s(q_i) = \frac{1}{2 \Delta q_\parallel}
            \int_{-\Delta q_\parallel}^{\Delta q_\parallel}
                I\left(|q_i + q_\parallel|\right) \,dq_\parallel

    For a mixture of slit width and height use:

    .. math::

        I_s(q_i) = \frac{1}{2 \Delta q_\parallel \Delta q_\perp}
            \int_{-\Delta q_\parallel}^{\Delta q_\parallel}
            \int_0^{\Delta q_\perp}
                I\left(\sqrt{(q_i + q_\parallel)^2 + q_\perp^2}\right)
                \,dq_\perp dq_\parallel


    Definition
    ----------

    We are using the mid-point integration rule to assign weights to each
    element of a weight matrix $W$ so that

    .. math::

        I_s(q) = W\,I(q_\text{calc})

    If *q_calc* is at the mid-point, we can infer the bin edges from the
    pairwise averages of *q_calc*, adding the missing edges before
    *q_calc[0]* and after *q_calc[-1]*.

    For $q_\parallel = 0$, the smeared value can be computed numerically
    using the $u$ substitution

    .. math::

        u_j = \sqrt{q_j^2 - q^2}

    This gives

    .. math::

        I_s(q) \approx \sum_j I(u_j) \Delta u_j

    where $I(u_j)$ is the value at the mid-point, and $\Delta u_j$ is the
    difference between consecutive edges which have been first converted
    to $u$.  Only $u_j \in [0, \Delta q_\perp]$ are used, which corresponds
    to $q_j \in \left[q, \sqrt{q^2 + \Delta q_\perp^2}\right]$, so

    .. math::

        W_{ij} = \frac{1}{\Delta q_\perp} \Delta u_j
               = \frac{1}{\Delta q_\perp} \left(
                    \sqrt{q_{j+1}^2 - q_i^2} - \sqrt{q_j^2 - q_i^2} \right)
            \ \text{if}\  q_j \in \left[q_i, \sqrt{q_i^2 + q_\perp^2}\right]

    where $I_s(q_i)$ is the theory function being computed and $q_j$ are the
    mid-points between the calculated values in *q_calc*.  We tweak the
    edges of the initial and final intervals so that they lie on integration
    limits.

    (To be precise, the transformed midpoint $u(q_j)$ is not necessarily the
    midpoint of the edges $u((q_{j-1}+q_j)/2)$ and $u((q_j + q_{j+1})/2)$,
    but it is at least in the interval, so the approximation is going to be
    a little better than the left or right Riemann sum, and should be
    good enough for our purposes.)

    For $q_\perp = 0$, the $u$ substitution is simpler:

    .. math::

        u_j = \left|q_j - q\right|

    so

    .. math::

        W_{ij} = \frac{1}{2 \Delta q_\parallel} \Delta u_j
            = \frac{1}{2 \Delta q_\parallel} (q_{j+1} - q_j)
            \ \text{if}\ q_j \in
                \left[q-\Delta q_\parallel, q+\Delta q_\parallel\right]

    However, we need to support cases where $u_j < 0$, which means using
    $2 (q_{j+1} - q_j)$ when $q_j \in \left[0, q_\parallel-q_i\right]$.
    This is not an issue for $q_i > q_\parallel$.

    For both $q_\perp > 0$ and $q_\parallel > 0$ we perform a 2 dimensional
    integration with

    .. math::

        u_{jk} = \sqrt{q_j^2 - (q + (k\Delta q_\parallel/L))^2}
            \ \text{for}\ k = -L \ldots L

    for $L$ = *n_height*.  This gives

    .. math::

        W_{ij} = \frac{1}{2 \Delta q_\perp \Delta q_\parallel}
            \sum_{k=-L}^L \Delta u_{jk}
                \left(\frac{\Delta q_\parallel}{2 L + 1}\right)


    """
    #np.set_printoptions(precision=6, linewidth=10000)

    # The current algorithm is a midpoint rectangle rule.
    q_edges = bin_edges(q_calc) # Note: requires q > 0
    q_edges[q_edges < 0.0] = 0.0 # clip edges below zero
    weights = np.zeros((len(q), len(q_calc)), 'd')

    #print(q_calc)
    for i, (qi, w, h) in enumerate(zip(q, width, height)):
        if w == 0. and h == 0.:
            # Perfect resolution, so return the theory value directly.
            # Note: assumes that q is a subset of q_calc.  If qi need not be
            # in q_calc, then we can do a weighted interpolation by looking
            # up qi in q_calc, then weighting the result by the relative
            # distance to the neighbouring points.
            weights[i, :] = (q_calc == qi)
        elif h == 0:
            weights[i, :] = _q_perp_weights(q_edges, qi, w)
        elif w == 0:
            in_x = 1.0 * ((q_calc >= qi-h) & (q_calc <= qi+h))
            abs_x = 1.0*(q_calc < abs(qi - h)) if qi < h else 0.
            #print(qi - h, qi + h)
            #print(in_x + abs_x)
            weights[i, :] = (in_x + abs_x) * np.diff(q_edges) / (2*h)
        else:
            L = n_height
            for k in range(-L, L+1):
                weights[i, :] += _q_perp_weights(q_edges, qi+k*h/L, w)
            weights[i, :] /= 2*L + 1

    return weights.T


def _q_perp_weights(q_edges, qi, w):
    # Convert bin edges from q to u
    u_limit = np.sqrt(qi**2 + w**2)
    u_edges = q_edges**2 - qi**2
    u_edges[q_edges < abs(qi)] = 0.
    u_edges[q_edges > u_limit] = u_limit**2 - qi**2
    weights = np.diff(np.sqrt(u_edges))/w
    #print("i, qi",i,qi,qi+width)
    #print(q_calc)
    #print(weights)
    return weights


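# Example check of the slit weights (toy numbers): for a pure-width slit
# (height = 0) each column integrates I over [q_i, sqrt(q_i**2 + width**2)],
# so the columns sum to 1 and a flat theory curve is preserved.
def _example_slit_weights():
    q = np.linspace(0.01, 0.1, 10)
    resolution = Slit1D(q, width=0.05, height=0.)
    W = resolution.weight_matrix
    assert W.shape == (len(resolution.q_calc), len(q))
    assert np.allclose(np.sum(W, axis=0), 1.0)
    assert np.allclose(resolution.apply(np.ones_like(resolution.q_calc)), 1.0)

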
def pinhole_extend_q(q, q_width, nsigma=3):
    """
    Given *q* and *q_width*, find a set of sampling points *q_calc* so
    that each point $I(q)$ has sufficient support from the underlying
    function.
    """
    q_min, q_max = np.min(q - nsigma*q_width), np.max(q + nsigma*q_width)
    return linear_extrapolation(q, q_min, q_max)


def slit_extend_q(q, width, height):
    """
    Given *q*, *width* and *height*, find a set of sampling points *q_calc* so
    that each point I(q) has sufficient support from the underlying
    function.
    """
    q_min, q_max = np.min(q-height), np.max(np.sqrt((q+height)**2 + width**2))

    return geometric_extrapolation(q, q_min, q_max)


def bin_edges(x):
    """
    Determine bin edges from bin centers, assuming that edges are centered
    between the bins.

    Note: this uses the arithmetic mean, which may not be appropriate for
    log-scaled data.
    """
    if len(x) < 2 or (np.diff(x) < 0).any():
        raise ValueError("Expected bins to be an increasing set")
    edges = np.hstack([
        x[0]  - 0.5*(x[1]  - x[0]),  # first point minus half first interval
        0.5*(x[1:] + x[:-1]),        # mid points of all central intervals
        x[-1] + 0.5*(x[-1] - x[-2]), # last point plus half last interval
        ])
    return edges


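# Example of bin_edges (toy values): centers [1, 2, 4] give edges
# [0.5, 1.5, 3.0, 5.0] -- midpoints inside, half-interval extensions outside.
def _example_bin_edges():
    edges = bin_edges(np.array([1.0, 2.0, 4.0]))
    assert np.allclose(edges, [0.5, 1.5, 3.0, 5.0])

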
def interpolate(q, max_step):
    """
    Returns *q_calc* with points spaced at most max_step apart.
    """
    step = np.diff(q)
    index = step > max_step
    if np.any(index):
        inserts = []
        for q_i, step_i in zip(q[:-1][index], step[index]):
            n = np.ceil(step_i/max_step)
            inserts.extend(q_i + np.arange(1, n)*(step_i/n))
        # Extend a couple of fringes beyond the end of the data
        inserts.extend(q[-1] + np.arange(1, 8)*max_step)
        q_calc = np.sort(np.hstack((q, inserts)))
    else:
        q_calc = q
    return q_calc


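# Example of interpolate (toy values, not physical q): the gap of 3 between
# 2 and 5 is split into steps of at most 1, and a few extra points are
# appended beyond the end of the data.
def _example_interpolate():
    q_calc = interpolate(np.array([1.0, 2.0, 5.0]), max_step=1.0)
    assert np.allclose(q_calc[:5], [1.0, 2.0, 3.0, 4.0, 5.0])
    assert q_calc[-1] > 5.0   # fringe extension past the last point

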
def linear_extrapolation(q, q_min, q_max):
    """
    Extrapolate *q* out to [*q_min*, *q_max*] using the step size in *q* as
    a guide.  Extrapolation below uses about the same size as the first
    interval.  Extrapolation above uses about the same size as the final
    interval.

    If *q_min* is zero or less then *q[0]/10* is used instead.
    """
    q = np.sort(q)
    if q_min < q[0]:
        if q_min <= 0: q_min = q[0]*MIN_Q_SCALE_FOR_NEGATIVE_Q_EXTRAPOLATION
        n_low = np.ceil((q[0]-q_min) / (q[1]-q[0])) if q[1] > q[0] else 15
        q_low = np.linspace(q_min, q[0], int(n_low)+1)[:-1]
    else:
        q_low = []
    if q_max > q[-1]:
        n_high = np.ceil((q_max-q[-1]) / (q[-1]-q[-2])) if q[-1] > q[-2] else 15
        q_high = np.linspace(q[-1], q_max, int(n_high)+1)[1:]
    else:
        q_high = []
    return np.concatenate([q_low, q, q_high])


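# Example of linear_extrapolation (toy grid): the measured range [2, 3, 4] is
# extended down to 1 and up to 6 using roughly the existing step size of 1.
def _example_linear_extrapolation():
    q_calc = linear_extrapolation(np.array([2.0, 3.0, 4.0]), q_min=1.0, q_max=6.0)
    assert np.allclose(q_calc, [1.0, 2.0, 3.0, 4.0, 5.0, 6.0])

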
def geometric_extrapolation(q, q_min, q_max, points_per_decade=None):
    r"""
    Extrapolate *q* to [*q_min*, *q_max*] using geometric steps, with the
    average geometric step size in *q* as the step size.

    If *q_min* is zero or less then *q[0]/10* is used instead.

    *points_per_decade* sets the ratio between consecutive steps such
    that there will be $n$ points used for every factor of 10 increase
    in *q*.

    If *points_per_decade* is not given, it will be estimated as follows.
    Starting at $q_1$ and stepping geometrically by $\Delta q$ to $q_n$
    in $n$ points gives a geometric average of:

    .. math::

         \log \Delta q = (\log q_n - \log q_1) / (n - 1)

    From this we can compute the number of steps required to extend $q$
    from $q_n$ to $q_\text{max}$ by $\Delta q$ as:

    .. math::

         n_\text{extend} = (\log q_\text{max} - \log q_n) / \log \Delta q

    Substituting:

    .. math::

         n_\text{extend} = (n-1) (\log q_\text{max} - \log q_n)
            / (\log q_n - \log q_1)
    """
    q = np.sort(q)
    if points_per_decade is None:
        log_delta_q = (len(q) - 1) / (log(q[-1]) - log(q[0]))
    else:
        log_delta_q = log(10.) / points_per_decade
    if q_min < q[0]:
        if q_min < 0: q_min = q[0]*MIN_Q_SCALE_FOR_NEGATIVE_Q_EXTRAPOLATION
        n_low = log_delta_q * (log(q[0])-log(q_min))
        q_low = np.logspace(log10(q_min), log10(q[0]), int(np.ceil(n_low))+1)[:-1]
    else:
        q_low = []
    if q_max > q[-1]:
        n_high = log_delta_q * (log(q_max)-log(q[-1]))
        q_high = np.logspace(log10(q[-1]), log10(q_max), int(np.ceil(n_high))+1)[1:]
    else:
        q_high = []
    return np.concatenate([q_low, q, q_high])


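# Example of geometric_extrapolation (toy grid): a decade of measured q is
# extended up to q_max = 0.1 with roughly the same number of points per
# decade as the data, so the added steps are still geometric.
def _example_geometric_extrapolation():
    q = np.logspace(-3, -2, 11)
    q_calc = geometric_extrapolation(q, q_min=q[0], q_max=0.1)
    assert np.isclose(q_calc[-1], 0.1)
    ratios = q_calc[len(q):] / q_calc[len(q)-1:-1]
    assert np.allclose(ratios, ratios[0], rtol=0.1)

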
############################################################################
# unit tests
############################################################################
import unittest


def eval_form(q, form, pars):
    """
    Return the SAS model evaluated at *q*.

    *form* is the SAS model returned from :func:`core.load_model`.

    *pars* are the parameter values to use when evaluating.
    """
    from sasmodels import core
    kernel = form.make_kernel([q])
    theory = core.call_kernel(kernel, pars)
    kernel.release()
    return theory


def gaussian(q, q0, dq):
    """
    Return the Gaussian resolution function.

    *q0* is the center, *dq* is the width and *q* are the points to evaluate.
    """
    from numpy import exp, pi
    return exp(-0.5*((q-q0)/dq)**2)/(sqrt(2*pi)*dq)


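# Cross-check sketch (toy curve, not a real model): pinhole smearing through
# the weight matrix should agree with a direct trapezoidal integral of
# I(q')*gaussian(q', q, dq) over a dense q' grid.
def _example_pinhole_vs_quadrature():
    q, dq = np.array([0.05]), np.array([0.005])
    q_calc = np.linspace(0.02, 0.08, 601)
    Iq = 1.0 + q_calc**2                   # smooth toy intensity on q_calc
    W = pinhole_resolution(q_calc, q, dq)
    smeared = apply_resolution_matrix(W, Iq)[0]
    direct = np.trapz(Iq*gaussian(q_calc, q[0], dq[0]), q_calc)
    assert abs(smeared - direct) < 1e-4*direct

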
def romberg_slit_1d(q, width, height, form, pars):
    """
    Romberg integration for slit resolution.

    This is an adaptive integration technique.  It is called with settings
    that make it slow to evaluate but give it good accuracy.
    """
    from scipy.integrate import romberg

    par_set = set([p.name for p in form.info['parameters']])
    if any(k not in par_set for k in pars.keys()):
        extra = set(pars.keys()) - par_set
        raise ValueError("bad parameters: [%s] not in [%s]"%
                         (", ".join(sorted(extra)), ", ".join(sorted(par_set))))

    if np.isscalar(width):
        width = [width]*len(q)
    if np.isscalar(height):
        height = [height]*len(q)
    _int_w = lambda w, qi: eval_form(sqrt(qi**2 + w**2), form, pars)
    _int_h = lambda h, qi: eval_form(abs(qi+h), form, pars)
    # If both width and height are defined, then it is too slow to use dblquad.
    # Instead use trapz on a fixed grid, interpolated into the I(Q) for
    # the extended Q range.
    #_int_wh = lambda w, h, qi: eval_form(sqrt((qi+h)**2 + w**2), form, pars)
    q_calc = slit_extend_q(q, np.asarray(width), np.asarray(height))
    Iq = eval_form(q_calc, form, pars)
    result = np.empty(len(q))
    for i, (qi, w, h) in enumerate(zip(q, width, height)):
        if h == 0.:
            r = romberg(_int_w, 0, w, args=(qi,),
                        divmax=100, vec_func=True, tol=0, rtol=1e-8)
            result[i] = r/w
        elif w == 0.:
            r = romberg(_int_h, -h, h, args=(qi,),
                        divmax=100, vec_func=True, tol=0, rtol=1e-8)
            result[i] = r/(2*h)
        else:
            w_grid = np.linspace(0, w, 21)[None, :]
            h_grid = np.linspace(-h, h, 23)[:, None]
            u = sqrt((qi+h_grid)**2 + w_grid**2)
            Iu = np.interp(u, q_calc, Iq)
            #print(np.trapz(Iu, w_grid, axis=1))
            Is = np.trapz(np.trapz(Iu, w_grid, axis=1), h_grid[:, 0])
            result[i] = Is / (2*h*w)
            # from scipy.integrate import dblquad
            # r, err = dblquad(_int_wh, -h, h, lambda h: 0., lambda h: w,
            #                  args=(qi,))
            # result[i] = r/(w*2*h)

    # r should be [float, ...], but it is [array([float]), array([float]),...]
    return result


def romberg_pinhole_1d(q, q_width, form, pars, nsigma=5):
    """
    Romberg integration for pinhole resolution.

    This is an adaptive integration technique.  It is called with settings
    that make it slow to evaluate but give it good accuracy.
    """
    from scipy.integrate import romberg

    par_set = set([p.name for p in form.info['parameters']])
    if any(k not in par_set for k in pars.keys()):
        extra = set(pars.keys()) - par_set
        raise ValueError("bad parameters: [%s] not in [%s]"%
                         (", ".join(sorted(extra)),
                          ", ".join(sorted(pars.keys()))))

    _fn = lambda q, q0, dq: eval_form(q, form, pars)*gaussian(q, q0, dq)
    r = [romberg(_fn, max(qi-nsigma*dqi, 1e-10*q[0]), qi+nsigma*dqi,
                 args=(qi, dqi), divmax=100, vec_func=True, tol=0, rtol=1e-8)
         for qi, dqi in zip(q, q_width)]
    return np.asarray(r).flatten()


class ResolutionTest(unittest.TestCase):
    """
    Test the resolution calculations.
    """

    def setUp(self):
        self.x = 0.001*np.arange(1, 11)
        self.y = self.Iq(self.x)

    def Iq(self, q):
        "Linear function for resolution unit test"
        return 12.0 - 1000.0*q

    def test_perfect(self):
        """
        Perfect resolution and no smearing.
        """
        resolution = Perfect1D(self.x)
        theory = self.Iq(resolution.q_calc)
        output = resolution.apply(theory)
        np.testing.assert_equal(output, self.y)

    def test_slit_zero(self):
        """
        Slit smearing with perfect resolution.
        """
        resolution = Slit1D(self.x, width=0, height=0, q_calc=self.x)
        theory = self.Iq(resolution.q_calc)
        output = resolution.apply(theory)
        np.testing.assert_equal(output, self.y)

    @unittest.skip("not yet supported")
    def test_slit_high(self):
        """
        Slit smearing with height 0.005
        """
        resolution = Slit1D(self.x, width=0, height=0.005, q_calc=self.x)
        theory = self.Iq(resolution.q_calc)
        output = resolution.apply(theory)
        answer = [
            9.0618, 8.6402, 8.1187, 7.1392, 6.1528,
            5.5555, 4.5584, 3.5606, 2.5623, 2.0000,
            ]
        np.testing.assert_allclose(output, answer, atol=1e-4)

    @unittest.skip("not yet supported")
    def test_slit_both_high(self):
        """
        Slit smearing with width < 100*height.
        """
        q = np.logspace(-4, -1, 10)
        resolution = Slit1D(q, width=0.2, height=np.inf)
        theory = 1000*self.Iq(resolution.q_calc**4)
        output = resolution.apply(theory)
        answer = [
            8.85785, 8.43012, 7.92687, 6.94566, 6.03660,
            5.40363, 4.40655, 3.40880, 2.41058, 2.00000,
            ]
        np.testing.assert_allclose(output, answer, atol=1e-4)

    @unittest.skip("not yet supported")
    def test_slit_wide(self):
        """
        Slit smearing with width 0.0002
        """
        resolution = Slit1D(self.x, width=0.0002, height=0, q_calc=self.x)
        theory = self.Iq(resolution.q_calc)
        output = resolution.apply(theory)
        answer = [
            11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0,
            ]
        np.testing.assert_allclose(output, answer, atol=1e-4)

    @unittest.skip("not yet supported")
    def test_slit_both_wide(self):
        """
        Slit smearing with width > 100*height.
        """
        resolution = Slit1D(self.x, width=0.0002, height=0.000001,
                            q_calc=self.x)
        theory = self.Iq(resolution.q_calc)
        output = resolution.apply(theory)
        answer = [
            11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0,
            ]
        np.testing.assert_allclose(output, answer, atol=1e-4)

    def test_pinhole_zero(self):
        """
        Pinhole smearing with perfect resolution
        """
        resolution = Pinhole1D(self.x, 0.0*self.x)
        theory = self.Iq(resolution.q_calc)
        output = resolution.apply(theory)
        np.testing.assert_equal(output, self.y)

    def test_pinhole(self):
        """
        Pinhole smearing with dQ = 0.001 [Note: not dQ/Q = 0.001]
        """
        resolution = Pinhole1D(self.x, 0.001*np.ones_like(self.x),
                               q_calc=self.x)
        theory = 12.0-1000.0*resolution.q_calc
        output = resolution.apply(theory)
        answer = [
            10.44785079, 9.84991299, 8.98101708,
            7.99906585, 6.99998311, 6.00001689,
            5.00093415, 4.01898292, 3.15008701, 2.55214921,
            ]
        np.testing.assert_allclose(output, answer, atol=1e-8)


class IgorComparisonTest(unittest.TestCase):
    """
    Test resolution calculations against those returned by Igor.
    """

    def setUp(self):
        self.pars = TEST_PARS_PINHOLE_SPHERE
        from sasmodels import core
        from sasmodels.models import sphere
        self.model = core.load_model(sphere, dtype='double')

    def _eval_sphere(self, pars, resolution):
        from sasmodels import core
        kernel = self.model.make_kernel([resolution.q_calc])
        theory = core.call_kernel(kernel, pars)
        result = resolution.apply(theory)
        kernel.release()
        return result

    def _compare(self, q, output, answer, tolerance):
        #err = (output - answer)/answer
        #idx = abs(err) >= tolerance
        #problem = zip(q[idx], output[idx], answer[idx], err[idx])
        #print("\n".join(str(v) for v in problem))
        np.testing.assert_allclose(output, answer, rtol=tolerance)

    def test_perfect(self):
        """
        Compare sphere model with NIST Igor SANS, no resolution smearing.
        """
        pars = TEST_PARS_SLIT_SPHERE
        data_string = TEST_DATA_SLIT_SPHERE

        data = np.loadtxt(data_string.split('\n')).T
        q, width, answer, _ = data
        resolution = Perfect1D(q)
        output = self._eval_sphere(pars, resolution)
        self._compare(q, output, answer, 1e-6)

    def test_pinhole(self):
        """
        Compare pinhole resolution smearing with NIST Igor SANS
        """
        pars = TEST_PARS_PINHOLE_SPHERE
        data_string = TEST_DATA_PINHOLE_SPHERE

        data = np.loadtxt(data_string.split('\n')).T
        q, q_width, answer = data
        resolution = Pinhole1D(q, q_width)
        output = self._eval_sphere(pars, resolution)
        # TODO: relative error should be lower
        self._compare(q, output, answer, 3e-4)

    def test_pinhole_romberg(self):
        """
        Compare pinhole resolution smearing with romberg integration result.
        """
        pars = TEST_PARS_PINHOLE_SPHERE
        data_string = TEST_DATA_PINHOLE_SPHERE
        pars['radius'] *= 5
        radius = pars['radius']

        data = np.loadtxt(data_string.split('\n')).T
        q, q_width, answer = data
        answer = romberg_pinhole_1d(q, q_width, self.model, pars)
        ## Getting 0.1% requires 5 sigma and 200 points per fringe
        #q_calc = interpolate(pinhole_extend_q(q, q_width, nsigma=5),
        #                     2*np.pi/radius/200)
        #tol = 0.001
        ## The default 3 sigma and no extra points gets 1%
        q_calc, tol = None, 0.01
        resolution = Pinhole1D(q, q_width, q_calc=q_calc)
        output = self._eval_sphere(pars, resolution)
        if 0: # debug plot
            import matplotlib.pyplot as plt
            resolution = Perfect1D(q)
            source = self._eval_sphere(pars, resolution)
            plt.loglog(q, source, '.')
            plt.loglog(q, answer, '-', hold=True)
            plt.loglog(q, output, '-', hold=True)
            plt.show()
        self._compare(q, output, answer, tol)

    def test_slit(self):
        """
        Compare slit resolution smearing with NIST Igor SANS
        """
        pars = TEST_PARS_SLIT_SPHERE
        data_string = TEST_DATA_SLIT_SPHERE

        data = np.loadtxt(data_string.split('\n')).T
        q, delta_qv, _, answer = data
        resolution = Slit1D(q, width=delta_qv, height=0)
        output = self._eval_sphere(pars, resolution)
        # TODO: eliminate Igor test since it is too inaccurate to be useful.
        # This means we can eliminate the test data as well, and instead
        # use a generated q vector.
        self._compare(q, output, answer, 0.5)

    def test_slit_romberg(self):
        """
        Compare slit resolution smearing with romberg integration result.
        """
        pars = TEST_PARS_SLIT_SPHERE
        data_string = TEST_DATA_SLIT_SPHERE
        radius = pars['radius']

        data = np.loadtxt(data_string.split('\n')).T
        q, delta_qv, _, answer = data
        answer = romberg_slit_1d(q, delta_qv, 0., self.model, pars)
        q_calc = slit_extend_q(interpolate(q, 2*np.pi/radius/20),
                               delta_qv[0], 0.)
        resolution = Slit1D(q, width=delta_qv, height=0, q_calc=q_calc)
        output = self._eval_sphere(pars, resolution)
        # TODO: relative error should be lower
        self._compare(q, output, answer, 0.025)

    def test_ellipsoid(self):
        """
        Compare romberg integration for ellipsoid model.
        """
        from .core import load_model
        pars = {
            'scale':0.05,
            'rpolar':500, 'requatorial':15000,
            'sld':6, 'solvent_sld': 1,
            }
        form = load_model('ellipsoid', dtype='double')
        q = np.logspace(log10(4e-5), log10(2.5e-2), 68)
        width, height = 0.117, 0.
        resolution = Slit1D(q, width=width, height=height)
        answer = romberg_slit_1d(q, width, height, form, pars)
        output = resolution.apply(eval_form(resolution.q_calc, form, pars))
        # TODO: 10% is too much error; use better algorithm
        #print(np.max(abs(answer-output)/answer))
        self._compare(q, output, answer, 0.1)

    #TODO: can sas q spacing be too sparse for the resolution calculation?
    @unittest.skip("suppress sparse data test; not supported by current code")
    def test_pinhole_sparse(self):
        """
        Compare pinhole resolution smearing with NIST Igor SANS on sparse data
        """
        pars = TEST_PARS_PINHOLE_SPHERE
        data_string = TEST_DATA_PINHOLE_SPHERE

        data = np.loadtxt(data_string.split('\n')).T
        q, q_width, answer = data[:, ::20] # Take every nth point
        resolution = Pinhole1D(q, q_width)
        output = self._eval_sphere(pars, resolution)
        self._compare(q, output, answer, 1e-6)


# pinhole sphere parameters
TEST_PARS_PINHOLE_SPHERE = {
    'scale': 1.0, 'background': 0.01,
    'radius': 60.0, 'sld': 1, 'solvent_sld': 6.3,
    }
# Q, dQ, I(Q) calculated by NIST Igor SANS package
TEST_DATA_PINHOLE_SPHERE = """\
0.001278 0.0002847 2538.41176383
0.001562 0.0002905 2536.91820405
0.001846 0.0002956 2535.13182479
0.002130 0.0003017 2533.06217813
0.002414 0.0003087 2530.70378586
0.002698 0.0003165 2528.05024192
0.002982 0.0003249 2525.10408349
0.003266 0.0003340 2521.86667499
0.003550 0.0003437 2518.33907750
0.003834 0.0003539 2514.52246995
0.004118 0.0003646 2510.41798319
0.004402 0.0003757 2506.02690988
0.004686 0.0003872 2501.35067884
0.004970 0.0003990 2496.38678318
0.005253 0.0004112 2491.16237596
0.005537 0.0004237 2485.63911673
0.005821 0.0004365 2479.83657083
0.006105 0.0004495 2473.75676948
0.006389 0.0004628 2467.40145990
0.006673 0.0004762 2460.77293372
0.006957 0.0004899 2453.86724627
0.007241 0.0005037 2446.69623838
0.007525 0.0005177 2439.25775219
0.007809 0.0005318 2431.55421398
0.008093 0.0005461 2423.58785521
0.008377 0.0005605 2415.36158137
0.008661 0.0005750 2406.87009473
0.008945 0.0005896 2398.12841186
0.009229 0.0006044 2389.13360806
0.009513 0.0006192 2379.88958042
0.009797 0.0006341 2370.39776774
0.010080 0.0006491 2360.69528793
0.010360 0.0006641 2350.85169027
0.010650 0.0006793 2340.42023633
0.010930 0.0006945 2330.11206013
0.011220 0.0007097 2319.20109972
0.011500 0.0007251 2308.43503981
0.011780 0.0007404 2297.44820179
0.012070 0.0007558 2285.83853677
0.012350 0.0007713 2274.41290746
0.012640 0.0007868 2262.36219581
0.012920 0.0008024 2250.51169731
0.013200 0.0008180 2238.45596231
0.013490 0.0008336 2225.76495666
0.013770 0.0008493 2213.29618391
0.014060 0.0008650 2200.19110751
0.014340 0.0008807 2187.34050325
0.014620 0.0008965 2174.30529864
0.014910 0.0009123 2160.61632548
0.015190 0.0009281 2147.21038112
0.015470 0.0009440 2133.62023580
0.015760 0.0009598 2119.37907426
0.016040 0.0009757 2105.45234903
0.016330 0.0009916 2090.86319102
0.016610 0.0010080 2076.60576032
0.016890 0.0010240 2062.19214565
0.017180 0.0010390 2047.10550219
0.017460 0.0010550 2032.38715621
0.017740 0.0010710 2017.52560123
0.018030 0.0010880 2001.99124318
0.018310 0.0011040 1986.84662060
0.018600 0.0011200 1971.03389745
0.018880 0.0011360 1955.61395119
0.019160 0.0011520 1940.08291563
0.019450 0.0011680 1923.87672225
0.019730 0.0011840 1908.10656374
0.020020 0.0012000 1891.66297192
0.020300 0.0012160 1875.66789021
0.020580 0.0012320 1859.56357196
0.020870 0.0012490 1842.79468290
0.021150 0.0012650 1826.50064489
0.021430 0.0012810 1810.11533702
0.021720 0.0012970 1793.06840882
0.022000 0.0013130 1776.51153580
0.022280 0.0013290 1759.87201249
0.022570 0.0013460 1742.57354412
0.022850 0.0013620 1725.79397319
0.023140 0.0013780 1708.35831550
0.023420 0.0013940 1691.45256069
0.023700 0.0014110 1674.48561783
0.023990 0.0014270 1656.86525366
0.024270 0.0014430 1639.79847285
0.024550 0.0014590 1622.68887088
0.024840 0.0014760 1604.96421100
0.025120 0.0014920 1587.85768129
0.025410 0.0015080 1569.99297335
0.025690 0.0015240 1552.84580279
0.025970 0.0015410 1535.54074115
0.026260 0.0015570 1517.75249337
0.026540 0.0015730 1500.40115023
0.026820 0.0015900 1483.03632237
0.027110 0.0016060 1465.05942429
0.027390 0.0016220 1447.67682181
0.027670 0.0016390 1430.46495191
0.027960 0.0016550 1412.49232282
0.028240 0.0016710 1395.13182318
0.028520 0.0016880 1377.93439837
0.028810 0.0017040 1359.99528971
0.029090 0.0017200 1342.67274512
0.029370 0.0017370 1325.55375609
"""

# Slit sphere parameters
TEST_PARS_SLIT_SPHERE = {
    'scale': 0.01, 'background': 0.01,
    'radius': 60000, 'sld': 1, 'solvent_sld': 4,
    }
# Q dQ I(Q) I_smeared(Q)
TEST_DATA_SLIT_SPHERE = """\
2.26097e-05 0.117 5.5781372896e+09 1.4626077708e+06
2.53847e-05 0.117 5.0363141458e+09 1.3117318023e+06
2.81597e-05 0.117 4.4850108103e+09 1.1594863713e+06
3.09347e-05 0.117 3.9364658459e+09 1.0094881630e+06
3.37097e-05 0.117 3.4019975074e+09 8.6518941303e+05
3.92597e-05 0.117 2.4139519814e+09 6.0232158311e+05
4.48097e-05 0.117 1.5816877820e+09 3.8739994090e+05
5.03597e-05 0.117 9.3715407224e+08 2.2745304775e+05
5.59097e-05 0.117 4.8387917428e+08 1.2101295768e+05
6.14597e-05 0.117 2.0193586928e+08 6.0055107771e+04
6.70097e-05 0.117 5.5886110911e+07 3.2749521065e+04
7.25597e-05 0.117 3.7782348010e+06 2.6350963616e+04
7.81097e-05 0.117 5.3407817904e+06 2.9624963314e+04
8.36597e-05 0.117 2.7975485523e+07 3.4403962254e+04
8.92097e-05 0.117 4.9845448282e+07 3.6130017903e+04
9.47597e-05 0.117 6.0092588905e+07 3.3495107849e+04
1.00310e-04 0.117 5.6823430831e+07 2.7475726279e+04
1.05860e-04 0.117 4.3857024036e+07 2.0144282226e+04
1.11410e-04 0.117 2.7277144760e+07 1.3647403260e+04
1.22510e-04 0.117 3.3119334113e+06 6.6519711526e+03
1.33610e-04 0.117 1.4412859402e+06 6.9726212813e+03
1.44710e-04 0.117 8.5620162463e+06 8.1441335775e+03
1.55810e-04 0.117 9.6957429033e+06 6.4559996521e+03
1.66910e-04 0.117 4.3818341914e+06 3.6252154156e+03
1.78010e-04 0.117 2.7448997387e+05 2.4006505342e+03
1.89110e-04 0.117 8.0472009936e+05 2.8187789089e+03
2.00210e-04 0.117 2.8149552834e+06 3.0915662855e+03
2.11310e-04 0.117 2.7510907861e+06 2.3722530293e+03
2.22410e-04 0.117 1.0053133293e+06 1.4473468311e+03
2.33510e-04 0.117 5.8428305052e+03 1.2048540556e+03
2.44610e-04 0.117 5.1699305004e+05 1.4625670042e+03
2.55710e-04 0.117 1.2120227268e+06 1.5010705973e+03
2.66810e-04 0.117 9.7896842846e+05 1.1336343426e+03
2.77910e-04 0.117 2.5507264791e+05 8.1848818080e+02
3.05660e-04 0.117 5.2403101181e+05 7.4913374357e+02
3.33410e-04 0.117 5.8699343809e+04 4.4669964560e+02
3.61160e-04 0.117 3.0844327150e+05 4.6774007542e+02
3.88910e-04 0.117 8.3360142970e+03 2.7169550220e+02
4.16660e-04 0.117 1.8630080583e+05 3.0710983679e+02
4.44410e-04 0.117 3.1616804732e-01 1.7959006831e+02
4.72160e-04 0.117 1.1299016314e+05 2.0763952339e+02
4.99910e-04 0.117 2.9952522747e+03 1.2536542765e+02
5.27660e-04 0.117 6.7625695649e+04 1.4013969777e+02
5.55410e-04 0.117 7.6927460089e+03 8.2145593180e+01
6.10910e-04 0.117 1.1229057779e+04 8.4519745643e+01
6.66410e-04 0.117 1.3035567943e+04 8.1554625609e+01
7.21910e-04 0.117 1.3309931343e+04 7.4437319172e+01
7.77410e-04 0.117 1.2462626212e+04 6.4697088261e+01
8.32910e-04 0.117 1.0912927143e+04 5.3773301044e+01
8.88410e-04 0.117 9.0172597469e+03 4.2843375753e+01
9.43910e-04 0.117 7.0496495917e+03 3.2771032724e+01
9.99410e-04 0.117 5.2030483682e+03 2.4113557144e+01
1.05491e-03 0.117 3.5988976711e+03 1.7160773658e+01
1.11041e-03 0.117 2.2996060652e+03 1.2016626459e+01
1.22141e-03 0.117 6.4766590598e+02 6.0373017740e+00
1.33241e-03 0.117 4.1963483264e+01 4.5215452974e+00
1.44341e-03 0.117 6.3370708246e+01 5.1054681903e+00
1.55441e-03 0.117 3.0736750577e+02 5.9176165298e+00
1.66541e-03 0.117 5.0327682399e+02 5.9815000189e+00
1.77641e-03 0.117 5.4084331454e+02 5.1634639625e+00
1.88741e-03 0.117 4.3488671756e+02 3.8535158148e+00
1.99841e-03 0.117 2.6322287860e+02 2.5824997753e+00
2.10941e-03 0.117 1.0793633150e+02 1.7315517194e+00
2.22041e-03 0.117 1.8474448850e+01 1.4077213604e+00
2.33141e-03 0.117 1.5864062279e+00 1.4771560682e+00
2.44241e-03 0.117 3.2267213848e+01 1.6916253448e+00
2.55341e-03 0.117 7.4289116207e+01 1.8274751193e+00
2.66441e-03 0.117 9.9000521929e+01 1.7706812289e+00
"""

def main():
    """
    Run tests given in sys.argv.

    Returns 0 if success or 1 if any tests fail.
    """
    import sys
    import xmlrunner

    suite = unittest.TestSuite()
    suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(sys.modules[__name__]))

    runner = xmlrunner.XMLTestRunner(output='logs')
    result = runner.run(suite)
    return 1 if result.failures or result.errors else 0


############################################################################
# usage demo
############################################################################

def _eval_demo_1d(resolution, title):
    import sys
    from sasmodels import core
    name = sys.argv[1] if len(sys.argv) > 1 else 'cylinder'

    if name == 'cylinder':
        pars = {'length':210, 'radius':500}
    elif name == 'teubner_strey':
        pars = {'a2':0.003, 'c1':-1e4, 'c2':1e10, 'background':0.312643}
    elif name == 'sphere' or name == 'spherepy':
        pars = TEST_PARS_SLIT_SPHERE
    elif name == 'ellipsoid':
        pars = {
            'scale':0.05,
            'rpolar':500, 'requatorial':15000,
            'sld':6, 'solvent_sld': 1,
            }
    else:
        pars = {}
    model_info = core.load_model_info(name)
    model = core.build_model(model_info)

    kernel = model.make_kernel([resolution.q_calc])
    theory = core.call_kernel(kernel, pars)
    Iq = resolution.apply(theory)

    if isinstance(resolution, Slit1D):
        width, height = resolution.width, resolution.height
        Iq_romb = romberg_slit_1d(resolution.q, width, height, model, pars)
    else:
        dq = resolution.q_width
        Iq_romb = romberg_pinhole_1d(resolution.q, dq, model, pars)

    import matplotlib.pyplot as plt
    plt.loglog(resolution.q_calc, theory, label='unsmeared')
    plt.loglog(resolution.q, Iq, label='smeared')
    plt.loglog(resolution.q, Iq_romb, label='romberg smeared')
    plt.legend()
    plt.title(title)
    plt.xlabel("Q (1/Ang)")
    plt.ylabel("I(Q) (1/cm)")

def demo_pinhole_1d():
    """
    Show example of pinhole smearing.
    """
    q = np.logspace(-4, np.log10(0.2), 400)
    q_width = 0.1*q
    resolution = Pinhole1D(q, q_width)
    _eval_demo_1d(resolution, title="10% dQ/Q Pinhole Resolution")

def demo_slit_1d():
    """
    Show example of slit smearing.
    """
    q = np.logspace(-4, np.log10(0.2), 100)
    w = h = 0.
    #w = 0.000000277790
    w = 0.0277790
    #h = 0.00277790
    #h = 0.0277790
    resolution = Slit1D(q, w, h)
    _eval_demo_1d(resolution, title="(%g,%g) Slit Resolution"%(w, h))

def demo():
    """
    Run the resolution demos.
    """
    import matplotlib.pyplot as plt
    plt.subplot(121)
    demo_pinhole_1d()
    #plt.yscale('linear')
    plt.subplot(122)
    demo_slit_1d()
    #plt.yscale('linear')
    plt.show()


if __name__ == "__main__":
    #demo()
    main()