source: sasmodels/sasmodels/resolution.py @ 5d316e9

Last change on this file since 5d316e9 was 9404dd3, checked in by Paul Kienzle <pkienzle@…>, 8 years ago

python 3.x support

1"""
2Define the resolution functions for the data.
3
4This defines classes for 1D and 2D resolution calculations.
5"""
6from __future__ import division
7
8__all__ = ["Resolution", "Perfect1D", "Pinhole1D", "Slit1D",
9           "apply_resolution_matrix", "pinhole_resolution", "slit_resolution",
10           "pinhole_extend_q", "slit_extend_q", "bin_edges",
11           "interpolate", "linear_extrapolation", "geometric_extrapolation",
12           ]
13
14from scipy.special import erf
15from numpy import sqrt, log, log10
16import numpy as np
17
18MINIMUM_RESOLUTION = 1e-8
19
20
21# When extrapolating to -q, what is the minimum positive q relative to q_min
22# that we wish to calculate?
23MIN_Q_SCALE_FOR_NEGATIVE_Q_EXTRAPOLATION = 0.01
24
25class Resolution(object):
26    """
27    Abstract base class defining a 1D resolution function.
28
29    *q* is the set of q values at which the data is measured.
30
31    *q_calc* is the set of q values at which the theory needs to be evaluated.
32    This may extend and interpolate the q values.
33
34    *apply* is the method to call with I(q_calc) to compute the resolution
35    smeared theory I(q).
36    """
37    q = None
38    q_calc = None
39    def apply(self, theory):
40        """
41        Smear *theory* by the resolution function, returning *Iq*.
42        """
43        raise NotImplementedError("Subclass does not define the apply function")
44
45
46class Perfect1D(Resolution):
47    """
48    Resolution function to use when there is no actual resolution smearing
49    to be applied.  It has the same interface as the other resolution
50    functions, but returns the identity function.
51    """
52    def __init__(self, q):
53        self.q_calc = self.q = q
54
55    def apply(self, theory):
56        return theory
57
58
59class Pinhole1D(Resolution):
60    r"""
61    Pinhole aperture with q-dependent gaussian resolution.
62
63    *q* points at which the data is measured.
64
65    *q_width* gaussian 1-sigma resolution at each data point.
66
67    *q_calc* is the list of points to calculate, or None if this should
68    be estimated from the *q* and *q_width*.
69    """
70    def __init__(self, q, q_width, q_calc=None, nsigma=3):
71        #*min_step* is the minimum point spacing to use when computing the
72        #underlying model.  It should be on the order of
73        #$\tfrac{1}{10}\tfrac{2\pi}{d_\text{max}}$ to make sure that fringes
74        #are computed with sufficient density to avoid aliasing effects.
75
76        # Protect against calls with q_width=0.  The extend_q function will
77        # not extend the q if q_width is 0, but q_width must be non-zero when
78        # constructing the weight matrix to avoid division by zero errors.
79        # In practice this should never be needed, since resolution should
80        # default to Perfect1D if the pinhole geometry is not defined.
81        self.q, self.q_width = q, q_width
82        self.q_calc = pinhole_extend_q(q, q_width, nsigma=nsigma) \
83            if q_calc is None else np.sort(q_calc)
84        self.weight_matrix = pinhole_resolution(self.q_calc,
85                self.q, np.maximum(q_width, MINIMUM_RESOLUTION))
86
87    def apply(self, theory):
88        return apply_resolution_matrix(self.weight_matrix, theory)
89
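# Example (editor's sketch, not part of the original module): typical use of
# Pinhole1D with a 10% dQ/Q resolution.  The q range and the placeholder I(q)
# below are arbitrary choices for illustration only.
def _example_pinhole_usage():
    q = np.linspace(0.001, 0.1, 50)
    resolution = Pinhole1D(q, q_width=0.1*q)
    theory = 1.0/(1.0 + (resolution.q_calc/0.02)**4)   # placeholder I(q)
    return resolution.apply(theory)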
90
91class Slit1D(Resolution):
92    """
93    Slit aperture with a complicated resolution function.
94
95    *q* points at which the data is measured.
96
97    *width* slit width, in the direction perpendicular to *q*
98
99    *height* slit height, in the direction parallel to *q*
100
101    *q_calc* is the list of points to calculate, or None if this should
102    be estimated from *q*, *width* and *height*.
103
104    The *weight_matrix* is computed by :func:`slit_resolution`.
105    """
106    def __init__(self, q, width, height=0., q_calc=None):
107        # Remember what width/height was used even though we won't need them
108        # after the weight matrix is constructed
109        self.width, self.height = width, height
110
111        # Allow independent resolution on each point even though it is not
112        # needed in practice.
113        if np.isscalar(width):
114            width = np.ones(len(q))*width
115        else:
116            width = np.asarray(width)
117        if np.isscalar(height):
118            height = np.ones(len(q))*height
119        else:
120            height = np.asarray(height)
121
122        self.q = q.flatten()
123        self.q_calc = slit_extend_q(q, width, height) \
124            if q_calc is None else np.sort(q_calc)
125        self.weight_matrix = \
126            slit_resolution(self.q_calc, self.q, width, height)
127
128    def apply(self, theory):
129        return apply_resolution_matrix(self.weight_matrix, theory)
130
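# Example (editor's sketch, not part of the original module): slit smearing for
# a width-only slit, mirroring the pinhole example above.  The width value and
# the placeholder I(q) are arbitrary.
def _example_slit_usage():
    q = np.linspace(0.001, 0.1, 50)
    resolution = Slit1D(q, width=0.117, height=0.)
    theory = 1.0/(1.0 + (resolution.q_calc/0.02)**4)   # placeholder I(q)
    return resolution.apply(theory)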
131
132def apply_resolution_matrix(weight_matrix, theory):
133    """
134    Apply the resolution weight matrix to the computed theory function.
135    """
136    #print("apply shapes", theory.shape, weight_matrix.shape)
137    Iq = np.dot(theory[None,:], weight_matrix)
138    #print("result shape",Iq.shape)
139    return Iq.flatten()
140
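# Sketch (editor's addition): the weight matrix convention used above is
# (len(q_calc), len(q)), so a theory vector of length len(q_calc) smears down
# to one value per measured q.  The q values and 1-sigma width are arbitrary.
def _example_apply_resolution_matrix():
    q = np.array([0.01, 0.02])
    q_calc = np.linspace(0.005, 0.025, 21)
    weights = pinhole_resolution(q_calc, q, q_width=np.array([0.002, 0.002]))
    Iq = apply_resolution_matrix(weights, np.ones_like(q_calc))
    # Each column of the weight matrix is normalized, so a flat theory smears
    # to a flat result.
    assert Iq.shape == (2,) and np.allclose(Iq, 1.0)
    return Iq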
141
142def pinhole_resolution(q_calc, q, q_width):
143    """
144    Compute the convolution matrix *W* for pinhole resolution 1-D data.
145
146    Each column *W[:,i]* gives the normalized weights with which the points
147    *q_calc* contribute to the resolution smeared point *q[i]*.  Given *W*,
148    the resolution smearing can be computed using *dot(theory, W)*.
149
150    *q_calc* must be increasing.  *q_width* must be greater than zero.
151    """
152    # The current algorithm is a midpoint rectangle rule.  In the test case,
153    # neither trapezoid nor Simpson's rule improved the accuracy.
154    edges = bin_edges(q_calc)
155    edges[edges<0.0] = 0.0 # clip edges below zero
156    G = erf( (edges[:,None] - q[None,:]) / (sqrt(2.0)*q_width)[None,:] )
157    weights = G[1:] - G[:-1]
158    weights /= np.sum(weights, axis=0)[None,:]
159    return weights
160
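# Sketch (editor's addition): each column of the pinhole weight matrix is a
# discretized Gaussian in q_calc centred on the corresponding data point.
# The q grid and 1-sigma width below are arbitrary.
def _example_pinhole_weight_shape():
    q = np.array([0.02])
    q_calc = np.linspace(0.01, 0.03, 201)
    weights = pinhole_resolution(q_calc, q, q_width=np.array([0.002]))
    peak = q_calc[np.argmax(weights[:, 0])]
    assert abs(peak - q[0]) < 1e-4  # weights peak at the data point
    return weights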
161
162def slit_resolution(q_calc, q, width, height, n_height=30):
163    r"""
164    Build a weight matrix to compute *I_s(q)* from *I(q_calc)*, given
165    $q_\perp$ = *width* and $q_\parallel$ = *height*.  *n_height* is
166    the number of steps to use in the integration over $q_\parallel$
167    when both $q_\perp$ and $q_\parallel$ are non-zero.
168
169    Each $q$ can have an independent width and height value even though
170    current instruments use the same slit setting for all measured points.
171
172    If slit height is large relative to width, use:
173
174    .. math::
175
176        I_s(q_i) = \frac{1}{\Delta q_\perp}
177            \int_0^{\Delta q_\perp} I\left(\sqrt{q_i^2 + q_\perp^2}\right) dq_\perp
178
179    If slit width is large relative to height, use:
180
181    .. math::
182
183        I_s(q_i) = \frac{1}{2 \Delta q_\parallel}
184            \int_{-\Delta q_\parallel}^{\Delta q_\parallel}
185                I(|q_i + q_\parallel|) dq_\parallel
186
187    For a mixture of slit width and height use:
188
189    .. math::
190
191        I_s(q_i) = \frac{1}{2 \Delta q_\parallel \Delta q_\perp}
192            \int_{-\Delta q_\parallel}^{\Delta q_\parallel}
193            \int_0^{\Delta q_\perp}
194                I\left(\sqrt{(q_i + q_\parallel)^2 + q_\perp^2}\right)
195                dq_\perp dq_\parallel
196
197
198    **Algorithm**
199
200    We are using the mid-point integration rule to assign weights to each
201    element of a weight matrix $W$ so that
202
203    .. math::
204
205        I_s(q) = W I(q_\text{calc})
206
207    If *q_calc* is at the mid-point, we can infer the bin edges from the
208    pairwise averages of *q_calc*, adding the missing edges before
209    *q_calc[0]* and after *q_calc[-1]*.
210
211    For $q_\parallel = 0$, the smeared value can be computed numerically
212    using the $u$ substitution
213
214    .. math::
215
216        u_j = \sqrt{q_j^2 - q^2}
217
218    This gives
219
220    .. math::
221
222        I_s(q) \approx \sum_j I(u_j) \Delta u_j
223
224    where $I(u_j)$ is the value at the mid-point, and $\Delta u_j$ is the
225    difference between consecutive edges which have been first converted
226    to $u$.  Only $u_j \in [0, \Delta q_\perp]$ are used, which corresponds
227    to $q_j \in [q, \sqrt{q^2 + \Delta q_\perp^2}]$, so
228
229    .. math::
230
231        W_{ij} = \frac{1}{\Delta q_\perp} \Delta u_j
232               = \frac{1}{\Delta q_\perp}
233                    \left(\sqrt{q_{j+1}^2 - q_i^2} - \sqrt{q_j^2 - q_i^2}\right)
234            \quad \text{if } q_j \in [q_i, \sqrt{q_i^2 + \Delta q_\perp^2}]
235
236    where $I_s(q_i)$ is the theory function being computed and $q_j$ are the
237    mid-points between the calculated values in *q_calc*.  We tweak the
238    edges of the initial and final intervals so that they lie on integration
239    limits.
240
241    (To be precise, the transformed midpoint $u(q_j)$ is not necessarily the
242    midpoint of the edges $u((q_{j-1}+q_j)/2)$ and $u((q_j + q_{j+1})/2)$,
243    but it is at least in the interval, so the approximation is going to be
244    a little better than the left or right Riemann sum, and should be
245    good enough for our purposes.)
246
247    For $q_\perp = 0$, the $u$ substitution is simpler:
248
249    .. math::
250
251        u_j = |q_j - q|
252
253    so
254
255    .. math::
256
257        W_{ij} = \frac{1}{2 \Delta q_\parallel} \Delta u_j
258            = \frac{1}{2 \Delta q_\parallel} (q_{j+1} - q_j)
259            \quad \text{if } q_j \in [q - \Delta q_\parallel, q + \Delta q_\parallel]
260
261    However, we need to support cases where $u_j < 0$, which means using
262    $2 (q_{j+1} - q_j)$ when $q_j \in [0, \Delta q_\parallel - q_i]$.  This is
263    not an issue for $q_i > \Delta q_\parallel$.
264
265    For both $q_\perp > 0$ and $q_\parallel > 0$ we perform a two-dimensional
266    integration with
267
268    .. math::
269
270        u_{jk} = \sqrt{q_j^2 - (q + (k \Delta q_\parallel / L))^2}
271            \quad \text{for } k = -L, \ldots, L
272
273    for $L$ = *n_height*.  This gives
274
275    .. math::
276
277        W_{ij} = \frac{1}{2 \Delta q_\perp \Delta q_\parallel}
278            \sum_{k=-L}^{L} \Delta u_{jk} \left(\frac{2 \Delta q_\parallel}{2 L + 1}\right)
279
280
281    """
282    #np.set_printoptions(precision=6, linewidth=10000)
283
284    # The current algorithm is a midpoint rectangle rule.
285    q_edges = bin_edges(q_calc) # Note: requires q > 0
286    q_edges[q_edges<0.0] = 0.0 # clip edges below zero
287    weights = np.zeros((len(q), len(q_calc)), 'd')
288
289    #print(q_calc)
290    for i, (qi, w, h) in enumerate(zip(q, width, height)):
291        if w == 0. and h == 0.:
292            # Perfect resolution, so return the theory value directly.
293            # Note: assumes that q is a subset of q_calc.  If qi need not be
294            # in q_calc, then we can do a weighted interpolation by looking
295            # up qi in q_calc, then weighting the result by the relative
296            # distance to the neighbouring points.
297            weights[i, :] = (q_calc == qi)
298        elif h == 0:
299            weights[i, :] = _q_perp_weights(q_edges, qi, w)
300        elif w == 0:
301            in_x = 1.0 * ((q_calc >= qi-h) & (q_calc <= qi+h))
302            abs_x = 1.0*(q_calc < abs(qi - h)) if qi < h else 0.
303            #print(qi - h, qi + h)
304            #print(in_x + abs_x)
305            weights[i,:] = (in_x + abs_x) * np.diff(q_edges) / (2*h)
306        else:
307            L = n_height
308            for k in range(-L, L+1):
309                weights[i,:] += _q_perp_weights(q_edges, qi+k*h/L, w)
310            weights[i,:] /= 2*L + 1
311
312    return weights.T
313
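# Sketch (editor's addition): sanity check that for a width-only slit each
# column of the weight matrix integrates to one over [0, width], matching the
# 1/(Delta q_perp) normalization described above.  The q and width values are
# arbitrary.
def _example_slit_weights():
    q = np.array([0.01, 0.02, 0.03])
    width, height = np.full_like(q, 0.05), np.zeros_like(q)
    q_calc = slit_extend_q(q, width, height)
    weights = slit_resolution(q_calc, q, width, height)
    assert np.allclose(weights.sum(axis=0), 1.0)
    return weights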
314
315def _q_perp_weights(q_edges, qi, w):
316    # Convert bin edges from q to u
317    u_limit = np.sqrt(qi**2 + w**2)
318    u_edges = q_edges**2 - qi**2
319    u_edges[q_edges < abs(qi)] = 0.
320    u_edges[q_edges > u_limit] = u_limit**2 - qi**2
321    weights = np.diff(np.sqrt(u_edges))/w
322    #print("i, qi",i,qi,qi+width)
323    #print(q_calc)
324    #print(weights)
325    return weights
326
327
328def pinhole_extend_q(q, q_width, nsigma=3):
329    """
330    Given *q* and *q_width*, find a set of sampling points *q_calc* so
331    that each point I(q) has sufficient support from the underlying
332    function.
333    """
334    q_min, q_max = np.min(q - nsigma*q_width), np.max(q + nsigma*q_width)
335    return linear_extrapolation(q, q_min, q_max)
336
337
338def slit_extend_q(q, width, height):
339    """
340    Given *q*, *width* and *height*, find a set of sampling points *q_calc* so
341    that each point I(q) has sufficient support from the underlying
342    function.
343    """
344    q_min, q_max = np.min(q-height), np.max(np.sqrt((q+height)**2 + width**2))
345
346    return geometric_extrapolation(q, q_min, q_max)
347
348
349def bin_edges(x):
350    """
351    Determine bin edges from bin centers, assuming that edges are centered
352    between the bins.
353
354    Note: this uses the arithmetic mean, which may not be appropriate for
355    log-scaled data.
356    """
357    if len(x) < 2 or (np.diff(x)<0).any():
358        raise ValueError("Expected bins to be an increasing set")
359    edges = np.hstack([
360        x[0]  - 0.5*(x[1]  - x[0]),  # first point minus half first interval
361        0.5*(x[1:] + x[:-1]),        # mid points of all central intervals
362        x[-1] + 0.5*(x[-1] - x[-2]), # last point plus half last interval
363        ])
364    return edges
365
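# Sketch (editor's addition): a small worked example of the edge construction;
# interior edges are midpoints of the centers and the end edges extend by half
# an interval.
def _example_bin_edges():
    edges = bin_edges(np.array([1.0, 2.0, 4.0]))
    assert np.allclose(edges, [0.5, 1.5, 3.0, 5.0])
    return edges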
366
367def interpolate(q, max_step):
368    """
369    Returns *q_calc* with points spaced at most max_step apart.
370    """
371    step = np.diff(q)
372    index = step>max_step
373    if np.any(index):
374        inserts = []
375        for q_i,step_i in zip(q[:-1][index],step[index]):
376            n = np.ceil(step_i/max_step)
377            inserts.extend(q_i + np.arange(1,n)*(step_i/n))
378        # Extend a couple of fringes beyond the end of the data
379        inserts.extend(q[-1] + np.arange(1,8)*max_step)
380        q_calc = np.sort(np.hstack((q,inserts)))
381    else:
382        q_calc = q
383    return q_calc
384
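# Sketch (editor's addition): subdividing a coarse q so that no step exceeds
# max_step; a few extra fringe points are also appended past the last q.  The
# q values and max_step are arbitrary.
def _example_interpolate():
    q_calc = interpolate(np.array([0.01, 0.02, 0.06]), max_step=0.02)
    assert np.all(np.diff(q_calc) <= 0.02 + 1e-12)
    return q_calc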
385
386def linear_extrapolation(q, q_min, q_max):
387    """
388    Extrapolate *q* out to [*q_min*, *q_max*] using the step size in *q* as
389    a guide.  Extrapolation below uses about the same size as the first
390    interval.  Extrapolation above uses about the same size as the final
391    interval.
392
393    If *q_min* is zero or less then *q[0]*/100 is used instead.
394    """
395    q = np.sort(q)
396    if q_min < q[0]:
397        if q_min <= 0: q_min = q[0]*MIN_Q_SCALE_FOR_NEGATIVE_Q_EXTRAPOLATION
398        n_low = np.ceil((q[0]-q_min) / (q[1]-q[0])) if q[1]>q[0] else 15
399        q_low = np.linspace(q_min, q[0], n_low+1)[:-1]
400    else:
401        q_low = []
402    if q_max > q[-1]:
403        n_high = np.ceil((q_max-q[-1]) / (q[-1]-q[-2])) if q[-1]>q[-2] else 15
404        q_high = np.linspace(q[-1], q_max, n_high+1)[1:]
405    else:
406        q_high = []
407    return np.concatenate([q_low, q, q_high])
408
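# Sketch (editor's addition): extending an arbitrary q vector below and above
# its range; the added points reuse roughly the first/last interval size.
def _example_linear_extrapolation():
    q_calc = linear_extrapolation(np.array([0.1, 0.2, 0.3]), 0.05, 0.4)
    assert np.isclose(q_calc[0], 0.05) and np.isclose(q_calc[-1], 0.4)
    assert np.all(np.diff(q_calc) > 0)  # still strictly increasing
    return q_calc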
409
410def geometric_extrapolation(q, q_min, q_max, points_per_decade=None):
411    r"""
412    Extrapolate *q* to [*q_min*, *q_max*] using geometric steps, with the
413    average geometric step size in *q* as the step size.
414
415    If *q_min* is zero or less then *q[0]*/100 is used instead.
416
417    *points_per_decade* sets the ratio between consecutive steps such
418    that there will be $n$ points used for every factor of 10 increase
419    in *q*.
420
421    If *points_per_decade* is not given, it will be estimated as follows.
422    Starting at $q_1$ and stepping geometrically by $\Delta q$ to $q_n$
423    in $n$ points gives a geometric average of:
424
425    .. math::
426
427         \log \Delta q = (\log q_n - \log q_1) / (n - 1)
428
429    From this we can compute the number of steps required to extend $q$
430    from $q_n$ to $q_\text{max}$ by $\Delta q$ as:
431
432    .. math::
433
434         n_\text{extend} = (\log q_\text{max} - \log q_n) / \log \Delta q
435
436    Substituting:
437
438    .. math::
439
440        n_\text{extend} = (n-1) (\log q_\text{max} - \log q_n)
441            / (\log q_n - \log q_1)
442    """
443    q = np.sort(q)
444    if points_per_decade is None:
445        log_delta_q = (len(q) - 1) / (log(q[-1]) - log(q[0]))
446    else:
447        log_delta_q = log(10.) / points_per_decade
448    if q_min < q[0]:
449        if q_min <= 0: q_min = q[0]*MIN_Q_SCALE_FOR_NEGATIVE_Q_EXTRAPOLATION
450        n_low = log_delta_q * (log(q[0])-log(q_min))
451        q_low  = np.logspace(log10(q_min), log10(q[0]), np.ceil(n_low)+1)[:-1]
452    else:
453        q_low = []
454    if q_max > q[-1]:
455        n_high = log_delta_q * (log(q_max)-log(q[-1]))
456        q_high = np.logspace(log10(q[-1]), log10(q_max), np.ceil(n_high)+1)[1:]
457    else:
458        q_high = []
459    return np.concatenate([q_low, q, q_high])
460
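# Sketch (editor's addition): with the step estimated from q itself, extending
# a decade of data by a decade on each side adds roughly ten more points per
# decade.  The q range below is arbitrary.
def _example_geometric_extrapolation():
    q = np.logspace(-3, -2, 11)
    q_calc = geometric_extrapolation(q, 1e-4, 1e-1)
    assert np.isclose(q_calc[0], 1e-4) and np.isclose(q_calc[-1], 1e-1)
    return q_calc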
461
462############################################################################
463# unit tests
464############################################################################
465import unittest
466
467
468def eval_form(q, form, pars):
469    from sasmodels import core
470    kernel = core.make_kernel(form, [q])
471    theory = core.call_kernel(kernel, pars)
472    kernel.release()
473    return theory
474
475
476def gaussian(q, q0, dq):
477    from numpy import exp, pi
478    return exp(-0.5*((q-q0)/dq)**2)/(sqrt(2*pi)*dq)
479
480
481def romberg_slit_1d(q, width, height, form, pars):
482    """
483    Romberg integration for slit resolution.
484
485    This is an adaptive integration technique.  It is called with settings
486    that make it slow to evaluate but give it good accuracy.
487    """
488    from scipy.integrate import romberg, dblquad
489
490    if any(k not in form.info['defaults'] for k in pars.keys()):
491        keys = set(form.info['defaults'].keys())
492        extra = set(pars.keys()) - keys
493        raise ValueError("bad parameters: [%s] not in [%s]"%
494                         (", ".join(sorted(extra)), ", ".join(sorted(keys))))
495
496    if np.isscalar(width):
497        width = [width]*len(q)
498    if np.isscalar(height):
499        height = [height]*len(q)
500    _int_w = lambda w, qi: eval_form(sqrt(qi**2 + w**2), form, pars)
501    _int_h = lambda h, qi: eval_form(abs(qi+h), form, pars)
502    # If both width and height are defined, then it is too slow to use dblquad.
503    # Instead use trapz on a fixed grid, interpolated into the I(Q) for
504    # the extended Q range.
505    #_int_wh = lambda w, h, qi: eval_form(sqrt((qi+h)**2 + w**2), form, pars)
506    q_calc = slit_extend_q(q, np.asarray(width), np.asarray(height))
507    Iq = eval_form(q_calc, form, pars)
508    result = np.empty(len(q))
509    for i, (qi, w, h) in enumerate(zip(q, width, height)):
510        if h == 0.:
511            r = romberg(_int_w, 0, w, args=(qi,),
512                        divmax=100, vec_func=True, tol=0, rtol=1e-8)
513            result[i] = r/w
514        elif w == 0.:
515            r = romberg(_int_h, -h, h, args=(qi,),
516                        divmax=100, vec_func=True, tol=0, rtol=1e-8)
517            result[i] = r/(2*h)
518        else:
519            w_grid = np.linspace(0, w, 21)[None,:]
520            h_grid = np.linspace(-h, h, 23)[:,None]
521            u = sqrt((qi+h_grid)**2 + w_grid**2)
522            Iu = np.interp(u, q_calc, Iq)
523            #print(np.trapz(Iu, w_grid, axis=1))
524            Is = np.trapz(np.trapz(Iu, w_grid, axis=1), h_grid[:,0])
525            result[i] = Is / (2*h*w)
526            """
527            r, err = dblquad(_int_wh, -h, h, lambda h: 0., lambda h: w,
528                             args=(qi,))
529            result[i] = r/(w*2*h)
530            """
531
532    # r should be [float, ...], but it is [array([float]), array([float]),...]
533    return result
534
535
536def romberg_pinhole_1d(q, q_width, form, pars, nsigma=5):
537    """
538    Romberg integration for pinhole resolution.
539
540    This is an adaptive integration technique.  It is called with settings
541    that make it slow to evaluate but give it good accuracy.
542    """
543    from scipy.integrate import romberg
544
545    if any(k not in form.info['defaults'] for k in pars.keys()):
546        keys = set(form.info['defaults'].keys())
547        extra = set(pars.keys()) - keys
548        raise ValueError("bad parameters: [%s] not in [%s]"%
549                         (", ".join(sorted(extra)), ", ".join(sorted(keys))))
550
551    _fn = lambda q, q0, dq: eval_form(q, form, pars)*gaussian(q, q0, dq)
552    r = [romberg(_fn, max(qi-nsigma*dqi,1e-10*q[0]), qi+nsigma*dqi, args=(qi, dqi),
553                 divmax=100, vec_func=True, tol=0, rtol=1e-8)
554         for qi,dqi in zip(q,q_width)]
555    return np.asarray(r).flatten()
556
557
558class ResolutionTest(unittest.TestCase):
559
560    def setUp(self):
561        self.x = 0.001*np.arange(1, 11)
562        self.y = self.Iq(self.x)
563
564    def Iq(self, q):
565        "Linear function for resolution unit test"
566        return 12.0 - 1000.0*q
567
568    def test_perfect(self):
569        """
570        Perfect resolution and no smearing.
571        """
572        resolution = Perfect1D(self.x)
573        theory = self.Iq(resolution.q_calc)
574        output = resolution.apply(theory)
575        np.testing.assert_equal(output, self.y)
576
577    def test_slit_zero(self):
578        """
579        Slit smearing with perfect resolution.
580        """
581        resolution = Slit1D(self.x, width=0, height=0, q_calc=self.x)
582        theory = self.Iq(resolution.q_calc)
583        output = resolution.apply(theory)
584        np.testing.assert_equal(output, self.y)
585
586    @unittest.skip("not yet supported")
587    def test_slit_high(self):
588        """
589        Slit smearing with height 0.005
590        """
591        resolution = Slit1D(self.x, width=0, height=0.005, q_calc=self.x)
592        theory = self.Iq(resolution.q_calc)
593        output = resolution.apply(theory)
594        answer = [ 9.0618, 8.6402, 8.1187, 7.1392, 6.1528,
595                   5.5555, 4.5584, 3.5606, 2.5623, 2.0000 ]
596        np.testing.assert_allclose(output, answer, atol=1e-4)
597
598    @unittest.skip("not yet supported")
599    def test_slit_both_high(self):
600        """
601        Slit smearing with width < 100*height.
602        """
603        q = np.logspace(-4, -1, 10)
604        resolution = Slit1D(q, width=0.2, height=np.inf)
605        theory = 1000*self.Iq(resolution.q_calc**4)
606        output = resolution.apply(theory)
607        answer = [ 8.85785, 8.43012, 7.92687, 6.94566, 6.03660,
608                   5.40363, 4.40655, 3.40880, 2.41058, 2.00000 ]
609        np.testing.assert_allclose(output, answer, atol=1e-4)
610
611    @unittest.skip("not yet supported")
612    def test_slit_wide(self):
613        """
614        Slit smearing with width 0.0002
615        """
616        resolution = Slit1D(self.x, width=0.0002, height=0, q_calc=self.x)
617        theory = self.Iq(resolution.q_calc)
618        output = resolution.apply(theory)
619        answer = [ 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0 ]
620        np.testing.assert_allclose(output, answer, atol=1e-4)
621
622    @unittest.skip("not yet supported")
623    def test_slit_both_wide(self):
624        """
625        Slit smearing with width > 100*height.
626        """
627        resolution = Slit1D(self.x, width=0.0002, height=0.000001,
628                            q_calc=self.x)
629        theory = self.Iq(resolution.q_calc)
630        output = resolution.apply(theory)
631        answer = [ 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0 ]
632        np.testing.assert_allclose(output, answer, atol=1e-4)
633
634    def test_pinhole_zero(self):
635        """
636        Pinhole smearing with perfect resolution
637        """
638        resolution = Pinhole1D(self.x, 0.0*self.x)
639        theory = self.Iq(resolution.q_calc)
640        output = resolution.apply(theory)
641        np.testing.assert_equal(output, self.y)
642
643    def test_pinhole(self):
644        """
645        Pinhole smearing with dQ = 0.001 [Note: not dQ/Q = 0.001]
646        """
647        resolution = Pinhole1D(self.x, 0.001*np.ones_like(self.x),
648                               q_calc=self.x)
649        theory = 12.0-1000.0*resolution.q_calc
650        output = resolution.apply(theory)
651        answer = [ 10.44785079, 9.84991299, 8.98101708,
652                  7.99906585, 6.99998311, 6.00001689,
653                  5.00093415, 4.01898292, 3.15008701, 2.55214921]
654        np.testing.assert_allclose(output, answer, atol=1e-8)
655
656
657class IgorComparisonTest(unittest.TestCase):
658
659    def setUp(self):
660        self.pars = TEST_PARS_PINHOLE_SPHERE
661        from sasmodels import core
662        from sasmodels.models import sphere
663        self.model = core.load_model(sphere, dtype='double')
664
665    def Iq_sphere(self, pars, resolution):
666        from sasmodels import core
667        kernel = core.make_kernel(self.model, [resolution.q_calc])
668        theory = core.call_kernel(kernel, pars)
669        result = resolution.apply(theory)
670        kernel.release()
671        return result
672
673    def compare(self, q, output, answer, tolerance):
674        #err = (output - answer)/answer
675        #idx = abs(err) >= tolerance
676        #problem = zip(q[idx], output[idx], answer[idx], err[idx])
677        #print("\n".join(str(v) for v in problem))
678        np.testing.assert_allclose(output, answer, rtol=tolerance)
679
680    def test_perfect(self):
681        """
682        Compare sphere model with NIST Igor SANS, no resolution smearing.
683        """
684        pars = TEST_PARS_SLIT_SPHERE
685        data_string = TEST_DATA_SLIT_SPHERE
686
687        data = np.loadtxt(data_string.split('\n')).T
688        q, width, answer, _ = data
689        resolution = Perfect1D(q)
690        output = self.Iq_sphere(pars, resolution)
691        self.compare(q, output, answer, 1e-6)
692
693    def test_pinhole(self):
694        """
695        Compare pinhole resolution smearing with NIST Igor SANS
696        """
697        pars = TEST_PARS_PINHOLE_SPHERE
698        data_string = TEST_DATA_PINHOLE_SPHERE
699
700        data = np.loadtxt(data_string.split('\n')).T
701        q, q_width, answer = data
702        resolution = Pinhole1D(q, q_width)
703        output = self.Iq_sphere(pars, resolution)
704        # TODO: relative error should be lower
705        self.compare(q, output, answer, 3e-4)
706
707    def test_pinhole_romberg(self):
708        """
709        Compare pinhole resolution smearing with romberg integration result.
710        """
711        pars = TEST_PARS_PINHOLE_SPHERE
712        data_string = TEST_DATA_PINHOLE_SPHERE
713        pars['radius'] *= 5
714        radius = pars['radius']
715
716        data = np.loadtxt(data_string.split('\n')).T
717        q, q_width, answer = data
718        answer = romberg_pinhole_1d(q, q_width, self.model, pars)
719        ## Getting 0.1% requires 5 sigma and 200 points per fringe
720        #q_calc = interpolate(pinhole_extend_q(q, q_width, nsigma=5),
721        #                     2*np.pi/radius/200)
722        #tol = 0.001
723        ## The default 3 sigma and no extra points gets 1%
724        q_calc, tol = None, 0.01
725        resolution = Pinhole1D(q, q_width, q_calc=q_calc)
726        output = self.Iq_sphere(pars, resolution)
727        if 0: # debug plot
728            import matplotlib.pyplot as plt
729            resolution = Perfect1D(q)
730            source = self.Iq_sphere(pars, resolution)
731            plt.loglog(q, source, '.')
732            plt.loglog(q, answer, '-', hold=True)
733            plt.loglog(q, output, '-', hold=True)
734            plt.show()
735        self.compare(q, output, answer, tol)
736
737    def test_slit(self):
738        """
739        Compare slit resolution smearing with NIST Igor SANS
740        """
741        pars = TEST_PARS_SLIT_SPHERE
742        data_string = TEST_DATA_SLIT_SPHERE
743
744        data = np.loadtxt(data_string.split('\n')).T
745        q, delta_qv, _, answer = data
746        resolution = Slit1D(q, width=delta_qv, height=0)
747        output = self.Iq_sphere(pars, resolution)
748        # TODO: eliminate Igor test since it is too inaccurate to be useful.
749        # This means we can eliminate the test data as well, and instead
750        # use a generated q vector.
751        self.compare(q, output, answer, 0.5)
752
753    def test_slit_romberg(self):
754        """
755        Compare slit resolution smearing with romberg integration result.
756        """
757        pars = TEST_PARS_SLIT_SPHERE
758        data_string = TEST_DATA_SLIT_SPHERE
759        radius = pars['radius']
760
761        data = np.loadtxt(data_string.split('\n')).T
762        q, delta_qv, _, answer = data
763        answer = romberg_slit_1d(q, delta_qv, 0., self.model, pars)
764        q_calc = slit_extend_q(interpolate(q, 2*np.pi/radius/20),
765                               delta_qv[0], 0.)
766        resolution = Slit1D(q, width=delta_qv, height=0, q_calc=q_calc)
767        output = self.Iq_sphere(pars, resolution)
768        # TODO: relative error should be lower
769        self.compare(q, output, answer, 0.025)
770
771    def test_ellipsoid(self):
772        """
773        Compare romberg integration for ellipsoid model.
774        """
775        from .core import load_model
776        pars = {
777            'scale':0.05,
778            'rpolar':500, 'requatorial':15000,
779            'sld':6, 'solvent_sld': 1,
780            }
781        form = load_model('ellipsoid', dtype='double')
782        q = np.logspace(log10(4e-5),log10(2.5e-2), 68)
783        width, height = 0.117, 0.
784        resolution = Slit1D(q, width=width, height=height)
785        answer = romberg_slit_1d(q, width, height, form, pars)
786        output = resolution.apply(eval_form(resolution.q_calc, form, pars))
787        # TODO: 10% is too much error; use better algorithm
788        #print(np.max(abs(answer-output)/answer))
789        self.compare(q, output, answer, 0.1)
790
791    #TODO: can sas q spacing be too sparse for the resolution calculation?
792    @unittest.skip("suppress sparse data test; not supported by current code")
793    def test_pinhole_sparse(self):
794        """
795        Compare pinhole resolution smearing with NIST Igor SANS on sparse data
796        """
797        pars = TEST_PARS_PINHOLE_SPHERE
798        data_string = TEST_DATA_PINHOLE_SPHERE
799
800        data = np.loadtxt(data_string.split('\n')).T
801        q, q_width, answer = data[:, ::20] # Take every 20th point
802        resolution = Pinhole1D(q, q_width)
803        output = self.Iq_sphere(pars, resolution)
804        self.compare(q, output, answer, 1e-6)
805
806
807# pinhole sphere parameters
808TEST_PARS_PINHOLE_SPHERE = {
809    'scale': 1.0, 'background': 0.01,
810    'radius': 60.0, 'sld': 1, 'solvent_sld': 6.3,
811    }
812# Q, dQ, I(Q) calculated by NIST Igor SANS package
813TEST_DATA_PINHOLE_SPHERE = """\
8140.001278 0.0002847 2538.41176383
8150.001562 0.0002905 2536.91820405
8160.001846 0.0002956 2535.13182479
8170.002130 0.0003017 2533.06217813
8180.002414 0.0003087 2530.70378586
8190.002698 0.0003165 2528.05024192
8200.002982 0.0003249 2525.10408349
8210.003266 0.0003340 2521.86667499
8220.003550 0.0003437 2518.33907750
8230.003834 0.0003539 2514.52246995
8240.004118 0.0003646 2510.41798319
8250.004402 0.0003757 2506.02690988
8260.004686 0.0003872 2501.35067884
8270.004970 0.0003990 2496.38678318
8280.005253 0.0004112 2491.16237596
8290.005537 0.0004237 2485.63911673
8300.005821 0.0004365 2479.83657083
8310.006105 0.0004495 2473.75676948
8320.006389 0.0004628 2467.40145990
8330.006673 0.0004762 2460.77293372
8340.006957 0.0004899 2453.86724627
8350.007241 0.0005037 2446.69623838
8360.007525 0.0005177 2439.25775219
8370.007809 0.0005318 2431.55421398
8380.008093 0.0005461 2423.58785521
8390.008377 0.0005605 2415.36158137
8400.008661 0.0005750 2406.87009473
8410.008945 0.0005896 2398.12841186
8420.009229 0.0006044 2389.13360806
8430.009513 0.0006192 2379.88958042
8440.009797 0.0006341 2370.39776774
8450.010080 0.0006491 2360.69528793
8460.010360 0.0006641 2350.85169027
8470.010650 0.0006793 2340.42023633
8480.010930 0.0006945 2330.11206013
8490.011220 0.0007097 2319.20109972
8500.011500 0.0007251 2308.43503981
8510.011780 0.0007404 2297.44820179
8520.012070 0.0007558 2285.83853677
8530.012350 0.0007713 2274.41290746
8540.012640 0.0007868 2262.36219581
8550.012920 0.0008024 2250.51169731
8560.013200 0.0008180 2238.45596231
8570.013490 0.0008336 2225.76495666
8580.013770 0.0008493 2213.29618391
8590.014060 0.0008650 2200.19110751
8600.014340 0.0008807 2187.34050325
8610.014620 0.0008965 2174.30529864
8620.014910 0.0009123 2160.61632548
8630.015190 0.0009281 2147.21038112
8640.015470 0.0009440 2133.62023580
8650.015760 0.0009598 2119.37907426
8660.016040 0.0009757 2105.45234903
8670.016330 0.0009916 2090.86319102
8680.016610 0.0010080 2076.60576032
8690.016890 0.0010240 2062.19214565
8700.017180 0.0010390 2047.10550219
8710.017460 0.0010550 2032.38715621
8720.017740 0.0010710 2017.52560123
8730.018030 0.0010880 2001.99124318
8740.018310 0.0011040 1986.84662060
8750.018600 0.0011200 1971.03389745
8760.018880 0.0011360 1955.61395119
8770.019160 0.0011520 1940.08291563
8780.019450 0.0011680 1923.87672225
8790.019730 0.0011840 1908.10656374
8800.020020 0.0012000 1891.66297192
8810.020300 0.0012160 1875.66789021
8820.020580 0.0012320 1859.56357196
8830.020870 0.0012490 1842.79468290
8840.021150 0.0012650 1826.50064489
8850.021430 0.0012810 1810.11533702
8860.021720 0.0012970 1793.06840882
8870.022000 0.0013130 1776.51153580
8880.022280 0.0013290 1759.87201249
8890.022570 0.0013460 1742.57354412
8900.022850 0.0013620 1725.79397319
8910.023140 0.0013780 1708.35831550
8920.023420 0.0013940 1691.45256069
8930.023700 0.0014110 1674.48561783
8940.023990 0.0014270 1656.86525366
8950.024270 0.0014430 1639.79847285
8960.024550 0.0014590 1622.68887088
8970.024840 0.0014760 1604.96421100
8980.025120 0.0014920 1587.85768129
8990.025410 0.0015080 1569.99297335
9000.025690 0.0015240 1552.84580279
9010.025970 0.0015410 1535.54074115
9020.026260 0.0015570 1517.75249337
9030.026540 0.0015730 1500.40115023
9040.026820 0.0015900 1483.03632237
9050.027110 0.0016060 1465.05942429
9060.027390 0.0016220 1447.67682181
9070.027670 0.0016390 1430.46495191
9080.027960 0.0016550 1412.49232282
9090.028240 0.0016710 1395.13182318
9100.028520 0.0016880 1377.93439837
9110.028810 0.0017040 1359.99528971
9120.029090 0.0017200 1342.67274512
9130.029370 0.0017370 1325.55375609
914"""
915
916# Slit sphere parameters
917TEST_PARS_SLIT_SPHERE = {
918    'scale': 0.01, 'background': 0.01,
919    'radius': 60000, 'sld': 1, 'solvent_sld': 4,
920    }
921# Q dQ I(Q) I_smeared(Q)
922TEST_DATA_SLIT_SPHERE = """\
9232.26097e-05 0.117 5.5781372896e+09 1.4626077708e+06
9242.53847e-05 0.117 5.0363141458e+09 1.3117318023e+06
9252.81597e-05 0.117 4.4850108103e+09 1.1594863713e+06
9263.09347e-05 0.117 3.9364658459e+09 1.0094881630e+06
9273.37097e-05 0.117 3.4019975074e+09 8.6518941303e+05
9283.92597e-05 0.117 2.4139519814e+09 6.0232158311e+05
9294.48097e-05 0.117 1.5816877820e+09 3.8739994090e+05
9305.03597e-05 0.117 9.3715407224e+08 2.2745304775e+05
9315.59097e-05 0.117 4.8387917428e+08 1.2101295768e+05
9326.14597e-05 0.117 2.0193586928e+08 6.0055107771e+04
9336.70097e-05 0.117 5.5886110911e+07 3.2749521065e+04
9347.25597e-05 0.117 3.7782348010e+06 2.6350963616e+04
9357.81097e-05 0.117 5.3407817904e+06 2.9624963314e+04
9368.36597e-05 0.117 2.7975485523e+07 3.4403962254e+04
9378.92097e-05 0.117 4.9845448282e+07 3.6130017903e+04
9389.47597e-05 0.117 6.0092588905e+07 3.3495107849e+04
9391.00310e-04 0.117 5.6823430831e+07 2.7475726279e+04
9401.05860e-04 0.117 4.3857024036e+07 2.0144282226e+04
9411.11410e-04 0.117 2.7277144760e+07 1.3647403260e+04
9421.22510e-04 0.117 3.3119334113e+06 6.6519711526e+03
9431.33610e-04 0.117 1.4412859402e+06 6.9726212813e+03
9441.44710e-04 0.117 8.5620162463e+06 8.1441335775e+03
9451.55810e-04 0.117 9.6957429033e+06 6.4559996521e+03
9461.66910e-04 0.117 4.3818341914e+06 3.6252154156e+03
9471.78010e-04 0.117 2.7448997387e+05 2.4006505342e+03
9481.89110e-04 0.117 8.0472009936e+05 2.8187789089e+03
9492.00210e-04 0.117 2.8149552834e+06 3.0915662855e+03
9502.11310e-04 0.117 2.7510907861e+06 2.3722530293e+03
9512.22410e-04 0.117 1.0053133293e+06 1.4473468311e+03
9522.33510e-04 0.117 5.8428305052e+03 1.2048540556e+03
9532.44610e-04 0.117 5.1699305004e+05 1.4625670042e+03
9542.55710e-04 0.117 1.2120227268e+06 1.5010705973e+03
9552.66810e-04 0.117 9.7896842846e+05 1.1336343426e+03
9562.77910e-04 0.117 2.5507264791e+05 8.1848818080e+02
9573.05660e-04 0.117 5.2403101181e+05 7.4913374357e+02
9583.33410e-04 0.117 5.8699343809e+04 4.4669964560e+02
9593.61160e-04 0.117 3.0844327150e+05 4.6774007542e+02
9603.88910e-04 0.117 8.3360142970e+03 2.7169550220e+02
9614.16660e-04 0.117 1.8630080583e+05 3.0710983679e+02
9624.44410e-04 0.117 3.1616804732e-01 1.7959006831e+02
9634.72160e-04 0.117 1.1299016314e+05 2.0763952339e+02
9644.99910e-04 0.117 2.9952522747e+03 1.2536542765e+02
9655.27660e-04 0.117 6.7625695649e+04 1.4013969777e+02
9665.55410e-04 0.117 7.6927460089e+03 8.2145593180e+01
9676.10910e-04 0.117 1.1229057779e+04 8.4519745643e+01
9686.66410e-04 0.117 1.3035567943e+04 8.1554625609e+01
9697.21910e-04 0.117 1.3309931343e+04 7.4437319172e+01
9707.77410e-04 0.117 1.2462626212e+04 6.4697088261e+01
9718.32910e-04 0.117 1.0912927143e+04 5.3773301044e+01
9728.88410e-04 0.117 9.0172597469e+03 4.2843375753e+01
9739.43910e-04 0.117 7.0496495917e+03 3.2771032724e+01
9749.99410e-04 0.117 5.2030483682e+03 2.4113557144e+01
9751.05491e-03 0.117 3.5988976711e+03 1.7160773658e+01
9761.11041e-03 0.117 2.2996060652e+03 1.2016626459e+01
9771.22141e-03 0.117 6.4766590598e+02 6.0373017740e+00
9781.33241e-03 0.117 4.1963483264e+01 4.5215452974e+00
9791.44341e-03 0.117 6.3370708246e+01 5.1054681903e+00
9801.55441e-03 0.117 3.0736750577e+02 5.9176165298e+00
9811.66541e-03 0.117 5.0327682399e+02 5.9815000189e+00
9821.77641e-03 0.117 5.4084331454e+02 5.1634639625e+00
9831.88741e-03 0.117 4.3488671756e+02 3.8535158148e+00
9841.99841e-03 0.117 2.6322287860e+02 2.5824997753e+00
9852.10941e-03 0.117 1.0793633150e+02 1.7315517194e+00
9862.22041e-03 0.117 1.8474448850e+01 1.4077213604e+00
9872.33141e-03 0.117 1.5864062279e+00 1.4771560682e+00
9882.44241e-03 0.117 3.2267213848e+01 1.6916253448e+00
9892.55341e-03 0.117 7.4289116207e+01 1.8274751193e+00
9902.66441e-03 0.117 9.9000521929e+01 1.7706812289e+00
991"""
992
993def main():
994    """
995    Run tests given in sys.argv.
996
997    Returns 0 if success or 1 if any tests fail.
998    """
999    import sys
1000    import xmlrunner
1001
1002    suite = unittest.TestSuite()
1003    suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(sys.modules[__name__]))
1004
1005    runner = xmlrunner.XMLTestRunner(output='logs')
1006    result = runner.run(suite)
1007    return 1 if result.failures or result.errors else 0
1008
1009
1010############################################################################
1011# usage demo
1012############################################################################
1013
1014def _eval_demo_1d(resolution, title):
1015    import sys
1016    from sasmodels import core
1017    name = sys.argv[1] if len(sys.argv) > 1 else 'cylinder'
1018
1019    if name == 'cylinder':
1020        pars = {'length':210, 'radius':500}
1021    elif name == 'teubner_strey':
1022        pars = {'a2':0.003, 'c1':-1e4, 'c2':1e10, 'background':0.312643}
1023    elif name == 'sphere' or name == 'spherepy':
1024        pars = TEST_PARS_SLIT_SPHERE
1025    elif name == 'ellipsoid':
1026        pars = {
1027            'scale':0.05,
1028            'rpolar':500, 'requatorial':15000,
1029            'sld':6, 'solvent_sld': 1,
1030            }
1031    else:
1032        pars = {}
1033    defn = core.load_model_definition(name)
1034    model = core.load_model(defn)
1035
1036    kernel = core.make_kernel(model, [resolution.q_calc])
1037    theory = core.call_kernel(kernel, pars)
1038    Iq = resolution.apply(theory)
1039
1040    if isinstance(resolution, Slit1D):
1041        width, height = resolution.width, resolution.height
1042        Iq_romb = romberg_slit_1d(resolution.q, width, height, model, pars)
1043    else:
1044        dq = resolution.q_width
1045        Iq_romb = romberg_pinhole_1d(resolution.q, dq, model, pars)
1046
1047    import matplotlib.pyplot as plt
1048    plt.loglog(resolution.q_calc, theory, label='unsmeared')
1049    plt.loglog(resolution.q, Iq, label='smeared', hold=True)
1050    plt.loglog(resolution.q, Iq_romb, label='romberg smeared', hold=True)
1051    plt.legend()
1052    plt.title(title)
1053    plt.xlabel("Q (1/Ang)")
1054    plt.ylabel("I(Q) (1/cm)")
1055
1056def demo_pinhole_1d():
1057    q = np.logspace(-4, np.log10(0.2), 400)
1058    q_width = 0.1*q
1059    resolution = Pinhole1D(q, q_width)
1060    _eval_demo_1d(resolution, title="10% dQ/Q Pinhole Resolution")
1061
1062def demo_slit_1d():
1063    q = np.logspace(-4, np.log10(0.2), 100)
1064    w = h = 0.
1065    #w = 0.000000277790
1066    w = 0.0277790
1067    #h = 0.00277790
1068    #h = 0.0277790
1069    resolution = Slit1D(q, w, h)
1070    _eval_demo_1d(resolution, title="(%g,%g) Slit Resolution"%(w,h))
1071
1072def demo():
1073    import matplotlib.pyplot as plt
1074    plt.subplot(121)
1075    demo_pinhole_1d()
1076    #plt.yscale('linear')
1077    plt.subplot(122)
1078    demo_slit_1d()
1079    #plt.yscale('linear')
1080    plt.show()
1081
1082
1083if __name__ == "__main__":
1084    #demo()
1085    main()