source: sasview/src/sas/sascalc/data_util/resolution.py @ b2669f5

Last change on this file since b2669f5 was fc18690, checked in by Piotr Rozyczko <piotr.rozyczko@…>, 9 years ago

Sasmodels integration - moved smearing from models to sascalc.

1"""
2Define the resolution functions for the data.
3
4This defines classes for 1D and 2D resolution calculations.
5"""
6from __future__ import division
7from scipy.special import erf
8from numpy import sqrt, log, log10
9import numpy as np
10
11MINIMUM_RESOLUTION = 1e-8
12
13
14# When extrapolating to -q, what is the minimum positive q relative to q_min
15# that we wish to calculate?
16MIN_Q_SCALE_FOR_NEGATIVE_Q_EXTRAPOLATION = 0.01
17
18class Resolution(object):
19    """
20    Abstract base class defining a 1D resolution function.
21
22    *q* is the set of q values at which the data is measured.
23
24    *q_calc* is the set of q values at which the theory needs to be evaluated.
25    This may extend and interpolate the q values.
26
27    *apply* is the method to call with I(q_calc) to compute the resolution
28    smeared theory I(q).
29    """
30    q = None
31    q_calc = None
32    def apply(self, theory):
33        """
34        Smear *theory* by the resolution function, returning *Iq*.
35        """
36        raise NotImplementedError("Subclass does not define the apply function")
37
38
39class Perfect1D(Resolution):
40    """
41    Resolution function to use when there is no actual resolution smearing
42    to be applied.  It has the same interface as the other resolution
43    functions, but returns the identity function.
44    """
45    def __init__(self, q):
46        self.q_calc = self.q = q
47
48    def apply(self, theory):
49        return theory
50
51
52class Pinhole1D(Resolution):
53    r"""
54    Pinhole aperture with q-dependent gaussian resolution.
55
56    *q* points at which the data is measured.
57
58    *q_width* gaussian 1-sigma resolution at each data point.
59
60    *q_calc* is the list of points to calculate, or None if this should
61    be estimated from the *q* and *q_width*.
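
    A minimal usage sketch (the constant *theory* array below is only a
    stand-in for a real model evaluated on *q_calc*)::

        q = np.linspace(0.001, 0.5, 200)
        resolution = Pinhole1D(q, q_width=0.05*q)    # 5% dQ/Q
        theory = np.ones_like(resolution.q_calc)     # I(q_calc) from a model
        Iq = resolution.apply(theory)                # smeared I(q), len(q)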
62    """
63    def __init__(self, q, q_width, q_calc=None, nsigma=3):
64        #*min_step* is the minimum point spacing to use when computing the
65        #underlying model.  It should be on the order of
66        #$\tfrac{1}{10}\tfrac{2\pi}{d_\text{max}}$ to make sure that fringes
67        #are computed with sufficient density to avoid aliasing effects.
68
69        # Protect against calls with q_width=0.  The extend_q function will
70        # not extend the q if q_width is 0, but q_width must be non-zero when
71        # constructing the weight matrix to avoid division by zero errors.
72        # In practice this should never be needed, since resolution should
73        # default to Perfect1D if the pinhole geometry is not defined.
74        self.q, self.q_width = q, q_width
75        self.q_calc = pinhole_extend_q(q, q_width, nsigma=nsigma) \
76            if q_calc is None else np.sort(q_calc)
77        self.weight_matrix = pinhole_resolution(self.q_calc,
78                self.q, np.maximum(q_width, MINIMUM_RESOLUTION))
79
80    def apply(self, theory):
81        return apply_resolution_matrix(self.weight_matrix, theory)
82
83
class Slit1D(Resolution):
    """
    Slit aperture with a complicated resolution function.

    *q* points at which the data is measured.

    *width* slit width

    *height* slit height

    *q_calc* is the list of points to calculate, or None if this should
    be estimated from the *q*, *width* and *height*.

    The *weight_matrix* is computed by :func:`slit_resolution`.
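
    A minimal usage sketch (pure slit-width smearing; the constant *theory*
    array is only a stand-in for a real model evaluated on *q_calc*)::

        q = np.logspace(-4, -1, 50)
        resolution = Slit1D(q, width=0.117, height=0.)
        theory = np.ones_like(resolution.q_calc)
        Iq = resolution.apply(theory)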
98    """
99    def __init__(self, q, width, height=0., q_calc=None):
100        # Remember what width/height was used even though we won't need them
101        # after the weight matrix is constructed
102        self.width, self.height = width, height
103
104        # Allow independent resolution on each point even though it is not
105        # needed in practice.
106        if np.isscalar(width):
107            width = np.ones(len(q))*width
108        else:
109            width = np.asarray(width)
110        if np.isscalar(height):
111            height = np.ones(len(q))*height
112        else:
113            height = np.asarray(height)
114
115        self.q = q.flatten()
116        self.q_calc = slit_extend_q(q, width, height) \
117            if q_calc is None else np.sort(q_calc)
118        self.weight_matrix = \
119            slit_resolution(self.q_calc, self.q, width, height)
120
121    def apply(self, theory):
122        return apply_resolution_matrix(self.weight_matrix, theory)
123
124
125def apply_resolution_matrix(weight_matrix, theory):
126    """
127    Apply the resolution weight matrix to the computed theory function.
128    """
129    #print "apply shapes", theory.shape, weight_matrix.shape
130    Iq = np.dot(theory[None,:], weight_matrix)
131    #print "result shape",Iq.shape
132    return Iq.flatten()
133
134
def pinhole_resolution(q_calc, q, q_width):
    """
    Compute the convolution matrix *W* for pinhole resolution 1-D data.

    Each column *W[:,i]* gives the normalized weights with which the points
    *q_calc* contribute to the resolution smeared point *q[i]*.  Given *W*,
    the resolution smearing can be computed using *dot(theory, W)*, as in
    :func:`apply_resolution_matrix`.

    *q_calc* must be increasing.  *q_width* must be greater than zero.
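
    As a quick sanity check (values chosen only for illustration), each
    column of the returned matrix holds normalized gaussian bin weights,
    so the columns sum to one::

        q_calc = np.linspace(0.001, 0.1, 500)
        q = np.array([0.02, 0.05])
        W = pinhole_resolution(q_calc, q, q_width=np.array([0.002, 0.005]))
        assert np.allclose(W.sum(axis=0), 1.0)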
144    """
145    # The current algorithm is a midpoint rectangle rule.  In the test case,
146    # neither trapezoid nor Simpson's rule improved the accuracy.
147    edges = bin_edges(q_calc)
148    edges[edges<0.0] = 0.0 # clip edges below zero
149    G = erf( (edges[:,None] - q[None,:]) / (sqrt(2.0)*q_width)[None,:] )
150    weights = G[1:] - G[:-1]
151    weights /= np.sum(weights, axis=0)[None,:]
152    return weights
153
154
def slit_resolution(q_calc, q, width, height, n_height=30):
    r"""
    Build a weight matrix to compute *I_s(q)* from *I(q_calc)*, given
    $q_\perp$ = *width* and $q_\parallel$ = *height*.  *n_height* is
    the number of steps to use in the integration over $q_\parallel$
    when both $q_\perp$ and $q_\parallel$ are non-zero.

    Each $q$ can have an independent width and height value even though
    current instruments use the same slit setting for all measured points.

    If slit height is large relative to width, use:

    .. math::

        I_s(q_i) = \frac{1}{\Delta q_\perp}
            \int_0^{\Delta q_\perp} I\left(\sqrt{q_i^2 + q_\perp^2}\right) dq_\perp

    If slit width is large relative to height, use:

    .. math::

        I_s(q_i) = \frac{1}{2 \Delta q_\parallel}
            \int_{-\Delta q_\parallel}^{\Delta q_\parallel}
                I(|q_i + q_\parallel|) dq_\parallel

    For a mixture of slit width and height use:

    .. math::

        I_s(q_i) = \frac{1}{2 \Delta q_\parallel \Delta q_\perp}
            \int_{-\Delta q_\parallel}^{\Delta q_\parallel}
            \int_0^{\Delta q_\perp}
                I\left(\sqrt{(q_i + q_\parallel)^2 + q_\perp^2}\right)
                dq_\perp dq_\parallel

    Algorithm
    ---------

    We are using the mid-point integration rule to assign weights to each
    element of a weight matrix $W$ so that

    .. math::

        I_s(q) = W I(q_\text{calc})

    If *q_calc* is at the mid-point, we can infer the bin edges from the
    pairwise averages of *q_calc*, adding the missing edges before
    *q_calc[0]* and after *q_calc[-1]*.

    For $q_\parallel = 0$, the smeared value can be computed numerically
    using the $u$ substitution

    .. math::

        u_j = \sqrt{q_j^2 - q^2}

    This gives

    .. math::

        I_s(q) \approx \sum_j I(u_j) \Delta u_j

    where $I(u_j)$ is the value at the mid-point, and $\Delta u_j$ is the
    difference between consecutive edges which have been first converted
    to $u$.  Only $u_j \in [0, \Delta q_\perp]$ are used, which corresponds
    to $q_j \in [q, \sqrt{q^2 + \Delta q_\perp^2}]$, so

    .. math::

        W_{ij} = \frac{1}{\Delta q_\perp} \Delta u_j
               = \frac{1}{\Delta q_\perp}
                    \left(\sqrt{q_{j+1}^2 - q_i^2} - \sqrt{q_j^2 - q_i^2}\right)
            \quad\text{if } q_j \in [q_i, \sqrt{q_i^2 + \Delta q_\perp^2}]

    where $I_s(q_i)$ is the theory function being computed and $q_j$ are the
    mid-points between the calculated values in *q_calc*.  We tweak the
    edges of the initial and final intervals so that they lie on integration
    limits.

    (To be precise, the transformed midpoint $u(q_j)$ is not necessarily the
    midpoint of the edges $u((q_{j-1}+q_j)/2)$ and $u((q_j + q_{j+1})/2)$,
    but it is at least in the interval, so the approximation is going to be
    a little better than the left or right Riemann sum, and should be
    good enough for our purposes.)

    For $q_\perp = 0$, the $u$ substitution is simpler:

    .. math::

        u_j = |q_j - q|

    so

    .. math::

        W_{ij} = \frac{1}{2 \Delta q_\parallel} \Delta u_j
            = \frac{1}{2 \Delta q_\parallel} (q_{j+1} - q_j)
            \quad\text{if } q_j \in [q-\Delta q_\parallel, q+\Delta q_\parallel]

    However, we need to support cases where $u_j < 0$, which means using
    $2 (q_{j+1} - q_j)$ when $q_j \in [0, \Delta q_\parallel-q_i]$.  This is
    not an issue for $q_i > \Delta q_\parallel$.

    For both $q_\perp > 0$ and $q_\parallel > 0$ we perform a two dimensional
    integration with

    .. math::

        u_{jk} = \sqrt{q_j^2 - (q + (k\Delta q_\parallel/L))^2}
            \quad\text{for } k = -L \ldots L

    for $L$ = *n_height*.  This gives

    .. math::

        W_{ij} = \frac{1}{2 \Delta q_\perp \Delta q_\parallel}
            \sum_{k=-L}^L \Delta u_{jk} \frac{\Delta q_\parallel}{2L + 1}
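
    A small usage sketch (pure slit-width smearing of an arbitrary constant
    theory curve; the numbers are only for illustration)::

        q = np.linspace(0.001, 0.1, 40)
        width, height = np.full_like(q, 0.01), np.zeros_like(q)
        q_calc = slit_extend_q(q, width, height)
        W = slit_resolution(q_calc, q, width, height)
        Iq = apply_resolution_matrix(W, np.ones_like(q_calc))
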
275    """
276    #np.set_printoptions(precision=6, linewidth=10000)
277
278    # The current algorithm is a midpoint rectangle rule.
279    q_edges = bin_edges(q_calc) # Note: requires q > 0
280    q_edges[q_edges<0.0] = 0.0 # clip edges below zero
281    weights = np.zeros((len(q), len(q_calc)), 'd')
282
283    #print q_calc
284    for i, (qi, w, h) in enumerate(zip(q, width, height)):
285        if w == 0. and h == 0.:
286            # Perfect resolution, so return the theory value directly.
287            # Note: assumes that q is a subset of q_calc.  If qi need not be
288            # in q_calc, then we can do a weighted interpolation by looking
289            # up qi in q_calc, then weighting the result by the relative
290            # distance to the neighbouring points.
291            weights[i, :] = (q_calc == qi)
292        elif h == 0:
293            weights[i, :] = _q_perp_weights(q_edges, qi, w)
294        elif w == 0:
295            in_x = 1.0 * ((q_calc >= qi-h) & (q_calc <= qi+h))
296            abs_x = 1.0*(q_calc < abs(qi - h)) if qi < h else 0.
297            #print qi - h, qi + h
298            #print in_x + abs_x
299            weights[i,:] = (in_x + abs_x) * np.diff(q_edges) / (2*h)
300        else:
301            L = n_height
302            for k in range(-L, L+1):
303                weights[i,:] += _q_perp_weights(q_edges, qi+k*h/L, w)
304            weights[i,:] /= 2*L + 1
305
306    return weights.T
307
308
def _q_perp_weights(q_edges, qi, w):
    # Convert bin edges from q to u
    u_limit = np.sqrt(qi**2 + w**2)
    u_edges = q_edges**2 - qi**2
    u_edges[q_edges < abs(qi)] = 0.
    u_edges[q_edges > u_limit] = u_limit**2 - qi**2
    weights = np.diff(np.sqrt(u_edges))/w
    #print "i, qi",i,qi,qi+width
    #print q_calc
    #print weights
    return weights


def pinhole_extend_q(q, q_width, nsigma=3):
    """
    Given *q* and *q_width*, find a set of sampling points *q_calc* so
    that each point I(q) has sufficient support from the underlying
    function.
    """
    q_min, q_max = np.min(q - nsigma*q_width), np.max(q + nsigma*q_width)
    return linear_extrapolation(q, q_min, q_max)


def slit_extend_q(q, width, height):
    """
    Given *q*, *width* and *height*, find a set of sampling points *q_calc* so
    that each point I(q) has sufficient support from the underlying
    function.
    """
    q_min, q_max = np.min(q-height), np.max(np.sqrt((q+height)**2 + width**2))

    return geometric_extrapolation(q, q_min, q_max)


def bin_edges(x):
    """
    Determine bin edges from bin centers, assuming that edges are centered
    between the bins.

    Note: this uses the arithmetic mean, which may not be appropriate for
    log-scaled data.
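
    For example (values chosen only for illustration), centers
    ``[1., 2., 4.]`` give edges ``[0.5, 1.5, 3.0, 5.0]``::

        edges = bin_edges(np.array([1., 2., 4.]))
        # array([0.5, 1.5, 3. , 5. ])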
350    """
351    if len(x) < 2 or (np.diff(x)<0).any():
352        raise ValueError("Expected bins to be an increasing set")
353    edges = np.hstack([
354        x[0]  - 0.5*(x[1]  - x[0]),  # first point minus half first interval
355        0.5*(x[1:] + x[:-1]),        # mid points of all central intervals
356        x[-1] + 0.5*(x[-1] - x[-2]), # last point plus half last interval
357        ])
358    return edges
359
360
361def interpolate(q, max_step):
362    """
363    Returns *q_calc* with points spaced at most max_step apart.
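
    A rough sketch (illustrative values only)::

        q_calc = interpolate(np.array([0.1, 0.4]), max_step=0.1)
        # approximately [0.1, 0.2, 0.3, 0.4, 0.5, ..., 1.1]: the wide
        # interval is subdivided and a few extra steps extend past q[-1]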
364    """
365    step = np.diff(q)
366    index = step>max_step
367    if np.any(index):
368        inserts = []
369        for q_i,step_i in zip(q[:-1][index],step[index]):
370            n = np.ceil(step_i/max_step)
371            inserts.extend(q_i + np.arange(1,n)*(step_i/n))
372        # Extend a couple of fringes beyond the end of the data
373        inserts.extend(q[-1] + np.arange(1,8)*max_step)
374        q_calc = np.sort(np.hstack((q,inserts)))
375    else:
376        q_calc = q
377    return q_calc
378
379
def linear_extrapolation(q, q_min, q_max):
    """
    Extrapolate *q* out to [*q_min*, *q_max*] using the step size in *q* as
    a guide.  Extrapolation below uses about the same size as the first
    interval.  Extrapolation above uses about the same size as the final
    interval.

    If *q_min* is zero or less, then *q[0]* scaled by
    *MIN_Q_SCALE_FOR_NEGATIVE_Q_EXTRAPOLATION* is used instead.
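
    A small sketch (illustrative values only)::

        q = np.array([0.01, 0.02, 0.03, 0.04])
        q_calc = linear_extrapolation(q, q_min=0.005, q_max=0.06)
        # roughly [0.005, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06]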
388    """
389    q = np.sort(q)
390    if q_min < q[0]:
391        if q_min <= 0: q_min = q_min*MIN_Q_SCALE_FOR_NEGATIVE_Q_EXTRAPOLATION
392        n_low = np.ceil((q[0]-q_min) / (q[1]-q[0])) if q[1]>q[0] else 15
393        q_low = np.linspace(q_min, q[0], n_low+1)[:-1]
394    else:
395        q_low = []
396    if q_max > q[-1]:
397        n_high = np.ceil((q_max-q[-1]) / (q[-1]-q[-2])) if q[-1]>q[-2] else 15
398        q_high = np.linspace(q[-1], q_max, n_high+1)[1:]
399    else:
400        q_high = []
401    return np.concatenate([q_low, q, q_high])
402
403
def geometric_extrapolation(q, q_min, q_max, points_per_decade=None):
    r"""
    Extrapolate *q* to [*q_min*, *q_max*] using geometric steps, with the
    average geometric step size in *q* as the step size.

    If *q_min* is zero or less, then *q[0]* scaled by
    *MIN_Q_SCALE_FOR_NEGATIVE_Q_EXTRAPOLATION* is used instead.

    *points_per_decade* sets the ratio between consecutive steps such
    that there will be $n$ points used for every factor of 10 increase
    in *q*.

    If *points_per_decade* is not given, it will be estimated as follows.
    Starting at $q_1$ and stepping geometrically by $\Delta q$ to $q_n$
    in $n$ points gives a geometric average of:

    .. math::

         \log \Delta q = (\log q_n - \log q_1) / (n - 1)

    From this we can compute the number of steps required to extend $q$
    from $q_n$ to $q_\text{max}$ by $\Delta q$ as:

    .. math::

         n_\text{extend} = (\log q_\text{max} - \log q_n) / \log \Delta q

    Substituting:

    .. math::

        n_\text{extend} = (n-1) (\log q_\text{max} - \log q_n)
            / (\log q_n - \log q_1)
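
    A small sketch (illustrative values only)::

        q = np.logspace(-3, -1, 21)    # about 10 points per decade
        q_calc = geometric_extrapolation(q, q_min=1e-4, q_max=1.0)
        # extends from 1e-4 to 1.0 with roughly the same ~10 points
        # per decade spacing as the measured q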
434    """
435    q = np.sort(q)
436    if points_per_decade is None:
437        log_delta_q = (len(q) - 1) / (log(q[-1]) - log(q[0]))
438    else:
439        log_delta_q = log(10.) / points_per_decade
440    if q_min < q[0]:
441        if q_min < 0: q_min = q[0]*MIN_Q_SCALE_FOR_NEGATIVE_Q_EXTRAPOLATION
442        n_low = log_delta_q * (log(q[0])-log(q_min))
443        q_low  = np.logspace(log10(q_min), log10(q[0]), np.ceil(n_low)+1)[:-1]
444    else:
445        q_low = []
446    if q_max > q[-1]:
447        n_high = log_delta_q * (log(q_max)-log(q[-1]))
448        q_high = np.logspace(log10(q[-1]), log10(q_max), np.ceil(n_high)+1)[1:]
449    else:
450        q_high = []
451    return np.concatenate([q_low, q, q_high])
452
453
############################################################################
# unit tests
############################################################################
import unittest


def eval_form(q, form, pars):
    from sasmodels import core
    kernel = core.make_kernel(form, [q])
    theory = core.call_kernel(kernel, pars)
    kernel.release()
    return theory


def gaussian(q, q0, dq):
    from numpy import exp, pi
    return exp(-0.5*((q-q0)/dq)**2)/(sqrt(2*pi)*dq)


def romberg_slit_1d(q, width, height, form, pars):
    """
    Romberg integration for slit resolution.

    This is an adaptive integration technique.  It is called with settings
    that make it slow to evaluate but give it good accuracy.
    """
    from scipy.integrate import romberg, dblquad

    if any(k not in form.info['defaults'] for k in pars.keys()):
        keys = set(form.info['defaults'].keys())
        extra = set(pars.keys()) - keys
        raise ValueError("bad parameters: [%s] not in [%s]"%
                         (", ".join(sorted(extra)), ", ".join(sorted(keys))))

    if np.isscalar(width):
        width = [width]*len(q)
    if np.isscalar(height):
        height = [height]*len(q)
    _int_w = lambda w, qi: eval_form(sqrt(qi**2 + w**2), form, pars)
    _int_h = lambda h, qi: eval_form(abs(qi+h), form, pars)
    # If both width and height are defined, then it is too slow to use dblquad.
    # Instead use trapz on a fixed grid, interpolated into the I(Q) for
    # the extended Q range.
    #_int_wh = lambda w, h, qi: eval_form(sqrt((qi+h)**2 + w**2), form, pars)
    q_calc = slit_extend_q(q, np.asarray(width), np.asarray(height))
    Iq = eval_form(q_calc, form, pars)
    result = np.empty(len(q))
    for i, (qi, w, h) in enumerate(zip(q, width, height)):
        if h == 0.:
            r = romberg(_int_w, 0, w, args=(qi,),
                        divmax=100, vec_func=True, tol=0, rtol=1e-8)
            result[i] = r/w
        elif w == 0.:
            r = romberg(_int_h, -h, h, args=(qi,),
                        divmax=100, vec_func=True, tol=0, rtol=1e-8)
            result[i] = r/(2*h)
        else:
            w_grid = np.linspace(0, w, 21)[None,:]
            h_grid = np.linspace(-h, h, 23)[:,None]
            u = sqrt((qi+h_grid)**2 + w_grid**2)
            Iu = np.interp(u, q_calc, Iq)
            #print np.trapz(Iu, w_grid, axis=1)
            Is = np.trapz(np.trapz(Iu, w_grid, axis=1), h_grid[:,0])
            result[i] = Is / (2*h*w)
            """
            r, err = dblquad(_int_wh, -h, h, lambda h: 0., lambda h: w,
                             args=(qi,))
            result[i] = r/(w*2*h)
            """

    # r should be [float, ...], but it is [array([float]), array([float]),...]
    return result


def romberg_pinhole_1d(q, q_width, form, pars, nsigma=5):
    """
    Romberg integration for pinhole resolution.

    This is an adaptive integration technique.  It is called with settings
    that make it slow to evaluate but give it good accuracy.
    """
    from scipy.integrate import romberg

    if any(k not in form.info['defaults'] for k in pars.keys()):
        keys = set(form.info['defaults'].keys())
        extra = set(pars.keys()) - keys
        raise ValueError("bad parameters: [%s] not in [%s]"%
                         (", ".join(sorted(extra)), ", ".join(sorted(keys))))

    _fn = lambda q, q0, dq: eval_form(q, form, pars)*gaussian(q, q0, dq)
    r = [romberg(_fn, max(qi-nsigma*dqi,1e-10*q[0]), qi+nsigma*dqi, args=(qi, dqi),
                 divmax=100, vec_func=True, tol=0, rtol=1e-8)
         for qi,dqi in zip(q,q_width)]
    return np.asarray(r).flatten()


class ResolutionTest(unittest.TestCase):

    def setUp(self):
        self.x = 0.001*np.arange(1, 11)
        self.y = self.Iq(self.x)

    def Iq(self, q):
        "Linear function for resolution unit test"
        return 12.0 - 1000.0*q

    def test_perfect(self):
        """
        Perfect resolution and no smearing.
        """
        resolution = Perfect1D(self.x)
        theory = self.Iq(resolution.q_calc)
        output = resolution.apply(theory)
        np.testing.assert_equal(output, self.y)

    def test_slit_zero(self):
        """
        Slit smearing with perfect resolution.
        """
        resolution = Slit1D(self.x, width=0, height=0, q_calc=self.x)
        theory = self.Iq(resolution.q_calc)
        output = resolution.apply(theory)
        np.testing.assert_equal(output, self.y)

    @unittest.skip("not yet supported")
    def test_slit_high(self):
        """
        Slit smearing with height 0.005
        """
        resolution = Slit1D(self.x, width=0, height=0.005, q_calc=self.x)
        theory = self.Iq(resolution.q_calc)
        output = resolution.apply(theory)
        answer = [ 9.0618, 8.6402, 8.1187, 7.1392, 6.1528,
                   5.5555, 4.5584, 3.5606, 2.5623, 2.0000 ]
        np.testing.assert_allclose(output, answer, atol=1e-4)

    @unittest.skip("not yet supported")
    def test_slit_both_high(self):
        """
        Slit smearing with width < 100*height.
        """
        q = np.logspace(-4, -1, 10)
        resolution = Slit1D(q, width=0.2, height=np.inf)
        theory = 1000*self.Iq(resolution.q_calc**4)
        output = resolution.apply(theory)
        answer = [ 8.85785, 8.43012, 7.92687, 6.94566, 6.03660,
                   5.40363, 4.40655, 3.40880, 2.41058, 2.00000 ]
        np.testing.assert_allclose(output, answer, atol=1e-4)

    @unittest.skip("not yet supported")
    def test_slit_wide(self):
        """
        Slit smearing with width 0.0002
        """
        resolution = Slit1D(self.x, width=0.0002, height=0, q_calc=self.x)
        theory = self.Iq(resolution.q_calc)
        output = resolution.apply(theory)
        answer = [ 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0 ]
        np.testing.assert_allclose(output, answer, atol=1e-4)

    @unittest.skip("not yet supported")
    def test_slit_both_wide(self):
        """
        Slit smearing with width > 100*height.
        """
        resolution = Slit1D(self.x, width=0.0002, height=0.000001,
                            q_calc=self.x)
        theory = self.Iq(resolution.q_calc)
        output = resolution.apply(theory)
        answer = [ 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0 ]
        np.testing.assert_allclose(output, answer, atol=1e-4)

    def test_pinhole_zero(self):
        """
        Pinhole smearing with perfect resolution
        """
        resolution = Pinhole1D(self.x, 0.0*self.x)
        theory = self.Iq(resolution.q_calc)
        output = resolution.apply(theory)
        np.testing.assert_equal(output, self.y)

    def test_pinhole(self):
        """
        Pinhole smearing with dQ = 0.001 [Note: not dQ/Q = 0.001]
        """
        resolution = Pinhole1D(self.x, 0.001*np.ones_like(self.x),
                               q_calc=self.x)
        theory = 12.0-1000.0*resolution.q_calc
        output = resolution.apply(theory)
        answer = [ 10.44785079, 9.84991299, 8.98101708,
                  7.99906585, 6.99998311, 6.00001689,
                  5.00093415, 4.01898292, 3.15008701, 2.55214921]
        np.testing.assert_allclose(output, answer, atol=1e-8)


class IgorComparisonTest(unittest.TestCase):

    def setUp(self):
        self.pars = TEST_PARS_PINHOLE_SPHERE
        from sasmodels import core
        from sasmodels.models import sphere
        self.model = core.load_model(sphere, dtype='double')

    def Iq_sphere(self, pars, resolution):
        from sasmodels import core
        kernel = core.make_kernel(self.model, [resolution.q_calc])
        theory = core.call_kernel(kernel, pars)
        result = resolution.apply(theory)
        kernel.release()
        return result

    def compare(self, q, output, answer, tolerance):
        #err = (output - answer)/answer
        #idx = abs(err) >= tolerance
        #problem = zip(q[idx], output[idx], answer[idx], err[idx])
        #print "\n".join(str(v) for v in problem)
        np.testing.assert_allclose(output, answer, rtol=tolerance)

    def test_perfect(self):
        """
        Compare sphere model with NIST Igor SANS, no resolution smearing.
        """
        pars = TEST_PARS_SLIT_SPHERE
        data_string = TEST_DATA_SLIT_SPHERE

        data = np.loadtxt(data_string.split('\n')).T
        q, width, answer, _ = data
        resolution = Perfect1D(q)
        output = self.Iq_sphere(pars, resolution)
        self.compare(q, output, answer, 1e-6)

    def test_pinhole(self):
        """
        Compare pinhole resolution smearing with NIST Igor SANS
        """
        pars = TEST_PARS_PINHOLE_SPHERE
        data_string = TEST_DATA_PINHOLE_SPHERE

        data = np.loadtxt(data_string.split('\n')).T
        q, q_width, answer = data
        resolution = Pinhole1D(q, q_width)
        output = self.Iq_sphere(pars, resolution)
        # TODO: relative error should be lower
        self.compare(q, output, answer, 3e-4)

    def test_pinhole_romberg(self):
        """
        Compare pinhole resolution smearing with romberg integration result.
        """
        pars = TEST_PARS_PINHOLE_SPHERE
        data_string = TEST_DATA_PINHOLE_SPHERE
        pars['radius'] *= 5
        radius = pars['radius']

        data = np.loadtxt(data_string.split('\n')).T
        q, q_width, answer = data
        answer = romberg_pinhole_1d(q, q_width, self.model, pars)
        ## Getting 0.1% requires 5 sigma and 200 points per fringe
        #q_calc = interpolate(pinhole_extend_q(q, q_width, nsigma=5),
        #                     2*np.pi/radius/200)
        #tol = 0.001
        ## The default 3 sigma and no extra points gets 1%
        q_calc, tol = None, 0.01
        resolution = Pinhole1D(q, q_width, q_calc=q_calc)
        output = self.Iq_sphere(pars, resolution)
        if 0: # debug plot
            import matplotlib.pyplot as plt
            resolution = Perfect1D(q)
            source = self.Iq_sphere(pars, resolution)
            plt.loglog(q, source, '.')
            # hold=True is no longer needed (or accepted) by matplotlib;
            # successive plot calls overlay by default.
            plt.loglog(q, answer, '-')
            plt.loglog(q, output, '-')
            plt.show()
        self.compare(q, output, answer, tol)

    def test_slit(self):
        """
        Compare slit resolution smearing with NIST Igor SANS
        """
        pars = TEST_PARS_SLIT_SPHERE
        data_string = TEST_DATA_SLIT_SPHERE

        data = np.loadtxt(data_string.split('\n')).T
        q, delta_qv, _, answer = data
        resolution = Slit1D(q, width=delta_qv, height=0)
        output = self.Iq_sphere(pars, resolution)
        # TODO: eliminate Igor test since it is too inaccurate to be useful.
        # This means we can eliminate the test data as well, and instead
        # use a generated q vector.
        self.compare(q, output, answer, 0.5)

    def test_slit_romberg(self):
        """
        Compare slit resolution smearing with romberg integration result.
        """
        pars = TEST_PARS_SLIT_SPHERE
        data_string = TEST_DATA_SLIT_SPHERE
        radius = pars['radius']

        data = np.loadtxt(data_string.split('\n')).T
        q, delta_qv, _, answer = data
        answer = romberg_slit_1d(q, delta_qv, 0., self.model, pars)
        q_calc = slit_extend_q(interpolate(q, 2*np.pi/radius/20),
                               delta_qv[0], 0.)
        resolution = Slit1D(q, width=delta_qv, height=0, q_calc=q_calc)
        output = self.Iq_sphere(pars, resolution)
        # TODO: relative error should be lower
        self.compare(q, output, answer, 0.025)

    def test_ellipsoid(self):
        """
        Compare romberg integration for ellipsoid model.
        """
        from sasmodels.core import load_model
        pars = {
            'scale':0.05,
            'rpolar':500, 'requatorial':15000,
            'sld':6, 'solvent_sld': 1,
            }
        form = load_model('ellipsoid', dtype='double')
        q = np.logspace(log10(4e-5),log10(2.5e-2), 68)
        width, height = 0.117, 0.
        resolution = Slit1D(q, width=width, height=height)
        answer = romberg_slit_1d(q, width, height, form, pars)
        output = resolution.apply(eval_form(resolution.q_calc, form, pars))
        # TODO: 10% is too much error; use better algorithm
        #print np.max(abs(answer-output)/answer)
        self.compare(q, output, answer, 0.1)

    #TODO: can sas q spacing be too sparse for the resolution calculation?
    @unittest.skip("suppress sparse data test; not supported by current code")
    def test_pinhole_sparse(self):
        """
        Compare pinhole resolution smearing with NIST Igor SANS on sparse data
        """
        pars = TEST_PARS_PINHOLE_SPHERE
        data_string = TEST_DATA_PINHOLE_SPHERE

        data = np.loadtxt(data_string.split('\n')).T
        q, q_width, answer = data[:, ::20] # Take every nth point
        resolution = Pinhole1D(q, q_width)
        output = self.Iq_sphere(pars, resolution)
        self.compare(q, output, answer, 1e-6)


# pinhole sphere parameters
TEST_PARS_PINHOLE_SPHERE = {
    'scale': 1.0, 'background': 0.01,
    'radius': 60.0, 'sld': 1, 'solvent_sld': 6.3,
    }
# Q, dQ, I(Q) calculated by NIST Igor SANS package
TEST_DATA_PINHOLE_SPHERE = """\
0.001278 0.0002847 2538.41176383
0.001562 0.0002905 2536.91820405
0.001846 0.0002956 2535.13182479
0.002130 0.0003017 2533.06217813
0.002414 0.0003087 2530.70378586
0.002698 0.0003165 2528.05024192
0.002982 0.0003249 2525.10408349
0.003266 0.0003340 2521.86667499
0.003550 0.0003437 2518.33907750
0.003834 0.0003539 2514.52246995
0.004118 0.0003646 2510.41798319
0.004402 0.0003757 2506.02690988
0.004686 0.0003872 2501.35067884
0.004970 0.0003990 2496.38678318
0.005253 0.0004112 2491.16237596
0.005537 0.0004237 2485.63911673
0.005821 0.0004365 2479.83657083
0.006105 0.0004495 2473.75676948
0.006389 0.0004628 2467.40145990
0.006673 0.0004762 2460.77293372
0.006957 0.0004899 2453.86724627
0.007241 0.0005037 2446.69623838
0.007525 0.0005177 2439.25775219
0.007809 0.0005318 2431.55421398
0.008093 0.0005461 2423.58785521
0.008377 0.0005605 2415.36158137
0.008661 0.0005750 2406.87009473
0.008945 0.0005896 2398.12841186
0.009229 0.0006044 2389.13360806
0.009513 0.0006192 2379.88958042
0.009797 0.0006341 2370.39776774
0.010080 0.0006491 2360.69528793
0.010360 0.0006641 2350.85169027
0.010650 0.0006793 2340.42023633
0.010930 0.0006945 2330.11206013
0.011220 0.0007097 2319.20109972
0.011500 0.0007251 2308.43503981
0.011780 0.0007404 2297.44820179
0.012070 0.0007558 2285.83853677
0.012350 0.0007713 2274.41290746
0.012640 0.0007868 2262.36219581
0.012920 0.0008024 2250.51169731
0.013200 0.0008180 2238.45596231
0.013490 0.0008336 2225.76495666
0.013770 0.0008493 2213.29618391
0.014060 0.0008650 2200.19110751
0.014340 0.0008807 2187.34050325
0.014620 0.0008965 2174.30529864
0.014910 0.0009123 2160.61632548
0.015190 0.0009281 2147.21038112
0.015470 0.0009440 2133.62023580
0.015760 0.0009598 2119.37907426
0.016040 0.0009757 2105.45234903
0.016330 0.0009916 2090.86319102
0.016610 0.0010080 2076.60576032
0.016890 0.0010240 2062.19214565
0.017180 0.0010390 2047.10550219
0.017460 0.0010550 2032.38715621
0.017740 0.0010710 2017.52560123
0.018030 0.0010880 2001.99124318
0.018310 0.0011040 1986.84662060
0.018600 0.0011200 1971.03389745
0.018880 0.0011360 1955.61395119
0.019160 0.0011520 1940.08291563
0.019450 0.0011680 1923.87672225
0.019730 0.0011840 1908.10656374
0.020020 0.0012000 1891.66297192
0.020300 0.0012160 1875.66789021
0.020580 0.0012320 1859.56357196
0.020870 0.0012490 1842.79468290
0.021150 0.0012650 1826.50064489
0.021430 0.0012810 1810.11533702
0.021720 0.0012970 1793.06840882
0.022000 0.0013130 1776.51153580
0.022280 0.0013290 1759.87201249
0.022570 0.0013460 1742.57354412
0.022850 0.0013620 1725.79397319
0.023140 0.0013780 1708.35831550
0.023420 0.0013940 1691.45256069
0.023700 0.0014110 1674.48561783
0.023990 0.0014270 1656.86525366
0.024270 0.0014430 1639.79847285
0.024550 0.0014590 1622.68887088
0.024840 0.0014760 1604.96421100
0.025120 0.0014920 1587.85768129
0.025410 0.0015080 1569.99297335
0.025690 0.0015240 1552.84580279
0.025970 0.0015410 1535.54074115
0.026260 0.0015570 1517.75249337
0.026540 0.0015730 1500.40115023
0.026820 0.0015900 1483.03632237
0.027110 0.0016060 1465.05942429
0.027390 0.0016220 1447.67682181
0.027670 0.0016390 1430.46495191
0.027960 0.0016550 1412.49232282
0.028240 0.0016710 1395.13182318
0.028520 0.0016880 1377.93439837
0.028810 0.0017040 1359.99528971
0.029090 0.0017200 1342.67274512
0.029370 0.0017370 1325.55375609
"""

# Slit sphere parameters
TEST_PARS_SLIT_SPHERE = {
    'scale': 0.01, 'background': 0.01,
    'radius': 60000, 'sld': 1, 'solvent_sld': 4,
    }
# Q dQ I(Q) I_smeared(Q)
TEST_DATA_SLIT_SPHERE = """\
2.26097e-05 0.117 5.5781372896e+09 1.4626077708e+06
2.53847e-05 0.117 5.0363141458e+09 1.3117318023e+06
2.81597e-05 0.117 4.4850108103e+09 1.1594863713e+06
3.09347e-05 0.117 3.9364658459e+09 1.0094881630e+06
3.37097e-05 0.117 3.4019975074e+09 8.6518941303e+05
3.92597e-05 0.117 2.4139519814e+09 6.0232158311e+05
4.48097e-05 0.117 1.5816877820e+09 3.8739994090e+05
5.03597e-05 0.117 9.3715407224e+08 2.2745304775e+05
5.59097e-05 0.117 4.8387917428e+08 1.2101295768e+05
6.14597e-05 0.117 2.0193586928e+08 6.0055107771e+04
6.70097e-05 0.117 5.5886110911e+07 3.2749521065e+04
7.25597e-05 0.117 3.7782348010e+06 2.6350963616e+04
7.81097e-05 0.117 5.3407817904e+06 2.9624963314e+04
8.36597e-05 0.117 2.7975485523e+07 3.4403962254e+04
8.92097e-05 0.117 4.9845448282e+07 3.6130017903e+04
9.47597e-05 0.117 6.0092588905e+07 3.3495107849e+04
1.00310e-04 0.117 5.6823430831e+07 2.7475726279e+04
1.05860e-04 0.117 4.3857024036e+07 2.0144282226e+04
1.11410e-04 0.117 2.7277144760e+07 1.3647403260e+04
1.22510e-04 0.117 3.3119334113e+06 6.6519711526e+03
1.33610e-04 0.117 1.4412859402e+06 6.9726212813e+03
1.44710e-04 0.117 8.5620162463e+06 8.1441335775e+03
1.55810e-04 0.117 9.6957429033e+06 6.4559996521e+03
1.66910e-04 0.117 4.3818341914e+06 3.6252154156e+03
1.78010e-04 0.117 2.7448997387e+05 2.4006505342e+03
1.89110e-04 0.117 8.0472009936e+05 2.8187789089e+03
2.00210e-04 0.117 2.8149552834e+06 3.0915662855e+03
2.11310e-04 0.117 2.7510907861e+06 2.3722530293e+03
2.22410e-04 0.117 1.0053133293e+06 1.4473468311e+03
2.33510e-04 0.117 5.8428305052e+03 1.2048540556e+03
2.44610e-04 0.117 5.1699305004e+05 1.4625670042e+03
2.55710e-04 0.117 1.2120227268e+06 1.5010705973e+03
2.66810e-04 0.117 9.7896842846e+05 1.1336343426e+03
2.77910e-04 0.117 2.5507264791e+05 8.1848818080e+02
3.05660e-04 0.117 5.2403101181e+05 7.4913374357e+02
3.33410e-04 0.117 5.8699343809e+04 4.4669964560e+02
3.61160e-04 0.117 3.0844327150e+05 4.6774007542e+02
3.88910e-04 0.117 8.3360142970e+03 2.7169550220e+02
4.16660e-04 0.117 1.8630080583e+05 3.0710983679e+02
4.44410e-04 0.117 3.1616804732e-01 1.7959006831e+02
4.72160e-04 0.117 1.1299016314e+05 2.0763952339e+02
4.99910e-04 0.117 2.9952522747e+03 1.2536542765e+02
5.27660e-04 0.117 6.7625695649e+04 1.4013969777e+02
5.55410e-04 0.117 7.6927460089e+03 8.2145593180e+01
6.10910e-04 0.117 1.1229057779e+04 8.4519745643e+01
6.66410e-04 0.117 1.3035567943e+04 8.1554625609e+01
7.21910e-04 0.117 1.3309931343e+04 7.4437319172e+01
7.77410e-04 0.117 1.2462626212e+04 6.4697088261e+01
8.32910e-04 0.117 1.0912927143e+04 5.3773301044e+01
8.88410e-04 0.117 9.0172597469e+03 4.2843375753e+01
9.43910e-04 0.117 7.0496495917e+03 3.2771032724e+01
9.99410e-04 0.117 5.2030483682e+03 2.4113557144e+01
1.05491e-03 0.117 3.5988976711e+03 1.7160773658e+01
1.11041e-03 0.117 2.2996060652e+03 1.2016626459e+01
1.22141e-03 0.117 6.4766590598e+02 6.0373017740e+00
1.33241e-03 0.117 4.1963483264e+01 4.5215452974e+00
1.44341e-03 0.117 6.3370708246e+01 5.1054681903e+00
1.55441e-03 0.117 3.0736750577e+02 5.9176165298e+00
1.66541e-03 0.117 5.0327682399e+02 5.9815000189e+00
1.77641e-03 0.117 5.4084331454e+02 5.1634639625e+00
1.88741e-03 0.117 4.3488671756e+02 3.8535158148e+00
1.99841e-03 0.117 2.6322287860e+02 2.5824997753e+00
2.10941e-03 0.117 1.0793633150e+02 1.7315517194e+00
2.22041e-03 0.117 1.8474448850e+01 1.4077213604e+00
2.33141e-03 0.117 1.5864062279e+00 1.4771560682e+00
2.44241e-03 0.117 3.2267213848e+01 1.6916253448e+00
2.55341e-03 0.117 7.4289116207e+01 1.8274751193e+00
2.66441e-03 0.117 9.9000521929e+01 1.7706812289e+00
"""

def main():
    """
    Run the unit tests defined in this module.

    Returns 0 if success or 1 if any tests fail.
    """
    import sys
    import xmlrunner

    suite = unittest.TestSuite()
    suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(sys.modules[__name__]))

    runner = xmlrunner.XMLTestRunner(output='logs')
    result = runner.run(suite)
    return 1 if result.failures or result.errors else 0


############################################################################
# usage demo
############################################################################

def _eval_demo_1d(resolution, title):
    import sys
    from sasmodels import core
    name = sys.argv[1] if len(sys.argv) > 1 else 'cylinder'

    if name == 'cylinder':
        pars = {'length':210, 'radius':500}
    elif name == 'teubner_strey':
        pars = {'a2':0.003, 'c1':-1e4, 'c2':1e10, 'background':0.312643}
    elif name == 'sphere' or name == 'spherepy':
        pars = TEST_PARS_SLIT_SPHERE
    elif name == 'ellipsoid':
        pars = {
            'scale':0.05,
            'rpolar':500, 'requatorial':15000,
            'sld':6, 'solvent_sld': 1,
            }
    else:
        pars = {}
    defn = core.load_model_definition(name)
    model = core.load_model(defn)

    kernel = core.make_kernel(model, [resolution.q_calc])
    theory = core.call_kernel(kernel, pars)
    Iq = resolution.apply(theory)

    if isinstance(resolution, Slit1D):
        width, height = resolution.width, resolution.height
        Iq_romb = romberg_slit_1d(resolution.q, width, height, model, pars)
    else:
        dq = resolution.q_width
        Iq_romb = romberg_pinhole_1d(resolution.q, dq, model, pars)

    import matplotlib.pyplot as plt
    plt.loglog(resolution.q_calc, theory, label='unsmeared')
    plt.loglog(resolution.q, Iq, label='smeared')
    plt.loglog(resolution.q, Iq_romb, label='romberg smeared')
    plt.legend()
    plt.title(title)
    plt.xlabel("Q (1/Ang)")
    plt.ylabel("I(Q) (1/cm)")


def demo_pinhole_1d():
    q = np.logspace(-4, np.log10(0.2), 400)
    q_width = 0.1*q
    resolution = Pinhole1D(q, q_width)
    _eval_demo_1d(resolution, title="10% dQ/Q Pinhole Resolution")

def demo_slit_1d():
    q = np.logspace(-4, np.log10(0.2), 100)
    w = h = 0.
    #w = 0.000000277790
    w = 0.0277790
    #h = 0.00277790
    #h = 0.0277790
    resolution = Slit1D(q, w, h)
    _eval_demo_1d(resolution, title="(%g,%g) Slit Resolution"%(w,h))

def demo():
    import matplotlib.pyplot as plt
    plt.subplot(121)
    demo_pinhole_1d()
    #plt.yscale('linear')
    plt.subplot(122)
    demo_slit_1d()
    #plt.yscale('linear')
    plt.show()


if __name__ == "__main__":
    demo()
    #main()