Changeset 7954f5c in sasmodels


Timestamp:
Mar 23, 2015 7:58:11 AM
Author:
Paul Kienzle <pkienzle@…>
Branches:
master, core_shell_microgels, costrafo411, magnetic_model, release_v0.94, release_v0.95, ticket-1257-vesicle-product, ticket_1156, ticket_1265_superball, ticket_822_more_unit_tests
Children:
bcd3aa3
Parents:
33c8d73
Message:

rewrite slit resolution algorithm

File:
1 edited

  • sasmodels/resolution.py
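
For orientation, a minimal usage sketch of the two resolution classes as they stand after this changeset. The class names and signatures (Pinhole1D(q, q_width, q_calc=None), Slit1D(q, width, height, q_calc=None)) come from the diff below; the import assumes this revision of sasmodels is on the path, and the theory function is a stand-in rather than a sasmodels kernel.

    import numpy as np
    from sasmodels.resolution import Pinhole1D, Slit1D

    q = np.linspace(0.001, 0.5, 200)      # measurement points
    dq = 0.05 * q                         # 5% pinhole resolution

    def Iq(qv):                           # stand-in I(q), not a real model
        return 1.0 / (1.0 + (qv / 0.05)**4)

    # Pinhole smearing: Gaussian of width dq at each measurement point.
    pinhole = Pinhole1D(q, dq)
    Iq_pinhole = pinhole.apply(Iq(pinhole.q_calc))

    # Slit smearing: one fixed slit setting for all measurement points.
    slit = Slit1D(q, width=0.117, height=0.0)
    Iq_slit = slit.apply(Iq(slit.q_calc))

Note that the theory is always evaluated on resolution.q_calc, which the constructors extend beyond the measured q range.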

    r33c8d73 r7954f5c  
    44This defines classes for 1D and 2D resolution calculations. 
    55""" 
     6from __future__ import division 
    67from scipy.special import erf 
    7 from numpy import sqrt 
     8from numpy import sqrt, log, log10 
    89import numpy as np 
    910 
    10 SLIT_SMEAR_POINTS = 500 
    1111MINIMUM_RESOLUTION = 1e-8 
    12 # TODO: Q_EXTEND_STEPS is much bigger than necessary 
    13 Q_EXTEND_STEPS = 30   # number of extra q points above and below 
    14  
    1512 
    1613class Resolution1D(object): 
     
    3431        raise NotImplementedError("Subclass does not define the apply function") 
    3532 
     33 
    3634class Perfect1D(Resolution1D): 
    3735    """ 
     
    4644        return theory 
    4745 
     46 
    4847class Pinhole1D(Resolution1D): 
    4948    r""" 
     
    5554 
    5655    *q_calc* is the list of points to calculate, or None if this should 
    57     be autogenerated from the *q,q_width*. 
     56    be estimated from the *q* and *q_width*. 
    5857    """ 
    5958    def __init__(self, q, q_width, q_calc=None): 
    60         #TODO: maybe add min_step=np.inf 
    6159        #*min_step* is the minimum point spacing to use when computing the 
    6260        #underlying model.  It should be on the order of 
     
    7068        # default to Perfect1D if the pinhole geometry is not defined. 
    7169        self.q, self.q_width = q, q_width 
    72         self.q_calc = pinhole_extend_q(q, q_width) if q_calc is None else q_calc 
     70        self.q_calc = pinhole_extend_q(q, q_width) \ 
     71            if q_calc is None else np.sort(q_calc) 
    7372        self.weight_matrix = pinhole_resolution(self.q_calc, 
    7473                self.q, np.maximum(q_width, MINIMUM_RESOLUTION)) 
     
    7776        return apply_resolution_matrix(self.weight_matrix, theory) 
    7877 
     78 
    7979class Slit1D(Resolution1D): 
    8080    """ 
     
    8686 
    8787    *qy_height* slit height 
    88     """ 
    89     def __init__(self, q, qx_width, qy_width): 
    90         if np.isscalar(qx_width): 
    91             qx_width = qx_width*np.ones(len(q)) 
    92         if np.isscalar(qy_width): 
    93             qy_width = qy_width*np.ones(len(q)) 
    94         self.q, self.qx_width, self.qy_width = [ 
    95             v.flatten() for v in (q, qx_width, qy_width)] 
    96         self.q_calc = slit_extend_q(q, qx_width, qy_width) 
     88 
     89    *q_calc* is the list of points to calculate, or None if this should 
     90    be estimated from the *q* and *q_width*. 
     91 
     92    The *weight_matrix* is computed by :func:`slit1d_resolution` 
     93    """ 
     94    def __init__(self, q, width, height, q_calc=None): 
     95        # TODO: maybe issue warnings rather than raising errors 
     96        if not np.isscalar(width): 
     97            if np.any(np.diff(width) > 0.0): 
     98                raise ValueError("Slit resolution requires fixed width slits") 
     99            width = width[0] 
     100        if not np.isscalar(height): 
     101            if np.any(np.diff(height) > 0.0): 
     102                raise ValueError("Slit resolution requires fixed height slits") 
     103            height = height[0] 
     104 
     105        # Remember what width/height was used even though we won't need them 
     106        # after the weight matrix is constructed 
     107        self.width, self.height = width, height 
     108 
     109        self.q = q.flatten() 
     110        self.q_calc = slit_extend_q(q, width, height) \ 
     111            if q_calc is None else np.sort(q_calc) 
    97112        self.weight_matrix = \ 
    98             slit_resolution(self.q_calc, self.q, self.qx_width, self.qy_width) 
     113            slit_resolution(self.q_calc, self.q, width, height) 
    99114 
    100115    def apply(self, theory): 
     
    106121    Apply the resolution weight matrix to the computed theory function. 
    107122    """ 
    108     #print "apply shapes",theory.shape, self.weight_matrix.shape 
     123    #print "apply shapes", theory.shape, weight_matrix.shape 
    109124    Iq = np.dot(theory[None,:], weight_matrix) 
    110125    #print "result shape",Iq.shape 
    111126    return Iq.flatten() 
    112127 
     128 
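A standalone sketch of the apply step: the weight matrix is shaped [len(q_calc), len(q)] with each column holding the (normalized) weights for one measured point, so smearing reduces to a single vector-matrix product. The toy Gaussian weights below are only for illustration.

    import numpy as np

    q_calc = np.linspace(0.0, 1.0, 6)    # fine grid the model is evaluated on
    q = np.array([0.25, 0.75])           # measured points

    # Toy weight matrix: column j maps I(q_calc) onto measured point q[j];
    # each column is normalized to sum to one.
    W = np.exp(-0.5 * ((q_calc[:, None] - q[None, :]) / 0.15)**2)
    W /= np.sum(W, axis=0)[None, :]

    theory = q_calc**2                            # stand-in I(q_calc)
    Iq = np.dot(theory[None, :], W).flatten()     # same operation as apply_resolution_matrix
    print(Iq.shape)                               # (2,)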
    113129def pinhole_resolution(q_calc, q, q_width): 
    114130    """ 
     
    119135    *W*, the resolution smearing can be computed using *dot(W,q)*. 
    120136 
    121     *q_calc* must be increasing. 
    122     """ 
     137    *q_calc* must be increasing.  *q_width* must be greater than zero. 
     138    """ 
     139    # The current algorithm is a midpoint rectangle rule.  In the test case, 
     140    # neither trapezoid nor Simpson's rule improved the accuracy. 
    123141    edges = bin_edges(q_calc) 
    124142    edges[edges<0.0] = 0.0 # clip edges below zero 
    125     index = (q_width > 0.0) # treat perfect resolution differently 
    126143    G = erf( (edges[:,None] - q[None,:]) / (sqrt(2.0)*q_width)[None,:] ) 
    127144    weights = G[1:] - G[:-1] 
    128     # w = np.sum(weights, axis=0); print "w",w.shape, w 
    129145    weights /= np.sum(weights, axis=0)[None,:] 
    130146    return weights 
    131147 
    132148 
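A self-contained sketch of the same construction in plain numpy/scipy: the Gaussian resolution function is integrated over each q_calc bin by differencing erf at the bin edges, then the columns are normalized. The helper name is hypothetical and the midpoint edges only approximate the module's bin_edges helper (not shown in full in this hunk).

    import numpy as np
    from scipy.special import erf

    def gaussian_bin_weights(q_calc, q, q_width):   # hypothetical helper name
        # Bin edges at the midpoints, extended half a step at either end.
        edges = np.concatenate([
            [q_calc[0] - 0.5*(q_calc[1] - q_calc[0])],
            0.5*(q_calc[1:] + q_calc[:-1]),
            [q_calc[-1] + 0.5*(q_calc[-1] - q_calc[-2])],
        ])
        edges[edges < 0.0] = 0.0
        # Integral of a unit Gaussian centred at each q over each bin.
        G = erf((edges[:, None] - q[None, :]) / (np.sqrt(2.0)*q_width)[None, :])
        weights = G[1:] - G[:-1]
        return weights / np.sum(weights, axis=0)[None, :]

    q_calc = np.linspace(0.01, 0.5, 50)
    q = np.array([0.1, 0.2, 0.3])
    W = gaussian_bin_weights(q_calc, q, np.full(3, 0.01))
    print(W.shape, W.sum(axis=0))   # (50, 3), each column sums to 1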
    133 def pinhole_extend_q(q, q_width): 
     149def slit_resolution(q_calc, q, width, height): 
     150    r""" 
     151    Build a weight matrix to compute *I_s(q)* from *I(q_calc)*, given 
     152    $q_v$ = *width* and $q_h$ = *height*. 
     153 
     154    *width* and *height* are scalars since current instruments use the 
     155    same slit settings for all measurement points. 
     156 
     157    If slit height is large relative to width, use: 
     158 
     159    .. math:: 
     160 
     161        I_s(q_o) = \frac{1}{\Delta q_v} 
      162            \int_0^{\Delta q_v} I(\sqrt{q_o^2 + u^2}) du 
     163 
     164    If slit width is large relative to height, use: 
     165 
     166    .. math:: 
     167 
     168        I_s(q_o) = \frac{1}{2 \Delta q_v} 
     169            \int_{-\Delta q_v}^{\Delta q_v} I(u) du 
     170    """ 
     171    if width == 0.0 and height == 0.0: 
     172        #print "condition zero" 
     173        return 1 
     174 
     175    q_edges = bin_edges(q_calc) # Note: requires q > 0 
     176    q_edges[q_edges<0.0] = 0.0 # clip edges below zero 
     177 
     178    #np.set_printoptions(linewidth=10000) 
     179    if width <= 100.0 * height or height == 0: 
     180        # The current algorithm is a midpoint rectangle rule.  In the test case, 
     181        # neither trapezoid nor Simpson's rule improved the accuracy. 
     182        #print "condition h", q_edges.shape, q.shape, q_calc.shape 
     183        weights = np.zeros((len(q), len(q_calc)), 'd') 
     184        for i, qi in enumerate(q): 
     185            weights[i, :] = np.diff(q_to_u(q_edges, qi)) 
     186        weights /= width 
     187        weights = weights.T 
     188    else: 
     189        #print "condition w" 
     190        # Make q_calc into a row vector, and q into a column vector 
     191        q, q_calc = q[None,:], q_calc[:,None] 
     192        in_x = (q_calc >= q-width) * (q_calc <= q+width) 
     193        weights = np.diff(q_edges)[:,None] * in_x 
     194 
     195    return weights 
     196 
     197 
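A standalone sketch of the height-dominant branch (width <= 100*height, or height == 0): with the substitution u = sqrt(q'^2 - q0^2), each q_calc bin contributes its width in u, scaled by 1/width, just as the loop above does. The helper name, midpoint edges, and stand-in model are illustrative only.

    import numpy as np

    def slit_weights_height(q_calc, q, width):      # hypothetical helper name
        edges = np.concatenate([
            [q_calc[0] - 0.5*(q_calc[1] - q_calc[0])],
            0.5*(q_calc[1:] + q_calc[:-1]),
            [q_calc[-1] + 0.5*(q_calc[-1] - q_calc[-2])],
        ])
        edges[edges < 0.0] = 0.0
        weights = np.zeros((len(q), len(q_calc)))
        for i, q0 in enumerate(q):
            # u = sqrt(q'^2 - q0^2), clamped where rounding makes it negative
            u = np.sqrt(np.clip(edges**2 - q0**2, 0.0, None))
            weights[i, :] = np.diff(u)
        return (weights / width).T      # shape [len(q_calc), len(q)]

    q = np.array([0.01, 0.05, 0.1])
    width = 0.117
    # q_calc covering the range slit_extend_q targets: min(q) to sqrt(max(q)^2 + width^2)
    q_calc = np.linspace(q[0], np.sqrt(q[-1]**2 + width**2), 200)
    W = slit_weights_height(q_calc, q, width)
    theory = 1.0 / (1.0 + (q_calc/0.05)**4)         # stand-in I(q_calc)
    print(np.dot(theory[None, :], W).flatten())     # approximate slit-smeared I(q)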
     198def pinhole_extend_q(q, q_width, nsigma=3): 
    134199    """ 
    135200    Given *q* and *q_width*, find a set of sampling points *q_calc* so 
     
    137202    function. 
    138203    """ 
    139     # If using min_step, you could do something like: 
    140     #     q_calc = np.arange(min, max, min_step) 
    141     #     index = np.searchsorted(q_calc, q) 
    142     #     q_calc[index] = q 
    143     # A refinement would be to assign q to the nearest neighbour.  A further 
    144     # refinement would be to guard multiple q points between q_calc points, 
    145     # either by removing duplicates or by only moving the values nearest the 
    146     # edges.  For really sparse samples, you will want to remove all remaining 
    147     # points that are not within three standard deviations of a measured q. 
    148     q_min, q_max = np.min(q), np.max(q) 
    149     extended_min, extended_max = np.min(q - 3*q_width), np.max(q + 3*q_width) 
    150     nbins_low, nbins_high = Q_EXTEND_STEPS, Q_EXTEND_STEPS 
    151     if extended_min < q_min: 
    152         q_low = np.linspace(extended_min, q_min, nbins_low+1)[:-1] 
    153         q_low = q_low[q_low>0.0] 
    154     else: 
    155         q_low = [] 
    156     if extended_max > q_max: 
    157         q_high = np.linspace(q_max, extended_max, nbins_high+1)[:-1] 
    158     else: 
    159         q_high = [] 
    160     return np.concatenate([q_low, q, q_high]) 
    161  
    162  
    163 def slit_resolution(q_calc, q, qx_width, qy_width): 
    164     edges = bin_edges(q_calc) # Note: requires q > 0 
    165     edges[edges<0.0] = 0.0 # clip edges below zero 
    166     qy_min, qy_max = 0.0, edges[-1] 
    167  
    168     # Make q_calc into a row vector, and q, qx_width, qy_width into columns 
    169     # Make weights match [ q_calc X q ] 
    170     weights = np.zeros((len(q),len(q_calc)), 'd') 
    171     q_calc = q_calc[None,:] 
    172     q, qx_width, qy_width, edges = [ 
    173         v[:,None] for v in (q, qx_width, qy_width, edges)] 
    174  
    175     # Loop for width (height is analytical). 
    176     # Condition: height >>> width, otherwise, below is not accurate enough. 
    177     # Smear weight numerical iteration for width>0 when height>0. 
    178     # When width = 0, the numerical iteration will be skipped. 
    179     # The resolution calculation for the height is done by direct integration, 
    180     # assuming the I(q'=sqrt(q_j^2-(q+shift_w)^2)) is constant within 
    181     # a q' bin, [q_high, q_low]. 
    182     # In general, this weight numerical iteration for width>0 might be a rough 
    183     # approximation, but it must be good enough when height >>> width. 
    184     E_sq = edges**2 
    185     y_points = SLIT_SMEAR_POINTS if np.any(qy_width>0) else 1 
    186     qy_step = 0 if y_points == 1 else qy_width/(y_points-1) 
    187     for k in range(-y_points+1,y_points): 
    188         qy = np.clip(q + qy_step*k, qy_min, qy_max) 
    189         qx_low = qy 
    190         qx_high = sqrt(qx_low**2 + qx_width**2) 
    191         in_x = (q_calc>=qx_low)*(q_calc<=qx_high) 
    192         qy_sq = qy**2 
    193         weights += (sqrt(E_sq[1:]-qy_sq) - sqrt(qy_sq - E_sq[:-1]))*in_x 
    194     #w = np.sum(weights, axis=1); print "w",w.shape, w 
    195     weights /= np.sum(weights, axis=1)[:,None] 
    196     return weights 
    197  
    198  
    199 def slit_extend_q(q, qx_width, qy_width): 
    200     return q 
     204    q_min, q_max = np.min(q - nsigma*q_width), np.max(q + nsigma*q_width) 
     205    return geometric_extrapolation(q, q_min, q_max) 
     206 
     207 
     208def slit_extend_q(q, width, height): 
     209    """ 
     210    Given *q*, *width* and *height*, find a set of sampling points *q_calc* so 
     211    that each point I(q) has sufficient support from the underlying 
     212    function. 
     213    """ 
     214    height # keep lint happy 
     215    q_min, q_max = np.min(q), np.max(np.sqrt(q**2 + width**2)) 
     216    return geometric_extrapolation(q, q_min, q_max) 
    201217 
    202218 
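The two extend_q helpers differ only in the range they hand to geometric_extrapolation: the pinhole case pads by nsigma standard deviations of the resolution, while the slit case reaches out to sqrt(q^2 + width^2) so the u-substitution above always has support. A quick numeric check of those ranges (values approximate, made-up data):

    import numpy as np

    q = np.logspace(-3, -1, 5)      # 0.001 ... 0.1
    dq = 0.05 * q                   # pinhole resolution
    width = 0.117                   # slit dimension

    # Pinhole: pad by nsigma = 3 standard deviations on both sides.
    print(np.min(q - 3*dq), np.max(q + 3*dq))           # ~0.00085, ~0.115

    # Slit: need I(q') out to sqrt(q^2 + width^2) for the largest q.
    print(np.min(q), np.max(np.sqrt(q**2 + width**2)))  # 0.001, ~0.154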
     
    218234    return edges 
    219235 
     236 
     237def q_to_u(q, q0): 
     238    """ 
     239    Convert *q* values to *u* values for the integral computed at *q0*. 
     240    """ 
     241    # array([value])**2 - value**2 is not always zero 
     242    qpsq = q**2 - q0**2 
     243    qpsq[qpsq<0] = 0 
     244    return sqrt(qpsq) 
     245 
     246 
     247def interpolate(q, max_step): 
     248    """ 
     249    Returns *q_calc* with points spaced at most max_step apart. 
     250    """ 
     251    step = np.diff(q) 
     252    index = step>max_step 
     253    if np.any(index): 
     254        inserts = [] 
     255        for q_i,step_i in zip(q[:-1][index],step[index]): 
     256            n = np.ceil(step_i/max_step) 
     257            inserts.extend(q_i + np.arange(1,n)*(step_i/n)) 
     258        # Extend a couple of fringes beyond the end of the data 
     259        inserts.extend(q[-1] + np.arange(1,8)*max_step) 
     260        q_calc = np.sort(np.hstack((q,inserts))) 
     261    else: 
     262        q_calc = q 
     263    return q_calc 
     264 
     265 
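A small usage check of interpolate, assuming this revision of sasmodels.resolution is importable; the q values are made up for illustration.

    import numpy as np
    from sasmodels.resolution import interpolate

    q = np.array([0.001, 0.002, 0.010, 0.011])
    q_calc = interpolate(q, 0.002)
    # The wide 0.002 -> 0.010 gap is subdivided into steps no larger than
    # max_step, seven fringe points are appended past q[-1], and the original
    # points are all retained in the sorted result.
    print(q_calc)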
     266def linear_extrapolation(q, q_min, q_max): 
     267    """ 
     268    Extrapolate *q* out to [*q_min*, *q_max*] using the step size in *q* as 
     269    a guide.  Extrapolation below uses steps the same size as the first 
     270    interval.  Extrapolation above uses steps the same size as the final 
     271    interval. 
     272 
      273    If *q_min* is zero or less, then *q[0]/10* is used instead. 
     274    """ 
     275    q = np.sort(q) 
     276    if q_min < q[0]: 
     277        if q_min <= 0: q_min = q[0]/10 
     278        delta = q[1] - q[0] 
     279        q_low = np.arange(q_min, q[0], delta) 
     280    else: 
     281        q_low = [] 
     282    if q_max > q[-1]: 
     283        delta = q[-1] - q[-2] 
     284        q_high = np.arange(q[-1]+delta, q_max, delta) 
     285    else: 
     286        q_high = [] 
     287    return np.concatenate([q_low, q, q_high]) 
     288 
     289 
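A usage sketch of linear_extrapolation under the same importability assumption; the numbers are illustrative.

    import numpy as np
    from sasmodels.resolution import linear_extrapolation

    q = np.array([0.02, 0.03, 0.04, 0.06])
    q_calc = linear_extrapolation(q, q_min=0.005, q_max=0.12)
    # Extends below 0.02 in steps of the first interval (0.01) and above 0.06
    # in steps of the last interval (0.02), without moving the original points.
    print(q_calc)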
     290def geometric_extrapolation(q, q_min, q_max): 
     291    r""" 
     292    Extrapolate *q* to [*q_min*, *q_max*] using geometric steps, with the 
     293    average geometric step size in *q* as the step size. 
     294 
      295    If *q_min* is zero or less, then *q[0]/10* is used instead. 
     296 
     297    Starting at $q_1$ and stepping geometrically by $\Delta q$ 
     298    to $q_n$ in $n$ points gives a geometric average of: 
     299 
     300    .. math:: 
     301 
      302         \log \Delta q = (\log q_n - \log q_1) / (n - 1) 
     303 
     304    From this we can compute the number of steps required to extend $q$ 
     305    from $q_n$ to $q_\text{max}$ by $\Delta q$ as: 
     306 
     307    .. math:: 
     308 
     309         n_\text{extend} = (\log q_\text{max} - \log q_n) / \log \Delta q 
     310 
     311    Substituting: 
     312 
     313        n_\text{extend} = (n-1) (\log q_\text{max} - \log q_n) 
      314            / (\log q_n - \log q_1) 
     315    """ 
     316    q = np.sort(q) 
     317    delta_q = (len(q)-1)/(log(q[-1]) - log(q[0])) 
     318    if q_min < q[0]: 
     319        if q_min < 0: q_min = q[0]/10 
     320        n_low = delta_q * (log(q[0])-log(q_min)) 
     321        q_low  = np.logspace(log10(q_min), log10(q[0]), np.ceil(n_low)+1)[:-1] 
     322    else: 
     323        q_low = [] 
     324    if q_max > q[-1]: 
     325        n_high = delta_q * (log(q_max)-log(q[-1])) 
     326        q_high = np.logspace(log10(q[-1]), log10(q_max), np.ceil(n_high)+1)[1:] 
     327    else: 
     328        q_high = [] 
     329    return np.concatenate([q_low, q, q_high]) 
     330 
     331 
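And the geometric counterpart, which keeps the average log-step of the data when extending; again the import assumes this revision is available and the q values are illustrative.

    import numpy as np
    from sasmodels.resolution import geometric_extrapolation

    q = np.logspace(np.log10(0.01), np.log10(0.1), 11)   # ~26% steps over one decade
    q_calc = geometric_extrapolation(q, q_min=0.001, q_max=1.0)
    # Roughly ten extra points per decade are added below and above the data,
    # matching the average geometric step of q itself.
    print(q_calc.min(), q_calc.max())    # ~0.001 ... 1.0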
    220332############################################################################ 
    221333# unit tests 
     
    223335import unittest 
    224336 
     337 
     338def eval_form(q, form, pars): 
     339    from sasmodels import core 
     340    kernel = core.make_kernel(form, [q]) 
     341    theory = core.call_kernel(kernel, pars) 
     342    kernel.release() 
     343    return theory 
     344 
     345 
     346def gaussian(q, q0, dq): 
     347    from numpy import exp, pi 
     348    return exp(-0.5*((q-q0)/dq)**2)/(sqrt(2*pi)*dq) 
     349 
     350 
     351def romberg_slit_1d(q, delta_qv, form, pars): 
     352    """ 
     353    Romberg integration for slit resolution. 
     354 
     355    This is an adaptive integration technique.  It is called with settings 
     356    that make it slow to evaluate but give it good accuracy. 
     357    """ 
     358    from scipy.integrate import romberg 
     359    _fn = lambda u, q0: eval_form(sqrt(q0**2 + u**2), form, pars) 
     360    r = [romberg(_fn, 0, delta_qv[0], args=(qi,), 
     361                 divmax=100, vec_func=True, tol=0, rtol=1e-8) 
     362         for qi in q] 
     363    # r should be [float, ...], but it is [array([float]), array([float]),...] 
     364    return np.asarray(r).flatten()/delta_qv[0] 
     365 
     366 
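The reference computed by romberg_slit_1d is just an adaptive quadrature of I(sqrt(q0^2 + u^2)) over u in [0, delta_qv], divided by delta_qv. A standalone version of the same check, using scipy.integrate.quad in place of romberg and a made-up analytic I(q):

    import numpy as np
    from scipy.integrate import quad

    def Iq(qv):                      # stand-in analytic model
        return 1.0 / (1.0 + (qv/0.05)**4)

    delta_qv = 0.117
    q = np.array([0.01, 0.05, 0.10])
    # I_s(q0) = (1/delta_qv) * integral_0^delta_qv I(sqrt(q0^2 + u^2)) du
    smeared = [quad(lambda u: Iq(np.sqrt(q0**2 + u**2)), 0.0, delta_qv)[0] / delta_qv
               for q0 in q]
    print(np.asarray(smeared))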
     367def romberg_pinhole_1d(q, q_width, form, pars): 
     368    """ 
     369    Romberg integration for pinhole resolution. 
     370 
     371    This is an adaptive integration technique.  It is called with settings 
     372    that make it slow to evaluate but give it good accuracy. 
     373    """ 
     374    from scipy.integrate import romberg 
     375 
     376    _fn = lambda q, q0, dq: eval_form(q, form, pars)*gaussian(q, q0, dq) 
     377    r = [romberg(_fn, max(qi-5*dqi,0.01*q[0]), qi+5*dqi, args=(qi, dqi), 
     378                 divmax=100, vec_func=True, tol=0, rtol=1e-8) 
     379         for qi,dqi in zip(q,q_width)] 
     380    return np.asarray(r).flatten() 
     381 
     382 
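Likewise for the pinhole reference: integrate I(q') against the Gaussian resolution function over roughly +/- 5 sigma, here with scipy.integrate.quad instead of romberg and the same made-up I(q):

    import numpy as np
    from scipy.integrate import quad

    def Iq(qv):                      # stand-in analytic model
        return 1.0 / (1.0 + (qv/0.05)**4)

    def gauss(qp, q0, dq):
        return np.exp(-0.5*((qp - q0)/dq)**2) / (np.sqrt(2*np.pi)*dq)

    q = np.array([0.01, 0.05, 0.10])
    dq = 0.05 * q
    smeared = [quad(lambda qp: Iq(qp)*gauss(qp, q0, dq0),
                    max(q0 - 5*dq0, 1e-4), q0 + 5*dq0)[0]
               for q0, dq0 in zip(q, dq)]
    print(np.asarray(smeared))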
    225383class ResolutionTest(unittest.TestCase): 
    226384 
     
    246404        Slit smearing with perfect resolution. 
    247405        """ 
    248         resolution = Slit1D(self.x, 0., 0.) 
     406        resolution = Slit1D(self.x, width=0, height=0, q_calc=self.x) 
    249407        theory = self.Iq(resolution.q_calc) 
    250408        output = resolution.apply(theory) 
    251409        np.testing.assert_equal(output, self.y) 
    252410 
    253     @unittest.skip("slit smearing is still broken") 
    254     def test_slit(self): 
     411    @unittest.skip("not yet supported") 
     412    def test_slit_high(self): 
    255413        """ 
    256414        Slit smearing with height 0.005 
    257415        """ 
    258         resolution = Slit1D(self.x, 0., 0.005) 
     416        resolution = Slit1D(self.x, width=0, height=0.005, q_calc=self.x) 
    259417        theory = self.Iq(resolution.q_calc) 
    260418        output = resolution.apply(theory) 
    261         answer = [ 9.0618, 8.6401, 8.1186, 7.1391, 6.1528, 
    262                    5.5555, 4.5584, 3.5606, 2.5623, 2. ] 
     419        answer = [ 9.0618, 8.6402, 8.1187, 7.1392, 6.1528, 
     420                   5.5555, 4.5584, 3.5606, 2.5623, 2.0000 ] 
     421        np.testing.assert_allclose(output, answer, atol=1e-4) 
     422 
     423    @unittest.skip("not yet supported") 
     424    def test_slit_both_high(self): 
     425        """ 
     426        Slit smearing with width < 100*height. 
     427        """ 
     428        q = np.logspace(-4, -1, 10) 
     429        resolution = Slit1D(q, width=0.2, height=np.inf) 
     430        theory = 1000*self.Iq(resolution.q_calc**4) 
     431        output = resolution.apply(theory) 
     432        answer = [ 8.85785, 8.43012, 7.92687, 6.94566, 6.03660, 
     433                   5.40363, 4.40655, 3.40880, 2.41058, 2.00000 ] 
     434        np.testing.assert_allclose(output, answer, atol=1e-4) 
     435 
     436    @unittest.skip("not yet supported") 
     437    def test_slit_wide(self): 
     438        """ 
     439        Slit smearing with width 0.0002 
     440        """ 
     441        resolution = Slit1D(self.x, width=0.0002, height=0, q_calc=self.x) 
     442        theory = self.Iq(resolution.q_calc) 
     443        output = resolution.apply(theory) 
     444        answer = [ 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0 ] 
     445        np.testing.assert_allclose(output, answer, atol=1e-4) 
     446 
     447    @unittest.skip("not yet supported") 
     448    def test_slit_both_wide(self): 
     449        """ 
     450        Slit smearing with width > 100*height. 
     451        """ 
     452        resolution = Slit1D(self.x, width=0.0002, height=0.000001, 
     453                            q_calc=self.x) 
     454        theory = self.Iq(resolution.q_calc) 
     455        output = resolution.apply(theory) 
     456        answer = [ 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0 ] 
    263457        np.testing.assert_allclose(output, answer, atol=1e-4) 
    264458 
     
    285479        np.testing.assert_allclose(output, answer, atol=1e-8) 
    286480 
    287 # Q, dQ, I(Q) for Igor default sphere model. 
    288 # combines CMSphere5.txt and CMSphere5smearsphere.txt from sasview/tests 
    289 # TODO: move test data into its own file? 
    290 SPHERE_RESOLUTION_TEST_DATA = """\ 
     481 
     482class IgorComparisonTest(unittest.TestCase): 
     483 
     484    def setUp(self): 
     485        self.pars = TEST_PARS_PINHOLE_SPHERE 
     486        from sasmodels import core 
     487        from sasmodels.models import sphere 
     488        self.model = core.load_model(sphere, dtype='double') 
     489 
     490    def Iq_sphere(self, pars, resolution): 
     491        from sasmodels import core 
     492        kernel = core.make_kernel(self.model, [resolution.q_calc]) 
     493        theory = core.call_kernel(kernel, pars) 
     494        result = resolution.apply(theory) 
     495        kernel.release() 
     496        return result 
     497 
     498    def compare(self, q, output, answer, tolerance): 
     499        err = (output - answer)/answer 
     500        idx = abs(err) >= tolerance 
     501        problem = zip(q[idx], output[idx], answer[idx], err[idx]) 
     502        print "\n".join(str(v) for v in problem) 
     503        np.testing.assert_allclose(output, answer, rtol=tolerance) 
     504 
     505    def test_perfect(self): 
     506        """ 
     507        Compare sphere model with NIST Igor SANS, no resolution smearing. 
     508        """ 
     509        pars = TEST_PARS_SLIT_SPHERE 
     510        data_string = TEST_DATA_SLIT_SPHERE 
     511 
     512        data = np.loadtxt(data_string.split('\n')).T 
     513        q, width, answer, _ = data 
     514        resolution = Perfect1D(q) 
     515        output = self.Iq_sphere(pars, resolution) 
     516        self.compare(q, output, answer, 1e-6) 
     517 
     518    def test_pinhole(self): 
     519        """ 
     520        Compare pinhole resolution smearing with NIST Igor SANS 
     521        """ 
     522        pars = TEST_PARS_PINHOLE_SPHERE 
     523        data_string = TEST_DATA_PINHOLE_SPHERE 
     524 
     525        data = np.loadtxt(data_string.split('\n')).T 
     526        q, q_width, answer = data 
     527        resolution = Pinhole1D(q, q_width) 
     528        output = self.Iq_sphere(pars, resolution) 
     529        # TODO: relative error should be lower 
     530        self.compare(q, output, answer, 3e-4) 
     531 
     532    def test_pinhole_romberg(self): 
     533        """ 
     534        Compare pinhole resolution smearing with romberg integration result. 
     535        """ 
     536        pars = TEST_PARS_PINHOLE_SPHERE 
     537        data_string = TEST_DATA_PINHOLE_SPHERE 
     538        pars['radius'] *= 5 
     539        radius = pars['radius'] 
     540 
     541        data = np.loadtxt(data_string.split('\n')).T 
     542        q, q_width, answer = data 
     543        answer = romberg_pinhole_1d(q, q_width, self.model, pars) 
     544        ## Getting 0.1% requires 5 sigma and 200 points per fringe 
     545        #q_calc = interpolate(pinhole_extend_q(q, q_width, nsigma=5), 
     546        #                     2*np.pi/radius/200) 
     547        #tol = 0.001 
     548        ## The default 3 sigma and no extra points gets 1% 
     549        q_calc, tol = None, 0.01 
     550        resolution = Pinhole1D(q, q_width, q_calc=q_calc) 
     551        output = self.Iq_sphere(pars, resolution) 
     552        if 0: # debug plot 
     553            import matplotlib.pyplot as plt 
     554            resolution = Perfect1D(q) 
     555            source = self.Iq_sphere(pars, resolution) 
     556            plt.loglog(q, source, '.') 
     557            plt.loglog(q, answer, '-', hold=True) 
     558            plt.loglog(q, output, '-', hold=True) 
     559            plt.show() 
     560        self.compare(q, output, answer, tol) 
     561 
     562    def test_slit(self): 
     563        """ 
     564        Compare slit resolution smearing with NIST Igor SANS 
     565        """ 
     566        pars = TEST_PARS_SLIT_SPHERE 
     567        data_string = TEST_DATA_SLIT_SPHERE 
     568 
     569        data = np.loadtxt(data_string.split('\n')).T 
     570        q, delta_qv, _, answer = data 
     571        resolution = Slit1D(q, width=delta_qv, height=0) 
     572        output = self.Iq_sphere(pars, resolution) 
     573        # TODO: eliminate Igor test since it is too inaccurate to be useful. 
     574        # This means we can eliminate the test data as well, and instead 
     575        # use a generated q vector. 
     576        self.compare(q, output, answer, 0.5) 
     577 
     578    def test_slit_romberg(self): 
     579        """ 
     580        Compare slit resolution smearing with romberg integration result. 
     581        """ 
     582        pars = TEST_PARS_SLIT_SPHERE 
     583        data_string = TEST_DATA_SLIT_SPHERE 
     584        radius = pars['radius'] 
     585 
     586        data = np.loadtxt(data_string.split('\n')).T 
     587        q, delta_qv, _, answer = data 
     588        answer = romberg_slit_1d(q, delta_qv, self.model, pars) 
     589        q_calc = slit_extend_q(interpolate(q, 2*np.pi/radius/20), 
     590                               delta_qv[0], delta_qv[0]) 
     591        resolution = Slit1D(q, width=delta_qv, height=0, q_calc=q_calc) 
     592        output = self.Iq_sphere(pars, resolution) 
     593        # TODO: relative error should be lower 
     594        self.compare(q, output, answer, 0.025) 
     595 
     596    #TODO: can sas q spacing be too sparse for the resolution calculation? 
     597    @unittest.skip("suppress sparse data test; not supported by current code") 
     598    def test_pinhole_sparse(self): 
     599        """ 
     600        Compare pinhole resolution smearing with NIST Igor SANS on sparse data 
     601        """ 
     602        pars = TEST_PARS_PINHOLE_SPHERE 
     603        data_string = TEST_DATA_PINHOLE_SPHERE 
     604 
     605        data = np.loadtxt(data_string.split('\n')).T 
     606        q, q_width, answer = data[:, ::20] # Take every nth point 
     607        resolution = Pinhole1D(q, q_width) 
     608        output = self.Iq_sphere(pars, resolution) 
     609        self.compare(q, output, answer, 1e-6) 
     610 
     611 
     612 
     613# pinhole sphere parameters 
     614TEST_PARS_PINHOLE_SPHERE = { 
     615    'scale': 1.0, 'background': 0.01, 
     616    'radius': 60.0, 'sld': 1, 'solvent_sld': 6.3, 
     617    } 
     618# Q, dQ, I(Q) calculated by NIST Igor SANS package 
     619TEST_DATA_PINHOLE_SPHERE = """\ 
    2916200.001278 0.0002847 2538.41176383 
    2926210.001562 0.0002905 2536.91820405 
     
    391720""" 
    392721 
    393 class Pinhole1DTest(unittest.TestCase): 
    394  
    395     def setUp(self): 
    396         # sample q, q_width, I(q) calculated by NIST Igor SANS package 
    397         self.data = np.loadtxt(SPHERE_RESOLUTION_TEST_DATA.split('\n')).T 
    398         self.pars = dict(scale=1.0, background=0.01, radius=60.0, 
    399                          solvent_sld=6.3, sld=1) 
    400  
    401     def Iq(self, q, q_width): 
    402         from sasmodels import core 
    403         from sasmodels.models import sphere 
    404  
    405         model = core.load_model(sphere) 
    406         resolution = Pinhole1D(q, q_width) 
    407         kernel = core.make_kernel(model, [resolution.q_calc]) 
    408         theory = core.call_kernel(kernel, self.pars) 
    409         result = resolution.apply(theory) 
    410         return result 
    411  
    412     def test_sphere(self): 
    413         """ 
    414         Compare pinhole resolution smearing with NIST Igor SANS 
    415         """ 
    416         q, q_width, answer = self.data 
    417         output = self.Iq(q, q_width) 
    418         np.testing.assert_allclose(output, answer, rtol=0.006) 
    419  
    420     #TODO: is all sas data sampled densely enough to support resolution calcs? 
    421     @unittest.skip("suppress sparse data test; not supported by current code") 
    422     def test_sphere_sparse(self): 
    423         """ 
    424         Compare pinhole resolution smearing with NIST Igor SANS on sparse data 
    425         """ 
    426         q, q_width, answer = self.data[:, ::20]  # Take every nth point 
    427         output = self.Iq(q, q_width) 
    428         np.testing.assert_allclose(output, answer, rtol=0.006) 
    429  
     722# Slit sphere parameters 
     723TEST_PARS_SLIT_SPHERE = { 
     724    'scale': 0.01, 'background': 0.01, 
     725    'radius': 60000, 'sld': 1, 'solvent_sld': 4, 
     726    } 
     727# Q dQ I(Q) I_smeared(Q) 
     728TEST_DATA_SLIT_SPHERE = """\ 
     7292.26097e-05 0.117 5.5781372896e+09 1.4626077708e+06 
     7302.53847e-05 0.117 5.0363141458e+09 1.3117318023e+06 
     7312.81597e-05 0.117 4.4850108103e+09 1.1594863713e+06 
     7323.09347e-05 0.117 3.9364658459e+09 1.0094881630e+06 
     7333.37097e-05 0.117 3.4019975074e+09 8.6518941303e+05 
     7343.92597e-05 0.117 2.4139519814e+09 6.0232158311e+05 
     7354.48097e-05 0.117 1.5816877820e+09 3.8739994090e+05 
     7365.03597e-05 0.117 9.3715407224e+08 2.2745304775e+05 
     7375.59097e-05 0.117 4.8387917428e+08 1.2101295768e+05 
     7386.14597e-05 0.117 2.0193586928e+08 6.0055107771e+04 
     7396.70097e-05 0.117 5.5886110911e+07 3.2749521065e+04 
     7407.25597e-05 0.117 3.7782348010e+06 2.6350963616e+04 
     7417.81097e-05 0.117 5.3407817904e+06 2.9624963314e+04 
     7428.36597e-05 0.117 2.7975485523e+07 3.4403962254e+04 
     7438.92097e-05 0.117 4.9845448282e+07 3.6130017903e+04 
     7449.47597e-05 0.117 6.0092588905e+07 3.3495107849e+04 
     7451.00310e-04 0.117 5.6823430831e+07 2.7475726279e+04 
     7461.05860e-04 0.117 4.3857024036e+07 2.0144282226e+04 
     7471.11410e-04 0.117 2.7277144760e+07 1.3647403260e+04 
     7481.22510e-04 0.117 3.3119334113e+06 6.6519711526e+03 
     7491.33610e-04 0.117 1.4412859402e+06 6.9726212813e+03 
     7501.44710e-04 0.117 8.5620162463e+06 8.1441335775e+03 
     7511.55810e-04 0.117 9.6957429033e+06 6.4559996521e+03 
     7521.66910e-04 0.117 4.3818341914e+06 3.6252154156e+03 
     7531.78010e-04 0.117 2.7448997387e+05 2.4006505342e+03 
     7541.89110e-04 0.117 8.0472009936e+05 2.8187789089e+03 
     7552.00210e-04 0.117 2.8149552834e+06 3.0915662855e+03 
     7562.11310e-04 0.117 2.7510907861e+06 2.3722530293e+03 
     7572.22410e-04 0.117 1.0053133293e+06 1.4473468311e+03 
     7582.33510e-04 0.117 5.8428305052e+03 1.2048540556e+03 
     7592.44610e-04 0.117 5.1699305004e+05 1.4625670042e+03 
     7602.55710e-04 0.117 1.2120227268e+06 1.5010705973e+03 
     7612.66810e-04 0.117 9.7896842846e+05 1.1336343426e+03 
     7622.77910e-04 0.117 2.5507264791e+05 8.1848818080e+02 
     7633.05660e-04 0.117 5.2403101181e+05 7.4913374357e+02 
     7643.33410e-04 0.117 5.8699343809e+04 4.4669964560e+02 
     7653.61160e-04 0.117 3.0844327150e+05 4.6774007542e+02 
     7663.88910e-04 0.117 8.3360142970e+03 2.7169550220e+02 
     7674.16660e-04 0.117 1.8630080583e+05 3.0710983679e+02 
     7684.44410e-04 0.117 3.1616804732e-01 1.7959006831e+02 
     7694.72160e-04 0.117 1.1299016314e+05 2.0763952339e+02 
     7704.99910e-04 0.117 2.9952522747e+03 1.2536542765e+02 
     7715.27660e-04 0.117 6.7625695649e+04 1.4013969777e+02 
     7725.55410e-04 0.117 7.6927460089e+03 8.2145593180e+01 
     7736.10910e-04 0.117 1.1229057779e+04 8.4519745643e+01 
     7746.66410e-04 0.117 1.3035567943e+04 8.1554625609e+01 
     7757.21910e-04 0.117 1.3309931343e+04 7.4437319172e+01 
     7767.77410e-04 0.117 1.2462626212e+04 6.4697088261e+01 
     7778.32910e-04 0.117 1.0912927143e+04 5.3773301044e+01 
     7788.88410e-04 0.117 9.0172597469e+03 4.2843375753e+01 
     7799.43910e-04 0.117 7.0496495917e+03 3.2771032724e+01 
     7809.99410e-04 0.117 5.2030483682e+03 2.4113557144e+01 
     7811.05491e-03 0.117 3.5988976711e+03 1.7160773658e+01 
     7821.11041e-03 0.117 2.2996060652e+03 1.2016626459e+01 
     7831.22141e-03 0.117 6.4766590598e+02 6.0373017740e+00 
     7841.33241e-03 0.117 4.1963483264e+01 4.5215452974e+00 
     7851.44341e-03 0.117 6.3370708246e+01 5.1054681903e+00 
     7861.55441e-03 0.117 3.0736750577e+02 5.9176165298e+00 
     7871.66541e-03 0.117 5.0327682399e+02 5.9815000189e+00 
     7881.77641e-03 0.117 5.4084331454e+02 5.1634639625e+00 
     7891.88741e-03 0.117 4.3488671756e+02 3.8535158148e+00 
     7901.99841e-03 0.117 2.6322287860e+02 2.5824997753e+00 
     7912.10941e-03 0.117 1.0793633150e+02 1.7315517194e+00 
     7922.22041e-03 0.117 1.8474448850e+01 1.4077213604e+00 
     7932.33141e-03 0.117 1.5864062279e+00 1.4771560682e+00 
     7942.44241e-03 0.117 3.2267213848e+01 1.6916253448e+00 
     7952.55341e-03 0.117 7.4289116207e+01 1.8274751193e+00 
     7962.66441e-03 0.117 9.9000521929e+01 1.7706812289e+00 
     797""" 
    430798 
    431799def main(): 
     
    494862    #demo() 
    495863    main() 
    496  
    497  