source: sasview/src/sas/models/resolution.py @ 9f7fbd9

Last change 9f7fbd9, checked in by Paul Kienzle <pkienzle@…>, 9 years ago:

rewrite slit smearing for usans

1"""
2Define the resolution functions for the data.
3
4This defines classes for 1D and 2D resolution calculations.
5"""
6from __future__ import division
7from scipy.special import erf
8from numpy import sqrt, log, log10
9import numpy as np
10
11MINIMUM_RESOLUTION = 1e-8
12
13class Resolution1D(object):
14    """
15    Abstract base class defining a 1D resolution function.
16
17    *q* is the set of q values at which the data is measured.
18
19    *q_calc* is the set of q values at which the theory needs to be evaluated.
20    This may extend and interpolate the q values.
21
22    *apply* is the method to call with I(q_calc) to compute the resolution
23    smeared theory I(q).
24    """
25    q = None
26    q_calc = None
27    def apply(self, theory):
28        """
29        Smear *theory* by the resolution function, returning *Iq*.
30        """
31        raise NotImplementedError("Subclass does not define the apply function")
32
33
34class Perfect1D(Resolution1D):
35    """
36    Resolution function to use when there is no actual resolution smearing
37    to be applied.  It has the same interface as the other resolution
38    functions, but returns the identity function.
39    """
40    def __init__(self, q):
41        self.q_calc = self.q = q
42
43    def apply(self, theory):
44        return theory


class Pinhole1D(Resolution1D):
    r"""
    Pinhole aperture with q-dependent Gaussian resolution.

    *q* points at which the data is measured.

    *q_width* Gaussian 1-sigma resolution at each data point.

    *q_calc* is the list of points to calculate, or None if this should
    be estimated from the *q* and *q_width*.
    """
    def __init__(self, q, q_width, q_calc=None):
        # *min_step* is the minimum point spacing to use when computing the
        # underlying model.  It should be on the order of
        # $\tfrac{1}{10}\tfrac{2\pi}{d_\text{max}}$ to make sure that fringes
        # are computed with sufficient density to avoid aliasing effects.

        # Protect against calls with q_width=0.  The extend_q function will
        # not extend the q if q_width is 0, but q_width must be non-zero when
        # constructing the weight matrix to avoid division by zero errors.
        # In practice this should never be needed, since resolution should
        # default to Perfect1D if the pinhole geometry is not defined.
        self.q, self.q_width = q, q_width
        self.q_calc = pinhole_extend_q(q, q_width) \
            if q_calc is None else np.sort(q_calc)
        self.weight_matrix = pinhole_resolution(self.q_calc,
                self.q, np.maximum(q_width, MINIMUM_RESOLUTION))

    def apply(self, theory):
        return apply_resolution_matrix(self.weight_matrix, theory)
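
# Example usage for Pinhole1D (a minimal sketch; the 4% dq/q resolution and
# the power-law model are made-up illustration values, not part of this
# module or its tests):
#
#     q = np.logspace(-3, -1, 50)
#     resolution = Pinhole1D(q, q_width=0.04*q)
#     theory = resolution.q_calc**-4     # model evaluated on the fine grid
#     Iq = resolution.apply(theory)      # smeared I(q) at the measured q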


class Slit1D(Resolution1D):
    """
    Slit aperture with a complicated resolution function.

    *q* points at which the data is measured.

    *width* slit width

    *height* slit height

    *q_calc* is the list of points to calculate, or None if this should
    be estimated from *q*, *width* and *height*.

    The *weight_matrix* is computed by :func:`slit_resolution`
    """
    def __init__(self, q, width, height, q_calc=None):
        # TODO: maybe issue warnings rather than raising errors
        if not np.isscalar(width):
            if np.any(np.diff(width) != 0.0):
                raise ValueError("Slit resolution requires fixed width slits")
            width = width[0]
        if not np.isscalar(height):
            if np.any(np.diff(height) != 0.0):
                raise ValueError("Slit resolution requires fixed height slits")
            height = height[0]

        # Remember what width/height was used even though we won't need them
        # after the weight matrix is constructed
        self.width, self.height = width, height

        self.q = q.flatten()
        self.q_calc = slit_extend_q(q, width, height) \
            if q_calc is None else np.sort(q_calc)
        self.weight_matrix = \
            slit_resolution(self.q_calc, self.q, width, height)

    def apply(self, theory):
        return apply_resolution_matrix(self.weight_matrix, theory)
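
# Example usage for Slit1D (a minimal sketch of USANS-style smearing; the
# q range and the 0.117 1/Ang slit width are made-up illustration values):
#
#     q = np.logspace(-4, -1, 50)
#     resolution = Slit1D(q, width=0.117, height=0.)
#     Iq = resolution.apply(resolution.q_calc**-4)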


def apply_resolution_matrix(weight_matrix, theory):
    """
    Apply the resolution weight matrix to the computed theory function.
    """
    #print "apply shapes", theory.shape, weight_matrix.shape
    Iq = np.dot(theory[None,:], weight_matrix)
    #print "result shape",Iq.shape
    return Iq.flatten()


def pinhole_resolution(q_calc, q, q_width):
    """
    Compute the convolution matrix *W* for pinhole resolution 1-D data.

    Each column *W[:,i]* holds the normalized weights with which the points
    *q_calc* contribute to the resolution smeared point *q[i]*.  Given *W*,
    the resolution smearing can be computed using *dot(I(q_calc), W)*.

    *q_calc* must be increasing.  *q_width* must be greater than zero.
    """
    # The current algorithm is a midpoint rectangle rule.  In the test case,
    # neither trapezoid nor Simpson's rule improved the accuracy.
    edges = bin_edges(q_calc)
    edges[edges<0.0] = 0.0 # clip edges below zero
    G = erf( (edges[:,None] - q[None,:]) / (sqrt(2.0)*q_width)[None,:] )
    weights = G[1:] - G[:-1]
    weights /= np.sum(weights, axis=0)[None,:]
    return weights
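
# Sanity check for pinhole_resolution (a minimal sketch with made-up values;
# not part of this module's tests): each column of the weight matrix is
# normalized, so it should sum to one.
#
#     q = np.linspace(0.01, 0.1, 10)
#     W = pinhole_resolution(q_calc=q, q=q, q_width=0.002*np.ones_like(q))
#     assert np.allclose(W.sum(axis=0), 1.0)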


def slit_resolution(q_calc, q, width, height):
    r"""
    Build a weight matrix to compute *I_s(q)* from *I(q_calc)*, given
    $q_v$ = *width* and $q_h$ = *height*.

    *width* and *height* are scalars since current instruments use the
    same slit settings for all measurement points.

    If slit height is large relative to width, use:

    .. math::

        I_s(q_o) = \frac{1}{\Delta q_v}
            \int_0^{\Delta q_v} I(\sqrt{q_o^2 + u^2})\,du

    If slit width is large relative to height, use:

    .. math::

        I_s(q_o) = \frac{1}{2 \Delta q_v}
            \int_{-\Delta q_v}^{\Delta q_v} I(q_o + u)\,du
    """
    if width == 0.0 and height == 0.0:
        #print "condition zero"
        return 1

    q_edges = bin_edges(q_calc) # Note: requires q > 0
    q_edges[q_edges<0.0] = 0.0 # clip edges below zero

    #np.set_printoptions(linewidth=10000)
    if width <= 100.0 * height or height == 0:
        # The current algorithm is a midpoint rectangle rule.  In the test case,
        # neither trapezoid nor Simpson's rule improved the accuracy.
        #print "condition h", q_edges.shape, q.shape, q_calc.shape
        weights = np.zeros((len(q), len(q_calc)), 'd')
        for i, qi in enumerate(q):
            weights[i, :] = np.diff(q_to_u(q_edges, qi))
        weights /= width
        weights = weights.T
    else:
        #print "condition w"
        # Make q into a row vector and q_calc into a column vector
        q, q_calc = q[None,:], q_calc[:,None]
        in_x = (q_calc >= q-width) * (q_calc <= q+width)
        # Normalize by the window size 2*width to match the
        # 1/(2 \Delta q_v) factor in the docstring above.
        weights = np.diff(q_edges)[:,None] * in_x / (2.0*width)

    return weights
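
# Shape check for slit_resolution (a minimal sketch with made-up values;
# not part of this module's tests): the weight matrix maps I(q_calc) to
# I_s(q), so it has one row per calculated point and one column per
# measured point.
#
#     q = np.linspace(0.001, 0.02, 5)
#     q_calc = slit_extend_q(q, width=0.117, height=0.)
#     W = slit_resolution(q_calc, q, width=0.117, height=0.)
#     assert W.shape == (len(q_calc), len(q))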


def pinhole_extend_q(q, q_width, nsigma=3):
    """
    Given *q* and *q_width*, find a set of sampling points *q_calc* so
    that each point I(q) has sufficient support from the underlying
    function.
    """
    q_min, q_max = np.min(q - nsigma*q_width), np.max(q + nsigma*q_width)
    return geometric_extrapolation(q, q_min, q_max)


def slit_extend_q(q, width, height):
    """
    Given *q*, *width* and *height*, find a set of sampling points *q_calc* so
    that each point I(q) has sufficient support from the underlying
    function.
    """
    height # keep lint happy
    q_min, q_max = np.min(q), np.max(np.sqrt(q**2 + width**2))
    return geometric_extrapolation(q, q_min, q_max)


def bin_edges(x):
    """
    Determine bin edges from bin centers, assuming that edges are centered
    between the bins.

    Note: this uses the arithmetic mean, which may not be appropriate for
    log-scaled data.
    """
    if len(x) < 2 or (np.diff(x)<0).any():
        raise ValueError("Expected bins to be an increasing set")
    edges = np.hstack([
        x[0]  - 0.5*(x[1]  - x[0]),  # first point minus half first interval
        0.5*(x[1:] + x[:-1]),        # mid points of all central intervals
        x[-1] + 0.5*(x[-1] - x[-2]), # last point plus half last interval
        ])
    return edges
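
# Worked example for bin_edges (values chosen for illustration):
#
#     bin_edges(np.array([1., 2., 3., 4.]))
#     # => array([0.5, 1.5, 2.5, 3.5, 4.5])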


def q_to_u(q, q0):
    """
    Convert *q* values to *u* values for the integral computed at *q0*.
    """
    # array([value])**2 - value**2 is not always zero
    qpsq = q**2 - q0**2
    qpsq[qpsq<0] = 0
    return sqrt(qpsq)


def interpolate(q, max_step):
    """
    Returns *q_calc* with points spaced at most max_step apart.
    """
    step = np.diff(q)
    index = step > max_step
    if np.any(index):
        inserts = []
        for q_i, step_i in zip(q[:-1][index], step[index]):
            n = np.ceil(step_i/max_step)
            inserts.extend(q_i + np.arange(1, n)*(step_i/n))
        # Extend a few fringes beyond the end of the data
        inserts.extend(q[-1] + np.arange(1, 8)*max_step)
        q_calc = np.sort(np.hstack((q, inserts)))
    else:
        q_calc = q
    return q_calc
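
# Worked example for interpolate (values chosen for illustration): the gap
# between 2 and 4 exceeds max_step, so a midpoint is inserted, and seven
# extra steps are appended past the last data point.
#
#     interpolate(np.array([1., 2., 4.]), max_step=1.0)
#     # => array([ 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.])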


def linear_extrapolation(q, q_min, q_max):
    """
    Extrapolate *q* out to [*q_min*, *q_max*] using the step size in *q* as
    a guide.  Extrapolation below uses steps the same size as the first
    interval.  Extrapolation above uses steps the same size as the final
    interval.

    If *q_min* is zero or less, then *q[0]/10* is used instead.
    """
    q = np.sort(q)
    if q_min < q[0]:
        if q_min <= 0: q_min = q[0]/10
        delta = q[1] - q[0]
        q_low = np.arange(q_min, q[0], delta)
    else:
        q_low = []
    if q_max > q[-1]:
        delta = q[-1] - q[-2]
        q_high = np.arange(q[-1]+delta, q_max, delta)
    else:
        q_high = []
    return np.concatenate([q_low, q, q_high])
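
# Worked example for linear_extrapolation (values chosen for illustration):
# the steps below q[0] and above q[-1] match the first and last intervals
# (here both 0.1), and q_min <= 0 is replaced by q[0]/10.
#
#     q = np.linspace(0.1, 0.5, 5)
#     linear_extrapolation(q, q_min=0.0, q_max=0.7)
#     # => array([0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6])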


def geometric_extrapolation(q, q_min, q_max):
    r"""
    Extrapolate *q* to [*q_min*, *q_max*] using geometric steps, with the
    average geometric step size in *q* as the step size.

    If *q_min* is zero or less, then *q[0]/10* is used instead.

    Starting at $q_1$ and stepping geometrically by $\Delta q$
    to $q_n$ in $n$ points gives a geometric average of:

    .. math::

         \log \Delta q = (\log q_n - \log q_1) / (n - 1)

    From this we can compute the number of steps required to extend $q$
    from $q_n$ to $q_\text{max}$ by $\Delta q$ as:

    .. math::

         n_\text{extend} = (\log q_\text{max} - \log q_n) / \log \Delta q

    Substituting:

    .. math::

         n_\text{extend} = (n-1) (\log q_\text{max} - \log q_n)
            / (\log q_n - \log q_1)
    """
    q = np.sort(q)
    delta_q = (len(q)-1)/(log(q[-1]) - log(q[0]))
    if q_min < q[0]:
        if q_min <= 0: q_min = q[0]/10
        n_low = delta_q * (log(q[0])-log(q_min))
        q_low = np.logspace(log10(q_min), log10(q[0]), int(np.ceil(n_low))+1)[:-1]
    else:
        q_low = []
    if q_max > q[-1]:
        n_high = delta_q * (log(q_max)-log(q[-1]))
        q_high = np.logspace(log10(q[-1]), log10(q_max), int(np.ceil(n_high))+1)[1:]
    else:
        q_high = []
    return np.concatenate([q_low, q, q_high])
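
# Worked example for geometric_extrapolation (values chosen for
# illustration): q covers one decade in 10 geometric steps, so extending
# to [1e-3, 1.0] keeps roughly 10 points per decade on each side.
#
#     q = np.logspace(-2, -1, 11)
#     q_ext = geometric_extrapolation(q, q_min=1e-3, q_max=1.0)
#     # extends about one decade below and above, log-spaced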