source: sasmodels/sasmodels/multiscat.py @ 2c4a190

Last change: 2c4a190, checked in by Paul Kienzle <pkienzle@…>, 5 years ago

simplify use of multiple scattering in bumps fits

#!/usr/bin/env python
r"""
Multiple scattering calculator

Calculate multiple scattering using 2D FFT convolution.

Usage:

    -p, --probability: the scattering probability
    -q, --qmax: the maximum q of interest
    -w, --window: the extension window (q is calculated for qmax*window)
    -n, --nq: the number of mesh points (dq = qmax*window/nq)
    -r, --random: generate a random parameter set
    -2, --2d: perform the calculation for an oriented pattern
    model_name
    model_par=value ...

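For example (a hypothetical invocation; any sasmodels model name and
parameter assignments may be substituted)::

    python multiscat.py -p 0.5 -q 0.5 cylinder radius=100 length=300
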
Assume the probability of scattering is $p$. After each scattering event,
$1-p$ neutrons will leave the system and go to the detector, and the remaining
$p$ will scatter again.

Let the scattering probability for $n$ scattering events at $q$ be $f_n(q)$,
where
.. math:: f_1(q) = \frac{I_1(q)}{\int I_1(q) dq}
for $I_1(q)$, the single scattering from the system. After two scattering
events, the scattering probability will be the convolution of the first
scattering and itself, or $f_2(q) = (f_1*f_1)(q)$.  After $n$ events it will be
$f_n(q) = (f_1 * \cdots * f_1)(q)$.  The total scattering is calculated
as the weighted sum of $f_k$, with weights following the Poisson distribution
.. math:: P(k; \lambda) = \frac{\lambda^k e^{-\lambda}}{k!}
for $\lambda$ determined by the total thickness divided by the mean
free path between scattering events, giving
.. math::
    I(q) = \sum_{k=0}^\infty P(k; \lambda) f_k(q)
The $k=0$ term is ignored since it involves no scattering.
We cut the series at a cumulative probability of $C = 99\%$, that is, at the
smallest $n$ such that
.. math::
    \sum_{k=1}^n \frac{P(k; \lambda)}{1 - P(0; \lambda)} \ge C
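For example, at $p = 0.5$ ($\lambda = \ln 2 \approx 0.69$) the normalized
weights are roughly 0.69, 0.24, 0.06 and 0.01 for $k = 1 \ldots 4$, so the
series is cut at $n = 4$.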

Using the convolution theorem, where
$F = \mathcal{F}(f)$ is the Fourier transform,
.. math:: f * g = \mathcal{F}^{-1}\{\mathcal{F}\{f\} \cdot \mathcal{F}\{g\}\}
so
.. math:: f * \ldots * f = \mathcal{F}^{-1}\{ F^n \}
Since the Fourier transform is a linear operator, we can move the polynomial
expression for the convolution into the transform, giving
.. math::
    I(q) = \mathcal{F}^{-1}\left\{ \sum_{k=1}^{n} P(k; \lambda) F^k \right\}
In the dilute limit $\lambda \rightarrow 0$ only the $k=1$ term is active,
and so
.. math::
    P(1; \lambda) = \lambda e^{-\lambda} = \int I_1(q) dq
therefore we compute
.. math::
    I(q) = \int I_1(q) dq \mathcal{F}^{-1}\left\{
        \sum_{k=1}^{n} \frac{P(k; \lambda)}{P(1; \lambda)} F^k \right\}

For speed we may use the fast Fourier transform with a power-of-two size.
The resulting $I(q)$ will be linearly spaced and likely heavily oversampled.
The usual pinhole or slit resolution calculation can be performed on these
calculated values.

By default single precision OpenCL is used for the calculation.  Set the
environment variable *SAS_OPENCL=none* to use double precision numpy FFT
instead.  The OpenCL version is about 10x faster on an elderly Mac with
Intel HD 4000 graphics.  The single precision numerical artifacts don't
seem to seriously impact overall accuracy, though they look pretty bad.
"""

from __future__ import print_function, division

import argparse
import time

import numpy as np
from numpy import pi
from scipy.special import gamma

from sasmodels import core
from sasmodels import compare
from sasmodels.resolution import Resolution, bin_edges
from sasmodels.direct_model import call_kernel
import sasmodels.kernelcl

# TODO: select fast and accurate fft library
# clFFT: https://github.com/clMathLibraries/clFFT (AMD's OpenCL)
# - gpyfft: https://github.com/geggo/gpyfft (wraps clFFT)
# - arrayfire: https://github.com/arrayfire (wraps clFFT and much more; +cuda)
# pyFFT: https://github.com/fjarri-attic/pyfft (based on Apple's OpenCL; +cuda)
# - Reikna: https://github.com/fjarri/reikna (evolved from pyfft)
# genFFT: https://software.intel.com/en-us/articles/genFFT (Intel's OpenCL)
# VexCL: https://vexcl.readthedocs.io/en/latest/ (c++ library)
# TODO: switch to fftw when opencl is not available

try:
    import pyfft.cl
    import pyopencl.array as cl_array
    HAVE_OPENCL = sasmodels.kernelcl.use_opencl()
except ImportError:
    HAVE_OPENCL = False
PRECISION = np.dtype('f' if HAVE_OPENCL else 'd')  # 'f' or 'd'
USE_FAST = True  # OpenCL faster, less accurate math

class ICalculator:
    """
    Multiple scattering calculator
    """
    def fft(self, Iq):
        """
        Compute the forward FFT for an image, real -> complex.
        """
        raise NotImplementedError()

    def ifft(self, Iq):
        """
        Compute the inverse FFT for an image, complex -> complex.
        """
        raise NotImplementedError()

    def multiple_scattering(self, Iq, p, coverage=0.99):
        r"""
        Compute multiple scattering for I(q) given scattering probability p.

        Given a probability *p* of scattering within the sample thickness, the
        expected number of scattering events is $\lambda = -\log(1 - p)$,
        giving a Poisson weighted sum of single, double, triple, etc.
        scattering patterns.  The number of patterns used is based on
        coverage (default 99%).
        """
        raise NotImplementedError()

class NumpyCalculator(ICalculator):
    """
    Multiple scattering calculator using numpy fft.
    """
    def __init__(self, dims=None, dtype=PRECISION):
        self.dtype = dtype
        self.complex_dtype = np.dtype('F') if dtype == np.dtype('f') else np.dtype('D')

    def fft(self, Iq):
        #t0 = time.time()
        Iq = np.asarray(Iq, self.dtype)
        result = np.fft.fft2(Iq)
        #print("fft time", time.time()-t0)
        return result

    def ifft(self, fourier_frame):
        #t0 = time.time()
        fourier_frame = np.asarray(fourier_frame, self.complex_dtype)
        result = np.fft.ifft2(fourier_frame)
        #print("ifft time", time.time()-t0)
        return result

    def multiple_scattering(self, Iq, p, coverage=0.99):
        #t0 = time.time()
        coeffs = scattering_coeffs(p, coverage)
        poly = np.asarray(coeffs[::-1], dtype=self.dtype)
        scale = np.sum(Iq)
        frame = _forward_shift(Iq/scale, dtype=self.dtype)
        fourier_frame = np.fft.fft2(frame)
        convolved = fourier_frame * np.polyval(poly, fourier_frame)
        frame = np.fft.ifft2(convolved)
        result = scale * _inverse_shift(frame.real, dtype=self.dtype)
        #print("numpy multiscat time", time.time()-t0)
        return result

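# Example (untested sketch): using the numpy calculator directly on a square
# 2-D pattern with an even number of points per side, without going through
# the MultipleScattering wrapper defined below:
#
#     calc = NumpyCalculator(dtype='d')
#     Iq_multi = calc.multiple_scattering(Iq_grid, p=0.2, coverage=0.99)
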
# polyval1(c, x) computes (...((c0 x + c1) x + c2) x ... + cn) x
# where c is an array of length *degree* and x is an array of
# complex values (type double2) of length *n*. 2-D arrays can of
# course be treated as 1-D arrays of length *nx* X *ny*.
# When compiling with sasmodels.kernelcl.compile_model the double precision
# types are converted to single precision as needed.  See the code in
# sasmodels.generate._convert_type for details.
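# For reference, this is the OpenCL analogue of the numpy path above, which
# evaluates frame * np.polyval(coeffs[::-1], frame) in
# NumpyCalculator.multiple_scattering.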
POLYVAL1_KERNEL = """
kernel void polyval1(
    const int degree,
    global const double *coeff,
    const int n,
    global double2 *array)
{
    int index = get_global_id(0);
    if (index < n) {
        const double2 x = array[index];
        double2 total = coeff[0];
        for (int k=1; k < degree; k++) {
            total = fma(total, x, coeff[k]);
        }
        array[index] = total * x;
    }
}
"""

class OpenclCalculator(ICalculator):
    """
    Multiple scattering calculator using OpenCL via pyfft.
    """
    polyval1f = None
    polyval1d = None
    def __init__(self, dims, dtype=PRECISION):
        env = sasmodels.kernelcl.environment()
        context = env.get_context(dtype)
        if dtype == np.dtype('f'):
            if OpenclCalculator.polyval1f is None:
                program = sasmodels.kernelcl.compile_model(
                    context, POLYVAL1_KERNEL, dtype, fast=USE_FAST)
                # Assume context is always the same for a given dtype
                OpenclCalculator.polyval1f = program.polyval1
            self.dtype = dtype
            self.complex_dtype = np.dtype('F')
            self.polyval1 = OpenclCalculator.polyval1f
        else:
            if OpenclCalculator.polyval1d is None:
                program = sasmodels.kernelcl.compile_model(
                    context, POLYVAL1_KERNEL, dtype, fast=False)
                # Assume context is always the same for a given dtype
                OpenclCalculator.polyval1d = program.polyval1
            self.dtype = dtype
            self.complex_dtype = np.dtype('D')
            self.polyval1 = OpenclCalculator.polyval1d
        self.queue = env.get_queue(dtype)
        self.plan = pyfft.cl.Plan(dims, queue=self.queue)

    def fft(self, Iq):
        # forward transform
        #t0 = time.time()
        data = np.asarray(Iq, self.complex_dtype)
        gpu_data = cl_array.to_device(self.queue, data)
        self.plan.execute(gpu_data.data)
        result = gpu_data.get()
        #print("fft time", time.time()-t0)
        return result

    def ifft(self, fourier_frame):
        # inverse transform
        #t0 = time.time()
        data = np.asarray(fourier_frame, self.complex_dtype)
        gpu_data = cl_array.to_device(self.queue, data)
        self.plan.execute(gpu_data.data, inverse=True)
        result = gpu_data.get()
        #print("ifft time", time.time()-t0)
        return result

    def multiple_scattering(self, Iq, p, coverage=0.99):
        #t0 = time.time()
        coeffs = scattering_coeffs(p, coverage)
        scale = np.sum(Iq)
        poly = np.asarray(coeffs[::-1], self.dtype)
        frame = _forward_shift(Iq/scale, dtype=self.complex_dtype)
        gpu_data = cl_array.to_device(self.queue, frame)
        gpu_poly = cl_array.to_device(self.queue, poly)
        self.plan.execute(gpu_data.data)
        degree, data_size = poly.shape[0], frame.shape[0]*frame.shape[1]
        self.polyval1(
            self.queue, [data_size], None,
            np.int32(degree), gpu_poly.data, np.int32(data_size), gpu_data.data)
        self.plan.execute(gpu_data.data, inverse=True)
        frame = gpu_data.get()
        result = scale * _inverse_shift(frame.real, dtype=self.dtype)
        #print("OpenCL multiscat time", time.time()-t0)
        return result

Calculator = OpenclCalculator if HAVE_OPENCL else NumpyCalculator

def scattering_powers(Iq, n, dtype='f', transform=None):
    r"""
    Calculate the scattering powers up to n.

    The $k=1$ term is included even though it is just *Iq* itself.

    The frames are unweighted; to weight, scale by $\lambda^k e^{-\lambda}/k!$.
    """
    if transform is None:
        n_x, n_y = Iq.shape
        transform = Calculator(dims=(n_x*2, n_y*2), dtype=dtype)
    scale = np.sum(Iq)
    frame = _forward_shift(Iq/scale, dtype=dtype)
    F = transform.fft(frame)
    powers = [scale * _inverse_shift(transform.ifft(F**(k+1)).real, dtype=dtype)
              for k in range(n)]
    return powers

def scattering_coeffs(p, coverage=0.99):
    r"""
    Return the coefficients of the scattering powers for scattering
    probability *p*.  These are just the corresponding values for the
    Poisson distribution for $\lambda = -\ln(1-p)$ such that
    $\sum_{k = 0 \ldots n} P(k; \lambda)$ is larger than *coverage*.
    """
    L = -np.log(1-p)
    num_scatter = truncated_poisson_invcdf(coverage, L)
    coeffs = [L**k/gamma(k+2) for k in range(num_scatter)]
    return coeffs

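# Example (untested sketch): with p = 0.5 the expected number of scattering
# events is lambda = -log(0.5) ~= 0.69 and the normalized Poisson weights reach
# 99% coverage after four terms, so scattering_coeffs(0.5) returns four values,
# lambda**k / (k+1)! for k = 0..3, the first of which is exactly 1.0.
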
def truncated_poisson_invcdf(coverage, L):
    r"""
    Return the smallest k such that cdf(k; L) > coverage for the truncated
    Poisson distribution excluding k=0.
    """
    # pmf(k; L) = L**k * exp(-L) / (k! * (1 - exp(-L)))
    cdf = 0
    pmf = -np.exp(-L) / np.expm1(-L)
    k = 0
    while cdf < coverage:
        k += 1
        pmf *= L/k
        cdf += pmf
    return k

def _forward_shift(Iq, dtype=PRECISION):
    # Prepare padded array and forward transform
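    # In effect this zero-pads Iq to twice its size and applies an ifftshift,
    # placing the q = 0 point at index [0, 0] as the FFT convolution expects.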
    nq = Iq.shape[0]
    half_nq = nq//2
    frame = np.zeros((2*nq, 2*nq), dtype=dtype)
    frame[:half_nq, :half_nq] = Iq[half_nq:, half_nq:]
    frame[-half_nq:, :half_nq] = Iq[:half_nq, half_nq:]
    frame[:half_nq, -half_nq:] = Iq[half_nq:, :half_nq]
    frame[-half_nq:, -half_nq:] = Iq[:half_nq, :half_nq]
    return frame

def _inverse_shift(frame, dtype=PRECISION):
    # Invert the transform and recover the transformed data
    nq = frame.shape[0]//2
    half_nq = nq//2
    Iq = np.empty((nq, nq), dtype=dtype)
    Iq[half_nq:, half_nq:] = frame[:half_nq, :half_nq]
    Iq[:half_nq, half_nq:] = frame[-half_nq:, :half_nq]
    Iq[half_nq:, :half_nq] = frame[:half_nq, -half_nq:]
    Iq[:half_nq, :half_nq] = frame[-half_nq:, -half_nq:]
    return Iq


class MultipleScattering(Resolution):
    r"""
    Compute multiple scattering using Fourier convolution.

    The Fourier steps are determined by *qmax*, the maximum $q$ value
    desired, *nq*, the number of $q$ steps, and *window*, the amount
    of padding around the circular convolution.  The $q$ spacing
    will be $\Delta q = 2 q_\mathrm{max} w / n_q$.  If *nq* is not
    given it will use $n_q = 2^k$ such that $\Delta q < q_\mathrm{min}$.

    *probability* is related to the expected number of scattering
    events in the sample $\lambda$ as $p = 1 - e^{-\lambda}$.
    *coverage* determines how many scattering steps to consider.  The
    default is 0.99, which sets $n$ such that $1 \ldots n$ covers 99%
    of the Poisson probability mass function.

    If *is2d* is True then 2D scattering is used, otherwise it accepts
    and returns 1D scattering.

    *resolution* is the resolution function to apply after multiple
    scattering.  If present, then the resolution $q$ vectors will provide
    default values for *qmin*, *qmax* and *nq*.
    """
    def __init__(self, qmin=None, qmax=None, nq=None, window=2,
                 probability=None, coverage=0.99,
                 is2d=False, resolution=None,
                 dtype=PRECISION):
        # Infer qmin, qmax from instrument resolution calculator, if present
        if resolution is not None:
            is2d = hasattr(resolution, 'qx_data')
            if is2d:
                # 2D data
                if qmax is None:
                    qx_calc, qy_calc = resolution.q_calc
                    qmax = np.sqrt(np.max(qx_calc**2 + qy_calc**2))
                if qmin is None and nq is None:
                    qx, qy = resolution.data.x_bins, resolution.data.y_bins
                    if qx and qy:
                        dx = (np.max(qx) - np.min(qx)) / len(qx)
                        dy = (np.max(qy) - np.min(qy)) / len(qy)
                    else:
                        qx, qy = resolution.data.qx_data, resolution.data.qy_data
                        steps = np.sqrt(len(qx))
                        dx = (np.max(qx) - np.min(qx)) / steps
                        dy = (np.max(qy) - np.min(qy)) / steps
                    qmin = min(dx, dy)
            else:
                # 1D data
                if qmax is None:
                    qmax = np.max(resolution.q_calc)
                if qmin is None and nq is None:
                    qmin = np.min(np.abs(resolution.q_calc))

        # estimate nq from qmin, qmax if not given explicitly
        q_range = qmax * window
        if nq is None:
            nq = 2**np.ceil(np.log2(q_range/qmin))
        nq = int(nq)
        # Compute available qmin based on nq
        qmin = 2*q_range / nq
        #print(nq)

        # remember input parameters
        self.qmax = qmax
        self.qmin = qmin
        self.nq = nq
        self.probability = 0. if probability is None else probability
        self.coverage = coverage
        self.is2d = is2d
        self.window = window
        self.resolution = resolution

        # Determine the q values to calculate
        q = np.linspace(-q_range, q_range, nq)
        qx, qy = np.meshgrid(q, q)
        if is2d:
            q_calc = (qx.flatten(), qy.flatten())
        else:
            # For 1-D patterns, compute q from the center to the corners and
            # interpolate from there into the individual pixels.  Given that
            # nq represents the points in [-qmax*window, qmax*window],
            # then using 2*sqrt(2)*nq/2 will oversample the points in q by
            # a factor of two relative to the pixels.
            q_range_to_corner = np.sqrt(2.) * q_range
            nq_to_corner = 10*int(np.ceil(np.sqrt(2.) * nq))
            q_to_corner = np.linspace(0, q_range_to_corner, nq_to_corner+1)[1:]
            q_calc = (q_to_corner,)
            # Remember the q radii of the calculated points
            self._radius = np.sqrt(qx**2 + qy**2)
            #self._q = q_to_corner
        self._q_steps = q
        self.q_calc = q_calc

        # TODO: use cleaner data representation than that from sasview
        # Resolution function forwards underlying q data (for plotting, etc?)
        if is2d:
            if resolution is not None:
                # forward resolution function info to multiscattering
                self.qx_data = resolution.qx_data
                self.qy_data = resolution.qy_data
            else:
                # no underlying resolution function, but make it look like there is
                self.qx_data, self.qy_data = q_calc
        else:
            # 1-D radial profile is determined by the q values we need to
            # compute, either for the calculated q values for the resolution
            # function (if any) or for the raw q values desired
            self._q = np.linspace(qmin, qmax, nq//(2*window))
            self._edges = bin_edges(self._q)
            self._norm, _ = np.histogram(self._radius, bins=self._edges)
            if resolution is not None:
                self.q = resolution.q
            else:
                # no underlying resolution function, but make it look like there is
                self.q = self._q

        # Prepare the multiple scattering calculator (either numpy or OpenCL)
        self.transform = Calculator((2*nq, 2*nq), dtype=dtype)

        # Iq and Iqxy will be set during apply
        self.Iq = None # type: np.ndarray
        self.Iqxy = None # type: np.ndarray

        # Label probability as a fittable parameter, and give its external name
        # Note that the external name must be a valid python identifier, since
        # it will be set as an experiment attribute.
        self.fittable = {'probability': 'scattering_probability'}

    def apply(self, theory):
        if self.is2d:
            Iq_calc = theory
        else:
            Iq_calc = np.interp(self._radius, self.q_calc[0], theory)
        Iq_calc = Iq_calc.reshape(self.nq, self.nq)

        # CRUFT: don't need probability as a function anymore
        probability = self.probability() if callable(self.probability) else self.probability
        coverage = self.coverage
        #t0 = time.time()
        Iqxy = self.transform.multiple_scattering(Iq_calc, probability, coverage)
        #print("multiple scattering calc time", time.time()-t0)

        # remember the intermediate result in case we want to see it later
        self.Iqxy = Iqxy

        if self.is2d:
            if self.resolution is not None:
                Iqxy = self.resolution.apply(Iqxy)
            return Iqxy
        else:
            # remember the intermediate result in case we want to see it later
            Iq = self.radial_profile(Iqxy)
            self.Iq = Iq
            if self.resolution is not None:
                q = self._q
                Iq_res = np.interp(np.abs(self.resolution.q_calc), q, self.Iq)
                _ = """
                k = 6
                print("q theory", q[:k])
                print("Iq theory", theory[:k])
                print("interp NaN?", np.any(np.isnan(Iq_calc)))
                print("convolved NaN?", np.any(np.isnan(Iqxy)))
                print("Iq intgrated", self.Iq[:k])
                print("radius", self._radius[self.nq/2,self.nq/2:self.nq/2+k])
                print("q edges", self._edges[:k+1])
                print("Iq norm", self._norm[:k])
                print("res q", self.resolution.q_calc[:k])
                print("Iq res", Iq_res[:k])
                #print(Iq)
                #print(Iq_res)
                """
                Iq = self.resolution.apply(Iq_res)
            return Iq

    def radial_profile(self, Iqxy):
        """
        Compute the radial profile for the given *Iqxy* grid.  The grid must
        be defined on the same $(q_x, q_y)$ mesh used by this calculator.
        """
        # circular average, no anti-aliasing
        Iq = np.histogram(self._radius, bins=self._edges, weights=Iqxy)[0]/self._norm
        return Iq


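# Example (untested sketch, mirroring main() below): wrap a kernel evaluation
# in multiple scattering for a 1-D measurement, assuming *model* comes from
# core.load_model and *pars* is a parameter dictionary:
#
#     res = MultipleScattering(qmax=0.5, nq=1024, probability=0.2)
#     kernel = model.make_kernel(res.q_calc)
#     Iq = res.apply(call_kernel(kernel, pars))
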
def annular_average(qxy, Iqxy, qbins):
    """
    Compute annular average of points in *Iqxy* at *qbins*.  The $q_x$, $q_y$
    coordinates for *Iqxy* are given in *qxy*.
    """
    qxy, Iqxy = qxy.flatten(), Iqxy.flatten()
    index = np.argsort(qxy)
    qxy, Iqxy = qxy[index], Iqxy[index]
    print(qxy.shape, Iqxy.shape, index.shape, qbins.shape)
    #values = rebin(np.vstack((0., qxy)), Iqxy, qbins)
    integral = np.cumsum(Iqxy)
    Io = np.diff(np.interp(qbins, qxy, integral, left=0.))
    # normalize by area of annulus
    # TODO: doesn't properly account for box.
    # Need to chop off chords that are greater than the box edges
    # (left, right, top and bottom), then add back in the corners
    # chopped off by both. https://en.wikipedia.org/wiki/Circular_segment
    norms = np.diff(pi*qbins**2)
    return Io/norms

def rebin(x, I, xo):
    """
    Rebin the bin values *I* with edges *x* onto new edges *xo*.

    *x* and *xo* should be monotonically increasing.

    If *x* has duplicate values, then all corresponding values at I(x) will
    be effectively summed into the same bin.  If *xo* has duplicate values,
    the first bin will contain the entire contents and subsequent bins will
    contain zeros.
    """
    integral = np.cumsum(I)
    Io = np.diff(np.interp(xo, x[1:], integral, left=0.))
    return Io
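
# Example (untested sketch): merging pairs of unit bins doubles the contents;
#     rebin(x=[0, 1, 2, 3, 4], I=[1, 1, 1, 1], xo=[0, 2, 4])  =>  [2., 2.]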


def parse_pars(model, opts):
    # type: (ModelInfo, argparse.Namespace) -> Dict[str, float]
    """
    Parse par=val arguments from the command line.
    """

    seed = np.random.randint(1000000) if opts.random and opts.seed < 0 else opts.seed
    compare_opts = {
        'info': (model.info, model.info),
        'use_demo': False,
        'seed': seed,
        'mono': True,
        'magnetic': False,
        'values': opts.pars,
        #'show_pars': True,
        'show_pars': False,
        'is2d': opts.is2d,
    }
    # Note: sascomp allows comparison on a pair of models, so ignore the second.
    pars, _ = compare.parse_pars(compare_opts)
    return pars


def main():
    parser = argparse.ArgumentParser(
        description="Compute multiple scattering",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        )
    parser.add_argument('-p', '--probability', type=float, default=0.1,
                        help="scattering probability")
    parser.add_argument('-n', '--nq', type=int, default=1024,
                        help='number of mesh points')
    parser.add_argument('-q', '--qmax', type=float, default=0.5,
                        help='max q')
    parser.add_argument('-w', '--window', type=float, default=2.0,
                        help='q calc = q max * window')
    parser.add_argument('-2', '--2d', dest='is2d', action='store_true',
                        help='oriented sample')
    parser.add_argument('-s', '--seed', type=int, default=-1,
                        help='random pars with given seed')
    parser.add_argument('-r', '--random', action='store_true',
                        help='random pars with random seed')
    parser.add_argument('-o', '--outfile', type=str, default="",
                        help='basename for output data files')
    parser.add_argument('model', type=str,
                        help='sas model name such as cylinder')
    parser.add_argument('pars', type=str, nargs='*',
                        help='model parameters such as radius=30')
    opts = parser.parse_args()
    assert opts.nq%2 == 0, "require even # points"

    model = core.load_model(opts.model)
    pars = parse_pars(model, opts)
    res = MultipleScattering(qmax=opts.qmax, nq=opts.nq, window=opts.window,
                             probability=opts.probability, is2d=opts.is2d)
    kernel = model.make_kernel(res.q_calc)
    #print(pars)
    bg = pars.get('background', 0.0)
    pars['background'] = 0.0
    theory = call_kernel(kernel, pars)
    Iq = res.apply(theory) + bg
    plot_and_save_powers(res, theory, Iq, outfile=opts.outfile, background=bg)

def plot_and_save_powers(res, theory, result, plot=True, outfile="", background=0.):
    import pylab
    probability, coverage = res.probability, res.coverage
    weights = scattering_coeffs(probability, coverage)

    # cribbed from MultipleScattering.apply
    if res.is2d:
        Iq_calc = theory
    else:
        Iq_calc = np.interp(res._radius, res.q_calc[0], theory)
    Iq_calc = Iq_calc.reshape(res.nq, res.nq)

    # Compute the scattering powers for 1, 2, ... n scattering events
    powers = scattering_powers(Iq_calc, len(weights))

    #plotxy(Iqxy); import pylab; pylab.figure()
    if res.is2d:
        if outfile:
            data = np.vstack([Ipower.flatten() for Ipower in powers]).T
            np.savetxt(outfile + "_powers.txt", data)
            data = np.vstack(Iq_calc).T
            np.savetxt(outfile + ".txt", data)
        if plot:
            plotxy((res._q_steps, res._q_steps), Iq_calc)
            pylab.title("single scattering F")
            for k, v in enumerate(powers[1:]):
                pylab.figure()
                plotxy((res._q_steps, res._q_steps), v+background)
                pylab.title("multiple scattering F^%d" % (k+2))
            pylab.figure()
            plotxy((res._q_steps, res._q_steps), res.Iqxy+background)
            pylab.title("total scattering for p=%g" % probability)
            if res.resolution is not None:
                pylab.figure()
                plotxy((res._q_steps, res._q_steps), result)
                pylab.title("total scattering with resolution")
    else:
        q = res._q
        Iq_powers = [res.radial_profile(Iqxy) for Iqxy in powers]
        if outfile:
            data = np.vstack([q, theory, res.Iq]).T
            np.savetxt(outfile + ".txt", data)
            # circular average, no anti-aliasing for individual powers
            data = np.vstack([q] + Iq_powers).T
            np.savetxt(outfile + "_powers.txt", data)
        if plot:
            # Plot 2D pattern for total scattering
            plotxy((res._q_steps, res._q_steps), res.Iqxy+background)
            pylab.title("total scattering for p=%g" % probability)
            pylab.figure()

            # Plot 1D pattern for partial scattering
            pylab.loglog(q, res.Iq+background, label="total for p=%g"%probability)
            if res.resolution is not None:
                pylab.loglog(q, result, label="total with dQ")
            #new_annulus = annular_average(res._radius, res.Iqxy, res._edges)
            #pylab.loglog(q, new_annulus+background, label="new total for p=%g"%probability)
            for n, (w, Ipower) in enumerate(zip(weights, Iq_powers)):
                pylab.loglog(q, w*Ipower+background, label="scattering^%d"%(n+1))
            pylab.legend()
            pylab.title('total scattering for p=%g' % probability)
    pylab.show()

def plotxy(q, Iq):
    import pylab
    # q is a tuple of (q,) or (qx, qy)
    if len(q) == 1:
        pylab.loglog(q[0], Iq)
    else:
        data = Iq.copy()
        data[Iq <= 0] = np.min(Iq[Iq > 0])/2
        pylab.imshow(np.log10(data))

if __name__ == "__main__":
    main()