source: sasmodels/sasmodels/multiscat.py @ b3703f5

Last change: b3703f5, checked in by Paul Kienzle <pkienzle@…>, 6 years ago

lint reduction

1#!/usr/bin/env python
2r"""
3Multiple scattering calculator
4
5Calculate multiple scattering using 2D FFT convolution.
6
7Usage:
8
9    -p, --probability: the scattering probability
    -q, --qmax: the max q that you care about
11    -w, --window: the extension window (q is calculated for qmax*window)
12    -n, --nq: the number of mesh points (dq = qmax*window/nq)
13    -r, --random: generate a random parameter set
14    -2, --2d: perform the calculation for an oriented pattern
15    model_name
16    model_par=value ...
17
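For example, an illustrative invocation (any model name and parameter
values accepted by sascomp can be used):

    python multiscat.py -p 0.8 -q 0.2 cylinder radius=30
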
18Assume the probability of scattering is $p$. After each scattering event,
19$1-p$ neutrons will leave the system and go to the detector, and the remaining
20$p$ will scatter again.
21
Let the scattering probability for $n$ scattering events at $q$ be $f_n(q)$,
23where
24.. math:: f_1(q) = \frac{I_1(q)}{\int I_1(q) dq}
25for $I_1(q)$, the single scattering from the system. After two scattering
26events, the scattering probability will be the convolution of the first
27scattering and itself, or $f_2(q) = (f_1*f_1)(q)$.  After $n$ events it will be
28$f_n(q) = (f_1 * \cdots * f_1)(q)$.  The total scattering is calculated
29as the weighted sum of $f_k$, with weights following the Poisson distribution
30.. math:: P(k; \lambda) = \frac{\lambda^k e^{-\lambda}}{k!}
31for $\lambda$ determined by the total thickness divided by the mean
32free path between scattering, giving
33.. math::
34    I(q) = \sum_{k=0}^\infty P(k; \lambda) f_k(q)
35The $k=0$ term is ignored since it involves no scattering.
36We cut the series when cumulative probability is less than cutoff $C=99\%$,
37which is $\max n$ such that
38.. math::
    \sum_{k=1}^n \frac{P(k; \lambda)}{1 - P(0; \lambda)} < C
40
41Using the convolution theorem, where
42$F = \mathcal{F}(f)$ is the Fourier transform,
43.. math:: f * g = \mathcal{F}^{-1}\{\mathcal{F}\{f\} \cdot \mathcal{F}\{g\}\}
44so
45.. math:: f * \ldots * f = \mathcal{F}^{-1}\{ F^n \}
46Since the Fourier transform is a linear operator, we can move the polynomial
47expression for the convolution into the transform, giving
48.. math::
49    I(q) = \mathcal{F}^{-1}\left\{ \sum_{k=1}^{n} P(k; \lambda) F^k \right\}
In the dilute limit $\lambda \rightarrow 0$ only the $k=1$ term is active,
51and so
52.. math::
    P(1; \lambda) = \lambda e^{-\lambda} = \int I_1(q) dq
54therefore we compute
55.. math::
56    I(q) = \int I_1(q) dq \mathcal{F}^{-1}\left\{
        \sum_{k=1}^{n} \frac{P(k; \lambda)}{P(1; \lambda)} F^k \right\}
58
For speed we may use the fast Fourier transform with a power-of-two size.
60The resulting $I(q)$ will be linearly spaced and likely heavily oversampled.
The usual pinhole or slit resolution calculation can be performed from these
62calculated values.
63
64By default single precision OpenCL is used for the calculation.  Set the
65environment variable *SAS_OPENCL=none* to use double precision numpy FFT
instead.  The OpenCL version is about 10x faster on an elderly Mac with
67Intel HD 4000 graphics.  The single precision numerical artifacts don't
68seem to seriously impact overall accuracy, though they look pretty bad.
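
A minimal sketch of programmatic use, mirroring the main() driver at the
bottom of this module (the model name and parameter values are only
placeholders):

    from sasmodels import core
    from sasmodels.direct_model import call_kernel
    from sasmodels.multiscat import MultipleScattering

    model = core.load_model("cylinder")
    res = MultipleScattering(qmax=0.5, nq=1024, window=2.0,
                             probability=0.1, is2d=False)
    kernel = model.make_kernel(res.q_calc)
    theory = call_kernel(kernel, {"radius": 30.0})
    Iq = res.apply(theory)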
69"""
70
71from __future__ import print_function, division
72
73import argparse
74import time
75
76import numpy as np
77from numpy import pi
78from scipy.special import gamma
79
80from sasmodels import core
81from sasmodels import compare
82from sasmodels.resolution import Resolution, bin_edges
83from sasmodels.direct_model import call_kernel
84import sasmodels.kernelcl
85
86# TODO: select fast and accurate fft library
87# clFFT: https://github.com/clMathLibraries/clFFT (AMD's OpenCL)
88# - gpyfft: https://github.com/geggo/gpyfft (wraps clFFT)
89# - arrayfire: https://github.com/arrayfire (wraps clFFT and much more; +cuda)
90# pyFFT: https://github.com/fjarri-attic/pyfft (based on Apple's OpenCL; +cuda)
91# - Reikna: https://github.com/fjarri/reikna (evolved from pyfft)
92# genFFT: https://software.intel.com/en-us/articles/genFFT (Intel's OpenCL)
93# VexCL: https://vexcl.readthedocs.io/en/latest/ (c++ library)
94# TODO: switch to fftw when opencl is not available
95
96try:
97    import pyfft.cl
98    import pyopencl.array as cl_array
99    HAVE_OPENCL = sasmodels.kernelcl.use_opencl()
100except ImportError:
101    HAVE_OPENCL = False
102PRECISION = np.dtype('f' if HAVE_OPENCL else 'd')  # 'f' or 'd'
103USE_FAST = True  # OpenCL faster, less accurate math
104
105class ICalculator:
106    """
107    Multiple scattering calculator
108    """
109    def fft(self, Iq):
110        """
111        Compute the forward FFT for an image, real -> complex.
112        """
113        raise NotImplementedError()
114
115    def ifft(self, Iq):
116        """
117        Compute the inverse FFT for an image, complex -> complex.
118        """
119        raise NotImplementedError()
120
    def multiple_scattering(self, Iq, p, coverage=0.99):
122        r"""
123        Compute multiple scattering for I(q) given scattering probability p.
124
125        Given a probability p of scattering with the thickness, the expected
126        number of scattering events, $\lambda$ is $-\log(1 - p)$, giving a
127        Poisson weighted sum of single, double, triple, etc. scattering patterns.
128        The number of patterns used is based on coverage (default 99%).
129        """
130        raise NotImplementedError()
131
132class NumpyCalculator(ICalculator):
133    """
134    Multiple scattering calculator using numpy fft.
135    """
136    def __init__(self, dims=None, dtype=PRECISION):
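        # *dims* is unused here; it is accepted for interface compatibility
        # with OpenclCalculator, which needs the transform size when it
        # builds its FFT plan.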
137        self.dtype = dtype
138        self.complex_dtype = np.dtype('F') if dtype == np.dtype('f') else np.dtype('D')
139
140    def fft(self, Iq):
141        #t0 = time.time()
142        Iq = np.asarray(Iq, self.dtype)
143        result = np.fft.fft2(Iq)
144        #print("fft time", time.time()-t0)
145        return result
146
147    def ifft(self, fourier_frame):
148        #t0 = time.time()
149        fourier_frame = np.asarray(fourier_frame, self.complex_dtype)
150        result = np.fft.ifft2(fourier_frame)
151        #print("ifft time", time.time()-t0)
152        return result
153
154    def multiple_scattering(self, Iq, p, coverage=0.99):
155        #t0 = time.time()
156        coeffs = scattering_coeffs(p, coverage)
157        poly = np.asarray(coeffs[::-1], dtype=self.dtype)
158        scale = np.sum(Iq)
159        frame = _forward_shift(Iq/scale, dtype=self.dtype)
160        fourier_frame = np.fft.fft2(frame)
161        convolved = fourier_frame * np.polyval(poly, fourier_frame)
162        frame = np.fft.ifft2(convolved)
163        result = scale * _inverse_shift(frame.real, dtype=self.dtype)
164        #print("numpy multiscat time", time.time()-t0)
165        return result
166
167# polyval1(c, x) computes (...((c0 x + c1) x + c2) x ... + cn) x
168# where c is an array of length *degree* and x is an array of
169# complex values (type double2) of length *n*. 2-D arrays can of
170# course be treated as 1-D arrays of length *nx* X *ny*.
171# When compiling with sasmodels.kernelcl.compile_model the double precision
172# types are converted to single precision as needed.  See the code in
173# sasmodels.generate._convert_type for details.
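# In numpy terms the kernel computes roughly:
#     array = np.polyval(coeff, array) * array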
174POLYVAL1_KERNEL = """
175kernel void polyval1(
176    const int degree,
177    global const double *coeff,
178    const int n,
179    global double2 *array)
180{
181    int index = get_global_id(0);
182    if (index < n) {
183        const double2 x = array[index];
184        double2 total = coeff[0];
185        for (int k=1; k < degree; k++) {
186            total = fma(total, x, coeff[k]);
187        }
188        array[index] = total * x;
189    }
190}
191"""
192
193class OpenclCalculator(ICalculator):
194    """
195    Multiple scattering calculator using OpenCL via pyfft.
196    """
197    polyval1f = None
198    polyval1d = None
199    def __init__(self, dims, dtype=PRECISION):
200        env = sasmodels.kernelcl.environment()
201        context = env.get_context(dtype)
202        if dtype == np.dtype('f'):
203            if OpenclCalculator.polyval1f is None:
204                program = sasmodels.kernelcl.compile_model(
205                    context, POLYVAL1_KERNEL, dtype, fast=USE_FAST)
206                # Assume context is always the same for a given dtype
207                OpenclCalculator.polyval1f = program.polyval1
208            self.dtype = dtype
209            self.complex_dtype = np.dtype('F')
210            self.polyval1 = OpenclCalculator.polyval1f
211        else:
212            if OpenclCalculator.polyval1d is None:
213                program = sasmodels.kernelcl.compile_model(
214                    context, POLYVAL1_KERNEL, dtype, fast=False)
215                # Assume context is always the same for a given dtype
216                OpenclCalculator.polyval1d = program.polyval1
217            self.dtype = dtype
            self.complex_dtype = np.dtype('D')
219            self.polyval1 = OpenclCalculator.polyval1d
220        self.queue = env.get_queue(dtype)
221        self.plan = pyfft.cl.Plan(dims, queue=self.queue)
222
223    def fft(self, Iq):
224        # forward transform
225        #t0 = time.time()
226        data = np.asarray(Iq, self.complex_dtype)
227        gpu_data = cl_array.to_device(self.queue, data)
228        self.plan.execute(gpu_data.data)
229        result = gpu_data.get()
230        #print("fft time", time.time()-t0)
231        return result
232
233    def ifft(self, fourier_frame):
234        # inverse transform
235        #t0 = time.time()
236        data = np.asarray(fourier_frame, self.complex_dtype)
237        gpu_data = cl_array.to_device(self.queue, data)
238        self.plan.execute(gpu_data.data, inverse=True)
239        result = gpu_data.get()
240        #print("ifft time", time.time()-t0)
241        return result
242
243    def multiple_scattering(self, Iq, p, coverage=0.99):
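        # Same algorithm as NumpyCalculator.multiple_scattering, but the FFT,
        # the polynomial evaluation and the inverse FFT all run on the GPU.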
244        #t0 = time.time()
245        coeffs = scattering_coeffs(p, coverage)
246        scale = np.sum(Iq)
247        poly = np.asarray(coeffs[::-1], self.dtype)
248        frame = _forward_shift(Iq/scale, dtype=self.complex_dtype)
249        gpu_data = cl_array.to_device(self.queue, frame)
250        gpu_poly = cl_array.to_device(self.queue, poly)
251        self.plan.execute(gpu_data.data)
        degree, data_size = poly.shape[0], frame.shape[0]*frame.shape[1]
253        self.polyval1(
254            self.queue, [data_size], None,
255            np.int32(degree), gpu_poly.data, np.int32(data_size), gpu_data.data)
256        self.plan.execute(gpu_data.data, inverse=True)
257        frame = gpu_data.get()
259        result = scale * _inverse_shift(frame.real, dtype=self.dtype)
260        #print("OpenCL multiscat time", time.time()-t0)
261        return result
262
263Calculator = OpenclCalculator if HAVE_OPENCL else NumpyCalculator
264
265def scattering_powers(Iq, n, dtype='f', transform=None):
266    r"""
267    Calculate the scattering powers up to n.
268
    This includes $k=1$, even though that is just *Iq* itself.
270
271    The frames are unweighted; to weight scale by $\lambda^k e^{-\lambda}/k!$.
272    """
273    if transform is None:
274        n_x, n_y = Iq.shape
275        transform = Calculator(dims=(n_x*2, n_y*2), dtype=dtype)
276    scale = np.sum(Iq)
277    frame = _forward_shift(Iq/scale, dtype=dtype)
278    F = transform.fft(frame)
279    powers = [scale * _inverse_shift(transform.ifft(F**(k+1)).real, dtype=dtype)
280              for k in range(n)]
281    return powers
282
283def scattering_coeffs(p, coverage=0.99):
284    r"""
    Return the coefficients of the scattering powers for scattering
    probability *p*.  These are just the corresponding values of the
287    Poisson distribution for $\lambda = -\ln(1-p)$ such that
288    $\sum_{k = 0 \ldots n} P(k; \lambda)$ is larger than *coverage*.
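
    For example, $p = 1 - e^{-1}$ (so $\lambda = 1$) gives the four
    coefficients $[1/1!,\ 1/2!,\ 1/3!,\ 1/4!]$, since four scattering orders
    are needed to cover 99% of the truncated Poisson weight.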
289    """
290    L = -np.log(1-p)
291    num_scatter = truncated_poisson_invcdf(coverage, L)
292    coeffs = [L**k/gamma(k+2) for k in range(num_scatter)]
293    return coeffs
294
295def truncated_poisson_invcdf(coverage, L):
296    r"""
    Return the smallest k such that cdf(k; L) > coverage for the truncated
    Poisson distribution which excludes k=0.
299    """
    # pmf(k; L) = L**k * exp(-L) / (k! * (1-exp(-L)))
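    # Seed with the k=0 term of the normalized distribution, then accumulate
    # the cdf using the recurrence pmf(k) = pmf(k-1) * L/k.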
301    cdf = 0
302    pmf = -np.exp(-L) / np.expm1(-L)
303    k = 0
304    while cdf < coverage:
305        k += 1
306        pmf *= L/k
307        cdf += pmf
308    return k
309
310def _forward_shift(Iq, dtype=PRECISION):
311    # Prepare padded array and forward transform
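    # The padded 2*nq x 2*nq frame makes the FFT convolution linear rather
    # than circular, and the quadrants of Iq are swapped so that q = 0 lands
    # in frame[0, 0], where the FFT expects it (similar to np.fft.ifftshift).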
312    nq = Iq.shape[0]
313    half_nq = nq//2
314    frame = np.zeros((2*nq, 2*nq), dtype=dtype)
315    frame[:half_nq, :half_nq] = Iq[half_nq:, half_nq:]
316    frame[-half_nq:, :half_nq] = Iq[:half_nq, half_nq:]
317    frame[:half_nq, -half_nq:] = Iq[half_nq:, :half_nq]
318    frame[-half_nq:, -half_nq:] = Iq[:half_nq, :half_nq]
319    return frame
320
321def _inverse_shift(frame, dtype=PRECISION):
    # Undo the quadrant swap and strip the padding to recover I(q)
323    nq = frame.shape[0]//2
324    half_nq = nq//2
325    Iq = np.empty((nq, nq), dtype=dtype)
326    Iq[half_nq:, half_nq:] = frame[:half_nq, :half_nq]
327    Iq[:half_nq, half_nq:] = frame[-half_nq:, :half_nq]
328    Iq[half_nq:, :half_nq] = frame[:half_nq, -half_nq:]
329    Iq[:half_nq, :half_nq] = frame[-half_nq:, -half_nq:]
330    return Iq
331
332
333class MultipleScattering(Resolution):
334    r"""
335    Compute multiple scattering using Fourier convolution.
336
337    The fourier steps are determined by *qmax*, the maximum $q$ value
338    desired, *nq* the number of $q$ steps and *window*, the amount
339    of padding around the circular convolution.  The $q$ spacing
340    will be $\Delta q = 2 q_\mathrm{max} w / n_q$.  If *nq* is not
341    given it will use $n_q = 2^k$ such that $\Delta q < q_\mathrm{min}$.
342
343    *probability* is related to the expected number of scattering
    events in the sample $\lambda$ as $p = 1 - e^{-\lambda}$.  As a
345    hack to allow probability to be a fitted parameter, the "value"
346    can be a function that takes no parameters and returns the current
347    value of the probability.  *coverage* determines how many scattering
348    steps to consider.  The default is 0.99, which sets $n$ such that
349    $1 \ldots n$ covers 99% of the Poisson probability mass function.
350
    If *is2d* is True then 2D scattering is used, otherwise it accepts
352    and returns 1D scattering.
353
354    *resolution* is the resolution function to apply after multiple
355    scattering.  If present, then the resolution $q$ vectors will provide
356    default values for *qmin*, *qmax* and *nq*.
357    """
358    def __init__(self, qmin=None, qmax=None, nq=None, window=2,
359                 probability=None, coverage=0.99,
360                 is2d=False, resolution=None,
361                 dtype=PRECISION):
362        # Infer qmin, qmax from instrument resolution calculator, if present
363        if resolution is not None:
364            is2d = hasattr(resolution, 'qx_data')
365            if is2d:
366                # 2D data
367                if qmax is None:
368                    qx_calc, qy_calc = resolution.q_calc
369                    qmax = np.sqrt(np.max(qx_calc**2 + qy_calc**2))
370                if qmin is None and nq is None:
371                    qx, qy = resolution.data.x_bins, resolution.data.y_bins
372                    if qx and qy:
373                        dx = (np.max(qx) - np.min(qx)) / len(qx)
374                        dy = (np.max(qy) - np.min(qy)) / len(qy)
375                    else:
376                        qx, qy = resolution.data.qx_data, resolution.data.qy_data
377                        steps = np.sqrt(len(qx))
378                        dx = (np.max(qx) - np.min(qx)) / steps
379                        dy = (np.max(qy) - np.min(qy)) / steps
380                    qmin = min(dx, dy)
381            else:
382                # 1D data
383                if qmax is None:
384                    qmax = np.max(resolution.q_calc)
385                if qmin is None and nq is None:
386                    qmin = np.min(np.abs(resolution.q_calc))
387
388        # estimate nq from qmin, qmax if not given explicitly
389        q_range = qmax * window
390        if nq is None:
391            nq = 2**np.ceil(np.log2(q_range/qmin))
392        nq = int(nq)
393        # Compute available qmin based on nq
394        qmin = 2*q_range / nq
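        # e.g., with the command-line defaults qmax=0.5, window=2, nq=1024,
        # q_range is 1.0 and the recoverable qmin is about 0.002.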
395        #print(nq)
396
397        # remember input parameters
398        self.qmax = qmax
399        self.qmin = qmin
400        self.nq = nq
401        self.probability = probability
402        self.coverage = coverage
403        self.is2d = is2d
404        self.window = window
405        self.resolution = resolution
406
407        # Determine the q values to calculate
408        q = np.linspace(-q_range, q_range, nq)
409        qx, qy = np.meshgrid(q, q)
410        if is2d:
411            q_calc = (qx.flatten(), qy.flatten())
412        else:
413            # For 1-D patterns, compute q from the center to the corners and
414            # interpolate from there into the individual pixels.  Given that
            # nq represents the points in [-qmax*window, qmax*window],
416            # then using 2*sqrt(2)*nq/2 will oversample the points in q by
417            # a factor of two relative to the pixels.
418            q_range_to_corner = np.sqrt(2.) * q_range
419            nq_to_corner = 10*int(np.ceil(np.sqrt(2.) * nq))
420            q_to_corner = np.linspace(0, q_range_to_corner, nq_to_corner+1)[1:]
421            q_calc = (q_to_corner,)
422            # Remember the q radii of the calculated points
423            self._radius = np.sqrt(qx**2 + qy**2)
424            #self._q = q_to_corner
425        self._q_steps = q
426        self.q_calc = q_calc
427
428        # TODO: use cleaner data representation than that from sasview
429        # Resolution function forwards underlying q data (for plotting, etc?)
430        if is2d:
431            if resolution is not None:
432                # forward resolution function info to multiscattering
433                self.qx_data = resolution.qx_data
434                self.qy_data = resolution.qy_data
435            else:
436                # no underlying resolution function, but make it look like there is
437                self.qx_data, self.qy_data = q_calc
438        else:
439            # 1-D radial profile is determined by the q values we need to
440            # compute, either for the calculated q values for the resolution
441            # function (if any) or for the raw q values desired
            self._q = np.linspace(qmin, qmax, int(nq//(2*window)))
443            self._edges = bin_edges(self._q)
444            self._norm, _ = np.histogram(self._radius, bins=self._edges)
445            if resolution is not None:
446                self.q = resolution.q
447            else:
448                # no underlying resolution function, but make it look like there is
449                self.q = self._q
450
451        # Prepare the multiple scattering calculator (either numpy or OpenCL)
452        self.transform = Calculator((2*nq, 2*nq), dtype=dtype)
453
454        # Iq and Iqxy will be set during apply
455        self.Iq = None # type: np.ndarray
456        self.Iqxy = None # type: np.ndarray
457
458    def apply(self, theory):
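        # Build the 2-D pattern on the full (qx, qy) grid: use the theory
        # directly for 2-D data, otherwise spread the 1-D theory over the
        # grid by interpolating on |q|.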
459        if self.is2d:
460            Iq_calc = theory
461        else:
462            Iq_calc = np.interp(self._radius, self.q_calc[0], theory)
463        Iq_calc = Iq_calc.reshape(self.nq, self.nq)
464
465        probability = self.probability() if callable(self.probability) else self.probability
466        coverage = self.coverage
467        #t0 = time.time()
468        Iqxy = self.transform.multiple_scattering(Iq_calc, probability, coverage)
469        #print("multiple scattering calc time", time.time()-t0)
470
471        # remember the intermediate result in case we want to see it later
472        self.Iqxy = Iqxy
473
474        if self.is2d:
475            if self.resolution is not None:
476                Iqxy = self.resolution.apply(Iqxy)
477            return Iqxy
478        else:
479            # remember the intermediate result in case we want to see it later
480            Iq = self.radial_profile(Iqxy)
481            self.Iq = Iq
482            if self.resolution is not None:
483                q = self._q
484                Iq_res = np.interp(np.abs(self.resolution.q_calc), q, self.Iq)
485                _ = """
486                k = 6
487                print("q theory", q[:k])
488                print("Iq theory", theory[:k])
489                print("interp NaN?", np.any(np.isnan(Iq_calc)))
490                print("convolved NaN?", np.any(np.isnan(Iqxy)))
491                print("Iq intgrated", self.Iq[:k])
492                print("radius", self._radius[self.nq/2,self.nq/2:self.nq/2+k])
493                print("q edges", self._edges[:k+1])
494                print("Iq norm", self._norm[:k])
495                print("res q", self.resolution.q_calc[:k])
496                print("Iq res", Iq_res[:k])
497                #print(Iq)
498                #print(Iq_res)
499                """
500                Iq = self.resolution.apply(Iq_res)
501            return Iq
502
503    def radial_profile(self, Iqxy):
504        """
        Compute the radial profile for the given Iqxy grid.  The grid should
        be defined on the same (qx, qy) points used by this calculator.
507        """
508        # circular average, no anti-aliasing
509        Iq = np.histogram(self._radius, bins=self._edges, weights=Iqxy)[0]/self._norm
510        return Iq
511
512
513def annular_average(qxy, Iqxy, qbins):
514    """
515    Compute annular average of points in *Iqxy* at *qbins*.  The $q_x$, $q_y$
516    coordinates for *Iqxy* are given in *qxy*.
517    """
518    qxy, Iqxy = qxy.flatten(), Iqxy.flatten()
519    index = np.argsort(qxy)
520    qxy, Iqxy = qxy[index], Iqxy[index]
521    print(qxy.shape, Iqxy.shape, index.shape, qbins.shape)
522    #values = rebin(np.vstack((0., qxy)), Iqxy, qbins)
523    integral = np.cumsum(Iqxy)
524    Io = np.diff(np.interp(qbins, qxy, integral, left=0.))
525    # normalize by area of annulus
526    # TODO: doesn't properly account for box.
527    # Need to chop off chords that are greater than the box edges
528    # (left, right, top and bottom), then add back in the corners
529    # chopped off by both. https://en.wikipedia.org/wiki/Circular_segment
530    norms = np.diff(pi*qbins**2)
531    return Io/norms
532
533def rebin(x, I, xo):
534    """
535    Rebin from edges *x*, bins I into edges *xo*.
536
537    *x* and *xo* should be monotonically increasing.
538
539    If *x* has duplicate values, then all corresponding values at I(x) will
540    be effectively summed into the same bin.  If *xo* has duplicate values,
541    the first bin will contain the entire contents and subsequent bins will
542    contain zeros.
543    """
544    integral = np.cumsum(I)
545    Io = np.diff(np.interp(xo, x[1:], integral, left=0.))
546    return Io
547
548
549def parse_pars(model, opts):
550    # type: (ModelInfo, argparse.Namespace) -> Dict[str, float]
551    """
552    Parse par=val arguments from the command line.
553    """
554
555    seed = np.random.randint(1000000) if opts.random and opts.seed < 0 else opts.seed
556    compare_opts = {
557        'info': (model.info, model.info),
558        'use_demo': False,
559        'seed': seed,
560        'mono': True,
561        'magnetic': False,
562        'values': opts.pars,
563        #'show_pars': True,
564        'show_pars': False,
565        'is2d': opts.is2d,
566    }
567    # Note: sascomp allows comparison on a pair of models, so ignore the second.
568    pars, _ = compare.parse_pars(compare_opts)
569    return pars
570
571
572def main():
573    parser = argparse.ArgumentParser(
574        description="Compute multiple scattering",
575        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
576        )
577    parser.add_argument('-p', '--probability', type=float, default=0.1,
578                        help="scattering probability")
579    parser.add_argument('-n', '--nq', type=int, default=1024,
580                        help='number of mesh points')
581    parser.add_argument('-q', '--qmax', type=float, default=0.5,
582                        help='max q')
583    parser.add_argument('-w', '--window', type=float, default=2.0,
584                        help='q calc = q max * window')
585    parser.add_argument('-2', '--2d', dest='is2d', action='store_true',
586                        help='oriented sample')
    parser.add_argument('-s', '--seed', type=int, default=-1,
                        help='random pars with given seed')
589    parser.add_argument('-r', '--random', action='store_true',
590                        help='random pars with random seed')
    parser.add_argument('-o', '--outfile', type=str, default="",
                        help='output file basename for saving results')
593    parser.add_argument('model', type=str,
594                        help='sas model name such as cylinder')
595    parser.add_argument('pars', type=str, nargs='*',
596                        help='model parameters such as radius=30')
597    opts = parser.parse_args()
598    assert opts.nq%2 == 0, "require even # points"
599
600    model = core.load_model(opts.model)
601    pars = parse_pars(model, opts)
602    res = MultipleScattering(qmax=opts.qmax, nq=opts.nq, window=opts.window,
603                             probability=opts.probability, is2d=opts.is2d)
604    kernel = model.make_kernel(res.q_calc)
605    #print(pars)
606    bg = pars.get('background', 0.0)
607    pars['background'] = 0.0
608    theory = call_kernel(kernel, pars)
609    Iq = res.apply(theory) + bg
610    plot_and_save_powers(res, theory, Iq, outfile=opts.outfile, background=bg)
611
612def plot_and_save_powers(res, theory, result, plot=True, outfile="", background=0.):
613    import pylab
614    probability, coverage = res.probability, res.coverage
615    weights = scattering_coeffs(probability, coverage)
616
617    # cribbed from MultipleScattering.apply
618    if res.is2d:
619        Iq_calc = theory
620    else:
621        Iq_calc = np.interp(res._radius, res.q_calc[0], theory)
622    Iq_calc = Iq_calc.reshape(res.nq, res.nq)
623
624    # Compute the scattering powers for 1, 2, ... n scattering events
625    powers = scattering_powers(Iq_calc, len(weights))
626
627    #plotxy(Iqxy); import pylab; pylab.figure()
628    if res.is2d:
629        if outfile:
630            data = np.vstack([Ipower.flatten() for Ipower in powers]).T
631            np.savetxt(outfile + "_powers.txt", data)
632            data = np.vstack(Iq_calc).T
633            np.savetxt(outfile + ".txt", data)
634        if plot:
635            plotxy((res._q_steps, res._q_steps), Iq_calc)
636            pylab.title("single scattering F")
637            for k, v in enumerate(powers[1:]):
638                pylab.figure()
639                plotxy((res._q_steps, res._q_steps), v+background)
640                pylab.title("multiple scattering F^%d" % (k+2))
641            pylab.figure()
642            plotxy((res._q_steps, res._q_steps), res.Iqxy+background)
643            pylab.title("total scattering for p=%g" % probability)
644            if res.resolution is not None:
645                pylab.figure()
646                plotxy((res._q_steps, res._q_steps), result)
647                pylab.title("total scattering with resolution")
648    else:
649        q = res._q
650        Iq_powers = [res.radial_profile(Iqxy) for Iqxy in powers]
651        if outfile:
652            data = np.vstack([q, theory, res.Iq]).T
653            np.savetxt(outfile + ".txt", data)
654            # circular average, no anti-aliasing for individual powers
655            data = np.vstack([q] + Iq_powers).T
656            np.savetxt(outfile + "_powers.txt", data)
657        if plot:
658            # Plot 2D pattern for total scattering
659            plotxy((res._q_steps, res._q_steps), res.Iqxy+background)
660            pylab.title("total scattering for p=%g" % probability)
661            pylab.figure()
662
663            # Plot 1D pattern for partial scattering
664            pylab.loglog(q, res.Iq+background, label="total for p=%g"%probability)
665            if res.resolution is not None:
666                pylab.loglog(q, result, label="total with dQ")
667            #new_annulus = annular_average(res._radius, res.Iqxy, res._edges)
668            #pylab.loglog(q, new_annulus+background, label="new total for p=%g"%probability)
669            for n, (w, Ipower) in enumerate(zip(weights, Iq_powers)):
670                pylab.loglog(q, w*Ipower+background, label="scattering^%d"%(n+1))
671            pylab.legend()
672            pylab.title('total scattering for p=%g' % probability)
673    pylab.show()
674
675def plotxy(q, Iq):
676    import pylab
677    # q is a tuple of (q,) or (qx, qy)
678    if len(q) == 1:
679        pylab.loglog(q[0], Iq)
680    else:
681        data = Iq.copy()
682        data[Iq <= 0] = np.min(Iq[Iq > 0])/2
683        pylab.imshow(np.log10(data))
684
685if __name__ == "__main__":
686    main()