#!/usr/bin/env python
r"""
Multiple scattering calculator

Calculate multiple scattering using 2D FFT convolution.

Usage:

-p, --probability: the scattering probability
-q, --qmax: the maximum q of interest
-w, --window: the extension window (q is calculated for qmax*window)
-n, --nq: the number of mesh points (dq = qmax*window/nq)
-r, --random: generate a random parameter set
-2, --2d: perform the calculation for an oriented pattern
model_name
model_par=value ...
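
For example (an illustrative command line; the script name assumes this
module is saved as multiscat.py, and the option and parameter values are
arbitrary):

    python multiscat.py -p 0.8 -q 0.2 cylinder radius=30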

Assume the probability of scattering is $p$. After each scattering event,
a fraction $1-p$ of the neutrons will leave the system and go to the
detector, and the remaining fraction $p$ will scatter again.

Let the scattering probability for $n$ scattering events at $q$ be $f_n(q)$,
where
.. math:: f_1(q) = \frac{I_1(q)}{\int I_1(q) dq}
for $I_1(q)$, the single scattering from the system. After two scattering
events, the scattering probability will be the convolution of the first
scattering and itself, or $f_2(q) = (f_1*f_1)(q)$. After $n$ events it will be
$f_n(q) = (f_1 * \cdots * f_1)(q)$. The total scattering is calculated
as the weighted sum of $f_k$, with weights following the Poisson distribution
.. math:: P(k; \lambda) = \frac{\lambda^k e^{-\lambda}}{k!}
for $\lambda$ determined by the total thickness divided by the mean
free path between scattering events, giving
.. math::
    I(q) = \sum_{k=0}^\infty P(k; \lambda) f_k(q)
The $k=0$ term is ignored since it involves no scattering.
We truncate the series at the smallest $n$ for which the cumulative
probability exceeds the cutoff $C=99\%$, that is, the smallest $n$ such that
.. math::
    \sum_{k=1}^n \frac{P(k; \lambda)}{1 - P(0; \lambda)} \ge C
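
For example (an illustrative value: assuming a scattering probability of
$p = 0.1$, so that $\lambda = -\ln(1-p) \approx 0.105$), the first two terms
already cover more than 99% of the truncated probability, so the series is
cut at $n = 2$.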

Using the convolution theorem, where
$F = \mathcal{F}(f)$ is the Fourier transform,
.. math:: f * g = \mathcal{F}^{-1}\{\mathcal{F}\{f\} \cdot \mathcal{F}\{g\}\}
so
.. math:: f * \ldots * f = \mathcal{F}^{-1}\{ F^n \}
Since the Fourier transform is a linear operator, we can move the polynomial
expression for the convolution into the transform, giving
.. math::
    I(q) = \mathcal{F}^{-1}\left\{ \sum_{k=1}^{n} P(k; \lambda) F^k \right\}
In the dilute limit $\lambda \rightarrow 0$ only the $k=1$ term is active,
and so
.. math::
    P(1; \lambda) = \lambda e^{-\lambda} = \int I_1(q) dq
therefore we compute
.. math::
    I(q) = \int I_1(q) dq \, \mathcal{F}^{-1}\left\{
        \sum_{k=1}^{n} \frac{P(k; \lambda)}{P(1; \lambda)} F^k \right\}

For speed we may use the fast Fourier transform with a power of two.
The resulting $I(q)$ will be linearly spaced and likely heavily oversampled.
The usual pinhole or slit resolution calculation can be performed from these
calculated values.
"""

from __future__ import print_function, division

import argparse
import time

import numpy as np
from scipy.special import gamma

from sasmodels import core
from sasmodels import compare
from sasmodels import resolution2d
from sasmodels.resolution import Resolution, bin_edges
from sasmodels.data import empty_data1D, empty_data2D, plot_data
from sasmodels.direct_model import call_kernel


class MultipleScattering(Resolution):
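    """
    Compute multiple scattering using Fourier convolution.

    *qmax* and *nq* define the q mesh, which is extended by *window*
    beyond qmax.  *probability* is the scattering probability.  *is2d*
    selects an oriented 2D pattern rather than a circularly averaged 1D
    curve.  If *power* is greater than zero, apply() returns the pattern
    for exactly *power* scattering events rather than the Poisson-weighted
    sum over all orders.
    """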
    def __init__(self, qmax, nq, probability, is2d, window=2, power=0):
        self.qmax = qmax
        self.nq = nq
        self.power = power
        self.probability = probability
        self.is2d = is2d
        self.window = window

        q_range = qmax * window
        q = np.linspace(-q_range, q_range, nq)
        qx, qy = np.meshgrid(q, q)

        if is2d:
            q_calc = [qx.flatten(), qy.flatten()]
        else:
            q_range_corners = np.sqrt(2.) * q_range
            nq_corners = int(np.sqrt(2.) * nq/2)
            q_corners = np.linspace(0, q_range_corners, nq_corners+1)[1:]
            q_calc = [q_corners]
            self._qxy = np.sqrt(qx**2 + qy**2)
            self._edges = bin_edges(q_corners)
            self._norm = np.histogram(self._qxy, bins=self._edges)[0]

        self.q_calc = q_calc
        self.q_range = q_range

    def apply(self, theory, background=0.0, outfile="", plot=False):
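        """
        Apply multiple scattering to *theory*, the scattering computed on
        self.q_calc.  For the 2D case the smeared I(qx, qy) plus
        *background* is returned; for the 1D case the circularly averaged
        (q, Iq + background) is returned.  If *outfile* is given, the
        results (and the individual scattering orders) are saved to text
        files; if *plot* is True, intermediate results are plotted with
        pylab.
        """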
        #t0 = time.time()
        if self.is2d:
            Iq_calc = theory
        else:
            q_corners = self.q_calc[0]
            Iq_calc = np.interp(self._qxy, q_corners, theory)
            Iq_calc = Iq_calc.reshape(self.nq, self.nq)
        #plotxy(Iq_calc); import pylab; pylab.figure()
        if self.power > 0:
            Iqxy = scattering_power(Iq_calc, self.power)
            powers = []
        else:
            Iqxy, powers = multiple_scattering(
                Iq_calc, self.probability,
                coverage=0.99,
                return_powers=(outfile != "") or plot,
            )
        #print("multiple scattering calc time", time.time()-t0)

        #plotxy(Iqxy); import pylab; pylab.figure()
        if self.is2d:
            if outfile and powers:
                data = np.vstack([Ipower.flatten() for Ipower in powers]).T
                np.savetxt(outfile + "_powers.txt", data)
            if outfile:
                data = np.vstack(Iq_calc).T
                np.savetxt(outfile + ".txt", data)
            if plot:
                import pylab
                plotxy(Iq_calc)
                pylab.title("single scattering")
                pylab.figure()

            return Iqxy + background
        else:
            # circular average, no anti-aliasing
            Iq = np.histogram(self._qxy, bins=self._edges, weights=Iqxy)[0]/self._norm
            if powers:
                Iq_powers = [np.histogram(self._qxy, bins=self._edges, weights=Ipower)[0]/self._norm
                             for Ipower in powers]  # or powers[:5] for the first five

            if outfile:
                data = np.vstack([q_corners, theory, Iq]).T
                np.savetxt(outfile + ".txt", data)
            if outfile and powers:
                # circular average, no anti-aliasing for individual powers
                data = np.vstack([q_corners] + Iq_powers).T
                np.savetxt(outfile + "_powers.txt", data)
            if plot:
                import pylab
                plotxy(Iqxy)
                pylab.title("multiple scattering")
                pylab.figure()
            if plot and powers:
                import pylab
                for k, Ipower in enumerate(Iq_powers):
                    pylab.loglog(q_corners, Ipower, label="scattering=%d"%(k+1))
                pylab.legend()
                pylab.figure()

            return q_corners, Iq + background

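
# A usage sketch for the resolution class (illustrative only; not called
# anywhere in this script).  Any theory curve evaluated on res.q_calc can
# be passed to apply(); here the curve is a made-up power law rather than
# a sasmodels kernel.  See main() below for the full sasmodels wiring.
def _demo_resolution(qmax=0.5, nq=256, probability=0.1):
    res = MultipleScattering(qmax, nq, probability, is2d=False)
    q = res.q_calc[0]
    theory = 1e-4*q**-4  # synthetic single-scattering curve
    q_out, Iq = res.apply(theory)
    return q_out, Iq
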
def scattering_power(Iq, n):
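    """
    Return the scattering pattern for exactly *n* scattering events,
    computed as the n-fold self-convolution of the normalized pattern
    *Iq*, scaled back to the total weight of *Iq*.
    """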
    scale = np.sum(Iq)
    F = _forward_fft(Iq/scale)
    result = scale * _inverse_fft(F**n)
    return result

def multiple_scattering(Iq, p, coverage=0.99, return_powers=False):
    r"""
    Compute multiple scattering for I(q) given scattering probability p.

    Given a probability p of scattering for the given thickness, the
    expected number of scattering events, $\lambda$, is $-\log(1 - p)$,
    giving a Poisson weighted sum of single, double, triple, ... scattering
    patterns.  The number of patterns used is based on coverage (default
    99%).  If return_powers is True, also return the list of individual
    patterns for one, two, ... scattering events.
    """
    L = -np.log(1-p)
    num_scatter = truncated_poisson_invcdf(coverage, L)

    # Compute multiple scattering via convolution.
    scale = np.sum(Iq)
    F = _forward_fft(Iq/scale)
    coeffs = [L**(k-1)/gamma(k+1) for k in range(1, num_scatter+1)]
    multiple_scattering = F * np.polyval(coeffs[::-1], F)
    result = scale * _inverse_fft(multiple_scattering)

    if return_powers:
        powers = [scale * _inverse_fft(F**k) for k in range(1, num_scatter+1)]
    else:
        powers = []

    return result, powers

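
# A minimal usage sketch for multiple_scattering (illustrative only; not
# called anywhere in this script): build a synthetic single-scattering
# pattern on a square grid and compute the multiply-scattered pattern.
# The grid size and probability below are arbitrary.
def _demo_multiple_scattering(nq=256, p=0.5):
    q = np.linspace(-1, 1, nq)
    qx, qy = np.meshgrid(q, q)
    Iq_single = np.exp(-(qx**2 + qy**2)/0.01)  # synthetic single scattering
    Iq_multiple, powers = multiple_scattering(Iq_single, p, return_powers=True)
    return Iq_multiple, powers
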
def truncated_poisson_invcdf(p, L):
    """
    Return the smallest k such that cdf(k; L) > p from the truncated Poisson
    probability excluding k=0
    """
    # pmf(k; L) = L**k * exp(-L) / (k! * (1-exp(-L)))
    cdf = 0
    pmf = -np.exp(-L) / np.expm1(-L)
    k = 0
    while cdf < p:
        k += 1
        pmf *= L/k
        cdf += pmf
    return k

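
# Illustrative spot check (hand-computed): for an expected number of
# scattering events L = 1, the first four terms of the truncated Poisson
# cover more than 99% of the probability, so
#     truncated_poisson_invcdf(0.99, 1.0)
# returns 4.
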
def _forward_fft(Iq):
    # Prepare padded array and forward transform
    nq = Iq.shape[0]
    half_nq = nq//2
    frame = np.zeros((2*nq, 2*nq))
    frame[:half_nq, :half_nq] = Iq[half_nq:, half_nq:]
    frame[-half_nq:, :half_nq] = Iq[:half_nq, half_nq:]
    frame[:half_nq, -half_nq:] = Iq[half_nq:, :half_nq]
    frame[-half_nq:, -half_nq:] = Iq[:half_nq, :half_nq]
    fourier_frame = np.fft.fft2(frame)
    return fourier_frame

def _inverse_fft(fourier_frame):
    # Invert the transform and recover the transformed data
    nq = fourier_frame.shape[0]//2
    half_nq = nq//2
    frame = np.fft.ifft2(fourier_frame).real
    Iq = np.empty((nq, nq))
    Iq[half_nq:, half_nq:] = frame[:half_nq, :half_nq]
    Iq[:half_nq, half_nq:] = frame[-half_nq:, :half_nq]
    Iq[half_nq:, :half_nq] = frame[:half_nq, -half_nq:]
    Iq[:half_nq, :half_nq] = frame[-half_nq:, -half_nq:]
    return Iq

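
# Sanity-check sketch for the transform pair (illustrative only; not called
# anywhere in this script): the padded forward and inverse transforms should
# round-trip a pattern to within floating point error.
def _check_fft_roundtrip(nq=64):
    Iq = np.random.rand(nq, nq)
    err = np.max(np.abs(_inverse_fft(_forward_fft(Iq)) - Iq))
    return err  # expected to be tiny (~1e-15)
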
def parse_pars(model, opts):
    # type: (ModelInfo, argparse.Namespace) -> Dict[str, float]

    seed = np.random.randint(1000000) if opts.random and opts.seed < 0 else opts.seed
    compare_opts = {
        'info': (model.info, model.info),
        'use_demo': False,
        'seed': seed,
        'mono': True,
        'magnetic': False,
        'values': opts.pars,
        'show_pars': True,
        'is2d': opts.is2d,
    }
    pars, pars2 = compare.parse_pars(compare_opts)
    return pars


def main():
    parser = argparse.ArgumentParser(
        description="Compute multiple scattering",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        )
    parser.add_argument('-k', '--power', type=int, default=0, help="show pattern for nth scattering")
    parser.add_argument('-p', '--probability', type=float, default=0.1, help="scattering probability")
    parser.add_argument('-n', '--nq', type=int, default=1024, help='number of mesh points')
    parser.add_argument('-q', '--qmax', type=float, default=0.5, help='max q')
    parser.add_argument('-w', '--window', type=float, default=2.0, help='q calc = q max * window')
    parser.add_argument('-2', '--2d', dest='is2d', action='store_true', help='oriented sample')
    parser.add_argument('-s', '--seed', type=int, default=-1, help='random pars with given seed')
    parser.add_argument('-r', '--random', action='store_true', help='random pars with random seed')
    parser.add_argument('-o', '--outfile', type=str, default="", help='base name for output files')
    parser.add_argument('model', type=str, help='sas model name such as cylinder')
    parser.add_argument('pars', type=str, nargs='*', help='model parameters such as radius=30')
    opts = parser.parse_args()
    assert opts.nq%2 == 0, "require even # points"

    model = core.load_model(opts.model)
    pars = parse_pars(model, opts)
    res = MultipleScattering(opts.qmax, opts.nq, opts.probability, opts.is2d,
                             window=opts.window, power=opts.power)
    kernel = model.make_kernel(res.q_calc)
    #print(pars)
    bg = pars.get('background', 0.0)
    pars['background'] = 0.0
    Iq_calc = call_kernel(kernel, pars)
    Iq = res.apply(Iq_calc, background=bg, outfile=opts.outfile, plot=True)
    plotxy(Iq)
    import pylab
    if opts.power > 0:
        pylab.title('scattering power %d'%opts.power)
    else:
        pylab.title('multiple scattering with fraction %g'%opts.probability)
    pylab.show()

def plotxy(Iq):
    import pylab
    if isinstance(Iq, tuple):
        q, Iq = Iq
        pylab.loglog(q, Iq)
    else:
        data = Iq+0.
        data[Iq <= 0] = np.min(Iq[Iq>0])/2
        pylab.imshow(np.log10(data))
    #import pylab; pylab.show()

if __name__ == "__main__":
    main()