#!/usr/bin/env python
r"""
Multiple scattering calculator

Calculate multiple scattering using 2D FFT convolution.

Usage:

    -p, --probability: the scattering probability
    -q, --qmax: the maximum q of interest
    -w, --window: the extension window (q is calculated for qmax*window)
    -n, --nq: the number of mesh points (dq = qmax*window/nq)
    -r, --random: generate a random parameter set
    -2, --2d: perform the calculation for an oriented pattern
    model_name
    model_par=value ...
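
For example (an illustrative invocation; the script file name is assumed)::

    python multiscat.py -p 0.5 cylinder radius=30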

Assume the probability of scattering is $p$. After each scattering event,
$1-p$ neutrons will leave the system and go to the detector, and the remaining
$p$ will scatter again.

Let the scattering probability for $n$ scattering events at $q$ be $f_n(q)$,
where
.. math:: f_1(q) = \frac{I_1(q)}{\int I_1(q) dq}
for $I_1(q)$, the single scattering from the system. After two scattering
events, the scattering probability will be the convolution of the first
scattering and itself, or $f_2(q) = (f_1*f_1)(q)$. After $n$ events it will be
$f_n(q) = (f_1 * \cdots * f_1)(q)$. The total scattering is calculated
as the weighted sum of $f_k$, with weights following the Poisson distribution
.. math:: P(k; \lambda) = \frac{\lambda^k e^{-\lambda}}{k!}
for $\lambda$ determined by the total thickness divided by the mean
free path between scattering, giving
.. math::
    I(q) = \sum_{k=0}^\infty P(k; \lambda) f_k(q)
The $k=0$ term is ignored since it involves no scattering.
We truncate the series once the cumulative probability reaches the cutoff
$C=99\%$, keeping terms up to the largest $n$ such that
.. math::
    \sum_{k=1}^n \frac{P(k; \lambda)}{1 - P(0; \lambda)} < C

Using the convolution theorem, where
$F = \mathcal{F}(f)$ is the Fourier transform,
.. math:: f * g = \mathcal{F}^{-1}\{\mathcal{F}\{f\} \cdot \mathcal{F}\{g\}\}
so
.. math:: f * \ldots * f = \mathcal{F}^{-1}\{ F^n \}
Since the Fourier transform is a linear operator, we can move the polynomial
expression for the convolution into the transform, giving
.. math::
    I(q) = \mathcal{F}^{-1}\left\{ \sum_{k=1}^{n} P(k; \lambda) F^k \right\}
In the dilute limit $L \rightarrow 0$ only the $k=1$ term is active,
and so
.. math::
    P(1; \lambda) = \lambda e^{-\lambda} = \int I_1(q) dq
therefore we compute
.. math::
    I(q) = \int I_1(q) dq \, \mathcal{F}^{-1}\left\{
        \sum_{k=1}^{n} \frac{P(k; \lambda)}{P(1; \lambda)} F^k \right\}
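
As a quick, illustrative check of the convolution theorem used above (1D,
with zero padding so that the circular convolution matches the linear one)::

    import numpy as np
    f, g = np.random.rand(8), np.random.rand(8)
    fft_conv = np.fft.ifft(np.fft.fft(f, 16) * np.fft.fft(g, 16)).real[:15]
    assert np.allclose(fft_conv, np.convolve(f, g))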

For speed we may use the fast Fourier transform with a power of two.
The resulting $I(q)$ will be linearly spaced and likely heavily oversampled.
The usual pinhole or slit resolution calculation can be performed from these
calculated values.
"""

from __future__ import print_function, division

import argparse
import time

import numpy as np
from scipy.special import gamma

from sasmodels import core
from sasmodels import compare
from sasmodels import resolution2d
from sasmodels.resolution import Resolution, bin_edges
from sasmodels.data import empty_data1D, empty_data2D, plot_data
from sasmodels.direct_model import call_kernel


class MultipleScattering(Resolution):
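    """
    Compute multiple scattering as a resolution effect on a calculated theory.

    *qmax* is the maximum q of interest, *nq* is the (even) number of mesh
    points, *probability* is the scattering probability and *is2d* selects an
    oriented 2D pattern.  The mesh extends to qmax*window, and if *power* is
    positive only the nth scattering pattern is computed rather than the
    Poisson-weighted sum.
    """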
    def __init__(self, qmax, nq, probability, is2d, window=2, power=0):
        self.qmax = qmax
        self.nq = nq
        self.power = power
        self.probability = probability
        self.is2d = is2d
        self.window = window

        q_range = qmax * window
        q = np.linspace(-q_range, q_range, nq)
        qx, qy = np.meshgrid(q, q)

        if is2d:
            q_calc = [qx.flatten(), qy.flatten()]
        else:
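            # For the 1D case, the theory is computed on a radial q vector that
            # reaches the grid corners (sqrt(2)*q_range) so that every pixel of
            # the square mesh can be filled by interpolation; _norm counts the
            # pixels in each annulus for the circular average in apply().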
            q_range_corners = np.sqrt(2.) * q_range
            nq_corners = int(np.sqrt(2.) * nq/2)
            q_corners = np.linspace(0, q_range_corners, nq_corners+1)[1:]
            q_calc = [q_corners]
            self._qxy = np.sqrt(qx**2 + qy**2)
            self._edges = bin_edges(q_corners)
            self._norm = np.histogram(self._qxy, bins=self._edges)[0]

        self.q_calc = q_calc
        self.q_range = q_range

    def apply(self, theory, background=0.0, outfile="", plot=False):
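        """
        Apply multiple scattering to *theory*, the intensity computed on
        self.q_calc.  Returns the 2D pattern when *is2d* is set, otherwise
        the circularly averaged (q, Iq).  If *outfile* is given, the computed
        patterns are also saved to text files.
        """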
        #t0 = time.time()
        if self.is2d:
            Iq_calc = theory
        else:
            q_corners = self.q_calc[0]
            Iq_calc = np.interp(self._qxy, q_corners, theory)
            Iq_calc = Iq_calc.reshape(self.nq, self.nq)
        #plotxy(Iq_calc); import pylab; pylab.figure()
        if self.power > 0:
            Iqxy = scattering_power(Iq_calc, self.power)
            powers = []
        else:
            Iqxy, powers = multiple_scattering(
                Iq_calc, self.probability,
                coverage=0.99,
                return_powers=(outfile != "") or plot,
                )
        #print("multiple scattering calc time", time.time()-t0)

        #plotxy(Iqxy); import pylab; pylab.figure()
        if self.is2d:
            if outfile and powers:
                data = np.vstack([Ipower.flatten() for Ipower in powers]).T
                np.savetxt(outfile + "_powers.txt", data)
            if outfile:
                data = np.vstack(Iq_calc).T
                np.savetxt(outfile + ".txt", data)
            if plot:
                import pylab
                plotxy(Iq_calc)
                pylab.title("single scattering")
                pylab.figure()

            return Iqxy + background
        else:
            # circular average, no anti-aliasing
            Iq = np.histogram(self._qxy, bins=self._edges, weights=Iqxy)[0]/self._norm
            if powers:
                Iq_powers = [np.histogram(self._qxy, bins=self._edges, weights=Ipower)[0]/self._norm
                             for Ipower in powers]  # or powers[:5] for the first five

            if outfile:
                data = np.vstack([q_corners, theory, Iq]).T
                np.savetxt(outfile + ".txt", data)
            if outfile and powers:
                # circular average, no anti-aliasing for individual powers
                data = np.vstack([q_corners] + Iq_powers).T
                np.savetxt(outfile + "_powers.txt", data)
            if plot:
                import pylab
                plotxy(Iqxy)
                pylab.title("multiple scattering")
                pylab.figure()
            if plot and powers:
                import pylab
                L = -np.log(1-self.probability)
                pylab.loglog(q_corners, Iq, label="total %g"%self.probability)
                for n, Ipower in enumerate(Iq_powers):
                    k = n+1
                    w = L**(k-1)/gamma(k+1)
                    pylab.loglog(q_corners, w*Ipower, label="scattering**%d"%k)
                pylab.legend()
                pylab.figure()

            return q_corners, Iq + background

def scattering_power(Iq, n):
    r"""
    Calculate the nth scattering power as a distribution. To get the
    weighted contribution, scale by $\lambda^k e^{-\lambda}/k!$.
    """
    scale = np.sum(Iq)
    F = _forward_fft(Iq/scale)
    result = _inverse_fft(F**n)
    return result

def multiple_scattering(Iq, p, coverage=0.99, return_powers=False):
    r"""
    Compute multiple scattering for I(q) given scattering probability p.

    Given a probability p of scattering within the sample thickness, the
    expected number of scattering events $\lambda$ is $-\log(1 - p)$, giving a
    Poisson weighted sum of single, double, triple, etc. scattering patterns.
    The number of patterns used is based on coverage (default 99%).
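
    A minimal usage sketch (illustrative values only)::

        Iq = np.ones((8, 8))  # any square pattern with an even number of points
        total, powers = multiple_scattering(Iq, p=0.5, return_powers=True)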
    """
    L = -np.log(1-p)
    num_scatter = truncated_poisson_invcdf(coverage, L)

    # Compute multiple scattering via convolution.
    scale = np.sum(Iq)
    F = _forward_fft(Iq/scale)
    coeffs = [L**(k-1)/gamma(k+1) for k in range(1, num_scatter+1)]
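    # polyval evaluates sum_k coeffs[k-1]*F^(k-1); multiplying by F gives the
    # series sum_k L^(k-1)/k! F^k, i.e. the Poisson weights relative to the
    # single scattering weight P(1; L) = L*exp(-L).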
    multiple_scattering = F * np.polyval(coeffs[::-1], F)
    result = scale * _inverse_fft(multiple_scattering)

    if return_powers:
        powers = [scale * _inverse_fft(F**k) for k in range(1, num_scatter+1)]
    else:
        powers = []

    return result, powers

def truncated_poisson_invcdf(p, L):
    """
    Return the smallest k such that cdf(k; L) > p from the truncated Poisson
    probability excluding k=0.
    """
    # pmf(k; L) = L**k * exp(-L) / (k! * (1 - exp(-L)))
    cdf = 0
    pmf = -np.exp(-L) / np.expm1(-L)
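    # Start from the k=0 value exp(-L)/(1-exp(-L)); each pass through the loop
    # multiplies by L/k to step from pmf(k-1) to pmf(k).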
    k = 0
    while cdf < p:
        k += 1
        pmf *= L/k
        cdf += pmf
    return k

def _forward_fft(Iq):
    # Prepare padded array and forward transform
    nq = Iq.shape[0]
    half_nq = nq//2
    frame = np.zeros((2*nq, 2*nq))
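    # Zero-pad to 2*nq (to avoid wrap-around in the circular convolution) and
    # swap quadrants so the center of the pattern lands at frame[0, 0], the
    # origin convention used by np.fft.fft2.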
    frame[:half_nq, :half_nq] = Iq[half_nq:, half_nq:]
    frame[-half_nq:, :half_nq] = Iq[:half_nq, half_nq:]
    frame[:half_nq, -half_nq:] = Iq[half_nq:, :half_nq]
    frame[-half_nq:, -half_nq:] = Iq[:half_nq, :half_nq]
    fourier_frame = np.fft.fft2(frame)
    return fourier_frame

def _inverse_fft(fourier_frame):
    # Invert the transform and recover the transformed data
    nq = fourier_frame.shape[0]//2
    half_nq = nq//2
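    # Undo the quadrant swap from _forward_fft, keeping only the original
    # nq x nq region of the padded frame.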
    frame = np.fft.ifft2(fourier_frame).real
    Iq = np.empty((nq, nq))
    Iq[half_nq:, half_nq:] = frame[:half_nq, :half_nq]
    Iq[:half_nq, half_nq:] = frame[-half_nq:, :half_nq]
    Iq[half_nq:, :half_nq] = frame[:half_nq, -half_nq:]
    Iq[:half_nq, :half_nq] = frame[-half_nq:, -half_nq:]
    return Iq

def parse_pars(model, opts):
    # type: (ModelInfo, argparse.Namespace) -> Dict[str, float]
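    """
    Build the parameter set for *model* from the command-line options,
    reusing the parameter parsing from sasmodels.compare.
    """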

    seed = np.random.randint(1000000) if opts.random and opts.seed < 0 else opts.seed
    compare_opts = {
        'info': (model.info, model.info),
        'use_demo': False,
        'seed': seed,
        'mono': True,
        'magnetic': False,
        'values': opts.pars,
        'show_pars': True,
        'is2d': opts.is2d,
    }
    pars, pars2 = compare.parse_pars(compare_opts)
    return pars


def main():
    parser = argparse.ArgumentParser(
        description="Compute multiple scattering",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        )
    parser.add_argument('-k', '--power', type=int, default=0, help="show pattern for nth scattering")
    parser.add_argument('-p', '--probability', type=float, default=0.1, help="scattering probability")
    parser.add_argument('-n', '--nq', type=int, default=1024, help='number of mesh points')
    parser.add_argument('-q', '--qmax', type=float, default=0.5, help='max q')
    parser.add_argument('-w', '--window', type=float, default=2.0, help='q calc = q max * window')
    parser.add_argument('-2', '--2d', dest='is2d', action='store_true', help='oriented sample')
    parser.add_argument('-s', '--seed', type=int, default=-1, help='random pars with given seed')
    parser.add_argument('-r', '--random', action='store_true', help='random pars with random seed')
    parser.add_argument('-o', '--outfile', type=str, default="", help='base name for output files')
    parser.add_argument('model', type=str, help='sas model name such as cylinder')
    parser.add_argument('pars', type=str, nargs='*', help='model parameters such as radius=30')
    opts = parser.parse_args()
    assert opts.nq%2 == 0, "require even # points"

    model = core.load_model(opts.model)
    pars = parse_pars(model, opts)
    res = MultipleScattering(opts.qmax, opts.nq, opts.probability, opts.is2d,
                             window=opts.window, power=opts.power)
    kernel = model.make_kernel(res.q_calc)
    #print(pars)
    bg = pars.get('background', 0.0)
    pars['background'] = 0.0
    Iq_calc = call_kernel(kernel, pars)
    Iq = res.apply(Iq_calc, background=bg, outfile=opts.outfile, plot=True)
    plotxy(Iq)
    import pylab
    if opts.power > 0:
        pylab.title('scattering power %d'%opts.power)
    else:
        pylab.title('multiple scattering with fraction %g'%opts.probability)
    pylab.show()

def plotxy(Iq):
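    """
    Plot Iq as a log-log curve if given a (q, Iq) tuple, otherwise as a
    log-scale image of the 2D pattern.
    """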
    import pylab
    if isinstance(Iq, tuple):
        q, Iq = Iq
        pylab.loglog(q, Iq)
    else:
        data = Iq+0.
        data[Iq <= 0] = np.min(Iq[Iq>0])/2
        pylab.imshow(np.log10(data))
    #import pylab; pylab.show()

if __name__ == "__main__":
    main()