"""
#This software was developed by the University of Tennessee as part of the
#Distributed Data Analysis of Neutron Scattering Experiments (DANSE)
#project funded by the US National Science Foundation.
#See the license text in license.txt
"""
---|
7 | from __future__ import division |
---|
8 | |
---|
9 | import numpy as np # type: ignore |
---|
10 | from numpy import pi, cos, sin, sqrt # type: ignore |
---|
11 | |
---|
12 | from . import resolution |
---|
13 | from .resolution import Resolution |
---|
14 | |
---|
## Resolution widths below this value are treated as zero (singular point)
SIGMA_ZERO = 1.0e-010
## Limit of how many sigmas to be covered for the Gaussian smearing
# default: 2.5 to cover 98.7% of Gaussian
NSIGMA = 3.0
## Default number of over-sampling bins along r, keyed by accuracy level
NR = {'xhigh': 10, 'high': 5, 'med': 5, 'low': 3}
## Default number of over-sampling bins along phi, keyed by accuracy level
NPHI = {'xhigh': 20, 'high': 12, 'med': 6, 'low': 4}

## Default number of qy points on each side of zero for Slit2D smearing
N_SLIT_PERP = {'xhigh': 1000, 'high': 500, 'med': 200, 'low': 50}
## Human-readable summary of the full qy grid size (2*n + 1 points per
## level), listed from coarsest to finest accuracy for use in docstrings.
N_SLIT_PERP_DOC = ", ".join(
    "%s=%d" % (key, 2*N_SLIT_PERP[key] + 1)
    for key in sorted(N_SLIT_PERP, key=N_SLIT_PERP.get))
29 | |
---|
class Pinhole2D(Resolution):
    """
    Gaussian Q smearing class for SAS 2d data
    """

    def __init__(self, data=None, index=None,
                 nsigma=NSIGMA, accuracy='Low', coords='polar'):
        """
        Assumption: equally spaced bins in dq_r, dq_phi space.

        :param data: 2d data used to set the smearing parameters
        :param index: 1d array with len(data) to define the range
            of the calculation: elements are given as True or False
        :param nsigma: number of sigmas of the Gaussian covered by the
            radial over-sampling grid
        :param accuracy: 'xhigh', 'high', 'med' or 'low' (case
            insensitive); selects the number of over-sampling bins
            from the NR and NPHI tables
        :param coords: coordinates [string], 'polar' or 'cartesian'
        """
        ## Accuracy: Higher stands for more sampling points in both directions
        ## of r and phi.
        ## number of bins in r axis for over-sampling
        self.nr = NR[accuracy.lower()]
        ## number of bins in phi axis for over-sampling
        self.nphi = NPHI[accuracy.lower()]
        ## maximum nsigmas
        self.nsigma = nsigma
        self.coords = coords
        self._init_data(data, index)

    def _init_data(self, data, index):
        """
        Get qx_data, qy_data, dqx_data, dqy_data from *data*, restricted
        to *index*, and precompute the over-sampled q points (q_calc) and
        their Gaussian weights when resolution information is present.
        """
        # TODO: maybe don't need to hold copy of qx,qy,dqx,dqy,data,index
        # just need q_calc and weights
        self.data = data
        # slice(None) selects every point when no index mask is given
        self.index = index if index is not None else slice(None)

        self.qx_data = data.qx_data[self.index]
        self.qy_data = data.qy_data[self.index]
        self.q_data = data.q_data[self.index]

        dqx = getattr(data, 'dqx_data', None)
        dqy = getattr(data, 'dqy_data', None)
        if dqx is not None and dqy is not None:
            # Here dqx and dqy mean dq_parr and dq_perp
            self.dqx_data = dqx[self.index]
            self.dqy_data = dqy[self.index]
            ## Remove singular points if exists
            # NOTE(review): when index is slice(None) the indexing above
            # yields a view, so this clamp writes back into data.dqx_data
            # and data.dqy_data -- confirm that is intended
            self.dqx_data[self.dqx_data < SIGMA_ZERO] = SIGMA_ZERO
            self.dqy_data[self.dqy_data < SIGMA_ZERO] = SIGMA_ZERO
            qx_calc, qy_calc, weights = self._calc_res()
            self.q_calc = [qx_calc, qy_calc]
            self.q_calc_weights = weights
        else:
            # No resolution information
            self.dqx_data = self.dqy_data = None
            self.q_calc = [self.qx_data, self.qy_data]
            self.q_calc_weights = None

        #self.phi_data = np.arctan(self.qx_data / self.qy_data)

    def _calc_res(self):
        """
        Over sampling of r_nbins times phi_nbins, calculate Gaussian weights,
        then find smeared intensity.

        :return: (qx, qy, weights) where qx and qy are 1d arrays of length
            nr*nphi*nq giving the over-sampled points for every data point,
            and weights is the length nr*nphi Gaussian weight of each bin.
        """
        nr, nphi = self.nr, self.nphi
        # Total number of bins = # of bins
        # in dq_r-direction times # of bins in dq_phi-direction
        nbins = nr * nphi
        # Width of one radial bin, in units of sigma
        bin_size = self.nsigma / nr
        # data length in the range of self.index
        nq = len(self.qx_data)

        # Mean values of dqr at each bins
        # starting from the half of bin size
        r = bin_size / 2.0 + np.arange(nr) * bin_size
        # mean values of qphi at each bines
        phi = np.arange(nphi)
        dphi = phi * 2.0 * pi / nphi
        dphi = dphi.repeat(nr)

        ## Transform to polar coordinate,
        # and set dphi at each data points ; 1d array
        dphi = dphi.repeat(nq)
        q_phi = self.qy_data / self.qx_data

        # Starting angle is different between polar
        # and cartesian coordinates.
        #if self.coords != 'polar':
        #    dphi += np.arctan( q_phi * self.dqx_data/ \
        #                  self.dqy_data).repeat(nbins).reshape(nq,\
        #                                nbins).transpose().flatten()

        # The angle (phi) of the original q point
        q_phi = np.arctan(q_phi).repeat(nbins)\
            .reshape([nq, nbins]).transpose().flatten()
        ## Find Gaussian weight for each dq bins: The weight depends only
        # on r-direction (The integration may not need)
        weight_res = (np.exp(-0.5 * (r - bin_size / 2.0)**2) -
                      np.exp(-0.5 * (r + bin_size / 2.0)**2))
        # No needs of normalization here.
        #weight_res /= np.sum(weight_res)
        weight_res = weight_res.repeat(nphi).reshape(nr, nphi)
        weight_res = weight_res.transpose().flatten()

        ## Set dr for all dq bins for averaging
        dr = r.repeat(nphi).reshape(nr, nphi).transpose().flatten()
        ## Set dqr for all data points
        dqx = np.outer(dr, self.dqx_data).flatten()
        dqy = np.outer(dr, self.dqy_data).flatten()

        qx = self.qx_data.repeat(nbins)\
            .reshape(nq, nbins).transpose().flatten()
        qy = self.qy_data.repeat(nbins)\
            .reshape(nq, nbins).transpose().flatten()

        # The polar needs rotation by -q_phi
        if self.coords == 'polar':
            q_r = sqrt(qx**2 + qy**2)
            qx_res = ((dqx*cos(dphi) + q_r) * cos(-q_phi)
                      + dqy*sin(dphi) * sin(-q_phi))
            qy_res = (-(dqx*cos(dphi) + q_r) * sin(-q_phi)
                      + dqy*sin(dphi) * cos(-q_phi))
        else:
            qx_res = qx + dqx*cos(dphi)
            qy_res = qy + dqy*sin(dphi)

        return qx_res, qy_res, weight_res

    def apply(self, theory):
        """
        Smear *theory* (evaluated at the over-sampled q_calc points) by
        Gaussian-weighted averaging; pass it through unchanged when no
        resolution information was available.
        """
        if self.q_calc_weights is not None:
            # TODO: interpolate rather than recomputing all the different qx,qy
            # Resolution needs to be applied
            nq, nbins = len(self.qx_data), self.nr * self.nphi
            ## Reshape into 2d array to use np weighted averaging
            theory = np.reshape(theory, (nbins, nq))
            ## Averaging with Gaussian weighting: normalization included.
            value = np.average(theory, axis=0, weights=self.q_calc_weights)
            ## Return the smeared values in the range of self.index
            return value
        else:
            return theory
176 | |
---|
177 | |
---|
class Slit2D(Resolution):
    """
    Slit aperture with resolution function on an oriented sample.

    *q* points at which the data is measured.

    *qx_width* slit width in qx

    *qy_width* slit height in qy; current implementation requires a fixed
    qy_width for all q points.

    *q_calc* is the list of q points to calculate, or None if this
    should be estimated from the *q* and *qx_width*.

    *accuracy* determines the number of *qy* points to compute for each *q*.
    The value is case insensitive.  The values are stored in
    sasmodels.resolution2d.N_SLIT_PERP.  The default values are: %s
    """
    __doc__ = __doc__%N_SLIT_PERP_DOC

    def __init__(self, q, qx_width, qy_width=0., q_calc=None, accuracy='low'):
        # Remember what q and width was used even though we won't need them
        # after the weight matrix is constructed
        self.q, self.qx_width, self.qy_width = q, qx_width, qy_width

        # Allow independent resolution on each qx point even though it is not
        # needed in practice. Set qy_width to the maximum qy width.
        if np.isscalar(qx_width):
            qx_width = np.ones(len(q))*qx_width
        else:
            qx_width = np.asarray(qx_width)
        if not np.isscalar(qy_width):
            qy_width = np.max(qy_width)

        # Build grid of qx, qy points
        if q_calc is not None:
            qx_calc = np.sort(q_calc)
        else:
            qx_calc = resolution.pinhole_extend_q(q, qx_width, nsigma=3)
        # Log-spaced qy points from the smallest measured q out to qy_width,
        # mirrored about zero so the trapezoid rule integrates across the
        # full slit height.
        # Fix: lower-case the accuracy key for consistency with Pinhole2D,
        # which accepts 'Low', 'low', etc.; previously 'Low' raised KeyError.
        qy_min, qy_max = np.log10(np.min(q)), np.log10(qy_width)
        qy_calc = np.logspace(qy_min, qy_max, N_SLIT_PERP[accuracy.lower()])
        qy_calc = np.hstack((-qy_calc[::-1], 0, qy_calc))
        self.q_calc = [v.flatten() for v in np.meshgrid(qx_calc, qy_calc)]
        self.qx_calc, self.qy_calc = qx_calc, qy_calc
        self.nx, self.ny = len(qx_calc), len(qy_calc)
        # Average qy spacing; kept for callers that want a bin width.
        self.dy = 2*qy_width/self.ny

        # Build weight matrix for resolution integration over qx; None
        # means qx needs no further smearing after the qy integration.
        if np.any(qx_width > 0):
            self.weights = resolution.pinhole_resolution(
                qx_calc, q,
                np.maximum(qx_width, resolution.MINIMUM_RESOLUTION))
        elif len(qx_calc) == len(q) and np.all(qx_calc == q):
            self.weights = None
        else:
            raise ValueError("Slit2D fails with q_calc != q")

    def apply(self, theory):
        """
        Integrate *theory* over the qy direction with the trapezoid rule,
        then apply the qx resolution matrix if one was constructed.
        """
        Iq = np.trapz(theory.reshape(self.ny, self.nx), axis=0, x=self.qy_calc)
        if self.weights is not None:
            Iq = resolution.apply_resolution_matrix(self.weights, Iq)
        return Iq
---|