/*
##########################################################
#                                                        #
#   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!   #
#   !!                                              !!   #
#   !!  KEEP THIS CODE CONSISTENT WITH KERNELPY.PY  !!   #
#   !!                                              !!   #
#   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!   #
#                                                        #
##########################################################
*/

---|
14 | #ifndef _PAR_BLOCK_ // protected block so we can include this code twice. |
---|
15 | #define _PAR_BLOCK_ |
---|
16 | |
---|
17 | typedef struct { |
---|
18 | int32_t pd_par[MAX_PD]; // id of the nth polydispersity variable |
---|
19 | int32_t pd_length[MAX_PD]; // length of the nth polydispersity weight vector |
---|
20 | int32_t pd_offset[MAX_PD]; // offset of pd weights in the value & weight vector |
---|
21 | int32_t pd_stride[MAX_PD]; // stride to move to the next index at this level |
---|
22 | int32_t pd_isvol[MAX_PD]; // True if parameter is a volume weighting parameter |
---|
23 | int32_t par_offset[NPARS]; // offset of par values in the value & weight vector |
---|
24 | int32_t par_coord[NPARS]; // polydispersity coordination bitvector |
---|
25 | int32_t fast_coord_pars[NPARS]; // ids of the fast coordination parameters |
---|
26 | int32_t fast_coord_count; // number of parameters coordinated with pd 1 |
---|
27 | int32_t theta_par; // id of spherical correction variable |
---|
28 | } ProblemDetails; |
---|
29 | |
---|
30 | typedef struct { |
---|
31 | PARAMETER_TABLE; |
---|
32 | } ParameterBlock; |
---|
33 | #endif |
---|
34 | |
---|
35 | |
---|
36 | kernel |
---|
37 | void KERNEL_NAME( |
---|
38 | int32_t nq, // number of q values |
---|
39 | const int32_t pd_start, // where we are in the polydispersity loop |
---|
40 | const int32_t pd_stop, // where we are stopping in the polydispersity loop |
---|
41 | global const ProblemDetails *problem, |
---|
42 | global const double *weights, |
---|
43 | global const double *values, |
---|
44 | global const double *q, // nq q values, with padding to boundary |
---|
45 | global double *result, // nq+3 return values, again with padding |
---|
46 | const double cutoff // cutoff in the polydispersity weight product |
---|
47 | ) |
---|
48 | { |
---|
49 | // Storage for the current parameter values. These will be updated as we |
---|
50 | // walk the polydispersity cube. |
---|
51 | local ParameterBlock local_values; // current parameter values |
---|
52 | double *pvec = (double *)(&local_values); // Alias named parameters with a vector |
---|
53 | |
---|
54 | local int offset[NPARS]; // NPARS excludes scale/background |
---|
55 | |
---|
56 | #if 0 // defined(USE_SHORTCUT_OPTIMIZATION) |
---|
57 | if (problem->pd_length[0] == 1) { |
---|
58 | // Shouldn't need to copy!! |
---|
59 | |
---|
60 | for (int k=0; k < NPARS; k++) { |
---|
61 | pvec[k] = values[k+2]; // skip scale and background |
---|
62 | } |
---|
63 | |
---|
64 | const double volume = CALL_VOLUME(local_values); |
---|
65 | #ifdef USE_OPENMP |
---|
66 | #pragma omp parallel for |
---|
67 | #endif |
---|
68 | for (int i=0; i < nq; i++) { |
---|
69 | const double scattering = CALL_IQ(q, i, local_values); |
---|
70 | result[i] = values[0]*scattering/volume + values[1]; |
---|
71 | } |
---|
72 | return; |
---|
73 | } |
---|
74 | printf("falling through\n"); |
---|
75 | #endif |
---|
76 | |
---|
77 | printf("Entering polydispersity\n"); |
---|
78 | // Since we are no longer looping over the entire polydispersity hypercube |
---|
79 | // for each q, we need to track the normalization values for each q in a |
---|
80 | // separate work vector. |
---|
81 | double norm; // contains sum over weights |
---|
82 | double vol; // contains sum over volume |
---|
83 | double norm_vol; // contains weights over volume |
---|
84 | |
---|
85 | // Initialize the results to zero |
---|
86 | if (pd_start == 0) { |
---|
87 | norm_vol = 0.0; |
---|
88 | norm = 0.0; |
---|
89 | vol = 0.0; |
---|
90 | |
---|
91 | #ifdef USE_OPENMP |
---|
92 | #pragma omp parallel for |
---|
93 | #endif |
---|
94 | for (int i=0; i < nq; i++) { |
---|
95 | result[i] = 0.0; |
---|
96 | } |
---|
97 | } else { |
---|
98 | //Pulling values from previous segment |
---|
99 | norm = result[nq]; |
---|
100 | vol = result[nq+1]; |
---|
101 | norm_vol = result[nq+2]; |
---|
102 | } |
---|
103 | |
---|
104 | // Location in the polydispersity hypercube, one index per dimension. |
---|
105 | local int pd_index[MAX_PD]; |
---|
106 | |
---|
107 | // Trigger the reset behaviour that happens at the end the fast loop |
---|
108 | // by setting the initial index >= weight vector length. |
---|
109 | pd_index[0] = problem->pd_length[0]; |
---|
110 | |
---|
111 | |
---|
112 | // need product of weights at every Iq calc, so keep product of |
---|
113 | // weights from the outer loops so that weight = partial_weight * fast_weight |
---|
114 | double partial_weight = NAN; // product of weight w4*w3*w2 but not w1 |
---|
115 | double partial_volweight = NAN; |
---|
116 | double weight = 1.0; // set to 1 in case there are no weights |
---|
117 | double vol_weight = 1.0; // set to 1 in case there are no vol weights |
---|
118 | double spherical_correction = 1.0; // correction for latitude variation |
---|
119 | |
---|
120 | // Loop over the weights then loop over q, accumulating values |
---|
121 | for (int loop_index=pd_start; loop_index < pd_stop; loop_index++) { |
---|
122 | // check if indices need to be updated |
---|
123 | if (pd_index[0] >= problem->pd_length[0]) { |
---|
124 | |
---|
125 | // RESET INDICES |
---|
126 | pd_index[0] = loop_index%problem->pd_length[0]; |
---|
127 | partial_weight = 1.0; |
---|
128 | partial_volweight = 1.0; |
---|
129 | for (int k=1; k < MAX_PD; k++) { |
---|
130 | pd_index[k] = (loop_index%problem->pd_length[k])/problem->pd_stride[k]; |
---|
131 | const double wi = weights[problem->pd_offset[0]+pd_index[0]]; |
---|
132 | partial_weight *= wi; |
---|
133 | if (problem->pd_isvol[k]) partial_volweight *= wi; |
---|
134 | } |
---|
135 | for (int k=0; k < NPARS; k++) { |
---|
136 | int coord = problem->par_coord[k]; |
---|
137 | int this_offset = problem->par_offset[k]; |
---|
138 | int block_size = 1; |
---|
139 | for (int bit=0; bit < MAX_PD && coord != 0; bit++) { |
---|
140 | if (coord&1) { |
---|
141 | this_offset += block_size * pd_index[bit]; |
---|
142 | block_size *= problem->pd_length[bit]; |
---|
143 | } |
---|
144 | coord /= 2; |
---|
145 | } |
---|
146 | offset[k] = this_offset; |
---|
147 | pvec[k] = values[this_offset]; |
---|
148 | } |
---|
149 | weight = partial_weight * weights[problem->pd_offset[0]+pd_index[0]]; |
---|
150 | if (problem->theta_par >= 0) { |
---|
151 | spherical_correction = fabs(cos(M_PI_180*pvec[problem->theta_par])); |
---|
152 | } |
---|
153 | if (problem->theta_par == problem->pd_par[0]) { |
---|
154 | weight *= spherical_correction; |
---|
155 | } |
---|
156 | |
---|
157 | } else { |
---|
158 | |
---|
159 | // INCREMENT INDICES |
---|
160 | pd_index[0] += 1; |
---|
161 | const double wi = weights[problem->pd_offset[0]+pd_index[0]]; |
---|
162 | weight = partial_weight*wi; |
---|
163 | if (problem->pd_isvol[0]) vol_weight *= wi; |
---|
164 | for (int k=0; k < problem->fast_coord_count; k++) { |
---|
165 | printf("fast loop %d coord %d idx %d ?%d offset %d %g\n", |
---|
166 | k,problem->fast_coord_pars[k],pd_index[0], |
---|
167 | problem->fast_coord_pars[k], |
---|
168 | offset[problem->fast_coord_pars[k]], |
---|
169 | values[offset[problem->fast_coord_pars[k]]] |
---|
170 | ); |
---|
171 | pvec[problem->fast_coord_pars[k]] |
---|
172 | = values[offset[problem->fast_coord_pars[k]]++]; |
---|
173 | |
---|
174 | } |
---|
175 | if (problem->theta_par ==problem->pd_par[0]) { |
---|
176 | weight *= fabs(cos(M_PI_180*pvec[problem->theta_par])); |
---|
177 | } |
---|
178 | } |
---|
179 | printf("rad len %f %f\n",local_values.radius, local_values.length); |
---|
180 | #ifdef INVALID |
---|
181 | if (INVALID(local_values)) continue; |
---|
182 | #endif |
---|
183 | |
---|
184 | // Accumulate I(q) |
---|
185 | // Note: weight==0 must always be excluded |
---|
186 | if (weight > cutoff) { |
---|
187 | norm += weight; |
---|
188 | vol += vol_weight * CALL_VOLUME(local_values); |
---|
189 | norm_vol += vol_weight; |
---|
190 | |
---|
191 | #ifdef USE_OPENMP |
---|
192 | #pragma omp parallel for |
---|
193 | #endif |
---|
194 | for (int i=0; i < nq; i++) { |
---|
195 | const double scattering = CALL_IQ(q, i, local_values); |
---|
196 | result[i] += weight*scattering; |
---|
197 | } |
---|
198 | } |
---|
199 | } |
---|
200 | |
---|
201 | //Makes a normalization avialable for the next round |
---|
202 | result[nq] = norm; |
---|
203 | result[nq+1] = vol; |
---|
204 | result[nq+2] = norm_vol; |
---|
205 | |
---|
206 | //End of the PD loop we can normalize |
---|
207 | if (pd_stop >= problem->pd_stride[MAX_PD-1]) { |
---|
208 | #ifdef USE_OPENMP |
---|
209 | #pragma omp parallel for |
---|
210 | #endif |
---|
211 | for (int i=0; i < nq; i++) { |
---|
212 | if (vol*norm_vol != 0.0) { |
---|
213 | result[i] *= norm_vol/vol; |
---|
214 | } |
---|
215 | result[i] = values[0]*result[i]/norm + values[1]; |
---|
216 | } |
---|
217 | } |
---|
218 | } |
---|