1 | #!/usr/bin/env python |
---|
2 | """ |
---|
3 | Program to compare results from many random parameter sets for a given model. |
---|
4 | |
---|
5 | The result is a comma separated value (CSV) table that can be redirected |
---|
6 | from standard output into a file and loaded into a spreadsheet. |
---|
7 | |
---|
8 | The models are compared for each parameter set and if the difference is |
---|
9 | greater than expected for that precision, the parameter set is labeled |
---|
10 | as bad and written to the output, along with the random seed used to |
---|
11 | generate that parameter value. This seed can be used with :mod:`compare` |
---|
12 | to reload and display the details of the model. |
---|
13 | """ |
---|
14 | from __future__ import print_function |
---|
15 | |
---|
16 | import sys |
---|
17 | import traceback |
---|
18 | |
---|
19 | import numpy as np # type: ignore |
---|
20 | |
---|
21 | from . import core |
---|
22 | from .compare import (randomize_pars, suppress_pd, make_data, |
---|
23 | make_engine, get_pars, columnize, |
---|
24 | constrain_pars, constrain_new_to_old) |
---|
25 | |
---|
26 | MODELS = core.list_models() |
---|
27 | |
---|
def calc_stats(target, value, index):
    """
    Calculate statistics between the target value and the computed value.

    *target* and *value* are the vectors being compared, with the
    difference normalized by *target* to get relative error.  Only
    the elements listed in *index* are used, though *index* may be
    an empty slice defined by *slice(None, None)*.

    Returns:

    *maxrel* the maximum relative difference

    *rel95* the relative difference with the 5% biggest differences ignored

    *maxabs* the maximum absolute difference for the 5% biggest differences

    *maxval* the maximum value for the 5% biggest differences
    """
    # Restrict everything to the active subset up front so that the
    # positional indices returned by argsort line up with every array
    # they are later applied to.
    active_value = value[index]
    resid = abs(value-target)[index]
    relerr = resid/target[index]
    sorted_rel_index = np.argsort(relerr)
    #p90 = int(len(relerr)*0.90)
    p95 = int(len(relerr)*0.95)
    maxrel = np.max(relerr)
    rel95 = relerr[sorted_rel_index[p95]]
    maxabs = np.max(resid[sorted_rel_index[p95:]])
    # Bug fix: index into the *index*-selected subset, not the full
    # vector — sorted_rel_index holds positions within that subset, so
    # applying it to the unselected *value* picked the wrong elements
    # whenever *index* was not an identity slice.
    maxval = np.max(active_value[sorted_rel_index[p95:]])
    return maxrel, rel95, maxabs, maxval
---|
57 | |
---|
def print_column_headers(pars, parts):
    """
    Generate column headers for the differences and for the parameters,
    and print them to standard output as two CSV rows (a group row and
    a column row).
    """
    # Bug fix: the labels said "above 90% rel" but calc_stats computes
    # the 95th percentile (p95; the 90% version survives only as a
    # commented-out line there), so the columns were mislabeled.
    stats = list('Max rel err|95% rel err|Max abs err above 95% rel|Max value above 95% rel'.split('|'))
    # Group row: one named group per engine pair, padded with blanks so
    # it lines up above that group's stats columns.
    groups = ['']
    for p in parts:
        groups.append(p)
        groups.extend(['']*(len(stats)-1))
    groups.append("Parameters")
    # Parameter columns are sorted by name so rows are reproducible.
    columns = ['Seed'] + stats*len(parts) + list(sorted(pars.keys()))
    print(','.join('"%s"'%c for c in groups))
    print(','.join('"%s"'%c for c in columns))
---|
72 | |
---|
# Target 'good' value for various precision levels.
# Keys are the engine names accepted by compare_instance's *base*/*comp*
# arguments (looked up as PRECISION[base] / PRECISION[comp]); values are
# the maximum relative error expected from that engine.
# NOTE(review): the '!' suffix presumably selects a non-default engine
# variant (e.g. DLL vs OpenCL) — confirm against compare.make_engine.
PRECISION = {
    'fast': 1e-3,
    'half': 1e-3,
    'single': 5e-5,
    'double': 5e-14,
    'single!': 5e-5,
    'double!': 5e-14,
    'quad!': 5e-18,
    'sasview': 5e-14,
}
---|
def compare_instance(name, data, index, N=1, mono=True, cutoff=1e-5,
                     base='sasview', comp='double'):
    r"""
    Compare the model under different calculation engines.

    *name* is the name of the model.

    *data* is the data object giving $q, \Delta q$ calculation points.

    *index* is the active set of points.

    *N* is the number of comparisons to make.

    *mono* is True if the comparison should be run with polydispersity
    suppressed (see :func:`compare.suppress_pd`); when False the cutoff
    is reported in the CSV header.

    *cutoff* is the polydispersity weight cutoff to make the calculation
    a little bit faster.

    *base* and *comp* are the names of the calculation engines to compare.

    Prints a CSV section to standard output: a model header, column
    headers, one row per *bad* parameter set (with the seed that
    generated it), and a final "good" summary line.
    """

    is_2d = hasattr(data, 'qx_data')
    model_info = core.load_model_info(name)
    pars = get_pars(model_info, use_demo=True)
    header = ('\n"Model","%s","Count","%d","Dimension","%s"'
              % (name, N, "2D" if is_2d else "1D"))
    if not mono:
        header += ',"Cutoff",%g'%(cutoff,)
    print(header)

    # Skip 2-D comparison for models that only define 1-D parameters.
    if is_2d:
        if not model_info.parameters.has_2d:
            print(',"1-D only"')
            return

    # Some not very clean macros for evaluating the models and checking the
    # results.  They freely use variables from the current scope, even some
    # which have not been defined yet (calc_base/calc_comp, expected, seed,
    # good, max_diff), complete with abuse of mutable lists to allow them
    # to update values in the current scope since nonlocal declarations
    # are not available in python 2.7.
    def try_model(fn, pars):
        """
        Return the model evaluated at *pars*.  If there is an exception,
        print it and return NaN of the right shape.
        """
        try:
            result = fn(**pars)
        except Exception:
            traceback.print_exc()
            print("when comparing %s for %d"%(name, seed))
            # Shape the NaN result like the data so calc_stats still runs.
            if hasattr(data, 'qx_data'):
                result = np.NaN*data.data
            else:
                result = np.NaN*data.x
        return result
    def check_model(pars):
        """
        Run the two calculators against *pars*, returning statistics
        on the differences.  See :func:`calc_stats` for the list of stats.
        """
        base_value = try_model(calc_base, pars)
        comp_value = try_model(calc_comp, pars)
        stats = calc_stats(base_value, comp_value, index)
        # Mutable-list writes update the enclosing scope (py2.7 nonlocal).
        max_diff[0] = max(max_diff[0], stats[0])
        good[0] = good[0] and (stats[0] < expected)
        return list(stats)


    try:
        calc_base = make_engine(model_info, data, base, cutoff)
        calc_comp = make_engine(model_info, data, comp, cutoff)
    except Exception as exc:
        #raise
        # Engine construction failed: report the error in CSV form and
        # emit a summary line so the section is still well formed.
        print('"Error: %s"'%str(exc).replace('"', "'"))
        print('"good","%d of %d","max diff",%g' % (0, N, np.NaN))
        return
    # A parameter set is "good" if the engines agree to within the looser
    # of the two engines' expected precisions.
    expected = max(PRECISION[base], PRECISION[comp])

    num_good = 0
    first = True
    max_diff = [0]
    for k in range(N):
        # Progress goes to stderr so it doesn't pollute the CSV on stdout.
        print("Model %s %d"%(name, k+1), file=sys.stderr)
        # Record the seed so a bad parameter set can be reproduced later
        # with the compare module (see the module docstring).
        seed = np.random.randint(1e6)
        pars_i = randomize_pars(model_info, pars, seed)
        constrain_pars(model_info, pars_i)
        if 'sasview' in (base, comp):
            constrain_new_to_old(model_info, pars_i)
        if mono:
            pars_i = suppress_pd(pars_i)

        good = [True]
        columns = check_model(pars_i)
        columns += [v for _, v in sorted(pars_i.items())]
        # Column headers are deferred until the first randomized parameter
        # set exists, since the parameter names come from it.
        if first:
            labels = [" vs. ".join((calc_base.engine, calc_comp.engine))]
            print_column_headers(pars_i, labels)
            first = False
        if good[0]:
            num_good += 1
        else:
            # Only bad parameter sets are written out, seed first.
            print(("%d,"%seed)+','.join("%s"%v for v in columns))
    print('"good","%d of %d","max diff",%g'%(num_good, N, max_diff[0]))
---|
185 | |
---|
186 | |
---|
def print_usage():
    """
    Print the command usage string.
    """
    usage = ("usage: compare_many.py MODEL COUNT (1dNQ|2dNQ)"
             " (CUTOFF|mono) (single|double|quad)")
    print(usage)
---|
192 | |
---|
193 | |
---|
def print_models():
    """
    Print the list of available models in columns.
    """
    listing = columnize(MODELS, indent=" ")
    print(listing)
---|
199 | |
---|
200 | |
---|
def print_help():
    """
    Print usage string, the option description and the list of available
    models.

    Output goes to standard output: the one-line usage from
    :func:`print_usage`, a description of each positional argument, and
    the model listing from :func:`print_models`.
    """
    print_usage()
    # NOTE: this is a runtime string, not a docstring — its text is part
    # of the program's output and must not be reworded casually.
    print("""\

MODEL is the model name of the model or one of the model types listed in
sasmodels.core.list_models (all, py, c, double, single, opencl, 1d, 2d,
nonmagnetic, magnetic). Model types can be combined, such as 2d+single.

COUNT is the number of randomly generated parameter sets to try. A value
of "10000" is a reasonable check for monodisperse models, or "100" for
polydisperse models. For a quick check, use "100" and "5" respectively.

NQ is the number of Q values to calculate. If it starts with "1d", then
it is a 1-dimensional problem, with log spaced Q points from 1e-3 to 1.0.
If it starts with "2d" then it is a 2-dimensional problem, with linearly
spaced points Q points from -1.0 to 1.0 in each dimension. The usual
values are "1d100" for 1-D and "2d32" for 2-D.

CUTOFF is the cutoff value to use for the polydisperse distribution. Weights
below the cutoff will be ignored. Use "mono" for monodisperse models. The
choice of polydisperse parameters, and the number of points in the distribution
is set in compare.py defaults for each model.

PRECISION is the floating point precision to use for comparisons. If two
precisions are given, then compare one to the other, ignoring sasview.

Available models:
""")
    print_models()
---|
233 | |
---|
def main(argv):
    """
    Main program.

    *argv* is the argument list without the program name, in the order
    MODEL COUNT (1dNQ|2dNQ) (CUTOFF|mono) BASE [COMP].  Parses the
    arguments, builds the comparison data set, and runs
    :func:`compare_instance` for each selected model.
    """
    # Five required arguments plus an optional comparison engine.
    if len(argv) not in (5, 6):
        print_help()
        return

    target = argv[0]
    try:
        model_list = [target] if target in MODELS else core.list_models(target)
    except ValueError:
        # Bug fix: this message referenced *model*, which is not defined
        # in this branch (and caused a NameError); the requested name is
        # held in *target*.
        print('Bad model %s. Use model type or one of:' % target)
        print_models()
        print('model types: all, py, c, double, single, opencl, 1d, 2d, nonmagnetic, magnetic')
        return
    try:
        count = int(argv[1])
        is2D = argv[2].startswith('2d')
        assert argv[2][1] == 'd'
        Nq = int(argv[2][2:])
        mono = argv[3] == 'mono'
        cutoff = float(argv[3]) if not mono else 0
        base = argv[4]
        comp = argv[5] if len(argv) > 5 else "sasview"
    except Exception:
        # Any parse failure (bad int, short string, failed assert) is
        # reported along with the usage line.
        traceback.print_exc()
        print_usage()
        return

    data, index = make_data({'qmax':1.0, 'is2d':is2D, 'nq':Nq, 'res':0.,
                             'accuracy': 'Low', 'view':'log', 'zero': False})
    for model in model_list:
        compare_instance(model, data, index, N=count, mono=mono,
                         cutoff=cutoff, base=base, comp=comp)
---|
269 | |
---|
# Script entry point; the commented lines show how to run with a fixed
# random seed for reproducible comparisons.
if __name__ == "__main__":
    #from .compare import push_seed
    #with push_seed(1): main(sys.argv[1:])
    main(sys.argv[1:])
---|