# -*- coding: utf-8 -*-
"""
Run model unit tests.

Usage::

    python -m sasmodels.model_test [opencl|dll|opencl_and_dll] model1 model2 ...

    if model1 is 'all', then all models except those listed after it are tested

Each model is tested using the default parameters at q=0.1, (qx, qy)=(0.1, 0.1),
and the ER and VR are computed. The return values at these points are not
considered. The test only verifies that the models run to completion and
do not produce inf or NaN.

Tests are defined with the *tests* attribute in the model.py file. *tests*
is a list of individual tests to run, where each test consists of the
parameter values for the test, the q-values and the expected results. For
the effective radius test, the q-value should be 'ER'. For the VR test,
the q-value should be 'VR'. For 1-D tests, either specify the q value or
a list of q-values, and the corresponding I(q) value, or list of I(q) values.

That is::

    tests = [
        [ {parameters}, q, I(q)],
        [ {parameters}, [q], [I(q)] ],
        [ {parameters}, [q1, q2, ...], [I(q1), I(q2), ...]],

        [ {parameters}, (qx, qy), I(qx, qy)],
        [ {parameters}, [(qx1, qy1), (qx2, qy2), ...],
          [I(qx1, qy1), I(qx2, qy2), ...]],

        [ {parameters}, 'ER', ER(pars) ],
        [ {parameters}, 'VR', VR(pars) ],
        ...
    ]

Parameters are *key:value* pairs, where key is one of the parameters of the
model and value is the value to use for the test. Any parameters not given
in the parameter list will take on the default parameter value.

Precision defaults to 5 digits (relative).
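
For example, a hypothetical test block for a simple sphere-like model (the
parameter names and expected values below are illustrative, not taken from
any actual model file) might read::

    tests = [
        [{'radius': 30.0, 'sld': 1.0}, 0.01, 12.5],      # one 1-D point
        [{'radius': 30.0}, [0.01, 0.1], [12.5, 0.025]],  # list of 1-D points
        [{'radius': 30.0}, (0.01, 0.01), 12.3],          # one 2-D point
        [{'radius': 30.0}, 'ER', 30.0],                  # effective radius
    ]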
"""
#TODO: rename to tests so that tab completion works better for models directory

from __future__ import print_function

import sys
import unittest

import numpy as np

from .core import list_models, load_model_info, build_model, HAVE_OPENCL
from .details import dispersion_mesh
from .direct_model import call_kernel, get_weights
from .exception import annotate_exception
from .modelinfo import expand_pars

try:
    from typing import List, Iterator, Callable, Tuple
except ImportError:
    pass
else:
    from .modelinfo import ParameterTable, ParameterSet, TestCondition, ModelInfo
    from .kernelpy import PyModel, PyInput, PyKernel, DType

def call_ER(model_info, pars):
    # type: (ModelInfo, ParameterSet) -> float
    """
    Call the model ER function using *pars*.

    *model_info* is either *model.info* if you have a loaded model,
    or *kernel.info* if you have a model kernel prepared for evaluation.
    """
    if model_info.ER is None:
        return 1.0
    else:
        value, weight = _vol_pars(model_info, pars)
        individual_radii = model_info.ER(*value)
        return np.sum(weight*individual_radii) / np.sum(weight)

def call_VR(model_info, pars):
    # type: (ModelInfo, ParameterSet) -> float
    """
    Call the model VR function using *pars*.

    *model_info* is either *model.info* if you have a loaded model,
    or *kernel.info* if you have a model kernel prepared for evaluation.
    """
    if model_info.VR is None:
        return 1.0
    else:
        value, weight = _vol_pars(model_info, pars)
        whole, part = model_info.VR(*value)
        return np.sum(weight*part)/np.sum(weight*whole)
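
# A minimal usage sketch for call_ER/call_VR (the parameter values are
# hypothetical; any model that defines ER/VR, such as core_shell_sphere,
# could be used):
#
#     from sasmodels.core import load_model_info
#     from sasmodels.modelinfo import expand_pars
#     from sasmodels.model_test import call_ER, call_VR
#
#     info = load_model_info('core_shell_sphere')
#     pars = expand_pars(info.parameters, {'radius': 60.0, 'thickness': 10.0})
#     effective_radius = call_ER(info, pars)  # dispersion-weighted mean ER
#     volume_ratio = call_VR(info, pars)      # weighted part/whole volume ratio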

def _vol_pars(model_info, pars):
    # type: (ModelInfo, ParameterSet) -> Tuple[np.ndarray, np.ndarray]
    vol_pars = [get_weights(p, pars)
                for p in model_info.parameters.call_parameters
                if p.type == 'volume']
    value, weight = dispersion_mesh(model_info, vol_pars)
    return value, weight


def make_suite(loaders, models):
    # type: (List[str], List[str]) -> unittest.TestSuite
    """
    Construct the pyunit test suite.

    *loaders* is the list of kernel drivers to use, which is one of
    *["dll", "opencl"]*, *["dll"]* or *["opencl"]*. For python models,
    the python driver is always used.

    *models* is the list of models to test, or *["all"]* to test all models.
    """
    ModelTestCase = _hide_model_case_from_nose()
    suite = unittest.TestSuite()

    if models[0] == 'all':
        skip = models[1:]
        models = list_models()
    else:
        skip = []
    for model_name in models:
        if model_name in skip: continue
        model_info = load_model_info(model_name)

        #print('------')
        #print('found tests in', model_name)
        #print('------')

        # If the model is implemented in python, use the dll loader so that
        # the python kernel is called; don't try to use the opencl kernel
        # since it will not be available in some environments.
        is_py = callable(model_info.Iq)

        if is_py:  # kernel implemented in python
            test_name = "Model: %s, Kernel: python"%model_name
            test_method_name = "test_%s_python" % model_name
            test = ModelTestCase(test_name, model_info,
                                 test_method_name,
                                 platform="dll",  # so the python kernel is used
                                 dtype="double")
            suite.addTest(test)
        else:  # kernel implemented in C
            # test using opencl if desired and available
            if 'opencl' in loaders and HAVE_OPENCL:
                test_name = "Model: %s, Kernel: OpenCL"%model_name
                test_method_name = "test_%s_opencl" % model_name
                # Using dtype=None so that the models that are only
                # correct for double precision are not tested using
                # single precision. The choice is determined by the
                # presence of *single=False* in the model file.
                test = ModelTestCase(test_name, model_info,
                                     test_method_name,
                                     platform="ocl", dtype=None)
                #print("defining", test_name)
                suite.addTest(test)

            # test using dll if desired
            if 'dll' in loaders:
                test_name = "Model: %s, Kernel: dll"%model_name
                test_method_name = "test_%s_dll" % model_name
                test = ModelTestCase(test_name, model_info,
                                     test_method_name,
                                     platform="dll",
                                     dtype="double")
                suite.addTest(test)

    return suite
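
# A minimal sketch of driving the suite directly from python (the model
# names here are illustrative; any installed models will do):
#
#     import unittest
#     suite = make_suite(['dll'], ['sphere', 'cylinder'])
#     unittest.TextTestRunner(verbosity=2).run(suite)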


def _hide_model_case_from_nose():
    # type: () -> type
    class ModelTestCase(unittest.TestCase):
        """
        Test suite for a particular model with a particular kernel driver.

        The test suite runs a simple smoke test to make sure the model
        functions, then runs the list of tests at the bottom of the model
        description file.
        """
        def __init__(self, test_name, model_info, test_method_name,
                     platform, dtype):
            # type: (str, ModelInfo, str, str, DType) -> None
            self.test_name = test_name
            self.info = model_info
            self.platform = platform
            self.dtype = dtype

            setattr(self, test_method_name, self.run_all)
            unittest.TestCase.__init__(self, test_method_name)

        def run_all(self):
            # type: () -> None
            smoke_tests = [
                ({}, 0.1, None),
                ({}, (0.1, 0.1), None),
                ({}, 'ER', None),
                ({}, 'VR', None),
                ]

            tests = self.info.tests
            try:
                model = build_model(self.info, dtype=self.dtype,
                                    platform=self.platform)
                for test in smoke_tests + tests:
                    self.run_one(model, test)

                if not tests and self.platform == "dll":
                    ## Uncomment the following to make forgetting the test
                    ## values an error. Only do so for the "dll" tests
                    ## to reduce noise from both opencl and dll, and because
                    ## python kernels use platform="dll".
                    #raise Exception("No test cases provided")
                    pass

            except:
                annotate_exception(self.test_name)
                raise

        def run_one(self, model, test):
            # type: (PyModel, TestCondition) -> None
            user_pars, x, y = test
            pars = expand_pars(self.info.parameters, user_pars)

            if not isinstance(y, list):
                y = [y]
            if not isinstance(x, list):
                x = [x]

            self.assertEqual(len(y), len(x))

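            # Dispatch on the form of x: the strings 'ER' and 'VR' select the
            # effective radius and volume ratio calculators, (qx, qy) tuples
            # are evaluated as 2-D points, and plain numbers as 1-D q values.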
            if x[0] == 'ER':
                actual = [call_ER(model.info, pars)]
            elif x[0] == 'VR':
                actual = [call_VR(model.info, pars)]
            elif isinstance(x[0], tuple):
                qx, qy = zip(*x)
                q_vectors = [np.array(qx), np.array(qy)]
                kernel = model.make_kernel(q_vectors)
                actual = call_kernel(kernel, pars)
            else:
                q_vectors = [np.array(x)]
                kernel = model.make_kernel(q_vectors)
                actual = call_kernel(kernel, pars)

            self.assertTrue(len(actual) > 0)
            self.assertEqual(len(y), len(actual))

            for xi, yi, actual_yi in zip(x, y, actual):
                if yi is None:
                    # smoke test --- make sure it runs and produces a value
                    self.assertTrue(np.isfinite(actual_yi),
                                    'invalid f(%s): %s' % (xi, actual_yi))
                else:
                    self.assertTrue(is_near(yi, actual_yi, 5),
                                    'f(%s); expected:%s; actual:%s'
                                    % (xi, yi, actual_yi))

    return ModelTestCase

def is_near(target, actual, digits=5):
    # type: (float, float, int) -> bool
    """
    Returns true if *actual* is within *digits* significant digits of *target*.
    """
    import math
    shift = 10**math.ceil(math.log10(abs(target)))
    return abs(target-actual)/shift < 1.5*10**-digits
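
# A worked check of the tolerance above (values chosen for illustration):
#
#     is_near(1.234567, 1.234572, 5)   # True:  shift=10, 5e-6/10 = 5e-7 < 1.5e-5
#     is_near(1.0, 1.1, 5)             # False: shift=1,  0.1/1 = 0.1 > 1.5e-5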

def main():
    # type: () -> int
    """
    Run tests given in sys.argv.

    Returns 0 if success or 1 if any tests fail.
    """
    import xmlrunner

    models = sys.argv[1:]
    if models and models[0] == '-v':
        verbosity = 2
        models = models[1:]
    else:
        verbosity = 1
    if models and models[0] == 'opencl':
        if not HAVE_OPENCL:
            print("opencl is not available")
            return 1
        loaders = ['opencl']
        models = models[1:]
    elif models and models[0] == 'dll':
        # TODO: test if compiler is available?
        loaders = ['dll']
        models = models[1:]
    elif models and models[0] == 'opencl_and_dll':
        loaders = ['opencl', 'dll']
        models = models[1:]
    else:
        loaders = ['opencl', 'dll']
    if not models:
        print("""\
usage:
  python -m sasmodels.model_test [-v] [opencl|dll|opencl_and_dll] model1 model2 ...

If -v is included on the command line, then use verbose output.

If neither opencl nor dll is specified, then models will be tested with
both OpenCL and dll; the compute target is ignored for pure python models.

If model1 is 'all', then all models except those listed after it are tested.
""")

        return 1

    #runner = unittest.TextTestRunner()
    runner = xmlrunner.XMLTestRunner(output='logs', verbosity=verbosity)
    result = runner.run(make_suite(loaders, models))
    return 1 if result.failures or result.errors else 0


def model_tests():
    # type: () -> Iterator[Callable[[], None]]
    """
    Test runner visible to nosetests.

    Run "nosetests sasmodels" on the command line to invoke it.
    """
    tests = make_suite(['opencl', 'dll'], ['all'])
    for test_i in tests:
        yield test_i.run_all


if __name__ == "__main__":
    sys.exit(main())