1 | # -*- coding: utf-8 -*- |
---|
2 | """ |
---|
3 | Run model unit tests. |
---|
4 | |
---|
5 | Usage:: |
---|
6 | |
---|
7 | python -m sasmodels.model_test [opencl|dll|opencl_and_dll] model1 model2 ... |
---|
8 | |
---|
9 | if model1 is 'all', then all except the remaining models will be tested |
---|
10 | |
---|
11 | Each model is tested using the default parameters at q=0.1, (qx, qy)=(0.1, 0.1), |
---|
12 | and the ER and VR are computed. The return values at these points are not |
---|
13 | considered. The test is only to verify that the models run to completion, |
---|
14 | and do not produce inf or NaN. |
---|
15 | |
---|
16 | Tests are defined with the *tests* attribute in the model.py file. *tests* |
---|
17 | is a list of individual tests to run, where each test consists of the |
---|
18 | parameter values for the test, the q-values and the expected results. For |
---|
19 | the effective radius test, the q-value should be 'ER'. For the VR test, |
---|
20 | the q-value should be 'VR'. For 1-D tests, either specify the q value or |
---|
21 | a list of q-values, and the corresponding I(q) value, or list of I(q) values. |
---|
22 | |
---|
23 | That is:: |
---|
24 | |
---|
25 | tests = [ |
---|
26 | [ {parameters}, q, I(q)], |
---|
27 | [ {parameters}, [q], [I(q)] ], |
---|
28 | [ {parameters}, [q1, q2, ...], [I(q1), I(q2), ...]], |
---|
29 | |
---|
30 | [ {parameters}, (qx, qy), I(qx, qy)], |
---|
31 | [ {parameters}, [(qx1, qy1), (qx2, qy2), ...], |
---|
32 | [I(qx1, qy1), I(qx2, qy2), ...]], |
---|
33 | |
---|
34 | [ {parameters}, 'ER', ER(pars) ], |
---|
35 | [ {parameters}, 'VR', VR(pars) ], |
---|
36 | ... |
---|
37 | ] |
---|
38 | |
---|
39 | Parameters are *key:value* pairs, where key is one of the parameters of the |
---|
40 | model and value is the value to use for the test. Any parameters not given |
---|
41 | in the parameter list will take on the default parameter value. |
---|
42 | |
---|
43 | Precision defaults to 5 digits (relative). |
---|
44 | """ |
---|
45 | from __future__ import print_function |
---|
46 | |
---|
47 | import sys |
---|
48 | import unittest |
---|
49 | |
---|
50 | import numpy as np # type: ignore |
---|
51 | |
---|
52 | from .core import list_models, load_model_info, build_model, HAVE_OPENCL |
---|
53 | from .details import dispersion_mesh |
---|
54 | from .direct_model import call_kernel, get_weights |
---|
55 | from .exception import annotate_exception |
---|
56 | from .modelinfo import expand_pars |
---|
57 | |
---|
58 | try: |
---|
59 | from typing import List, Iterator, Callable |
---|
60 | except ImportError: |
---|
61 | pass |
---|
62 | else: |
---|
63 | from .modelinfo import ParameterTable, ParameterSet, TestCondition, ModelInfo |
---|
64 | from .kernel import KernelModel |
---|
65 | |
---|
def call_ER(model_info, pars):
    # type: (ModelInfo, ParameterSet) -> float
    """
    Compute the effective radius for the model using parameter set *pars*.

    *model_info* is either *model.info* if you have a loaded model,
    or *kernel.info* if you have a model kernel prepared for evaluation.

    Returns 1.0 when the model does not define an *ER* function.
    """
    if model_info.ER is None:
        return 1.0
    # Evaluate ER over the dispersion mesh and return the weighted average.
    values, weights = _vol_pars(model_info, pars)
    radii = model_info.ER(*values)
    return np.sum(weights*radii) / np.sum(weights)
---|
80 | |
---|
def call_VR(model_info, pars):
    # type: (ModelInfo, ParameterSet) -> float
    """
    Compute the volume ratio for the model using parameter set *pars*.

    *model_info* is either *model.info* if you have a loaded model,
    or *kernel.info* if you have a model kernel prepared for evaluation.

    Returns 1.0 when the model does not define a *VR* function.
    """
    if model_info.VR is None:
        return 1.0
    # Evaluate VR over the dispersion mesh; VR returns (whole, part) volumes,
    # and the result is the ratio of their weighted sums.
    values, weights = _vol_pars(model_info, pars)
    whole, part = model_info.VR(*values)
    return np.sum(weights*part) / np.sum(weights*whole)
---|
95 | |
---|
def _vol_pars(model_info, pars):
    # type: (ModelInfo, ParameterSet) -> Tuple[np.ndarray, np.ndarray]
    """
    Return the *(value, weight)* dispersion mesh built from the volume
    parameters of the model, with weights taken from *pars*.
    """
    volume_weights = [
        get_weights(p, pars)
        for p in model_info.parameters.call_parameters
        if p.type == 'volume'
    ]
    return dispersion_mesh(model_info, volume_weights)
---|
103 | |
---|
104 | |
---|
def make_suite(loaders, models):
    # type: (List[str], List[str]) -> unittest.TestSuite
    """
    Construct the pyunit test suite.

    *loaders* is the list of kernel drivers to use, which is one of
    *["dll", "opencl"]*, *["dll"]* or *["opencl"]*.  For python models,
    the python driver is always used.

    *models* is the list of models to test, or *["all"]* to test all models.
    When *["all", name1, name2, ...]* is given, the named models are skipped.
    """
    ModelTestCase = _hide_model_case_from_nose()
    suite = unittest.TestSuite()

    if models[0] == 'all':
        skip = models[1:]
        models = list_models()
    else:
        skip = []

    for model_name in models:
        if model_name in skip:
            continue
        model_info = load_model_info(model_name)

        #print('------')
        #print('found tests in', model_name)
        #print('------')

        # A python Iq means a pure-python kernel; run it through the dll
        # loader rather than opencl, since opencl will not be available in
        # some environments.
        if callable(model_info.Iq):
            suite.addTest(ModelTestCase(
                "Model: %s, Kernel: python"%model_name, model_info,
                "test_%s_python" % model_name,
                platform="dll",  # so that
                dtype="double"))
            continue

        # Kernel implemented in C: test using opencl if desired and available.
        if 'opencl' in loaders and HAVE_OPENCL:
            # Using dtype=None so that the models that are only
            # correct for double precision are not tested using
            # single precision.  The choice is determined by the
            # presence of *single=False* in the model file.
            suite.addTest(ModelTestCase(
                "Model: %s, Kernel: OpenCL"%model_name, model_info,
                "test_%s_opencl" % model_name,
                platform="ocl", dtype=None))

        # Test using dll if desired.
        if 'dll' in loaders:
            suite.addTest(ModelTestCase(
                "Model: %s, Kernel: dll"%model_name, model_info,
                "test_%s_dll" % model_name,
                platform="dll", dtype="double"))

    return suite
---|
171 | |
---|
172 | |
---|
def _hide_model_case_from_nose():
    # type: () -> type
    # The test case class is defined inside a factory function so that nose
    # does not discover the bare class and try to instantiate it without the
    # constructor arguments it requires.
    class ModelTestCase(unittest.TestCase):
        """
        Test suite for a particular model with a particular kernel driver.

        The test suite runs a simple smoke test to make sure the model
        functions, then runs the list of tests at the bottom of the model
        description file.
        """
        def __init__(self, test_name, model_info, test_method_name,
                     platform, dtype):
            # type: (str, ModelInfo, str, str, DType) -> None
            self.test_name = test_name
            self.info = model_info
            self.platform = platform
            self.dtype = dtype

            # Register run_all under the dynamically chosen method name so
            # that unittest reports each model under its own test name.
            setattr(self, test_method_name, self.run_all)
            unittest.TestCase.__init__(self, test_method_name)

        def run_all(self):
            # type: () -> None
            """
            Run the smoke tests followed by the tests defined in the model
            file, annotating any exception with the test name.
            """
            smoke_tests = [
                # test validity at reasonable values
                ({}, 0.1, None),
                ({}, (0.1, 0.1), None),
                # test validity at q = 0
                #({}, 0.0, None),
                #({}, (0.0, 0.0), None),
                # test vector form
                ({}, [0.1]*2, [None]*2),
                ({}, [(0.1, 0.1)]*2, [None]*2),
                # test that ER/VR will run if they exist
                ({}, 'ER', None),
                ({}, 'VR', None),
                ]

            tests = self.info.tests
            try:
                model = build_model(self.info, dtype=self.dtype,
                                    platform=self.platform)
                for test in smoke_tests + tests:
                    self.run_one(model, test)

                if not tests and self.platform == "dll":
                    ## Uncomment the following to make forgetting the test
                    ## values an error.  Only do so for the "dll" tests
                    ## to reduce noise from both opencl and dll, and because
                    ## python kernels use platform="dll".
                    #raise Exception("No test cases provided")
                    pass

            # Deliberately broad: tag the exception with the test name and
            # re-raise so unittest reports it against the right model.
            except:
                annotate_exception(self.test_name)
                raise

        def run_one(self, model, test):
            # type: (KernelModel, TestCondition) -> None
            """
            Run a single test condition *(pars, x, y)* against *model*,
            dispatching on the form of *x* ('ER', 'VR', 1-D q or 2-D (qx, qy)).
            """
            user_pars, x, y = test
            pars = expand_pars(self.info.parameters, user_pars)

            # Normalize scalar q and I(q) to one-element lists.
            if not isinstance(y, list):
                y = [y]
            if not isinstance(x, list):
                x = [x]

            self.assertEqual(len(y), len(x))

            if x[0] == 'ER':
                actual = [call_ER(model.info, pars)]
            elif x[0] == 'VR':
                actual = [call_VR(model.info, pars)]
            elif isinstance(x[0], tuple):
                # 2-D data: split the (qx, qy) pairs into parallel vectors.
                qx, qy = zip(*x)
                q_vectors = [np.array(qx), np.array(qy)]
                kernel = model.make_kernel(q_vectors)
                actual = call_kernel(kernel, pars)
            else:
                q_vectors = [np.array(x)]
                kernel = model.make_kernel(q_vectors)
                actual = call_kernel(kernel, pars)

            self.assertTrue(len(actual) > 0)
            self.assertEqual(len(y), len(actual))

            for xi, yi, actual_yi in zip(x, y, actual):
                if yi is None:
                    # smoke test --- make sure it runs and produces a value
                    self.assertTrue(not np.isnan(actual_yi),
                                    'invalid f(%s): %s' % (xi, actual_yi))
                elif np.isnan(yi):
                    # NaN expected: only check that the result is also NaN.
                    self.assertTrue(np.isnan(actual_yi),
                                    'f(%s): expected:%s; actual:%s'
                                    % (xi, yi, actual_yi))
                else:
                    # is_near does not work for infinite values, so also test
                    # for exact equality before the relative comparison.
                    self.assertTrue(yi==actual_yi or is_near(yi, actual_yi, 5),
                                    'f(%s); expected:%s; actual:%s'
                                    % (xi, yi, actual_yi))

    return ModelTestCase
---|
276 | |
---|
def is_near(target, actual, digits=5):
    # type: (float, float, int) -> bool
    """
    Returns true if *actual* is within *digits* significant digits of *target*.

    A *target* of zero matches only an *actual* of exactly zero, since
    there is no magnitude against which to measure relative error.
    """
    import math
    # Guard: log10(0) raises ValueError, so handle a zero target explicitly.
    if target == 0.:
        return actual == 0.
    # Scale by the power of ten just above |target| so the tolerance is
    # relative to the number of significant digits requested.
    shift = 10**math.ceil(math.log10(abs(target)))
    return abs(target-actual)/shift < 1.5*10**-digits
---|
285 | |
---|
def main():
    # type: () -> int
    """
    Run tests given in sys.argv.

    Returns 0 if success or 1 if any tests fail.
    """
    # Prefer the XML runner (writes junit-style logs) when it is installed.
    try:
        from xmlrunner import XMLTestRunner as TestRunner
        test_args = { 'output': 'logs' }
    except ImportError:
        from unittest import TextTestRunner as TestRunner
        test_args = { }

    models = sys.argv[1:]

    # Optional -v flag turns on verbose test output.
    verbosity = 2 if models and models[0] == '-v' else 1
    if verbosity == 2:
        models = models[1:]

    # Optional compute-target selector; default is both opencl and dll.
    loaders = ['opencl', 'dll']
    if models and models[0] == 'opencl':
        if not HAVE_OPENCL:
            print("opencl is not available")
            return 1
        loaders = ['opencl']
        models = models[1:]
    elif models and models[0] == 'dll':
        # TODO: test if compiler is available?
        loaders = ['dll']
        models = models[1:]
    elif models and models[0] == 'opencl_and_dll':
        models = models[1:]

    if not models:
        print("""\
usage:
  python -m sasmodels.model_test [-v] [opencl|dll] model1 model2 ...

If -v is included on the command line, then use verbose output.

If neither opencl nor dll is specified, then models will be tested with
both OpenCL and dll; the compute target is ignored for pure python models.

If model1 is 'all', then all except the remaining models will be tested.

""")

        return 1

    runner = TestRunner(verbosity=verbosity, **test_args)
    result = runner.run(make_suite(loaders, models))
    return 1 if result.failures or result.errors else 0
---|
340 | |
---|
341 | |
---|
def model_tests():
    # type: () -> Iterator[Callable[[], None]]
    """
    Test runner visible to nosetests.

    Run "nosetests sasmodels" on the command line to invoke it.
    """
    suite = make_suite(['opencl', 'dll'], ['all'])
    for case in suite:
        yield case.run_all
---|
352 | |
---|
353 | |
---|
# Run the command-line interface when invoked as a script; exit status is
# 0 on success and 1 if any test fails.
if __name__ == "__main__":
    sys.exit(main())
---|