# -*- coding: utf-8 -*-
"""
Run model unit tests.

Usage::

    python -m sasmodels.model_test [opencl|cuda|dll|all] model1 model2 ...

If model1 is 'all', then all models except those listed after it will be
tested. Subgroups are also possible, such as 'py', 'single' or '1d'. See
:func:`core.list_models` for details.

Each model is tested using the default parameters at q=0.1, (qx, qy)=(0.1, 0.1),
and Fq is called to make sure R_eff, volume and volume ratio are computed.
The return values at these points are not considered. The test is only to
verify that the models run to completion, and do not produce inf or NaN.

Tests are defined with the *tests* attribute in the model.py file. *tests*
is a list of individual tests to run, where each test consists of the
parameter values for the test, the q-values and the expected results. For
the effective radius test and volume ratio tests, use the extended output
form, which checks each output of kernel.Fq. For 1-D tests, either specify
the q value or a list of q-values, and the corresponding I(q) value, or
list of I(q) values.

That is::

    tests = [
        [ {parameters}, q, I(q)],
        [ {parameters}, [q], [I(q)] ],
        [ {parameters}, [q1, q2, ...], [I(q1), I(q2), ...]],

        [ {parameters}, (qx, qy), I(qx, qy)],
        [ {parameters}, [(qx1, qy1), (qx2, qy2), ...],
                        [I(qx1, qy1), I(qx2, qy2), ...]],

        [ {parameters}, q, F(q), F^2(q), R_eff, V, V_r ],
        ...
    ]

Parameters are *key:value* pairs, where key is one of the parameters of the
model and value is the value to use for the test. Any parameters not given
in the parameter list will take on the default parameter value.
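
For example, a hypothetical test entry for a model with a *radius*
parameter, checking I(q) at q=0.1 against a made-up expected value
(all other parameters left at their defaults), would look like::

    tests = [[{'radius': 30.0}, 0.1, 0.02061]]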

Precision defaults to 5 digits (relative).
"""
from __future__ import print_function

import argparse
import sys
import unittest
import traceback

try:
    from StringIO import StringIO
except ImportError:
    # StringIO.StringIO was renamed to io.StringIO in Python 3.
    # Note: io.StringIO exists in Python 2, but it uses unicode rather than str.
    from io import StringIO

import numpy as np  # type: ignore

from .core import list_models, load_model_info, build_model
from .direct_model import call_kernel, call_Fq
from .exception import annotate_exception
from .modelinfo import expand_pars
from .kernelcl import use_opencl
from .kernelcuda import use_cuda
from . import product

# pylint: disable=unused-import
try:
    from typing import List, Iterator, Callable, Dict, Any, Tuple
except ImportError:
    pass
else:
    from .modelinfo import ParameterTable, ParameterSet, TestCondition, ModelInfo
    from .kernel import KernelModel
# pylint: enable=unused-import

def make_suite(loaders, models):
    # type: (List[str], List[str]) -> unittest.TestSuite
    """
    Construct the pyunit test suite.

    *loaders* is the list of kernel drivers to use (dll, opencl or cuda).
    For python models the python driver is always used.

    *models* is the list of models to test, or *["all"]* to test all models.
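
    For example, a suite that runs the sphere and cylinder tests on the
    dll engine only (an illustrative call) could be built with::

        suite = make_suite(['dll'], ['sphere', 'cylinder'])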
    """
    suite = unittest.TestSuite()

    try:
        # See if the first model parses as a model group
        group = list_models(models[0])
        skip = models[1:]
        models = group
    except Exception:
        skip = []
    for model_name in models:
        if model_name not in skip:
            model_info = load_model_info(model_name)
            _add_model_to_suite(loaders, suite, model_info)

    return suite

def _add_model_to_suite(loaders, suite, model_info):
    # type: (List[str], unittest.TestSuite, ModelInfo) -> None
    ModelTestCase = _hide_model_case_from_nose()

    #print('------')
    #print('found tests in', model_name)
    #print('------')

    # If the model is implemented in python, use the dll loader to call the
    # python kernel; don't try to call the cl kernel since it will not be
    # available in some environments.
    is_py = callable(model_info.Iq)

    # Some OpenCL drivers seem to be flaky, and are not producing the
    # expected result. Since we don't have known test values yet for
    # all of our models, we are instead going to compare the results
    # for the 'smoke test' (that is, evaluation at q=0.1 for the default
    # parameters just to see that the model runs to completion) between
    # the OpenCL and the DLL. To do this, we define a 'stash' which is
    # shared between OpenCL and DLL tests. This is just a list. If the
    # list is empty (which it will be when DLL runs, if the DLL runs
    # first), then the results are appended to the list. If the list
    # is not empty (which it will be when OpenCL runs second), the results
    # are compared to the results stored in the first element of the list.
    # This is a horrible stateful hack which only makes sense because the
    # test suite is thrown away after being run once.
    stash = []

    if is_py:  # kernel implemented in python
        test_name = "%s-python" % model_info.name
        test_method_name = "test_%s_python" % model_info.id
        test = ModelTestCase(test_name, model_info,
                             test_method_name,
                             platform="dll",  # so the python kernel is used
                             dtype="double",
                             stash=stash)
        suite.addTest(test)
    else:  # kernel implemented in C

        # test using dll if desired
        if 'dll' in loaders or not use_opencl():
            test_name = "%s-dll" % model_info.name
            test_method_name = "test_%s_dll" % model_info.id
            test = ModelTestCase(test_name, model_info,
                                 test_method_name,
                                 platform="dll",
                                 dtype="double",
                                 stash=stash)
            suite.addTest(test)

        # test using opencl if desired and available
        if 'opencl' in loaders and use_opencl():
            test_name = "%s-opencl" % model_info.name
            test_method_name = "test_%s_opencl" % model_info.id
            # Use dtype=None so that models that are only correct for
            # double precision are not tested using single precision.
            # The choice is determined by the presence of *single=False*
            # in the model file.
            test = ModelTestCase(test_name, model_info,
                                 test_method_name,
                                 platform="ocl", dtype=None,
                                 stash=stash)
            #print("defining", test_name)
            suite.addTest(test)

        # test using cuda if desired and available
        if 'cuda' in loaders and use_cuda():
            test_name = "%s-cuda" % model_info.name
            test_method_name = "test_%s_cuda" % model_info.id
            # Use dtype=None so that models that are only correct for
            # double precision are not tested using single precision.
            # The choice is determined by the presence of *single=False*
            # in the model file.
            test = ModelTestCase(test_name, model_info,
                                 test_method_name,
                                 platform="cuda", dtype=None,
                                 stash=stash)
            #print("defining", test_name)
            suite.addTest(test)


def _hide_model_case_from_nose():
    # type: () -> type
    class ModelTestCase(unittest.TestCase):
        """
        Test suite for a particular model with a particular kernel driver.

        The test suite runs a simple smoke test to make sure the model
        functions, then runs the list of tests at the bottom of the model
        description file.
        """
        def __init__(self, test_name, model_info, test_method_name,
                     platform, dtype, stash):
            # type: (str, ModelInfo, str, str, DType, List[Any]) -> None
            self.test_name = test_name
            self.info = model_info
            self.platform = platform
            self.dtype = dtype
            self.stash = stash  # container for the results of the first run

            setattr(self, test_method_name, self.run_all)
            unittest.TestCase.__init__(self, test_method_name)

        def run_all(self):
            # type: () -> None
            """
            Run all the tests in the test suite, including smoke tests.
            """
            smoke_tests = [
                # test validity at reasonable values
                ({}, 0.1, None),
                ({}, (0.1, 0.1), None),
                # test validity at q = 0
                #({}, 0.0, None),
                #({}, (0.0, 0.0), None),
                # test vector form
                ({}, [0.001, 0.01, 0.1], [None]*3),
                ({}, [(0.1, 0.1)]*2, [None]*2),
                # test that Fq will run, and return R_eff, V, V_r
                ({}, 0.1, None, None, None, None, None),
            ]
            tests = smoke_tests
            #tests = []
            if self.info.tests is not None:
                tests += self.info.tests
            S_tests = [test for test in tests if '@S' in test[0]]
            P_tests = [test for test in tests if '@S' not in test[0]]
            try:
                model = build_model(self.info, dtype=self.dtype,
                                    platform=self.platform)
                results = [self.run_one(model, test) for test in P_tests]
                for test in S_tests:
                    # pull the S model name out of the test defn
                    pars = test[0].copy()
                    s_name = pars.pop('@S')
                    ps_test = [pars] + list(test[1:])
                    #print("PS TEST PARAMS!!!", ps_test)
                    # build the P@S model
                    s_info = load_model_info(s_name)
                    ps_info = product.make_product_info(self.info, s_info)
                    ps_model = build_model(ps_info, dtype=self.dtype,
                                           platform=self.platform)
                    # run the tests
                    #self.info = ps_model.info
                    #print("SELF.INFO PARAMS!!!", [p.id for p in self.info.parameters.call_parameters])
                    #print("PS MODEL PARAMETERS:", [p.id for p in ps_model.info.parameters.call_parameters])
                    results.append(self.run_one(ps_model, ps_test))

                if self.stash:
                    for test, target, actual in zip(tests, self.stash[0], results):
                        assert np.all(abs(target-actual) < 5e-5*abs(actual)), \
                            ("GPU/CPU comparison expected %s but got %s for %s"
                             % (target, actual, test[0]))
                else:
                    self.stash.append(results)

                # Check for missing tests. Only do so for the "dll" tests
                # to reduce noise from both opencl and cuda, and because
                # python kernels use platform="dll".
                if self.platform == "dll":
                    missing = []
                    ## Uncomment the following to require test cases
                    #missing = self._find_missing_tests()
                    if missing:
                        raise ValueError("Missing tests for " + ", ".join(missing))

            except:
                annotate_exception(self.test_name)
                raise

        def _find_missing_tests(self):
            # type: () -> List[str]
            """Return a list of the 1D/2D test types the model is missing."""
            model_has_1D = True
            model_has_2D = any(p.type == 'orientation'
                               for p in self.info.parameters.kernel_parameters)

            # Lists of tests that have a result that is not None
            single = [test for test in self.info.tests
                      if not isinstance(test[2], list) and test[2] is not None]
            tests_has_1D_single = any(isinstance(test[1], float) for test in single)
            tests_has_2D_single = any(isinstance(test[1], tuple) for test in single)

            multiple = [test for test in self.info.tests
                        if isinstance(test[2], list)
                        and not all(result is None for result in test[2])]
            tests_has_1D_multiple = any(isinstance(test[1][0], float)
                                        for test in multiple)
            tests_has_2D_multiple = any(isinstance(test[1][0], tuple)
                                        for test in multiple)

            missing = []
            if model_has_1D and not (tests_has_1D_single or tests_has_1D_multiple):
                missing.append("1D")
            if model_has_2D and not (tests_has_2D_single or tests_has_2D_multiple):
                missing.append("2D")

            return missing

        def run_one(self, model, test):
            # type: (KernelModel, TestCondition) -> Any
            """Run a single test case, returning the computed I(q) or F^2(q)."""
            user_pars, x, y = test[:3]
            #print("PS MODEL PARAMETERS:", [p.id for p in model.info.parameters.call_parameters])
            pars = expand_pars(model.info.parameters, user_pars)
            invalid = invalid_pars(model.info.parameters, pars)
            if invalid:
                raise ValueError("Unknown parameters in test: " + ", ".join(invalid))

            if not isinstance(y, list):
                y = [y]
            if not isinstance(x, list):
                x = [x]

            self.assertEqual(len(y), len(x))

            if isinstance(x[0], tuple):
                qx, qy = zip(*x)
                q_vectors = [np.array(qx), np.array(qy)]
            else:
                q_vectors = [np.array(x)]

            kernel = model.make_kernel(q_vectors)
            if len(test) == 3:
                actual = call_kernel(kernel, pars)
                self._check_vectors(x, y, actual, 'I')
                return actual
            else:
                y1 = y
                y2 = test[3] if isinstance(test[3], list) else [test[3]]
                F, Fsq, R_eff, volume, volume_ratio = call_Fq(kernel, pars)
                if F is not None:  # F is None for models with Iq instead of Fq
                    self._check_vectors(x, y1, F, 'F')
                self._check_vectors(x, y2, Fsq, 'F^2')
                self._check_scalar(test[4], R_eff, 'R_eff')
                self._check_scalar(test[5], volume, 'volume')
                self._check_scalar(test[6], volume_ratio, 'form:shell ratio')
                return Fsq

        def _check_scalar(self, target, actual, name):
            self.assertTrue(is_near(target, actual, 5),
                            '%s: expected:%s; actual:%s'
                            % (name, target, actual))

        def _check_vectors(self, x, target, actual, name='I'):
            self.assertTrue(len(actual) > 0,
                            '%s(...) expected return' % name)
            if target is None:
                return
            self.assertEqual(len(target), len(actual),
                             '%s(...) returned wrong length' % name)
            for xi, yi, actual_yi in zip(x, target, actual):
                self.assertTrue(is_near(yi, actual_yi, 5),
                                '%s(%s): expected:%s; actual:%s'
                                % (name, xi, yi, actual_yi))

    return ModelTestCase

def invalid_pars(partable, pars):
    # type: (ParameterTable, Dict[str, float]) -> List[str]
    """
    Return a list of parameter names that are not part of the model.
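
    For example, given a hypothetical parameter table containing only
    *radius* and *sld*, polydispersity suffixes are accepted but unknown
    names are flagged::

        invalid_pars(partable, {'radius': 50, 'radius_pd': 0.2, 'bogus': 1})
        # => ['bogus']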
    """
    names = set(p.id for p in partable.call_parameters)
    invalid = []
    for par in sorted(pars.keys()):
        # Ignore the R_eff mode parameter when checking for valid parameters.
        # It is an allowed parameter for a model even though it does not exist
        # in the parameter table. The call_Fq() function pops it from the
        # parameter list and sends it directly to kernel.Fq().
        if par == product.RADIUS_MODE_ID:
            continue
        # Strip the polydispersity suffix, if any, and check that it is one
        # of the allowed forms (_pd, _pd_n, _pd_nsigma, _pd_type).
        parts = par.split('_pd')
        if len(parts) > 1 and parts[1] not in ("", "_n", "_nsigma", "_type"):
            invalid.append(par)
            continue
        if parts[0] not in names:
            invalid.append(par)
    return invalid


def is_near(target, actual, digits=5):
    # type: (float, float, int) -> bool
    """
    Returns true if *actual* is within *digits* significant digits of *target*.

    *target* zero and inf should match *actual* zero and inf. If you want to
    accept eps for zero, choose a value such as 1e-10, which must match up to
    +/- 1e-15 when *digits* is the default value of 5.

    If *target* is None, then just make sure that *actual* is not NaN.

    If *target* is NaN, make sure *actual* is NaN.
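
    For example, with the default of 5 digits (hypothetical values)::

        is_near(1.23456, 1.23457)  # True: agrees to 5 significant digits
        is_near(1.23456, 1.23567)  # False: differs in the 4th digit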
    """
    if target is None:
        # target is None => actual cannot be NaN
        return not np.isnan(actual)
    elif target == 0.:
        # target is 0. => actual must be 0.
        # Note: if small values should be accepted, test against eps instead.
        return actual == 0.
    elif np.isfinite(target):
        shift = np.ceil(np.log10(abs(target)))
        return abs(target-actual) < 1.5*10**(shift-digits)
    elif target == actual:
        # target is inf => actual must be inf of the same sign
        return True
    else:
        # target is NaN => actual must be NaN; a mismatched inf also lands
        # here and correctly fails this test.
        return np.isnan(target) and np.isnan(actual)

# CRUFT: old interface; should be deprecated and removed
def run_one(model_name):
    # type: (str) -> str
    """
    [Deprecated] Run the tests associated with *model_name*.

    Use the following instead::

        success, output = check_model(load_model_info(model_name))
    """
    # msg = "use check_model(model_info) rather than run_one(model_name)"
    # warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
    try:
        model_info = load_model_info(model_name)
    except Exception:
        output = traceback.format_exc()
        return output

    _, output = check_model(model_info)
    return output

def check_model(model_info):
    # type: (ModelInfo) -> Tuple[bool, str]
    """
    Run the tests for a single model, capturing the output.

    Returns the success status and the output string.
    """
    # Note that running main() directly did not work from within the
    # wxPython pycrust console. Instead of the results appearing in the
    # window they were printed to the underlying console.
    from unittest.runner import TextTestResult, _WritelnDecorator

    # Build an object to capture and print the test results
    stream = _WritelnDecorator(StringIO())  # Add writeln() method to stream
    verbosity = 2
    descriptions = True
    result = TextTestResult(stream, descriptions, verbosity)

    # Build a test suite containing just the model
    loaders = ['opencl' if use_opencl() else 'cuda' if use_cuda() else 'dll']
    suite = unittest.TestSuite()
    _add_model_to_suite(loaders, suite, model_info)

    # Warn if there are no user defined tests.
    # Note: the test suite constructed above only has one test in it, which
    # runs through some smoke tests to make sure the model runs, then runs
    # through the input-output pairs given in the model definition file. To
    # check if any such pairs are defined, therefore, we just need to check if
    # they are in the first test of the test suite. We do this with an
    # iterator since we don't have direct access to the list of tests in the
    # test suite.
    # In Qt5 suite.run() will clear all tests in the suite after running,
    # with no way of retaining them for the check below, so look for
    # user tests before running the suite.
    for test in suite:
        if not test.info.tests:
            stream.writeln("Note: %s has no user defined tests." % model_info.name)
        break
    else:
        stream.writeln("Note: no test suite created --- this should never happen")

    # Run the test suite
    suite.run(result)

    # Print the failures and errors
    for _, tb in result.errors:
        stream.writeln(tb)
    for _, tb in result.failures:
        stream.writeln(tb)

    output = stream.getvalue()
    stream.close()
    return result.wasSuccessful(), output


def model_tests():
    # type: () -> Iterator[Callable[[], None]]
    """
    Test runner visible to nosetests.

    Run "nosetests sasmodels" on the command line to invoke it.
    """
    loaders = ['dll']
    if use_opencl():
        loaders.append('opencl')
    if use_cuda():
        loaders.append('cuda')
    tests = make_suite(loaders, ['all'])
    def _build_test(test):
        # In order for nosetests to show the test name, wrap the test.run_all
        # instance method in a function that carries the test name, which
        # will be displayed when the test is run. Do this in a separate
        # function so that it properly captures *test* for tests that are
        # gathered now but run later. If done directly in the for loop, the
        # loop variable *test* would be shared amongst all the wrappers, and
        # we would end up repeatedly testing the last model (vesicle).

        # Note: in sasview sas.sasgui.perspectives.fitting.gpu_options
        # requires that the test.description field be set.
        wrap = lambda: test.run_all()
        wrap.description = test.test_name
        return wrap
        # The following would work with nosetests and pytest:
        #     return lambda name: test.run_all(), test.test_name

    for test in tests:
        yield _build_test(test)


def main():
    # type: () -> int
    """
    Run the tests given in *models*.

    Returns 0 on success or 1 if any tests fail.
    """
    try:
        from xmlrunner import XMLTestRunner as TestRunner
        test_args = {'output': 'logs'}
    except ImportError:
        from unittest import TextTestRunner as TestRunner
        test_args = {}

    parser = argparse.ArgumentParser(description="Test SasModels Models")
    parser.add_argument("-v", "--verbose", action="store_const",
                        default=1, const=2, help="Use verbose output")
    parser.add_argument("-e", "--engine", default="all",
                        help="Engines on which to run the test. "
                             "Valid values are opencl, cuda, dll, and all. "
                             "Defaults to all if no value is given.")
    parser.add_argument("models", nargs="*",
                        help="The names of the models to be tested. "
                             "If the first model is 'all', then all but the listed "
                             "models will be tested. See core.list_models() for "
                             "names of other groups, such as 'py' or 'single'.")
    opts = parser.parse_args()

    if opts.engine == "opencl":
        if not use_opencl():
            print("opencl is not available")
            return 1
        loaders = ['opencl']
    elif opts.engine == "dll":
        loaders = ["dll"]
    elif opts.engine == "cuda":
        if not use_cuda():
            print("cuda is not available")
            return 1
        loaders = ['cuda']
    elif opts.engine == "all":
        loaders = ['dll']
        if use_opencl():
            loaders.append('opencl')
        if use_cuda():
            loaders.append('cuda')
    else:
        print("unknown engine " + opts.engine)
        return 1

    runner = TestRunner(verbosity=opts.verbose, **test_args)
    result = runner.run(make_suite(loaders, opts.models))
    return 1 if result.failures or result.errors else 0


if __name__ == "__main__":
    sys.exit(main())