[a84a0ca] | 1 | # -*- coding: utf-8 -*- |
---|
| 2 | """ |
---|
| 3 | Run model unit tests. |
---|
| 4 | |
---|
| 5 | Usage:: |
---|
| 6 | |
---|
| 7 | python -m sasmodels.model_test [opencl|dll|opencl_and_dll] model1 model2 ... |
---|
| 8 | |
---|
| 9 | if model1 is 'all', then all except the remaining models will be tested |
---|
| 10 | |
---|
| 11 | Each model is tested using the default parameters at q=0.1, (qx, qy)=(0.1, 0.1), |
---|
| 12 | and the ER and VR are computed. The return values at these points are not |
---|
| 13 | considered. The test is only to verify that the models run to completion, |
---|
| 14 | and do not produce inf or NaN. |
---|
| 15 | |
---|
| 16 | Tests are defined with the *tests* attribute in the model.py file. *tests* |
---|
| 17 | is a list of individual tests to run, where each test consists of the |
---|
| 18 | parameter values for the test, the q-values and the expected results. For |
---|
| 19 | the effective radius test, the q-value should be 'ER'. For the VR test, |
---|
| 20 | the q-value should be 'VR'. For 1-D tests, either specify the q value or |
---|
| 21 | a list of q-values, and the corresponding I(q) value, or list of I(q) values. |
---|
| 22 | |
---|
| 23 | That is:: |
---|
| 24 | |
---|
| 25 | tests = [ |
---|
| 26 | [ {parameters}, q, I(q)], |
---|
| 27 | [ {parameters}, [q], [I(q)] ], |
---|
| 28 | [ {parameters}, [q1, q2, ...], [I(q1), I(q2), ...]], |
---|
| 29 | |
---|
| 30 |         [ {parameters}, (qx, qy), I(qx, qy)], |
---|
| 31 | [ {parameters}, [(qx1, qy1), (qx2, qy2), ...], |
---|
| 32 | [I(qx1, qy1), I(qx2, qy2), ...]], |
---|
| 33 | |
---|
| 34 | [ {parameters}, 'ER', ER(pars) ], |
---|
| 35 | [ {parameters}, 'VR', VR(pars) ], |
---|
| 36 | ... |
---|
| 37 | ] |
---|
| 38 | |
---|
| 39 | Parameters are *key:value* pairs, where key is one of the parameters of the |
---|
| 40 | model and value is the value to use for the test. Any parameters not given |
---|
| 41 | in the parameter list will take on the default parameter value. |
---|
| 42 | |
---|
| 43 | Precision defaults to 5 digits (relative). |
---|
| 44 | """ |
---|
| 45 | from __future__ import print_function |
---|
| 46 | |
---|
| 47 | import sys |
---|
| 48 | import unittest |
---|
| 49 | |
---|
[7ae2b7f] | 50 | import numpy as np # type: ignore |
---|
[a84a0ca] | 51 | |
---|
[897ca7f] | 52 | from . import core |
---|
| 53 | from .core import list_models, load_model_info, build_model |
---|
[40a87fa] | 54 | from .direct_model import call_kernel, call_ER, call_VR |
---|
[a84a0ca] | 55 | from .exception import annotate_exception |
---|
[4bfd277] | 56 | from .modelinfo import expand_pars |
---|
[a84a0ca] | 57 | |
---|
[e62a134] | 58 | try: |
---|
| 59 | from typing import List, Iterator, Callable |
---|
| 60 | except ImportError: |
---|
| 61 | pass |
---|
| 62 | else: |
---|
| 63 | from .modelinfo import ParameterTable, ParameterSet, TestCondition, ModelInfo |
---|
[0ff62d4] | 64 | from .kernel import KernelModel |
---|
[e62a134] | 65 | |
---|
[a84a0ca] | 66 | |
---|
def make_suite(loaders, models):
    # type: (List[str], List[str]) -> unittest.TestSuite
    """
    Construct the pyunit test suite.

    *loaders* is the list of kernel drivers to use, which is one of
    *["dll", "opencl"]*, *["dll"]* or *["opencl"]*.  For python models,
    the python driver is always used.

    *models* is the list of models to test, or *["all"]* to test all models.
    """
    ModelTestCase = _hide_model_case_from_nose()
    suite = unittest.TestSuite()

    # "all" as the first entry means test everything except the models
    # listed after it.
    if models[0] == 'all':
        skip = models[1:]
        models = list_models()
    else:
        skip = []

    for model_name in models:
        if model_name in skip:
            continue
        model_info = load_model_info(model_name)

        # Python-implemented kernels always run through the dll loader /
        # pykernel; don't try the opencl kernel since opencl will not be
        # available in some environments.
        is_py = callable(model_info.Iq)

        if is_py:
            # Kernel implemented in python: dll platform, double precision.
            test = ModelTestCase(
                "Model: %s, Kernel: python" % model_name,
                model_info,
                "test_%s_python" % model_info.id,
                platform="dll",
                dtype="double")
            suite.addTest(test)
        else:
            # Kernel implemented in C: test with opencl when requested
            # and available.
            if 'opencl' in loaders and core.HAVE_OPENCL:
                # dtype=None lets the model pick its precision, so models
                # marked *single=False* in the model file are not tested
                # in single precision.
                test = ModelTestCase(
                    "Model: %s, Kernel: OpenCL" % model_name,
                    model_info,
                    "test_%s_opencl" % model_info.id,
                    platform="ocl",
                    dtype=None)
                #print("defining", test_name)
                suite.addTest(test)

            # ... and with the compiled dll when requested.
            if 'dll' in loaders:
                test = ModelTestCase(
                    "Model: %s, Kernel: dll" % model_name,
                    model_info,
                    "test_%s_dll" % model_info.id,
                    platform="dll",
                    dtype="double")
                suite.addTest(test)

    return suite
---|
| 133 | |
---|
| 134 | |
---|
def _hide_model_case_from_nose():
    # type: () -> type
    """
    Build and return the ModelTestCase class.

    Wrapping the class definition in a factory keeps nose from discovering
    ModelTestCase itself as a test class; tests are instead added explicitly
    by make_suite().
    """
    class ModelTestCase(unittest.TestCase):
        """
        Test suit for a particular model with a particular kernel driver.

        The test suite runs a simple smoke test to make sure the model
        functions, then runs the list of tests at the bottom of the model
        description file.
        """
        def __init__(self, test_name, model_info, test_method_name,
                     platform, dtype):
            # type: (str, ModelInfo, str, str, DType) -> None
            self.test_name = test_name
            self.info = model_info
            self.platform = platform
            self.dtype = dtype

            # unittest.TestCase requires the named test method to exist on
            # the instance, so bind run_all under the requested name before
            # delegating to the base constructor.
            setattr(self, test_method_name, self.run_all)
            unittest.TestCase.__init__(self, test_method_name)

        def run_all(self):
            # type: () -> None
            """
            Run all the tests in the test suite, including smoke tests.
            """
            # Smoke tests use default parameters and a None expected value,
            # which run_one interprets as "just check for NaN".
            smoke_tests = [
                # test validity at reasonable values
                ({}, 0.1, None),
                ({}, (0.1, 0.1), None),
                # test validity at q = 0
                #({}, 0.0, None),
                #({}, (0.0, 0.0), None),
                # test vector form
                ({}, [0.1]*2, [None]*2),
                ({}, [(0.1, 0.1)]*2, [None]*2),
                # test that ER/VR will run if they exist
                ({}, 'ER', None),
                ({}, 'VR', None),
                ]

            tests = self.info.tests
            try:
                model = build_model(self.info, dtype=self.dtype,
                                    platform=self.platform)
                for test in smoke_tests + tests:
                    self.run_one(model, test)

                if not tests and self.platform == "dll":
                    ## Uncomment the following to make forgetting the test
                    ## values an error.  Only do so for the "dll" tests
                    ## to reduce noise from both opencl and dll, and because
                    ## python kernels use platform="dll".
                    #raise Exception("No test cases provided")
                    pass

            # NOTE(review): bare except is deliberate here — it tags the
            # exception with the test name and re-raises, so nothing is
            # swallowed (though KeyboardInterrupt is also annotated).
            except:
                annotate_exception(self.test_name)
                raise

        def run_one(self, model, test):
            # type: (KernelModel, TestCondition) -> None
            """Run a single test case."""
            # A test is (parameter overrides, q value(s), expected value(s));
            # missing parameters take the model defaults.
            user_pars, x, y = test
            pars = expand_pars(self.info.parameters, user_pars)

            # Normalize scalar q/I(q) to one-element lists so the rest of
            # the method can treat everything as a vector test.
            if not isinstance(y, list):
                y = [y]
            if not isinstance(x, list):
                x = [x]

            self.assertEqual(len(y), len(x))

            # Dispatch on the form of x: 'ER'/'VR' special tests, (qx, qy)
            # tuples for 2-D kernels, or plain q values for 1-D kernels.
            if x[0] == 'ER':
                actual = [call_ER(model.info, pars)]
            elif x[0] == 'VR':
                actual = [call_VR(model.info, pars)]
            elif isinstance(x[0], tuple):
                qx, qy = zip(*x)
                q_vectors = [np.array(qx), np.array(qy)]
                kernel = model.make_kernel(q_vectors)
                actual = call_kernel(kernel, pars)
            else:
                q_vectors = [np.array(x)]
                kernel = model.make_kernel(q_vectors)
                actual = call_kernel(kernel, pars)

            self.assertTrue(len(actual) > 0)
            self.assertEqual(len(y), len(actual))

            for xi, yi, actual_yi in zip(x, y, actual):
                if yi is None:
                    # smoke test --- make sure it runs and produces a value
                    self.assertTrue(not np.isnan(actual_yi),
                                    'invalid f(%s): %s' % (xi, actual_yi))
                elif np.isnan(yi):
                    # expected value is NaN; require the model to agree
                    self.assertTrue(np.isnan(actual_yi),
                                    'f(%s): expected:%s; actual:%s'
                                    % (xi, yi, actual_yi))
                else:
                    # is_near does not work for infinite values, so also test
                    # for exact values.  Note that this will not
                    self.assertTrue(yi == actual_yi or is_near(yi, actual_yi, 5),
                                    'f(%s); expected:%s; actual:%s'
                                    % (xi, yi, actual_yi))

    return ModelTestCase
---|
| 242 | |
---|
def is_near(target, actual, digits=5):
    # type: (float, float, int) -> bool
    """
    Returns true if *actual* is within *digits* significant digits of *target*.

    A *target* of exactly zero has no magnitude to scale by, so in that case
    *actual* is compared against the absolute tolerance 1.5*10**-digits
    rather than raising a math domain error from log10(0).
    """
    import math
    tolerance = 1.5 * 10**-digits
    if target == 0.:
        # log10(0) is undefined; fall back to an absolute comparison.
        return abs(actual) < tolerance
    # Scale by the power of ten just above |target| so the difference is
    # measured relative to the number of significant digits requested.
    shift = 10**math.ceil(math.log10(abs(target)))
    return abs(target-actual)/shift < tolerance
---|
| 251 | |
---|
def run_one(model):
    # type: (str) -> None
    """
    Run the tests for a single model, printing the results to stdout.

    *model* may be a python file, which is handy for checking user defined
    plugin models.
    """
    # Running main() directly did not work from within the wxPython pycrust
    # console (results were printed to the underlying console instead of the
    # window), so drive the suite manually through a captured stream.
    from unittest.runner import TextTestResult, _WritelnDecorator

    # Result object that echoes everything to stdout.
    stream = _WritelnDecorator(sys.stdout)  # Add writeln() method to stream
    result = TextTestResult(stream, descriptions=True, verbosity=2)

    # Build a test suite containing just the requested model, using opencl.
    try:
        suite = make_suite(['opencl'], [model])
    except Exception:
        import traceback
        stream.writeln(traceback.format_exc())
        return

    # Run the test suite
    suite.run(result)

    # Print the tracebacks for any failures and errors.
    for _, tb in result.errors:
        stream.writeln(tb)
    for _, tb in result.failures:
        stream.writeln(tb)

    # Check if there are user defined tests.
    # Yes, it is naughty to peek into the structure of the test suite, and
    # to assume that it contains only one test.
    if not suite._tests[0].info.tests:
        stream.writeln("Note: %s has no user defined tests."%model)
---|
| 295 | |
---|
| 296 | |
---|
| 297 | def main(*models): |
---|
| 298 | # type: (*str) -> int |
---|
| 299 | """ |
---|
| 300 | Run tests given is models. |
---|
[a84a0ca] | 301 | |
---|
| 302 | Returns 0 if success or 1 if any tests fail. |
---|
| 303 | """ |
---|
[3434240] | 304 | try: |
---|
[81cd2a2] | 305 | from xmlrunner import XMLTestRunner as TestRunner |
---|
[40a87fa] | 306 | test_args = {'output': 'logs'} |
---|
[81cd2a2] | 307 | except ImportError: |
---|
| 308 | from unittest import TextTestRunner as TestRunner |
---|
[40a87fa] | 309 | test_args = {} |
---|
[a84a0ca] | 310 | |
---|
| 311 | if models and models[0] == '-v': |
---|
| 312 | verbosity = 2 |
---|
| 313 | models = models[1:] |
---|
| 314 | else: |
---|
| 315 | verbosity = 1 |
---|
| 316 | if models and models[0] == 'opencl': |
---|
[897ca7f] | 317 | if not core.HAVE_OPENCL: |
---|
[a84a0ca] | 318 | print("opencl is not available") |
---|
| 319 | return 1 |
---|
| 320 | loaders = ['opencl'] |
---|
| 321 | models = models[1:] |
---|
| 322 | elif models and models[0] == 'dll': |
---|
| 323 | # TODO: test if compiler is available? |
---|
| 324 | loaders = ['dll'] |
---|
| 325 | models = models[1:] |
---|
| 326 | elif models and models[0] == 'opencl_and_dll': |
---|
| 327 | loaders = ['opencl', 'dll'] |
---|
| 328 | models = models[1:] |
---|
| 329 | else: |
---|
| 330 | loaders = ['opencl', 'dll'] |
---|
| 331 | if not models: |
---|
| 332 | print("""\ |
---|
| 333 | usage: |
---|
| 334 | python -m sasmodels.model_test [-v] [opencl|dll] model1 model2 ... |
---|
| 335 | |
---|
[e62a134] | 336 | If -v is included on the command line, then use verbose output. |
---|
[a84a0ca] | 337 | |
---|
| 338 | If neither opencl nor dll is specified, then models will be tested with |
---|
[e62a134] | 339 | both OpenCL and dll; the compute target is ignored for pure python models. |
---|
[a84a0ca] | 340 | |
---|
| 341 | If model1 is 'all', then all except the remaining models will be tested. |
---|
| 342 | |
---|
| 343 | """) |
---|
| 344 | |
---|
| 345 | return 1 |
---|
| 346 | |
---|
[81cd2a2] | 347 | runner = TestRunner(verbosity=verbosity, **test_args) |
---|
[a84a0ca] | 348 | result = runner.run(make_suite(loaders, models)) |
---|
| 349 | return 1 if result.failures or result.errors else 0 |
---|
| 350 | |
---|
| 351 | |
---|
def model_tests():
    # type: () -> Iterator[Callable[[], None]]
    """
    Test runner visible to nosetests.

    Run "nosetests sasmodels" on the command line to invoke it.
    """
    # Yield each test's run_all bound method so nose executes them one by one.
    for test_case in make_suite(['opencl', 'dll'], ['all']):
        yield test_case.run_all
---|
[a84a0ca] | 362 | |
---|
| 363 | |
---|
if __name__ == "__main__":
    # Propagate main()'s pass/fail status (0/1) as the process exit code.
    sys.exit(main(*sys.argv[1:]))
---|