source: sasmodels/sasmodels/model_test.py @ b3af1c2

Last change on this file since b3af1c2 was f354e46, checked in by Paul Kienzle <pkienzle@…>, 6 years ago

yield tests: need function scope to capture context correctly

# -*- coding: utf-8 -*-
"""
Run model unit tests.

Usage::

    python -m sasmodels.model_test [opencl|dll|opencl_and_dll] model1 model2 ...

    if model1 is 'all', then all models except those listed after it will be tested

Each model is tested using the default parameters at q=0.1, (qx, qy)=(0.1, 0.1),
and the ER and VR are computed.  The return values at these points are not
considered.  The test is only to verify that the models run to completion,
and do not produce inf or NaN.

Tests are defined with the *tests* attribute in the model.py file.  *tests*
is a list of individual tests to run, where each test consists of the
parameter values for the test, the q-values and the expected results.  For
the effective radius test, the q-value should be 'ER'.  For the VR test,
the q-value should be 'VR'.  For 1-D tests, specify either a single q value
or a list of q-values, together with the corresponding I(q) value or list
of I(q) values.

That is::

    tests = [
        [ {parameters}, q, I(q)],
        [ {parameters}, [q], [I(q)] ],
        [ {parameters}, [q1, q2, ...], [I(q1), I(q2), ...]],

        [ {parameters}, (qx, qy), I(qx, qy)],
        [ {parameters}, [(qx1, qy1), (qx2, qy2), ...],
                        [I(qx1, qy1), I(qx2, qy2), ...]],

        [ {parameters}, 'ER', ER(pars) ],
        [ {parameters}, 'VR', VR(pars) ],
        ...
    ]

Parameters are *key:value* pairs, where key is one of the parameters of the
model and value is the value to use for the test.  Any parameters not given
in the parameter list will take on the default parameter value.
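
For example, a test entry for a hypothetical model with a *radius*
parameter might look like this (the expected values shown here are
placeholders, not real results)::

    tests = [
        [{'radius': 20.0, 'scale': 2.0}, 0.1, 0.00123],
        [{'radius': 20.0}, [0.01, 0.1], [1.23, 0.00123]],
    ]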

Precision defaults to 5 digits (relative).
"""
from __future__ import print_function

import sys
import unittest

try:
    from StringIO import StringIO
except ImportError:
    # StringIO.StringIO renamed to io.StringIO in Python 3
    # Note: io.StringIO exists in python 2, but using unicode instead of str
    from io import StringIO

import numpy as np  # type: ignore

from . import core
from .core import list_models, load_model_info, build_model
from .direct_model import call_kernel, call_ER, call_VR
from .exception import annotate_exception
from .modelinfo import expand_pars

# pylint: disable=unused-import
try:
    from typing import List, Iterator, Callable, Any, Dict
except ImportError:
    pass
else:
    from .modelinfo import ParameterTable, ParameterSet, TestCondition, ModelInfo
    from .kernel import KernelModel
# pylint: enable=unused-import


def make_suite(loaders, models):
    # type: (List[str], List[str]) -> unittest.TestSuite
    """
    Construct the pyunit test suite.

    *loaders* is the list of kernel drivers to use, which is one of
    *["dll", "opencl"]*, *["dll"]* or *["opencl"]*.  For python models,
    the python driver is always used.

    *models* is the list of models to test, or *["all"]* to test all models.
    """
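    # Example usage (model names are illustrative only):
    # make_suite(['dll'], ['all']) builds tests for every model, while
    # make_suite(['dll'], ['all', 'sphere']) builds tests for everything
    # except sphere.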
    ModelTestCase = _hide_model_case_from_nose()
    suite = unittest.TestSuite()

    if models[0] in core.KINDS:
        skip = models[1:]
        models = list_models(models[0])
    else:
        skip = []
    for model_name in models:
        if model_name in skip:
            continue
        model_info = load_model_info(model_name)

        #print('------')
        #print('found tests in', model_name)
        #print('------')

        # If the model is implemented in python then use the dll loader to
        # call the python kernel; don't try to call the OpenCL kernel since
        # it will not be available in some environments.
        is_py = callable(model_info.Iq)

        # Some OpenCL drivers seem to be flaky, and are not producing the
        # expected result.  Since we don't have known test values yet for
        # all of our models, we are instead going to compare the results
        # for the 'smoke test' (that is, evaluation at q=0.1 for the default
        # parameters just to see that the model runs to completion) between
        # the OpenCL and the DLL.  To do this, we define a 'stash' which is
        # shared between OpenCL and DLL tests.  This is just a list.  If the
        # list is empty (which it will be when DLL runs, if the DLL runs
        # first), then the results are appended to the list.  If the list
        # is not empty (which it will be when OpenCL runs second), the results
        # are compared to the results stored in the first element of the list.
        # This is a horrible stateful hack which only makes sense because the
        # test suite is thrown away after being run once.
        stash = []

        if is_py:  # kernel implemented in python
            test_name = "%s-python"%model_name
            test_method_name = "test_%s_python" % model_info.id
            test = ModelTestCase(test_name, model_info,
                                 test_method_name,
                                 platform="dll",  # so that the python kernel is used
                                 dtype="double",
                                 stash=stash)
            suite.addTest(test)
        else:   # kernel implemented in C

            # test using dll if desired
            if 'dll' in loaders or not core.HAVE_OPENCL:
                test_name = "%s-dll"%model_name
                test_method_name = "test_%s_dll" % model_info.id
                test = ModelTestCase(test_name, model_info,
                                     test_method_name,
                                     platform="dll",
                                     dtype="double",
                                     stash=stash)
                suite.addTest(test)

            # test using opencl if desired and available
            if 'opencl' in loaders and core.HAVE_OPENCL:
                test_name = "%s-opencl"%model_name
                test_method_name = "test_%s_opencl" % model_info.id
                # Using dtype=None so that the models that are only
                # correct for double precision are not tested using
                # single precision.  The choice is determined by the
                # presence of *single=False* in the model file.
                test = ModelTestCase(test_name, model_info,
                                     test_method_name,
                                     platform="ocl", dtype=None,
                                     stash=stash)
                #print("defining", test_name)
                suite.addTest(test)

    return suite

def _hide_model_case_from_nose():
    # type: () -> type
    class ModelTestCase(unittest.TestCase):
        """
        Test suite for a particular model with a particular kernel driver.

        The test suite runs a simple smoke test to make sure the model
        functions, then runs the list of tests at the bottom of the model
        description file.
        """
        def __init__(self, test_name, model_info, test_method_name,
                     platform, dtype, stash):
            # type: (str, ModelInfo, str, str, DType, List[Any]) -> None
            self.test_name = test_name
            self.info = model_info
            self.platform = platform
            self.dtype = dtype
            self.stash = stash  # container for the results of the first run

            setattr(self, test_method_name, self.run_all)
            unittest.TestCase.__init__(self, test_method_name)

        def run_all(self):
            # type: () -> None
            """
            Run all the tests in the test suite, including smoke tests.
            """
            smoke_tests = [
                # test validity at reasonable values
                ({}, 0.1, None),
                ({}, (0.1, 0.1), None),
                # test validity at q = 0
                #({}, 0.0, None),
                #({}, (0.0, 0.0), None),
                # test vector form
                ({}, [0.001, 0.01, 0.1], [None]*3),
                ({}, [(0.1, 0.1)]*2, [None]*2),
                # test that ER/VR will run if they exist
                ({}, 'ER', None),
                ({}, 'VR', None),
                ]
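            # Each entry above is (parameter overrides, q input, expected result);
            # an expected value of None means "just check that the result is not NaN".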
            tests = smoke_tests
            #tests = []
            if self.info.tests is not None:
                tests += self.info.tests
            try:
                model = build_model(self.info, dtype=self.dtype,
                                    platform=self.platform)
                results = [self.run_one(model, test) for test in tests]
                if self.stash:
                    for test, target, actual in zip(tests, self.stash[0], results):
                        assert np.all(abs(target-actual) < 5e-5*abs(actual)), \
                            ("GPU/CPU comparison expected %s but got %s for %s"
                             % (target, actual, test[0]))
                else:
                    self.stash.append(results)

                # Check for missing tests.  Only do so for the "dll" tests
                # to reduce noise from both opencl and dll, and because
                # python kernels use platform="dll".
                if self.platform == "dll":
                    missing = []
                    ## Uncomment the following to require test cases
                    #missing = self._find_missing_tests()
                    if missing:
                        raise ValueError("Missing tests for "+", ".join(missing))

            except:
                annotate_exception(self.test_name)
                raise

        def _find_missing_tests(self):
            # type: () -> List[str]
            """make sure there are 1D, 2D, ER and VR tests as appropriate"""
            model_has_VR = callable(self.info.VR)
            model_has_ER = callable(self.info.ER)
            model_has_1D = True
            model_has_2D = any(p.type == 'orientation'
                               for p in self.info.parameters.kernel_parameters)

            # Lists of tests that have a result that is not None
            single = [test for test in self.info.tests
                      if not isinstance(test[2], list) and test[2] is not None]
            tests_has_VR = any(test[1] == 'VR' for test in single)
            tests_has_ER = any(test[1] == 'ER' for test in single)
            tests_has_1D_single = any(isinstance(test[1], float) for test in single)
            tests_has_2D_single = any(isinstance(test[1], tuple) for test in single)

            multiple = [test for test in self.info.tests
                        if isinstance(test[2], list)
                        and not all(result is None for result in test[2])]
            tests_has_1D_multiple = any(isinstance(test[1][0], float)
                                        for test in multiple)
            tests_has_2D_multiple = any(isinstance(test[1][0], tuple)
                                        for test in multiple)

            missing = []
            if model_has_VR and not tests_has_VR:
                missing.append("VR")
            if model_has_ER and not tests_has_ER:
                missing.append("ER")
            if model_has_1D and not (tests_has_1D_single or tests_has_1D_multiple):
                missing.append("1D")
            if model_has_2D and not (tests_has_2D_single or tests_has_2D_multiple):
                missing.append("2D")

            return missing

        def run_one(self, model, test):
            # type: (KernelModel, TestCondition) -> np.ndarray
            """Run a single test case."""
            user_pars, x, y = test
            pars = expand_pars(self.info.parameters, user_pars)
            invalid = invalid_pars(self.info.parameters, pars)
            if invalid:
                raise ValueError("Unknown parameters in test: " + ", ".join(invalid))

            if not isinstance(y, list):
                y = [y]
            if not isinstance(x, list):
                x = [x]

            self.assertEqual(len(y), len(x))

            if x[0] == 'ER':
                actual = np.array([call_ER(model.info, pars)])
            elif x[0] == 'VR':
                actual = np.array([call_VR(model.info, pars)])
            elif isinstance(x[0], tuple):
                qx, qy = zip(*x)
                q_vectors = [np.array(qx), np.array(qy)]
                kernel = model.make_kernel(q_vectors)
                actual = call_kernel(kernel, pars)
            else:
                q_vectors = [np.array(x)]
                kernel = model.make_kernel(q_vectors)
                actual = call_kernel(kernel, pars)

            self.assertTrue(len(actual) > 0)
            self.assertEqual(len(y), len(actual))

            for xi, yi, actual_yi in zip(x, y, actual):
                if yi is None:
                    # smoke test --- make sure it runs and produces a value
                    self.assertTrue(not np.isnan(actual_yi),
                                    'invalid f(%s): %s' % (xi, actual_yi))
                elif np.isnan(yi):
                    self.assertTrue(np.isnan(actual_yi),
                                    'f(%s): expected:%s; actual:%s'
                                    % (xi, yi, actual_yi))
                else:
                    # is_near does not work for infinite values, so also test
                    # for exact values.  Note that this will not
                    self.assertTrue(yi == actual_yi or is_near(yi, actual_yi, 5),
                                    'f(%s); expected:%s; actual:%s'
                                    % (xi, yi, actual_yi))
            return actual

    return ModelTestCase

def invalid_pars(partable, pars):
    # type: (ParameterTable, Dict[str, float]) -> List[str]
    """
    Return a list of parameter names that are not part of the model.
    """
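    # Accept a bare parameter name or one of its polydispersity variants.
    # For a hypothetical 'radius' parameter this means 'radius', 'radius_pd',
    # 'radius_pd_n', 'radius_pd_nsigma' and 'radius_pd_type' are all valid,
    # while a misspelled name such as 'raduis' is reported as invalid.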
    names = set(p.id for p in partable.call_parameters)
    invalid = []
    for par in sorted(pars.keys()):
        parts = par.split('_pd')
        if len(parts) > 1 and parts[1] not in ("", "_n", "_nsigma", "_type"):
            invalid.append(par)
            continue
        if parts[0] not in names:
            invalid.append(par)
    return invalid


def is_near(target, actual, digits=5):
    # type: (float, float, int) -> bool
    """
    Returns true if *actual* is within *digits* significant digits of *target*.
    """
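    # Worked example (illustrative numbers): for target = 1.234e-5 the shift
    # is 10**ceil(log10(1.234e-5)) = 1e-4, so with digits=5 any actual value
    # within 1.5e-9 of the target counts as near.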
    import math
    shift = 10**math.ceil(math.log10(abs(target)))
    return abs(target-actual)/shift < 1.5*10**-digits

def run_one(model):
    # type: (str) -> str
    """
    Run the tests for a single model, printing the results to stdout.

    *model* can be a python file, which is handy for checking user defined
    plugin models.
    """
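    # Typical usage (the model name and path below are examples only):
    #     print(run_one('sphere'))
    #     print(run_one('./my_plugin_model.py'))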
    # Note that running main() directly did not work from within the
    # wxPython pycrust console.  Instead of the results appearing in the
    # window they were printed to the underlying console.
    from unittest.runner import TextTestResult, _WritelnDecorator

    # Build an object to capture and print the test results
    stream = _WritelnDecorator(StringIO())  # Add writeln() method to stream
    verbosity = 2
    descriptions = True
    result = TextTestResult(stream, descriptions, verbosity)

    # Build a test suite containing just the model
    loaders = ['opencl'] if core.HAVE_OPENCL else ['dll']
    models = [model]
    try:
        suite = make_suite(loaders, models)
    except Exception:
        import traceback
        stream.writeln(traceback.format_exc())
        return
    # Run the test suite
    suite.run(result)

    # Print the failures and errors
    for _, tb in result.errors:
        stream.writeln(tb)
    for _, tb in result.failures:
        stream.writeln(tb)

    # Warn if there are no user defined tests.
    # Note: the test suite constructed above only has one test in it, which
    # runs through some smoke tests to make sure the model runs, then runs
    # through the input-output pairs given in the model definition file.  To
    # check if any such pairs are defined, therefore, we just need to check if
    # they are in the first test of the test suite.  We do this with an
    # iterator since we don't have direct access to the list of tests in the
    # test suite.
    for test in suite:
        if not test.info.tests:
            stream.writeln("Note: %s has no user defined tests."%model)
        break
    else:
        stream.writeln("Note: no test suite created --- this should never happen")

    output = stream.getvalue()
    stream.close()
    return output


def main(*models):
    # type: (*str) -> int
    """
    Run tests for the given models.

    Returns 0 if success or 1 if any tests fail.
    """
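    # Example invocations (model names are examples only):
    #     main('-v', 'dll', 'sphere')       # verbose output, dll engine only
    #     main('opencl_and_dll', 'all')     # compare OpenCL and DLL for all models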
    try:
        from xmlrunner import XMLTestRunner as TestRunner
        test_args = {'output': 'logs'}
    except ImportError:
        from unittest import TextTestRunner as TestRunner
        test_args = {}

    if models and models[0] == '-v':
        verbosity = 2
        models = models[1:]
    else:
        verbosity = 1
    if models and models[0] == 'opencl':
        if not core.HAVE_OPENCL:
            print("opencl is not available")
            return 1
        loaders = ['opencl']
        models = models[1:]
    elif models and models[0] == 'dll':
        # TODO: test if compiler is available?
        loaders = ['dll']
        models = models[1:]
    elif models and models[0] == 'opencl_and_dll':
        loaders = ['opencl', 'dll'] if core.HAVE_OPENCL else ['dll']
        models = models[1:]
    else:
        loaders = ['opencl', 'dll'] if core.HAVE_OPENCL else ['dll']
    if not models:
        print("""\
usage:
  python -m sasmodels.model_test [-v] [opencl|dll] model1 model2 ...

If -v is included on the command line, then use verbose output.

If neither opencl nor dll is specified, then models will be tested with
both OpenCL and dll; the compute target is ignored for pure python models.

If model1 is 'all', then all models except those listed after it will be tested.

""")

        return 1

    runner = TestRunner(verbosity=verbosity, **test_args)
    result = runner.run(make_suite(loaders, models))
    return 1 if result.failures or result.errors else 0


def model_tests():
    # type: () -> Iterator[Callable[[], None]]
    """
    Test runner visible to nosetests.

    Run "nosetests sasmodels" on the command line to invoke it.
    """
    loaders = ['opencl', 'dll'] if core.HAVE_OPENCL else ['dll']
    tests = make_suite(loaders, ['all'])
    def build_test(test):
        # In order for nose to show the test name, wrap the test.run_all
        # method in a function that takes the test name as a parameter,
        # which will be displayed when the test is run.  Do this in a
        # separate function so that it properly captures the context for
        # tests that are captured now but run later.  If done directly in
        # the for loop, then the loop variable test would be shared amongst
        # all the tests, and we would be repeatedly testing vesicle.
        return lambda name: test.run_all(), test.test_name
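    # nose runs each yielded (callable, argument) pair as callable(argument);
    # the argument (the test name) is what appears in the test report.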
    for test in tests:
        yield build_test(test)


if __name__ == "__main__":
    sys.exit(main(*sys.argv[1:]))