source: sasmodels/sasmodels/model_test.py @ 2d81cfe

Last change: 2d81cfe, checked in by Paul Kienzle <pkienzle@…>, 6 years ago (lint)

# -*- coding: utf-8 -*-
"""
Run model unit tests.

Usage::

    python -m sasmodels.model_test [opencl|dll|opencl_and_dll] model1 model2 ...

    if model1 is 'all', then all models except those listed after it will be tested

Each model is tested using the default parameters at q=0.1, (qx, qy)=(0.1, 0.1),
and the ER and VR are computed.  The return values at these points are not
considered.  The test is only to verify that the models run to completion,
and do not produce inf or NaN.

Tests are defined with the *tests* attribute in the model.py file.  *tests*
is a list of individual tests to run, where each test consists of the
parameter values for the test, the q-values and the expected results.  For
the effective radius test, the q-value should be 'ER'.  For the VR test,
the q-value should be 'VR'.  For 1-D tests, either specify the q value or
a list of q-values, and the corresponding I(q) value, or list of I(q) values.

That is::

    tests = [
        [ {parameters}, q, I(q)],
        [ {parameters}, [q], [I(q)] ],
        [ {parameters}, [q1, q2, ...], [I(q1), I(q2), ...]],

        [ {parameters}, (qx, qy), I(qx, qy)],
        [ {parameters}, [(qx1, qy1), (qx2, qy2), ...],
                        [I(qx1, qy1), I(qx2, qy2), ...]],

        [ {parameters}, 'ER', ER(pars) ],
        [ {parameters}, 'VR', VR(pars) ],
        ...
    ]

Parameters are *key:value* pairs, where key is one of the parameters of the
model and value is the value to use for the test.  Any parameters not given
in the parameter list will take on the default parameter value.

Precision defaults to 5 digits (relative).
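
For example, a hypothetical test block might look like the following (the
parameter names and expected values here are illustrative placeholders,
not taken from any actual model)::

    tests = [
        [{'radius': 50.0, 'sld': 1.0}, 0.1, 0.84],
        [{'radius': 50.0}, [0.01, 0.1], [5.6, 0.84]],
        [{}, 'ER', 50.0],
    ]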
44"""
45from __future__ import print_function
46
47import sys
48import unittest
49
[9826f82]50try:
51    from StringIO import StringIO
[bedb9b0]52except ImportError:
53    # StringIO.StringIO renamed to io.StringIO in Python 3
54    # Note: io.StringIO exists in python 2, but using unicode instead of str
[9826f82]55    from io import StringIO

import numpy as np  # type: ignore

from . import core
from .core import list_models, load_model_info, build_model
from .direct_model import call_kernel, call_ER, call_VR
from .exception import annotate_exception
from .modelinfo import expand_pars

# pylint: disable=unused-import
try:
    from typing import List, Iterator, Callable, Dict, Any
except ImportError:
    pass
else:
    from .modelinfo import ParameterTable, ParameterSet, TestCondition, ModelInfo
    from .kernel import KernelModel
# pylint: enable=unused-import


def make_suite(loaders, models):
    # type: (List[str], List[str]) -> unittest.TestSuite
    """
    Construct the pyunit test suite.

    *loaders* is the list of kernel drivers to use, which is one of
    *["dll", "opencl"]*, *["dll"]* or *["opencl"]*.  For python models,
    the python driver is always used.

    *models* is the list of models to test, or *["all"]* to test all models.
    """
    ModelTestCase = _hide_model_case_from_nose()
    suite = unittest.TestSuite()

    if models[0] in core.KINDS:
        skip = models[1:]
        models = list_models(models[0])
    else:
        skip = []
    for model_name in models:
        if model_name in skip:
            continue
        model_info = load_model_info(model_name)

        #print('------')
        #print('found tests in', model_name)
        #print('------')

        # if ispy then use the dll loader to call pykernel
        # don't try to call cl kernel since it will not be
        # available in some environments.
        is_py = callable(model_info.Iq)

        # Some OpenCL drivers seem to be flaky, and are not producing the
        # expected result.  Since we don't have known test values yet for
        # all of our models, we are instead going to compare the results
        # for the 'smoke test' (that is, evaluation at q=0.1 for the default
        # parameters just to see that the model runs to completion) between
        # the OpenCL and the DLL.  To do this, we define a 'stash' which is
        # shared between OpenCL and DLL tests.  This is just a list.  If the
        # list is empty (which it will be when DLL runs, if the DLL runs
        # first), then the results are appended to the list.  If the list
        # is not empty (which it will be when OpenCL runs second), the results
        # are compared to the results stored in the first element of the list.
        # This is a horrible stateful hack which only makes sense because the
        # test suite is thrown away after being run once.
        stash = []

        if is_py:  # kernel implemented in python
            test_name = "Model: %s, Kernel: python"%model_name
            test_method_name = "test_%s_python" % model_info.id
            test = ModelTestCase(test_name, model_info,
                                 test_method_name,
                                 platform="dll",  # so that the pure-python kernel is used
                                 dtype="double",
                                 stash=stash)
            suite.addTest(test)
        else:   # kernel implemented in C

            # test using dll if desired
            if 'dll' in loaders or not core.HAVE_OPENCL:
                test_name = "Model: %s, Kernel: dll"%model_name
                test_method_name = "test_%s_dll" % model_info.id
                test = ModelTestCase(test_name, model_info,
                                     test_method_name,
                                     platform="dll",
                                     dtype="double",
                                     stash=stash)
                suite.addTest(test)

            # test using opencl if desired and available
            if 'opencl' in loaders and core.HAVE_OPENCL:
                test_name = "Model: %s, Kernel: OpenCL"%model_name
                test_method_name = "test_%s_opencl" % model_info.id
                # Using dtype=None so that the models that are only
                # correct for double precision are not tested using
                # single precision.  The choice is determined by the
                # presence of *single=False* in the model file.
                test = ModelTestCase(test_name, model_info,
                                     test_method_name,
                                     platform="ocl", dtype=None,
                                     stash=stash)
                #print("defining", test_name)
                suite.addTest(test)

    return suite

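# Example (sketch): build a suite for a couple of specific models on whatever
# compute targets are available, then run it with the standard unittest
# machinery.  The model names below are only illustrative.
#
#     loaders = ['opencl', 'dll'] if core.HAVE_OPENCL else ['dll']
#     suite = make_suite(loaders, ['sphere', 'cylinder'])
#     unittest.TextTestRunner(verbosity=2).run(suite)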

def _hide_model_case_from_nose():
    # type: () -> type
    class ModelTestCase(unittest.TestCase):
        """
        Test suite for a particular model with a particular kernel driver.

        The test suite runs a simple smoke test to make sure the model
        functions, then runs the list of tests at the bottom of the model
        description file.
        """
        def __init__(self, test_name, model_info, test_method_name,
                     platform, dtype, stash):
            # type: (str, ModelInfo, str, str, DType, List[Any]) -> None
            self.test_name = test_name
            self.info = model_info
            self.platform = platform
            self.dtype = dtype
            self.stash = stash  # container for the results of the first run

            setattr(self, test_method_name, self.run_all)
            unittest.TestCase.__init__(self, test_method_name)

        def run_all(self):
            # type: () -> None
            """
            Run all the tests in the test suite, including smoke tests.
            """
            smoke_tests = [
                # test validity at reasonable values
                ({}, 0.1, None),
                ({}, (0.1, 0.1), None),
                # test validity at q = 0
                #({}, 0.0, None),
                #({}, (0.0, 0.0), None),
                # test vector form
                ({}, [0.001, 0.01, 0.1], [None]*3),
                ({}, [(0.1, 0.1)]*2, [None]*2),
                # test that ER/VR will run if they exist
                ({}, 'ER', None),
                ({}, 'VR', None),
                ]
            tests = smoke_tests
            #tests = []
            if self.info.tests is not None:
                tests += self.info.tests
            try:
                model = build_model(self.info, dtype=self.dtype,
                                    platform=self.platform)
                results = [self.run_one(model, test) for test in tests]
                if self.stash:
                    for test, target, actual in zip(tests, self.stash[0], results):
                        assert np.all(abs(target-actual) < 5e-5*abs(actual)), \
                            ("GPU/CPU comparison expected %s but got %s for %s"
                             % (target, actual, test[0]))
                else:
                    self.stash.append(results)

                # Check for missing tests.  Only do so for the "dll" tests
                # to reduce noise from both opencl and dll, and because
                # python kernels use platform="dll".
                if self.platform == "dll":
                    missing = []
                    ## Uncomment the following to require test cases
                    #missing = self._find_missing_tests()
                    if missing:
                        raise ValueError("Missing tests for "+", ".join(missing))

            except:
                annotate_exception(self.test_name)
                raise

        def _find_missing_tests(self):
            # type: () -> List[str]
            """make sure there are 1D, 2D, ER and VR tests as appropriate"""
            model_has_VR = callable(self.info.VR)
            model_has_ER = callable(self.info.ER)
            model_has_1D = True
            model_has_2D = any(p.type == 'orientation'
                               for p in self.info.parameters.kernel_parameters)

            # Lists of tests that have a result that is not None
            single = [test for test in self.info.tests
                      if not isinstance(test[2], list) and test[2] is not None]
            tests_has_VR = any(test[1] == 'VR' for test in single)
            tests_has_ER = any(test[1] == 'ER' for test in single)
            tests_has_1D_single = any(isinstance(test[1], float) for test in single)
            tests_has_2D_single = any(isinstance(test[1], tuple) for test in single)

            multiple = [test for test in self.info.tests
                        if isinstance(test[2], list)
                        and not all(result is None for result in test[2])]
            tests_has_1D_multiple = any(isinstance(test[1][0], float)
                                        for test in multiple)
            tests_has_2D_multiple = any(isinstance(test[1][0], tuple)
                                        for test in multiple)

            missing = []
            if model_has_VR and not tests_has_VR:
                missing.append("VR")
            if model_has_ER and not tests_has_ER:
                missing.append("ER")
            if model_has_1D and not (tests_has_1D_single or tests_has_1D_multiple):
                missing.append("1D")
            if model_has_2D and not (tests_has_2D_single or tests_has_2D_multiple):
                missing.append("2D")

            return missing

        def run_one(self, model, test):
            # type: (KernelModel, TestCondition) -> np.ndarray
            """Run a single test case."""
            user_pars, x, y = test
            pars = expand_pars(self.info.parameters, user_pars)
            invalid = invalid_pars(self.info.parameters, pars)
            if invalid:
                raise ValueError("Unknown parameters in test: " + ", ".join(invalid))

            if not isinstance(y, list):
                y = [y]
            if not isinstance(x, list):
                x = [x]

            self.assertEqual(len(y), len(x))

            if x[0] == 'ER':
                actual = np.array([call_ER(model.info, pars)])
            elif x[0] == 'VR':
                actual = np.array([call_VR(model.info, pars)])
            elif isinstance(x[0], tuple):
                qx, qy = zip(*x)
                q_vectors = [np.array(qx), np.array(qy)]
                kernel = model.make_kernel(q_vectors)
                actual = call_kernel(kernel, pars)
            else:
                q_vectors = [np.array(x)]
                kernel = model.make_kernel(q_vectors)
                actual = call_kernel(kernel, pars)

            self.assertTrue(len(actual) > 0)
            self.assertEqual(len(y), len(actual))

            for xi, yi, actual_yi in zip(x, y, actual):
                if yi is None:
                    # smoke test --- make sure it runs and produces a value
                    self.assertTrue(not np.isnan(actual_yi),
                                    'invalid f(%s): %s' % (xi, actual_yi))
                elif np.isnan(yi):
                    self.assertTrue(np.isnan(actual_yi),
                                    'f(%s): expected:%s; actual:%s'
                                    % (xi, yi, actual_yi))
                else:
                    # is_near does not work for infinite values, so also test
                    # for exact values.  Note that this will not
                    self.assertTrue(yi == actual_yi or is_near(yi, actual_yi, 5),
                                    'f(%s); expected:%s; actual:%s'
                                    % (xi, yi, actual_yi))
            return actual

    return ModelTestCase

def invalid_pars(partable, pars):
    # type: (ParameterTable, Dict[str, float]) -> List[str]
    """
    Return a list of parameter names that are not part of the model.
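
    For example, with a hypothetical parameter table whose only parameter is
    *radius*, the set ``{'radius': 10, 'radius_pd': 0.1, 'radius_pd_n': 35}``
    would pass, while ``{'radiu': 10}`` (typo) and ``{'radius_pd_bad': 1}``
    (unrecognized polydispersity suffix) would both be reported as invalid.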
    """
    names = set(p.id for p in partable.call_parameters)
    invalid = []
    for par in sorted(pars.keys()):
        parts = par.split('_pd')
        # Note: the suffixes produced by splitting on '_pd' keep their
        # leading underscore, e.g. 'radius_pd_n' -> ['radius', '_n'].
        if len(parts) > 1 and parts[1] not in ("", "_n", "_nsigma", "_type"):
            invalid.append(par)
            continue
        if parts[0] not in names:
            invalid.append(par)
    return invalid


def is_near(target, actual, digits=5):
    # type: (float, float, int) -> bool
    """
    Returns true if *actual* is within *digits* significant digits of *target*.
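
    Example (the numbers here are purely illustrative)::

        >>> is_near(1.234e5, 1.2340001e5)
        True
        >>> is_near(1.234e5, 1.235e5)
        False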
    """
    import math
    shift = 10**math.ceil(math.log10(abs(target)))
    return abs(target-actual)/shift < 1.5*10**-digits

def run_one(model):
    # type: (str) -> str
    """
    Run the tests for a single model, printing the results to stdout.

    *model* can be a python file, which is handy for checking user defined
    plugin models.
    """
    # Note that running main() directly did not work from within the
    # wxPython pycrust console.  Instead of the results appearing in the
    # window they were printed to the underlying console.
    from unittest.runner import TextTestResult, _WritelnDecorator

    # Build an object to capture and print the test results
    stream = _WritelnDecorator(StringIO())  # Add writeln() method to stream
    verbosity = 2
    descriptions = True
    result = TextTestResult(stream, descriptions, verbosity)

    # Build a test suite containing just the model
    loaders = ['opencl'] if core.HAVE_OPENCL else ['dll']
    models = [model]
    try:
        suite = make_suite(loaders, models)
    except Exception:
        import traceback
        stream.writeln(traceback.format_exc())
        return
    # Run the test suite
    suite.run(result)

    # Print the failures and errors
    for _, tb in result.errors:
        stream.writeln(tb)
    for _, tb in result.failures:
        stream.writeln(tb)

    # Warn if there are no user defined tests.
    # Note: the test suite constructed above only has one test in it, which
    # runs through some smoke tests to make sure the model runs, then runs
    # through the input-output pairs given in the model definition file.  To
    # check if any such pairs are defined, therefore, we just need to check if
    # they are in the first test of the test suite.  We do this with an
    # iterator since we don't have direct access to the list of tests in the
    # test suite.
    for test in suite:
        if not test.info.tests:
            stream.writeln("Note: %s has no user defined tests."%model)
        break
    else:
        stream.writeln("Note: no test suite created --- this should never happen")

    output = stream.getvalue()
    stream.close()
    return output

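# Example (sketch): run_one() accepts either a built-in model name or the path
# to a plugin model file; the plugin path below is purely hypothetical.
#
#     print(run_one("sphere"))
#     print(run_one("path/to/my_plugin_model.py"))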

def main(*models):
    # type: (*str) -> int
    """
    Run tests for the given models.

    Returns 0 if success or 1 if any tests fail.
    """
    try:
        from xmlrunner import XMLTestRunner as TestRunner
        test_args = {'output': 'logs'}
    except ImportError:
        from unittest import TextTestRunner as TestRunner
        test_args = {}

    if models and models[0] == '-v':
        verbosity = 2
        models = models[1:]
    else:
        verbosity = 1
    if models and models[0] == 'opencl':
        if not core.HAVE_OPENCL:
            print("opencl is not available")
            return 1
        loaders = ['opencl']
        models = models[1:]
    elif models and models[0] == 'dll':
        # TODO: test if compiler is available?
        loaders = ['dll']
        models = models[1:]
    elif models and models[0] == 'opencl_and_dll':
        loaders = ['opencl', 'dll'] if core.HAVE_OPENCL else ['dll']
        models = models[1:]
    else:
        loaders = ['opencl', 'dll'] if core.HAVE_OPENCL else ['dll']
    if not models:
        print("""\
usage:
  python -m sasmodels.model_test [-v] [opencl|dll|opencl_and_dll] model1 model2 ...

If -v is included on the command line, then use verbose output.

If neither opencl nor dll is specified, then models will be tested with
both OpenCL and dll; the compute target is ignored for pure python models.

If model1 is 'all', then all models except those listed after it will be tested.

""")

        return 1

    runner = TestRunner(verbosity=verbosity, **test_args)
    result = runner.run(make_suite(loaders, models))
    return 1 if result.failures or result.errors else 0


def model_tests():
    # type: () -> Iterator[Callable[[], None]]
    """
    Test runner visible to nosetests.

    Run "nosetests sasmodels" on the command line to invoke it.
    """
    loaders = ['opencl', 'dll'] if core.HAVE_OPENCL else ['dll']
    tests = make_suite(loaders, ['all'])
    for test_i in tests:
        # In order for nosetest to see the correct test name, need to set
        # the description attribute of the returned function.  Since we
        # can't do this for the returned instance, wrap it in a lambda and
        # set the description on the lambda.  Otherwise we could just do:
        #    yield test_i.run_all
        # Bind test_i as a default argument so each lambda keeps its own test
        # even if the generator is fully consumed before the tests are run.
        L = lambda test_i=test_i: test_i.run_all()
        L.description = test_i.test_name
        yield L

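# Example (sketch): the generator can also be consumed without nose, e.g. to
# run every model test directly:
#
#     for test in model_tests():
#         print(test.description)
#         test()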

if __name__ == "__main__":
    sys.exit(main(*sys.argv[1:]))