source: sasmodels/sasmodels/model_test.py @ afe206d

Last change on this file since afe206d was 12eec1e, checked in by Paul Kienzle <pkienzle@…>, 6 years ago

refactor model tests so SasviewModel can run tests without reloading model info

# -*- coding: utf-8 -*-
"""
Run model unit tests.

Usage::

    python -m sasmodels.model_test [opencl|dll|opencl_and_dll] model1 model2 ...

    if model1 is 'all', then all except the remaining models will be tested

Each model is tested using the default parameters at q=0.1, (qx, qy)=(0.1, 0.1),
and the ER and VR are computed.  The return values at these points are not
considered.  The test is only to verify that the models run to completion,
and do not produce inf or NaN.

Tests are defined with the *tests* attribute in the model.py file.  *tests*
is a list of individual tests to run, where each test consists of the
parameter values for the test, the q-values and the expected results.  For
the effective radius test, the q-value should be 'ER'.  For the VR test,
the q-value should be 'VR'.  For 1-D tests, either specify the q value or
a list of q-values, and the corresponding I(q) value, or list of I(q) values.

That is::

    tests = [
        [ {parameters}, q, I(q)],
        [ {parameters}, [q], [I(q)] ],
        [ {parameters}, [q1, q2, ...], [I(q1), I(q2), ...]],

        [ {parameters}, (qx, qy), I(qx, qy)],
        [ {parameters}, [(qx1, qy1), (qx2, qy2), ...],
                        [I(qx1, qy1), I(qx2, qy2), ...]],

        [ {parameters}, 'ER', ER(pars) ],
        [ {parameters}, 'VR', VR(pars) ],
        ...
    ]

Parameters are *key:value* pairs, where key is one of the parameters of the
model and value is the value to use for the test.  Any parameters not given
in the parameter list will take on the default parameter value.

Precision defaults to 5 digits (relative).
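
For instance, a hypothetical model file might declare the following
(parameter names and expected values are illustrative only, not reference
results)::

    tests = [
        # single q value, single I(q) value
        [{"radius": 60.0, "sld": 1.0}, 0.1, 0.0421],
        # vector of q values with matching I(q) values
        [{"radius": 60.0}, [0.01, 0.1], [120.5, 0.0421]],
        # effective radius and volume ratio checks
        [{"radius": 60.0}, 'ER', 60.0],
        [{"radius": 60.0}, 'VR', 1.0],
    ]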
"""
from __future__ import print_function

import sys
import unittest
import traceback

try:
    from StringIO import StringIO
except ImportError:
    # StringIO.StringIO renamed to io.StringIO in Python 3
    # Note: io.StringIO exists in python 2, but using unicode instead of str
    from io import StringIO

import numpy as np  # type: ignore

from . import core
from .core import list_models, load_model_info, build_model
from .direct_model import call_kernel, call_ER, call_VR
from .exception import annotate_exception
from .modelinfo import expand_pars
from .kernelcl import use_opencl

# pylint: disable=unused-import
try:
    from typing import List, Iterator, Callable, Dict, Tuple, Any
except ImportError:
    pass
else:
    from .modelinfo import ParameterTable, ParameterSet, TestCondition, ModelInfo
    from .kernel import KernelModel
# pylint: enable=unused-import

def make_suite(loaders, models):
    # type: (List[str], List[str]) -> unittest.TestSuite
    """
    Construct the pyunit test suite.

    *loaders* is the list of kernel drivers to use, which is one of
    *["dll", "opencl"]*, *["dll"]* or *["opencl"]*.  For python models,
    the python driver is always used.

    *models* is the list of models to test, or *["all"]* to test all models.
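
    A minimal usage sketch (model name illustrative)::

        suite = make_suite(["dll"], ["cylinder"])
        unittest.TextTestRunner(verbosity=2).run(suite)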
    """
    suite = unittest.TestSuite()

    if models[0] in core.KINDS:
        skip = models[1:]
        models = list_models(models[0])
    else:
        skip = []
    for model_name in models:
        if model_name not in skip:
            model_info = load_model_info(model_name)
            _add_model_to_suite(loaders, suite, model_info)

    return suite

def _add_model_to_suite(loaders, suite, model_info):
    ModelTestCase = _hide_model_case_from_nose()

    #print('------')
    #print('found tests in', model_name)
    #print('------')

    # if ispy then use the dll loader to call pykernel
    # don't try to call cl kernel since it will not be
    # available in some environments.
    is_py = callable(model_info.Iq)

    # Some OpenCL drivers seem to be flaky, and are not producing the
    # expected result.  Since we don't have known test values yet for
    # all of our models, we are instead going to compare the results
    # for the 'smoke test' (that is, evaluation at q=0.1 for the default
    # parameters just to see that the model runs to completion) between
    # the OpenCL and the DLL.  To do this, we define a 'stash' which is
    # shared between OpenCL and DLL tests.  This is just a list.  If the
    # list is empty (which it will be when DLL runs, if the DLL runs
    # first), then the results are appended to the list.  If the list
    # is not empty (which it will be when OpenCL runs second), the results
    # are compared to the results stored in the first element of the list.
    # This is a horrible stateful hack which only makes sense because the
    # test suite is thrown away after being run once.
    stash = []

    if is_py:  # kernel implemented in python
        test_name = "%s-python"%model_info.name
        test_method_name = "test_%s_python" % model_info.id
        test = ModelTestCase(test_name, model_info,
                                test_method_name,
                                platform="dll",  # so that
                                dtype="double",
                                stash=stash)
        suite.addTest(test)
    else:   # kernel implemented in C

        # test using dll if desired
        if 'dll' in loaders or not use_opencl():
            test_name = "%s-dll"%model_info.name
            test_method_name = "test_%s_dll" % model_info.id
            test = ModelTestCase(test_name, model_info,
                                    test_method_name,
                                    platform="dll",
                                    dtype="double",
                                    stash=stash)
            suite.addTest(test)

        # test using opencl if desired and available
        if 'opencl' in loaders and use_opencl():
            test_name = "%s-opencl"%model_info.name
            test_method_name = "test_%s_opencl" % model_info.id
            # Using dtype=None so that the models that are only
            # correct for double precision are not tested using
            # single precision.  The choice is determined by the
            # presence of *single=False* in the model file.
            test = ModelTestCase(test_name, model_info,
                                    test_method_name,
                                    platform="ocl", dtype=None,
                                    stash=stash)
            #print("defining", test_name)
            suite.addTest(test)


def _hide_model_case_from_nose():
    # type: () -> type
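    # Note (explanatory, not from the original source): defining the TestCase
    # subclass inside this factory keeps nose from collecting it directly as a
    # module-level test class; instances are only added explicitly through
    # _add_model_to_suite().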
    class ModelTestCase(unittest.TestCase):
        """
        Test suite for a particular model with a particular kernel driver.

        The test suite runs a simple smoke test to make sure the model
        functions, then runs the list of tests at the bottom of the model
        description file.
        """
        def __init__(self, test_name, model_info, test_method_name,
                     platform, dtype, stash):
            # type: (str, ModelInfo, str, str, DType, List[Any]) -> None
            self.test_name = test_name
            self.info = model_info
            self.platform = platform
            self.dtype = dtype
            self.stash = stash  # container for the results of the first run

            setattr(self, test_method_name, self.run_all)
            unittest.TestCase.__init__(self, test_method_name)

        def run_all(self):
            # type: () -> None
            """
            Run all the tests in the test suite, including smoke tests.
            """
            smoke_tests = [
                # test validity at reasonable values
                ({}, 0.1, None),
                ({}, (0.1, 0.1), None),
                # test validity at q = 0
                #({}, 0.0, None),
                #({}, (0.0, 0.0), None),
                # test vector form
                ({}, [0.001, 0.01, 0.1], [None]*3),
                ({}, [(0.1, 0.1)]*2, [None]*2),
                # test that ER/VR will run if they exist
                ({}, 'ER', None),
                ({}, 'VR', None),
                ]
            tests = smoke_tests
            #tests = []
            if self.info.tests is not None:
                tests += self.info.tests
            try:
                model = build_model(self.info, dtype=self.dtype,
                                    platform=self.platform)
                results = [self.run_one(model, test) for test in tests]
                if self.stash:
                    for test, target, actual in zip(tests, self.stash[0], results):
                        assert np.all(abs(target-actual) < 5e-5*abs(actual)), \
                            ("GPU/CPU comparison expected %s but got %s for %s"
                             % (target, actual, test[0]))
                else:
                    self.stash.append(results)

                # Check for missing tests.  Only do so for the "dll" tests
                # to reduce noise from both opencl and dll, and because
                # python kernels use platform="dll".
                if self.platform == "dll":
                    missing = []
                    ## Uncomment the following to require test cases
                    #missing = self._find_missing_tests()
                    if missing:
                        raise ValueError("Missing tests for "+", ".join(missing))

            except:
                annotate_exception(self.test_name)
                raise

        def _find_missing_tests(self):
            # type: () -> List[str]
            """make sure there are 1D, 2D, ER and VR tests as appropriate"""
            model_has_VR = callable(self.info.VR)
            model_has_ER = callable(self.info.ER)
            model_has_1D = True
            model_has_2D = any(p.type == 'orientation'
                               for p in self.info.parameters.kernel_parameters)

            # Lists of tests that have a result that is not None
            single = [test for test in self.info.tests
                      if not isinstance(test[2], list) and test[2] is not None]
            tests_has_VR = any(test[1] == 'VR' for test in single)
            tests_has_ER = any(test[1] == 'ER' for test in single)
            tests_has_1D_single = any(isinstance(test[1], float) for test in single)
            tests_has_2D_single = any(isinstance(test[1], tuple) for test in single)

            multiple = [test for test in self.info.tests
                        if isinstance(test[2], list)
                        and not all(result is None for result in test[2])]
            tests_has_1D_multiple = any(isinstance(test[1][0], float)
                                        for test in multiple)
            tests_has_2D_multiple = any(isinstance(test[1][0], tuple)
                                        for test in multiple)

            missing = []
            if model_has_VR and not tests_has_VR:
                missing.append("VR")
            if model_has_ER and not tests_has_ER:
                missing.append("ER")
            if model_has_1D and not (tests_has_1D_single or tests_has_1D_multiple):
                missing.append("1D")
            if model_has_2D and not (tests_has_2D_single or tests_has_2D_multiple):
                missing.append("2D")

            return missing

        def run_one(self, model, test):
            # type: (KernelModel, TestCondition) -> None
            """Run a single test case."""
            user_pars, x, y = test
            pars = expand_pars(self.info.parameters, user_pars)
            invalid = invalid_pars(self.info.parameters, pars)
            if invalid:
                raise ValueError("Unknown parameters in test: " + ", ".join(invalid))

            if not isinstance(y, list):
                y = [y]
            if not isinstance(x, list):
                x = [x]

            self.assertEqual(len(y), len(x))

            if x[0] == 'ER':
                actual = np.array([call_ER(model.info, pars)])
            elif x[0] == 'VR':
                actual = np.array([call_VR(model.info, pars)])
            elif isinstance(x[0], tuple):
                qx, qy = zip(*x)
                q_vectors = [np.array(qx), np.array(qy)]
                kernel = model.make_kernel(q_vectors)
                actual = call_kernel(kernel, pars)
            else:
                q_vectors = [np.array(x)]
                kernel = model.make_kernel(q_vectors)
                actual = call_kernel(kernel, pars)

            self.assertTrue(len(actual) > 0)
            self.assertEqual(len(y), len(actual))

            for xi, yi, actual_yi in zip(x, y, actual):
                if yi is None:
                    # smoke test --- make sure it runs and produces a value
                    self.assertTrue(not np.isnan(actual_yi),
                                    'invalid f(%s): %s' % (xi, actual_yi))
                elif np.isnan(yi):
                    self.assertTrue(np.isnan(actual_yi),
                                    'f(%s): expected:%s; actual:%s'
                                    % (xi, yi, actual_yi))
                else:
                    # is_near does not work for infinite values, so also test
                    # for exact values.  Note that this will not
                    self.assertTrue(yi == actual_yi or is_near(yi, actual_yi, 5),
                                    'f(%s); expected:%s; actual:%s'
                                    % (xi, yi, actual_yi))
            return actual

    return ModelTestCase

def invalid_pars(partable, pars):
    # type: (ParameterTable, Dict[str, float]) -> List[str]
    """
    Return a list of parameter names that are not part of the model.
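
    Polydispersity suffixes are accepted, so for example (parameter names
    illustrative)::

        invalid_pars(partable, {"radius": 60.0, "radius_pd": 0.2, "bozo": 1.0})
        # -> ["bozo"] when "radius" is a model parameter and "bozo" is not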
    """
    names = set(p.id for p in partable.call_parameters)
    invalid = []
    for par in sorted(pars.keys()):
        parts = par.split('_pd')
        if len(parts) > 1 and parts[1] not in ("", "_n", "nsigma", "type"):
            invalid.append(par)
            continue
        if parts[0] not in names:
            invalid.append(par)
    return invalid


def is_near(target, actual, digits=5):
    # type: (float, float, int) -> bool
    """
    Returns true if *actual* is within *digits* significant digits of *target*.
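
    For example (illustrative)::

        >>> is_near(1.234e-5, 1.2341e-5, digits=5)
        True
        >>> is_near(1.234e-5, 1.24e-5, digits=5)
        False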
    """
    import math
    shift = 10**math.ceil(math.log10(abs(target)))
    return abs(target-actual)/shift < 1.5*10**-digits

# CRUFT: old interface; should be deprecated and removed
def run_one(model_name):
    # msg = "use check_model(model_info) rather than run_one(model_name)"
    # warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
    try:
        model_info = load_model_info(model_name)
    except Exception:
        output = traceback.format_exc()
        return output

    success, output = check_model(model_info)
    return output

def check_model(model_info):
    # type: (ModelInfo) -> Tuple[bool, str]
    """
    Run the tests for a single model, capturing the output.

    Returns success status and the output string.
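
    A minimal usage sketch (model name illustrative)::

        from sasmodels.core import load_model_info
        success, output = check_model(load_model_info("sphere"))
        print(output)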
    """
    # Note that running main() directly did not work from within the
    # wxPython pycrust console.  Instead of the results appearing in the
    # window they were printed to the underlying console.
    from unittest.runner import TextTestResult, _WritelnDecorator

    # Build an object to capture and print the test results
    stream = _WritelnDecorator(StringIO())  # Add writeln() method to stream
    verbosity = 2
    descriptions = True
    result = TextTestResult(stream, descriptions, verbosity)

    # Build a test suite containing just the model
    loaders = ['opencl'] if use_opencl() else ['dll']
    suite = unittest.TestSuite()
    _add_model_to_suite(loaders, suite, model_info)

    # Warn if there are no user defined tests.
    # Note: the test suite constructed above only has one test in it, which
    # runs through some smoke tests to make sure the model runs, then runs
    # through the input-output pairs given in the model definition file.  To
    # check if any such pairs are defined, therefore, we just need to check if
    # they are in the first test of the test suite.  We do this with an
    # iterator since we don't have direct access to the list of tests in the
    # test suite.
    # In Qt5 suite.run() will clear all tests in the suite after running
    # with no way of retaining them for the test below, so let's check
    # for user tests before running the suite.
    for test in suite:
        if not test.info.tests:
            stream.writeln("Note: %s has no user defined tests."%model_info.name)
        break
    else:
        stream.writeln("Note: no test suite created --- this should never happen")

    # Run the test suite
    suite.run(result)

    # Print the failures and errors
    for _, tb in result.errors:
        stream.writeln(tb)
    for _, tb in result.failures:
        stream.writeln(tb)

    output = stream.getvalue()
    stream.close()
    return result.wasSuccessful(), output


def main(*models):
    # type: (*str) -> int
    """
    Run tests for the given models.

    Returns 0 if success or 1 if any tests fail.
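
    For example (arguments illustrative)::

        sys.exit(main("-v", "dll", "cylinder"))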
    """
    try:
        from xmlrunner import XMLTestRunner as TestRunner
        test_args = {'output': 'logs'}
    except ImportError:
        from unittest import TextTestRunner as TestRunner
        test_args = {}

    if models and models[0] == '-v':
        verbosity = 2
        models = models[1:]
    else:
        verbosity = 1
    if models and models[0] == 'opencl':
        if not use_opencl():
            print("opencl is not available")
            return 1
        loaders = ['opencl']
        models = models[1:]
    elif models and models[0] == 'dll':
        # TODO: test if compiler is available?
        loaders = ['dll']
        models = models[1:]
    elif models and models[0] == 'opencl_and_dll':
        loaders = ['opencl', 'dll'] if use_opencl() else ['dll']
        models = models[1:]
    else:
        loaders = ['opencl', 'dll'] if use_opencl() else ['dll']
    if not models:
        print("""\
usage:
  python -m sasmodels.model_test [-v] [opencl|dll] model1 model2 ...

If -v is included on the command line, then use verbose output.

If neither opencl nor dll is specified, then models will be tested with
both OpenCL and dll; the compute target is ignored for pure python models.

If model1 is 'all', then all except the remaining models will be tested.

""")

        return 1

    runner = TestRunner(verbosity=verbosity, **test_args)
    result = runner.run(make_suite(loaders, models))
    return 1 if result.failures or result.errors else 0


def model_tests():
    # type: () -> Iterator[Callable[[], None]]
    """
    Test runner visible to nosetests.

    Run "nosetests sasmodels" on the command line to invoke it.
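
    Each yielded callable runs the full test list for one model, so a plain
    loop such as the following (illustrative) exercises every model::

        for test in model_tests():
            test()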
    """
    loaders = ['opencl', 'dll'] if use_opencl() else ['dll']
    tests = make_suite(loaders, ['all'])
    def build_test(test):
        # In order for nosetest to show the test name, wrap the test.run_all
        # method in a function that carries the test name, which will be
        # displayed when the test is run.  Do this in a function so that it
        # properly captures the context for tests that are captured and
        # run later.  If done directly in the for loop, then the looping
        # variable test will be shared amongst all the tests, and we will be
        # repeatedly testing vesicle.
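        # Illustrative sketch of the pitfall (not from the original source):
        #     for test in tests:
        #         yield lambda: test.run_all()
        # would bind every lambda to the same loop variable, so all of the
        # yielded callables would run the last test collected.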

        # Note: in sasview sas.sasgui.perspectives.fitting.gpu_options
        # requires that the test.description field be set.
        wrap = lambda: test.run_all()
        wrap.description = test.test_name
        return wrap
        # The following would work with nosetests and pytest:
        #     return lambda name: test.run_all(), test.test_name

    for test in tests:
        yield build_test(test)


if __name__ == "__main__":
    sys.exit(main(*sys.argv[1:]))