source: sasmodels/sasmodels/model_test.py @ 39a06c9

Last change on this file was 39a06c9, checked in by Paul Kienzle <pkienzle@…>, 6 years ago

Remove references to ER and VR from sasmodels. Refs #1202.

# -*- coding: utf-8 -*-
"""
Run model unit tests.

Usage::

    python -m sasmodels.model_test [opencl|cuda|dll] model1 model2 ...

    if model1 is 'all', then all models except those listed after it will be tested

Each model is tested using the default parameters at q=0.1, (qx, qy)=(0.1, 0.1),
and Fq is called to make sure R_eff, volume and volume ratio are computed.
The return values at these points are not considered.  The test is only to
verify that the models run to completion, and do not produce inf or NaN.

Tests are defined with the *tests* attribute in the model.py file.  *tests*
is a list of individual tests to run, where each test consists of the
parameter values for the test, the q-values and the expected results.  For
the effective radius test and volume ratio tests, use the extended output
form, which checks each output of kernel.Fq. For 1-D tests, either specify
the q value or a list of q-values, and the corresponding I(q) value, or
list of I(q) values.

That is::

    tests = [
        [ {parameters}, q, I(q)],
        [ {parameters}, [q], [I(q)] ],
        [ {parameters}, [q1, q2, ...], [I(q1), I(q2), ...]],

        [ {parameters}, (qx, qy), I(qx, qy)],
        [ {parameters}, [(qx1, qy1), (qx2, qy2), ...],
                        [I(qx1, qy1), I(qx2, qy2), ...]],

        [ {parameters}, q, F(q), F^2(q), R_eff, V, V_r ],
        ...
    ]

Parameters are *key:value* pairs, where key is one of the parameters of the
model and value is the value to use for the test.  Any parameters not given
in the parameter list will take on the default parameter value.

Precision defaults to 5 digits (relative).
"""
from __future__ import print_function

import sys
import unittest

try:
    from StringIO import StringIO
except ImportError:
    # StringIO.StringIO was renamed to io.StringIO in Python 3.
    # Note: io.StringIO exists in Python 2, but it expects unicode rather than str.
    from io import StringIO

import numpy as np  # type: ignore

from . import core
from .core import list_models, load_model_info, build_model
from .direct_model import call_kernel, call_Fq
from .exception import annotate_exception
from .modelinfo import expand_pars
from .kernelcl import use_opencl
from .kernelcuda import use_cuda

# pylint: disable=unused-import
try:
    from typing import List, Iterator, Callable
except ImportError:
    pass
else:
    from .modelinfo import ParameterTable, ParameterSet, TestCondition, ModelInfo
    from .kernel import KernelModel
# pylint: enable=unused-import


def make_suite(loaders, models):
    # type: (List[str], List[str]) -> unittest.TestSuite
    """
    Construct the pyunit test suite.

    *loaders* is the list of kernel drivers to use (dll, opencl or cuda).
    For python models the python driver is always used.

    *models* is the list of models to test, or *["all"]* to test all models.
    """
    ModelTestCase = _hide_model_case_from_nose()
    suite = unittest.TestSuite()

    if models[0] in core.KINDS:
        skip = models[1:]
        models = list_models(models[0])
    else:
        skip = []
    for model_name in models:
        if model_name in skip:
            continue
        model_info = load_model_info(model_name)

        #print('------')
        #print('found tests in', model_name)
        #print('------')

        # If the kernel is implemented in python then use the dll loader so
        # that the python kernel is called; don't try to call the cl kernel
        # since it will not be available in some environments.
        is_py = callable(model_info.Iq)

        # Some OpenCL drivers seem to be flaky, and are not producing the
        # expected result.  Since we don't have known test values yet for
        # all of our models, we are instead going to compare the results
        # for the 'smoke test' (that is, evaluation at q=0.1 for the default
        # parameters just to see that the model runs to completion) between
        # the OpenCL and the DLL.  To do this, we define a 'stash' which is
        # shared between OpenCL and DLL tests.  This is just a list.  If the
        # list is empty (which it will be when DLL runs, if the DLL runs
        # first), then the results are appended to the list.  If the list
        # is not empty (which it will be when OpenCL runs second), the results
        # are compared to the results stored in the first element of the list.
        # This is a horrible stateful hack which only makes sense because the
        # test suite is thrown away after being run once.
        stash = []

        if is_py:  # kernel implemented in python
            test_name = "%s-python"%model_name
            test_method_name = "test_%s_python" % model_info.id
            test = ModelTestCase(test_name, model_info,
                                 test_method_name,
130                                 platform="dll",  # so that
                                 dtype="double",
                                 stash=stash)
            suite.addTest(test)
        else:   # kernel implemented in C

            # test using dll if desired
            if 'dll' in loaders:
                test_name = "%s-dll"%model_name
                test_method_name = "test_%s_dll" % model_info.id
                test = ModelTestCase(test_name, model_info,
                                     test_method_name,
                                     platform="dll",
                                     dtype="double",
                                     stash=stash)
                suite.addTest(test)

            # test using opencl if desired and available
            if 'opencl' in loaders and use_opencl():
                test_name = "%s-opencl"%model_name
                test_method_name = "test_%s_opencl" % model_info.id
                # Using dtype=None so that the models that are only
                # correct for double precision are not tested using
                # single precision.  The choice is determined by the
                # presence of *single=False* in the model file.
                test = ModelTestCase(test_name, model_info,
                                     test_method_name,
                                     platform="ocl", dtype=None,
                                     stash=stash)
                #print("defining", test_name)
                suite.addTest(test)

            # test using cuda if desired and available
            if 'cuda' in loaders and use_cuda():
                test_name = "%s-cuda"%model_name
                test_method_name = "test_%s_cuda" % model_info.id
                # Using dtype=None so that the models that are only
                # correct for double precision are not tested using
                # single precision.  The choice is determined by the
                # presence of *single=False* in the model file.
                test = ModelTestCase(test_name, model_info,
                                     test_method_name,
                                     platform="cuda", dtype=None,
                                     stash=stash)
                #print("defining", test_name)
                suite.addTest(test)

    return suite

def _hide_model_case_from_nose():
    # type: () -> type
    class ModelTestCase(unittest.TestCase):
        """
        Test suite for a particular model with a particular kernel driver.

        The test suite runs a simple smoke test to make sure the model
        functions, then runs the list of tests at the bottom of the model
        description file.
        """
        def __init__(self, test_name, model_info, test_method_name,
                     platform, dtype, stash):
            # type: (str, ModelInfo, str, str, DType, List[Any]) -> None
            self.test_name = test_name
            self.info = model_info
            self.platform = platform
            self.dtype = dtype
            self.stash = stash  # container for the results of the first run

            setattr(self, test_method_name, self.run_all)
            unittest.TestCase.__init__(self, test_method_name)

        def run_all(self):
            # type: () -> None
            """
            Run all the tests in the test suite, including smoke tests.
            """
            smoke_tests = [
                # test validity at reasonable values
                ({}, 0.1, None),
                ({}, (0.1, 0.1), None),
                # test validity at q = 0
                #({}, 0.0, None),
                #({}, (0.0, 0.0), None),
                # test vector form
                ({}, [0.001, 0.01, 0.1], [None]*3),
                ({}, [(0.1, 0.1)]*2, [None]*2),
                # test that Fq will run
                ({}, 0.1, None, None, None, None, None),
                ]
            tests = smoke_tests
            #tests = []
            if self.info.tests is not None:
                tests += self.info.tests
            try:
                model = build_model(self.info, dtype=self.dtype,
                                    platform=self.platform)
                results = [self.run_one(model, test) for test in tests]
                if self.stash:
                    for test, target, actual in zip(tests, self.stash[0], results):
                        assert np.all(abs(target-actual) < 5e-5*abs(actual)), \
                            ("GPU/CPU comparison expected %s but got %s for %s"
                             % (target, actual, test[0]))
                else:
                    self.stash.append(results)

                # Check for missing tests.  Only do so for the "dll" tests
                # to reduce noise from both opencl and cuda, and because
                # python kernels use platform="dll".
                if self.platform == "dll":
                    missing = []
                    ## Uncomment the following to require test cases
                    #missing = self._find_missing_tests()
                    if missing:
                        raise ValueError("Missing tests for "+", ".join(missing))

            except:
                annotate_exception(self.test_name)
                raise

        def _find_missing_tests(self):
            # type: () -> List[str]
251            """make sure there are 1D and 2D tests as appropriate"""
252            model_has_1D = True
253            model_has_2D = any(p.type == 'orientation'
254                               for p in self.info.parameters.kernel_parameters)
255
256            # Lists of tests that have a result that is not None
257            single = [test for test in self.info.tests
258                      if not isinstance(test[2], list) and test[2] is not None]
259            tests_has_1D_single = any(isinstance(test[1], float) for test in single)
260            tests_has_2D_single = any(isinstance(test[1], tuple) for test in single)
261
262            multiple = [test for test in self.info.tests
263                        if isinstance(test[2], list)
264                        and not all(result is None for result in test[2])]
265            tests_has_1D_multiple = any(isinstance(test[1][0], float)
266                                        for test in multiple)
267            tests_has_2D_multiple = any(isinstance(test[1][0], tuple)
268                                        for test in multiple)
269
270            missing = []
271            if model_has_1D and not (tests_has_1D_single or tests_has_1D_multiple):
272                missing.append("1D")
273            if model_has_2D and not (tests_has_2D_single or tests_has_2D_multiple):
274                missing.append("2D")
275
276            return missing
277
        def run_one(self, model, test):
            # type: (KernelModel, TestCondition) -> None
            """Run a single test case."""
            user_pars, x, y = test[:3]
            pars = expand_pars(self.info.parameters, user_pars)
            invalid = invalid_pars(self.info.parameters, pars)
            if invalid:
                raise ValueError("Unknown parameters in test: " + ", ".join(invalid))

            if not isinstance(y, list):
                y = [y]
            if not isinstance(x, list):
                x = [x]

            self.assertEqual(len(y), len(x))

            if isinstance(x[0], tuple):
                qx, qy = zip(*x)
                q_vectors = [np.array(qx), np.array(qy)]
            else:
                q_vectors = [np.array(x)]

            kernel = model.make_kernel(q_vectors)
            if len(test) == 3:
                actual = call_kernel(kernel, pars)
                self._check_vectors(x, y, actual, 'I')
                return actual
            else:
                y1 = y
                y2 = test[3] if isinstance(test[3], list) else [test[3]]
                F1, F2, R_eff, volume, volume_ratio = call_Fq(kernel, pars)
                if F1 is not None:  # F1 is None for models with Iq instead of Fq
                    self._check_vectors(x, y1, F1, 'F')
                self._check_vectors(x, y2, F2, 'F^2')
                self._check_scalar(test[4], R_eff, 'R_eff')
                self._check_scalar(test[5], volume, 'volume')
                self._check_scalar(test[6], volume_ratio, 'form:shell ratio')
                return F2

        def _check_scalar(self, target, actual, name):
            if target is None:
                # smoke test --- make sure it runs and produces a value
                self.assertTrue(not np.isnan(actual),
                                'invalid %s: %s' % (name, actual))
            elif np.isnan(target):
                # make sure nans match
                self.assertTrue(np.isnan(actual),
                                '%s: expected:%s; actual:%s'
                                % (name, target, actual))
            else:
                # is_near does not work for infinite values, so also test
                # for exact values.
                self.assertTrue(target == actual or is_near(target, actual, 5),
                                '%s: expected:%s; actual:%s'
                                % (name, target, actual))

        def _check_vectors(self, x, target, actual, name='I'):
            self.assertTrue(len(actual) > 0,
                            '%s(...) expected return'%name)
            if target is None:
                return
            self.assertEqual(len(target), len(actual),
                             '%s(...) returned wrong length'%name)
            for xi, yi, actual_yi in zip(x, target, actual):
                if yi is None:
                    # smoke test --- make sure it runs and produces a value
                    self.assertTrue(not np.isnan(actual_yi),
                                    'invalid %s(%s): %s' % (name, xi, actual_yi))
                elif np.isnan(yi):
                    # make sure nans match
                    self.assertTrue(np.isnan(actual_yi),
                                    '%s(%s): expected:%s; actual:%s'
                                    % (name, xi, yi, actual_yi))
                else:
                    # is_near does not work for infinite values, so also test
                    # for exact values.
                    self.assertTrue(yi == actual_yi or is_near(yi, actual_yi, 5),
                                    '%s(%s): expected:%s; actual:%s'
                                    % (name, xi, yi, actual_yi))

    return ModelTestCase

def invalid_pars(partable, pars):
    # type: (ParameterTable, Dict[str, float]) -> List[str]
362    """
363    Return a list of parameter names that are not part of the model.
364    """
365    names = set(p.id for p in partable.call_parameters)
366    invalid = []
367    for par in sorted(pars.keys()):
368        # special handling of R_eff mode, which is not a usual parameter
369        if par == 'radius_effective_type':
370            continue
371        parts = par.split('_pd')
        if len(parts) > 1 and parts[1] not in ("", "_n", "_nsigma", "_type"):
            invalid.append(par)
            continue
        if parts[0] not in names:
            invalid.append(par)
    return invalid
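
# For illustration (hypothetical parameter names): for a model with a "radius"
# parameter, test parameter names such as "radius", "radius_pd" and
# "radius_pd_n" are accepted, while a misspelling such as "radus" or an
# unsupported suffix such as "radius_pd_width" would be reported as invalid.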


def is_near(target, actual, digits=5):
    # type: (float, float, int) -> bool
    """
    Returns true if *actual* is within *digits* significant digits of *target*.
    """
    import math
    shift = 10**math.ceil(math.log10(abs(target)))
    return abs(target-actual)/shift < 1.5*10**-digits
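
# For illustration only (values are made up): with digits=5 and target=1234.5,
# shift = 10**ceil(log10(1234.5)) = 10**4, so actual values within
# 1.5e-5 * 10**4 = 0.15 of the target are accepted; is_near(1234.5, 1234.6, 5)
# is True while is_near(1234.5, 1235.0, 5) is False.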

def run_one(model):
    # type: (str) -> str
    """
    Run the tests for a single model, printing the results to stdout.

    *model* can be a python file, which is handy for checking user defined
    plugin models.
    """
    # Note that running main() directly did not work from within the
    # wxPython pycrust console.  Instead of the results appearing in the
    # window they were printed to the underlying console.
    from unittest.runner import TextTestResult, _WritelnDecorator

    # Build an object to capture and print the test results
    stream = _WritelnDecorator(StringIO())  # Add writeln() method to stream
    verbosity = 2
    descriptions = True
    result = TextTestResult(stream, descriptions, verbosity)

    # Build a test suite containing just the model
    loader = 'opencl' if use_opencl() else 'cuda' if use_cuda() else 'dll'
    models = [model]
    try:
        suite = make_suite([loader], models)
    except Exception:
        import traceback
        stream.writeln(traceback.format_exc())
        return

    # Warn if there are no user defined tests.
    # Note: the test suite constructed above only has one test in it, which
    # runs through some smoke tests to make sure the model runs, then runs
    # through the input-output pairs given in the model definition file.  To
    # check if any such pairs are defined, therefore, we just need to check if
    # they are in the first test of the test suite.  We do this with an
    # iterator since we don't have direct access to the list of tests in the
    # test suite.
    # In Qt5 suite.run() will clear all tests in the suite after running
    # with no way of retaining them for the test below, so let's check
    # for user tests before running the suite.
    for test in suite:
        if not test.info.tests:
            stream.writeln("Note: %s has no user defined tests."%model)
        break
    else:
        stream.writeln("Note: no test suite created --- this should never happen")

    # Run the test suite
    suite.run(result)

    # Print the failures and errors
    for _, tb in result.errors:
        stream.writeln(tb)
    for _, tb in result.failures:
        stream.writeln(tb)

    output = stream.getvalue()
    stream.close()
    return output


def main(*models):
    # type: (*str) -> int
    """
    Run the tests given in *models*.

    Returns 0 if success or 1 if any tests fail.
    """
    try:
        from xmlrunner import XMLTestRunner as TestRunner
        test_args = {'output': 'logs'}
    except ImportError:
        from unittest import TextTestRunner as TestRunner
        test_args = {}

    if models and models[0] == '-v':
        verbosity = 2
        models = models[1:]
    else:
        verbosity = 1
    if models and models[0] == 'opencl':
        if not use_opencl():
            print("opencl is not available")
            return 1
        loaders = ['opencl']
        models = models[1:]
    elif models and models[0] == 'cuda':
        if not use_cuda():
            print("cuda is not available")
            return 1
        loaders = ['cuda']
        models = models[1:]
    elif models and models[0] == 'dll':
        # TODO: test if compiler is available?
        loaders = ['dll']
        models = models[1:]
    else:
        loaders = ['dll']
        if use_opencl():
            loaders.append('opencl')
        if use_cuda():
            loaders.append('cuda')
    if not models:
        print("""\
usage:
  python -m sasmodels.model_test [-v] [opencl|cuda|dll] model1 model2 ...

If -v is included on the command line, then use verbose output.

If no platform is specified, then models will be tested with dll, and
if available, OpenCL and CUDA; the compute target is ignored for pure python models.

If model1 is 'all', then all models except those listed after it will be tested.

""")

        return 1

    runner = TestRunner(verbosity=verbosity, **test_args)
    result = runner.run(make_suite(loaders, models))
    return 1 if result.failures or result.errors else 0


def model_tests():
    # type: () -> Iterator[Callable[[], None]]
    """
    Test runner visible to nosetests.

    Run "nosetests sasmodels" on the command line to invoke it.
    """
    loaders = ['dll']
    if use_opencl():
        loaders.append('opencl')
    if use_cuda():
        loaders.append('cuda')
    tests = make_suite(loaders, ['all'])
    def build_test(test):
        # In order for nosetest to show the test name, wrap the test.run_all
        # instance in a function whose description attribute carries the test
        # name, which will be displayed when the test is run.  Do this in a
        # function so that it properly captures the context for tests that
        # are captured now and run later.  If done directly in the for loop,
        # then the looping variable test would be shared amongst all the
        # tests, and we would be repeatedly testing vesicle.

        # Note: in sasview sas.sasgui.perspectives.fitting.gpu_options
        # requires that the test.description field be set.
        wrap = lambda: test.run_all()
        wrap.description = test.test_name
        return wrap
        # The following would work with nosetests and pytest:
        #     return lambda name: test.run_all(), test.test_name

    for test in tests:
        yield build_test(test)


if __name__ == "__main__":
    sys.exit(main(*sys.argv[1:]))