source: sasmodels/sasmodels/model_test.py @ 74e9b5f

Last change on this file was 74e9b5f, checked in by pkienzle, 5 years ago:

autotag functions as device functions for cuda. Refs #1076.

# -*- coding: utf-8 -*-
"""
Run model unit tests.

Usage::

    python -m sasmodels.model_test [opencl|cuda|dll] model1 model2 ...

    if model1 is 'all', then all models except those listed after it
    will be tested

Each model is tested using the default parameters at q=0.1, (qx, qy)=(0.1, 0.1),
and the ER and VR are computed.  The return values at these points are not
considered.  The test is only to verify that the models run to completion,
and do not produce inf or NaN.

Tests are defined with the *tests* attribute in the model.py file.  *tests*
is a list of individual tests to run, where each test consists of the
parameter values for the test, the q-values and the expected results.  For
the effective radius test, the q-value should be 'ER'.  For the VR test,
the q-value should be 'VR'.  For 1-D tests, either specify the q value or
a list of q-values, and the corresponding I(q) value, or list of I(q) values.

That is::

    tests = [
        [ {parameters}, q, I(q)],
        [ {parameters}, [q], [I(q)] ],
        [ {parameters}, [q1, q2, ...], [I(q1), I(q2), ...]],

        [ {parameters}, (qx, qy), I(qx, qy)],
        [ {parameters}, [(qx1, qy1), (qx2, qy2), ...],
                        [I(qx1, qy1), I(qx2, qy2), ...]],

        [ {parameters}, 'ER', ER(pars) ],
        [ {parameters}, 'VR', VR(pars) ],
        ...
    ]

Parameters are *key:value* pairs, where key is one of the parameters of the
model and value is the value to use for the test.  Any parameters not given
in the parameter list will take on the default parameter value.

Precision defaults to 5 digits (relative).
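
For example, a hypothetical model with *radius* and *sld* parameters might
define something like the following (the parameter names and numerical values
are placeholders for illustration only, not taken from any real model file)::

    tests = [
        # expected I(q) at a single q value
        [{"radius": 30.0, "sld": 1.0}, 0.1, 0.2233],
        # expected I(q) at several q values, with polydispersity on radius
        [{"radius": 30.0, "radius_pd": 0.2, "radius_pd_n": 35},
         [0.01, 0.1], [2.84, 0.021]],
    ]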
"""
from __future__ import print_function

import sys
import unittest

try:
    from StringIO import StringIO
except ImportError:
    # StringIO.StringIO renamed to io.StringIO in Python 3
    # Note: io.StringIO exists in python 2, but uses unicode instead of str
    from io import StringIO

import numpy as np  # type: ignore

from . import core
from .core import list_models, load_model_info, build_model
from .direct_model import call_kernel, call_ER, call_VR
from .exception import annotate_exception
from .modelinfo import expand_pars
from .kernelcl import use_opencl
from .kernelcuda import use_cuda

# pylint: disable=unused-import
try:
    from typing import List, Iterator, Callable
except ImportError:
    pass
else:
    from .modelinfo import ParameterTable, ParameterSet, TestCondition, ModelInfo
    from .kernel import KernelModel
# pylint: enable=unused-import


def make_suite(loaders, models):
    # type: (List[str], List[str]) -> unittest.TestSuite
    """
    Construct the pyunit test suite.

    *loaders* is the list of kernel drivers to use (dll, opencl or cuda).
    For python models the python driver is always used.

    *models* is the list of models to test, or *["all"]* to test all models.
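
    For example (illustrative; 'sphere' and 'cylinder' are names of built-in
    models), a suite covering two models using only the dll driver could be
    built and run as::

        suite = make_suite(['dll'], ['sphere', 'cylinder'])
        unittest.TextTestRunner().run(suite)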
    """
    ModelTestCase = _hide_model_case_from_nose()
    suite = unittest.TestSuite()

    if models[0] in core.KINDS:
        skip = models[1:]
        models = list_models(models[0])
    else:
        skip = []
    for model_name in models:
        if model_name in skip:
            continue
        model_info = load_model_info(model_name)

        #print('------')
        #print('found tests in', model_name)
        #print('------')

        # If the kernel is implemented in python, use the dll loader to call
        # the python kernel; don't try to call the cl kernel since it will
        # not be available in some environments.
        is_py = callable(model_info.Iq)

        # Some OpenCL drivers seem to be flaky, and are not producing the
        # expected result.  Since we don't have known test values yet for
        # all of our models, we are instead going to compare the results
        # for the 'smoke test' (that is, evaluation at q=0.1 for the default
        # parameters just to see that the model runs to completion) between
        # the OpenCL and the DLL.  To do this, we define a 'stash' which is
        # shared between OpenCL and DLL tests.  This is just a list.  If the
        # list is empty (which it will be when DLL runs, if the DLL runs
        # first), then the results are appended to the list.  If the list
        # is not empty (which it will be when OpenCL runs second), the results
        # are compared to the results stored in the first element of the list.
        # This is a horrible stateful hack which only makes sense because the
        # test suite is thrown away after being run once.
        stash = []

        if is_py:  # kernel implemented in python
            test_name = "%s-python"%model_name
            test_method_name = "test_%s_python" % model_info.id
            test = ModelTestCase(test_name, model_info,
                                 test_method_name,
                                 platform="dll",  # so that the python kernel is used
                                 dtype="double",
                                 stash=stash)
            suite.addTest(test)
        else:   # kernel implemented in C

            # test using dll if desired
            if 'dll' in loaders:
                test_name = "%s-dll"%model_name
                test_method_name = "test_%s_dll" % model_info.id
                test = ModelTestCase(test_name, model_info,
                                     test_method_name,
                                     platform="dll",
                                     dtype="double",
                                     stash=stash)
                suite.addTest(test)

            # test using opencl if desired and available
            if 'opencl' in loaders and use_opencl():
                test_name = "%s-opencl"%model_name
                test_method_name = "test_%s_opencl" % model_info.id
                # Using dtype=None so that the models that are only
                # correct for double precision are not tested using
                # single precision.  The choice is determined by the
                # presence of *single=False* in the model file.
                test = ModelTestCase(test_name, model_info,
                                     test_method_name,
                                     platform="ocl", dtype=None,
                                     stash=stash)
                #print("defining", test_name)
                suite.addTest(test)

            # test using cuda if desired and available
            if 'cuda' in loaders and use_cuda():
                test_name = "%s-cuda"%model_name
                test_method_name = "test_%s_cuda" % model_info.id
                # Using dtype=None so that the models that are only
                # correct for double precision are not tested using
                # single precision.  The choice is determined by the
                # presence of *single=False* in the model file.
                test = ModelTestCase(test_name, model_info,
                                     test_method_name,
                                     platform="cuda", dtype=None,
                                     stash=stash)
                #print("defining", test_name)
                suite.addTest(test)

    return suite

def _hide_model_case_from_nose():
    # type: () -> type
    class ModelTestCase(unittest.TestCase):
        """
        Test suite for a particular model with a particular kernel driver.

        The test suite runs a simple smoke test to make sure the model
        functions, then runs the list of tests at the bottom of the model
        description file.
        """
        def __init__(self, test_name, model_info, test_method_name,
                     platform, dtype, stash):
            # type: (str, ModelInfo, str, str, DType, List[Any]) -> None
            self.test_name = test_name
            self.info = model_info
            self.platform = platform
            self.dtype = dtype
            self.stash = stash  # container for the results of the first run

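            # Bind run_all under the requested per-model method name (for
            # example test_sphere_dll; the name is illustrative) so that
            # unittest reports a distinct test for each model/driver pair,
            # even though every case shares the same run_all implementation.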
            setattr(self, test_method_name, self.run_all)
            unittest.TestCase.__init__(self, test_method_name)

        def run_all(self):
            # type: () -> None
            """
            Run all the tests in the test suite, including smoke tests.
            """
            smoke_tests = [
                # test validity at reasonable values
                ({}, 0.1, None),
                ({}, (0.1, 0.1), None),
                # test validity at q = 0
                #({}, 0.0, None),
                #({}, (0.0, 0.0), None),
                # test vector form
                ({}, [0.001, 0.01, 0.1], [None]*3),
                ({}, [(0.1, 0.1)]*2, [None]*2),
                # test that ER/VR will run if they exist
                ({}, 'ER', None),
                ({}, 'VR', None),
                ]
            tests = smoke_tests
            #tests = []
            if self.info.tests is not None:
                tests += self.info.tests
            try:
                model = build_model(self.info, dtype=self.dtype,
                                    platform=self.platform)
                results = [self.run_one(model, test) for test in tests]
                if self.stash:
                    for test, target, actual in zip(tests, self.stash[0], results):
                        assert np.all(abs(target-actual) < 5e-5*abs(actual)), \
                            ("GPU/CPU comparison expected %s but got %s for %s"
                             % (target, actual, test[0]))
                else:
                    self.stash.append(results)

                # Check for missing tests.  Only do so for the "dll" tests
                # to reduce noise from both opencl and cuda, and because
                # python kernels use platform="dll".
                if self.platform == "dll":
                    missing = []
                    ## Uncomment the following to require test cases
                    #missing = self._find_missing_tests()
                    if missing:
                        raise ValueError("Missing tests for "+", ".join(missing))

            except:
                annotate_exception(self.test_name)
                raise

        def _find_missing_tests(self):
            # type: () -> List[str]
            """make sure there are 1D, 2D, ER and VR tests as appropriate"""
            model_has_VR = callable(self.info.VR)
            model_has_ER = callable(self.info.ER)
            model_has_1D = True
            model_has_2D = any(p.type == 'orientation'
                               for p in self.info.parameters.kernel_parameters)

            # Lists of tests that have a result that is not None
            single = [test for test in self.info.tests
                      if not isinstance(test[2], list) and test[2] is not None]
            tests_has_VR = any(test[1] == 'VR' for test in single)
            tests_has_ER = any(test[1] == 'ER' for test in single)
            tests_has_1D_single = any(isinstance(test[1], float) for test in single)
            tests_has_2D_single = any(isinstance(test[1], tuple) for test in single)

            multiple = [test for test in self.info.tests
                        if isinstance(test[2], list)
                        and not all(result is None for result in test[2])]
            tests_has_1D_multiple = any(isinstance(test[1][0], float)
                                        for test in multiple)
            tests_has_2D_multiple = any(isinstance(test[1][0], tuple)
                                        for test in multiple)

            missing = []
            if model_has_VR and not tests_has_VR:
                missing.append("VR")
            if model_has_ER and not tests_has_ER:
                missing.append("ER")
            if model_has_1D and not (tests_has_1D_single or tests_has_1D_multiple):
                missing.append("1D")
            if model_has_2D and not (tests_has_2D_single or tests_has_2D_multiple):
                missing.append("2D")

            return missing

        def run_one(self, model, test):
            # type: (KernelModel, TestCondition) -> np.ndarray
            """Run a single test case."""
            user_pars, x, y = test
            pars = expand_pars(self.info.parameters, user_pars)
            invalid = invalid_pars(self.info.parameters, pars)
            if invalid:
                raise ValueError("Unknown parameters in test: " + ", ".join(invalid))

            if not isinstance(y, list):
                y = [y]
            if not isinstance(x, list):
                x = [x]

            self.assertEqual(len(y), len(x))

            if x[0] == 'ER':
                actual = np.array([call_ER(model.info, pars)])
            elif x[0] == 'VR':
                actual = np.array([call_VR(model.info, pars)])
            elif isinstance(x[0], tuple):
                qx, qy = zip(*x)
                q_vectors = [np.array(qx), np.array(qy)]
                kernel = model.make_kernel(q_vectors)
                actual = call_kernel(kernel, pars)
            else:
                q_vectors = [np.array(x)]
                kernel = model.make_kernel(q_vectors)
                actual = call_kernel(kernel, pars)

            self.assertTrue(len(actual) > 0)
            self.assertEqual(len(y), len(actual))

            for xi, yi, actual_yi in zip(x, y, actual):
                if yi is None:
                    # smoke test --- make sure it runs and produces a value
                    self.assertTrue(not np.isnan(actual_yi),
                                    'invalid f(%s): %s' % (xi, actual_yi))
                elif np.isnan(yi):
                    self.assertTrue(np.isnan(actual_yi),
                                    'f(%s): expected:%s; actual:%s'
                                    % (xi, yi, actual_yi))
                else:
                    # is_near does not work for infinite values, so also
                    # test for exact equality.
                    self.assertTrue(yi == actual_yi or is_near(yi, actual_yi, 5),
                                    'f(%s): expected:%s; actual:%s'
                                    % (xi, yi, actual_yi))
            return actual

    return ModelTestCase

def invalid_pars(partable, pars):
    # type: (ParameterTable, Dict[str, float]) -> List[str]
    """
    Return a list of parameter names that are not part of the model.
    """
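    # Polydispersity columns such as radius_pd, radius_pd_n, radius_pd_nsigma
    # and radius_pd_type are accepted alongside the base parameter name
    # ("radius" here is only an illustration, not a parameter of every model).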
    names = set(p.id for p in partable.call_parameters)
    invalid = []
    for par in sorted(pars.keys()):
        parts = par.split('_pd')
        if len(parts) > 1 and parts[1] not in ("", "_n", "_nsigma", "_type"):
            invalid.append(par)
            continue
        if parts[0] not in names:
            invalid.append(par)
    return invalid


def is_near(target, actual, digits=5):
    # type: (float, float, int) -> bool
    """
    Returns true if *actual* is within *digits* significant digits of *target*.
    """
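    # Illustrative check of the arithmetic: with target=1.2345 and digits=5,
    # shift = 10**ceil(log10(1.2345)) = 10, so any actual value within
    # 1.5e-5 * 10 = 1.5e-4 of the target is accepted.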
    import math
    shift = 10**math.ceil(math.log10(abs(target)))
    return abs(target-actual)/shift < 1.5*10**-digits

def run_one(model):
    # type: (str) -> str
    """
    Run the tests for a single model, printing the results to stdout.

    *model* can be a python file, which is handy for checking user defined
    plugin models.
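
    For example (the plugin path shown is illustrative)::

        print(run_one('sphere'))
        print(run_one('./my_plugin_model.py'))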
    """
    # Note that running main() directly did not work from within the
    # wxPython pycrust console.  Instead of the results appearing in the
    # window they were printed to the underlying console.
    from unittest.runner import TextTestResult, _WritelnDecorator

    # Build an object to capture and print the test results
    stream = _WritelnDecorator(StringIO())  # Add writeln() method to stream
    verbosity = 2
    descriptions = True
    result = TextTestResult(stream, descriptions, verbosity)

    # Build a test suite containing just the model
    loader = 'opencl' if use_opencl() else 'cuda' if use_cuda() else 'dll'
    models = [model]
    try:
        suite = make_suite([loader], models)
    except Exception:
        import traceback
        stream.writeln(traceback.format_exc())
        return stream.getvalue()

    # Warn if there are no user defined tests.
    # Note: the test suite constructed above only has one test in it, which
    # runs through some smoke tests to make sure the model runs, then runs
    # through the input-output pairs given in the model definition file.  To
    # check if any such pairs are defined, therefore, we just need to check if
    # they are in the first test of the test suite.  We do this with an
    # iterator since we don't have direct access to the list of tests in the
    # test suite.
    # In Qt5 suite.run() will clear all tests in the suite after running
    # with no way of retaining them for the test below, so let's check
    # for user tests before running the suite.
    for test in suite:
        if not test.info.tests:
            stream.writeln("Note: %s has no user defined tests."%model)
        break
    else:
        stream.writeln("Note: no test suite created --- this should never happen")

    # Run the test suite
    suite.run(result)

    # Print the failures and errors
    for _, tb in result.errors:
        stream.writeln(tb)
    for _, tb in result.failures:
        stream.writeln(tb)

    output = stream.getvalue()
    stream.close()
    return output


def main(*models):
    # type: (*str) -> int
    """
    Run tests given in *models*.

    Returns 0 if success or 1 if any tests fail.
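
    For example (illustrative)::

        sys.exit(main('-v', 'dll', 'sphere'))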
    """
    try:
        from xmlrunner import XMLTestRunner as TestRunner
        test_args = {'output': 'logs'}
    except ImportError:
        from unittest import TextTestRunner as TestRunner
        test_args = {}

    if models and models[0] == '-v':
        verbosity = 2
        models = models[1:]
    else:
        verbosity = 1
    if models and models[0] == 'opencl':
        if not use_opencl():
            print("opencl is not available")
            return 1
        loaders = ['opencl']
        models = models[1:]
    elif models and models[0] == 'cuda':
        if not use_cuda():
            print("cuda is not available")
            return 1
        loaders = ['cuda']
        models = models[1:]
    elif models and models[0] == 'dll':
        # TODO: test if compiler is available?
        loaders = ['dll']
        models = models[1:]
    else:
        loaders = ['dll']
        if use_opencl():
            loaders.append('opencl')
        if use_cuda():
            loaders.append('cuda')
    if not models:
        print("""\
usage:
  python -m sasmodels.model_test [-v] [opencl|cuda|dll] model1 model2 ...

If -v is included on the command line, then use verbose output.

If no platform is specified, then models will be tested with dll and,
if available, with OpenCL and CUDA; the compute target is ignored for
pure python models.

If model1 is 'all', then all models except those listed after it will be
tested.

""")

        return 1

    runner = TestRunner(verbosity=verbosity, **test_args)
    result = runner.run(make_suite(loaders, models))
    return 1 if result.failures or result.errors else 0


def model_tests():
    # type: () -> Iterator[Callable[[], None]]
    """
    Test runner visible to nosetests.

    Run "nosetests sasmodels" on the command line to invoke it.
    """
    loaders = ['dll']
    if use_opencl():
        loaders.append('opencl')
    if use_cuda():
        loaders.append('cuda')
    tests = make_suite(loaders, ['all'])
    def build_test(test):
        # In order for nosetest to show the test name, wrap the test.run_all
        # method in a function that carries the test name, which will be
        # displayed when the test is run.  Do this in a helper function so
        # that each wrapper properly captures its own test.  If the wrappers
        # were built directly in the for loop below, the loop variable *test*
        # would be shared amongst all of them, and we would end up repeatedly
        # testing the same model (vesicle, the last one in the loop).
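        # (For example, [lambda: t.run_all() for t in tests] would leave every
        # lambda bound to the final value of t once the loop finished; closing
        # over the *test* argument of build_test avoids that.)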

        # Note: in sasview sas.sasgui.perspectives.fitting.gpu_options
        # requires that the test.description field be set.
        wrap = lambda: test.run_all()
        wrap.description = test.test_name
        return wrap
        # The following would work with nosetests and pytest:
        #     return lambda name: test.run_all(), test.test_name

    for test in tests:
        yield build_test(test)


if __name__ == "__main__":
    sys.exit(main(*sys.argv[1:]))