source: sasmodels/sasmodels/model_test.py @ 20fe0cd

Last change on this file was 20fe0cd, checked in by Paul Kienzle <pkienzle@…>, 6 years ago

move paracrystal integration tests with the rest of the non-rotationally symmetric tests

# -*- coding: utf-8 -*-
"""
Run model unit tests.

Usage::

    python -m sasmodels.model_test [opencl|dll|opencl_and_dll] model1 model2 ...

    if model1 is 'all', then all models except those named after it are tested

Each model is tested using the default parameters at q=0.1, (qx, qy)=(0.1, 0.1),
and the ER and VR are computed.  The return values at these points are not
considered.  The test is only to verify that the models run to completion,
and do not produce inf or NaN.

Tests are defined with the *tests* attribute in the model.py file.  *tests*
is a list of individual tests to run, where each test consists of the
parameter values for the test, the q-values and the expected results.  For
the effective radius test, the q-value should be 'ER'.  For the VR test,
the q-value should be 'VR'.  For 1-D tests, either specify the q value or
a list of q-values, and the corresponding I(q) value, or list of I(q) values.

That is::

    tests = [
        [ {parameters}, q, I(q)],
        [ {parameters}, [q], [I(q)] ],
        [ {parameters}, [q1, q2, ...], [I(q1), I(q2), ...]],

        [ {parameters}, (qx, qy), I(qx, qy)],
        [ {parameters}, [(qx1, qy1), (qx2, qy2), ...],
                        [I(qx1, qy1), I(qx2, qy2), ...]],

        [ {parameters}, 'ER', ER(pars) ],
        [ {parameters}, 'VR', VR(pars) ],
        ...
    ]

Parameters are *key:value* pairs, where key is one of the parameters of the
model and value is the value to use for the test.  Any parameters not given
in the parameter list will take on the default parameter value.

Precision defaults to 5 digits (relative).
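
For example, a hypothetical model file might include (the parameter names
and expected intensities below are purely illustrative, not values from a
real model)::

    tests = [
        [{'radius': 30.0, 'sld': 1.0}, 0.1, 0.215],
        [{'radius': 30.0}, [0.01, 0.1], [125.0, 0.215]],
        [{'radius': 30.0}, 'ER', 30.0],
    ]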
"""
from __future__ import print_function

import sys
import unittest

try:
    from StringIO import StringIO
except ImportError:
    # StringIO.StringIO was renamed to io.StringIO in Python 3
    # Note: io.StringIO exists in python 2, but it uses unicode rather than str
    from io import StringIO

import numpy as np  # type: ignore

from . import core
from .core import list_models, load_model_info, build_model
from .direct_model import call_kernel, call_ER, call_VR
from .exception import annotate_exception
from .modelinfo import expand_pars

try:
    from typing import Any, Dict, List, Iterator, Callable
except ImportError:
    pass
else:
    from .modelinfo import ParameterTable, ParameterSet, TestCondition, ModelInfo
    from .kernel import KernelModel


def make_suite(loaders, models):
    # type: (List[str], List[str]) -> unittest.TestSuite
    """
    Construct the pyunit test suite.

    *loaders* is the list of kernel drivers to use, which is one of
    *["dll", "opencl"]*, *["dll"]* or *["opencl"]*.  For python models,
    the python driver is always used.

    *models* is the list of models to test, or *["all"]* to test all models.
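
    A minimal usage sketch (the model names are just examples)::

        suite = make_suite(['dll'], ['sphere', 'cylinder'])
        unittest.TextTestRunner(verbosity=2).run(suite)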
    """
    ModelTestCase = _hide_model_case_from_nose()
    suite = unittest.TestSuite()

    if models[0] in core.KINDS:
        skip = models[1:]
        models = list_models(models[0])
    else:
        skip = []
    for model_name in models:
        if model_name in skip:
            continue
        model_info = load_model_info(model_name)

        #print('------')
        #print('found tests in', model_name)
        #print('------')

        # If the kernel is implemented in python (is_py), use the dll loader
        # to call the python kernel; don't try to call the OpenCL kernel
        # since it will not be available in some environments.
        is_py = callable(model_info.Iq)

        # Some OpenCL drivers seem to be flaky, and are not producing the
        # expected result.  Since we don't have known test values yet for
        # all of our models, we are instead going to compare the results
        # for the 'smoke test' (that is, evaluation at q=0.1 for the default
        # parameters just to see that the model runs to completion) between
        # the OpenCL and the DLL.  To do this, we define a 'stash' which is
        # shared between OpenCL and DLL tests.  This is just a list.  If the
        # list is empty (which it will be when DLL runs, if the DLL runs
        # first), then the results are appended to the list.  If the list
        # is not empty (which it will be when OpenCL runs second), the results
        # are compared to the results stored in the first element of the list.
        # This is a horrible stateful hack which only makes sense because the
        # test suite is thrown away after being run once.
        stash = []

        if is_py:  # kernel implemented in python
            test_name = "Model: %s, Kernel: python"%model_name
            test_method_name = "test_%s_python" % model_info.id
            test = ModelTestCase(test_name, model_info,
                                 test_method_name,
                                 platform="dll",  # use the dll loader for python kernels
                                 dtype="double",
                                 stash=stash)
            suite.addTest(test)
        else:   # kernel implemented in C

            # test using dll if desired
            if 'dll' in loaders or not core.HAVE_OPENCL:
                test_name = "Model: %s, Kernel: dll"%model_name
                test_method_name = "test_%s_dll" % model_info.id
                test = ModelTestCase(test_name, model_info,
                                     test_method_name,
                                     platform="dll",
                                     dtype="double",
                                     stash=stash)
                suite.addTest(test)

            # test using opencl if desired and available
            if 'opencl' in loaders and core.HAVE_OPENCL:
                test_name = "Model: %s, Kernel: OpenCL"%model_name
                test_method_name = "test_%s_opencl" % model_info.id
                # Using dtype=None so that the models that are only
                # correct for double precision are not tested using
                # single precision.  The choice is determined by the
                # presence of *single=False* in the model file.
                test = ModelTestCase(test_name, model_info,
                                     test_method_name,
                                     platform="ocl", dtype=None,
                                     stash=stash)
                #print("defining", test_name)
                suite.addTest(test)

    return suite


def _hide_model_case_from_nose():
    # type: () -> type
    class ModelTestCase(unittest.TestCase):
        """
        Test suite for a particular model with a particular kernel driver.

        The test suite runs a simple smoke test to make sure the model
        functions, then runs the list of tests at the bottom of the model
        description file.
        """
        def __init__(self, test_name, model_info, test_method_name,
                     platform, dtype, stash):
            # type: (str, ModelInfo, str, str, DType, List[Any]) -> None
            self.test_name = test_name
            self.info = model_info
            self.platform = platform
            self.dtype = dtype
            self.stash = stash  # container for the results of the first run

            setattr(self, test_method_name, self.run_all)
            unittest.TestCase.__init__(self, test_method_name)

        def run_all(self):
            # type: () -> None
            """
            Run all the tests in the test suite, including smoke tests.
            """
            smoke_tests = [
                # test validity at reasonable values
                ({}, 0.1, None),
                ({}, (0.1, 0.1), None),
                # test validity at q = 0
                #({}, 0.0, None),
                #({}, (0.0, 0.0), None),
                # test vector form
                ({}, [0.001, 0.01, 0.1], [None]*3),
                ({}, [(0.1, 0.1)]*2, [None]*2),
                # test that ER/VR will run if they exist
                ({}, 'ER', None),
                ({}, 'VR', None),
                ]
            tests = smoke_tests
            #tests = []
            if self.info.tests is not None:
                tests += self.info.tests
            try:
                model = build_model(self.info, dtype=self.dtype,
                                    platform=self.platform)
                results = [self.run_one(model, test) for test in tests]
                if self.stash:
                    for test, target, actual in zip(tests, self.stash[0], results):
                        assert np.all(abs(target-actual) < 5e-5*abs(actual)),\
                            "GPU/CPU comparison expected %s but got %s for %s"%(target, actual, test[0])
                else:
                    self.stash.append(results)

                # Check for missing tests.  Only do so for the "dll" tests
                # to reduce noise from both opencl and dll, and because
                # python kernels use platform="dll".
                if self.platform == "dll":
                    missing = []
                    ## Uncomment the following to require test cases
                    #missing = self._find_missing_tests()
                    if missing:
                        raise ValueError("Missing tests for "+", ".join(missing))

            except:
                annotate_exception(self.test_name)
                raise

        def _find_missing_tests(self):
            # type: () -> List[str]
            """Make sure there are 1D, 2D, ER and VR tests as appropriate."""
            model_has_VR = callable(self.info.VR)
            model_has_ER = callable(self.info.ER)
            model_has_1D = True
            model_has_2D = any(p.type == 'orientation'
                               for p in self.info.parameters.kernel_parameters)

            # Lists of tests that have a result that is not None
            single = [test for test in self.info.tests
                      if not isinstance(test[2], list) and test[2] is not None]
            tests_has_VR = any(test[1] == 'VR' for test in single)
            tests_has_ER = any(test[1] == 'ER' for test in single)
            tests_has_1D_single = any(isinstance(test[1], float) for test in single)
            tests_has_2D_single = any(isinstance(test[1], tuple) for test in single)

            multiple = [test for test in self.info.tests
                        if isinstance(test[2], list)
                        and not all(result is None for result in test[2])]
            tests_has_1D_multiple = any(isinstance(test[1][0], float)
                                        for test in multiple)
            tests_has_2D_multiple = any(isinstance(test[1][0], tuple)
                                        for test in multiple)

            missing = []
            if model_has_VR and not tests_has_VR:
                missing.append("VR")
            if model_has_ER and not tests_has_ER:
                missing.append("ER")
            if model_has_1D and not (tests_has_1D_single or tests_has_1D_multiple):
                missing.append("1D")
            if model_has_2D and not (tests_has_2D_single or tests_has_2D_multiple):
                missing.append("2D")

            return missing

        def run_one(self, model, test):
            # type: (KernelModel, TestCondition) -> np.ndarray
            """Run a single test case."""
            user_pars, x, y = test
            pars = expand_pars(self.info.parameters, user_pars)
            invalid = invalid_pars(self.info.parameters, pars)
            if invalid:
                raise ValueError("Unknown parameters in test: " + ", ".join(invalid))

            if not isinstance(y, list):
                y = [y]
            if not isinstance(x, list):
                x = [x]

            self.assertEqual(len(y), len(x))

            if x[0] == 'ER':
                actual = np.array([call_ER(model.info, pars)])
            elif x[0] == 'VR':
                actual = np.array([call_VR(model.info, pars)])
            elif isinstance(x[0], tuple):
                qx, qy = zip(*x)
                q_vectors = [np.array(qx), np.array(qy)]
                kernel = model.make_kernel(q_vectors)
                actual = call_kernel(kernel, pars)
            else:
                q_vectors = [np.array(x)]
                kernel = model.make_kernel(q_vectors)
                actual = call_kernel(kernel, pars)

            self.assertTrue(len(actual) > 0)
            self.assertEqual(len(y), len(actual))

            for xi, yi, actual_yi in zip(x, y, actual):
                if yi is None:
                    # smoke test --- make sure it runs and produces a value
                    self.assertTrue(not np.isnan(actual_yi),
                                    'invalid f(%s): %s' % (xi, actual_yi))
                elif np.isnan(yi):
                    self.assertTrue(np.isnan(actual_yi),
                                    'f(%s): expected:%s; actual:%s'
                                    % (xi, yi, actual_yi))
                else:
                    # is_near does not work for infinite values, so also test
                    # for exact equality
                    self.assertTrue(yi == actual_yi or is_near(yi, actual_yi, 5),
                                    'f(%s): expected:%s; actual:%s'
                                    % (xi, yi, actual_yi))
            return actual

    return ModelTestCase

def invalid_pars(partable, pars):
    # type: (ParameterTable, Dict[str, float]) -> List[str]
    """
    Return a list of parameter names that are not part of the model.
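
    For example, if *radius* is a model parameter then *radius*, *radius_pd*
    and *radius_pd_n* are accepted, while a misspelled name such as *raddius*
    or an unrecognized suffix such as *radius_pd_bad* is reported as invalid.
    (The parameter names here are illustrative only.)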
    """
    names = set(p.id for p in partable.call_parameters)
    invalid = []
    for par in sorted(pars.keys()):
        parts = par.split('_pd')
        # polydispersity parameters use the suffixes _pd, _pd_n, _pd_nsigma
        # and _pd_type, so the remainder after splitting on '_pd' should be
        # one of "", "_n", "_nsigma" or "_type"
        if len(parts) > 1 and parts[1] not in ("", "_n", "_nsigma", "_type"):
            invalid.append(par)
            continue
        if parts[0] not in names:
            invalid.append(par)
    return invalid


def is_near(target, actual, digits=5):
    # type: (float, float, int) -> bool
    """
    Returns true if *actual* is within *digits* significant digits of *target*.
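
    For example, with the default 5 digits, is_near(1.2345, 1.2345001) is
    True while is_near(1.2345, 1.2350) is False.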
    """
    import math
    shift = 10**math.ceil(math.log10(abs(target)))
    return abs(target-actual)/shift < 1.5*10**-digits

def run_one(model):
    # type: (str) -> str
    """
    Run the tests for a single model, returning the test output as a string.

    *model* can be a python file, which is handy for checking user defined
    plugin models.
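
    For example (the model name here is only illustrative)::

        print(run_one("cylinder"))
        print(run_one("my_model.py"))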
    """
    # Note that running main() directly did not work from within the
    # wxPython pycrust console.  Instead of the results appearing in the
    # window they were printed to the underlying console.
    from unittest.runner import TextTestResult, _WritelnDecorator

    # Build an object to capture the test results
    stream = _WritelnDecorator(StringIO())  # Add writeln() method to stream
    verbosity = 2
    descriptions = True
    result = TextTestResult(stream, descriptions, verbosity)

    # Build a test suite containing just the model
    loaders = ['opencl'] if core.HAVE_OPENCL else ['dll']
    models = [model]
    try:
        suite = make_suite(loaders, models)
    except Exception:
        import traceback
        stream.writeln(traceback.format_exc())
        return
    # Run the test suite
    suite.run(result)

    # Write the failures and errors to the captured stream
    for _, tb in result.errors:
        stream.writeln(tb)
    for _, tb in result.failures:
        stream.writeln(tb)

    # Warn if there are no user defined tests.
    # Note: the test suite constructed above only has one test in it, which
    # runs through some smoke tests to make sure the model runs, then runs
    # through the input-output pairs given in the model definition file.  To
    # check if any such pairs are defined, therefore, we just need to check if
    # they are in the first test of the test suite.  We do this with an
    # iterator since we don't have direct access to the list of tests in the
    # test suite.
    for test in suite:
        if not test.info.tests:
            stream.writeln("Note: %s has no user defined tests."%model)
        break
    else:
        stream.writeln("Note: no test suite created --- this should never happen")

    output = stream.getvalue()
    stream.close()
    return output


def main(*models):
    # type: (*str) -> int
    """
    Run tests on the given models.

    Returns 0 on success or 1 if any tests fail.
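
    For example (the model name is illustrative)::

        sys.exit(main('-v', 'cylinder'))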
    """
    try:
        from xmlrunner import XMLTestRunner as TestRunner
        test_args = {'output': 'logs'}
    except ImportError:
        from unittest import TextTestRunner as TestRunner
        test_args = {}

    if models and models[0] == '-v':
        verbosity = 2
        models = models[1:]
    else:
        verbosity = 1
    if models and models[0] == 'opencl':
        if not core.HAVE_OPENCL:
            print("opencl is not available")
            return 1
        loaders = ['opencl']
        models = models[1:]
    elif models and models[0] == 'dll':
        # TODO: test if compiler is available?
        loaders = ['dll']
        models = models[1:]
    elif models and models[0] == 'opencl_and_dll':
        loaders = ['opencl', 'dll'] if core.HAVE_OPENCL else ['dll']
        models = models[1:]
    else:
        loaders = ['opencl', 'dll'] if core.HAVE_OPENCL else ['dll']
    if not models:
        print("""\
usage:
  python -m sasmodels.model_test [-v] [opencl|dll|opencl_and_dll] model1 model2 ...

If -v is included on the command line, then use verbose output.

If neither opencl nor dll is specified, then models will be tested with
both OpenCL and dll; the compute target is ignored for pure python models.

If model1 is 'all', then all models except those named after it will be tested.

""")

        return 1

    runner = TestRunner(verbosity=verbosity, **test_args)
    result = runner.run(make_suite(loaders, models))
    return 1 if result.failures or result.errors else 0


def model_tests():
    # type: () -> Iterator[Callable[[], None]]
    """
    Test runner visible to nosetests.

    Run "nosetests sasmodels" on the command line to invoke it.
    """
    loaders = ['opencl', 'dll'] if core.HAVE_OPENCL else ['dll']
    tests = make_suite(loaders, ['all'])
    for test_i in tests:
        # In order for nosetests to see the correct test name, we need to set
        # the description attribute of the returned function.  Since we
        # can't do this for the returned instance, wrap it in a lambda and
        # set the description on the lambda.  Otherwise we could just do:
        #    yield test_i.run_all
        # The default argument binds test_i at definition time so that each
        # generated test keeps its own test case.
        L = lambda test_i=test_i: test_i.run_all()
        L.description = test_i.test_name
        yield L


if __name__ == "__main__":
    sys.exit(main(*sys.argv[1:]))