source: sasmodels/sasmodels/model_test.py @ 8f04da4

Last change on this file since 8f04da4 was bb4b509, checked in by Paul Kienzle <pkienzle@…>, 7 years ago

check parameter names as part of test; PEP8 cleanup

# -*- coding: utf-8 -*-
"""
Run model unit tests.

Usage::

    python -m sasmodels.model_test [opencl|dll|opencl_and_dll] model1 model2 ...

    if model1 is 'all', then all models except those named after it will be tested

Each model is tested using the default parameters at q=0.1, (qx, qy)=(0.1, 0.1),
and the ER and VR are computed.  The return values at these points are not
considered.  The test is only to verify that the models run to completion,
and do not produce inf or NaN.

Tests are defined with the *tests* attribute in the model.py file.  *tests*
is a list of individual tests to run, where each test consists of the
parameter values for the test, the q-values and the expected results.  For
the effective radius test, the q-value should be 'ER'.  For the VR test,
the q-value should be 'VR'.  For 1-D tests, either specify the q value or
a list of q-values, and the corresponding I(q) value, or list of I(q) values.

That is::

    tests = [
        [ {parameters}, q, I(q)],
        [ {parameters}, [q], [I(q)] ],
        [ {parameters}, [q1, q2, ...], [I(q1), I(q2), ...]],

        [ {parameters}, (qx, qy), I(qx, qy)],
        [ {parameters}, [(qx1, qy1), (qx2, qy2), ...],
                        [I(qx1, qy1), I(qx2, qy2), ...]],

        [ {parameters}, 'ER', ER(pars) ],
        [ {parameters}, 'VR', VR(pars) ],
        ...
    ]

Parameters are *key:value* pairs, where key is one of the parameters of the
model and value is the value to use for the test.  Any parameters not given
in the parameter list will take on the default parameter value.

Precision defaults to 5 digits (relative).
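
For example, the *tests* attribute of a hypothetical model with a *radius*
parameter might look like this (parameter names and expected values are
purely illustrative placeholders, not real model output)::

    tests = [
        # 1D test: I(q=0.2) with radius=30 should match the stored reference
        [{'radius': 30.0}, 0.2, 0.0523],
        # 2D test at a single (qx, qy) point
        [{'radius': 30.0}, (0.1, 0.1), 0.0410],
        # effective radius for the default parameters
        [{}, 'ER', 30.0],
    ]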
44"""
45from __future__ import print_function
46
47import sys
48import unittest
49
50import numpy as np  # type: ignore
51
52from . import core
53from .core import list_models, load_model_info, build_model
54from .direct_model import call_kernel, call_ER, call_VR
55from .exception import annotate_exception
56from .modelinfo import expand_pars
57
58try:
    from typing import List, Iterator, Callable, Dict, Any
except ImportError:
    pass
else:
    from .modelinfo import ParameterTable, ParameterSet, TestCondition, ModelInfo
    from .kernel import KernelModel


def make_suite(loaders, models):
    # type: (List[str], List[str]) -> unittest.TestSuite
    """
    Construct the pyunit test suite.

    *loaders* is the list of kernel drivers to use, which is one of
    *["dll", "opencl"]*, *["dll"]* or *["opencl"]*.  For python models,
    the python driver is always used.

    *models* is the list of models to test, or *["all"]* to test all models.
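
    Example (a minimal sketch; "sphere" and "cylinder" are just example
    model names)::

        suite = make_suite(["dll"], ["sphere", "cylinder"])
        unittest.TextTestRunner(verbosity=2).run(suite)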
77    """
78    ModelTestCase = _hide_model_case_from_nose()
79    suite = unittest.TestSuite()
80
81    if models[0] == 'all':
82        skip = models[1:]
83        models = list_models()
84    else:
85        skip = []
86    for model_name in models:
87        if model_name in skip:
88            continue
89        model_info = load_model_info(model_name)
90
91        #print('------')
92        #print('found tests in', model_name)
93        #print('------')
94
95        # if ispy then use the dll loader to call pykernel
96        # don't try to call cl kernel since it will not be
        # available in some environments.
        is_py = callable(model_info.Iq)

        # Some OpenCL drivers seem to be flaky, and are not producing the
        # expected result.  Since we don't have known test values yet for
        # all of our models, we are instead going to compare the results
        # for the 'smoke test' (that is, evaluation at q=0.1 for the default
        # parameters just to see that the model runs to completion) between
        # the OpenCL and the DLL.  To do this, we define a 'stash' which is
        # shared between OpenCL and DLL tests.  This is just a list.  If the
        # list is empty (which it will be when DLL runs, if the DLL runs
        # first), then the results are appended to the list.  If the list
        # is not empty (which it will be when OpenCL runs second), the results
        # are compared to the results stored in the first element of the list.
        # This is a horrible stateful hack which only makes sense because the
        # test suite is thrown away after being run once.
        stash = []

        if is_py:  # kernel implemented in python
            test_name = "Model: %s, Kernel: python"%model_name
            test_method_name = "test_%s_python" % model_info.id
            test = ModelTestCase(test_name, model_info,
                                 test_method_name,
                                 platform="dll",  # so that the python kernel is used (see note above)
                                 dtype="double",
                                 stash=stash)
            suite.addTest(test)
        else:   # kernel implemented in C

            # test using dll if desired
            if 'dll' in loaders or not core.HAVE_OPENCL:
                test_name = "Model: %s, Kernel: dll"%model_name
                test_method_name = "test_%s_dll" % model_info.id
                test = ModelTestCase(test_name, model_info,
                                     test_method_name,
                                     platform="dll",
                                     dtype="double",
                                     stash=stash)
                suite.addTest(test)

            # test using opencl if desired and available
            if 'opencl' in loaders and core.HAVE_OPENCL:
                test_name = "Model: %s, Kernel: OpenCL"%model_name
                test_method_name = "test_%s_opencl" % model_info.id
                # Using dtype=None so that the models that are only
                # correct for double precision are not tested using
                # single precision.  The choice is determined by the
                # presence of *single=False* in the model file.
                test = ModelTestCase(test_name, model_info,
                                     test_method_name,
                                     platform="ocl", dtype=None,
                                     stash=stash)
                #print("defining", test_name)
                suite.addTest(test)

    return suite


def _hide_model_case_from_nose():
    # type: () -> type
    class ModelTestCase(unittest.TestCase):
        """
        Test suite for a particular model with a particular kernel driver.

        The test suite runs a simple smoke test to make sure the model
        functions, then runs the list of tests at the bottom of the model
        description file.
        """
        def __init__(self, test_name, model_info, test_method_name,
                     platform, dtype, stash):
            # type: (str, ModelInfo, str, str, DType, List[Any]) -> None
            self.test_name = test_name
            self.info = model_info
            self.platform = platform
            self.dtype = dtype
            self.stash = stash  # container for the results of the first run

            setattr(self, test_method_name, self.run_all)
            unittest.TestCase.__init__(self, test_method_name)

        def run_all(self):
            # type: () -> None
            """
            Run all the tests in the test suite, including smoke tests.
            """
            smoke_tests = [
                # test validity at reasonable values
                ({}, 0.1, None),
                ({}, (0.1, 0.1), None),
                # test validity at q = 0
                #({}, 0.0, None),
                #({}, (0.0, 0.0), None),
                # test vector form
                ({}, [0.001, 0.01, 0.1], [None]*3),
                ({}, [(0.1, 0.1)]*2, [None]*2),
                # test that ER/VR will run if they exist
                ({}, 'ER', None),
                ({}, 'VR', None),
                ]

            tests = smoke_tests + self.info.tests
            try:
                model = build_model(self.info, dtype=self.dtype,
                                    platform=self.platform)
                results = [self.run_one(model, test) for test in tests]
                if self.stash:
                    for test, target, actual in zip(tests, self.stash[0], results):
                        assert np.all(abs(target-actual) < 5e-5*abs(actual)),\
                            "GPU/CPU comparison expected %s but got %s for %s"%(target, actual, test[0])
                else:
                    self.stash.append(results)

                # Check for missing tests.  Only do so for the "dll" tests
                # to reduce noise from both opencl and dll, and because
                # python kernels use platform="dll".
                if self.platform == "dll":
                    missing = []
                    ## Uncomment the following to require test cases
                    #missing = self._find_missing_tests()
                    if missing:
                        raise ValueError("Missing tests for "+", ".join(missing))

            except:
                annotate_exception(self.test_name)
                raise

        def _find_missing_tests(self):
            # type: () -> List[str]
            """make sure there are 1D, 2D, ER and VR tests as appropriate"""
            model_has_VR = callable(self.info.VR)
            model_has_ER = callable(self.info.ER)
            model_has_1D = True
            model_has_2D = any(p.type == 'orientation'
                               for p in self.info.parameters.kernel_parameters)

            # Lists of tests that have a result that is not None
            single = [test for test in self.info.tests
                      if not isinstance(test[2], list) and test[2] is not None]
            tests_has_VR = any(test[1] == 'VR' for test in single)
            tests_has_ER = any(test[1] == 'ER' for test in single)
            tests_has_1D_single = any(isinstance(test[1], float) for test in single)
            tests_has_2D_single = any(isinstance(test[1], tuple) for test in single)

            multiple = [test for test in self.info.tests
                        if isinstance(test[2], list)
                        and not all(result is None for result in test[2])]
            tests_has_1D_multiple = any(isinstance(test[1][0], float)
                                        for test in multiple)
            tests_has_2D_multiple = any(isinstance(test[1][0], tuple)
                                        for test in multiple)

            missing = []
            if model_has_VR and not tests_has_VR:
                missing.append("VR")
            if model_has_ER and not tests_has_ER:
                missing.append("ER")
            if model_has_1D and not (tests_has_1D_single or tests_has_1D_multiple):
                missing.append("1D")
            if model_has_2D and not (tests_has_2D_single or tests_has_2D_multiple):
                missing.append("2D")

            return missing

        def run_one(self, model, test):
            # type: (KernelModel, TestCondition) -> np.ndarray
            """Run a single test case."""
            user_pars, x, y = test
            pars = expand_pars(self.info.parameters, user_pars)
            invalid = invalid_pars(self.info.parameters, pars)
            if invalid:
                raise ValueError("Unknown parameters in test: " + ", ".join(invalid))

            if not isinstance(y, list):
                y = [y]
            if not isinstance(x, list):
                x = [x]

            self.assertEqual(len(y), len(x))

            if x[0] == 'ER':
                actual = np.array([call_ER(model.info, pars)])
            elif x[0] == 'VR':
                actual = np.array([call_VR(model.info, pars)])
            elif isinstance(x[0], tuple):
                qx, qy = zip(*x)
                q_vectors = [np.array(qx), np.array(qy)]
                kernel = model.make_kernel(q_vectors)
                actual = call_kernel(kernel, pars)
            else:
                q_vectors = [np.array(x)]
                kernel = model.make_kernel(q_vectors)
                actual = call_kernel(kernel, pars)

            self.assertTrue(len(actual) > 0)
            self.assertEqual(len(y), len(actual))

            for xi, yi, actual_yi in zip(x, y, actual):
                if yi is None:
                    # smoke test --- make sure it runs and produces a value
                    self.assertTrue(not np.isnan(actual_yi),
                                    'invalid f(%s): %s' % (xi, actual_yi))
                elif np.isnan(yi):
                    self.assertTrue(np.isnan(actual_yi),
                                    'f(%s): expected:%s; actual:%s'
                                    % (xi, yi, actual_yi))
                else:
                    # is_near does not work for infinite values, so also test
                    # for exact values.
                    self.assertTrue(yi == actual_yi or is_near(yi, actual_yi, 5),
                                    'f(%s); expected:%s; actual:%s'
                                    % (xi, yi, actual_yi))
            return actual

    return ModelTestCase

def invalid_pars(partable, pars):
    # type: (ParameterTable, Dict[str, float]) -> List[str]
    """
    Return a list of parameter names that are not part of the model.
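
    For example, given a hypothetical parameter table whose only parameter
    is *radius*, a misspelled polydispersity name would be reported::

        invalid_pars(partable, {'radius': 30.0, 'raduis_pd': 0.1})
        # => ['raduis_pd']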
    """
    names = set(p.id for p in partable.call_parameters)
    invalid = []
    for par in sorted(pars.keys()):
        parts = par.split('_pd')
        if len(parts) > 1 and parts[1] not in ("", "_n", "_nsigma", "_type"):
            invalid.append(par)
            continue
        if parts[0] not in names:
            invalid.append(par)
    return invalid


def is_near(target, actual, digits=5):
    # type: (float, float, int) -> bool
    """
    Returns true if *actual* is within *digits* significant digits of *target*.
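
    For example (illustrative values)::

        is_near(100.0, 100.0001, 5)   # => True  (agreement to ~6 digits)
        is_near(100.0, 100.2, 5)      # => False (only ~3 digits agree)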
    """
    import math
    shift = 10**math.ceil(math.log10(abs(target)))
    return abs(target-actual)/shift < 1.5*10**-digits

def run_one(model):
    # type: (str) -> None
    """
    Run the tests for a single model, printing the results to stdout.

    *model* can be a python file, which is handy for checking user defined
    plugin models.
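
    Example (the plugin path is hypothetical)::

        run_one("cylinder")                    # a library model by name
        run_one("path/to/my_plugin_model.py")  # a user defined plugin file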
    """
    # Note that running main() directly did not work from within the
    # wxPython pycrust console.  Instead of the results appearing in the
    # window they were printed to the underlying console.
    from unittest.runner import TextTestResult, _WritelnDecorator

    # Build an object to capture and print the test results
    stream = _WritelnDecorator(sys.stdout)  # Add writeln() method to stream
    verbosity = 2
    descriptions = True
    result = TextTestResult(stream, descriptions, verbosity)

    # Build a test suite containing just the model
    loaders = ['opencl'] if core.HAVE_OPENCL else ['dll']
    models = [model]
    try:
        suite = make_suite(loaders, models)
    except Exception:
        import traceback
        stream.writeln(traceback.format_exc())
        return

    # Run the test suite
    suite.run(result)

    # Print the failures and errors
    for _, tb in result.errors:
        stream.writeln(tb)
    for _, tb in result.failures:
        stream.writeln(tb)

    # Warn if there are no user defined tests.
    # Note: the test suite constructed above only has one test in it, which
    # runs through some smoke tests to make sure the model runs, then runs
    # through the input-output pairs given in the model definition file.  To
    # check if any such pairs are defined, therefore, we just need to check if
    # they are in the first test of the test suite.  We do this with an
    # iterator since we don't have direct access to the list of tests in the
    # test suite.
    for test in suite:
        if not test.info.tests:
            stream.writeln("Note: %s has no user defined tests."%model)
        break
    else:
        stream.writeln("Note: no test suite created --- this should never happen")


def main(*models):
    # type: (*str) -> int
    """
    Run tests given in *models*.

    Returns 0 if success or 1 if any tests fail.
    """
    try:
        from xmlrunner import XMLTestRunner as TestRunner
        test_args = {'output': 'logs'}
    except ImportError:
        from unittest import TextTestRunner as TestRunner
        test_args = {}

    if models and models[0] == '-v':
        verbosity = 2
        models = models[1:]
    else:
        verbosity = 1
    if models and models[0] == 'opencl':
        if not core.HAVE_OPENCL:
            print("opencl is not available")
            return 1
        loaders = ['opencl']
        models = models[1:]
    elif models and models[0] == 'dll':
        # TODO: test if compiler is available?
        loaders = ['dll']
        models = models[1:]
    elif models and models[0] == 'opencl_and_dll':
        loaders = ['opencl', 'dll'] if core.HAVE_OPENCL else ['dll']
        models = models[1:]
    else:
        loaders = ['opencl', 'dll'] if core.HAVE_OPENCL else ['dll']
    if not models:
        print("""\
usage:
  python -m sasmodels.model_test [-v] [opencl|dll] model1 model2 ...

If -v is included on the command line, then use verbose output.

If neither opencl nor dll is specified, then models will be tested with
both OpenCL and dll; the compute target is ignored for pure python models.

If model1 is 'all', then all models except those named after it will be tested.

""")

        return 1

    runner = TestRunner(verbosity=verbosity, **test_args)
    result = runner.run(make_suite(loaders, models))
    return 1 if result.failures or result.errors else 0


def model_tests():
    # type: () -> Iterator[Callable[[], None]]
    """
    Test runner visible to nosetests.

    Run "nosetests sasmodels" on the command line to invoke it.
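
    The generator can also be consumed directly (a minimal sketch; each
    yielded callable raises if its model tests fail)::

        for test in model_tests():
            print(test.description)
            test()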
    """
    loaders = ['opencl', 'dll'] if core.HAVE_OPENCL else ['dll']
    tests = make_suite(loaders, ['all'])
    for test_i in tests:
        # In order for nosetest to see the correct test name, need to set
        # the description attribute of the returned function.  Since we
        # can't do this for the returned instance, wrap it in a lambda and
        # set the description on the lambda.  Otherwise we could just do:
        #    yield test_i.run_all
        L = lambda: test_i.run_all()
        L.description = test_i.test_name
        yield L


if __name__ == "__main__":
    sys.exit(main(*sys.argv[1:]))