source: sasmodels/sasmodels/model_test.py @ f2cbeb7

Last change on this file: f2cbeb7, checked in by lewis, 7 years ago (Return unit test output as string in check_model)

# -*- coding: utf-8 -*-
"""
Run model unit tests.

Usage::

    python -m sasmodels.model_test [opencl|dll|opencl_and_dll] model1 model2 ...

    If model1 is 'all', then all models except those listed after it will be tested.

Each model is tested using the default parameters at q=0.1, (qx, qy)=(0.1, 0.1),
and the ER and VR are computed.  The return values at these points are not
considered.  The test is only to verify that the models run to completion,
and do not produce inf or NaN.

Tests are defined with the *tests* attribute in the model.py file.  *tests*
is a list of individual tests to run, where each test consists of the
parameter values for the test, the q-values and the expected results.  For
the effective radius test, the q-value should be 'ER'.  For the VR test,
the q-value should be 'VR'.  For 1-D tests, either specify the q value or
a list of q-values, and the corresponding I(q) value, or list of I(q) values.

That is::

    tests = [
        [ {parameters}, q, I(q)],
        [ {parameters}, [q], [I(q)] ],
        [ {parameters}, [q1, q2, ...], [I(q1), I(q2), ...]],

        [ {parameters}, (qx, qy), I(qx, qy)],
        [ {parameters}, [(qx1, qy1), (qx2, qy2), ...],
                        [I(qx1, qy1), I(qx2, qy2), ...]],

        [ {parameters}, 'ER', ER(pars) ],
        [ {parameters}, 'VR', VR(pars) ],
        ...
    ]

Parameters are *key:value* pairs, where key is one of the parameters of the
model and value is the value to use for the test.  Any parameters not given
in the parameter list will take on the default parameter value.

Precision defaults to 5 digits (relative).
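
For example, a hypothetical entry for a sphere-like model might be::

    tests = [
        [{'radius': 30.0, 'sld': 1.0}, 0.1, 0.2233],
    ]

The parameter names and the expected I(q) value above are illustrative only;
real values must come from the model being tested.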
44"""
45from __future__ import print_function
46
47import sys
48import unittest
try:
    from StringIO import StringIO  # Python 2
except ImportError:
    from io import StringIO  # Python 3

import numpy as np  # type: ignore

from . import core
from .core import list_models, load_model_info, build_model
from .direct_model import call_kernel, call_ER, call_VR
from .exception import annotate_exception
from .modelinfo import expand_pars

try:
    from typing import List, Iterator, Callable
except ImportError:
    pass
else:
    from .modelinfo import ParameterTable, ParameterSet, TestCondition, ModelInfo
    from .kernel import KernelModel


def make_suite(loaders, models):
    # type: (List[str], List[str]) -> unittest.TestSuite
    """
    Construct the pyunit test suite.

    *loaders* is the list of kernel drivers to use, which is one of
    *["dll", "opencl"]*, *["dll"]* or *["opencl"]*.  For python models,
    the python driver is always used.

    *models* is the list of models to test, or *["all"]* to test all models.
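
    For example, a minimal sketch of direct use (model names are
    illustrative only)::

        suite = make_suite(['dll'], ['sphere', 'cylinder'])
        unittest.TextTestRunner(verbosity=2).run(suite)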
78    """
79    ModelTestCase = _hide_model_case_from_nose()
80    suite = unittest.TestSuite()
81
82    if models[0] == 'all':
83        skip = models[1:]
84        models = list_models()
85    else:
86        skip = []
87    for model_name in models:
88        if model_name in skip:
89            continue
90        model_info = load_model_info(model_name)
91
92        #print('------')
93        #print('found tests in', model_name)
94        #print('------')
95
        # If the kernel is implemented in python (is_py), use the dll loader
        # to call the python kernel; don't try to call the OpenCL kernel
        # since it will not be available in some environments.
        is_py = callable(model_info.Iq)

        # Some OpenCL drivers seem to be flaky, and are not producing the
        # expected result.  Since we don't have known test values yet for
        # all of our models, we are instead going to compare the results
        # for the 'smoke test' (that is, evaluation at q=0.1 for the default
        # parameters just to see that the model runs to completion) between
        # the OpenCL and the DLL.  To do this, we define a 'stash' which is
        # shared between OpenCL and DLL tests.  This is just a list.  If the
        # list is empty (which it will be when DLL runs, if the DLL runs
        # first), then the results are appended to the list.  If the list
        # is not empty (which it will be when OpenCL runs second), the results
        # are compared to the results stored in the first element of the list.
        # This is a horrible stateful hack which only makes sense because the
        # test suite is thrown away after being run once.
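        #
        # Roughly (illustrative sketch of the intended flow):
        #   first run (dll):     stash == []         -> stash.append(results)
        #   second run (opencl): stash == [results0] -> compare to stash[0]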
        stash = []

        if is_py:  # kernel implemented in python
            test_name = "Model: %s, Kernel: python"%model_name
            test_method_name = "test_%s_python" % model_info.id
            test = ModelTestCase(test_name, model_info,
                                 test_method_name,
                                 platform="dll",  # so that the python kernel is used
                                 dtype="double",
                                 stash=stash)
            suite.addTest(test)
        else:   # kernel implemented in C

            # test using dll if desired
            if 'dll' in loaders or not core.HAVE_OPENCL:
                test_name = "Model: %s, Kernel: dll"%model_name
                test_method_name = "test_%s_dll" % model_info.id
                test = ModelTestCase(test_name, model_info,
                                     test_method_name,
                                     platform="dll",
                                     dtype="double",
                                     stash=stash)
                suite.addTest(test)

            # test using opencl if desired and available
            if 'opencl' in loaders and core.HAVE_OPENCL:
                test_name = "Model: %s, Kernel: OpenCL"%model_name
                test_method_name = "test_%s_opencl" % model_info.id
                # Using dtype=None so that the models that are only
                # correct for double precision are not tested using
                # single precision.  The choice is determined by the
                # presence of *single=False* in the model file.
                test = ModelTestCase(test_name, model_info,
                                     test_method_name,
                                     platform="ocl", dtype=None,
                                     stash=stash)
                #print("defining", test_name)
                suite.addTest(test)

    return suite


def _hide_model_case_from_nose():
    # type: () -> type
    class ModelTestCase(unittest.TestCase):
        """
        Test suite for a particular model with a particular kernel driver.

        The test suite runs a simple smoke test to make sure the model
        functions, then runs the list of tests at the bottom of the model
        description file.
        """
        def __init__(self, test_name, model_info, test_method_name,
                     platform, dtype, stash):
            # type: (str, ModelInfo, str, str, DType, List[Any]) -> None
            self.test_name = test_name
            self.info = model_info
            self.platform = platform
            self.dtype = dtype
            self.stash = stash  # container for the results of the first run

            setattr(self, test_method_name, self.run_all)
            unittest.TestCase.__init__(self, test_method_name)

        def run_all(self):
            # type: () -> None
            """
            Run all the tests in the test suite, including smoke tests.
            """
            smoke_tests = [
                # test validity at reasonable values
                ({}, 0.1, None),
                ({}, (0.1, 0.1), None),
                # test validity at q = 0
                #({}, 0.0, None),
                #({}, (0.0, 0.0), None),
                # test vector form
                ({}, [0.001, 0.01, 0.1], [None]*3),
                ({}, [(0.1, 0.1)]*2, [None]*2),
                # test that ER/VR will run if they exist
                ({}, 'ER', None),
                ({}, 'VR', None),
                ]

            tests = smoke_tests + self.info.tests
            try:
                model = build_model(self.info, dtype=self.dtype,
                                    platform=self.platform)
                results = [self.run_one(model, test) for test in tests]
                if self.stash:
                    for test, target, actual in zip(tests, self.stash[0], results):
                        assert np.all(abs(target-actual) < 5e-5*abs(actual)),\
                            "GPU/CPU comparison expected %s but got %s for %s"%(target, actual, test[0])
                else:
                    self.stash.append(results)

                # Check for missing tests.  Only do so for the "dll" tests
                # to reduce noise from both opencl and dll, and because
                # python kernels use platform="dll".
                if self.platform == "dll":
                    missing = []
                    ## Uncomment the following to require test cases
                    #missing = self._find_missing_tests()
                    if missing:
                        raise ValueError("Missing tests for "+", ".join(missing))

            except:
                annotate_exception(self.test_name)
                raise

        def _find_missing_tests(self):
            # type: () -> List[str]
            """make sure there are 1D, 2D, ER and VR tests as appropriate"""
            model_has_VR = callable(self.info.VR)
            model_has_ER = callable(self.info.ER)
            model_has_1D = True
            model_has_2D = any(p.type == 'orientation'
                               for p in self.info.parameters.kernel_parameters)

            # Lists of tests that have a result that is not None
            single = [test for test in self.info.tests
                      if not isinstance(test[2], list) and test[2] is not None]
            tests_has_VR = any(test[1] == 'VR' for test in single)
            tests_has_ER = any(test[1] == 'ER' for test in single)
            tests_has_1D_single = any(isinstance(test[1], float) for test in single)
            tests_has_2D_single = any(isinstance(test[1], tuple) for test in single)

            multiple = [test for test in self.info.tests
                        if isinstance(test[2], list)
                        and not all(result is None for result in test[2])]
            tests_has_1D_multiple = any(isinstance(test[1][0], float)
                                        for test in multiple)
            tests_has_2D_multiple = any(isinstance(test[1][0], tuple)
                                        for test in multiple)

            missing = []
            if model_has_VR and not tests_has_VR:
                missing.append("VR")
            if model_has_ER and not tests_has_ER:
                missing.append("ER")
            if model_has_1D and not (tests_has_1D_single or tests_has_1D_multiple):
                missing.append("1D")
            if model_has_2D and not (tests_has_2D_single or tests_has_2D_multiple):
                missing.append("2D")

            return missing

        def run_one(self, model, test):
            # type: (KernelModel, TestCondition) -> np.ndarray
            """Run a single test case."""
            user_pars, x, y = test
            pars = expand_pars(self.info.parameters, user_pars)
            invalid = invalid_pars(self.info.parameters, pars)
            if invalid:
                raise ValueError("Unknown parameters in test: " + ", ".join(invalid))

            if not isinstance(y, list):
                y = [y]
            if not isinstance(x, list):
                x = [x]

            self.assertEqual(len(y), len(x))

            if x[0] == 'ER':
                actual = np.array([call_ER(model.info, pars)])
            elif x[0] == 'VR':
                actual = np.array([call_VR(model.info, pars)])
            elif isinstance(x[0], tuple):
                qx, qy = zip(*x)
                q_vectors = [np.array(qx), np.array(qy)]
                kernel = model.make_kernel(q_vectors)
                actual = call_kernel(kernel, pars)
            else:
                q_vectors = [np.array(x)]
                kernel = model.make_kernel(q_vectors)
                actual = call_kernel(kernel, pars)

            self.assertTrue(len(actual) > 0)
            self.assertEqual(len(y), len(actual))

            for xi, yi, actual_yi in zip(x, y, actual):
                if yi is None:
                    # smoke test --- make sure it runs and produces a value
                    self.assertTrue(not np.isnan(actual_yi),
                                    'invalid f(%s): %s' % (xi, actual_yi))
                elif np.isnan(yi):
                    self.assertTrue(np.isnan(actual_yi),
                                    'f(%s): expected:%s; actual:%s'
                                    % (xi, yi, actual_yi))
                else:
                    # is_near does not work for infinite values, so also test
                    # for exact values.
                    self.assertTrue(yi == actual_yi or is_near(yi, actual_yi, 5),
                                    'f(%s); expected:%s; actual:%s'
                                    % (xi, yi, actual_yi))
            return actual

    return ModelTestCase

def invalid_pars(partable, pars):
    # type: (ParameterTable, Dict[str, float]) -> List[str]
315    """
316    Return a list of parameter names that are not part of the model.
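
    Polydispersity qualifiers on valid parameters are accepted; for example,
    *radius_pd* and *radius_pd_n* are allowed when *radius* is a model
    parameter (names illustrative).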
317    """
318    names = set(p.id for p in partable.call_parameters)
319    invalid = []
320    for par in sorted(pars.keys()):
321        parts = par.split('_pd')
        # Accept the polydispersity suffixes _pd, _pd_n, _pd_nsigma and _pd_type.
        if len(parts) > 1 and parts[1] not in ("", "_n", "_nsigma", "_type"):
            invalid.append(par)
            continue
        if parts[0] not in names:
            invalid.append(par)
    return invalid


def is_near(target, actual, digits=5):
    # type: (float, float, int) -> bool
    """
    Returns true if *actual* is within *digits* significant digits of *target*.
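
    For example, with the default of 5 digits (values chosen only to
    illustrate the tolerance)::

        is_near(1.234e-3, 1.2341e-3)   # True: differs in the 5th digit
        is_near(1.234e-3, 1.24e-3)     # False: differs in the 3rd digit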
334    """
335    import math
336    shift = 10**math.ceil(math.log10(abs(target)))
337    return abs(target-actual)/shift < 1.5*10**-digits
338
339def run_one(model):
340    # type: (str) -> None
341    """
    Run the tests for a single model, returning the test output as a string.

    *model* can be a python file, which is handy for checking user defined
    plugin models.
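
    For example (the file name is illustrative)::

        print(run_one("my_plugin_model.py"))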
346    """
347    # Note that running main() directly did not work from within the
348    # wxPython pycrust console.  Instead of the results appearing in the
349    # window they were printed to the underlying console.
350    from unittest.runner import TextTestResult, _WritelnDecorator
351
    # Build an object to capture and print the test results
    stream = _WritelnDecorator(StringIO())  # Add writeln() method to stream
    verbosity = 2
    descriptions = True
    result = TextTestResult(stream, descriptions, verbosity)

    # Build a test suite containing just the model
    loaders = ['opencl'] if core.HAVE_OPENCL else ['dll']
    models = [model]
    try:
        suite = make_suite(loaders, models)
    except Exception:
        import traceback
        stream.writeln(traceback.format_exc())
        return

    # Run the test suite
    suite.run(result)

    # Print the failures and errors
    for _, tb in result.errors:
        stream.writeln(tb)
    for _, tb in result.failures:
        stream.writeln(tb)

    # Warn if there are no user defined tests.
    # Note: the test suite constructed above only has one test in it, which
    # runs through some smoke tests to make sure the model runs, then runs
    # through the input-output pairs given in the model definition file.  To
    # check if any such pairs are defined, therefore, we just need to check if
    # they are in the first test of the test suite.  We do this with an
    # iterator since we don't have direct access to the list of tests in the
    # test suite.
    for test in suite:
        if not test.info.tests:
            stream.writeln("Note: %s has no user defined tests."%model)
        break
    else:
        stream.writeln("Note: no test suite created --- this should never happen")

    output = stream.getvalue()
    stream.close()
    return output


def main(*models):
    # type: (*str) -> int
    """
    Run the tests for the given models.

    Returns 0 on success or 1 if any tests fail.
403    """
404    try:
405        from xmlrunner import XMLTestRunner as TestRunner
406        test_args = {'output': 'logs'}
407    except ImportError:
408        from unittest import TextTestRunner as TestRunner
409        test_args = {}
410
411    if models and models[0] == '-v':
412        verbosity = 2
413        models = models[1:]
414    else:
415        verbosity = 1
416    if models and models[0] == 'opencl':
417        if not core.HAVE_OPENCL:
418            print("opencl is not available")
419            return 1
420        loaders = ['opencl']
421        models = models[1:]
422    elif models and models[0] == 'dll':
423        # TODO: test if compiler is available?
424        loaders = ['dll']
425        models = models[1:]
426    elif models and models[0] == 'opencl_and_dll':
427        loaders = ['opencl', 'dll'] if core.HAVE_OPENCL else ['dll']
428        models = models[1:]
429    else:
430        loaders = ['opencl', 'dll'] if core.HAVE_OPENCL else ['dll']
431    if not models:
432        print("""\
433usage:
434  python -m sasmodels.model_test [-v] [opencl|dll] model1 model2 ...
435
436If -v is included on the command line, then use verbose output.
437
438If neither opencl nor dll is specified, then models will be tested with
439both OpenCL and dll; the compute target is ignored for pure python models.
440
If model1 is 'all', then all models except those listed after it will be tested.

""")

        return 1

    runner = TestRunner(verbosity=verbosity, **test_args)
    result = runner.run(make_suite(loaders, models))
    return 1 if result.failures or result.errors else 0


def model_tests():
    # type: () -> Iterator[Callable[[], None]]
    """
    Test runner visible to nosetests.

    Run "nosetests sasmodels" on the command line to invoke it.
    """
    loaders = ['opencl', 'dll'] if core.HAVE_OPENCL else ['dll']
    tests = make_suite(loaders, ['all'])
    for test_i in tests:
        # In order for nosetest to see the correct test name, need to set
        # the description attribute of the returned function.  Since we
        # can't do this for the returned instance, wrap it in a lambda and
        # set the description on the lambda.  Otherwise we could just do:
        #    yield test_i.run_all
        L = lambda: test_i.run_all()
        L.description = test_i.test_name
        yield L


if __name__ == "__main__":
    sys.exit(main(*sys.argv[1:]))