source: sasmodels/sasmodels/model_test.py @ 9826f82

Last change: 9826f82, checked in by lewis, 7 years ago

Fix import on Python 3

# -*- coding: utf-8 -*-
"""
Run model unit tests.

Usage::

    python -m sasmodels.model_test [opencl|dll|opencl_and_dll] model1 model2 ...

    if model1 is 'all', then all models except those named after it are tested

Each model is tested using the default parameters at q=0.1, (qx, qy)=(0.1, 0.1),
and the ER and VR are computed.  The return values at these points are not
considered.  The test is only to verify that the models run to completion,
and do not produce inf or NaN.

Tests are defined with the *tests* attribute in the model.py file.  *tests*
is a list of individual tests to run, where each test consists of the
parameter values for the test, the q-values and the expected results.  For
the effective radius test, the q-value should be 'ER'.  For the VR test,
the q-value should be 'VR'.  For 1-D tests, either specify the q value or
a list of q-values, and the corresponding I(q) value, or list of I(q) values.

That is::

    tests = [
        [ {parameters}, q, I(q)],
        [ {parameters}, [q], [I(q)] ],
        [ {parameters}, [q1, q2, ...], [I(q1), I(q2), ...]],

        [ {parameters}, (qx, qy), I(qx, qy)],
        [ {parameters}, [(qx1, qy1), (qx2, qy2), ...],
                        [I(qx1, qy1), I(qx2, qy2), ...]],

        [ {parameters}, 'ER', ER(pars) ],
        [ {parameters}, 'VR', VR(pars) ],
        ...
    ]

Parameters are *key:value* pairs, where the key is one of the parameters of
the model and the value is the value to use for the test.  Any parameters not
given in the parameter list will take on the default parameter value.

Precision defaults to 5 digits (relative).
"""
from __future__ import print_function

import sys
import unittest

try:
    from StringIO import StringIO
except ImportError: # StringIO.StringIO renamed to io.StringIO in Python 3
    from io import StringIO

import numpy as np  # type: ignore

from . import core
from .core import list_models, load_model_info, build_model
from .direct_model import call_kernel, call_ER, call_VR
from .exception import annotate_exception
from .modelinfo import expand_pars

try:
    # Any and Dict are imported as well since the type comments below use them.
    from typing import Any, Dict, List, Iterator, Callable
except ImportError:
    pass
else:
    from .modelinfo import ParameterTable, ParameterSet, TestCondition, ModelInfo
    from .kernel import KernelModel

def make_suite(loaders, models):
    # type: (List[str], List[str]) -> unittest.TestSuite
    """
    Construct the pyunit test suite.

    *loaders* is the list of kernel drivers to use, which is one of
    *["dll", "opencl"]*, *["dll"]* or *["opencl"]*.  For python models,
    the python driver is always used.

    *models* is the list of models to test, or *["all"]* to test all models.
    """
    ModelTestCase = _hide_model_case_from_nose()
    suite = unittest.TestSuite()

    if models[0] == 'all':
        skip = models[1:]
        models = list_models()
    else:
        skip = []
    for model_name in models:
        if model_name in skip:
            continue
        model_info = load_model_info(model_name)

        #print('------')
        #print('found tests in', model_name)
        #print('------')

        # If the model is implemented in python, use the dll loader to call
        # the python kernel; don't try to call the opencl kernel since it
        # will not be available in some environments.
        is_py = callable(model_info.Iq)

        # Some OpenCL drivers seem to be flaky, and are not producing the
        # expected result.  Since we don't have known test values yet for
        # all of our models, we are instead going to compare the results
        # for the 'smoke test' (that is, evaluation at q=0.1 for the default
        # parameters just to see that the model runs to completion) between
        # the OpenCL and the DLL.  To do this, we define a 'stash' which is
        # shared between OpenCL and DLL tests.  This is just a list.  If the
        # list is empty (which it will be when DLL runs, if the DLL runs
        # first), then the results are appended to the list.  If the list
        # is not empty (which it will be when OpenCL runs second), the results
        # are compared to the results stored in the first element of the list.
        # This is a horrible stateful hack which only makes sense because the
        # test suite is thrown away after being run once.
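        # For example (hypothetical run order): on the first, dll, run the
        # stash is empty so that run's results are appended; on a later
        # opencl run stash == [dll_results], so the opencl results are
        # compared against dll_results instead of being stored.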
        stash = []

        if is_py:  # kernel implemented in python
            test_name = "Model: %s, Kernel: python"%model_name
            test_method_name = "test_%s_python" % model_info.id
            test = ModelTestCase(test_name, model_info,
                                 test_method_name,
                                 platform="dll",  # so that the python kernel is used
                                 dtype="double",
                                 stash=stash)
            suite.addTest(test)
        else:   # kernel implemented in C

            # test using dll if desired
            if 'dll' in loaders or not core.HAVE_OPENCL:
                test_name = "Model: %s, Kernel: dll"%model_name
                test_method_name = "test_%s_dll" % model_info.id
                test = ModelTestCase(test_name, model_info,
                                     test_method_name,
                                     platform="dll",
                                     dtype="double",
                                     stash=stash)
                suite.addTest(test)

            # test using opencl if desired and available
            if 'opencl' in loaders and core.HAVE_OPENCL:
                test_name = "Model: %s, Kernel: OpenCL"%model_name
                test_method_name = "test_%s_opencl" % model_info.id
                # Using dtype=None so that the models that are only
                # correct for double precision are not tested using
                # single precision.  The choice is determined by the
                # presence of *single=False* in the model file.
                test = ModelTestCase(test_name, model_info,
                                     test_method_name,
                                     platform="ocl", dtype=None,
                                     stash=stash)
                #print("defining", test_name)
                suite.addTest(test)

    return suite


def _hide_model_case_from_nose():
    # type: () -> type
    class ModelTestCase(unittest.TestCase):
        """
        Test suite for a particular model with a particular kernel driver.

        The test suite runs a simple smoke test to make sure the model
        functions, then runs the list of tests at the bottom of the model
        description file.
        """
        def __init__(self, test_name, model_info, test_method_name,
                     platform, dtype, stash):
            # type: (str, ModelInfo, str, str, DType, List[Any]) -> None
            self.test_name = test_name
            self.info = model_info
            self.platform = platform
            self.dtype = dtype
            self.stash = stash  # container for the results of the first run

            setattr(self, test_method_name, self.run_all)
            unittest.TestCase.__init__(self, test_method_name)

        def run_all(self):
            # type: () -> None
            """
            Run all the tests in the test suite, including smoke tests.
            """
            smoke_tests = [
                # test validity at reasonable values
                ({}, 0.1, None),
                ({}, (0.1, 0.1), None),
                # test validity at q = 0
                #({}, 0.0, None),
                #({}, (0.0, 0.0), None),
                # test vector form
                ({}, [0.001, 0.01, 0.1], [None]*3),
                ({}, [(0.1, 0.1)]*2, [None]*2),
                # test that ER/VR will run if they exist
                ({}, 'ER', None),
                ({}, 'VR', None),
                ]

            tests = smoke_tests + self.info.tests
            try:
                model = build_model(self.info, dtype=self.dtype,
                                    platform=self.platform)
                results = [self.run_one(model, test) for test in tests]
                if self.stash:
                    for test, target, actual in zip(tests, self.stash[0], results):
                        assert np.all(abs(target-actual) < 5e-5*abs(actual)),\
                            "GPU/CPU comparison expected %s but got %s for %s"%(target, actual, test[0])
                else:
                    self.stash.append(results)

                # Check for missing tests.  Only do so for the "dll" tests
                # to reduce noise from both opencl and dll, and because
                # python kernels use platform="dll".
                if self.platform == "dll":
                    missing = []
                    ## Uncomment the following to require test cases
                    #missing = self._find_missing_tests()
                    if missing:
                        raise ValueError("Missing tests for "+", ".join(missing))

            except:
                annotate_exception(self.test_name)
                raise

        def _find_missing_tests(self):
            # type: () -> List[str]
            """make sure there are 1D, 2D, ER and VR tests as appropriate"""
            model_has_VR = callable(self.info.VR)
            model_has_ER = callable(self.info.ER)
            model_has_1D = True
            model_has_2D = any(p.type == 'orientation'
                               for p in self.info.parameters.kernel_parameters)

            # Lists of tests that have a result that is not None
            single = [test for test in self.info.tests
                      if not isinstance(test[2], list) and test[2] is not None]
            tests_has_VR = any(test[1] == 'VR' for test in single)
            tests_has_ER = any(test[1] == 'ER' for test in single)
            tests_has_1D_single = any(isinstance(test[1], float) for test in single)
            tests_has_2D_single = any(isinstance(test[1], tuple) for test in single)

            multiple = [test for test in self.info.tests
                        if isinstance(test[2], list)
                        and not all(result is None for result in test[2])]
            tests_has_1D_multiple = any(isinstance(test[1][0], float)
                                        for test in multiple)
            tests_has_2D_multiple = any(isinstance(test[1][0], tuple)
                                        for test in multiple)

            missing = []
            if model_has_VR and not tests_has_VR:
                missing.append("VR")
            if model_has_ER and not tests_has_ER:
                missing.append("ER")
            if model_has_1D and not (tests_has_1D_single or tests_has_1D_multiple):
                missing.append("1D")
            if model_has_2D and not (tests_has_2D_single or tests_has_2D_multiple):
                missing.append("2D")

            return missing

        def run_one(self, model, test):
            # type: (KernelModel, TestCondition) -> Any
            """Run a single test case."""
            user_pars, x, y = test
            pars = expand_pars(self.info.parameters, user_pars)
            invalid = invalid_pars(self.info.parameters, pars)
            if invalid:
                raise ValueError("Unknown parameters in test: " + ", ".join(invalid))

            if not isinstance(y, list):
                y = [y]
            if not isinstance(x, list):
                x = [x]

            self.assertEqual(len(y), len(x))

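            # Dispatch on the form of the q input: 'ER' and 'VR' call the
            # effective-radius and volume-ratio helpers, (qx, qy) tuples are
            # evaluated as 2-D points, and plain numbers as 1-D q values.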
            if x[0] == 'ER':
                actual = np.array([call_ER(model.info, pars)])
            elif x[0] == 'VR':
                actual = np.array([call_VR(model.info, pars)])
            elif isinstance(x[0], tuple):
                qx, qy = zip(*x)
                q_vectors = [np.array(qx), np.array(qy)]
                kernel = model.make_kernel(q_vectors)
                actual = call_kernel(kernel, pars)
            else:
                q_vectors = [np.array(x)]
                kernel = model.make_kernel(q_vectors)
                actual = call_kernel(kernel, pars)

            self.assertTrue(len(actual) > 0)
            self.assertEqual(len(y), len(actual))

            for xi, yi, actual_yi in zip(x, y, actual):
                if yi is None:
                    # smoke test --- make sure it runs and produces a value
                    self.assertTrue(not np.isnan(actual_yi),
                                    'invalid f(%s): %s' % (xi, actual_yi))
                elif np.isnan(yi):
                    self.assertTrue(np.isnan(actual_yi),
                                    'f(%s): expected:%s; actual:%s'
                                    % (xi, yi, actual_yi))
                else:
                    # is_near does not work for infinite values, so also test
                    # for exact values.  Note that this will not
                    self.assertTrue(yi == actual_yi or is_near(yi, actual_yi, 5),
                                    'f(%s): expected:%s; actual:%s'
                                    % (xi, yi, actual_yi))
            return actual

    return ModelTestCase

def invalid_pars(partable, pars):
    # type: (ParameterTable, Dict[str, float]) -> List[str]
    """
    Return a list of parameter names that are not part of the model.
    """
    names = set(p.id for p in partable.call_parameters)
    invalid = []
    for par in sorted(pars.keys()):
        parts = par.split('_pd')
        if len(parts) > 1 and parts[1] not in ("", "_n", "_nsigma", "_type"):
            invalid.append(par)
            continue
        if parts[0] not in names:
            invalid.append(par)
    return invalid


def is_near(target, actual, digits=5):
    # type: (float, float, int) -> bool
    """
    Returns true if *actual* is within *digits* significant digits of *target*.
    """
    import math
    shift = 10**math.ceil(math.log10(abs(target)))
    return abs(target-actual)/shift < 1.5*10**-digits

def run_one(model):
    # type: (str) -> str
    """
    Run the tests for a single model, printing the results to stdout.

    *model* can be a python file, which is handy for checking user defined
    plugin models.
    """
    # Note that running main() directly did not work from within the
    # wxPython pycrust console.  Instead of the results appearing in the
    # window they were printed to the underlying console.
    from unittest.runner import TextTestResult, _WritelnDecorator

    # Build an object to capture and print the test results
    stream = _WritelnDecorator(StringIO())  # Add writeln() method to stream
    verbosity = 2
    descriptions = True
    result = TextTestResult(stream, descriptions, verbosity)

    # Build a test suite containing just the model
    loaders = ['opencl'] if core.HAVE_OPENCL else ['dll']
    models = [model]
    try:
        suite = make_suite(loaders, models)
    except Exception:
        import traceback
        stream.writeln(traceback.format_exc())
        return

    # Run the test suite
    suite.run(result)

    # Print the failures and errors
    for _, tb in result.errors:
        stream.writeln(tb)
    for _, tb in result.failures:
        stream.writeln(tb)

    # Warn if there are no user defined tests.
    # Note: the test suite constructed above only has one test in it, which
    # runs through some smoke tests to make sure the model runs, then runs
    # through the input-output pairs given in the model definition file.  To
    # check if any such pairs are defined, therefore, we just need to check if
    # they are in the first test of the test suite.  We do this with an
    # iterator since we don't have direct access to the list of tests in the
    # test suite.
    for test in suite:
        if not test.info.tests:
            stream.writeln("Note: %s has no user defined tests."%model)
        break
    else:
        stream.writeln("Note: no test suite created --- this should never happen")

    output = stream.getvalue()
    stream.close()
    return output


def main(*models):
    # type: (*str) -> int
    """
    Run tests given in *models*.

    Returns 0 on success or 1 if any tests fail.
    """
    try:
        from xmlrunner import XMLTestRunner as TestRunner
        test_args = {'output': 'logs'}
    except ImportError:
        from unittest import TextTestRunner as TestRunner
        test_args = {}

    if models and models[0] == '-v':
        verbosity = 2
        models = models[1:]
    else:
        verbosity = 1
    if models and models[0] == 'opencl':
        if not core.HAVE_OPENCL:
            print("opencl is not available")
            return 1
        loaders = ['opencl']
        models = models[1:]
    elif models and models[0] == 'dll':
        # TODO: test if compiler is available?
        loaders = ['dll']
        models = models[1:]
    elif models and models[0] == 'opencl_and_dll':
        loaders = ['opencl', 'dll'] if core.HAVE_OPENCL else ['dll']
        models = models[1:]
    else:
        loaders = ['opencl', 'dll'] if core.HAVE_OPENCL else ['dll']
    if not models:
        print("""\
usage:
  python -m sasmodels.model_test [-v] [opencl|dll] model1 model2 ...

If -v is included on the command line, then use verbose output.

If neither opencl nor dll is specified, then models will be tested with
both OpenCL and dll; the compute target is ignored for pure python models.

If model1 is 'all', then all models except those named after it will be
tested.

""")

        return 1

    runner = TestRunner(verbosity=verbosity, **test_args)
    result = runner.run(make_suite(loaders, models))
    return 1 if result.failures or result.errors else 0


def model_tests():
    # type: () -> Iterator[Callable[[], None]]
    """
    Test runner visible to nosetests.

    Run "nosetests sasmodels" on the command line to invoke it.
    """
    loaders = ['opencl', 'dll'] if core.HAVE_OPENCL else ['dll']
    tests = make_suite(loaders, ['all'])
    for test_i in tests:
        # In order for nosetest to see the correct test name, need to set
        # the description attribute of the returned function.  Since we
        # can't do this for the returned instance, wrap it in a lambda and
        # set the description on the lambda.  Otherwise we could just do:
        #    yield test_i.run_all
        L = lambda: test_i.run_all()
        L.description = test_i.test_name
        yield L


if __name__ == "__main__":
    sys.exit(main(*sys.argv[1:]))