source: sasmodels/sasmodels/model_test.py @ 7b5898f

Last change on this file: 7b5898f, checked in by Adam Washington <adam.washington@…>, 6 years ago

Move to proper argument parser in model_test

I replaced argv handling in model_test.py with a call to the argparse
library. The replacement offers the same interface, but uses less
code, is a bit more readable, provides proper help messages, and is
eight lines shorter.

# -*- coding: utf-8 -*-
"""
Run model unit tests.

Usage::

    python -m sasmodels.model_test [opencl|dll|opencl_and_dll] model1 model2 ...

    if model1 is 'all', then all models except those listed after it will be tested

Each model is tested using the default parameters at q=0.1, (qx, qy)=(0.1, 0.1),
and the ER and VR are computed.  The return values at these points are not
considered.  The test is only to verify that the models run to completion,
and do not produce inf or NaN.

Tests are defined with the *tests* attribute in the model.py file.  *tests*
is a list of individual tests to run, where each test consists of the
parameter values for the test, the q-values and the expected results.  For
the effective radius test, the q-value should be 'ER'.  For the VR test,
the q-value should be 'VR'.  For 1-D tests, either specify the q value or
a list of q-values, and the corresponding I(q) value, or list of I(q) values.

That is::

    tests = [
        [ {parameters}, q, I(q)],
        [ {parameters}, [q], [I(q)] ],
        [ {parameters}, [q1, q2, ...], [I(q1), I(q2), ...]],

        [ {parameters}, (qx, qy), I(qx, qy)],
        [ {parameters}, [(qx1, qy1), (qx2, qy2), ...],
                        [I(qx1, qy1), I(qx2, qy2), ...]],

        [ {parameters}, 'ER', ER(pars) ],
        [ {parameters}, 'VR', VR(pars) ],
        ...
    ]

Parameters are *key:value* pairs, where key is one of the parameters of the
model and value is the value to use for the test.  Any parameters not given
in the parameter list will take on the default parameter value.
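
For example, a hypothetical *tests* block for a model with a *radius*
parameter might look like the following (the parameter names and expected
values here are illustrative only, not taken from any actual model)::

    tests = [
        [{'radius': 30.0, 'scale': 2.0}, 0.1, 0.2288],
        [{'radius': 30.0}, [0.01, 0.1], [5.02, 0.2288]],
        [{'radius': 30.0}, 'ER', 30.0],
    ]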

Precision defaults to 5 digits (relative).
"""
from __future__ import print_function

import argparse
import sys
import unittest

try:
    from StringIO import StringIO
except ImportError:
    # StringIO.StringIO renamed to io.StringIO in Python 3
    # Note: io.StringIO exists in python 2, but using unicode instead of str
    from io import StringIO

import numpy as np  # type: ignore

from . import core
from .core import list_models, load_model_info, build_model
from .direct_model import call_kernel, call_ER, call_VR
from .exception import annotate_exception
from .modelinfo import expand_pars
from .kernelcl import use_opencl

# pylint: disable=unused-import
try:
    from typing import List, Iterator, Callable, Dict, Any
except ImportError:
    pass
else:
    from .modelinfo import ParameterTable, ParameterSet, TestCondition, ModelInfo
    from .kernel import KernelModel
# pylint: enable=unused-import


def make_suite(loaders, models):
    # type: (List[str], List[str]) -> unittest.TestSuite
    """
    Construct the pyunit test suite.

    *loaders* is the list of kernel drivers to use, which is one of
    *["dll", "opencl"]*, *["dll"]* or *["opencl"]*.  For python models,
    the python driver is always used.

    *models* is the list of models to test, or *["all"]* to test all models.
    """
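    # Illustrative calls (the model names are only examples):
    #     make_suite(['dll'], ['cylinder', 'sphere'])  # two models, dll engine
    #     make_suite(['dll', 'opencl'], ['all'])       # every model, both engines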
    ModelTestCase = _hide_model_case_from_nose()
    suite = unittest.TestSuite()

    if models[0] in core.KINDS:
        skip = models[1:]
        models = list_models(models[0])
    else:
        skip = []
    for model_name in models:
        if model_name in skip:
            continue
        model_info = load_model_info(model_name)

        #print('------')
        #print('found tests in', model_name)
        #print('------')

        # If the kernel is implemented in python, use the dll loader to call
        # the python kernel; don't try to call the OpenCL kernel since it
        # will not be available in some environments.
        is_py = callable(model_info.Iq)

        # Some OpenCL drivers seem to be flaky, and are not producing the
        # expected result.  Since we don't have known test values yet for
        # all of our models, we are instead going to compare the results
        # for the 'smoke test' (that is, evaluation at q=0.1 for the default
        # parameters just to see that the model runs to completion) between
        # the OpenCL and the DLL.  To do this, we define a 'stash' which is
        # shared between OpenCL and DLL tests.  This is just a list.  If the
        # list is empty (which it will be when DLL runs, if the DLL runs
        # first), then the results are appended to the list.  If the list
        # is not empty (which it will be when OpenCL runs second), the results
        # are compared to the results stored in the first element of the list.
        # This is a horrible stateful hack which only makes sense because the
        # test suite is thrown away after being run once.
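        # Illustrative two-pass flow (a sketch of the mechanism described
        # above, not extra behaviour): on the first run (e.g. the DLL test)
        # stash == [], so the results are appended; on the second run (the
        # OpenCL test) stash == [dll_results], and the new results are
        # compared against stash[0].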
        stash = []

        if is_py:  # kernel implemented in python
            test_name = "%s-python"%model_name
            test_method_name = "test_%s_python" % model_info.id
            test = ModelTestCase(test_name, model_info,
                                 test_method_name,
                                 platform="dll",  # use the dll loader to run the python kernel
                                 dtype="double",
                                 stash=stash)
            suite.addTest(test)
        else:   # kernel implemented in C

            # test using dll if desired
            if 'dll' in loaders or not use_opencl():
                test_name = "%s-dll"%model_name
                test_method_name = "test_%s_dll" % model_info.id
                test = ModelTestCase(test_name, model_info,
                                     test_method_name,
                                     platform="dll",
                                     dtype="double",
                                     stash=stash)
                suite.addTest(test)

            # test using opencl if desired and available
            if 'opencl' in loaders and use_opencl():
                test_name = "%s-opencl"%model_name
                test_method_name = "test_%s_opencl" % model_info.id
                # Using dtype=None so that the models that are only
                # correct for double precision are not tested using
                # single precision.  The choice is determined by the
                # presence of *single=False* in the model file.
                test = ModelTestCase(test_name, model_info,
                                     test_method_name,
                                     platform="ocl", dtype=None,
                                     stash=stash)
                #print("defining", test_name)
                suite.addTest(test)

    return suite

def _hide_model_case_from_nose():
    # type: () -> type
    class ModelTestCase(unittest.TestCase):
        """
        Test suite for a particular model with a particular kernel driver.

        The test suite runs a simple smoke test to make sure the model
        functions, then runs the list of tests at the bottom of the model
        description file.
        """
        def __init__(self, test_name, model_info, test_method_name,
                     platform, dtype, stash):
            # type: (str, ModelInfo, str, str, DType, List[Any]) -> None
            self.test_name = test_name
            self.info = model_info
            self.platform = platform
            self.dtype = dtype
            self.stash = stash  # container for the results of the first run

            setattr(self, test_method_name, self.run_all)
            unittest.TestCase.__init__(self, test_method_name)

        def run_all(self):
            # type: () -> None
            """
            Run all the tests in the test suite, including smoke tests.
            """
            smoke_tests = [
                # test validity at reasonable values
                ({}, 0.1, None),
                ({}, (0.1, 0.1), None),
                # test validity at q = 0
                #({}, 0.0, None),
                #({}, (0.0, 0.0), None),
                # test vector form
                ({}, [0.001, 0.01, 0.1], [None]*3),
                ({}, [(0.1, 0.1)]*2, [None]*2),
                # test that ER/VR will run if they exist
                ({}, 'ER', None),
                ({}, 'VR', None),
                ]
            tests = smoke_tests
            #tests = []
            if self.info.tests is not None:
                tests += self.info.tests
            try:
                model = build_model(self.info, dtype=self.dtype,
                                    platform=self.platform)
                results = [self.run_one(model, test) for test in tests]
                if self.stash:
                    for test, target, actual in zip(tests, self.stash[0], results):
                        assert np.all(abs(target-actual) < 5e-5*abs(actual)), \
                            ("GPU/CPU comparison expected %s but got %s for %s"
                             % (target, actual, test[0]))
                else:
                    self.stash.append(results)

                # Check for missing tests.  Only do so for the "dll" tests
                # to reduce noise from both opencl and dll, and because
                # python kernels use platform="dll".
                if self.platform == "dll":
                    missing = []
                    ## Uncomment the following to require test cases
                    #missing = self._find_missing_tests()
                    if missing:
                        raise ValueError("Missing tests for "+", ".join(missing))

            except:
                annotate_exception(self.test_name)
                raise

        def _find_missing_tests(self):
            # type: () -> List[str]
            """make sure there are 1D, 2D, ER and VR tests as appropriate"""
            model_has_VR = callable(self.info.VR)
            model_has_ER = callable(self.info.ER)
            model_has_1D = True
            model_has_2D = any(p.type == 'orientation'
                               for p in self.info.parameters.kernel_parameters)

            # Lists of tests that have a result that is not None
            single = [test for test in self.info.tests
                      if not isinstance(test[2], list) and test[2] is not None]
            tests_has_VR = any(test[1] == 'VR' for test in single)
            tests_has_ER = any(test[1] == 'ER' for test in single)
            tests_has_1D_single = any(isinstance(test[1], float) for test in single)
            tests_has_2D_single = any(isinstance(test[1], tuple) for test in single)

            multiple = [test for test in self.info.tests
                        if isinstance(test[2], list)
                        and not all(result is None for result in test[2])]
            tests_has_1D_multiple = any(isinstance(test[1][0], float)
                                        for test in multiple)
            tests_has_2D_multiple = any(isinstance(test[1][0], tuple)
                                        for test in multiple)

            missing = []
            if model_has_VR and not tests_has_VR:
                missing.append("VR")
            if model_has_ER and not tests_has_ER:
                missing.append("ER")
            if model_has_1D and not (tests_has_1D_single or tests_has_1D_multiple):
                missing.append("1D")
            if model_has_2D and not (tests_has_2D_single or tests_has_2D_multiple):
                missing.append("2D")

            return missing

        def run_one(self, model, test):
            # type: (KernelModel, TestCondition) -> np.ndarray
            """Run a single test case."""
            user_pars, x, y = test
            pars = expand_pars(self.info.parameters, user_pars)
            invalid = invalid_pars(self.info.parameters, pars)
            if invalid:
                raise ValueError("Unknown parameters in test: " + ", ".join(invalid))

            if not isinstance(y, list):
                y = [y]
            if not isinstance(x, list):
                x = [x]

            self.assertEqual(len(y), len(x))

            if x[0] == 'ER':
                actual = np.array([call_ER(model.info, pars)])
            elif x[0] == 'VR':
                actual = np.array([call_VR(model.info, pars)])
            elif isinstance(x[0], tuple):
                qx, qy = zip(*x)
                q_vectors = [np.array(qx), np.array(qy)]
                kernel = model.make_kernel(q_vectors)
                actual = call_kernel(kernel, pars)
            else:
                q_vectors = [np.array(x)]
                kernel = model.make_kernel(q_vectors)
                actual = call_kernel(kernel, pars)

            self.assertTrue(len(actual) > 0)
            self.assertEqual(len(y), len(actual))

            for xi, yi, actual_yi in zip(x, y, actual):
                if yi is None:
                    # smoke test --- make sure it runs and produces a value
                    self.assertTrue(not np.isnan(actual_yi),
                                    'invalid f(%s): %s' % (xi, actual_yi))
                elif np.isnan(yi):
                    self.assertTrue(np.isnan(actual_yi),
                                    'f(%s): expected:%s; actual:%s'
                                    % (xi, yi, actual_yi))
                else:
                    # is_near does not work for infinite values, so also test
                    # for exact values.  Note that this will not
                    self.assertTrue(yi == actual_yi or is_near(yi, actual_yi, 5),
                                    'f(%s); expected:%s; actual:%s'
                                    % (xi, yi, actual_yi))
            return actual

    return ModelTestCase

def invalid_pars(partable, pars):
    # type: (ParameterTable, Dict[str, float]) -> List[str]
    """
    Return a list of parameter names that are not part of the model.
    """
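    # Illustrative behaviour (hypothetical parameter names): for a model with
    # a 'radius' parameter, 'radius', 'radius_pd' and 'radius_pd_n' are
    # accepted, while a misspelled 'raduis' or an unknown polydispersity
    # suffix such as 'radius_pd_width' is returned as invalid.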
    names = set(p.id for p in partable.call_parameters)
    invalid = []
    for par in sorted(pars.keys()):
        parts = par.split('_pd')
        if len(parts) > 1 and parts[1] not in ("", "_n", "_nsigma", "_type"):
            invalid.append(par)
            continue
        if parts[0] not in names:
            invalid.append(par)
    return invalid


def is_near(target, actual, digits=5):
    # type: (float, float, int) -> bool
    """
    Returns true if *actual* is within *digits* significant digits of *target*.
    """
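    # Worked example (illustrative values): for target=1.2345678 and
    # actual=1.2345690, log10(|target|) ~ 0.09 so shift = 10**1 = 10, and
    # |target - actual|/shift ~ 1.2e-7 < 1.5e-5, so is_near(..., digits=5)
    # returns True.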
    import math
    shift = 10**math.ceil(math.log10(abs(target)))
    return abs(target-actual)/shift < 1.5*10**-digits

def run_one(model):
    # type: (str) -> str
    """
    Run the tests for a single model, printing the results to stdout.

    *model* can be a python file, which is handy for checking user defined
    plugin models.
    """
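    # Hypothetical usage from an interactive session (model names are only
    # examples):
    #     print(run_one("cylinder"))
    #     print(run_one("plugins/my_model.py"))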
    # Note that running main() directly did not work from within the
    # wxPython pycrust console.  Instead of the results appearing in the
    # window they were printed to the underlying console.
    from unittest.runner import TextTestResult, _WritelnDecorator

    # Build an object to capture and print the test results
    stream = _WritelnDecorator(StringIO())  # Add writeln() method to stream
    verbosity = 2
    descriptions = True
    result = TextTestResult(stream, descriptions, verbosity)

    # Build a test suite containing just the model
    loaders = ['opencl'] if use_opencl() else ['dll']
    models = [model]
    try:
        suite = make_suite(loaders, models)
    except Exception:
        import traceback
        stream.writeln(traceback.format_exc())
        return

    # Warn if there are no user defined tests.
    # Note: the test suite constructed above only has one test in it, which
    # runs through some smoke tests to make sure the model runs, then runs
    # through the input-output pairs given in the model definition file.  To
    # check if any such pairs are defined, therefore, we just need to check if
    # they are in the first test of the test suite.  We do this with an
    # iterator since we don't have direct access to the list of tests in the
    # test suite.
    # In Qt5 suite.run() will clear all tests in the suite after running
    # with no way of retaining them for the test below, so let's check
    # for user tests before running the suite.
    for test in suite:
        if not test.info.tests:
            stream.writeln("Note: %s has no user defined tests."%model)
        break
    else:
        stream.writeln("Note: no test suite created --- this should never happen")

    # Run the test suite
    suite.run(result)

    # Print the failures and errors
    for _, tb in result.errors:
        stream.writeln(tb)
    for _, tb in result.failures:
        stream.writeln(tb)

    output = stream.getvalue()
    stream.close()
    return output


def main():
    # type: () -> int
    """
    Run tests for the models given on the command line.

    Returns 0 if success or 1 if any tests fail.
    """
    try:
        from xmlrunner import XMLTestRunner as TestRunner
        test_args = {'output': 'logs'}
    except ImportError:
        from unittest import TextTestRunner as TestRunner
        test_args = {}

    parser = argparse.ArgumentParser(description="Test SasModels Models")
    parser.add_argument("-v", "--verbose", action="store_const",
                        default=1, const=2, help="Use verbose output")
    parser.add_argument("engine", metavar="[engine]",
                        help="Engines on which to run the test.  "
                        "Valid values are opencl, dll, and opencl_and_dll. "
                        "Defaults to opencl_and_dll if no value is given")
    parser.add_argument("models", nargs="*",
                        help='The names of the models to be tested.  '
                        'If the first model is "all", then all except the '
                        'remaining models will be tested.')
    args, models = parser.parse_known_args()

    if args.engine == "opencl":
        if not use_opencl():
            print("opencl is not available")
            return 1
        loaders = ['opencl']
    elif args.engine == "dll":
        loaders = ["dll"]
    elif args.engine == "opencl_and_dll":
        loaders = ['opencl', 'dll'] if use_opencl() else ['dll']
    else:
        # Default to running both engines
        loaders = ['opencl', 'dll'] if use_opencl() else ['dll']
        args.models.insert(0, args.engine)

    runner = TestRunner(verbosity=args.verbose, **test_args)
    result = runner.run(make_suite(loaders, args.models))
    return 1 if result.failures or result.errors else 0


def model_tests():
    # type: () -> Iterator[Callable[[], None]]
    """
    Test runner visible to nosetests.

    Run "nosetests sasmodels" on the command line to invoke it.
    """
    loaders = ['opencl', 'dll'] if use_opencl() else ['dll']
    tests = make_suite(loaders, ['all'])
    def build_test(test):
        # In order for nosetests to show the test name, wrap the test.run_all
        # instance in a function that takes the test name as a parameter which
        # will be displayed when the test is run.  Do this in a function so
        # that it properly captures the context for tests that are captured
        # here and run later.  If done directly in the for loop, then the
        # looping variable test would be shared amongst all the tests, and we
        # would be repeatedly testing vesicle.

        # Note: in sasview sas.sasgui.perspectives.fitting.gpu_options
        # requires that the test.description field be set.
        wrap = lambda: test.run_all()
        wrap.description = test.test_name
        return wrap
        # The following would work with nosetests and pytest:
        #     return lambda name: test.run_all(), test.test_name

    for test in tests:
        yield build_test(test)


if __name__ == "__main__":
    sys.exit(main())