source: sasmodels/sasmodels/model_test.py @ b9c7379

Last change on this file: b9c7379, checked in by Adam Washington <adam.washington@…>, 5 years ago

Merge branch 'beta_approx' of github.com:SasView/sasmodels into test_args

# -*- coding: utf-8 -*-
"""
Run model unit tests.

Usage::

    python -m sasmodels.model_test [opencl|cuda|dll] model1 model2 ...

    if model1 is 'all', then all except the remaining models will be tested

Each model is tested using the default parameters at q=0.1, (qx, qy)=(0.1, 0.1),
and Fq is called to make sure R_eff, volume and volume ratio are computed.
The return values at these points are not considered.  The test is only to
verify that the models run to completion, and do not produce inf or NaN.

Tests are defined with the *tests* attribute in the model.py file.  *tests*
is a list of individual tests to run, where each test consists of the
parameter values for the test, the q-values and the expected results.  For
the effective radius test and volume ratio tests, use the extended output
form, which checks each output of kernel.Fq. For 1-D tests, either specify
the q value or a list of q-values, and the corresponding I(q) value, or
list of I(q) values.

That is::

    tests = [
        [ {parameters}, q, I(q)],
        [ {parameters}, [q], [I(q)] ],
        [ {parameters}, [q1, q2, ...], [I(q1), I(q2), ...]],

        [ {parameters}, (qx, qy), I(qx, qy)],
        [ {parameters}, [(qx1, qy1), (qx2, qy2), ...],
                        [I(qx1, qy1), I(qx2, qy2), ...]],

        [ {parameters}, q, F(q), F^2(q), R_eff, V, V_r ],
        ...
    ]

Parameters are *key:value* pairs, where key is one of the parameters of the
model and value is the value to use for the test.  Any parameters not given
in the parameter list will take on the default parameter value.

Precision defaults to 5 digits (relative).
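For example, a hypothetical test block (parameter names and expected values
below are illustrative placeholders, not values taken from a real model
file) might read::

    tests = [
        # 1-D: check I(q) at two q values
        [{"radius": 30.0, "sld": 1.0}, [0.01, 0.1], [12.0, 0.034]],
        # 2-D: check I(qx, qy) at a single point
        [{"radius": 30.0}, (0.1, 0.1), 0.0021],
        # extended form: check F^2(q), R_eff, volume and volume ratio
        [{"radius": 30.0}, 0.1, None, 0.034, 30.0, 1.13e5, 1.0],
    ]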
44"""
45from __future__ import print_function
46
47import argparse
48import sys
49import unittest
50
51try:
52    from StringIO import StringIO
53except ImportError:
54    # StringIO.StringIO renamed to io.StringIO in Python 3
55    # Note: io.StringIO exists in python 2, but using unicode instead of str
56    from io import StringIO
57
58import numpy as np  # type: ignore
59
60from . import core
61from .core import list_models, load_model_info, build_model
62from .direct_model import call_kernel, call_Fq
63from .exception import annotate_exception
64from .modelinfo import expand_pars
65from .kernelcl import use_opencl
66from .kernelcuda import use_cuda
67from . import product
68
69# pylint: disable=unused-import
70try:
    from typing import Any, Callable, Dict, Iterator, List
except ImportError:
    pass
else:
    from .modelinfo import ParameterTable, ParameterSet, TestCondition, ModelInfo
    from .kernel import KernelModel
# pylint: enable=unused-import


def make_suite(loaders, models):
    # type: (List[str], List[str]) -> unittest.TestSuite
    """
    Construct the pyunit test suite.

    *loaders* is the list of kernel drivers to use (dll, opencl or cuda).
    For python models the python driver is always used.

    *models* is the list of models to test, or *["all"]* to test all models.
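    For example (the model names here are illustrative)::

        suite = make_suite(['dll'], ['sphere', 'cylinder'])
        suite = make_suite(['dll', 'opencl'], ['all'])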
89    """
90    ModelTestCase = _hide_model_case_from_nose()
91    suite = unittest.TestSuite()
92
93    if models[0] in core.KINDS:
94        skip = models[1:]
95        models = list_models(models[0])
96    else:
97        skip = []
98    for model_name in models:
99        if model_name in skip:
100            continue
101        model_info = load_model_info(model_name)
102
103        #print('------')
104        #print('found tests in', model_name)
105        #print('------')
106
107        # if ispy then use the dll loader to call pykernel
108        # don't try to call cl kernel since it will not be
109        # available in some environmentes.
        is_py = callable(model_info.Iq)

        # Some OpenCL drivers seem to be flaky, and are not producing the
        # expected result.  Since we don't have known test values yet for
        # all of our models, we are instead going to compare the results
        # for the 'smoke test' (that is, evaluation at q=0.1 for the default
        # parameters just to see that the model runs to completion) between
        # the OpenCL and the DLL.  To do this, we define a 'stash' which is
        # shared between OpenCL and DLL tests.  This is just a list.  If the
        # list is empty (which it will be when DLL runs, if the DLL runs
        # first), then the results are appended to the list.  If the list
        # is not empty (which it will be when OpenCL runs second), the results
        # are compared to the results stored in the first element of the list.
        # This is a horrible stateful hack which only makes sense because the
        # test suite is thrown away after being run once.
        stash = []

        if is_py:  # kernel implemented in python
            test_name = "%s-python"%model_name
            test_method_name = "test_%s_python" % model_info.id
            test = ModelTestCase(test_name, model_info,
                                 test_method_name,
                                 platform="dll",  # so that the python kernel is used
                                 dtype="double",
                                 stash=stash)
            suite.addTest(test)
        else:   # kernel implemented in C

            # test using dll if desired
            if 'dll' in loaders:
                test_name = "%s-dll"%model_name
                test_method_name = "test_%s_dll" % model_info.id
                test = ModelTestCase(test_name, model_info,
                                     test_method_name,
                                     platform="dll",
                                     dtype="double",
                                     stash=stash)
                suite.addTest(test)

            # test using opencl if desired and available
            if 'opencl' in loaders and use_opencl():
                test_name = "%s-opencl"%model_name
                test_method_name = "test_%s_opencl" % model_info.id
                # Using dtype=None so that the models that are only
                # correct for double precision are not tested using
                # single precision.  The choice is determined by the
                # presence of *single=False* in the model file.
                test = ModelTestCase(test_name, model_info,
                                     test_method_name,
                                     platform="ocl", dtype=None,
                                     stash=stash)
                #print("defining", test_name)
                suite.addTest(test)

            # test using cuda if desired and available
            if 'cuda' in loaders and use_cuda():
                test_name = "%s-cuda"%model_name
                test_method_name = "test_%s_cuda" % model_info.id
                # Using dtype=None so that the models that are only
                # correct for double precision are not tested using
                # single precision.  The choice is determined by the
                # presence of *single=False* in the model file.
                test = ModelTestCase(test_name, model_info,
                                     test_method_name,
                                     platform="cuda", dtype=None,
                                     stash=stash)
                #print("defining", test_name)
                suite.addTest(test)

    return suite

def _hide_model_case_from_nose():
    # type: () -> type
    class ModelTestCase(unittest.TestCase):
        """
        Test suite for a particular model with a particular kernel driver.

        The test suite runs a simple smoke test to make sure the model
        functions, then runs the list of tests at the bottom of the model
        description file.
        """
        def __init__(self, test_name, model_info, test_method_name,
                     platform, dtype, stash):
            # type: (str, ModelInfo, str, str, DType, List[Any]) -> None
            self.test_name = test_name
            self.info = model_info
            self.platform = platform
            self.dtype = dtype
            self.stash = stash  # container for the results of the first run

            setattr(self, test_method_name, self.run_all)
            unittest.TestCase.__init__(self, test_method_name)

        def run_all(self):
            # type: () -> None
            """
            Run all the tests in the test suite, including smoke tests.
            """
            smoke_tests = [
                # test validity at reasonable values
                ({}, 0.1, None),
                ({}, (0.1, 0.1), None),
                # test validity at q = 0
                #({}, 0.0, None),
                #({}, (0.0, 0.0), None),
                # test vector form
                ({}, [0.001, 0.01, 0.1], [None]*3),
                ({}, [(0.1, 0.1)]*2, [None]*2),
                # test that Fq will run, and return R_eff, V, V_r
                ({}, 0.1, None, None, None, None, None),
                ]
            tests = smoke_tests
            #tests = []
            if self.info.tests is not None:
                tests += self.info.tests
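            # Tests whose parameter dict contains the special key '@S' are run
            # against the product of this model and the named structure factor,
            # e.g. {'@S': 'hardsphere', ...} (illustrative); the remaining
            # entries are passed as ordinary parameters.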
            S_tests = [test for test in tests if '@S' in test[0]]
            P_tests = [test for test in tests if '@S' not in test[0]]
            try:
                model = build_model(self.info, dtype=self.dtype,
                                    platform=self.platform)
                results = [self.run_one(model, test) for test in P_tests]
                for test in S_tests:
                    # pull the S model name out of the test defn
                    pars = test[0].copy()
                    s_name = pars.pop('@S')
                    ps_test = [pars] + list(test[1:])
                    # build the P@S model
                    s_info = load_model_info(s_name)
                    ps_info = product.make_product_info(self.info, s_info)
                    ps_model = build_model(ps_info, dtype=self.dtype,
                                           platform=self.platform)
                    # run the tests
                    results.append(self.run_one(ps_model, ps_test))

                if self.stash:
                    for test, target, actual in zip(tests, self.stash[0], results):
                        assert np.all(abs(target-actual) < 5e-5*abs(actual)), \
                            ("GPU/CPU comparison expected %s but got %s for %s"
                             % (target, actual, test[0]))
                else:
                    self.stash.append(results)

                # Check for missing tests.  Only do so for the "dll" tests
                # to reduce noise from both opencl and cuda, and because
                # python kernels use platform="dll".
                if self.platform == "dll":
                    missing = []
                    ## Uncomment the following to require test cases
                    #missing = self._find_missing_tests()
                    if missing:
                        raise ValueError("Missing tests for "+", ".join(missing))

            except:
                annotate_exception(self.test_name)
                raise

        def _find_missing_tests(self):
            # type: () -> List[str]
            """Return a list of the test types ("1D", "2D") missing for this model."""
            model_has_1D = True
            model_has_2D = any(p.type == 'orientation'
                               for p in self.info.parameters.kernel_parameters)

            # Lists of tests that have a result that is not None
            single = [test for test in self.info.tests
                      if not isinstance(test[2], list) and test[2] is not None]
            tests_has_1D_single = any(isinstance(test[1], float) for test in single)
            tests_has_2D_single = any(isinstance(test[1], tuple) for test in single)

            multiple = [test for test in self.info.tests
                        if isinstance(test[2], list)
                        and not all(result is None for result in test[2])]
            tests_has_1D_multiple = any(isinstance(test[1][0], float)
                                        for test in multiple)
            tests_has_2D_multiple = any(isinstance(test[1][0], tuple)
                                        for test in multiple)

            missing = []
            if model_has_1D and not (tests_has_1D_single or tests_has_1D_multiple):
                missing.append("1D")
            if model_has_2D and not (tests_has_2D_single or tests_has_2D_multiple):
                missing.append("2D")

            return missing

        def run_one(self, model, test):
            # type: (KernelModel, TestCondition) -> Any
297            """Run a single test case."""
298            user_pars, x, y = test[:3]
299            pars = expand_pars(self.info.parameters, user_pars)
300            invalid = invalid_pars(self.info.parameters, pars)
301            if invalid:
302                raise ValueError("Unknown parameters in test: " + ", ".join(invalid))
303
304            if not isinstance(y, list):
305                y = [y]
306            if not isinstance(x, list):
307                x = [x]
308
309            self.assertEqual(len(y), len(x))
310
311            if isinstance(x[0], tuple):
312                qx, qy = zip(*x)
313                q_vectors = [np.array(qx), np.array(qy)]
314            else:
315                q_vectors = [np.array(x)]
316
317            kernel = model.make_kernel(q_vectors)
318            if len(test) == 3:
319                actual = call_kernel(kernel, pars)
320                self._check_vectors(x, y, actual, 'I')
321                return actual
322            else:
323                y1 = y
                y2 = test[3] if isinstance(test[3], list) else [test[3]]
                F1, F2, R_eff, volume, volume_ratio = call_Fq(kernel, pars)
                if F1 is not None:  # F1 is None for models with Iq instead of Fq
                    self._check_vectors(x, y1, F1, 'F')
                self._check_vectors(x, y2, F2, 'F^2')
                self._check_scalar(test[4], R_eff, 'R_eff')
                self._check_scalar(test[5], volume, 'volume')
                self._check_scalar(test[6], volume_ratio, 'form:shell ratio')
                return F2

        def _check_scalar(self, target, actual, name):
            if target is None:
                # smoke test --- make sure it runs and produces a value
                self.assertTrue(not np.isnan(actual),
                                'invalid %s: %s' % (name, actual))
            elif np.isnan(target):
                # make sure nans match
                self.assertTrue(np.isnan(actual),
                                '%s: expected:%s; actual:%s'
                                % (name, target, actual))
            else:
                # is_near does not work for infinite values, so also test
                # for exact values.
                self.assertTrue(target == actual or is_near(target, actual, 5),
                                '%s: expected:%s; actual:%s'
                                % (name, target, actual))

        def _check_vectors(self, x, target, actual, name='I'):
            self.assertTrue(len(actual) > 0,
                            '%s(...) expected return'%name)
            if target is None:
                return
            self.assertEqual(len(target), len(actual),
                             '%s(...) returned wrong length'%name)
            for xi, yi, actual_yi in zip(x, target, actual):
                if yi is None:
                    # smoke test --- make sure it runs and produces a value
                    self.assertTrue(not np.isnan(actual_yi),
                                    'invalid %s(%s): %s' % (name, xi, actual_yi))
                elif np.isnan(yi):
                    # make sure nans match
                    self.assertTrue(np.isnan(actual_yi),
                                    '%s(%s): expected:%s; actual:%s'
                                    % (name, xi, yi, actual_yi))
                else:
                    # is_near does not work for infinite values, so also test
                    # for exact values.
                    self.assertTrue(yi == actual_yi or is_near(yi, actual_yi, 5),
                                    '%s(%s); expected:%s; actual:%s'
                                    % (name, xi, yi, actual_yi))

    return ModelTestCase

def invalid_pars(partable, pars):
    # type: (ParameterTable, Dict[str, float]) -> List[str]
379    """
380    Return a list of parameter names that are not part of the model.
381    """
382    names = set(p.id for p in partable.call_parameters)
383    invalid = []
384    for par in sorted(pars.keys()):
385        # special handling of R_eff mode, which is not a usual parameter
386        if par == 'radius_effective_type':
387            continue
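        # Allow polydispersity controls alongside the base parameter, e.g.
        # radius_pd, radius_pd_n, radius_pd_nsigma and radius_pd_type
        # (the parameter name here is illustrative).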
        parts = par.split('_pd')
        if len(parts) > 1 and parts[1] not in ("", "_n", "_nsigma", "_type"):
            invalid.append(par)
            continue
        if parts[0] not in names:
            invalid.append(par)
    return invalid


def is_near(target, actual, digits=5):
    # type: (float, float, int) -> bool
    """
    Returns true if *actual* is within *digits* significant digits of *target*.
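    For example (illustrative values), is_near(1.234e-5, 1.2341e-5, 4) is True
    since the values agree to roughly four significant digits, while
    is_near(1.0, 1.1, 2) is False.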
401    """
402    import math
403    shift = 10**math.ceil(math.log10(abs(target)))
404    return abs(target-actual)/shift < 1.5*10**-digits
405
406def run_one(model):
407    # type: (str) -> str
408    """
    Run the tests for a single model, printing the results to stdout.

    *model* can be a python file, which is handy for checking user defined
    plugin models.
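    A typical call (the model names here are illustrative) is::

        print(run_one("cylinder"))
        print(run_one("./my_plugin_model.py"))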
413    """
414    # Note that running main() directly did not work from within the
415    # wxPython pycrust console.  Instead of the results appearing in the
416    # window they were printed to the underlying console.
417    from unittest.runner import TextTestResult, _WritelnDecorator
418
419    # Build a object to capture and print the test results
    stream = _WritelnDecorator(StringIO())  # Add writeln() method to stream
    verbosity = 2
    descriptions = True
    result = TextTestResult(stream, descriptions, verbosity)

    # Build a test suite containing just the model
    loader = 'opencl' if use_opencl() else 'cuda' if use_cuda() else 'dll'
    models = [model]
    try:
        suite = make_suite([loader], models)
    except Exception:
        import traceback
        stream.writeln(traceback.format_exc())
        return

    # Warn if there are no user defined tests.
    # Note: the test suite constructed above only has one test in it, which
    # runs through some smoke tests to make sure the model runs, then runs
    # through the input-output pairs given in the model definition file.  To
    # check if any such pairs are defined, therefore, we just need to check if
    # they are in the first test of the test suite.  We do this with an
    # iterator since we don't have direct access to the list of tests in the
    # test suite.
    # In Qt5 suite.run() will clear all tests in the suite after running
    # with no way of retaining them for the test below, so let's check
    # for user tests before running the suite.
    for test in suite:
        if not test.info.tests:
            stream.writeln("Note: %s has no user defined tests."%model)
        break
    else:
        stream.writeln("Note: no test suite created --- this should never happen")

    # Run the test suite
    suite.run(result)

    # Print the failures and errors
    for _, tb in result.errors:
        stream.writeln(tb)
    for _, tb in result.failures:
        stream.writeln(tb)

    output = stream.getvalue()
    stream.close()
    return output


def main():
    # type: () -> int
    """
    Run tests on the given models.

    Returns 0 on success or 1 if any tests fail.
473    """
474    try:
475        from xmlrunner import XMLTestRunner as TestRunner
476        test_args = {'output': 'logs'}
477    except ImportError:
478        from unittest import TextTestRunner as TestRunner
479        test_args = {}
480
481    parser = argparse.ArgumentParser(description="Test SasModels Models")
482    parser.add_argument("-v", "--verbose", action="store_const",
483                        default=1, const=2, help="Use verbose output")
484    parser.add_argument("engine", metavar="[engine]",
                        help="Engine on which to run the tests.  "
                        "Valid values are opencl, cuda, and dll.  "
                        "Any other value is treated as a model name and the "
                        "tests are run on all available engines.")
488    parser.add_argument("models", nargs="*",
489                        help='The names of the models to be tested.  '
490                        'If the first model is "all", then all except the '
491                        'remaining models will be tested.')
492    args, models = parser.parse_known_args()
493
494    if args.engine == "opencl":
495        if not use_opencl():
496            print("opencl is not available")
497            return 1
498        loaders = ['opencl']
499    elif args.engine == "dll":
500        loaders = ["dll"]
501    elif args.engine == "cuda":
502        if not use_cuda():
503            print("cuda is not available")
504            return 1
505        loaders = ['cuda']
506    else:
        # Default to running all of the available engines
        loaders = ['dll']
        if use_opencl():
            loaders.append('opencl')
        if use_cuda():
            loaders.append('cuda')
        args.models.insert(0, args.engine)

    runner = TestRunner(verbosity=args.verbose, **test_args)
    result = runner.run(make_suite(loaders, args.models))
    return 1 if result.failures or result.errors else 0


def model_tests():
    # type: () -> Iterator[Callable[[], None]]
    """
    Test runner visible to nosetests.

    Run "nosetests sasmodels" on the command line to invoke it.
    """
    loaders = ['dll']
    if use_opencl():
        loaders.append('opencl')
    if use_cuda():
        loaders.append('cuda')
    tests = make_suite(loaders, ['all'])
    def build_test(test):
        # In order for nosetest to show the test name, wrap the test.run_all
        # method in a function that takes the test name as a parameter which
        # will be displayed when the test is run.  Do this as a function so
        # that it properly captures the context for tests that are captured
        # and run later.  If done directly in the for loop, then the looping
        # variable test would be shared amongst all the tests, and we would
        # be repeatedly testing the last model in the list (vesicle).

        # Note: in sasview sas.sasgui.perspectives.fitting.gpu_options
        # requires that the test.description field be set.
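        # (Illustrative sketch of the pitfall avoided by this helper: writing
        #  "yield lambda: test.run_all()" directly in the loop below would
        #  late-bind *test*, so every wrapper would run the same final test.)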
        wrap = lambda: test.run_all()
        wrap.description = test.test_name
        return wrap
        # The following would work with nosetests and pytest:
        #     return lambda name: test.run_all(), test.test_name

    for test in tests:
        yield build_test(test)


if __name__ == "__main__":
    sys.exit(main())