source: sasmodels/sasmodels/model_test.py @ d8eaa3d

Last change on this file since d8eaa3d was d92182f, checked in by Paul Kienzle <pkienzle@…>, 5 years ago

tweak option handling for model_test

# -*- coding: utf-8 -*-
"""
Run model unit tests.

Usage::

    python -m sasmodels.model_test [opencl|cuda|dll|all] model1 model2 ...

If the first model is 'all', then all models except those listed after it
will be tested.  Subgroups are also possible, such as 'py', 'single' or
'1d'.  See :func:`core.list_models` for details.

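For example, to check a single model against the compiled (dll) kernels
with verbose output (the model name here is illustrative)::

    python -m sasmodels.model_test -v -e dll sphere
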
Each model is tested using the default parameters at q=0.1, (qx, qy)=(0.1, 0.1),
and Fq is called to make sure R_eff, volume and volume ratio are computed.
The return values at these points are not considered.  The test is only to
verify that the models run to completion, and do not produce inf or NaN.

Tests are defined with the *tests* attribute in the model.py file.  *tests*
is a list of individual tests to run, where each test consists of the
parameter values for the test, the q-values and the expected results.  For
the effective radius and volume ratio tests, use the extended output form,
which checks each output of kernel.Fq.  For 1-D tests, specify either a
single q value with the corresponding I(q) value, or a list of q values
with a list of I(q) values.

That is::

    tests = [
        [ {parameters}, q, I(q)],
        [ {parameters}, [q], [I(q)] ],
        [ {parameters}, [q1, q2, ...], [I(q1), I(q2), ...]],

        [ {parameters}, (qx, qy), I(qx, qy)],
        [ {parameters}, [(qx1, qy1), (qx2, qy2), ...],
                        [I(qx1, qy1), I(qx2, qy2), ...]],

        [ {parameters}, q, F(q), F^2(q), R_eff, V, V_r ],
        ...
    ]

Parameters are *key:value* pairs, where key is one of the parameters of the
model and value is the value to use for the test.  Any parameters not given
in the parameter list will take on the default parameter value.

Precision defaults to 5 digits (relative).
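
For example, a model file might end with something like the following
(the parameter name is illustrative; a *None* expected value means the
point is smoke-tested only)::

    tests = [
        # single 1-D q, smoke-tested only
        [{'radius': 30.0}, 0.1, None],
        # several 2-D points, smoke-tested only
        [{}, [(0.1, 0.1), (0.2, 0.2)], [None, None]],
        # extended form checking the kernel.Fq outputs:
        #   [pars, q, F(q), F^2(q), R_eff, V, V_r]
        [{}, 0.1, None, None, None, None, None],
    ]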
46"""
47from __future__ import print_function
48
49import argparse
50import sys
51import unittest
52import traceback
53
54try:
55    from StringIO import StringIO
56except ImportError:
57    # StringIO.StringIO renamed to io.StringIO in Python 3
58    # Note: io.StringIO exists in python 2, but using unicode instead of str
59    from io import StringIO
60
61import numpy as np  # type: ignore
62
63from . import core
64from .core import list_models, load_model_info, build_model
65from .direct_model import call_kernel, call_Fq
66from .exception import annotate_exception
67from .modelinfo import expand_pars
68from .kernelcl import use_opencl
69from .kernelcuda import use_cuda
70from . import product
71
72# pylint: disable=unused-import
73try:
    from typing import Any, Callable, Dict, Iterator, List, Tuple
except ImportError:
    pass
else:
    from .modelinfo import ParameterTable, ParameterSet, TestCondition, ModelInfo
    from .kernel import KernelModel
# pylint: enable=unused-import

def make_suite(loaders, models):
    # type: (List[str], List[str]) -> unittest.TestSuite
    """
    Construct the pyunit test suite.

    *loaders* is the list of kernel drivers to use (dll, opencl or cuda).
    For python models the python driver is always used.

    *models* is the list of models to test, or *["all"]* to test all models.
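
    For example, to build a suite for a couple of specific models on the
    compiled kernels (model names here are illustrative)::

        suite = make_suite(['dll'], ['sphere', 'cylinder'])
        unittest.TextTestRunner().run(suite)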
    """
    suite = unittest.TestSuite()

    try:
        # See if the first model parses as a model group
        group = list_models(models[0])
        skip = models[1:]
        models = group
    except Exception:
        skip = []
    for model_name in models:
        if model_name not in skip:
            model_info = load_model_info(model_name)
            _add_model_to_suite(loaders, suite, model_info)

    return suite

def _add_model_to_suite(loaders, suite, model_info):
    ModelTestCase = _hide_model_case_from_nose()

    #print('------')
    #print('found tests in', model_name)
    #print('------')

    # If the model is implemented in python, use the dll loader to call the
    # python kernel; don't try to call an OpenCL kernel since it will not be
    # available in some environments.
    is_py = callable(model_info.Iq)

    # Some OpenCL drivers seem to be flaky, and are not producing the
    # expected result.  Since we don't have known test values yet for
    # all of our models, we are instead going to compare the results
    # for the 'smoke test' (that is, evaluation at q=0.1 for the default
    # parameters just to see that the model runs to completion) between
    # the OpenCL and the DLL.  To do this, we define a 'stash' which is
    # shared between OpenCL and DLL tests.  This is just a list.  If the
    # list is empty (which it will be when DLL runs, if the DLL runs
    # first), then the results are appended to the list.  If the list
    # is not empty (which it will be when OpenCL runs second), the results
    # are compared to the results stored in the first element of the list.
    # This is a horrible stateful hack which only makes sense because the
    # test suite is thrown away after being run once.
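    # Illustrative trace of the protocol (assuming the DLL test runs first):
    #     1st run (dll):    stash == []          -> stash.append(results)
    #     2nd run (opencl): stash == [results_0] -> compare against stash[0]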
    stash = []

    if is_py:  # kernel implemented in python
        test_name = "%s-python"%model_info.name
        test_method_name = "test_%s_python" % model_info.id
        test = ModelTestCase(test_name, model_info,
                                test_method_name,
                                platform="dll",  # so that the python kernel is used
                                dtype="double",
                                stash=stash)
        suite.addTest(test)
    else:   # kernel implemented in C

        # test using dll if desired
        if 'dll' in loaders or not use_opencl():
            test_name = "%s-dll"%model_info.name
            test_method_name = "test_%s_dll" % model_info.id
            test = ModelTestCase(test_name, model_info,
                                    test_method_name,
                                    platform="dll",
                                    dtype="double",
                                    stash=stash)
            suite.addTest(test)

        # test using opencl if desired and available
        if 'opencl' in loaders and use_opencl():
            test_name = "%s-opencl"%model_info.name
            test_method_name = "test_%s_opencl" % model_info.id
            # Using dtype=None so that the models that are only
            # correct for double precision are not tested using
            # single precision.  The choice is determined by the
            # presence of *single=False* in the model file.
            test = ModelTestCase(test_name, model_info,
                                    test_method_name,
                                    platform="ocl", dtype=None,
                                    stash=stash)
            #print("defining", test_name)
            suite.addTest(test)

        # test using cuda if desired and available
        if 'cuda' in loaders and use_cuda():
            test_name = "%s-cuda" % model_info.name
            test_method_name = "test_%s_cuda" % model_info.id
            # Using dtype=None so that the models that are only
            # correct for double precision are not tested using
            # single precision.  The choice is determined by the
            # presence of *single=False* in the model file.
            test = ModelTestCase(test_name, model_info,
                                    test_method_name,
                                    platform="cuda", dtype=None,
                                    stash=stash)
            #print("defining", test_name)
            suite.addTest(test)


def _hide_model_case_from_nose():
    # type: () -> type
    class ModelTestCase(unittest.TestCase):
        """
        Test suite for a particular model with a particular kernel driver.

        The test suite runs a simple smoke test to make sure the model
        functions, then runs the list of tests at the bottom of the model
        description file.
        """
        def __init__(self, test_name, model_info, test_method_name,
                     platform, dtype, stash):
            # type: (str, ModelInfo, str, str, DType, List[Any]) -> None
            self.test_name = test_name
            self.info = model_info
            self.platform = platform
            self.dtype = dtype
            self.stash = stash  # container for the results of the first run

            setattr(self, test_method_name, self.run_all)
            unittest.TestCase.__init__(self, test_method_name)

        def run_all(self):
            # type: () -> None
            """
            Run all the tests in the test suite, including smoke tests.
            """
            smoke_tests = [
                # test validity at reasonable values
                ({}, 0.1, None),
                ({}, (0.1, 0.1), None),
                # test validity at q = 0
                #({}, 0.0, None),
                #({}, (0.0, 0.0), None),
                # test vector form
                ({}, [0.001, 0.01, 0.1], [None]*3),
                ({}, [(0.1, 0.1)]*2, [None]*2),
                # test that Fq will run, and return R_eff, V, V_r
                ({}, 0.1, None, None, None, None, None),
                ]
            tests = smoke_tests
            #tests = []
            if self.info.tests is not None:
                tests += self.info.tests
            S_tests = [test for test in tests if '@S' in test[0]]
            P_tests = [test for test in tests if '@S' not in test[0]]
            try:
                model = build_model(self.info, dtype=self.dtype,
                                    platform=self.platform)
                results = [self.run_one(model, test) for test in P_tests]
                for test in S_tests:
                    # pull the S model name out of the test defn
                    pars = test[0].copy()
                    s_name = pars.pop('@S')
                    ps_test = [pars] + list(test[1:])
                    # build the P@S model
                    s_info = load_model_info(s_name)
                    ps_info = product.make_product_info(self.info, s_info)
                    ps_model = build_model(ps_info, dtype=self.dtype,
                                           platform=self.platform)
                    # run the tests
                    results.append(self.run_one(ps_model, ps_test))

                if self.stash:
                    for test, target, actual in zip(tests, self.stash[0], results):
                        assert np.all(abs(target-actual) < 5e-5*abs(actual)), \
                            ("GPU/CPU comparison expected %s but got %s for %s"
                             % (target, actual, test[0]))
                else:
                    self.stash.append(results)

                # Check for missing tests.  Only do so for the "dll" tests
                # to reduce noise from both opencl and cuda, and because
                # python kernels use platform="dll".
                if self.platform == "dll":
                    missing = []
                    ## Uncomment the following to require test cases
                    #missing = self._find_missing_tests()
                    if missing:
                        raise ValueError("Missing tests for "+", ".join(missing))

            except:
                annotate_exception(self.test_name)
                raise

        def _find_missing_tests(self):
            # type: () -> List[str]
            """make sure there are 1D and 2D tests as appropriate"""
            model_has_1D = True
            model_has_2D = any(p.type == 'orientation'
                               for p in self.info.parameters.kernel_parameters)

            # Lists of tests that have a result that is not None
            single = [test for test in self.info.tests
                      if not isinstance(test[2], list) and test[2] is not None]
            tests_has_1D_single = any(isinstance(test[1], float) for test in single)
            tests_has_2D_single = any(isinstance(test[1], tuple) for test in single)

            multiple = [test for test in self.info.tests
                        if isinstance(test[2], list)
                        and not all(result is None for result in test[2])]
            tests_has_1D_multiple = any(isinstance(test[1][0], float)
                                        for test in multiple)
            tests_has_2D_multiple = any(isinstance(test[1][0], tuple)
                                        for test in multiple)

            missing = []
            if model_has_1D and not (tests_has_1D_single or tests_has_1D_multiple):
                missing.append("1D")
            if model_has_2D and not (tests_has_2D_single or tests_has_2D_multiple):
                missing.append("2D")

            return missing

        def run_one(self, model, test):
            # type: (KernelModel, TestCondition) -> np.ndarray
            """Run a single test case."""
            user_pars, x, y = test[:3]
            pars = expand_pars(self.info.parameters, user_pars)
            invalid = invalid_pars(self.info.parameters, pars)
            if invalid:
                raise ValueError("Unknown parameters in test: " + ", ".join(invalid))

            if not isinstance(y, list):
                y = [y]
            if not isinstance(x, list):
                x = [x]

            self.assertEqual(len(y), len(x))

            if isinstance(x[0], tuple):
                qx, qy = zip(*x)
                q_vectors = [np.array(qx), np.array(qy)]
            else:
                q_vectors = [np.array(x)]

            kernel = model.make_kernel(q_vectors)
            if len(test) == 3:
                actual = call_kernel(kernel, pars)
                self._check_vectors(x, y, actual, 'I')
                return actual
            else:
                y1 = y
                y2 = test[3] if isinstance(test[3], list) else [test[3]]
                F1, F2, R_eff, volume, volume_ratio = call_Fq(kernel, pars)
                if F1 is not None:  # F1 is None for models with Iq instead of Fq
                    self._check_vectors(x, y1, F1, 'F')
                self._check_vectors(x, y2, F2, 'F^2')
                self._check_scalar(test[4], R_eff, 'R_eff')
                self._check_scalar(test[5], volume, 'volume')
                self._check_scalar(test[6], volume_ratio, 'form:shell ratio')
                return F2

        def _check_scalar(self, target, actual, name):
            if target is None:
                # smoke test --- make sure it runs and produces a value
                self.assertTrue(not np.isnan(actual),
                                'invalid %s: %s' % (name, actual))
            elif np.isnan(target):
                # make sure nans match
                self.assertTrue(np.isnan(actual),
                                '%s: expected:%s; actual:%s'
                                % (name, target, actual))
            else:
                # is_near does not work for infinite values, so also test
                # for exact values.
                self.assertTrue(target == actual or is_near(target, actual, 5),
                                '%s: expected:%s; actual:%s'
                                % (name, target, actual))

        def _check_vectors(self, x, target, actual, name='I'):
            self.assertTrue(len(actual) > 0,
                            '%s(...) expected return'%name)
            if target is None:
                return
            self.assertEqual(len(target), len(actual),
                             '%s(...) returned wrong length'%name)
            for xi, yi, actual_yi in zip(x, target, actual):
                if yi is None:
                    # smoke test --- make sure it runs and produces a value
                    self.assertTrue(not np.isnan(actual_yi),
                                    'invalid %s(%s): %s' % (name, xi, actual_yi))
                elif np.isnan(yi):
                    # make sure nans match
                    self.assertTrue(np.isnan(actual_yi),
                                    '%s(%s): expected:%s; actual:%s'
                                    % (name, xi, yi, actual_yi))
                else:
                    # is_near does not work for infinite values, so also test
                    # for exact values.
                    self.assertTrue(yi == actual_yi or is_near(yi, actual_yi, 5),
                                    '%s(%s): expected:%s; actual:%s'
                                    % (name, xi, yi, actual_yi))

    return ModelTestCase

def invalid_pars(partable, pars):
    # type: (ParameterTable, Dict[str, float]) -> List[str]
    """
    Return a list of parameter names that are not part of the model.
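
    For example, for a model with a *radius* parameter (names here are
    illustrative)::

        invalid_pars(partable, {'radius': 1.0, 'radius_pd_n': 35, 'oops': 2.0})
        # => ['oops']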
    """
    names = set(p.id for p in partable.call_parameters)
    invalid = []
    for par in sorted(pars.keys()):
        # special handling of R_eff mode, which is not a usual parameter
        if par == product.RADIUS_MODE_ID:
            continue
        parts = par.split('_pd')
        if len(parts) > 1 and parts[1] not in ("", "_n", "_nsigma", "_type"):
            invalid.append(par)
            continue
        if parts[0] not in names:
            invalid.append(par)
    return invalid


def is_near(target, actual, digits=5):
    # type: (float, float, int) -> bool
    """
    Returns true if *actual* is within *digits* significant digits of *target*.
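
    A sketch of the behavior, with values chosen to land just inside and
    well outside the 1.5-unit tolerance in the fifth digit::

        >>> is_near(123400., 123410., digits=5)
        True
        >>> is_near(123400., 123600., digits=5)
        False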
    """
    import math
    shift = 10**math.ceil(math.log10(abs(target)))
    return abs(target-actual)/shift < 1.5*10**-digits

# CRUFT: old interface; should be deprecated and removed
def run_one(model_name):
    # msg = "use check_model(model_info) rather than run_one(model_name)"
    # warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
    try:
        model_info = load_model_info(model_name)
    except Exception:
        output = traceback.format_exc()
        return output

    success, output = check_model(model_info)
    return output

def check_model(model_info):
    # type: (ModelInfo) -> Tuple[bool, str]
    """
    Run the tests for a single model, capturing the output.

    Returns success status and the output string.
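
    Minimal usage sketch (the model name is illustrative)::

        success, output = check_model(load_model_info('sphere'))
        print(output)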
    """
    # Note that running main() directly did not work from within the
    # wxPython pycrust console.  Instead of the results appearing in the
    # window they were printed to the underlying console.
    from unittest.runner import TextTestResult, _WritelnDecorator

    # Build an object to capture and print the test results
    stream = _WritelnDecorator(StringIO())  # Add writeln() method to stream
    verbosity = 2
    descriptions = True
    result = TextTestResult(stream, descriptions, verbosity)

    # Build a test suite containing just the model
    loaders = ['opencl' if use_opencl() else 'cuda' if use_cuda() else 'dll']
    suite = unittest.TestSuite()
    _add_model_to_suite(loaders, suite, model_info)

    # Warn if there are no user defined tests.
    # Note: the test suite constructed above only has one test in it, which
    # runs through some smoke tests to make sure the model runs, then runs
    # through the input-output pairs given in the model definition file.  To
    # check if any such pairs are defined, therefore, we just need to check if
    # they are in the first test of the test suite.  We do this with an
    # iterator since we don't have direct access to the list of tests in the
    # test suite.
    # In Qt5 suite.run() will clear all tests in the suite after running
    # with no way of retaining them for the test below, so let's check
    # for user tests before running the suite.
    for test in suite:
        if not test.info.tests:
            stream.writeln("Note: %s has no user defined tests."%model_info.name)
        break
    else:
        stream.writeln("Note: no test suite created --- this should never happen")

    # Run the test suite
    suite.run(result)

    # Print the failures and errors
    for _, tb in result.errors:
        stream.writeln(tb)
    for _, tb in result.failures:
        stream.writeln(tb)

    output = stream.getvalue()
    stream.close()
    return result.wasSuccessful(), output


def model_tests():
    # type: () -> Iterator[Callable[[], None]]
    """
    Test runner visible to nosetests.

    Run "nosetests sasmodels" on the command line to invoke it.
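
    Each yielded item is a callable with a *description* attribute; calling
    it runs the tests for one model (a minimal sketch)::

        for test in model_tests():
            print(test.description)
            test()  # unittest assertions raise on failure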
    """
    loaders = ['dll']
    if use_opencl():
        loaders.append('opencl')
    if use_cuda():
        loaders.append('cuda')
    tests = make_suite(loaders, ['all'])
    def build_test(test):
        # In order for nosetest to show the test name, wrap the test.run_all
        # bound method in a function that carries the test name, which will
        # be displayed when the test is run.  Do this in a helper function so
        # that it properly captures its own test for wrappers that are
        # captured now and run later.  If this were done directly in the for
        # loop below, the loop variable test would be shared amongst all the
        # wrappers, and we would be repeatedly testing vesicle.

        # Note: in sasview sas.sasgui.perspectives.fitting.gpu_options
        # requires that the test.description field be set.
        wrap = lambda: test.run_all()
        wrap.description = test.test_name
        return wrap
        # The following would work with nosetests and pytest:
        #     return lambda name: test.run_all(), test.test_name
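        # Counter-example of the late-binding pitfall described above
        # (illustrative): building the wrappers inline, as in
        #     wrappers = [lambda: test.run_all() for test in tests]
        # leaves every lambda bound to the same 'test' variable, so each
        # wrapper would run whichever test happened to come last.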

    for test in tests:
        yield build_test(test)


def main():
    # type: () -> int
    """
    Run the tests for the models given on the command line.

    Returns 0 if successful or 1 if any tests fail.
    """
    try:
        from xmlrunner import XMLTestRunner as TestRunner
        test_args = {'output': 'logs'}
    except ImportError:
        from unittest import TextTestRunner as TestRunner
        test_args = {}

    parser = argparse.ArgumentParser(description="Test SasModels Models")
    parser.add_argument("-v", "--verbose", action="store_const",
                        default=1, const=2, help="Use verbose output")
    parser.add_argument("-e", "--engine", default="all",
                        help="Engines on which to run the test.  "
                        "Valid values are opencl, cuda, dll, and all. "
                        "Defaults to all if no value is given")
    parser.add_argument("models", nargs="*",
                        help="The names of the models to be tested.  "
                        "If the first model is 'all', then all but the listed "
                        "models will be tested.  See core.list_models() for "
                        "names of other groups, such as 'py' or 'single'.")
    args, models = parser.parse_known_args()

    if args.engine == "opencl":
        if not use_opencl():
            print("opencl is not available")
            return 1
        loaders = ['opencl']
    elif args.engine == "dll":
        loaders = ["dll"]
    elif args.engine == "cuda":
        if not use_cuda():
            print("cuda is not available")
            return 1
        loaders = ['cuda']
    elif args.engine == "all":
        loaders = ['dll']
        if use_opencl():
            loaders.append('opencl')
        if use_cuda():
            loaders.append('cuda')
    else:
        print("unknown engine " + args.engine)
        return 1

    runner = TestRunner(verbosity=args.verbose, **test_args)
    result = runner.run(make_suite(loaders, args.models))
    return 1 if result.failures or result.errors else 0


if __name__ == "__main__":
    sys.exit(main())