source: sasmodels/sasmodels/model_test.py @ 6652522

Last change on this file since 6652522 was 6652522, checked in by Paul Kienzle <pkienzle@…>, 5 years ago

update cylinder R_eff tests

# -*- coding: utf-8 -*-
"""
Run model unit tests.

Usage::

    python -m sasmodels.model_test [opencl|cuda|dll|all] model1 model2 ...

If model1 is 'all', then all models except those listed after it will be
tested.  Subgroups are also possible, such as 'py', 'single' or '1d'.  See
:func:`core.list_models` for details.

Each model is tested using the default parameters at q=0.1, (qx, qy)=(0.1, 0.1),
and Fq is called to make sure R_eff, volume and volume ratio are computed.
The return values at these points are not considered.  The test is only to
verify that the models run to completion, and do not produce inf or NaN.

Tests are defined with the *tests* attribute in the model.py file.  *tests*
is a list of individual tests to run, where each test consists of the
parameter values for the test, the q-values and the expected results.  For
the effective radius test and volume ratio tests, use the extended output
form, which checks each output of kernel.Fq. For 1-D tests, either specify
the q value or a list of q-values, and the corresponding I(q) value, or
list of I(q) values.

That is::

    tests = [
        [ {parameters}, q, I(q)],
        [ {parameters}, [q], [I(q)] ],
        [ {parameters}, [q1, q2, ...], [I(q1), I(q2), ...]],

        [ {parameters}, (qx, qy), I(qx, qy)],
        [ {parameters}, [(qx1, qy1), (qx2, qy2), ...],
                        [I(qx1, qy1), I(qx2, qy2), ...]],

        [ {parameters}, q, F(q), F^2(q), R_eff, V, V_r ],
        ...
    ]

Parameters are *key:value* pairs, where key is one of the parameters of the
model and value is the value to use for the test.  Any parameters not given
in the parameter list will take on the default parameter value.

Precision defaults to 5 digits (relative).
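
For example, a hypothetical model file might define (the parameter names
and expected values shown here are illustrative placeholders, not real
model output)::

    tests = [
        # single 1-D point
        [{"radius": 30.0}, 0.2, 0.042],
        # 1-D vector, with None to skip the check on the first value
        [{"radius": 30.0, "radius_pd": 0.2, "radius_pd_n": 10},
         [0.01, 0.1], [None, 0.0257]],
        # extended form checking F(q), F^2(q), R_eff, volume, volume ratio
        [{}, 0.1, None, None, 30.0, 1.1e5, 1.0],
    ]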
46"""
from __future__ import print_function

import argparse
import sys
import unittest
import traceback

try:
    from StringIO import StringIO
except ImportError:
    # StringIO.StringIO renamed to io.StringIO in Python 3
    # Note: io.StringIO exists in python 2, but it uses unicode instead of str
    from io import StringIO

import numpy as np  # type: ignore

from .core import list_models, load_model_info, build_model
from .direct_model import call_kernel, call_Fq
from .exception import annotate_exception
from .modelinfo import expand_pars
from .kernelcl import use_opencl
from .kernelcuda import use_cuda
from . import product

# pylint: disable=unused-import
try:
    from typing import List, Iterator, Callable, Dict, Tuple
except ImportError:
    pass
else:
    from .modelinfo import ParameterTable, ParameterSet, TestCondition, ModelInfo
    from .kernel import KernelModel
# pylint: enable=unused-import

def make_suite(loaders, models):
    # type: (List[str], List[str]) -> unittest.TestSuite
    """
    Construct the pyunit test suite.

    *loaders* is the list of kernel drivers to use (dll, opencl or cuda).
    For python models the python driver is always used.

    *models* is the list of models to test, or *["all"]* to test all models.
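
    A minimal usage sketch (model names here are illustrative)::

        suite = make_suite(['dll'], ['sphere', 'cylinder'])
        unittest.TextTestRunner().run(suite)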
    """
    suite = unittest.TestSuite()

    try:
        # See if the first model parses as a model group
        group = list_models(models[0])
        skip = models[1:]
        models = group
    except Exception:
        skip = []
    for model_name in models:
        if model_name not in skip:
            model_info = load_model_info(model_name)
            _add_model_to_suite(loaders, suite, model_info)

    return suite

def _add_model_to_suite(loaders, suite, model_info):
    ModelTestCase = _hide_model_case_from_nose()

    #print('------')
    #print('found tests in', model_name)
    #print('------')

    # If the kernel is implemented in python, use the dll loader to call it;
    # don't try to call the OpenCL kernel since it will not be available in
    # some environments.
    is_py = callable(model_info.Iq)

    # Some OpenCL drivers seem to be flaky, and are not producing the
    # expected result.  Since we don't have known test values yet for
    # all of our models, we are instead going to compare the results
    # for the 'smoke test' (that is, evaluation at q=0.1 for the default
    # parameters just to see that the model runs to completion) between
    # the OpenCL and the DLL.  To do this, we define a 'stash' which is
    # shared between OpenCL and DLL tests.  This is just a list.  If the
    # list is empty (which it will be when DLL runs, if the DLL runs
    # first), then the results are appended to the list.  If the list
    # is not empty (which it will be when OpenCL runs second), the results
    # are compared to the results stored in the first element of the list.
    # This is a horrible stateful hack which only makes sense because the
    # test suite is thrown away after being run once.
    stash = []

    if is_py:  # kernel implemented in python
        test_name = "%s-python"%model_info.name
        test_method_name = "test_%s_python" % model_info.id
        test = ModelTestCase(test_name, model_info,
                             test_method_name,
                             platform="dll",  # python kernels run via the dll path
                             dtype="double",
                             stash=stash)
        suite.addTest(test)
    else:   # kernel implemented in C

        # test using dll if desired
        if 'dll' in loaders or not use_opencl():
            test_name = "%s-dll"%model_info.name
            test_method_name = "test_%s_dll" % model_info.id
            test = ModelTestCase(test_name, model_info,
                                 test_method_name,
                                 platform="dll",
                                 dtype="double",
                                 stash=stash)
            suite.addTest(test)

        # test using opencl if desired and available
        if 'opencl' in loaders and use_opencl():
            test_name = "%s-opencl"%model_info.name
            test_method_name = "test_%s_opencl" % model_info.id
            # Using dtype=None so that the models that are only
            # correct for double precision are not tested using
            # single precision.  The choice is determined by the
            # presence of *single=False* in the model file.
            test = ModelTestCase(test_name, model_info,
                                 test_method_name,
                                 platform="ocl", dtype=None,
                                 stash=stash)
            #print("defining", test_name)
            suite.addTest(test)

        # test using cuda if desired and available
        if 'cuda' in loaders and use_cuda():
            test_name = "%s-cuda" % model_info.id
            test_method_name = "test_%s_cuda" % model_info.id
            # Using dtype=None so that the models that are only
            # correct for double precision are not tested using
            # single precision.  The choice is determined by the
            # presence of *single=False* in the model file.
            test = ModelTestCase(test_name, model_info,
                                 test_method_name,
                                 platform="cuda", dtype=None,
                                 stash=stash)
            #print("defining", test_name)
            suite.addTest(test)


def _hide_model_case_from_nose():
    # type: () -> type
    class ModelTestCase(unittest.TestCase):
        """
        Test suite for a particular model with a particular kernel driver.

        The test suite runs a simple smoke test to make sure the model
        functions, then runs the list of tests at the bottom of the model
        description file.
        """
        def __init__(self, test_name, model_info, test_method_name,
                     platform, dtype, stash):
            # type: (str, ModelInfo, str, str, DType, List[Any]) -> None
            self.test_name = test_name
            self.info = model_info
            self.platform = platform
            self.dtype = dtype
            self.stash = stash  # container for the results of the first run

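            # unittest.TestCase insists that the named test method exist on
            # the instance, so attach run_all under the requested name before
            # calling the base constructor with that name.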
            setattr(self, test_method_name, self.run_all)
            unittest.TestCase.__init__(self, test_method_name)

        def run_all(self):
            # type: () -> None
            """
            Run all the tests in the test suite, including smoke tests.
            """
            smoke_tests = [
                # test validity at reasonable values
                ({}, 0.1, None),
                ({}, (0.1, 0.1), None),
                # test validity at q = 0
                #({}, 0.0, None),
                #({}, (0.0, 0.0), None),
                # test vector form
                ({}, [0.001, 0.01, 0.1], [None]*3),
                ({}, [(0.1, 0.1)]*2, [None]*2),
                # test that Fq will run, and return R_eff, V, V_r
                ({}, 0.1, None, None, None, None, None),
                ]
            tests = smoke_tests
            #tests = []
            if self.info.tests is not None:
                tests += self.info.tests
            S_tests = [test for test in tests if '@S' in test[0]]
            P_tests = [test for test in tests if '@S' not in test[0]]
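            # Tests whose parameter dict includes an '@S' key (for example
            # {'@S': 'hardsphere', ...}) name a structure factor; they are
            # run against the P@S product model rather than the bare model.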
            try:
                model = build_model(self.info, dtype=self.dtype,
                                    platform=self.platform)
                results = [self.run_one(model, test) for test in P_tests]
                for test in S_tests:
                    # pull the S model name out of the test defn
                    pars = test[0].copy()
                    s_name = pars.pop('@S')
                    ps_test = [pars] + list(test[1:])
                    # build the P@S model
                    s_info = load_model_info(s_name)
                    #print("in run_all: s_info:", s_info)
                    ps_info = product.make_product_info(self.info, s_info)
                    ps_model = build_model(ps_info, dtype=self.dtype,
                                           platform=self.platform)
                    # run the tests
                    results.append(self.run_one(ps_model, ps_test))

                if self.stash:
                    for test, target, actual in zip(tests, self.stash[0], results):
                        assert np.all(abs(target-actual) < 5e-5*abs(actual)), \
                            ("GPU/CPU comparison expected %s but got %s for %s"
                             % (target, actual, test[0]))
                else:
                    self.stash.append(results)

                # Check for missing tests.  Only do so for the "dll" tests
                # to reduce noise from both opencl and cuda, and because
                # python kernels use platform="dll".
                if self.platform == "dll":
                    missing = []
                    ## Uncomment the following to require test cases
                    #missing = self._find_missing_tests()
                    if missing:
                        raise ValueError("Missing tests for "+", ".join(missing))

            except:
                annotate_exception(self.test_name)
                raise

        def _find_missing_tests(self):
            # type: () -> List[str]
            """make sure there are 1D and 2D tests as appropriate"""
            model_has_1D = True
            model_has_2D = any(p.type == 'orientation'
                               for p in self.info.parameters.kernel_parameters)

            # Lists of tests that have a result that is not None
            single = [test for test in self.info.tests
                      if not isinstance(test[2], list) and test[2] is not None]
            tests_has_1D_single = any(isinstance(test[1], float) for test in single)
            tests_has_2D_single = any(isinstance(test[1], tuple) for test in single)

            multiple = [test for test in self.info.tests
                        if isinstance(test[2], list)
                        and not all(result is None for result in test[2])]
            tests_has_1D_multiple = any(isinstance(test[1][0], float)
                                        for test in multiple)
            tests_has_2D_multiple = any(isinstance(test[1][0], tuple)
                                        for test in multiple)

            missing = []
            if model_has_1D and not (tests_has_1D_single or tests_has_1D_multiple):
                missing.append("1D")
            if model_has_2D and not (tests_has_2D_single or tests_has_2D_multiple):
                missing.append("2D")

            return missing

        def run_one(self, model, test):
            # type: (KernelModel, TestCondition) -> np.ndarray
            """Run a single test case."""
            user_pars, x, y = test[:3]
            pars = expand_pars(self.info.parameters, user_pars)
            invalid = invalid_pars(self.info.parameters, pars)
            if invalid:
                raise ValueError("Unknown parameters in test: " + ", ".join(invalid))

            if not isinstance(y, list):
                y = [y]
            if not isinstance(x, list):
                x = [x]

            self.assertEqual(len(y), len(x))

            if isinstance(x[0], tuple):
                qx, qy = zip(*x)
                q_vectors = [np.array(qx), np.array(qy)]
            else:
                q_vectors = [np.array(x)]

            kernel = model.make_kernel(q_vectors)
            if len(test) == 3:
                actual = call_kernel(kernel, pars)
                self._check_vectors(x, y, actual, 'I')
                return actual
            else:
                y1 = y
                y2 = test[3] if isinstance(test[3], list) else [test[3]]
                F, Fsq, R_eff, volume, volume_ratio = call_Fq(kernel, pars)
                if F is not None:  # F is None for models that define Iq rather than Fq
                    self._check_vectors(x, y1, F, 'F')
                self._check_vectors(x, y2, Fsq, 'F^2')
                self._check_scalar(test[4], R_eff, 'R_eff')
                self._check_scalar(test[5], volume, 'volume')
                self._check_scalar(test[6], volume_ratio, 'form:shell ratio')
                return Fsq

        def _check_scalar(self, target, actual, name):
            if target is None:
                # smoke test --- make sure it runs and produces a value
                self.assertTrue(not np.isnan(actual),
                                'invalid %s: %s' % (name, actual))
            elif np.isnan(target):
                # make sure nans match
                self.assertTrue(np.isnan(actual),
                                '%s: expected:%s; actual:%s'
                                % (name, target, actual))
            else:
                # is_near does not work for infinite values, so also test
                # for exact values.
                self.assertTrue(target == actual or is_near(target, actual, 5),
                                '%s: expected:%s; actual:%s'
                                % (name, target, actual))

        def _check_vectors(self, x, target, actual, name='I'):
            self.assertTrue(len(actual) > 0,
                            '%s(...) expected return'%name)
            if target is None:
                return
            self.assertEqual(len(target), len(actual),
                             '%s(...) returned wrong length'%name)
            for xi, yi, actual_yi in zip(x, target, actual):
                if yi is None:
                    # smoke test --- make sure it runs and produces a value
                    self.assertTrue(not np.isnan(actual_yi),
                                    'invalid %s(%s): %s' % (name, xi, actual_yi))
                elif np.isnan(yi):
                    # make sure nans match
                    self.assertTrue(np.isnan(actual_yi),
                                    '%s(%s): expected:%s; actual:%s'
                                    % (name, xi, yi, actual_yi))
                else:
                    # is_near does not work for infinite values, so also test
                    # for exact values.
                    self.assertTrue(yi == actual_yi or is_near(yi, actual_yi, 5),
                                    '%s(%s): expected:%s; actual:%s'
                                    % (name, xi, yi, actual_yi))

    return ModelTestCase

def invalid_pars(partable, pars):
    # type: (ParameterTable, Dict[str, float]) -> List[str]
    """
    Return a list of parameter names that are not part of the model.
    """
    names = set(p.id for p in partable.call_parameters)
    invalid = []
    for par in sorted(pars.keys()):
        # special handling of R_eff mode, which is not a usual parameter
        if par == product.RADIUS_MODE_ID:
            continue
        if par == product.RADIUS_TYPE_ID:
            continue
        if par == product.STRUCTURE_MODE_ID:
            continue
        if par == "radius_effective":    # test should not need this??
            continue
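        # Polydisperse parameters appear with the base parameter name plus
        # one of the suffixes _pd, _pd_n, _pd_nsigma or _pd_type, so strip
        # the suffix and validate the base name against the table.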
        parts = par.split('_pd')
        if len(parts) > 1 and parts[1] not in ("", "_n", "_nsigma", "_type"):
            invalid.append(par)
            continue
        if parts[0] not in names:
            invalid.append(par)
    return invalid


def is_near(target, actual, digits=5):
    # type: (float, float, int) -> bool
    """
    Returns true if *actual* is within *digits* significant digits of *target*.
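
    For example, with the default 5 digits::

        is_near(1.234567, 1.234570)  # True:  |diff|/10 ~ 3e-7 < 1.5e-5
        is_near(1.234567, 1.235800)  # False: |diff|/10 ~ 1.2e-4 > 1.5e-5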
    """
    import math
    if target == 0.:
        return actual == 0.
    shift = 10**math.ceil(math.log10(np.abs(target)))
    return np.abs(target-actual)/shift < 1.5*10**-digits

# CRUFT: old interface; should be deprecated and removed
def run_one(model_name):
    # type: (str) -> str
    """
    [Deprecated] Run the tests associated with *model_name*.

    Use the following instead::

        success, output = check_model(load_model_info(model_name))
    """
    # msg = "use check_model(model_info) rather than run_one(model_name)"
    # warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
    try:
        model_info = load_model_info(model_name)
    except Exception:
        output = traceback.format_exc()
        return output

    _, output = check_model(model_info)
    return output

def check_model(model_info):
    # type: (ModelInfo) -> Tuple[bool, str]
    """
    Run the tests for a single model, capturing the output.

    Returns success status and the output string.
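
    A minimal usage sketch::

        success, output = check_model(load_model_info("sphere"))
        if not success:
            print(output)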
    """
    # Note that running main() directly did not work from within the
    # wxPython pycrust console.  Instead of the results appearing in the
    # window they were printed to the underlying console.
    from unittest.runner import TextTestResult, _WritelnDecorator

    # Build an object to capture and print the test results
    stream = _WritelnDecorator(StringIO())  # Add writeln() method to stream
    verbosity = 2
    descriptions = True
    result = TextTestResult(stream, descriptions, verbosity)

    # Build a test suite containing just the model
    loaders = ['opencl' if use_opencl() else 'cuda' if use_cuda() else 'dll']
    suite = unittest.TestSuite()
    _add_model_to_suite(loaders, suite, model_info)

    # Warn if there are no user defined tests.
    # Note: the test suite constructed above only has one test in it, which
    # runs through some smoke tests to make sure the model runs, then runs
    # through the input-output pairs given in the model definition file.  To
    # check if any such pairs are defined, therefore, we just need to check if
    # they are in the first test of the test suite.  We do this with an
    # iterator since we don't have direct access to the list of tests in the
    # test suite.
    # In Qt5 suite.run() will clear all tests in the suite after running
    # with no way of retaining them for the test below, so let's check
    # for user tests before running the suite.
    for test in suite:
        if not test.info.tests:
            stream.writeln("Note: %s has no user defined tests."%model_info.name)
        break
    else:
        stream.writeln("Note: no test suite created --- this should never happen")

    # Run the test suite
    suite.run(result)

    # Print the failures and errors
    for _, tb in result.errors:
        stream.writeln(tb)
    for _, tb in result.failures:
        stream.writeln(tb)

    output = stream.getvalue()
    stream.close()
    return result.wasSuccessful(), output


def model_tests():
    # type: () -> Iterator[Callable[[], None]]
    """
    Test runner visible to nosetests.

    Run "nosetests sasmodels" on the command line to invoke it.
    """
    loaders = ['dll']
    if use_opencl():
        loaders.append('opencl')
    if use_cuda():
        loaders.append('cuda')
    tests = make_suite(loaders, ['all'])
    def _build_test(test):
        # In order for nosetests to show the test name, wrap the test.run_all
        # method in a function that carries the test name, which will be
        # displayed when the test is run.  Do this in a helper function so
        # that it properly captures the context for tests that are captured
        # now and run later.  If done directly in the for loop, the loop
        # variable *test* would be shared amongst all the wrappers, and we
        # would end up repeatedly testing vesicle (the last model).
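        # For example, ``wrap = lambda: test.run_all()`` defined directly in
        # that loop would look up *test* when called rather than when defined,
        # so every wrapper would run the same final model; passing *test*
        # through this function's argument binds the current value instead.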

        # Note: in sasview sas.sasgui.perspectives.fitting.gpu_options
        # requires that the test.description field be set.
        wrap = lambda: test.run_all()
        wrap.description = test.test_name
        return wrap
        # The following would work with nosetests and pytest:
        #     return lambda name: test.run_all(), test.test_name

    for test in tests:
        yield _build_test(test)


def main():
    # type: () -> int
    """
    Run the model tests given on the command line.

    Returns 0 if success or 1 if any tests fail.
    """
    try:
        from xmlrunner import XMLTestRunner as TestRunner
        test_args = {'output': 'logs'}
    except ImportError:
        from unittest import TextTestRunner as TestRunner
        test_args = {}

    parser = argparse.ArgumentParser(description="Test SasModels Models")
    parser.add_argument("-v", "--verbose", action="store_const",
                        default=1, const=2, help="Use verbose output")
    parser.add_argument("-e", "--engine", default="all",
                        help="Engines on which to run the test.  "
                        "Valid values are opencl, cuda, dll, and all. "
                        "Defaults to all if no value is given")
    parser.add_argument("models", nargs="*",
                        help="The names of the models to be tested.  "
                        "If the first model is 'all', then all but the listed "
                        "models will be tested.  See core.list_models() for "
                        "names of other groups, such as 'py' or 'single'.")
    opts = parser.parse_args()

    if opts.engine == "opencl":
        if not use_opencl():
            print("opencl is not available")
            return 1
        loaders = ['opencl']
    elif opts.engine == "dll":
        loaders = ["dll"]
    elif opts.engine == "cuda":
        if not use_cuda():
            print("cuda is not available")
            return 1
        loaders = ['cuda']
    elif opts.engine == "all":
        loaders = ['dll']
        if use_opencl():
            loaders.append('opencl')
        if use_cuda():
            loaders.append('cuda')
    else:
        print("unknown engine " + opts.engine)
        return 1

    runner = TestRunner(verbosity=opts.verbose, **test_args)
    result = runner.run(make_suite(loaders, opts.models))
    return 1 if result.failures or result.errors else 0


if __name__ == "__main__":
    sys.exit(main())