source: sasmodels/sasmodels/model_test.py @ d8e81f7

# -*- coding: utf-8 -*-
"""
Run model unit tests.

Usage::

    python -m sasmodels.model_test [-v] [-e dll|opencl|cuda|all] model1 model2 ...

If model1 is 'all', then all models except those listed after it will be
tested.  Subgroups are also possible, such as 'py', 'single' or '1d'.  See
:func:`core.list_models` for details.
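
For example, to test just the cylinder and sphere models against the dll
engine (an illustrative invocation)::

    python -m sasmodels.model_test -e dll cylinder sphere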

Each model is tested using the default parameters at q=0.1, (qx, qy)=(0.1, 0.1),
and Fq is called to make sure R_eff, volume and volume ratio are computed.
The return values at these points are not considered.  The test is only to
verify that the models run to completion, and do not produce inf or NaN.

Tests are defined with the *tests* attribute in the model.py file.  *tests*
is a list of individual tests to run, where each test consists of the
parameter values for the test, the q-values and the expected results.  For
the effective radius test and volume ratio tests, use the extended output
form, which checks each output of kernel.Fq. For 1-D tests, either specify
the q value or a list of q-values, and the corresponding I(q) value, or
list of I(q) values.

That is::

    tests = [
        [ {parameters}, q, I(q)],
        [ {parameters}, [q], [I(q)] ],
        [ {parameters}, [q1, q2, ...], [I(q1), I(q2), ...]],

        [ {parameters}, (qx, qy), I(qx, qy)],
        [ {parameters}, [(qx1, qy1), (qx2, qy2), ...],
                        [I(qx1, qy1), I(qx2, qy2), ...]],

        [ {parameters}, q, F(q), F^2(q), R_eff, V, V_r ],
        ...
    ]

Parameters are *key:value* pairs, where key is one of the parameters of the
model and value is the value to use for the test.  Any parameters not given
in the parameter list will take on the default parameter value.

Precision defaults to 5 digits (relative).
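
Any expected value given as *None* is treated as a smoke test: the value is
computed and checked not to be NaN, but it is not compared against a
reference value.

A test can also be run against the form factor combined with a structure
factor by adding an *@S* entry to the parameter dictionary giving the name
of the structure factor model (see the '@S' handling in *run_all* below).
An illustrative entry, using the *hardsphere* structure factor, would be::

    tests = [
        [ {'@S': 'hardsphere', parameters}, q, I(q) ],
    ]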
46"""
47from __future__ import print_function
48
49import argparse
50import sys
51import unittest
52import traceback
53
54try:
55    from StringIO import StringIO
56except ImportError:
57    # StringIO.StringIO renamed to io.StringIO in Python 3
58    # Note: io.StringIO exists in python 2, but using unicode instead of str
59    from io import StringIO
60
61import numpy as np  # type: ignore
62
63from .core import list_models, load_model_info, build_model
64from .direct_model import call_kernel, call_Fq
65from .exception import annotate_exception
66from .modelinfo import expand_pars
67from .kernelcl import use_opencl
68from .kernelcuda import use_cuda
69from . import product
70
71# pylint: disable=unused-import
72try:
73    from typing import List, Iterator, Callable
74except ImportError:
75    pass
76else:
77    from .modelinfo import ParameterTable, ParameterSet, TestCondition, ModelInfo
78    from .kernel import KernelModel
79# pylint: enable=unused-import
80
81def make_suite(loaders, models):
82    # type: (List[str], List[str]) -> unittest.TestSuite
83    """
84    Construct the pyunit test suite.
85
86    *loaders* is the list of kernel drivers to use (dll, opencl or cuda).
    For python models the python driver is always used.

    *models* is the list of models to test, or *["all"]* to test all models.
    """
    suite = unittest.TestSuite()

    try:
        # See if the first model parses as a model group
        group = list_models(models[0])
        skip = models[1:]
        models = group
    except Exception:
        skip = []
    for model_name in models:
        if model_name not in skip:
            model_info = load_model_info(model_name)
            _add_model_to_suite(loaders, suite, model_info)

    return suite

def _add_model_to_suite(loaders, suite, model_info):
    ModelTestCase = _hide_model_case_from_nose()

    #print('------')
    #print('found tests in', model_name)
    #print('------')

    # if ispy then use the dll loader to call pykernel
    # don't try to call cl kernel since it will not be
    # available in some environments.
    is_py = callable(model_info.Iq)

    # Some OpenCL drivers seem to be flaky, and are not producing the
    # expected result.  Since we don't have known test values yet for
    # all of our models, we are instead going to compare the results
    # for the 'smoke test' (that is, evaluation at q=0.1 for the default
    # parameters just to see that the model runs to completion) between
    # the OpenCL and the DLL.  To do this, we define a 'stash' which is
    # shared between OpenCL and DLL tests.  This is just a list.  If the
    # list is empty (which it will be when DLL runs, if the DLL runs
    # first), then the results are appended to the list.  If the list
    # is not empty (which it will be when OpenCL runs second), the results
    # are compared to the results stored in the first element of the list.
    # This is a horrible stateful hack which only makes sense because the
    # test suite is thrown away after being run once.
    stash = []

    if is_py:  # kernel implemented in python
        test_name = "%s-python"%model_info.name
        test_method_name = "test_%s_python" % model_info.id
        test = ModelTestCase(test_name, model_info,
                             test_method_name,
                             platform="dll",  # so that the dll loader runs the python kernel
                             dtype="double",
                             stash=stash)
        suite.addTest(test)
    else:   # kernel implemented in C

        # test using dll if desired
        if 'dll' in loaders or not use_opencl():
            test_name = "%s-dll"%model_info.name
            test_method_name = "test_%s_dll" % model_info.id
            test = ModelTestCase(test_name, model_info,
                                 test_method_name,
                                 platform="dll",
                                 dtype="double",
                                 stash=stash)
            suite.addTest(test)

        # test using opencl if desired and available
        if 'opencl' in loaders and use_opencl():
            test_name = "%s-opencl"%model_info.name
            test_method_name = "test_%s_opencl" % model_info.id
            # Using dtype=None so that the models that are only
            # correct for double precision are not tested using
            # single precision.  The choice is determined by the
            # presence of *single=False* in the model file.
            test = ModelTestCase(test_name, model_info,
                                 test_method_name,
                                 platform="ocl", dtype=None,
                                 stash=stash)
            #print("defining", test_name)
            suite.addTest(test)

        # test using cuda if desired and available
        if 'cuda' in loaders and use_cuda():
            test_name = "%s-cuda" % model_info.id
            test_method_name = "test_%s_cuda" % model_info.id
            # Using dtype=None so that the models that are only
            # correct for double precision are not tested using
            # single precision.  The choice is determined by the
            # presence of *single=False* in the model file.
            test = ModelTestCase(test_name, model_info,
                                 test_method_name,
                                 platform="cuda", dtype=None,
                                 stash=stash)
            #print("defining", test_name)
            suite.addTest(test)


def _hide_model_case_from_nose():
    # type: () -> type
    class ModelTestCase(unittest.TestCase):
        """
        Test suite for a particular model with a particular kernel driver.

        The test suite runs a simple smoke test to make sure the model
        functions, then runs the list of tests at the bottom of the model
        description file.
        """
        def __init__(self, test_name, model_info, test_method_name,
                     platform, dtype, stash):
            # type: (str, ModelInfo, str, str, DType, List[Any]) -> None
            self.test_name = test_name
            self.info = model_info
            self.platform = platform
            self.dtype = dtype
            self.stash = stash  # container for the results of the first run

            setattr(self, test_method_name, self.run_all)
            unittest.TestCase.__init__(self, test_method_name)

        def run_all(self):
            # type: () -> None
            """
            Run all the tests in the test suite, including smoke tests.
            """
            smoke_tests = [
                # test validity at reasonable values
                ({}, 0.1, None),
                ({}, (0.1, 0.1), None),
                # test validity at q = 0
                #({}, 0.0, None),
                #({}, (0.0, 0.0), None),
                # test vector form
                ({}, [0.001, 0.01, 0.1], [None]*3),
                ({}, [(0.1, 0.1)]*2, [None]*2),
                # test that Fq will run, and return R_eff, V, V_r
                ({}, 0.1, None, None, None, None, None),
                ]
            tests = smoke_tests
            #tests = []
            if self.info.tests is not None:
                tests += self.info.tests
            S_tests = [test for test in tests if '@S' in test[0]]
            P_tests = [test for test in tests if '@S' not in test[0]]
            try:
                model = build_model(self.info, dtype=self.dtype,
                                    platform=self.platform)
                results = [self.run_one(model, test) for test in P_tests]
                for test in S_tests:
                    # pull the S model name out of the test defn
                    pars = test[0].copy()
                    s_name = pars.pop('@S')
                    ps_test = [pars] + list(test[1:])
                    #print("PS TEST PARAMS!!!",ps_test)
                    # build the P@S model
                    s_info = load_model_info(s_name)
                    ps_info = product.make_product_info(self.info, s_info)
                    ps_model = build_model(ps_info, dtype=self.dtype,
                                           platform=self.platform)
                    # run the tests
                    results.append(self.run_one(ps_model, ps_test))

                if self.stash:
                    for test, target, actual in zip(tests, self.stash[0], results):
                        assert np.all(abs(target-actual) < 5e-5*abs(actual)), \
                            ("GPU/CPU comparison expected %s but got %s for %s"
                             % (target, actual, test[0]))
                else:
                    self.stash.append(results)

                # Check for missing tests.  Only do so for the "dll" tests
                # to reduce noise from both opencl and cuda, and because
                # python kernels use platform="dll".
                if self.platform == "dll":
                    missing = []
                    ## Uncomment the following to require test cases
                    #missing = self._find_missing_tests()
                    if missing:
                        raise ValueError("Missing tests for "+", ".join(missing))

            except:
                annotate_exception(self.test_name)
                raise

        def _find_missing_tests(self):
            # type: () -> List[str]
            """make sure there are 1D and 2D tests as appropriate"""
            model_has_1D = True
            model_has_2D = any(p.type == 'orientation'
                               for p in self.info.parameters.kernel_parameters)

            # Lists of tests that have a result that is not None
            single = [test for test in self.info.tests
                      if not isinstance(test[2], list) and test[2] is not None]
            tests_has_1D_single = any(isinstance(test[1], float) for test in single)
            tests_has_2D_single = any(isinstance(test[1], tuple) for test in single)

            multiple = [test for test in self.info.tests
                        if isinstance(test[2], list)
                        and not all(result is None for result in test[2])]
            tests_has_1D_multiple = any(isinstance(test[1][0], float)
                                        for test in multiple)
            tests_has_2D_multiple = any(isinstance(test[1][0], tuple)
                                        for test in multiple)

            missing = []
            if model_has_1D and not (tests_has_1D_single or tests_has_1D_multiple):
                missing.append("1D")
            if model_has_2D and not (tests_has_2D_single or tests_has_2D_multiple):
                missing.append("2D")

            return missing

        def run_one(self, model, test):
            # type: (KernelModel, TestCondition) -> np.ndarray
            """Run a single test case."""
            user_pars, x, y = test[:3]
            #print("PS MODEL PARAMETERS:",[p.id for p in model.info.parameters.call_parameters])
            pars = expand_pars(model.info.parameters, user_pars)
            invalid = invalid_pars(model.info.parameters, pars)
            if invalid:
                raise ValueError("Unknown parameters in test: " + ", ".join(invalid))

            if not isinstance(y, list):
                y = [y]
            if not isinstance(x, list):
                x = [x]

            self.assertEqual(len(y), len(x))

            if isinstance(x[0], tuple):
                qx, qy = zip(*x)
                q_vectors = [np.array(qx), np.array(qy)]
            else:
                q_vectors = [np.array(x)]

            kernel = model.make_kernel(q_vectors)
            if len(test) == 3:
                actual = call_kernel(kernel, pars)
                self._check_vectors(x, y, actual, 'I')
                return actual
            else:
                y1 = y
                y2 = test[3] if isinstance(test[3], list) else [test[3]]
                F, Fsq, R_eff, volume, volume_ratio = call_Fq(kernel, pars)
                if F is not None:  # F is None for models with Iq instead of Fq
                    self._check_vectors(x, y1, F, 'F')
                self._check_vectors(x, y2, Fsq, 'F^2')
                self._check_scalar(test[4], R_eff, 'R_eff')
                self._check_scalar(test[5], volume, 'volume')
                self._check_scalar(test[6], volume_ratio, 'form:shell ratio')
                return Fsq

        def _check_scalar(self, target, actual, name):
            if target is None:
                # smoke test --- make sure it runs and produces a value
                self.assertTrue(not np.isnan(actual),
                                'invalid %s: %s' % (name, actual))
            elif np.isnan(target):
                # make sure nans match
                self.assertTrue(np.isnan(actual),
                                '%s: expected:%s; actual:%s'
                                % (name, target, actual))
            else:
                # is_near does not work for infinite values, so also test
                # for exact values.
                self.assertTrue(target == actual or is_near(target, actual, 5),
                                '%s: expected:%s; actual:%s'
                                % (name, target, actual))

        def _check_vectors(self, x, target, actual, name='I'):
            self.assertTrue(len(actual) > 0,
                            '%s(...) expected return'%name)
            if target is None:
                return
            self.assertEqual(len(target), len(actual),
                             '%s(...) returned wrong length'%name)
            for xi, yi, actual_yi in zip(x, target, actual):
                if yi is None:
                    # smoke test --- make sure it runs and produces a value
                    self.assertTrue(not np.isnan(actual_yi),
                                    'invalid %s(%s): %s' % (name, xi, actual_yi))
                elif np.isnan(yi):
                    # make sure nans match
                    self.assertTrue(np.isnan(actual_yi),
                                    '%s(%s): expected:%s; actual:%s'
                                    % (name, xi, yi, actual_yi))
                else:
                    # is_near does not work for infinite values, so also test
                    # for exact values.
                    self.assertTrue(yi == actual_yi or is_near(yi, actual_yi, 5),
                                    '%s(%s); expected:%s; actual:%s'
                                    % (name, xi, yi, actual_yi))

    return ModelTestCase

def invalid_pars(partable, pars):
    # type: (ParameterTable, Dict[str, float]) -> List[str]
    """
    Return a list of parameter names that are not part of the model.
    """
    names = set(p.id for p in partable.call_parameters)
    invalid = []
    for par in sorted(pars.keys()):
        # Ignore the R_eff mode parameter when checking for valid parameters.
        # It is an allowed parameter for a model even though it does not exist
        # in the parameter table.  The call_Fq() function pops it from the
        # parameter list and sends it directly to kernel.Fq().
        if par == product.RADIUS_MODE_ID:
            continue
        parts = par.split('_pd')
        if len(parts) > 1 and parts[1] not in ("", "_n", "_nsigma", "_type"):
            invalid.append(par)
            continue
        if parts[0] not in names:
            invalid.append(par)
    return invalid


def is_near(target, actual, digits=5):
    # type: (float, float, int) -> bool
    """
    Returns true if *actual* is within *digits* significant digits of *target*.
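
    For example, with the default of 5 digits (illustrative values)::

        is_near(10.0, 10.00004)   # True: agrees to 5 significant digits
        is_near(10.0, 10.1)       # False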
    """
    import math
    if target == 0.:
        return actual == 0.
    shift = 10**math.ceil(math.log10(np.abs(target)))
    return np.abs(target-actual)/shift < 1.5*10**-digits

# CRUFT: old interface; should be deprecated and removed
def run_one(model_name):
    # type: (str) -> str
    """
    [Deprecated] Run the tests associated with *model_name*.

    Use the following instead::

        success, output = check_model(load_model_info(model_name))
    """
    # msg = "use check_model(model_info) rather than run_one(model_name)"
    # warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
    try:
        model_info = load_model_info(model_name)
    except Exception:
        output = traceback.format_exc()
        return output

    _, output = check_model(model_info)
    return output

def check_model(model_info):
    # type: (ModelInfo) -> Tuple[bool, str]
    """
    Run the tests for a single model, capturing the output.

    Returns success status and the output string.
    """
    # Note that running main() directly did not work from within the
    # wxPython pycrust console.  Instead of the results appearing in the
    # window they were printed to the underlying console.
    from unittest.runner import TextTestResult, _WritelnDecorator

    # Build an object to capture and print the test results
    stream = _WritelnDecorator(StringIO())  # Add writeln() method to stream
    verbosity = 2
    descriptions = True
    result = TextTestResult(stream, descriptions, verbosity)

    # Build a test suite containing just the model
    loaders = ['opencl' if use_opencl() else 'cuda' if use_cuda() else 'dll']
    suite = unittest.TestSuite()
    _add_model_to_suite(loaders, suite, model_info)

    # Warn if there are no user defined tests.
    # Note: the test suite constructed above only has one test in it, which
    # runs through some smoke tests to make sure the model runs, then runs
    # through the input-output pairs given in the model definition file.  To
    # check if any such pairs are defined, therefore, we just need to check if
    # they are in the first test of the test suite.  We do this with an
    # iterator since we don't have direct access to the list of tests in the
    # test suite.
    # In Qt5 suite.run() will clear all tests in the suite after running
    # with no way of retaining them for the test below, so let's check
    # for user tests before running the suite.
    for test in suite:
        if not test.info.tests:
            stream.writeln("Note: %s has no user defined tests."%model_info.name)
        break
    else:
        stream.writeln("Note: no test suite created --- this should never happen")

    # Run the test suite
    suite.run(result)

    # Print the failures and errors
    for _, tb in result.errors:
        stream.writeln(tb)
    for _, tb in result.failures:
        stream.writeln(tb)

    output = stream.getvalue()
    stream.close()
    return result.wasSuccessful(), output


def model_tests():
    # type: () -> Iterator[Callable[[], None]]
    """
    Test runner visible to nosetests.

    Run "nosetests sasmodels" on the command line to invoke it.
    """
    loaders = ['dll']
    if use_opencl():
        loaders.append('opencl')
    if use_cuda():
        loaders.append('cuda')
    tests = make_suite(loaders, ['all'])
    def _build_test(test):
        # In order for nosetest to show the test name, wrap the test.run_all
        # instance in a function that takes the test name as a parameter which
        # will be displayed when the test is run.  Do this in a separate
        # function so that it properly captures the context for tests that are
        # captured now but run later.  If done directly in the for loop, the
        # looping variable test would be shared amongst all the tests, and we
        # would end up repeatedly testing only the last model (vesicle).
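        # (A minimal illustration of the pitfall, separate from the test
        # machinery: [lambda: t for t in "abc"] builds three lambdas that
        # all return "c", since each one looks up t only when it is called,
        # after the loop has finished.)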

        # Note: in sasview sas.sasgui.perspectives.fitting.gpu_options
        # requires that the test.description field be set.
        wrap = lambda: test.run_all()
        wrap.description = test.test_name
        return wrap
        # The following would work with nosetests and pytest:
        #     return lambda name: test.run_all(), test.test_name

    for test in tests:
        yield _build_test(test)


def main():
    # type: () -> int
    """
    Run the tests for the models given on the command line.

    Returns 0 if success or 1 if any tests fail.
    """
    try:
        from xmlrunner import XMLTestRunner as TestRunner
        test_args = {'output': 'logs'}
    except ImportError:
        from unittest import TextTestRunner as TestRunner
        test_args = {}

    parser = argparse.ArgumentParser(description="Test SasModels Models")
    parser.add_argument("-v", "--verbose", action="store_const",
                        default=1, const=2, help="Use verbose output")
    parser.add_argument("-e", "--engine", default="all",
                        help="Engines on which to run the test.  "
                        "Valid values are opencl, cuda, dll, and all. "
                        "Defaults to all if no value is given")
    parser.add_argument("models", nargs="*",
                        help="The names of the models to be tested.  "
                        "If the first model is 'all', then all but the listed "
                        "models will be tested.  See core.list_models() for "
                        "names of other groups, such as 'py' or 'single'.")
    opts = parser.parse_args()

    if opts.engine == "opencl":
        if not use_opencl():
            print("opencl is not available")
            return 1
        loaders = ['opencl']
    elif opts.engine == "dll":
        loaders = ["dll"]
    elif opts.engine == "cuda":
        if not use_cuda():
            print("cuda is not available")
            return 1
        loaders = ['cuda']
    elif opts.engine == "all":
        loaders = ['dll']
        if use_opencl():
            loaders.append('opencl')
        if use_cuda():
            loaders.append('cuda')
    else:
        print("unknown engine " + opts.engine)
        return 1

    runner = TestRunner(verbosity=opts.verbose, **test_args)
    result = runner.run(make_suite(loaders, opts.models))
    return 1 if result.failures or result.errors else 0


if __name__ == "__main__":
    sys.exit(main())