source: sasmodels/sasmodels/model_test.py @ 81751c2

# -*- coding: utf-8 -*-
"""
Run model unit tests.

Usage::

    python -m sasmodels.model_test [opencl|cuda|dll] model1 model2 ...

    if model1 is 'all', then all models except those listed after it
    will be tested

Each model is tested using the default parameters at q=0.1, (qx, qy)=(0.1, 0.1),
and Fq is called to make sure R_eff, volume and volume ratio are computed.
The return values at these points are not considered.  The test is only to
verify that the models run to completion and do not produce inf or NaN.

Tests are defined with the *tests* attribute in the model.py file.  *tests*
is a list of individual tests to run, where each test consists of the
parameter values for the test, the q-values and the expected results.  For
the effective radius test and volume ratio tests, use the extended output
form, which checks each output of kernel.Fq.  For 1-D tests, either specify
the q value or a list of q-values, and the corresponding I(q) value or
list of I(q) values.

That is::

    tests = [
        [ {parameters}, q, I(q)],
        [ {parameters}, [q], [I(q)] ],
        [ {parameters}, [q1, q2, ...], [I(q1), I(q2), ...]],

        [ {parameters}, (qx, qy), I(qx, qy)],
        [ {parameters}, [(qx1, qy1), (qx2, qy2), ...],
                        [I(qx1, qy1), I(qx2, qy2), ...]],

        [ {parameters}, q, F(q), F^2(q), R_eff, V, V_r ],
        ...
    ]

Parameters are *key:value* pairs, where key is one of the parameters of the
model and value is the value to use for the test.  Any parameters not given
in the parameter list will take on the default parameter value.

Precision defaults to 5 digits (relative).
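
For example, a hypothetical model with *radius* and *sld* parameters might
include (parameter names and expected values here are illustrative, not
taken from any real model file)::

    tests = [
        [{"radius": 30.0, "sld": 1.0}, 0.01, 1022.0],
        [{"radius": 30.0}, [0.01, 0.1], [1022.0, 52.0]],
    ]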
44"""
45from __future__ import print_function
46
47import sys
48import unittest
49
50try:
51    from StringIO import StringIO
52except ImportError:
53    # StringIO.StringIO renamed to io.StringIO in Python 3
54    # Note: io.StringIO exists in python 2, but using unicode instead of str
55    from io import StringIO
56
57import numpy as np  # type: ignore
58
59from . import core
60from .core import list_models, load_model_info, build_model
61from .direct_model import call_kernel, call_Fq
62from .exception import annotate_exception
63from .modelinfo import expand_pars
64from .kernelcl import use_opencl
65from .kernelcuda import use_cuda
66from . import product
67
68# pylint: disable=unused-import
69try:
70    from typing import List, Iterator, Callable
71except ImportError:
72    pass
73else:
74    from .modelinfo import ParameterTable, ParameterSet, TestCondition, ModelInfo
75    from .kernel import KernelModel
76# pylint: enable=unused-import
77
78
79def make_suite(loaders, models):
80    # type: (List[str], List[str]) -> unittest.TestSuite
81    """
82    Construct the pyunit test suite.
83
84    *loaders* is the list of kernel drivers to use (dll, opencl or cuda).
85    For python model the python driver is always used.
86
87    *models* is the list of models to test, or *["all"]* to test all models.
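
    For example (model names here are illustrative)::

        suite = make_suite(['dll'], ['sphere', 'cylinder'])
        unittest.TextTestRunner(verbosity=2).run(suite)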
88    """
89    ModelTestCase = _hide_model_case_from_nose()
90    suite = unittest.TestSuite()
91
92    if models[0] in core.KINDS:
93        skip = models[1:]
94        models = list_models(models[0])
95    else:
96        skip = []
97    for model_name in models:
98        if model_name in skip:
99            continue
100        model_info = load_model_info(model_name)
101
102        #print('------')
103        #print('found tests in', model_name)
104        #print('------')
105
106        # if ispy then use the dll loader to call pykernel
107        # don't try to call cl kernel since it will not be
108        # available in some environmentes.
109        is_py = callable(model_info.Iq)
110
111        # Some OpenCL drivers seem to be flaky, and are not producing the
112        # expected result.  Since we don't have known test values yet for
113        # all of our models, we are instead going to compare the results
114        # for the 'smoke test' (that is, evaluation at q=0.1 for the default
115        # parameters just to see that the model runs to completion) between
116        # the OpenCL and the DLL.  To do this, we define a 'stash' which is
117        # shared between OpenCL and DLL tests.  This is just a list.  If the
118        # list is empty (which it will be when DLL runs, if the DLL runs
119        # first), then the results are appended to the list.  If the list
120        # is not empty (which it will be when OpenCL runs second), the results
121        # are compared to the results stored in the first element of the list.
122        # This is a horrible stateful hack which only makes sense because the
123        # test suite is thrown away after being run once.
124        stash = []
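        # For example (illustrative): on the first (dll) run stash == [] and
        # the run appends its results; on the second (opencl) run stash holds
        # [dll_results] and the new results are compared against stash[0].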

        if is_py:  # kernel implemented in python
            test_name = "%s-python"%model_name
            test_method_name = "test_%s_python" % model_info.id
            test = ModelTestCase(test_name, model_info,
                                 test_method_name,
                                 platform="dll",  # so that the python kernel is used
                                 dtype="double",
                                 stash=stash)
            suite.addTest(test)
        else:   # kernel implemented in C

            # test using dll if desired
            if 'dll' in loaders:
                test_name = "%s-dll"%model_name
                test_method_name = "test_%s_dll" % model_info.id
                test = ModelTestCase(test_name, model_info,
                                     test_method_name,
                                     platform="dll",
                                     dtype="double",
                                     stash=stash)
                suite.addTest(test)

            # test using opencl if desired and available
            if 'opencl' in loaders and use_opencl():
                test_name = "%s-opencl"%model_name
                test_method_name = "test_%s_opencl" % model_info.id
                # Using dtype=None so that the models that are only
                # correct for double precision are not tested using
                # single precision.  The choice is determined by the
                # presence of *single=False* in the model file.
                test = ModelTestCase(test_name, model_info,
                                     test_method_name,
                                     platform="ocl", dtype=None,
                                     stash=stash)
                #print("defining", test_name)
                suite.addTest(test)

            # test using cuda if desired and available
            if 'cuda' in loaders and use_cuda():
                test_name = "%s-cuda"%model_name
                test_method_name = "test_%s_cuda" % model_info.id
                # Using dtype=None so that the models that are only
                # correct for double precision are not tested using
                # single precision.  The choice is determined by the
                # presence of *single=False* in the model file.
                test = ModelTestCase(test_name, model_info,
                                     test_method_name,
                                     platform="cuda", dtype=None,
                                     stash=stash)
                #print("defining", test_name)
                suite.addTest(test)

    return suite

def _hide_model_case_from_nose():
    # type: () -> type
    class ModelTestCase(unittest.TestCase):
        """
        Test suite for a particular model with a particular kernel driver.

        The test suite runs a simple smoke test to make sure the model
        functions, then runs the list of tests at the bottom of the model
        description file.
        """
        def __init__(self, test_name, model_info, test_method_name,
                     platform, dtype, stash):
            # type: (str, ModelInfo, str, str, DType, List[Any]) -> None
            self.test_name = test_name
            self.info = model_info
            self.platform = platform
            self.dtype = dtype
            self.stash = stash  # container for the results of the first run

            setattr(self, test_method_name, self.run_all)
            unittest.TestCase.__init__(self, test_method_name)

        def run_all(self):
            # type: () -> None
            """
            Run all the tests in the test suite, including smoke tests.
            """
            smoke_tests = [
                # test validity at reasonable values
                ({}, 0.1, None),
                ({}, (0.1, 0.1), None),
                # test validity at q = 0
                #({}, 0.0, None),
                #({}, (0.0, 0.0), None),
                # test vector form
                ({}, [0.001, 0.01, 0.1], [None]*3),
                ({}, [(0.1, 0.1)]*2, [None]*2),
                # test that Fq will run, and return R_eff, V, V_r
                ({}, 0.1, None, None, None, None, None),
                ]
            tests = smoke_tests
            #tests = []
            if self.info.tests is not None:
                tests += self.info.tests
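            # Tests with an '@S' entry in the parameter dict, e.g.
            # {'@S': 'hardsphere', ...} (structure factor name illustrative),
            # are run against the P@S product model built below rather than
            # against the bare form factor.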
            S_tests = [test for test in tests if '@S' in test[0]]
            P_tests = [test for test in tests if '@S' not in test[0]]
            try:
                model = build_model(self.info, dtype=self.dtype,
                                    platform=self.platform)
                results = [self.run_one(model, test) for test in P_tests]
                for test in S_tests:
                    # pull the S model name out of the test defn
                    pars = test[0].copy()
                    s_name = pars.pop('@S')
                    ps_test = [pars] + list(test[1:])
                    # build the P@S model
                    s_info = load_model_info(s_name)
                    ps_info = product.make_product_info(self.info, s_info)
                    ps_model = build_model(ps_info, dtype=self.dtype,
                                           platform=self.platform)
                    # run the tests
                    results.append(self.run_one(ps_model, ps_test))

                if self.stash:
                    for test, target, actual in zip(tests, self.stash[0], results):
                        assert np.all(abs(target-actual) < 5e-5*abs(actual)), \
                            ("GPU/CPU comparison expected %s but got %s for %s"
                             % (target, actual, test[0]))
                else:
                    self.stash.append(results)

                # Check for missing tests.  Only do so for the "dll" tests
                # to reduce noise from both opencl and cuda, and because
                # python kernels use platform="dll".
                if self.platform == "dll":
                    missing = []
                    ## Uncomment the following to require test cases
                    #missing = self._find_missing_tests()
                    if missing:
                        raise ValueError("Missing tests for "+", ".join(missing))

            except:
                annotate_exception(self.test_name)
                raise

        def _find_missing_tests(self):
            # type: () -> List[str]
            """make sure there are 1D and 2D tests as appropriate"""
            model_has_1D = True
            model_has_2D = any(p.type == 'orientation'
                               for p in self.info.parameters.kernel_parameters)

            # Lists of tests that have a result that is not None
            single = [test for test in self.info.tests
                      if not isinstance(test[2], list) and test[2] is not None]
            tests_has_1D_single = any(isinstance(test[1], float) for test in single)
            tests_has_2D_single = any(isinstance(test[1], tuple) for test in single)

            multiple = [test for test in self.info.tests
                        if isinstance(test[2], list)
                        and not all(result is None for result in test[2])]
            tests_has_1D_multiple = any(isinstance(test[1][0], float)
                                        for test in multiple)
            tests_has_2D_multiple = any(isinstance(test[1][0], tuple)
                                        for test in multiple)

            missing = []
            if model_has_1D and not (tests_has_1D_single or tests_has_1D_multiple):
                missing.append("1D")
            if model_has_2D and not (tests_has_2D_single or tests_has_2D_multiple):
                missing.append("2D")

            return missing

        def run_one(self, model, test):
            # type: (KernelModel, TestCondition) -> np.ndarray
            """Run a single test case."""
            user_pars, x, y = test[:3]
            pars = expand_pars(self.info.parameters, user_pars)
            invalid = invalid_pars(self.info.parameters, pars)
            if invalid:
                raise ValueError("Unknown parameters in test: " + ", ".join(invalid))

            if not isinstance(y, list):
                y = [y]
            if not isinstance(x, list):
                x = [x]

            self.assertEqual(len(y), len(x))

            if isinstance(x[0], tuple):
                qx, qy = zip(*x)
                q_vectors = [np.array(qx), np.array(qy)]
            else:
                q_vectors = [np.array(x)]

            kernel = model.make_kernel(q_vectors)
            if len(test) == 3:
                actual = call_kernel(kernel, pars)
                self._check_vectors(x, y, actual, 'I')
                return actual
            else:
                y1 = y
                y2 = test[3] if isinstance(test[3], list) else [test[3]]
                F1, F2, R_eff, volume, volume_ratio = call_Fq(kernel, pars)
                if F1 is not None:  # F1 is None for models with Iq instead of Fq
                    self._check_vectors(x, y1, F1, 'F')
                self._check_vectors(x, y2, F2, 'F^2')
                self._check_scalar(test[4], R_eff, 'R_eff')
                self._check_scalar(test[5], volume, 'volume')
                self._check_scalar(test[6], volume_ratio, 'form:shell ratio')
                return F2

        def _check_scalar(self, target, actual, name):
            if target is None:
                # smoke test --- make sure it runs and produces a value
                self.assertTrue(not np.isnan(actual),
                                'invalid %s: %s' % (name, actual))
            elif np.isnan(target):
                # make sure nans match
                self.assertTrue(np.isnan(actual),
                                '%s: expected:%s; actual:%s'
                                % (name, target, actual))
            else:
                # is_near does not work for infinite values, so also test
                # for exact values.
                self.assertTrue(target == actual or is_near(target, actual, 5),
                                '%s: expected:%s; actual:%s'
                                % (name, target, actual))

        def _check_vectors(self, x, target, actual, name='I'):
            self.assertTrue(len(actual) > 0,
                            '%s(...) expected return'%name)
            if target is None:
                return
            self.assertEqual(len(target), len(actual),
                             '%s(...) returned wrong length'%name)
            for xi, yi, actual_yi in zip(x, target, actual):
                if yi is None:
                    # smoke test --- make sure it runs and produces a value
                    self.assertTrue(not np.isnan(actual_yi),
                                    'invalid %s(%s): %s' % (name, xi, actual_yi))
                elif np.isnan(yi):
                    # make sure nans match
                    self.assertTrue(np.isnan(actual_yi),
                                    '%s(%s): expected:%s; actual:%s'
                                    % (name, xi, yi, actual_yi))
                else:
                    # is_near does not work for infinite values, so also test
                    # for exact values.
                    self.assertTrue(yi == actual_yi or is_near(yi, actual_yi, 5),
                                    '%s(%s): expected:%s; actual:%s'
                                    % (name, xi, yi, actual_yi))

    return ModelTestCase

def invalid_pars(partable, pars):
    # type: (ParameterTable, Dict[str, float]) -> List[str]
    """
    Return a list of parameter names that are not part of the model.
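
    For example, given a model with a *radius* parameter, a test that
    misspells it as *raduis* would have this function return
    ``['raduis']`` (names here are illustrative).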
380    """
381    names = set(p.id for p in partable.call_parameters)
382    invalid = []
383    for par in sorted(pars.keys()):
384        # special handling of R_eff mode, which is not a usual parameter
385        if par == 'radius_effective_type':
386            continue
387        parts = par.split('_pd')
388        if len(parts) > 1 and parts[1] not in ("", "_n", "nsigma", "type"):
389            invalid.append(par)
390            continue
391        if parts[0] not in names:
392            invalid.append(par)
393    return invalid


def is_near(target, actual, digits=5):
    # type: (float, float, int) -> bool
    """
    Returns true if *actual* is within *digits* significant digits of *target*.
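
    For example, ``is_near(100.0, 100.0001, 5)`` is True while
    ``is_near(100.0, 100.2, 5)`` is False.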
400    """
401    import math
402    shift = 10**math.ceil(math.log10(abs(target)))
403    return abs(target-actual)/shift < 1.5*10**-digits
404
405def run_one(model):
406    # type: (str) -> str
407    """
408    Run the tests for a single model, printing the results to stdout.
409
410    *model* can by a python file, which is handy for checking user defined
411    plugin models.
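
    For example (model name and path here are illustrative)::

        print(run_one('cylinder'))        # a built-in model
        print(run_one('./my_model.py'))   # a user defined plugin model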
412    """
413    # Note that running main() directly did not work from within the
414    # wxPython pycrust console.  Instead of the results appearing in the
415    # window they were printed to the underlying console.
416    from unittest.runner import TextTestResult, _WritelnDecorator
417
418    # Build a object to capture and print the test results
419    stream = _WritelnDecorator(StringIO())  # Add writeln() method to stream
420    verbosity = 2
421    descriptions = True
422    result = TextTestResult(stream, descriptions, verbosity)
423
424    # Build a test suite containing just the model
425    loader = 'opencl' if use_opencl() else 'cuda' if use_cuda() else 'dll'
426    models = [model]
427    try:
428        suite = make_suite([loader], models)
429    except Exception:
430        import traceback
431        stream.writeln(traceback.format_exc())
432        return
433
434    # Warn if there are no user defined tests.
435    # Note: the test suite constructed above only has one test in it, which
436    # runs through some smoke tests to make sure the model runs, then runs
437    # through the input-output pairs given in the model definition file.  To
438    # check if any such pairs are defined, therefore, we just need to check if
439    # they are in the first test of the test suite.  We do this with an
440    # iterator since we don't have direct access to the list of tests in the
441    # test suite.
442    # In Qt5 suite.run() will clear all tests in the suite after running
443    # with no way of retaining them for the test below, so let's check
444    # for user tests before running the suite.
445    for test in suite:
446        if not test.info.tests:
447            stream.writeln("Note: %s has no user defined tests."%model)
448        break
449    else:
450        stream.writeln("Note: no test suite created --- this should never happen")
451
452    # Run the test suite
453    suite.run(result)
454
455    # Print the failures and errors
456    for _, tb in result.errors:
457        stream.writeln(tb)
458    for _, tb in result.failures:
459        stream.writeln(tb)
460
461    output = stream.getvalue()
462    stream.close()
463    return output


def main(*models):
    # type: (*str) -> int
    """
    Run the tests for the given models.

    Returns 0 if success or 1 if any tests fail.
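
    For example (arguments here are illustrative)::

        sys.exit(main('-v', 'dll', 'sphere'))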
472    """
473    try:
474        from xmlrunner import XMLTestRunner as TestRunner
475        test_args = {'output': 'logs'}
476    except ImportError:
477        from unittest import TextTestRunner as TestRunner
478        test_args = {}
479
480    if models and models[0] == '-v':
481        verbosity = 2
482        models = models[1:]
483    else:
484        verbosity = 1
485    if models and models[0] == 'opencl':
486        if not use_opencl():
487            print("opencl is not available")
488            return 1
489        loaders = ['opencl']
490        models = models[1:]
491    elif models and models[0] == 'cuda':
492        if not use_cuda():
493            print("cuda is not available")
494            return 1
495        loaders = ['cuda']
496        models = models[1:]
497    elif models and models[0] == 'dll':
498        # TODO: test if compiler is available?
499        loaders = ['dll']
500        models = models[1:]
501    else:
502        loaders = ['dll']
503        if use_opencl():
504            loaders.append('opencl')
505        if use_cuda():
506            loaders.append('cuda')
507    if not models:
508        print("""\
509usage:
510  python -m sasmodels.model_test [-v] [opencl|cuda|dll] model1 model2 ...
511
512If -v is included on the command line, then use verbose output.
513
514If no platform is specified, then models will be tested with dll, and
515if available, OpenCL and CUDA; the compute target is ignored for pure python models.
516
517If model1 is 'all', then all except the remaining models will be tested.
518
519""")
520
521        return 1
522
523    runner = TestRunner(verbosity=verbosity, **test_args)
524    result = runner.run(make_suite(loaders, models))
525    return 1 if result.failures or result.errors else 0


def model_tests():
    # type: () -> Iterator[Callable[[], None]]
    """
    Test runner visible to nosetests.

    Run "nosetests sasmodels" on the command line to invoke it.
    """
    loaders = ['dll']
    if use_opencl():
        loaders.append('opencl')
    if use_cuda():
        loaders.append('cuda')
    tests = make_suite(loaders, ['all'])
    def build_test(test):
        # In order for nosetests to show the test name, wrap the
        # test.run_all bound method in a function whose description is the
        # test name, which will be displayed when the test is run.  Do this
        # in a separate function so that it properly captures *test* in a
        # closure for tests that are created now but run later.  If done
        # directly in the for loop, the loop variable *test* would be shared
        # amongst all the wrappers, and we would repeatedly test the last
        # model in the list (e.g., vesicle).

        # Note: in sasview sas.sasgui.perspectives.fitting.gpu_options
        # requires that the test.description field be set.
        wrap = lambda: test.run_all()
        wrap.description = test.test_name
        return wrap
        # The following would work with nosetests and pytest:
        #     return lambda name: test.run_all(), test.test_name

    for test in tests:
        yield build_test(test)


if __name__ == "__main__":
    sys.exit(main(*sys.argv[1:]))