source: sasmodels/sasmodels/model_test.py @ 5024a56

Last change: 5024a56, checked in by Paul Kienzle <pkienzle@…>, 5 years ago

Make sure that the label radius_effective_mode is used throughout

# -*- coding: utf-8 -*-
"""
Run model unit tests.

Usage::

    python -m sasmodels.model_test [opencl|cuda|dll] model1 model2 ...

    if model1 is 'all', then all models except the remaining named
    models will be tested

Each model is tested using the default parameters at q=0.1, (qx, qy)=(0.1, 0.1),
and Fq is called to make sure R_eff, volume and volume ratio are computed.
The return values at these points are not considered.  The test is only to
verify that the models run to completion, and do not produce inf or NaN.

Tests are defined with the *tests* attribute in the model.py file.  *tests*
is a list of individual tests to run, where each test consists of the
parameter values for the test, the q-values and the expected results.  For
the effective radius test and volume ratio tests, use the extended output
form, which checks each output of kernel.Fq.  For 1-D tests, either specify
the q value or a list of q-values, and the corresponding I(q) value, or
list of I(q) values.

That is::

    tests = [
        [ {parameters}, q, I(q)],
        [ {parameters}, [q], [I(q)] ],
        [ {parameters}, [q1, q2, ...], [I(q1), I(q2), ...]],

        [ {parameters}, (qx, qy), I(qx, qy)],
        [ {parameters}, [(qx1, qy1), (qx2, qy2), ...],
                        [I(qx1, qy1), I(qx2, qy2), ...]],

        [ {parameters}, q, F(q), F^2(q), R_eff, V, V_r ],
        ...
    ]

Parameters are *key:value* pairs, where key is one of the parameters of the
model and value is the value to use for the test.  Any parameters not given
in the parameter list will take on the default parameter value.

Precision defaults to 5 digits (relative).
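
For example, a hypothetical model file might define (parameter names and
target values here are illustrative, not real test data)::

    tests = [
        [{"radius": 30.0, "background": 0.001}, 0.1, 0.80105],
        [{"radius": 30.0}, [0.01, 0.1], [2.84753, 0.80105]],
    ]

A test can also name a structure factor with the special *@S* key, in which
case the test runs against the product model P@S (see run_all below)::

    tests = [
        [{"@S": "hardsphere", "radius": 30.0}, 0.1, None],
    ]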
44"""
from __future__ import print_function

import sys
import unittest
import traceback

try:
    from StringIO import StringIO
except ImportError:
    # StringIO.StringIO renamed to io.StringIO in Python 3
    # Note: io.StringIO exists in python 2, but using unicode instead of str
    from io import StringIO

import numpy as np  # type: ignore

from . import core
from .core import list_models, load_model_info, build_model
from .direct_model import call_kernel, call_Fq
from .exception import annotate_exception
from .modelinfo import expand_pars
from .kernelcl import use_opencl
from .kernelcuda import use_cuda
from . import product

# pylint: disable=unused-import
try:
    # Dict and Tuple are referenced in type comments below
    from typing import Dict, List, Tuple, Iterator, Callable
except ImportError:
    pass
else:
    from .modelinfo import ParameterTable, ParameterSet, TestCondition, ModelInfo
    from .kernel import KernelModel
# pylint: enable=unused-import

def make_suite(loaders, models):
    # type: (List[str], List[str]) -> unittest.TestSuite
    """
    Construct the pyunit test suite.

    *loaders* is the list of kernel drivers to use (dll, opencl or cuda).
    For python models the python driver is always used.

    *models* is the list of models to test, or *["all"]* to test all models.
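
    Example (an illustrative sketch; 'sphere' and 'cylinder' are standard
    models in the distribution)::

        suite = make_suite(['dll'], ['sphere', 'cylinder'])
        unittest.TextTestRunner(verbosity=2).run(suite)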
    """
    suite = unittest.TestSuite()

    if models[0] in core.KINDS:
        skip = models[1:]
        models = list_models(models[0])
    else:
        skip = []
    for model_name in models:
        if model_name not in skip:
            model_info = load_model_info(model_name)
            _add_model_to_suite(loaders, suite, model_info)

    return suite

def _add_model_to_suite(loaders, suite, model_info):
    ModelTestCase = _hide_model_case_from_nose()

    #print('------')
    #print('found tests in', model_info.name)
    #print('------')

    # If the kernel is implemented in python, use the dll loader so that
    # the python kernel is called; don't try to call the OpenCL kernel
    # since it will not be available in some environments.
    is_py = callable(model_info.Iq)

    # Some OpenCL drivers seem to be flaky, and are not producing the
    # expected result.  Since we don't have known test values yet for
    # all of our models, we are instead going to compare the results
    # for the 'smoke test' (that is, evaluation at q=0.1 for the default
    # parameters just to see that the model runs to completion) between
    # the OpenCL and the DLL.  To do this, we define a 'stash' which is
    # shared between OpenCL and DLL tests.  This is just a list.  If the
    # list is empty (which it will be when DLL runs, if the DLL runs
    # first), then the results are appended to the list.  If the list
    # is not empty (which it will be when OpenCL runs second), the results
    # are compared to the results stored in the first element of the list.
    # This is a horrible stateful hack which only makes sense because the
    # test suite is thrown away after being run once.
    stash = []

    if is_py:  # kernel implemented in python
        test_name = "%s-python" % model_info.name
        test_method_name = "test_%s_python" % model_info.id
        test = ModelTestCase(test_name, model_info,
                             test_method_name,
                             platform="dll",  # so that the python kernel is used
                             dtype="double",
                             stash=stash)
        suite.addTest(test)
    else:   # kernel implemented in C

        # test using dll if desired
        if 'dll' in loaders or not use_opencl():
            test_name = "%s-dll" % model_info.name
            test_method_name = "test_%s_dll" % model_info.id
            test = ModelTestCase(test_name, model_info,
                                 test_method_name,
                                 platform="dll",
                                 dtype="double",
                                 stash=stash)
            suite.addTest(test)

        # test using opencl if desired and available
        if 'opencl' in loaders and use_opencl():
            test_name = "%s-opencl" % model_info.name
            test_method_name = "test_%s_opencl" % model_info.id
            # Using dtype=None so that the models that are only
            # correct for double precision are not tested using
            # single precision.  The choice is determined by the
            # presence of *single=False* in the model file.
            test = ModelTestCase(test_name, model_info,
                                 test_method_name,
                                 platform="ocl", dtype=None,
                                 stash=stash)
            #print("defining", test_name)
            suite.addTest(test)

        # test using cuda if desired and available
        if 'cuda' in loaders and use_cuda():
            test_name = "%s-cuda" % model_info.name
            test_method_name = "test_%s_cuda" % model_info.id
            # Using dtype=None so that the models that are only
            # correct for double precision are not tested using
            # single precision.  The choice is determined by the
            # presence of *single=False* in the model file.
            test = ModelTestCase(test_name, model_info,
                                 test_method_name,
                                 platform="cuda", dtype=None,
                                 stash=stash)
            #print("defining", test_name)
            suite.addTest(test)


def _hide_model_case_from_nose():
    # type: () -> type
    class ModelTestCase(unittest.TestCase):
        """
        Test suite for a particular model with a particular kernel driver.

        The test suite runs a simple smoke test to make sure the model
        functions, then runs the list of tests at the bottom of the model
        description file.
        """
        def __init__(self, test_name, model_info, test_method_name,
                     platform, dtype, stash):
            # type: (str, ModelInfo, str, str, DType, List[Any]) -> None
            self.test_name = test_name
            self.info = model_info
            self.platform = platform
            self.dtype = dtype
            self.stash = stash  # container for the results of the first run

            setattr(self, test_method_name, self.run_all)
            unittest.TestCase.__init__(self, test_method_name)

        def run_all(self):
            # type: () -> None
            """
            Run all the tests in the test suite, including smoke tests.
            """
            smoke_tests = [
                # test validity at reasonable values
                ({}, 0.1, None),
                ({}, (0.1, 0.1), None),
                # test validity at q = 0
                #({}, 0.0, None),
                #({}, (0.0, 0.0), None),
                # test vector form
                ({}, [0.001, 0.01, 0.1], [None]*3),
                ({}, [(0.1, 0.1)]*2, [None]*2),
                # test that Fq will run, and return R_eff, V, V_r
                ({}, 0.1, None, None, None, None, None),
                ]
            tests = smoke_tests
            #tests = []
            if self.info.tests is not None:
                tests += self.info.tests
            S_tests = [test for test in tests if '@S' in test[0]]
            P_tests = [test for test in tests if '@S' not in test[0]]
            try:
                model = build_model(self.info, dtype=self.dtype,
                                    platform=self.platform)
                results = [self.run_one(model, test) for test in P_tests]
                for test in S_tests:
                    # pull the S model name out of the test definition
                    pars = test[0].copy()
                    s_name = pars.pop('@S')
                    ps_test = [pars] + list(test[1:])
                    # build the P@S model
                    s_info = load_model_info(s_name)
                    ps_info = product.make_product_info(self.info, s_info)
                    ps_model = build_model(ps_info, dtype=self.dtype,
                                           platform=self.platform)
                    # run the tests
                    results.append(self.run_one(ps_model, ps_test))

                if self.stash:
                    for test, target, actual in zip(tests, self.stash[0], results):
                        assert np.all(abs(target-actual) < 5e-5*abs(actual)), \
                            ("GPU/CPU comparison expected %s but got %s for %s"
                             % (target, actual, test[0]))
                else:
                    self.stash.append(results)

                # Check for missing tests.  Only do so for the "dll" tests
                # to reduce noise from both opencl and cuda, and because
                # python kernels use platform="dll".
                if self.platform == "dll":
                    missing = []
                    ## Uncomment the following to require test cases
                    #missing = self._find_missing_tests()
                    if missing:
                        raise ValueError("Missing tests for "+", ".join(missing))

            except:
                annotate_exception(self.test_name)
                raise

        def _find_missing_tests(self):
            # type: () -> List[str]
            """make sure there are 1D and 2D tests as appropriate"""
            model_has_1D = True
            model_has_2D = any(p.type == 'orientation'
                               for p in self.info.parameters.kernel_parameters)

            # Lists of tests that have a result that is not None
            single = [test for test in self.info.tests
                      if not isinstance(test[2], list) and test[2] is not None]
            tests_has_1D_single = any(isinstance(test[1], float) for test in single)
            tests_has_2D_single = any(isinstance(test[1], tuple) for test in single)

            multiple = [test for test in self.info.tests
                        if isinstance(test[2], list)
                        and not all(result is None for result in test[2])]
            tests_has_1D_multiple = any(isinstance(test[1][0], float)
                                        for test in multiple)
            tests_has_2D_multiple = any(isinstance(test[1][0], tuple)
                                        for test in multiple)

            missing = []
            if model_has_1D and not (tests_has_1D_single or tests_has_1D_multiple):
                missing.append("1D")
            if model_has_2D and not (tests_has_2D_single or tests_has_2D_multiple):
                missing.append("2D")

            return missing

        def run_one(self, model, test):
            # type: (KernelModel, TestCondition) -> np.ndarray
            """Run a single test case."""
            user_pars, x, y = test[:3]
            pars = expand_pars(self.info.parameters, user_pars)
            invalid = invalid_pars(self.info.parameters, pars)
            if invalid:
                raise ValueError("Unknown parameters in test: " + ", ".join(invalid))

            if not isinstance(y, list):
                y = [y]
            if not isinstance(x, list):
                x = [x]

            self.assertEqual(len(y), len(x))

            if isinstance(x[0], tuple):
                qx, qy = zip(*x)
                q_vectors = [np.array(qx), np.array(qy)]
            else:
                q_vectors = [np.array(x)]

            kernel = model.make_kernel(q_vectors)
            if len(test) == 3:
                actual = call_kernel(kernel, pars)
                self._check_vectors(x, y, actual, 'I')
                return actual
            else:
                y1 = y
                y2 = test[3] if isinstance(test[3], list) else [test[3]]
                F1, F2, R_eff, volume, volume_ratio = call_Fq(kernel, pars)
                if F1 is not None:  # F1 is None for models with Iq instead of Fq
                    self._check_vectors(x, y1, F1, 'F')
                self._check_vectors(x, y2, F2, 'F^2')
                self._check_scalar(test[4], R_eff, 'R_eff')
                self._check_scalar(test[5], volume, 'volume')
                self._check_scalar(test[6], volume_ratio, 'form:shell ratio')
                return F2

        def _check_scalar(self, target, actual, name):
            if target is None:
                # smoke test --- make sure it runs and produces a value
                self.assertTrue(not np.isnan(actual),
                                'invalid %s: %s' % (name, actual))
            elif np.isnan(target):
                # make sure nans match
                self.assertTrue(np.isnan(actual),
                                '%s: expected:%s; actual:%s'
                                % (name, target, actual))
            else:
                # is_near does not work for infinite values, so also test
                # for exact values.
                self.assertTrue(target == actual or is_near(target, actual, 5),
                                '%s: expected:%s; actual:%s'
                                % (name, target, actual))

        def _check_vectors(self, x, target, actual, name='I'):
            self.assertTrue(len(actual) > 0,
                            '%s(...) expected return'%name)
            if target is None:
                return
            self.assertEqual(len(target), len(actual),
                             '%s(...) returned wrong length'%name)
            for xi, yi, actual_yi in zip(x, target, actual):
                if yi is None:
                    # smoke test --- make sure it runs and produces a value
                    self.assertTrue(not np.isnan(actual_yi),
                                    'invalid %s(%s): %s' % (name, xi, actual_yi))
                elif np.isnan(yi):
                    # make sure nans match
                    self.assertTrue(np.isnan(actual_yi),
                                    '%s(%s): expected:%s; actual:%s'
                                    % (name, xi, yi, actual_yi))
                else:
                    # is_near does not work for infinite values, so also test
                    # for exact values.
                    self.assertTrue(yi == actual_yi or is_near(yi, actual_yi, 5),
                                    '%s(%s): expected:%s; actual:%s'
                                    % (name, xi, yi, actual_yi))

    return ModelTestCase

def invalid_pars(partable, pars):
    # type: (ParameterTable, Dict[str, float]) -> List[str]
    """
    Return a list of parameter names that are not part of the model.
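
    Polydispersity suffixes are accepted, so for a model with a *radius*
    parameter, *radius_pd*, *radius_pd_n*, *radius_pd_nsigma* and
    *radius_pd_type* are all treated as valid.  For example (an illustrative
    sketch, where *partable* is any model's parameter table)::

        bad = invalid_pars(partable, {"radius": 30.0, "radiuss": 1.0})
        # bad == ["radiuss"]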
    """
    names = set(p.id for p in partable.call_parameters)
    invalid = []
    for par in sorted(pars.keys()):
        # special handling of R_eff mode, which is not a usual parameter
        if par == product.RADIUS_MODE_ID:
            continue
        parts = par.split('_pd')
        # Note: "radius_pd_nsigma".split('_pd') gives ['radius', '_nsigma'],
        # so the accepted suffixes must keep the leading underscore.
        if len(parts) > 1 and parts[1] not in ("", "_n", "_nsigma", "_type"):
            invalid.append(par)
            continue
        if parts[0] not in names:
            invalid.append(par)
    return invalid


def is_near(target, actual, digits=5):
    # type: (float, float, int) -> bool
    """
    Returns true if *actual* is within *digits* significant digits of *target*.
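
    For example (these values follow directly from the formula below)::

        >>> is_near(1.0, 1.000001)
        True
        >>> is_near(1.0, 1.1)
        False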
    """
    import math
    shift = 10**math.ceil(math.log10(abs(target)))
    return abs(target-actual)/shift < 1.5*10**-digits

# CRUFT: old interface; should be deprecated and removed
def run_one(model_name):
    # msg = "use check_model(model_info) rather than run_one(model_name)"
    # warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
    try:
        model_info = load_model_info(model_name)
    except Exception:
        output = traceback.format_exc()
        return output

    success, output = check_model(model_info)
    return output

def check_model(model_info):
    # type: (ModelInfo) -> Tuple[bool, str]
    """
    Run the tests for a single model, capturing the output.

    Returns success status and the output string.
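
    Example (an illustrative sketch using the standard sphere model)::

        success, output = check_model(load_model_info("sphere"))
        print(output)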
    """
    # Note that running main() directly did not work from within the
    # wxPython pycrust console.  Instead of the results appearing in the
    # window they were printed to the underlying console.
    from unittest.runner import TextTestResult, _WritelnDecorator

    # Build an object to capture and print the test results
    stream = _WritelnDecorator(StringIO())  # Add writeln() method to stream
    verbosity = 2
    descriptions = True
    result = TextTestResult(stream, descriptions, verbosity)

    # Build a test suite containing just the model
    loaders = ['opencl' if use_opencl() else 'cuda' if use_cuda() else 'dll']
    suite = unittest.TestSuite()
    _add_model_to_suite(loaders, suite, model_info)

    # Warn if there are no user defined tests.
    # Note: the test suite constructed above only has one test in it, which
    # runs through some smoke tests to make sure the model runs, then runs
    # through the input-output pairs given in the model definition file.  To
    # check if any such pairs are defined, therefore, we just need to check if
    # they are in the first test of the test suite.  We do this with an
    # iterator since we don't have direct access to the list of tests in the
    # test suite.
    # In Qt5 suite.run() will clear all tests in the suite after running
    # with no way of retaining them for the test below, so let's check
    # for user tests before running the suite.
    for test in suite:
        if not test.info.tests:
            stream.writeln("Note: %s has no user defined tests."%model_info.name)
        break
    else:
        stream.writeln("Note: no test suite created --- this should never happen")

    # Run the test suite
    suite.run(result)

    # Print the failures and errors
    for _, tb in result.errors:
        stream.writeln(tb)
    for _, tb in result.failures:
        stream.writeln(tb)

    output = stream.getvalue()
    stream.close()
    return result.wasSuccessful(), output


def main(*models):
    # type: (*str) -> int
    """
    Run tests on the given models.

    Returns 0 if success or 1 if any tests fail.
    """
    try:
        from xmlrunner import XMLTestRunner as TestRunner
        test_args = {'output': 'logs'}
    except ImportError:
        from unittest import TextTestRunner as TestRunner
        test_args = {}

    if models and models[0] == '-v':
        verbosity = 2
        models = models[1:]
    else:
        verbosity = 1
    if models and models[0] == 'opencl':
        if not use_opencl():
            print("opencl is not available")
            return 1
        loaders = ['opencl']
        models = models[1:]
    elif models and models[0] == 'cuda':
        if not use_cuda():
            print("cuda is not available")
            return 1
        loaders = ['cuda']
        models = models[1:]
    elif models and models[0] == 'dll':
        # TODO: test if compiler is available?
        loaders = ['dll']
        models = models[1:]
    else:
        loaders = ['dll']
        if use_opencl():
            loaders.append('opencl')
        if use_cuda():
            loaders.append('cuda')
    if not models:
        print("""\
usage:
  python -m sasmodels.model_test [-v] [opencl|cuda|dll] model1 model2 ...

If -v is included on the command line, then use verbose output.

If no platform is specified, then models will be tested with dll, and,
if available, OpenCL and CUDA; the compute target is ignored for pure
python models.

If model1 is 'all', then all models except the remaining named models
will be tested.

""")
        return 1

    runner = TestRunner(verbosity=verbosity, **test_args)
    result = runner.run(make_suite(loaders, models))
    return 1 if result.failures or result.errors else 0


def model_tests():
    # type: () -> Iterator[Callable[[], None]]
    """
    Test runner visible to nosetests.

    Run "nosetests sasmodels" on the command line to invoke it.
    """
    loaders = ['dll']
    if use_opencl():
        loaders.append('opencl')
    if use_cuda():
        loaders.append('cuda')
    tests = make_suite(loaders, ['all'])
    def build_test(test):
        # In order for nosetest to show the test name, wrap the test.run_all
        # instance in a function that takes the test name as a parameter which
        # will be displayed when the test is run.  Do this in a function so
        # that it properly captures the context for tests that are created
        # now and run later.  If done directly in the for loop, then the
        # looping variable test would be shared amongst all the tests, and
        # we would be repeatedly testing vesicle.

        # Note: in sasview sas.sasgui.perspectives.fitting.gpu_options
        # requires that the test.description field be set.
        wrap = lambda: test.run_all()
        wrap.description = test.test_name
        return wrap
        # The following would work with nosetests and pytest:
        #     return lambda name: test.run_all(), test.test_name

    for test in tests:
        yield build_test(test)


if __name__ == "__main__":
    sys.exit(main(*sys.argv[1:]))