source: sasmodels/sasmodels/model_test.py @ c713c85

# -*- coding: utf-8 -*-
"""
Run model unit tests.

Usage::

    python -m sasmodels.model_test [opencl|dll|opencl_and_dll] model1 model2 ...

    if model1 is 'all', then all models except those listed after it
    will be tested

Each model is tested using the default parameters at q=0.1, (qx, qy)=(0.1, 0.1),
and the ER and VR are computed.  The return values at these points are not
considered.  The test is only to verify that the models run to completion,
and do not produce inf or NaN.

Tests are defined with the *tests* attribute in the model.py file.  *tests*
is a list of individual tests to run, where each test consists of the
parameter values for the test, the q-values and the expected results.  For
the effective radius test, the q-value should be 'ER'.  For the VR test,
the q-value should be 'VR'.  For 1-D tests, either specify the q value or
a list of q-values, and the corresponding I(q) value, or list of I(q) values.

That is::

    tests = [
        [ {parameters}, q, I(q)],
        [ {parameters}, [q], [I(q)] ],
        [ {parameters}, [q1, q2, ...], [I(q1), I(q2), ...]],

        [ {parameters}, (qx, qy), I(qx, qy)],
        [ {parameters}, [(qx1, qy1), (qx2, qy2), ...],
                        [I(qx1, qy1), I(qx2, qy2), ...]],

        [ {parameters}, 'ER', ER(pars) ],
        [ {parameters}, 'VR', VR(pars) ],
        ...
    ]

Parameters are *key:value* pairs, where key is one of the parameters of the
model and value is the value to use for the test.  Any parameters not given
in the parameter list will take on the default parameter value.
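
For example, a hypothetical model with a *radius* parameter might define
the following (parameter name and expected values are illustrative, not
taken from any real model)::

    tests = [
        [{'radius': 30.0}, 0.1, 0.02354],
        [{'radius': 30.0}, [0.01, 0.1], [11.15, 0.02354]],
        [{'radius': 30.0}, (0.1, 0.1), 0.01154],
        [{'radius': 30.0}, 'ER', 30.0],
    ]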

Precision defaults to 5 digits (relative).
"""
from __future__ import print_function

import sys
import unittest

import numpy as np  # type: ignore

from . import core
from .core import list_models, load_model_info, build_model
from .direct_model import call_kernel, call_ER, call_VR
from .exception import annotate_exception
from .modelinfo import expand_pars

try:
    from typing import List, Iterator, Callable
except ImportError:
    pass
else:
    from .modelinfo import ParameterTable, ParameterSet, TestCondition, ModelInfo
    from .kernel import KernelModel


def make_suite(loaders, models):
    # type: (List[str], List[str]) -> unittest.TestSuite
    """
    Construct the pyunit test suite.

    *loaders* is the list of kernel drivers to use, which is one of
    *["dll", "opencl"]*, *["dll"]* or *["opencl"]*.  For python models,
    the python driver is always used.

    *models* is the list of models to test, or *["all"]* to test all models.
    """
    ModelTestCase = _hide_model_case_from_nose()
    suite = unittest.TestSuite()

    if models[0] == 'all':
        skip = models[1:]
        models = list_models()
    else:
        skip = []
    for model_name in models:
        if model_name in skip:
            continue
        model_info = load_model_info(model_name)

        # If the model is implemented in python then use the dll loader to
        # call the python kernel; don't try to call the opencl kernel since
        # it will not be available in some environments.
        is_py = callable(model_info.Iq)

        if is_py:  # kernel implemented in python
            test_name = "Model: %s, Kernel: python"%model_name
            test_method_name = "test_%s_python" % model_info.id
            test = ModelTestCase(test_name, model_info,
                                 test_method_name,
                                 platform="dll",  # python kernels run via the dll loader
                                 dtype="double")
            suite.addTest(test)
        else:   # kernel implemented in C
            # test using opencl if desired and available
            if 'opencl' in loaders and core.HAVE_OPENCL:
                test_name = "Model: %s, Kernel: OpenCL"%model_name
                test_method_name = "test_%s_opencl" % model_info.id
                # Using dtype=None so that models that are only correct
                # for double precision are not tested using single
                # precision.  The choice is determined by the presence
                # of *single=False* in the model file.
                test = ModelTestCase(test_name, model_info,
                                     test_method_name,
                                     platform="ocl", dtype=None)
                suite.addTest(test)

            # test using dll if desired
            if 'dll' in loaders or not core.HAVE_OPENCL:
                test_name = "Model: %s, Kernel: dll"%model_name
                test_method_name = "test_%s_dll" % model_info.id
                test = ModelTestCase(test_name, model_info,
                                     test_method_name,
                                     platform="dll",
                                     dtype="double")
                suite.addTest(test)

    return suite

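# Example (illustrative): build and run the suite for two specific models
# using only the dll driver; 'sphere' and 'cylinder' are assumed to be
# installed model names.
#
#     suite = make_suite(['dll'], ['sphere', 'cylinder'])
#     unittest.TextTestRunner(verbosity=2).run(suite)
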
def _hide_model_case_from_nose():
    # type: () -> type
    class ModelTestCase(unittest.TestCase):
        """
        Test suite for a particular model with a particular kernel driver.

        The test suite runs a simple smoke test to make sure the model
        functions, then runs the list of tests at the bottom of the model
        description file.
        """
        def __init__(self, test_name, model_info, test_method_name,
                     platform, dtype):
            # type: (str, ModelInfo, str, str, DType) -> None
            self.test_name = test_name
            self.info = model_info
            self.platform = platform
            self.dtype = dtype

            setattr(self, test_method_name, self.run_all)
            unittest.TestCase.__init__(self, test_method_name)

        def run_all(self):
            # type: () -> None
            """
            Run all the tests in the test suite, including smoke tests.
            """
            smoke_tests = [
                # test validity at reasonable values
                ({}, 0.1, None),
                ({}, (0.1, 0.1), None),
                # test validity at q = 0
                #({}, 0.0, None),
                #({}, (0.0, 0.0), None),
                # test vector form
                ({}, [0.1]*2, [None]*2),
                ({}, [(0.1, 0.1)]*2, [None]*2),
                # test that ER/VR will run if they exist
                ({}, 'ER', None),
                ({}, 'VR', None),
                ]

            tests = self.info.tests
            try:
                model = build_model(self.info, dtype=self.dtype,
                                    platform=self.platform)
                for test in smoke_tests + tests:
                    self.run_one(model, test)

                # Check for missing tests.  Only do so for the "dll" tests
                # to reduce noise from both opencl and dll, and because
                # python kernels use platform="dll".
                if self.platform == "dll":
                    missing = self._find_missing_tests()
                    if missing:
                        raise ValueError("Missing tests for "+", ".join(missing))

            except:
                annotate_exception(self.test_name)
                raise

        def _find_missing_tests(self):
            # type: () -> List[str]
            """make sure there are 1D, 2D, ER and VR tests as appropriate"""
            model_has_VR = callable(self.info.VR)
            model_has_ER = callable(self.info.ER)
            model_has_1D = True
            model_has_2D = any(p.type == 'orientation'
                               for p in self.info.parameters.kernel_parameters)

            # Lists of tests that have a result that is not None
            single = [test for test in self.info.tests
                      if not isinstance(test[2], list) and test[2] is not None]
            tests_has_VR = any(test[1] == 'VR' for test in single)
            tests_has_ER = any(test[1] == 'ER' for test in single)
            tests_has_1D_single = any(isinstance(test[1], float) for test in single)
            tests_has_2D_single = any(isinstance(test[1], tuple) for test in single)

            multiple = [test for test in self.info.tests
                        if isinstance(test[2], list)
                            and not all(result is None for result in test[2])]
            tests_has_1D_multiple = any(isinstance(test[1][0], float)
                                        for test in multiple)
            tests_has_2D_multiple = any(isinstance(test[1][0], tuple)
                                        for test in multiple)

            missing = []
            if model_has_VR and not tests_has_VR:
                missing.append("VR")
            if model_has_ER and not tests_has_ER:
                missing.append("ER")
            if model_has_1D and not (tests_has_1D_single or tests_has_1D_multiple):
                missing.append("1D")
            if model_has_2D and not (tests_has_2D_single or tests_has_2D_multiple):
                missing.append("2D")

            return missing

        def run_one(self, model, test):
            # type: (KernelModel, TestCondition) -> None
            """Run a single test case."""
            user_pars, x, y = test
            pars = expand_pars(self.info.parameters, user_pars)

            if not isinstance(y, list):
                y = [y]
            if not isinstance(x, list):
                x = [x]

            self.assertEqual(len(y), len(x))

            if x[0] == 'ER':
                actual = [call_ER(model.info, pars)]
            elif x[0] == 'VR':
                actual = [call_VR(model.info, pars)]
            elif isinstance(x[0], tuple):
                qx, qy = zip(*x)
                q_vectors = [np.array(qx), np.array(qy)]
                kernel = model.make_kernel(q_vectors)
                actual = call_kernel(kernel, pars)
            else:
                q_vectors = [np.array(x)]
                kernel = model.make_kernel(q_vectors)
                actual = call_kernel(kernel, pars)

            self.assertTrue(len(actual) > 0)
            self.assertEqual(len(y), len(actual))

            for xi, yi, actual_yi in zip(x, y, actual):
                if yi is None:
                    # smoke test --- make sure it runs and produces a value
                    self.assertTrue(not np.isnan(actual_yi),
                                    'invalid f(%s): %s' % (xi, actual_yi))
                elif np.isnan(yi):
                    self.assertTrue(np.isnan(actual_yi),
                                    'f(%s): expected:%s; actual:%s'
                                    % (xi, yi, actual_yi))
                else:
                    # is_near does not work for infinite values, so also
                    # test for exact equality.
                    self.assertTrue(yi == actual_yi or is_near(yi, actual_yi, 5),
                                    'f(%s); expected:%s; actual:%s'
                                    % (xi, yi, actual_yi))

    return ModelTestCase

def is_near(target, actual, digits=5):
    # type: (float, float, int) -> bool
    """
    Returns true if *actual* is within *digits* significant digits of *target*.
    """
    import math
    shift = 10**math.ceil(math.log10(abs(target)))
    return abs(target-actual)/shift < 1.5*10**-digits

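# Example (illustrative): 1.2345e-4 and 1.23451e-4 agree to five significant
# digits, so is_near(1.2345e-4, 1.23451e-4, 5) is True, while
# is_near(1.2345e-4, 1.24e-4, 5) is False.  The scale *shift* is computed
# from *target*, so the comparison is relative, not absolute.
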
def run_one(model):
    # type: (str) -> None
    """
    Run the tests for a single model, printing the results to stdout.

    *model* can be a python file, which is handy for checking user defined
    plugin models.
    """
    # Note that running main() directly did not work from within the
    # wxPython pycrust console.  Instead of the results appearing in the
    # window they were printed to the underlying console.
    from unittest.runner import TextTestResult, _WritelnDecorator

    # Build an object to capture and print the test results
    stream = _WritelnDecorator(sys.stdout)  # Add writeln() method to stream
    verbosity = 2
    descriptions = True
    result = TextTestResult(stream, descriptions, verbosity)

    # Build a test suite containing just the model
    loaders = ['opencl'] if core.HAVE_OPENCL else ['dll']
    models = [model]
    try:
        suite = make_suite(loaders, models)
    except Exception:
        import traceback
        stream.writeln(traceback.format_exc())
        return

    # Run the test suite
    suite.run(result)

    # Print the failures and errors
    for _, tb in result.errors:
        stream.writeln(tb)
    for _, tb in result.failures:
        stream.writeln(tb)

    # Warn if there are no user defined tests.
    # Note: the test suite constructed above only has one test in it, which
    # runs through some smoke tests to make sure the model runs, then runs
    # through the input-output pairs given in the model definition file.  To
    # check if any such pairs are defined, therefore, we just need to check if
    # they are in the first test of the test suite.  We do this with an
    # iterator since we don't have direct access to the list of tests in the
    # test suite.
    for test in suite:
        if not test.info.tests:
            stream.writeln("Note: %s has no user defined tests."%model)
        break
    else:
        stream.writeln("Note: no test suite created --- this should never happen")

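# Example (illustrative): check a plugin model from a script or console;
# the path './my_model.py' is a hypothetical user-defined model file.
#
#     run_one('./my_model.py')
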
def main(*models):
    # type: (*str) -> int
    """
    Run tests for the given models.

    Returns 0 if success or 1 if any tests fail.
    """
    try:
        from xmlrunner import XMLTestRunner as TestRunner
        test_args = {'output': 'logs'}
    except ImportError:
        from unittest import TextTestRunner as TestRunner
        test_args = {}

    if models and models[0] == '-v':
        verbosity = 2
        models = models[1:]
    else:
        verbosity = 1
    if models and models[0] == 'opencl':
        if not core.HAVE_OPENCL:
            print("opencl is not available")
            return 1
        loaders = ['opencl']
        models = models[1:]
    elif models and models[0] == 'dll':
        # TODO: test if compiler is available?
        loaders = ['dll']
        models = models[1:]
    elif models and models[0] == 'opencl_and_dll':
        loaders = ['opencl', 'dll'] if core.HAVE_OPENCL else ['dll']
        models = models[1:]
    else:
        loaders = ['opencl', 'dll'] if core.HAVE_OPENCL else ['dll']
    if not models:
        print("""\
usage:
  python -m sasmodels.model_test [-v] [opencl|dll] model1 model2 ...

If -v is included on the command line, then use verbose output.

If neither opencl nor dll is specified, then models will be tested with
both OpenCL and dll; the compute target is ignored for pure python models.

If model1 is 'all', then all models except those listed after it will be
tested.

""")
        return 1

    runner = TestRunner(verbosity=verbosity, **test_args)
    result = runner.run(make_suite(loaders, models))
    return 1 if result.failures or result.errors else 0

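# Example (illustrative): run the dll tests for a single model
# programmatically and propagate the exit status.
#
#     status = main('dll', 'sphere')  # 'sphere' assumed to be a model name
#     sys.exit(status)
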
def model_tests():
    # type: () -> Iterator[Callable[[], None]]
    """
    Test runner visible to nosetests.

    Run "nosetests sasmodels" on the command line to invoke it.
    """
    loaders = ['opencl', 'dll'] if core.HAVE_OPENCL else ['dll']
    tests = make_suite(loaders, ['all'])
    for test_i in tests:
        # In order for nosetests to see the correct test name, we need to
        # set the description attribute of the returned function.  Since we
        # can't do this for the returned instance, wrap it in a lambda and
        # set the description on the lambda.  Otherwise we could just do:
        #    yield test_i.run_all
        L = lambda: test_i.run_all()
        L.description = test_i.test_name
        yield L

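# Example (illustrative): consume the generator directly, without nose;
# each yielded callable raises on failure.
#
#     for test in model_tests():
#         print("running", test.description)
#         test()
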
if __name__ == "__main__":
    sys.exit(main(*sys.argv[1:]))