source: sasmodels/sasmodels/model_test.py @ 81cd2a2

core_shell_microgelscostrafo411magnetic_modelrelease_v0.94release_v0.95ticket-1257-vesicle-productticket_1156ticket_1265_superballticket_822_more_unit_tests
Last change on this file since 81cd2a2 was 81cd2a2, checked in by Paul Kienzle <pkienzle@…>, 8 years ago

fix test runner patch

  • Property mode set to 100644
File size: 10.6 KB
Line 
1# -*- coding: utf-8 -*-
2"""
3Run model unit tests.
4
5Usage::
6
7    python -m sasmodels.model_test [opencl|dll|opencl_and_dll] model1 model2 ...
8
9    if model1 is 'all', then all except the remaining models will be tested
10
11Each model is tested using the default parameters at q=0.1, (qx, qy)=(0.1, 0.1),
12and the ER and VR are computed.  The return values at these points are not
13considered.  The test is only to verify that the models run to completion,
14and do not produce inf or NaN.
15
16Tests are defined with the *tests* attribute in the model.py file.  *tests*
17is a list of individual tests to run, where each test consists of the
18parameter values for the test, the q-values and the expected results.  For
19the effective radius test, the q-value should be 'ER'.  For the VR test,
20the q-value should be 'VR'.  For 1-D tests, either specify the q value or
21a list of q-values, and the corresponding I(q) value, or list of I(q) values.
22
23That is::
24
25    tests = [
26        [ {parameters}, q, I(q)],
27        [ {parameters}, [q], [I(q)] ],
28        [ {parameters}, [q1, q2, ...], [I(q1), I(q2), ...]],
29
30        [ {parameters}, (qx, qy), I(qx, qy)],
31        [ {parameters}, [(qx1, qy1), (qx2, qy2), ...],
32                        [I(qx1, qy1), I(qx2, qy2), ...]],
33
34        [ {parameters}, 'ER', ER(pars) ],
35        [ {parameters}, 'VR', VR(pars) ],
36        ...
37    ]
38
39Parameters are *key:value* pairs, where key is one of the parameters of the
40model and value is the value to use for the test.  Any parameters not given
41in the parameter list will take on the default parameter value.
42
43Precision defaults to 5 digits (relative).
44"""
45from __future__ import print_function
46
47import sys
48import unittest
49
50import numpy as np
51
52from .core import list_models, load_model_info, build_model, HAVE_OPENCL
53from .core import call_kernel, call_ER, call_VR
54from .exception import annotate_exception
55
56#TODO: rename to tests so that tab completion works better for models directory
57
def make_suite(loaders, models):
    """
    Construct the pyunit test suite.

    *loaders* is the list of kernel drivers to use, which is one of
    *["dll", "opencl"]*, *["dll"]* or *["opencl"]*.  For python models,
    the python driver is always used.

    *models* is the list of models to test, or *["all"]* to test all models.
    """
    ModelTestCase = _hide_model_case_from_nosetests()
    suite = unittest.TestSuite()

    if models[0] == 'all':
        # "all" means every model except those listed after it.
        skip = models[1:]
        models = list_models()
    else:
        skip = []

    for model_name in models:
        if model_name in skip:
            continue
        model_info = load_model_info(model_name)

        # Kernels implemented in python always go through the dll driver;
        # the cl kernel is never attempted for them since opencl will not
        # be available in some environments.
        if callable(model_info['Iq']):
            # kernel implemented in python
            suite.addTest(ModelTestCase(
                "Model: %s, Kernel: python" % model_name,
                model_info,
                "test_%s_python" % model_name,
                platform="dll",  # so that
                dtype="double"))
        else:
            # kernel implemented in C
            # test using opencl if desired and available
            if 'opencl' in loaders and HAVE_OPENCL:
                # Using dtype=None so that the models that are only
                # correct for double precision are not tested using
                # single precision.  The choice is determined by the
                # presence of *single=False* in the model file.
                suite.addTest(ModelTestCase(
                    "Model: %s, Kernel: OpenCL" % model_name,
                    model_info,
                    "test_%s_opencl" % model_name,
                    platform="ocl",
                    dtype=None))

            # test using dll if desired
            if 'dll' in loaders:
                suite.addTest(ModelTestCase(
                    "Model: %s, Kernel: dll" % model_name,
                    model_info,
                    "test_%s_dll" % model_name,
                    platform="dll",
                    dtype="double"))

    return suite
124
125
def _hide_model_case_from_nosetests():
    # The TestCase subclass is defined inside a factory function so that
    # nose-style discovery does not pick it up as a test on its own;
    # instances are only created explicitly by make_suite().
    class ModelTestCase(unittest.TestCase):
        """
        Test suite for a particular model with a particular kernel driver.

        The test suite runs a simple smoke test to make sure the model
        functions, then runs the list of tests at the bottom of the model
        description file.
        """
        def __init__(self, test_name, model_info, test_method_name,
                     platform, dtype):
            # *test_name* is the label shown in test reports; *model_info*
            # is the dict from load_model_info(); *platform* is "dll" or
            # "ocl"; *dtype* is e.g. "double", or None to let the model's
            # own *single* flag choose the precision.
            self.test_name = test_name
            self.info = model_info
            self.platform = platform
            self.dtype = dtype

            # Register _runTest under a model-specific method name so the
            # runner reports e.g. "test_sphere_dll" instead of "_runTest".
            setattr(self, test_method_name, self._runTest)
            unittest.TestCase.__init__(self, test_method_name)

        def _runTest(self):
            # Run the implicit smoke tests, then the model-defined tests
            # from the *tests* attribute of the model file.
            smoke_tests = [
                # test validity at reasonable values
                [{}, 0.1, None],
                [{}, (0.1, 0.1), None],
                # test validity at q = 0
                #[{}, 0.0, None],
                #[{}, (0.0, 0.0), None],
                # test that ER/VR will run if they exist
                [{}, 'ER', None],
                [{}, 'VR', None],
                ]

            tests = self.info['tests']
            try:
                model = build_model(self.info, dtype=self.dtype,
                                    platform=self.platform)
                for test in smoke_tests + tests:
                    self._run_one_test(model, test)

                if not tests and self.platform == "dll":
                    ## Uncomment the following to make forgetting the test
                    ## values an error.  Only do so for the "dll" tests
                    ## to reduce noise from both opencl and dll, and because
                    ## python kernels use platform="dll".
                    #raise Exception("No test cases provided")
                    pass

            except:
                # Deliberately bare: tag whatever was raised with the test
                # name before re-raising so failures identify the model.
                annotate_exception(self.test_name)
                raise

        def _run_one_test(self, model, test):
            # A test is [pars, x, y] where x may be a scalar q, a
            # (qx, qy) tuple, a list of either, or the strings 'ER'/'VR'.
            pars, x, y = test

            # Normalize scalar input/expected values to one-element lists
            # so the comparison loop below is uniform.
            if not isinstance(y, list):
                y = [y]
            if not isinstance(x, list):
                x = [x]

            self.assertEqual(len(y), len(x))

            if x[0] == 'ER':
                actual = [call_ER(model.info, pars)]
            elif x[0] == 'VR':
                actual = [call_VR(model.info, pars)]
            elif isinstance(x[0], tuple):
                # 2-D test: unzip the (qx, qy) pairs into parallel vectors.
                Qx, Qy = zip(*x)
                q_vectors = [np.array(Qx), np.array(Qy)]
                kernel = model.make_kernel(q_vectors)
                actual = call_kernel(kernel, pars)
            else:
                # 1-D test: a single vector of q values.
                q_vectors = [np.array(x)]
                kernel = model.make_kernel(q_vectors)
                actual = call_kernel(kernel, pars)

            self.assertTrue(len(actual) > 0)
            self.assertEqual(len(y), len(actual))

            for xi, yi, actual_yi in zip(x, y, actual):
                if yi is None:
                    # smoke test --- make sure it runs and produces a value
                    self.assertTrue(np.isfinite(actual_yi),
                                    'invalid f(%s): %s' % (xi, actual_yi))
                elif np.isnan(yi):
                    # NaN is the expected value; NaN != NaN, so use isnan.
                    self.assertTrue(np.isnan(actual_yi),
                                    'f(%s): expected:%s; actual:%s'
                                    % (xi, yi, actual_yi))
                else:
                    # is_near does not work for infinite values, so also
                    # accept an exact match before the approximate check.
                    self.assertTrue(yi==actual_yi or is_near(yi, actual_yi, 5),
                                    'f(%s); expected:%s; actual:%s'
                                    % (xi, yi, actual_yi))

    return ModelTestCase
221
def is_near(target, actual, digits=5):
    """
    Returns true if *actual* is within *digits* significant digits of *target*.

    A *target* of zero is compared on an absolute scale instead, since
    significant digits are undefined at zero.
    """
    import math
    if target == 0.:
        # log10(0) raises a domain error; fall back to absolute tolerance.
        return abs(actual) < 1.5*10**-digits
    # Scale the difference by the decade of the target so the comparison
    # counts significant digits rather than absolute error.
    shift = 10**math.ceil(math.log10(abs(target)))
    return abs(target-actual)/shift < 1.5*10**-digits
229
def main():
    """
    Run tests given in sys.argv.

    Returns 0 if success or 1 if any tests fail.
    """
    # Prefer the junit-style XML runner (writes result logs) when it is
    # installed; otherwise fall back to the stdlib text runner.
    try:
        from xmlrunner import XMLTestRunner as TestRunner
        test_args = { 'output': 'logs' }
    except ImportError:
        from unittest import TextTestRunner as TestRunner
        test_args = { }

    models = sys.argv[1:]
    if models and models[0] == '-v':
        verbosity = 2
        models = models[1:]
    else:
        verbosity = 1
    if models and models[0] == 'opencl':
        if not HAVE_OPENCL:
            print("opencl is not available")
            return 1
        loaders = ['opencl']
        models = models[1:]
    elif models and models[0] == 'dll':
        # TODO: test if compiler is available?
        loaders = ['dll']
        models = models[1:]
    elif models and models[0] == 'opencl_and_dll':
        loaders = ['opencl', 'dll']
        models = models[1:]
    else:
        loaders = ['opencl', 'dll']
    if not models:
        # Fixed typo ("verboe") and documented the opencl_and_dll option.
        print("""\
usage:
  python -m sasmodels.model_test [-v] [opencl|dll|opencl_and_dll] model1 model2 ...

If -v is included on the command line, then use verbose output.

If neither opencl nor dll is specified, then models will be tested with
both opencl and dll; the compute target is ignored for pure python models.

If model1 is 'all', then all except the remaining models will be tested.

""")

        return 1

    runner = TestRunner(verbosity=verbosity, **test_args)
    result = runner.run(make_suite(loaders, models))
    return 1 if result.failures or result.errors else 0
283
284
def model_tests():
    """
    Test runner visible to nosetests.

    Run "nosetests sasmodels" on the command line to invoke it.
    """
    # Yield the bound test method of every case in the full suite so that
    # nose treats each model/driver combination as a separate test.
    suite = make_suite(['opencl', 'dll'], ['all'])
    for case in suite:
        yield case._runTest
294
295
# Entry point for "python -m sasmodels.model_test ..."; exit status is
# 0 on success, 1 on failure (see main()).
if __name__ == "__main__":
    sys.exit(main())
Note: See TracBrowser for help on using the repository browser.