source: sasmodels/sasmodels/model_test.py @ 13ed84c

Last change on this file: 13ed84c, checked in by Paul Kienzle <pkienzle@…>, 8 years ago: set single=False on all models that fail the single precision tests

# -*- coding: utf-8 -*-
"""
Run model unit tests.

Usage::

    python -m sasmodels.model_test [opencl|dll|opencl_and_dll] model1 model2 ...

    If model1 is 'all', then all models except those named after it will be tested.

Each model is tested using the default parameters at q=0.1, (qx, qy)=(0.1, 0.1),
and the ER and VR are computed.  The return values at these points are not
considered.  The test is only to verify that the models run to completion,
and do not produce inf or NaN.

Tests are defined with the *tests* attribute in the model.py file.  *tests*
is a list of individual tests to run, where each test consists of the
parameter values for the test, the q-values and the expected results.  For
the effective radius test, the q-value should be 'ER'.  For the VR test,
the q-value should be 'VR'.  For 1-D tests, either specify the q value or
a list of q-values, and the corresponding I(q) value, or list of I(q) values.

That is::

    tests = [
        [ {parameters}, q, I(q)],
        [ {parameters}, [q], [I(q)] ],
        [ {parameters}, [q1, q2, ...], [I(q1), I(q2), ...]],

        [ {parameters}, (qx, qy), I(qx, qy)],
        [ {parameters}, [(qx1, qy1), (qx2, qy2), ...],
                        [I(qx1, qy1), I(qx2, qy2), ...]],

        [ {parameters}, 'ER', ER(pars) ],
        [ {parameters}, 'VR', VR(pars) ],
        ...
    ]

Parameters are *key:value* pairs, where key is one of the parameters of the
model and value is the value to use for the test.  Any parameters not given
in the parameter list will take on the default parameter value.

Precision defaults to 5 digits (relative).
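
For example, a hypothetical sphere-like model might provide (parameter names
and expected values here are purely illustrative, not reference results)::

    tests = [
        [{'radius': 30.0, 'sld': 1.0}, 0.1, 11.8],
        [{'radius': 30.0}, [0.01, 0.1], [5320.0, 11.8]],
        [{'radius': 30.0}, 'ER', 30.0],
        [{'radius': 30.0}, 'VR', 1.0],
    ]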
"""
from __future__ import print_function

import sys
import unittest

import numpy as np

from .core import list_models, load_model_definition, load_model, HAVE_OPENCL
from .core import make_kernel, call_kernel, call_ER, call_VR
from .exception import annotate_exception


def make_suite(loaders, models):
    """
    Construct the pyunit test suite.

    *loaders* is the list of kernel drivers to use, which is one of
    *["dll", "opencl"]*, *["dll"]* or *["opencl"]*.  For python models,
    the python driver is always used.

    *models* is the list of models to test, or *["all"]* to test all models.
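
    A minimal sketch of standalone use (model names here are only examples)::

        suite = make_suite(["dll"], ["sphere", "cylinder"])
        unittest.TextTestRunner().run(suite)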
    """

    ModelTestCase = _hide_model_case_from_nosetests()
    suite = unittest.TestSuite()

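    # 'all' means test every model returned by list_models(), except any
    # model names listed after it, which are skipped.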
    if models[0] == 'all':
        skip = models[1:]
        models = list_models()
    else:
        skip = []
    for model_name in models:
        if model_name in skip: continue
        model_definition = load_model_definition(model_name)

        #print('------')
        #print('found tests in', model_name)
        #print('------')

        # If the model is implemented in python, use the dll loader to run
        # the python kernel; don't try the opencl kernel since it will not
        # be available in some environments.
        is_py = callable(getattr(model_definition, 'Iq', None))

        if is_py:  # kernel implemented in python
            test_name = "Model: %s, Kernel: python"%model_name
            test_method_name = "test_%s_python" % model_name
            test = ModelTestCase(test_name, model_definition,
                                 test_method_name,
                                 platform="dll",  # so the python kernel is used
                                 dtype="double")
            suite.addTest(test)
        else:   # kernel implemented in C
            # test using opencl if desired and available
            if 'opencl' in loaders and HAVE_OPENCL:
                test_name = "Model: %s, Kernel: OpenCL"%model_name
                test_method_name = "test_%s_opencl" % model_name
                # Using dtype=None so that the models that are only
                # correct for double precision are not tested using
                # single precision.  The choice is determined by the
                # presence of *single=False* in the model file.
                test = ModelTestCase(test_name, model_definition,
                                     test_method_name,
                                     platform="ocl", dtype=None)
                #print("defining", test_name)
                suite.addTest(test)

            # test using dll if desired
            if 'dll' in loaders:
                test_name = "Model: %s, Kernel: dll"%model_name
                test_method_name = "test_%s_dll" % model_name
                test = ModelTestCase(test_name, model_definition,
                                     test_method_name,
                                     platform="dll",
                                     dtype="double")
                suite.addTest(test)

    return suite


def _hide_model_case_from_nosetests():
    class ModelTestCase(unittest.TestCase):
        """
        Test suite for a particular model with a particular kernel driver.

        The test suite runs a simple smoke test to make sure the model
        functions, then runs the list of tests at the bottom of the model
        description file.
        """
        def __init__(self, test_name, definition, test_method_name,
                     platform, dtype):
            self.test_name = test_name
            self.definition = definition
            self.platform = platform
            self.dtype = dtype

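            # Attach the test body under the requested method name so that
            # the runner reports e.g. test_<model>_dll rather than a
            # generic test name.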
            setattr(self, test_method_name, self._runTest)
            unittest.TestCase.__init__(self, test_method_name)

        def _runTest(self):
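            # Smoke tests: default parameters at q=0.1, (qx, qy)=(0.1, 0.1),
            # plus ER and VR.  The expected value of None means only check
            # that the result is finite.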
            smoke_tests = [
                [{}, 0.1, None],
                [{}, (0.1, 0.1), None],
                [{}, 'ER', None],
                [{}, 'VR', None],
                ]

            tests = getattr(self.definition, 'tests', [])
            try:
                model = load_model(self.definition, dtype=self.dtype,
                                   platform=self.platform)
                for test in smoke_tests + tests:
                    self._run_one_test(model, test)

                if not tests and self.platform == "dll":
                    ## Uncomment the following to make forgetting the test
                    ## values an error.  Only do so for the "dll" tests
                    ## to reduce noise from both opencl and dll, and because
                    ## python kernels use platform="dll".
                    #raise Exception("No test cases provided")
                    pass

            except Exception as exc:
                annotate_exception(exc, self.test_name)
                raise

        def _run_one_test(self, model, test):
            pars, x, y = test

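            # Normalize the q values and expected values to lists so that
            # single-point and multi-point tests share one code path.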
            if not isinstance(y, list):
                y = [y]
            if not isinstance(x, list):
                x = [x]

            self.assertEqual(len(y), len(x))

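            # Dispatch on the form of the q values: 'ER' and 'VR' are
            # computed from the parameters alone via call_ER/call_VR,
            # tuples are 2-D (qx, qy) points, and plain numbers are 1-D q.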
            if x[0] == 'ER':
                actual = [call_ER(model.info, pars)]
            elif x[0] == 'VR':
                actual = [call_VR(model.info, pars)]
            elif isinstance(x[0], tuple):
                Qx, Qy = zip(*x)
                q_vectors = [np.array(Qx), np.array(Qy)]
                kernel = make_kernel(model, q_vectors)
                actual = call_kernel(kernel, pars)
            else:
                q_vectors = [np.array(x)]
                kernel = make_kernel(model, q_vectors)
                actual = call_kernel(kernel, pars)

            self.assertGreater(len(actual), 0)
            self.assertEqual(len(y), len(actual))

            for xi, yi, actual_yi in zip(x, y, actual):
                if yi is None:
                    # smoke test --- make sure it runs and produces a value
                    self.assertTrue(np.isfinite(actual_yi),
                                    'invalid f(%s): %s' % (xi, actual_yi))
                else:
                    self.assertTrue(is_near(yi, actual_yi, 5),
                                    'f(%s); expected:%s; actual:%s'
                                    % (xi, yi, actual_yi))

    return ModelTestCase

def is_near(target, actual, digits=5):
    """
    Returns true if *actual* is within *digits* significant digits of *target*.
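
    For example (values chosen by hand purely for illustration)::

        is_near(1.23456, 1.23457, 5)   # True:  relative difference ~ 1e-5
        is_near(1.23456, 1.23490, 5)   # False: differs in the 5th digit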
    """
    import math
    shift = 10**math.ceil(math.log10(abs(target)))
    return abs(target-actual)/shift < 1.5*10**-digits

def main():
    """
    Run tests given in sys.argv.

    Returns 0 if success or 1 if any tests fail.
    """
    import xmlrunner

    models = sys.argv[1:]
    if models and models[0] == '-v':
        verbosity = 2
        models = models[1:]
    else:
        verbosity = 1
    if models and models[0] == 'opencl':
        if not HAVE_OPENCL:
            print("opencl is not available")
            return 1
        loaders = ['opencl']
        models = models[1:]
    elif models and models[0] == 'dll':
        # TODO: test if compiler is available?
        loaders = ['dll']
        models = models[1:]
    elif models and models[0] == 'opencl_and_dll':
        loaders = ['opencl', 'dll']
        models = models[1:]
    else:
        loaders = ['opencl', 'dll']
    if not models:
        print("""\
usage:
  python -m sasmodels.model_test [-v] [opencl|dll] model1 model2 ...

If -v is included on the command line, then the tests are run in verbose mode.

If neither opencl nor dll is specified, then models will be tested with
both opencl and dll; the compute target is ignored for pure python models.

If model1 is 'all', then all models except those listed after it will be tested.

""")

        return 1

    #runner = unittest.TextTestRunner()
    runner = xmlrunner.XMLTestRunner(output='logs', verbosity=verbosity)
    result = runner.run(make_suite(loaders, models))
    return 1 if result.failures or result.errors else 0


def model_tests():
    """
    Test runner visible to nosetests.

    Run "nosetests sasmodels" on the command line to invoke it.
    """
    tests = make_suite(['opencl', 'dll'], ['all'])
    for test_i in tests:
        yield test_i._runTest


if __name__ == "__main__":
    sys.exit(main())