source: sasmodels/sasmodels/model_test.py @ cf404cb

Last change on this file since cf404cb was 9404dd3, checked in by Paul Kienzle <pkienzle@…>, 8 years ago

python 3.x support

# -*- coding: utf-8 -*-
"""
Run model unit tests.

Usage::

    python -m sasmodels.model_test [opencl|dll|opencl_and_dll] model1 model2 ...

    If model1 is 'all', then all models except those listed after it are tested.

Each model is tested using the default parameters at q=0.1 and (qx,qy)=(0.1,0.1),
and the effective radius (ER) and volume ratio (VR) are computed.  The return
values at these points are not checked; the test only verifies that the models
run to completion and do not produce inf or NaN.

Tests are defined with the *tests* attribute in the model.py file.  *tests*
is a list of individual tests to run, where each test consists of the
parameter values for the test, the q-values and the expected results.  For
the effective radius test, the q-value should be 'ER'.  For the VR test,
the q-value should be 'VR'.  For 1-D tests, specify either a single q value
or a list of q-values, together with the corresponding I(q) value or list of
I(q) values.

That is::

    tests = [
        [ {parameters}, q, I(q)],
        [ {parameters}, [q], [I(q)] ],
        [ {parameters}, [q1, q2, ...], [I(q1), I(q2), ...]],

        [ {parameters}, (qx, qy), I(qx, qy)],
        [ {parameters}, [(qx1, qy1), (qx2, qy2), ...],
                        [I(qx1,qy1), I(qx2,qy2), ...]],

        [ {parameters}, 'ER', ER(pars) ],
        [ {parameters}, 'VR', VR(pars) ],
        ...
    ]

Parameters are *key:value* pairs, where key is one of the parameters of the
model and value is the value to use for the test.  Any parameters not given
in the parameter list will take on the default parameter value.

Precision defaults to 5 digits (relative).
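
For example, a model file might contain something like the following
(the parameter name and values are purely illustrative, not taken from a
real model; an expected value of *None* means the result is only checked
to be finite)::

    tests = [
        [ {}, 0.1, None ],                       # defaults, single q
        [ {'radius': 20.0}, (0.1, 0.1), None ],  # one parameter overridden, 2-D q
    ]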
44"""
45
46import sys
47import unittest
48
49import numpy as np
50
51from .core import list_models, load_model_definition, load_model, HAVE_OPENCL
52from .core import make_kernel, call_kernel, call_ER, call_VR
53from .exception import annotate_exception
54
55
def make_suite(loaders, models):
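    """
    Construct the test suite for the requested loaders and models.

    *loaders* is a list drawn from 'opencl' and 'dll'.  *models* is a list of
    model names; if the first entry is 'all', every model except the remaining
    names in the list is tested.

    A minimal usage sketch (the model name here is only an example)::

        suite = make_suite(['dll'], ['sphere'])
        unittest.TextTestRunner().run(suite)
    """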

    ModelTestCase = _hide_model_case_from_nosetests()
    suite = unittest.TestSuite()

    if models[0] == 'all':
        skip = models[1:]
        models = list_models()
    else:
        skip = []
    for model_name in models:
        if model_name in skip:
            continue
        model_definition = load_model_definition(model_name)

        #print('------')
        #print('found tests in', model_name)
        #print('------')

        # If the model is implemented in python ("is_py"), use the dll loader
        # to call the python kernel; don't try to call the opencl kernel since
        # it will not be available in some environments.
        is_py = callable(getattr(model_definition, 'Iq', None))

        if is_py:  # kernel implemented in python
            test_name = "Model: %s, Kernel: python" % model_name
            test_method_name = "test_%s_python" % model_name
            test = ModelTestCase(test_name, model_definition,
                                 test_method_name,
                                 platform="dll",  # python kernels run through the dll path
                                 dtype="double")
            suite.addTest(test)
        else:   # kernel implemented in C
            # test using opencl if desired and available
            if 'opencl' in loaders and HAVE_OPENCL:
                test_name = "Model: %s, Kernel: OpenCL" % model_name
                test_method_name = "test_%s_opencl" % model_name
                test = ModelTestCase(test_name, model_definition,
                                     test_method_name,
                                     platform="ocl", dtype='single')
                #print("defining", test_name)
                suite.addTest(test)

            # test using dll if desired
            if 'dll' in loaders:
                test_name = "Model: %s, Kernel: dll" % model_name
                test_method_name = "test_%s_dll" % model_name
                test = ModelTestCase(test_name, model_definition,
                                     test_method_name,
                                     platform="dll",
                                     dtype="double")
                suite.addTest(test)

    return suite


def _hide_model_case_from_nosetests():
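    # Defining the TestCase class inside a factory function keeps nose from
    # collecting it directly as a module-level test class; instances are only
    # created through make_suite().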
    class ModelTestCase(unittest.TestCase):
        def __init__(self, test_name, definition, test_method_name,
                     platform, dtype):
            self.test_name = test_name
            self.definition = definition
            self.platform = platform
            self.dtype = dtype

            setattr(self, test_method_name, self._runTest)
            unittest.TestCase.__init__(self, test_method_name)

        def _runTest(self):
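            # Every model gets these smoke tests with its default parameters:
            # q=0.1, (qx,qy)=(0.1,0.1), plus ER and VR.  The expected value
            # None means only check that the result is finite.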
            smoke_tests = [
                [{}, 0.1, None],
                [{}, (0.1, 0.1), None],
                [{}, 'ER', None],
                [{}, 'VR', None],
                ]

            tests = getattr(self.definition, 'tests', [])
            try:
                model = load_model(self.definition, dtype=self.dtype,
                                   platform=self.platform)
                for test in smoke_tests + tests:
                    self._run_one_test(model, test)

                if not tests and self.platform == "dll":
                    ## Uncomment the following to make forgetting the test
                    ## values an error.  Only do so for the "dll" tests
                    ## to reduce noise from both opencl and dll, and because
                    ## python kernels are also run with platform="dll".
                    #raise Exception("No test cases provided")
                    pass

            except Exception as exc:
                annotate_exception(exc, self.test_name)
                raise

        def _run_one_test(self, model, test):
            pars, x, y = test

            if not isinstance(y, list):
                y = [y]
            if not isinstance(x, list):
                x = [x]

            self.assertEqual(len(y), len(x))

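            # Dispatch on the form of x: 'ER' and 'VR' go through the scalar
            # helpers, tuples are treated as (qx, qy) pairs for a 2-D kernel,
            # and plain numbers as 1-D q values.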
            if x[0] == 'ER':
                actual = [call_ER(model.info, pars)]
            elif x[0] == 'VR':
                actual = [call_VR(model.info, pars)]
            elif isinstance(x[0], tuple):
                Qx, Qy = zip(*x)
                q_vectors = [np.array(Qx), np.array(Qy)]
                kernel = make_kernel(model, q_vectors)
                actual = call_kernel(kernel, pars)
            else:
                q_vectors = [np.array(x)]
                kernel = make_kernel(model, q_vectors)
                actual = call_kernel(kernel, pars)

            self.assertGreater(len(actual), 0)
            self.assertEqual(len(y), len(actual))

            for xi, yi, actual_yi in zip(x, y, actual):
                if yi is None:
                    # smoke test --- make sure it runs and produces a value
                    self.assertTrue(np.isfinite(actual_yi),
                        'invalid f(%s): %s' % (xi, actual_yi))
                else:
                    err = abs(yi - actual_yi)
                    nrm = abs(yi)
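                    # Relative comparison: err * 10**5 < nrm is equivalent to
                    # |yi - actual_yi| / |yi| < 1e-5, i.e. the 5 significant
                    # digits noted in the module docstring.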
                    self.assertLess(err * 10**5, nrm,
                        'f(%s); expected:%s; actual:%s' % (xi, yi, actual_yi))

    return ModelTestCase


def main():
    """
    Run the tests given in sys.argv.

    Returns 0 on success or 1 if any tests fail.
    """
    import xmlrunner

    models = sys.argv[1:]
    if models and models[0] == 'opencl':
        if not HAVE_OPENCL:
            print("opencl is not available")
            return 1
        loaders = ['opencl']
        models = models[1:]
    elif models and models[0] == 'dll':
        # TODO: test if compiler is available?
        loaders = ['dll']
        models = models[1:]
    elif models and models[0] == 'opencl_and_dll':
        loaders = ['opencl', 'dll']
        models = models[1:]
    else:
        loaders = ['opencl', 'dll']
    if not models:
        print("""\
usage:
  python -m sasmodels.model_test [opencl|dll|opencl_and_dll] model1 model2 ...

If model1 is 'all', then all models except those listed after it are tested.
If no compute target is specified, then models will be tested with both opencl
and dll; the compute target is ignored for pure python models.""")

        return 1

    #runner = unittest.TextTestRunner()
    runner = xmlrunner.XMLTestRunner(output='logs')
    result = runner.run(make_suite(loaders, models))
    return 1 if result.failures or result.errors else 0


def model_tests():
    """
    Test runner visible to nosetests.

    Run "nosetests sasmodels" on the command line to invoke it.
    """
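    # nose treats a generator test like this one as a collection of tests:
    # each yielded callable is collected and run as an individual test case.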
    tests = make_suite(['opencl', 'dll'], ['all'])
    for test_i in tests:
        yield test_i._runTest


if __name__ == "__main__":
    sys.exit(main())