source: sasmodels/sasmodels/model_test.py @ 4bfd277

Last change on this file was 4bfd277, checked in by Paul Kienzle <pkienzle@…>, 8 years ago:

support ER/VR calls with vector parameters

# -*- coding: utf-8 -*-
"""
Run model unit tests.

Usage::

    python -m sasmodels.model_test [opencl|dll|opencl_and_dll] model1 model2 ...

    if model1 is 'all', then all models except those listed after it will be tested

Each model is tested using the default parameters at q=0.1, (qx, qy)=(0.1, 0.1),
and the ER and VR are computed.  The return values at these points are not
considered.  The test is only to verify that the models run to completion,
and do not produce inf or NaN.

Tests are defined with the *tests* attribute in the model.py file.  *tests*
is a list of individual tests to run, where each test consists of the
parameter values for the test, the q-values and the expected results.  For
the effective radius test, the q-value should be 'ER'.  For the VR test,
the q-value should be 'VR'.  For 1-D tests, either specify the q value or
a list of q-values, and the corresponding I(q) value, or list of I(q) values.

That is::

    tests = [
        [ {parameters}, q, I(q)],
        [ {parameters}, [q], [I(q)] ],
        [ {parameters}, [q1, q2, ...], [I(q1), I(q2), ...]],

        [ {parameters}, (qx, qy), I(qx, qy)],
        [ {parameters}, [(qx1, qy1), (qx2, qy2), ...],
                        [I(qx1, qy1), I(qx2, qy2), ...]],

        [ {parameters}, 'ER', ER(pars) ],
        [ {parameters}, 'VR', VR(pars) ],
        ...
    ]

Parameters are *key:value* pairs, where key is one of the parameters of the
model and value is the value to use for the test.  Any parameters not given
in the parameter list will take on the default parameter value.

Precision defaults to 5 digits (relative).
"""
#TODO: rename to tests so that tab completion works better for models directory

from __future__ import print_function

import sys
import unittest

import numpy as np

from .core import list_models, load_model_info, build_model, HAVE_OPENCL
from .details import dispersion_mesh
from .direct_model import call_kernel, get_weights
from .exception import annotate_exception
from .modelinfo import expand_pars

def call_ER(model_info, pars):
    """
    Call the model ER function using *pars*. *model_info* is either
    *model.info* if you have a loaded model, or *kernel.info* if you
    have a model kernel prepared for evaluation.
    """
    if model_info.ER is None:
        return 1.0
    else:
        value, weight = _vol_pars(model_info, pars)
        individual_radii = model_info.ER(*value)
        return np.sum(weight*individual_radii) / np.sum(weight)

def call_VR(model_info, pars):
    """
    Call the model VR function using *pars*.
    *model_info* is either *model.info* if you have a loaded model, or *kernel.info*
    if you have a model kernel prepared for evaluation.
    """
    if model_info.VR is None:
        return 1.0
    else:
        value, weight = _vol_pars(model_info, pars)
        whole, part = model_info.VR(*value)
        return np.sum(weight*part)/np.sum(weight*whole)

def _vol_pars(model_info, pars):
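    # Build the (value, weight) dispersion mesh over the model's volume
    # parameters so that ER/VR can be averaged over polydispersity.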
    vol_pars = [get_weights(p, pars)
                for p in model_info.parameters.call_parameters
                if p.type == 'volume']
    value, weight = dispersion_mesh(model_info, vol_pars)
    return value, weight
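# Rough usage sketch for the helpers above (the 'cylinder' model name and the
# radius value are illustrative; parameters are expanded to the full default
# set the same way run_one() does below):
#
#     info = load_model_info('cylinder')
#     pars = expand_pars(info.parameters, {'radius': 20.0})
#     effective_radius = call_ER(info, pars)   # dispersion-weighted ER
#     volume_ratio = call_VR(info, pars)       # 1.0 when the model defines no VR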


def make_suite(loaders, models):
    """
    Construct the pyunit test suite.

    *loaders* is the list of kernel drivers to use, which is one of
    *["dll", "opencl"]*, *["dll"]* or *["opencl"]*.  For python models,
    the python driver is always used.

    *models* is the list of models to test, or *["all"]* to test all models.
    """

    ModelTestCase = _hide_model_case_from_nosetests()
    suite = unittest.TestSuite()

    if models[0] == 'all':
        skip = models[1:]
        models = list_models()
    else:
        skip = []
    for model_name in models:
        if model_name in skip: continue
        model_info = load_model_info(model_name)

        #print('------')
        #print('found tests in', model_name)
        #print('------')

        # if is_py then use the dll loader to call the python kernel;
        # don't try to call the opencl kernel since it will not be
        # available in some environments.
        is_py = callable(model_info.Iq)

        if is_py:  # kernel implemented in python
            test_name = "Model: %s, Kernel: python"%model_name
            test_method_name = "test_%s_python" % model_name
            test = ModelTestCase(test_name, model_info,
                                 test_method_name,
                                 platform="dll",  # so that the python kernel is used
                                 dtype="double")
            suite.addTest(test)
        else:   # kernel implemented in C
            # test using opencl if desired and available
            if 'opencl' in loaders and HAVE_OPENCL:
                test_name = "Model: %s, Kernel: OpenCL"%model_name
                test_method_name = "test_%s_opencl" % model_name
                # Using dtype=None so that the models that are only
                # correct for double precision are not tested using
                # single precision.  The choice is determined by the
                # presence of *single=False* in the model file.
                test = ModelTestCase(test_name, model_info,
                                     test_method_name,
                                     platform="ocl", dtype=None)
                #print("defining", test_name)
                suite.addTest(test)

            # test using dll if desired
            if 'dll' in loaders:
                test_name = "Model: %s, Kernel: dll"%model_name
                test_method_name = "test_%s_dll" % model_name
                test = ModelTestCase(test_name, model_info,
                                     test_method_name,
                                     platform="dll",
                                     dtype="double")
                suite.addTest(test)

    return suite
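# For example, to run only the dll tests for a single model from an
# interactive session (the model name is illustrative):
#
#     suite = make_suite(['dll'], ['sphere'])
#     unittest.TextTestRunner(verbosity=2).run(suite)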


def _hide_model_case_from_nosetests():
    class ModelTestCase(unittest.TestCase):
        """
        Test suite for a particular model with a particular kernel driver.

        The test suite runs a simple smoke test to make sure the model
        functions, then runs the list of tests at the bottom of the model
        description file.
        """
        def __init__(self, test_name, model_info, test_method_name,
                     platform, dtype):
            self.test_name = test_name
            self.info = model_info
            self.platform = platform
            self.dtype = dtype

            setattr(self, test_method_name, self.run_all)
            unittest.TestCase.__init__(self, test_method_name)

        def run_all(self):
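            # Every model gets the same smoke tests: one 1-D q point, one 2-D
            # (qx, qy) point, and the ER and VR calls; an expected value of
            # None means the result only needs to be finite.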
            smoke_tests = [
                [{}, 0.1, None],
                [{}, (0.1, 0.1), None],
                [{}, 'ER', None],
                [{}, 'VR', None],
                ]

            tests = self.info.tests
            try:
                model = build_model(self.info, dtype=self.dtype,
                                    platform=self.platform)
                for test in smoke_tests + tests:
                    self.run_one(model, test)

                if not tests and self.platform == "dll":
                    ## Uncomment the following to make forgetting the test
                    ## values an error.  Only do so for the "dll" tests
                    ## to reduce noise from both opencl and dll, and because
                    ## python kernels use platform="dll".
                    #raise Exception("No test cases provided")
                    pass

            except:
                annotate_exception(self.test_name)
                raise

        def run_one(self, model, test):
            pars, x, y = test
            pars = expand_pars(self.info.parameters, pars)

            if not isinstance(y, list):
                y = [y]
            if not isinstance(x, list):
                x = [x]

            self.assertEqual(len(y), len(x))

            if x[0] == 'ER':
                actual = [call_ER(model.info, pars)]
            elif x[0] == 'VR':
                actual = [call_VR(model.info, pars)]
            elif isinstance(x[0], tuple):
                Qx, Qy = zip(*x)
                q_vectors = [np.array(Qx), np.array(Qy)]
                kernel = model.make_kernel(q_vectors)
                actual = call_kernel(kernel, pars)
            else:
                q_vectors = [np.array(x)]
                kernel = model.make_kernel(q_vectors)
                actual = call_kernel(kernel, pars)

            self.assertTrue(len(actual) > 0)
            self.assertEqual(len(y), len(actual))

            for xi, yi, actual_yi in zip(x, y, actual):
                if yi is None:
                    # smoke test --- make sure it runs and produces a value
                    self.assertTrue(np.isfinite(actual_yi),
                                    'invalid f(%s): %s' % (xi, actual_yi))
                else:
                    self.assertTrue(is_near(yi, actual_yi, 5),
                                    'f(%s); expected:%s; actual:%s'
                                    % (xi, yi, actual_yi))

    return ModelTestCase

def is_near(target, actual, digits=5):
    """
    Returns true if *actual* is within *digits* significant digits of *target*.
    """
    import math
    shift = 10**math.ceil(math.log10(abs(target)))
    return abs(target-actual)/shift < 1.5*10**-digits
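# For example, is_near(1.234e5, 1.2341e5) is True (the difference is in the
# sixth significant digit), while is_near(1.234e5, 1.244e5) is False.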

def main():
    """
    Run tests given in sys.argv.

    Returns 0 if success or 1 if any tests fail.
    """
    import xmlrunner

    models = sys.argv[1:]
    if models and models[0] == '-v':
        verbosity = 2
        models = models[1:]
    else:
        verbosity = 1
    if models and models[0] == 'opencl':
        if not HAVE_OPENCL:
            print("opencl is not available")
            return 1
        loaders = ['opencl']
        models = models[1:]
    elif models and models[0] == 'dll':
        # TODO: test if compiler is available?
        loaders = ['dll']
        models = models[1:]
    elif models and models[0] == 'opencl_and_dll':
        loaders = ['opencl', 'dll']
        models = models[1:]
    else:
        loaders = ['opencl', 'dll']
    if not models:
        print("""\
usage:
  python -m sasmodels.model_test [-v] [opencl|dll|opencl_and_dll] model1 model2 ...

If -v is included on the command line, then use verbose output.

If neither opencl nor dll is specified, then models will be tested with
both opencl and dll; the compute target is ignored for pure python models.

If model1 is 'all', then all models except those listed after it will be tested.

""")

        return 1

    #runner = unittest.TextTestRunner()
    runner = xmlrunner.XMLTestRunner(output='logs', verbosity=verbosity)
    result = runner.run(make_suite(loaders, models))
    return 1 if result.failures or result.errors else 0


def model_tests():
    """
    Test runner visible to nosetests.

    Run "nosetests sasmodels" on the command line to invoke it.
    """
    tests = make_suite(['opencl', 'dll'], ['all'])
    for test_i in tests:
        yield test_i.run_all


if __name__ == "__main__":
    sys.exit(main())