source: sasmodels/sasmodels/model_test.py @ 6592f56

core_shell_microgelscostrafo411magnetic_modelrelease_v0.94release_v0.95ticket-1257-vesicle-productticket_1156ticket_1265_superballticket_822_more_unit_tests
Last change on this file since 6592f56 was 897ca7f, checked in by Paul Kienzle <pkienzle@…>, 8 years ago

add run_one() to run tests on model from sasview console

  • Property mode set to 100644
File size: 12.9 KB
Line 
1# -*- coding: utf-8 -*-
2"""
3Run model unit tests.
4
5Usage::
6
7    python -m sasmodels.model_test [opencl|dll|opencl_and_dll] model1 model2 ...
8
9    if model1 is 'all', then all except the remaining models will be tested
10
11Each model is tested using the default parameters at q=0.1, (qx, qy)=(0.1, 0.1),
12and the ER and VR are computed.  The return values at these points are not
13considered.  The test is only to verify that the models run to completion,
14and do not produce inf or NaN.
15
16Tests are defined with the *tests* attribute in the model.py file.  *tests*
17is a list of individual tests to run, where each test consists of the
18parameter values for the test, the q-values and the expected results.  For
19the effective radius test, the q-value should be 'ER'.  For the VR test,
20the q-value should be 'VR'.  For 1-D tests, either specify the q value or
21a list of q-values, and the corresponding I(q) value, or list of I(q) values.
22
23That is::
24
25    tests = [
26        [ {parameters}, q, I(q)],
27        [ {parameters}, [q], [I(q)] ],
28        [ {parameters}, [q1, q2, ...], [I(q1), I(q2), ...]],
29
30        [ {parameters}, (qx, qy), I(qx, qy)],
31        [ {parameters}, [(qx1, qy1), (qx2, qy2), ...],
32                        [I(qx1, qy1), I(qx2, qy2), ...]],
33
34        [ {parameters}, 'ER', ER(pars) ],
35        [ {parameters}, 'VR', VR(pars) ],
36        ...
37    ]
38
39Parameters are *key:value* pairs, where key is one of the parameters of the
40model and value is the value to use for the test.  Any parameters not given
41in the parameter list will take on the default parameter value.
42
43Precision defaults to 5 digits (relative).
44"""
45from __future__ import print_function
46
47import sys
48import unittest
49
50import numpy as np  # type: ignore
51
52from . import core
53from .core import list_models, load_model_info, build_model
54from .direct_model import call_kernel, call_ER, call_VR
55from .exception import annotate_exception
56from .modelinfo import expand_pars
57
58try:
59    from typing import List, Iterator, Callable
60except ImportError:
61    pass
62else:
63    from .modelinfo import ParameterTable, ParameterSet, TestCondition, ModelInfo
64    from .kernel import KernelModel
65
66
def make_suite(loaders, models):
    # type: (List[str], List[str]) -> unittest.TestSuite
    """
    Construct the pyunit test suite.

    *loaders* is the list of kernel drivers to use, which is one of
    *["dll", "opencl"]*, *["dll"]* or *["opencl"]*.  For python models,
    the python driver is always used.

    *models* is the list of models to test, or *["all"]* to test all models.
    When *models* starts with "all", any remaining names are models to skip.
    """
    ModelTestCase = _hide_model_case_from_nose()
    suite = unittest.TestSuite()

    if models[0] == 'all':
        # With 'all', the remaining names are exclusions.  Use a set so the
        # per-model membership test below is O(1) rather than O(n).
        skip = set(models[1:])
        models = list_models()
    else:
        skip = set()
    for model_name in models:
        if model_name in skip:
            continue
        model_info = load_model_info(model_name)

        # If the kernel is implemented in python then use the dll loader to
        # call the pykernel; don't try to call the opencl kernel since it
        # will not be available in some environments.
        is_py = callable(model_info.Iq)

        if is_py:  # kernel implemented in python
            test_name = "Model: %s, Kernel: python"%model_name
            test_method_name = "test_%s_python" % model_info.id
            test = ModelTestCase(test_name, model_info,
                                 test_method_name,
                                 platform="dll",  # so that
                                 dtype="double")
            suite.addTest(test)
        else:   # kernel implemented in C
            # test using opencl if desired and available
            if 'opencl' in loaders and core.HAVE_OPENCL:
                test_name = "Model: %s, Kernel: OpenCL"%model_name
                test_method_name = "test_%s_opencl" % model_info.id
                # Using dtype=None so that the models that are only
                # correct for double precision are not tested using
                # single precision.  The choice is determined by the
                # presence of *single=False* in the model file.
                test = ModelTestCase(test_name, model_info,
                                     test_method_name,
                                     platform="ocl", dtype=None)
                suite.addTest(test)

            # test using dll if desired
            if 'dll' in loaders:
                test_name = "Model: %s, Kernel: dll"%model_name
                test_method_name = "test_%s_dll" % model_info.id
                test = ModelTestCase(test_name, model_info,
                                     test_method_name,
                                     platform="dll",
                                     dtype="double")
                suite.addTest(test)

    return suite
133
134
def _hide_model_case_from_nose():
    # type: () -> type
    """
    Build and return the *ModelTestCase* class.

    The class is created inside a factory function so that nose does not
    discover it at module level and try to instantiate it without the
    constructor arguments it requires.
    """
    class ModelTestCase(unittest.TestCase):
        """
        Test suite for a particular model with a particular kernel driver.

        The test suite runs a simple smoke test to make sure the model
        functions, then runs the list of tests at the bottom of the model
        description file.
        """
        def __init__(self, test_name, model_info, test_method_name,
                     platform, dtype):
            # type: (str, ModelInfo, str, str, DType) -> None
            self.test_name = test_name
            self.info = model_info
            self.platform = platform
            self.dtype = dtype

            # Register run_all under the dynamically chosen method name so
            # that unittest reports each model/driver combination under its
            # own test name.
            setattr(self, test_method_name, self.run_all)
            unittest.TestCase.__init__(self, test_method_name)

        def run_all(self):
            # type: () -> None
            """
            Run all the tests in the test suite, including smoke tests.
            """
            # Smoke tests use None for the expected value, meaning "just
            # check that it runs and does not produce NaN" (see run_one).
            smoke_tests = [
                # test validity at reasonable values
                ({}, 0.1, None),
                ({}, (0.1, 0.1), None),
                # test validity at q = 0
                #({}, 0.0, None),
                #({}, (0.0, 0.0), None),
                # test vector form
                ({}, [0.1]*2, [None]*2),
                ({}, [(0.1, 0.1)]*2, [None]*2),
                # test that ER/VR will run if they exist
                ({}, 'ER', None),
                ({}, 'VR', None),
                ]

            tests = self.info.tests
            try:
                model = build_model(self.info, dtype=self.dtype,
                                    platform=self.platform)
                for test in smoke_tests + tests:
                    self.run_one(model, test)

                if not tests and self.platform == "dll":
                    ## Uncomment the following to make forgetting the test
                    ## values an error.  Only do so for the "dll" tests
                    ## to reduce noise from both opencl and dll, and because
                    ## python kernels use platform="dll".
                    #raise Exception("No test cases provided")
                    pass

            except:
                # Tag the exception with the model/driver name before
                # re-raising so the unittest report identifies the model.
                annotate_exception(self.test_name)
                raise

        def run_one(self, model, test):
            # type: (KernelModel, TestCondition) -> None
            """
            Run a single test case: *(pars, x, y)* where *x* is a q value,
            a list of q values, an *(qx, qy)* tuple or list of tuples, or
            one of the special strings 'ER'/'VR'.  A *y* of None means
            smoke-test only (check the result is not NaN).
            """
            user_pars, x, y = test
            # Fill in defaults for any parameters not given in the test.
            pars = expand_pars(self.info.parameters, user_pars)

            # Normalize scalar x/y to one-element lists.
            if not isinstance(y, list):
                y = [y]
            if not isinstance(x, list):
                x = [x]

            self.assertEqual(len(y), len(x))

            if x[0] == 'ER':
                actual = [call_ER(model.info, pars)]
            elif x[0] == 'VR':
                actual = [call_VR(model.info, pars)]
            elif isinstance(x[0], tuple):
                # 2-D test: split [(qx, qy), ...] into parallel qx/qy vectors.
                qx, qy = zip(*x)
                q_vectors = [np.array(qx), np.array(qy)]
                kernel = model.make_kernel(q_vectors)
                actual = call_kernel(kernel, pars)
            else:
                # 1-D test: a single q vector.
                q_vectors = [np.array(x)]
                kernel = model.make_kernel(q_vectors)
                actual = call_kernel(kernel, pars)

            self.assertTrue(len(actual) > 0)
            self.assertEqual(len(y), len(actual))

            for xi, yi, actual_yi in zip(x, y, actual):
                if yi is None:
                    # smoke test --- make sure it runs and produces a value
                    self.assertTrue(not np.isnan(actual_yi),
                                    'invalid f(%s): %s' % (xi, actual_yi))
                elif np.isnan(yi):
                    self.assertTrue(np.isnan(actual_yi),
                                    'f(%s): expected:%s; actual:%s'
                                    % (xi, yi, actual_yi))
                else:
                    # is_near does not work for infinite values, so also test
                    # for exact equality; that also accepts the inf == inf
                    # case which is_near cannot handle.
                    self.assertTrue(yi == actual_yi or is_near(yi, actual_yi, 5),
                                    'f(%s); expected:%s; actual:%s'
                                    % (xi, yi, actual_yi))

    return ModelTestCase
242
def is_near(target, actual, digits=5):
    # type: (float, float, int) -> bool
    """
    Returns true if *actual* is within *digits* significant digits of *target*.

    A *target* of zero has no significant digits (and *log10(0)* is
    undefined), so in that case *actual* is compared directly against the
    absolute tolerance instead of raising ValueError.
    """
    import math
    tol = 1.5*10**-digits
    if target == 0.:
        # Cannot scale by significant digits of zero; use absolute tolerance.
        return abs(actual) < tol
    # Scale the difference by the order of magnitude of the target so the
    # comparison is relative to its leading digits.
    shift = 10**math.ceil(math.log10(abs(target)))
    return abs(target-actual)/shift < tol
251
def run_one(model):
    # type: (str) -> None
    """
    Run the tests for a single model, printing the results to stdout.

    *model* can by a python file, which is handy for checking user defined
    plugin models.
    """
    # Note that running main() directly did not work from within the
    # wxPython pycrust console.  Instead of the results appearing in the
    # window they were printed to the underlying console.
    from unittest.runner import TextTestResult, _WritelnDecorator

    # Capture test results on a stream wrapper that adds writeln().
    result = TextTestResult(
        stream=_WritelnDecorator(sys.stdout),
        descriptions=True,
        verbosity=2,
    )
    stream = result.stream

    # Build a test suite containing just the model, testing with opencl.
    try:
        suite = make_suite(['opencl'], [model])
    except Exception:
        import traceback
        stream.writeln(traceback.format_exc())
        return

    # Run the test suite
    suite.run(result)

    # Print the tracebacks for errors first, then failures.
    for _, tb in result.errors:
        stream.writeln(tb)
    for _, tb in result.failures:
        stream.writeln(tb)

    # Check if there are user defined tests.
    # Yes, it is naughty to peek into the structure of the test suite, and
    # to assume that it contains only one test.
    if not suite._tests[0].info.tests:
        stream.writeln("Note: %s has no user defined tests."%model)
295
296
def main(*models):
    # type: (*str) -> int
    """
    Run tests given in models.

    Returns 0 if success or 1 if any tests fail.
    """
    # Prefer the XML test runner (writes to logs/) when it is installed,
    # falling back to the plain text runner otherwise.
    try:
        from xmlrunner import XMLTestRunner as TestRunner
        test_args = {'output': 'logs'}
    except ImportError:
        from unittest import TextTestRunner as TestRunner
        test_args = {}

    args = list(models)

    # Optional -v flag must come first.
    verbosity = 1
    if args and args[0] == '-v':
        verbosity = 2
        del args[0]

    # Optional compute-target selector comes next; default is both.
    loaders = ['opencl', 'dll']
    if args and args[0] == 'opencl':
        if not core.HAVE_OPENCL:
            print("opencl is not available")
            return 1
        loaders = ['opencl']
        del args[0]
    elif args and args[0] == 'dll':
        # TODO: test if compiler is available?
        loaders = ['dll']
        del args[0]
    elif args and args[0] == 'opencl_and_dll':
        del args[0]

    if not args:
        print("""\
usage:
  python -m sasmodels.model_test [-v] [opencl|dll] model1 model2 ...

If -v is included on the command line, then use verbose output.

If neither opencl nor dll is specified, then models will be tested with
both OpenCL and dll; the compute target is ignored for pure python models.

If model1 is 'all', then all except the remaining models will be tested.

""")

        return 1

    runner = TestRunner(verbosity=verbosity, **test_args)
    result = runner.run(make_suite(loaders, args))
    return 1 if result.failures or result.errors else 0
350
351
def model_tests():
    # type: () -> Iterator[Callable[[], None]]
    """
    Test runner visible to nosetests.

    Run "nosetests sasmodels" on the command line to invoke it.

    Yields the *run_all* bound method of each test case so that nose
    executes every model against both compute targets.
    """
    suite = make_suite(['opencl', 'dll'], ['all'])
    for case in suite:
        yield case.run_all
362
363
364if __name__ == "__main__":
365    sys.exit(main(*sys.argv[1:]))
Note: See TracBrowser for help on using the repository browser.