source: sasmodels/sasmodels/model_test.py @ d5b5b71

Last change on this file since d5b5b71 was e09d1e0, checked in by Paul Kienzle <pkienzle@…>, 7 years ago

check OpenCL against DLL results in test suite

# -*- coding: utf-8 -*-
"""
Run model unit tests.

Usage::

    python -m sasmodels.model_test [opencl|dll|opencl_and_dll] model1 model2 ...

    if model1 is 'all', then all models except those listed after it are tested

Each model is tested using the default parameters at q=0.1, (qx, qy)=(0.1, 0.1),
and the ER and VR are computed.  The return values at these points are not
checked against expected results; the test only verifies that the models run
to completion and do not produce inf or NaN.

Tests are defined with the *tests* attribute in the model.py file.  *tests*
is a list of individual tests to run, where each test consists of the
parameter values for the test, the q-values and the expected results.  For
the effective radius test, the q-value should be 'ER'.  For the VR test,
the q-value should be 'VR'.  For 1-D tests, either specify the q value or
a list of q-values, and the corresponding I(q) value, or list of I(q) values.

That is::

    tests = [
        [ {parameters}, q, I(q)],
        [ {parameters}, [q], [I(q)] ],
        [ {parameters}, [q1, q2, ...], [I(q1), I(q2), ...]],

        [ {parameters}, (qx, qy), I(qx, qy)],
        [ {parameters}, [(qx1, qy1), (qx2, qy2), ...],
                        [I(qx1, qy1), I(qx2, qy2), ...]],

        [ {parameters}, 'ER', ER(pars) ],
        [ {parameters}, 'VR', VR(pars) ],
        ...
    ]

Parameters are *key:value* pairs, where key is one of the parameters of the
model and value is the value to use for the test.  Any parameters not given
in the parameter list will take on the default parameter value.

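For example, a hypothetical plugin model with a *radius* parameter might
declare (parameter names and values here are illustrative, not taken from
a real model)::

    tests = [
        [{'radius': 30.0}, 0.1, 0.02233],
        [{'radius': 30.0, 'scale': 2.0}, [0.01, 0.1], [5.12, 0.04466]],
    ]
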
Precision defaults to 5 digits (relative).
"""
from __future__ import print_function

import sys
import unittest

import numpy as np  # type: ignore

from . import core
from .core import list_models, load_model_info, build_model
from .direct_model import call_kernel, call_ER, call_VR
from .exception import annotate_exception
from .modelinfo import expand_pars

try:
    from typing import Any, List, Iterator, Callable
except ImportError:
    pass
else:
    from .modelinfo import ParameterTable, ParameterSet, TestCondition, ModelInfo
    from .kernel import KernelModel


def make_suite(loaders, models):
    # type: (List[str], List[str]) -> unittest.TestSuite
    """
    Construct the pyunit test suite.

    *loaders* is the list of kernel drivers to use, which is one of
    *["dll", "opencl"]*, *["dll"]* or *["opencl"]*.  For python models,
    the python driver is always used.

    *models* is the list of models to test, or *["all"]* to test all models.
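
    For example (a sketch; 'sphere' and 'cylinder' name standard models in
    the sasmodels distribution)::

        suite = make_suite(['dll'], ['sphere', 'cylinder'])
        unittest.TextTestRunner().run(suite)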
    """
    ModelTestCase = _hide_model_case_from_nose()
    suite = unittest.TestSuite()

    if models[0] == 'all':
        skip = models[1:]
        models = list_models()
    else:
        skip = []
    for model_name in models:
        if model_name in skip:
            continue
        model_info = load_model_info(model_name)

        #print('------')
        #print('found tests in', model_name)
        #print('------')

        # If the model is implemented in python, use the dll loader to call
        # the python kernel; don't try to call the OpenCL kernel since it
        # will not be available in some environments.
        is_py = callable(model_info.Iq)

        # Some OpenCL drivers seem to be flaky, and are not producing the
        # expected result.  Since we don't have known test values yet for
        # all of our models, we are instead going to compare the results
        # for the 'smoke test' (that is, evaluation at q=0.1 for the default
        # parameters just to see that the model runs to completion) between
        # the OpenCL and the DLL.  To do this, we define a 'stash' which is
        # shared between OpenCL and DLL tests.  This is just a list.  If the
        # list is empty (which it will be when the DLL runs first), then the
        # results are appended to the list.  If the list is not empty (which
        # it will be when OpenCL runs second), the results are compared to
        # the results stored in the first element of the list.  This is a
        # horrible stateful hack which only makes sense because the test
        # suite is thrown away after being run once.
        stash = []

        if is_py:  # kernel implemented in python
            test_name = "Model: %s, Kernel: python"%model_name
            test_method_name = "test_%s_python" % model_info.id
            test = ModelTestCase(test_name, model_info,
                                 test_method_name,
                                 platform="dll",  # use dll loader for python kernels
                                 dtype="double",
                                 stash=stash)
            suite.addTest(test)
        else:   # kernel implemented in C

            # test using dll if desired
            if 'dll' in loaders or not core.HAVE_OPENCL:
                test_name = "Model: %s, Kernel: dll"%model_name
                test_method_name = "test_%s_dll" % model_info.id
                test = ModelTestCase(test_name, model_info,
                                     test_method_name,
                                     platform="dll",
                                     dtype="double",
                                     stash=stash)
                suite.addTest(test)

            # test using opencl if desired and available
            if 'opencl' in loaders and core.HAVE_OPENCL:
                test_name = "Model: %s, Kernel: OpenCL"%model_name
                test_method_name = "test_%s_opencl" % model_info.id
                # Using dtype=None so that the models that are only
                # correct for double precision are not tested using
                # single precision.  The choice is determined by the
                # presence of *single=False* in the model file.
                test = ModelTestCase(test_name, model_info,
                                     test_method_name,
                                     platform="ocl", dtype=None,
                                     stash=stash)
                #print("defining", test_name)
                suite.addTest(test)

    return suite


def _hide_model_case_from_nose():
    # type: () -> type
    class ModelTestCase(unittest.TestCase):
        """
        Test suite for a particular model with a particular kernel driver.

        The test suite runs a simple smoke test to make sure the model
        functions, then runs the list of tests at the bottom of the model
        description file.
        """
        def __init__(self, test_name, model_info, test_method_name,
                     platform, dtype, stash):
            # type: (str, ModelInfo, str, str, DType, List[Any]) -> None
            self.test_name = test_name
            self.info = model_info
            self.platform = platform
            self.dtype = dtype
            self.stash = stash  # container for the results of the first run
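
            # unittest locates the test method by name, so alias run_all
            # under the generated per-model method name; the test id shown
            # in reports then names the model under test.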
            setattr(self, test_method_name, self.run_all)
            unittest.TestCase.__init__(self, test_method_name)

        def run_all(self):
            # type: () -> None
            """
            Run all the tests in the test suite, including smoke tests.
            """
            smoke_tests = [
                # test validity at reasonable values
                ({}, 0.1, None),
                ({}, (0.1, 0.1), None),
                # test validity at q = 0
                #({}, 0.0, None),
                #({}, (0.0, 0.0), None),
                # test vector form
                ({}, [0.1]*2, [None]*2),
                ({}, [(0.1, 0.1)]*2, [None]*2),
                # test that ER/VR will run if they exist
                ({}, 'ER', None),
                ({}, 'VR', None),
                ]
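
            # Each entry uses the same (parameters, q, expected) layout as
            # the *tests* attribute in a model file; an expected value of
            # None means only check that the result is not NaN.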

            tests = smoke_tests + self.info.tests
            try:
                model = build_model(self.info, dtype=self.dtype,
                                    platform=self.platform)
                results = [self.run_one(model, test) for test in tests]
                if self.stash:
                    for test, target, actual in zip(tests, self.stash[0], results):
                        assert np.all(abs(target-actual) < 2e-5*abs(actual)), \
                            "expected %s but got %s for %s"%(target, actual, test[0])
                else:
                    self.stash.append(results)

                # Check for missing tests.  Only do so for the "dll" tests
                # to reduce noise from both opencl and dll, and because
                # python kernels use platform="dll".
                if self.platform == "dll":
                    missing = []
                    ## Uncomment the following to require test cases
                    #missing = self._find_missing_tests()
                    if missing:
                        raise ValueError("Missing tests for "+", ".join(missing))

            except:
                annotate_exception(self.test_name)
                raise

        def _find_missing_tests(self):
            # type: () -> List[str]
            """make sure there are 1D, 2D, ER and VR tests as appropriate"""
            model_has_VR = callable(self.info.VR)
            model_has_ER = callable(self.info.ER)
            model_has_1D = True
            model_has_2D = any(p.type == 'orientation'
                               for p in self.info.parameters.kernel_parameters)

            # Lists of tests that have a result that is not None
            single = [test for test in self.info.tests
                      if not isinstance(test[2], list) and test[2] is not None]
            tests_has_VR = any(test[1] == 'VR' for test in single)
            tests_has_ER = any(test[1] == 'ER' for test in single)
            tests_has_1D_single = any(isinstance(test[1], float) for test in single)
            tests_has_2D_single = any(isinstance(test[1], tuple) for test in single)

            multiple = [test for test in self.info.tests
                        if isinstance(test[2], list)
                            and not all(result is None for result in test[2])]
            tests_has_1D_multiple = any(isinstance(test[1][0], float)
                                        for test in multiple)
            tests_has_2D_multiple = any(isinstance(test[1][0], tuple)
                                        for test in multiple)

            missing = []
            if model_has_VR and not tests_has_VR:
                missing.append("VR")
            if model_has_ER and not tests_has_ER:
                missing.append("ER")
            if model_has_1D and not (tests_has_1D_single or tests_has_1D_multiple):
                missing.append("1D")
            if model_has_2D and not (tests_has_2D_single or tests_has_2D_multiple):
                missing.append("2D")

            return missing

        def run_one(self, model, test):
            # type: (KernelModel, TestCondition) -> np.ndarray
            """Run a single test case."""
            user_pars, x, y = test
            pars = expand_pars(self.info.parameters, user_pars)

            if not isinstance(y, list):
                y = [y]
            if not isinstance(x, list):
                x = [x]

            self.assertEqual(len(y), len(x))

            if x[0] == 'ER':
                actual = np.array([call_ER(model.info, pars)])
            elif x[0] == 'VR':
                actual = np.array([call_VR(model.info, pars)])
            elif isinstance(x[0], tuple):
                qx, qy = zip(*x)
                q_vectors = [np.array(qx), np.array(qy)]
                kernel = model.make_kernel(q_vectors)
                actual = call_kernel(kernel, pars)
            else:
                q_vectors = [np.array(x)]
                kernel = model.make_kernel(q_vectors)
                actual = call_kernel(kernel, pars)

            self.assertTrue(len(actual) > 0)
            self.assertEqual(len(y), len(actual))

            for xi, yi, actual_yi in zip(x, y, actual):
                if yi is None:
                    # smoke test --- make sure it runs and produces a value
                    self.assertTrue(not np.isnan(actual_yi),
                                    'invalid f(%s): %s' % (xi, actual_yi))
                elif np.isnan(yi):
                    self.assertTrue(np.isnan(actual_yi),
                                    'f(%s): expected:%s; actual:%s'
                                    % (xi, yi, actual_yi))
                else:
                    # is_near does not work for infinite values, so also
                    # check for exact equality before calling it.
                    self.assertTrue(yi == actual_yi or is_near(yi, actual_yi, 5),
                                    'f(%s): expected:%s; actual:%s'
                                    % (xi, yi, actual_yi))
            return actual

    return ModelTestCase

def is_near(target, actual, digits=5):
    # type: (float, float, int) -> bool
    """
    Returns true if *actual* is within *digits* significant digits of *target*.
    """
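    # Worked example: with target=1234.5 and digits=5, shift is
    # 10**ceil(log10(1234.5)) = 10**4, so any |target - actual| below
    # 1.5e-5 * 1e4 = 0.15 counts as agreement to 5 significant digits.
    # Note this assumes *target* is finite and nonzero (log10(0) is
    # undefined).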
    import math
    shift = 10**math.ceil(math.log10(abs(target)))
    return abs(target-actual)/shift < 1.5*10**-digits

def run_one(model):
    # type: (str) -> None
    """
    Run the tests for a single model, printing the results to stdout.

    *model* can be a python file, which is handy for checking user defined
    plugin models.
    """
    # Note that running main() directly did not work from within the
    # wxPython pycrust console.  Instead of the results appearing in the
    # window they were printed to the underlying console.
    from unittest.runner import TextTestResult, _WritelnDecorator

    # Build an object to capture and print the test results
    stream = _WritelnDecorator(sys.stdout)  # Add writeln() method to stream
    verbosity = 2
    descriptions = True
    result = TextTestResult(stream, descriptions, verbosity)

    # Build a test suite containing just the model
    loaders = ['opencl'] if core.HAVE_OPENCL else ['dll']
    models = [model]
    try:
        suite = make_suite(loaders, models)
    except Exception:
        import traceback
        stream.writeln(traceback.format_exc())
        return

    # Run the test suite
    suite.run(result)

    # Print the failures and errors
    for _, tb in result.errors:
        stream.writeln(tb)
    for _, tb in result.failures:
        stream.writeln(tb)

    # Warn if there are no user defined tests.
    # Note: the test suite constructed above only has one test in it, which
    # runs through some smoke tests to make sure the model runs, then runs
    # through the input-output pairs given in the model definition file.  To
    # check if any such pairs are defined, therefore, we just need to check if
    # they are in the first test of the test suite.  We do this with an
    # iterator since we don't have direct access to the list of tests in the
    # test suite.
    for test in suite:
        if not test.info.tests:
            stream.writeln("Note: %s has no user defined tests."%model)
        break
    else:
        stream.writeln("Note: no test suite created --- this should never happen")


def main(*models):
    # type: (*str) -> int
    """
    Run the tests given in *models*.

    Returns 0 on success or 1 if any tests fail.
    """
    try:
        from xmlrunner import XMLTestRunner as TestRunner
        test_args = {'output': 'logs'}
    except ImportError:
        from unittest import TextTestRunner as TestRunner
        test_args = {}

    if models and models[0] == '-v':
        verbosity = 2
        models = models[1:]
    else:
        verbosity = 1
    if models and models[0] == 'opencl':
        if not core.HAVE_OPENCL:
            print("opencl is not available")
            return 1
        loaders = ['opencl']
        models = models[1:]
    elif models and models[0] == 'dll':
        # TODO: test if compiler is available?
        loaders = ['dll']
        models = models[1:]
    elif models and models[0] == 'opencl_and_dll':
        loaders = ['opencl', 'dll'] if core.HAVE_OPENCL else ['dll']
        models = models[1:]
    else:
        loaders = ['opencl', 'dll'] if core.HAVE_OPENCL else ['dll']
    if not models:
        print("""\
usage:
  python -m sasmodels.model_test [-v] [opencl|dll] model1 model2 ...

If -v is included on the command line, then use verbose output.

If neither opencl nor dll is specified, then models will be tested with
both OpenCL and dll; the compute target is ignored for pure python models.

If model1 is 'all', then all models except those listed after it are tested.

""")

        return 1

    runner = TestRunner(verbosity=verbosity, **test_args)
    result = runner.run(make_suite(loaders, models))
    return 1 if result.failures or result.errors else 0


def model_tests():
    # type: () -> Iterator[Callable[[], None]]
    """
    Test runner visible to nosetests.

    Run "nosetests sasmodels" on the command line to invoke it.
    """
    loaders = ['opencl', 'dll'] if core.HAVE_OPENCL else ['dll']
    tests = make_suite(loaders, ['all'])
    for test_i in tests:
        # In order for nosetests to see the correct test name, we need to
        # set the description attribute of the returned function.  Since we
        # can't do this for the returned instance, wrap it in a lambda and
        # set the description on the lambda.  Otherwise we could just do:
        #    yield test_i.run_all
        # Bind test_i through a default argument so each lambda keeps its
        # own test rather than closing over the loop variable.
        L = lambda test_i=test_i: test_i.run_all()
        L.description = test_i.test_name
        yield L


if __name__ == "__main__":
    sys.exit(main(*sys.argv[1:]))