Changes in / [c6084f1:275b07dc] in sasmodels


Location: sasmodels
Files: 3 edited

Legend: unchanged context lines are prefixed with a space, removed lines with -, and added lines with +; … marks skipped unchanged lines.
  • sasmodels/kernelpy.py

    r91bd550 r91bd550  
             self.info = model_info
             self.dtype = np.dtype('d')
    -        logger.info("make python model " + self.info.name)
     
         def make_kernel(self, q_vectors):
  • sasmodels/model_test.py

    r12eec1e r012cd34  
     import sys
     import unittest
    -import traceback
     
     try:
    …
     # pylint: enable=unused-import
     
    +
     def make_suite(loaders, models):
         # type: (List[str], List[str]) -> unittest.TestSuite
    …
         *models* is the list of models to test, or *["all"]* to test all models.
         """
    +    ModelTestCase = _hide_model_case_from_nose()
         suite = unittest.TestSuite()
     
    …
             skip = []
         for model_name in models:
    -        if model_name not in skip:
    -            model_info = load_model_info(model_name)
    -            _add_model_to_suite(loaders, suite, model_info)
    +        if model_name in skip:
    +            continue
    +        model_info = load_model_info(model_name)
    +
    +        #print('------')
    +        #print('found tests in', model_name)
    +        #print('------')
    +
    +        # if ispy then use the dll loader to call pykernel
    +        # don't try to call cl kernel since it will not be
    +        # available in some environmentes.
    +        is_py = callable(model_info.Iq)
    +
    +        # Some OpenCL drivers seem to be flaky, and are not producing the
    +        # expected result.  Since we don't have known test values yet for
    +        # all of our models, we are instead going to compare the results
    +        # for the 'smoke test' (that is, evaluation at q=0.1 for the default
    +        # parameters just to see that the model runs to completion) between
    +        # the OpenCL and the DLL.  To do this, we define a 'stash' which is
    +        # shared between OpenCL and DLL tests.  This is just a list.  If the
    +        # list is empty (which it will be when DLL runs, if the DLL runs
    +        # first), then the results are appended to the list.  If the list
    +        # is not empty (which it will be when OpenCL runs second), the results
    +        # are compared to the results stored in the first element of the list.
    +        # This is a horrible stateful hack which only makes sense because the
    +        # test suite is thrown away after being run once.
    +        stash = []
    +
    +        if is_py:  # kernel implemented in python
    +            test_name = "%s-python"%model_name
    +            test_method_name = "test_%s_python" % model_info.id
    +            test = ModelTestCase(test_name, model_info,
    +                                 test_method_name,
    +                                 platform="dll",  # so that
    +                                 dtype="double",
    +                                 stash=stash)
    +            suite.addTest(test)
    +        else:   # kernel implemented in C
    +
    +            # test using dll if desired
    +            if 'dll' in loaders or not use_opencl():
    +                test_name = "%s-dll"%model_name
    +                test_method_name = "test_%s_dll" % model_info.id
    +                test = ModelTestCase(test_name, model_info,
    +                                     test_method_name,
    +                                     platform="dll",
    +                                     dtype="double",
    +                                     stash=stash)
    +                suite.addTest(test)
    +
    +            # test using opencl if desired and available
    +            if 'opencl' in loaders and use_opencl():
    +                test_name = "%s-opencl"%model_name
    +                test_method_name = "test_%s_opencl" % model_info.id
    +                # Using dtype=None so that the models that are only
    +                # correct for double precision are not tested using
    +                # single precision.  The choice is determined by the
    +                # presence of *single=False* in the model file.
    +                test = ModelTestCase(test_name, model_info,
    +                                     test_method_name,
    +                                     platform="ocl", dtype=None,
    +                                     stash=stash)
    +                #print("defining", test_name)
    +                suite.addTest(test)
     
         return suite
    -
    -def _add_model_to_suite(loaders, suite, model_info):
    -    ModelTestCase = _hide_model_case_from_nose()
    -
    -    #print('------')
    -    #print('found tests in', model_name)
    -    #print('------')
    -
    -    # if ispy then use the dll loader to call pykernel
    -    # don't try to call cl kernel since it will not be
    -    # available in some environmentes.
    -    is_py = callable(model_info.Iq)
    -
    -    # Some OpenCL drivers seem to be flaky, and are not producing the
    -    # expected result.  Since we don't have known test values yet for
    -    # all of our models, we are instead going to compare the results
    -    # for the 'smoke test' (that is, evaluation at q=0.1 for the default
    -    # parameters just to see that the model runs to completion) between
    -    # the OpenCL and the DLL.  To do this, we define a 'stash' which is
    -    # shared between OpenCL and DLL tests.  This is just a list.  If the
    -    # list is empty (which it will be when DLL runs, if the DLL runs
    -    # first), then the results are appended to the list.  If the list
    -    # is not empty (which it will be when OpenCL runs second), the results
    -    # are compared to the results stored in the first element of the list.
    -    # This is a horrible stateful hack which only makes sense because the
    -    # test suite is thrown away after being run once.
    -    stash = []
    -
    -    if is_py:  # kernel implemented in python
    -        test_name = "%s-python"%model_info.name
    -        test_method_name = "test_%s_python" % model_info.id
    -        test = ModelTestCase(test_name, model_info,
    -                                test_method_name,
    -                                platform="dll",  # so that
    -                                dtype="double",
    -                                stash=stash)
    -        suite.addTest(test)
    -    else:   # kernel implemented in C
    -
    -        # test using dll if desired
    -        if 'dll' in loaders or not use_opencl():
    -            test_name = "%s-dll"%model_info.name
    -            test_method_name = "test_%s_dll" % model_info.id
    -            test = ModelTestCase(test_name, model_info,
    -                                    test_method_name,
    -                                    platform="dll",
    -                                    dtype="double",
    -                                    stash=stash)
    -            suite.addTest(test)
    -
    -        # test using opencl if desired and available
    -        if 'opencl' in loaders and use_opencl():
    -            test_name = "%s-opencl"%model_info.name
    -            test_method_name = "test_%s_opencl" % model_info.id
    -            # Using dtype=None so that the models that are only
    -            # correct for double precision are not tested using
    -            # single precision.  The choice is determined by the
    -            # presence of *single=False* in the model file.
    -            test = ModelTestCase(test_name, model_info,
    -                                    test_method_name,
    -                                    platform="ocl", dtype=None,
    -                                    stash=stash)
    -            #print("defining", test_name)
    -            suite.addTest(test)
    -
     
     def _hide_model_case_from_nose():
    …
         return abs(target-actual)/shift < 1.5*10**-digits
     
    -# CRUFT: old interface; should be deprecated and removed
    -def run_one(model_name):
    -    # msg = "use check_model(model_info) rather than run_one(model_name)"
    -    # warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
    -    try:
    -        model_info = load_model_info(model_name)
    -    except Exception:
    -        output = traceback.format_exc()
    -        return output
    -
    -    success, output = check_model(model_info)
    -    return output
    -
    -def check_model(model_info):
    -    # type: (ModelInfo) -> str
    -    """
    -    Run the tests for a single model, capturing the output.
    -
    -    Returns success status and the output string.
    +def run_one(model):
    +    # type: (str) -> str
    +    """
    +    Run the tests for a single model, printing the results to stdout.
    +
    +    *model* can by a python file, which is handy for checking user defined
    +    plugin models.
         """
         # Note that running main() directly did not work from within the
    …
         # Build a test suite containing just the model
         loaders = ['opencl'] if use_opencl() else ['dll']
    -    suite = unittest.TestSuite()
    -    _add_model_to_suite(loaders, suite, model_info)
    +    models = [model]
    +    try:
    +        suite = make_suite(loaders, models)
    +    except Exception:
    +        import traceback
    +        stream.writeln(traceback.format_exc())
    +        return
     
         # Warn if there are no user defined tests.
    …
         for test in suite:
             if not test.info.tests:
    -            stream.writeln("Note: %s has no user defined tests."%model_info.name)
    +            stream.writeln("Note: %s has no user defined tests."%model)
             break
         else:
    …
         output = stream.getvalue()
         stream.close()
    -    return result.wasSuccessful(), output
    +    return output
     
     
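The comments added to make_suite describe the 'stash' mechanism: a plain list shared between the DLL and OpenCL test cases for one model, where whichever platform runs first appends its smoke-test results and the second platform compares against the stored results. A minimal sketch of that pattern follows (hypothetical names, not part of the changeset; the real tests evaluate the compiled kernels rather than returning fixed values):

    import numpy as np

    def run_smoke_test(platform):
        # Hypothetical stand-in for evaluating a model at q=0.1 with its
        # default parameters on the given platform ("dll" or "ocl").
        return np.array([1.234, 5.678])

    def check_platform(platform, stash):
        # The first caller finds the stash empty and fills it; the second
        # caller compares its results against the stored first element.
        results = run_smoke_test(platform)
        if not stash:
            stash.append(results)
        else:
            np.testing.assert_allclose(results, stash[0], rtol=5e-5)

    stash = []                    # shared by the two test cases for one model
    check_platform("dll", stash)  # fills the stash
    check_platform("ocl", stash)  # compares against the DLL results

As the comment notes, this only works because each stash is created fresh when the suite is built and the suite is run exactly once.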
  • sasmodels/sasview_model.py

    rbd547d0 rbd547d0  
                 return value, [value], [1.0]
     
    -    @classmethod
    -    def runTests(cls):
    -        """
    -        Run any tests built into the model and captures the test output.
    -
    -        Returns success flag and output
    -        """
    -        from .model_test import check_model
    -        return check_model(cls._model_info)
    -
     def test_cylinder():
         # type: () -> float
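With the runTests classmethod removed from sasview_model, running a model's built-in tests and capturing the report falls to the reworked run_one in model_test, shown in the diff above. A small usage sketch, assuming only the run_one(model) signature from this changeset ("cylinder" is just an example model name):

    from sasmodels.model_test import run_one

    report = run_one("cylinder")   # returns the captured test output as a string
    print(report)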