Changeset 319ab14 in sasmodels for sasmodels/compare_many.py
- Timestamp: Nov 25, 2015, 11:12:06 AM (9 years ago)
- Branches: master, core_shell_microgels, costrafo411, magnetic_model, release_v0.94, release_v0.95, ticket-1257-vesicle-product, ticket_1156, ticket_1265_superball, ticket_822_more_unit_tests
- Children: 0fa687d
- Parents: 38d8774
- File: 1 edited (sasmodels/compare_many.py)
sasmodels/compare_many.py
--- sasmodels/compare_many.py	(rb514adf)
+++ sasmodels/compare_many.py	(r319ab14)
@@ -11,5 +11,5 @@
                       columnize, constrain_pars)
 
-def get_stats(target, value, index):
+def calc_stats(target, value, index):
     resid = abs(value-target)[index]
     relerr = resid/target[index]
@@ -34,5 +34,6 @@
     print(','.join('"%s"'%c for c in columns))
 
-def compare_instance(name, data, index, N=1, mono=True, cutoff=1e-5):
+def compare_instance(name, data, index, N=1, mono=True, cutoff=1e-5,
+                     precision='double'):
     model_definition = core.load_model_definition(name)
     pars = get_demo_pars(model_definition)
@@ -41,5 +42,10 @@
         print(header)
 
-    def trymodel(fn, *args, **kw):
+    # Some not very clean macros for evaluating the models and checking the
+    # results.  They freely use variables from the current scope, even some
+    # which have not been defined yet, complete with abuse of mutable lists
+    # to allow them to update values in the current scope since nonlocal
+    # declarations are not available in python 2.7.
+    def try_model(fn, *args, **kw):
         try:
             result, _ = fn(model_definition, pars_i, data, *args, **kw)
@@ -54,8 +60,14 @@
             result = np.NaN*data.x
         return result
+    def check_model(label, target, value, acceptable):
+        stats = calc_stats(target, value, index)
+        columns.extend(stats)
+        labels.append('GPU single')
+        max_diff[0] = max(max_diff[0], stats[0])
+        good[0] = good[0] and (stats[0] < acceptable)
 
     num_good = 0
     first = True
-    max_diff = 0
+    max_diff = [0]
     for k in range(N):
         print >>sys.stderr, name, k
@@ -64,36 +76,27 @@
         if mono: suppress_pd(pars_i)
 
-        good = True
+        good = [True]
         labels = []
         columns = []
-        if 1:
-            sasview_value = trymodel(eval_sasview)
+        #target = try_model(eval_sasview)
+        target = try_model(eval_opencl, dtype='longdouble', cutoff=cutoff)
+        if precision == 'single':
+            value = try_model(eval_opencl, dtype='single', cutoff=cutoff)
+            check_model('GPU single', target, value, 5e-5)
+            single_value = value  # remember for single/double comparison
+        elif precision == 'double':
+            if environment().has_double:
+                label = 'GPU double'
+                value = try_model(eval_opencl, dtype='double', cutoff=cutoff)
+            else:
+                label = 'CPU double'
+                value = try_model(eval_ctypes, dtype='double', cutoff=cutoff)
+            check_model(label, target, value, 5e-14)
+            double_value = value  # remember for single/double comparison
+        elif precision == 'quad':
+            value = try_model(eval_opencl, dtype='longdouble', cutoff=cutoff)
+            check_model('CPU quad', target, value, 5e-14)
         if 0:
-            gpu_single_value = trymodel(eval_opencl, dtype='single', cutoff=cutoff)
-            stats = get_stats(sasview_value, gpu_single_value, index)
-            columns.extend(stats)
-            labels.append('GPU single')
-            max_diff = max(max_diff, stats[0])
-            good = good and (stats[0] < 5e-5)
-        if 0 and environment().has_double:
-            gpu_double_value = trymodel(eval_opencl, dtype='double', cutoff=cutoff)
-            stats = get_stats(sasview_value, gpu_double_value, index)
-            columns.extend(stats)
-            labels.append('GPU double')
-            max_diff = max(max_diff, stats[0])
-            good = good and (stats[0] < 1e-12)
-        if 1:
-            cpu_double_value = trymodel(eval_ctypes, dtype='double', cutoff=cutoff)
-            stats = get_stats(sasview_value, cpu_double_value, index)
-            columns.extend(stats)
-            labels.append('CPU double')
-            max_diff = max(max_diff, stats[0])
-            good = good and (stats[0] < 1e-12)
-        if 0:
-            stats = get_stats(cpu_double_value, gpu_single_value, index)
-            columns.extend(stats)
-            labels.append('single/double')
-            max_diff = max(max_diff, stats[0])
-            good = good and (stats[0] < 5e-5)
+            check_model('single/double', double_value, single_value, 5e-5)
 
         columns += [v for _,v in sorted(pars_i.items())]
@@ -101,11 +104,11 @@
             print_column_headers(pars_i, labels)
             first = False
-        if good:
+        if good[0]:
             num_good += 1
         else:
             print(("%d,"%seed)+','.join("%g"%v for v in columns))
-    print '"good","%d/%d","max diff",%g'%(num_good, N, max_diff)
+    print '"good","%d/%d","max diff",%g'%(num_good, N, max_diff[0])
 
 
 def print_usage():
-    print "usage: compare_many.py MODEL COUNT (1dNQ|2dNQ) (CUTOFF|mono)"
+    print "usage: compare_many.py MODEL COUNT (1dNQ|2dNQ) (CUTOFF|mono) (single|double|quad)"
@@ -138,4 +141,6 @@
     is set in compare.py defaults for each model.
 
+    PRECISION is the floating point precision to use for comparisons.
+
     Available models:
     """)
@@ -143,5 +148,5 @@
 
 def main():
-    if len(sys.argv) == 1:
+    if len(sys.argv) != 6:
         print_help()
         sys.exit(1)
@@ -159,5 +164,7 @@
         mono = sys.argv[4] == 'mono'
         cutoff = float(sys.argv[4]) if not mono else 0
+        precision = sys.argv[5]
     except:
+        traceback.print_exc()
         print_usage()
         sys.exit(1)
@@ -166,5 +173,6 @@
     model_list = [model] if model != "all" else MODELS
     for model in model_list:
-        compare_instance(model, data, index, N=count, mono=mono, cutoff=cutoff)
+        compare_instance(model, data, index, N=count, mono=mono,
+                         cutoff=cutoff, precision=precision)
 
 if __name__ == "__main__":
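The comment added in this changeset calls out the trick behind max_diff = [0] and good = [True]: Python 2.7 has no nonlocal statement, so the nested try_model/check_model helpers cannot rebind locals of compare_instance, but they can mutate a one-element list that those locals reference. A minimal, self-contained sketch of the pattern (the function names here are illustrative, not from the changeset):

    def outer():
        max_diff = [0.0]            # one-element list acts as a shared cell
        def record(diff):
            # 'max_diff = ...' would create a new local inside record();
            # mutating the list updates the value the outer scope sees.
            max_diff[0] = max(max_diff[0], diff)
        record(3e-6)
        record(7e-5)
        return max_diff[0]

    print(outer())                  # prints 7e-05

In Python 3 the same effect is spelled "nonlocal max_diff" with a plain float.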
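The renamed calc_stats feeds check_model, which tests stats[0] against a tolerance (5e-5 for single, 5e-14 for double and quad). Only the first two lines of its body are visible in this view; a hedged NumPy sketch of that visible part, with assumed summary reductions, is:

    import numpy as np

    def calc_stats_sketch(target, value, index):
        # The first two lines mirror the diff; the returned reductions are
        # assumptions, since the changeset view truncates the real body.
        # check_model() compares stats[0] against a tolerance, so the first
        # statistic is presumably a worst-case relative error.
        resid = np.abs(value - target)[index]
        relerr = resid / target[index]
        return np.max(relerr), np.median(relerr)

    target = np.array([1.0, 2.0, 4.0])
    value = np.array([1.0, 2.1, 3.8])
    print(calc_stats_sketch(target, value, slice(None)))  # (0.05, 0.05)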
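With the stricter argument check (len(sys.argv) != 6), every invocation must now supply all five arguments from the usage string. An illustrative command line (model name, count, and cutoff are made-up values; per the 1dNQ form, 1d100 presumably requests 1-D data with 100 q points):

    python compare_many.py sphere 10 1d100 1e-5 double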