source: sasmodels/sasmodels/compare_many.py @ f4f3919

Last change on this file was f4f3919, checked in by Paul Kienzle <pkienzle@…>, 8 years ago:

improve performance timing by suppressing startup time

#!/usr/bin/env python
import sys
import traceback

import numpy as np

from . import core
from .kernelcl import environment
from .compare import (MODELS, randomize_model, suppress_pd, eval_sasview,
                      eval_opencl, eval_ctypes, make_data, get_demo_pars,
                      columnize, constrain_pars, constrain_new_to_old)

def calc_stats(target, value, index):
    # Residual and relative error at each of the selected points.
    resid = abs(value-target)[index]
    relerr = resid/target[index]
    # Sort by relative error so the worst 5% of points can be examined.
    srel = np.argsort(relerr)
    #p90 = int(len(relerr)*0.90)
    p95 = int(len(relerr)*0.95)
    maxrel = np.max(relerr)
    rel95 = relerr[srel[p95]]
    # Largest absolute error and value among the worst 5% of points.
    maxabs = np.max(resid[srel[p95:]])
    maxval = np.max(value[srel[p95:]])
    return maxrel, rel95, maxabs, maxval

def print_column_headers(pars, parts):
    stats = list('Max rel err|95% rel err|Max abs err above 95% rel|Max value above 95% rel'.split('|'))
    groups = ['']
    for p in parts:
        groups.append(p)
        groups.extend(['']*(len(stats)-1))
    groups.append("Parameters")
    columns = ['Seed'] + stats*len(parts) + list(sorted(pars.keys()))
    print(','.join('"%s"'%c for c in groups))
    print(','.join('"%s"'%c for c in columns))

def compare_instance(name, data, index, N=1, mono=True, cutoff=1e-5,
                     precision='double'):
    model_definition = core.load_model_definition(name)
    pars = get_demo_pars(model_definition)
    header = '\n"Model","%s","Count","%d"'%(name, N)
    if not mono: header += ',"Cutoff",%g'%(cutoff,)
    print(header)

    # Some not very clean macros for evaluating the models and checking the
    # results.  They freely use variables from the current scope, even some
    # which have not been defined yet, and they abuse mutable lists to update
    # values in the enclosing scope, since nonlocal declarations are not
    # available in python 2.7.
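    # For illustration only (not used below), the mutable-list trick works
    # like this: a nested function in python 2.7 cannot rebind a name in the
    # enclosing scope, but it can mutate a container bound there.
    #
    #     best = [0.0]                    # one-element list instead of nonlocal
    #     def update(x):
    #         best[0] = max(best[0], x)   # mutation is visible to the caller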
    def try_model(fn, *args, **kw):
        try:
            result, _ = fn(model_definition, pars_i, data, *args, **kw)
        except KeyboardInterrupt:
            raise
        except:
            traceback.print_exc()
            print("when comparing %s for %d"%(name, seed))
            # Return NaN so the failure is obvious in the output table.
            if hasattr(data, 'qx_data'):
                result = np.nan*data.data
            else:
                result = np.nan*data.x
        return result
    def check_model(label, target, value, acceptable):
        stats = calc_stats(target, value, index)
        columns.extend(stats)
        labels.append(label)
        max_diff[0] = max(max_diff[0], stats[0])
        good[0] = good[0] and (stats[0] < acceptable)

    num_good = 0
    first = True
    max_diff = [0]
    for k in range(N):
        print("%s %d"%(name, k))
        pars_i, seed = randomize_model(pars)
        constrain_pars(model_definition, pars_i)
        constrain_new_to_old(model_definition, pars_i)
        if mono:
            pars_i = suppress_pd(pars_i)

        good = [True]
        labels = []
        columns = []
        target = try_model(eval_sasview)
        #target = try_model(eval_ctypes, dtype='double', cutoff=0.)
        #target = try_model(eval_ctypes, dtype='longdouble', cutoff=0.)
        if precision == 'single':
            value = try_model(eval_opencl, dtype='single', cutoff=cutoff)
            check_model('GPU single', target, value, 5e-5)
            single_value = value  # remember for single/double comparison
        elif precision == 'double':
            if environment().has_type('double'):
                label = 'GPU double'
                value = try_model(eval_opencl, dtype='double', cutoff=cutoff)
            else:
                label = 'CPU double'
                value = try_model(eval_ctypes, dtype='double', cutoff=cutoff)
            check_model(label, target, value, 5e-14)
            double_value = value  # remember for single/double comparison
        elif precision == 'quad':
            # OpenCL has no long double type, so quad precision runs on the
            # CPU through ctypes.
            value = try_model(eval_ctypes, dtype='longdouble', cutoff=cutoff)
            check_model('CPU quad', target, value, 5e-14)
        if 0:  # disabled: needs both single and double values from one run
            check_model('single/double', double_value, single_value, 5e-5)

        columns += [v for _, v in sorted(pars_i.items())]
        if first:
            print_column_headers(pars_i, labels)
            first = False
        if good[0]:
            num_good += 1
        else:
            print(("%d,"%seed)+','.join("%g"%v for v in columns))
    print('"good","%d/%d","max diff",%g'%(num_good, N, max_diff[0]))


def print_usage():
    print("usage: compare_many.py MODEL COUNT (1dNQ|2dNQ) (CUTOFF|mono) (single|double|quad)")


def print_models():
    print(columnize(MODELS, indent="  "))

def print_help():
    print_usage()
    print("""\

MODEL is the name of the model, or "all" to run every model in
alphabetical order.

COUNT is the number of randomly generated parameter sets to try. A value
of "10000" is a reasonable check for monodisperse models, or "100" for
polydisperse models.  For a quick check, use "100" and "5" respectively.

NQ is the number of Q values to calculate.  If it starts with "1d", then
it is a 1-dimensional problem, with log spaced Q points from 1e-3 to 1.0.
If it starts with "2d" then it is a 2-dimensional problem, with linearly
spaced Q points from -1.0 to 1.0 in each dimension.  The usual values
are "1d100" for 1-D and "2d32" for 2-D.

CUTOFF is the cutoff value to use for the polydisperse distribution. Weights
below the cutoff will be ignored.  Use "mono" for monodisperse models.  The
choice of polydisperse parameters and the number of points in the distribution
are set in the compare.py defaults for each model.

PRECISION is the floating point precision to use for comparisons.

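For example, the following checks 100 random monodisperse parameter sets of
the cylinder model against 100 log-spaced 1-D points in double precision:

    compare_many.py cylinder 100 1d100 mono double
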
Available models:
""")
    print_models()

def main():
    if len(sys.argv) != 6:
        print_help()
        sys.exit(1)

    model = sys.argv[1]
    if model not in MODELS and model != "all":
        print('Bad model %s.  Use "all" or one of:' % model)
        print_models()
        sys.exit(1)
    try:
        count = int(sys.argv[2])
        is2D = sys.argv[3].startswith('2d')
        assert sys.argv[3][1] == 'd'
        Nq = int(sys.argv[3][2:])
        mono = sys.argv[4] == 'mono'
        cutoff = float(sys.argv[4]) if not mono else 0
        precision = sys.argv[5]
    except:
        traceback.print_exc()
        print_usage()
        sys.exit(1)

    data, index = make_data(qmax=1.0, is2D=is2D, Nq=Nq)
    model_list = [model] if model != "all" else MODELS
    for model in model_list:
        compare_instance(model, data, index, N=count, mono=mono,
                         cutoff=cutoff, precision=precision)

if __name__ == "__main__":
    main()