source: sasmodels/sasmodels/compare_many.py @ 9404dd3

Last change on this file was 9404dd3, checked in by Paul Kienzle <pkienzle@…>, 8 years ago:

python 3.x support

#!/usr/bin/env python
import sys
import traceback

import numpy as np

from . import core
from .kernelcl import environment
from .compare import (MODELS, randomize_model, suppress_pd, eval_sasview,
                      eval_opencl, eval_ctypes, make_data, get_demo_pars,
                      columnize, constrain_pars)

def calc_stats(target, value, index):
    """Return (max rel err, 95% rel err, max abs err, max value), where the
    absolute error and value are taken over the worst 5% of the points."""
    resid = abs(value-target)[index]
    relerr = resid/target[index]
    srel = np.argsort(relerr)
    #p90 = int(len(relerr)*0.90)
    p95 = int(len(relerr)*0.95)
    maxrel = np.max(relerr)
    rel95 = relerr[srel[p95]]
    maxabs = np.max(resid[srel[p95:]])
    maxval = np.max(value[srel[p95:]])
    return maxrel, rel95, maxabs, maxval
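# Illustrative example (not in the original file): with
#     target = np.array([1.0, 2.0, 4.0]); value = np.array([1.1, 2.0, 4.0])
# and index selecting all three points, resid = [0.1, 0, 0] and
# relerr = [0.1, 0, 0], so calc_stats returns maxrel = 0.1, rel95 = 0.1
# (the relative error at the 95th percentile), maxabs = 0.1 and maxval = 1.1
# (the latter two taken over the worst 5% of points).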

def print_column_headers(pars, parts):
    stats = list('Max rel err|95% rel err|Max abs err above 95% rel|Max value above 95% rel'.split('|'))
    groups = ['']
    for p in parts:
        groups.append(p)
        groups.extend(['']*(len(stats)-1))
    groups.append("Parameters")
    columns = ['Seed'] + stats*len(parts) + list(sorted(pars.keys()))
    print(','.join('"%s"'%c for c in groups))
    print(','.join('"%s"'%c for c in columns))
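# Illustrative example (not in the original file): for
#     pars = {'radius': 20, 'scale': 1} and parts = ['GPU single']
# this prints two CSV header rows:
#     "","GPU single","","","","Parameters"
#     "Seed","Max rel err","95% rel err","Max abs err above 95% rel","Max value above 95% rel","radius","scale"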

def compare_instance(name, data, index, N=1, mono=True, cutoff=1e-5,
                     precision='double'):
    model_definition = core.load_model_definition(name)
    pars = get_demo_pars(model_definition)
    header = '\n"Model","%s","Count","%d"'%(name, N)
    if not mono: header += ',"Cutoff",%g'%(cutoff,)
    print(header)

    # Some not very clean macros for evaluating the models and checking the
    # results.  They freely use variables from the current scope, even some
    # which have not been defined yet, complete with abuse of mutable lists
    # to allow them to update values in the current scope since nonlocal
    # declarations are not available in python 2.7.
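    # Sketch of the mutable-list trick described above (illustrative, not in
    # the original file):
    #     total = [0.0]
    #     def accumulate(x):
    #         total[0] += x   # mutating an element works from the closure;
    #                         # rebinding 'total' itself would require nonlocal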
    def try_model(fn, *args, **kw):
        try:
            result, _ = fn(model_definition, pars_i, data, *args, **kw)
        except KeyboardInterrupt:
            raise
        except:
            traceback.print_exc()
            print("when comparing %s for %d"%(name, seed))
            if hasattr(data, 'qx_data'):
                result = np.NaN*data.data
            else:
                result = np.NaN*data.x
        return result
    def check_model(label, target, value, acceptable):
        stats = calc_stats(target, value, index)
        columns.extend(stats)
        labels.append(label)
        max_diff[0] = max(max_diff[0], stats[0])
        good[0] = good[0] and (stats[0] < acceptable)

    num_good = 0
    first = True
    max_diff = [0]
    for k in range(N):
        print("%s %d"%(name, k))
        pars_i, seed = randomize_model(pars)
        constrain_pars(model_definition, pars_i)
        if mono: suppress_pd(pars_i)

        good = [True]
        labels = []
        columns = []
        target = try_model(eval_sasview)
        #target = try_model(eval_ctypes, dtype='double', cutoff=0.)
        #target = try_model(eval_ctypes, dtype='longdouble', cutoff=0.)
        if precision == 'single':
            value = try_model(eval_opencl, dtype='single', cutoff=cutoff)
            check_model('GPU single', target, value, 5e-5)
            single_value = value  # remember for single/double comparison
        elif precision == 'double':
            if environment().has_double:
                label = 'GPU double'
                value = try_model(eval_opencl, dtype='double', cutoff=cutoff)
            else:
                label = 'CPU double'
                value = try_model(eval_ctypes, dtype='double', cutoff=cutoff)
            check_model(label, target, value, 5e-14)
            double_value = value  # remember for single/double comparison
        elif precision == 'quad':
            # quad runs on the CPU; OpenCL has no long double support
            value = try_model(eval_ctypes, dtype='longdouble', cutoff=cutoff)
            check_model('CPU quad', target, value, 5e-14)
        # Enable by hand to compare single and double directly (requires
        # both single_value and double_value from the branches above).
        if 0:
            check_model('single/double', double_value, single_value, 5e-5)

        columns += [v for _,v in sorted(pars_i.items())]
        if first:
            print_column_headers(pars_i, labels)
            first = False
        if good[0]:
            num_good += 1
        else:
            print(("%d,"%seed)+','.join("%g"%v for v in columns))
    print('"good","%d/%d","max diff",%g'%(num_good, N, max_diff[0]))
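
# Usage sketch (illustrative, not in the original file; "cylinder" is assumed
# to be one of the available model names):
#     data, index = make_data(qmax=1.0, is2D=False, Nq=100)
#     compare_instance("cylinder", data, index, N=100, mono=True,
#                      precision='double')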


def print_usage():
    print("usage: compare_many.py MODEL COUNT (1dNQ|2dNQ) (CUTOFF|mono) (single|double|quad)")


def print_models():
    print(columnize(MODELS, indent="  "))


def print_help():
    print_usage()
    print("""\

MODEL is the name of the model, or "all" to run all models in
alphabetical order.

COUNT is the number of randomly generated parameter sets to try. A value
of "10000" is a reasonable check for monodisperse models, or "100" for
polydisperse models.  For a quick check, use "100" and "5" respectively.

NQ is the number of Q values to calculate.  If it starts with "1d", then
it is a 1-dimensional problem, with log spaced Q points from 1e-3 to 1.0.
If it starts with "2d" then it is a 2-dimensional problem, with linearly
spaced Q points from -1.0 to 1.0 in each dimension.  The usual values
are "1d100" for 1-D and "2d32" for 2-D.

CUTOFF is the cutoff value to use for the polydisperse distribution.  Weights
below the cutoff will be ignored.  Use "mono" for monodisperse models.  The
choice of polydisperse parameters and the number of points in the distribution
are set in the compare.py defaults for each model.

PRECISION is the floating point precision to use for comparisons: "single",
"double" or "quad".

Available models:
""")
    print_models()

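# Example invocation (illustrative): check the sphere model against 100
# random parameter sets on a 100-point 1-D grid, monodisperse, in double
# precision ("sphere" is assumed to be an available model name):
#     python compare_many.py sphere 100 1d100 mono double
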
def main():
    if len(sys.argv) != 6:
        print_help()
        sys.exit(1)

    model = sys.argv[1]
    if not (model in MODELS) and (model != "all"):
        print('Bad model %s.  Use "all" or one of:' % model)
        print_models()
        sys.exit(1)
    try:
        count = int(sys.argv[2])
        is2D = sys.argv[3].startswith('2d')
        assert sys.argv[3][1] == 'd'
        Nq = int(sys.argv[3][2:])
        mono = sys.argv[4] == 'mono'
        cutoff = float(sys.argv[4]) if not mono else 0
        precision = sys.argv[5]
    except:
        traceback.print_exc()
        print_usage()
        sys.exit(1)
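
    # Parsing examples (illustrative, not in the original file):
    #     argv[3] == "2d32"  gives  is2D=True,  Nq=32
    #     argv[4] == "1e-5"  gives  mono=False, cutoff=1e-5
    #     argv[4] == "mono"  gives  mono=True,  cutoff=0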
    data, index = make_data(qmax=1.0, is2D=is2D, Nq=Nq)
    model_list = [model] if model != "all" else MODELS
    for model in model_list:
        compare_instance(model, data, index, N=count, mono=mono,
                         cutoff=cutoff, precision=precision)

if __name__ == "__main__":
    main()