source: sasmodels/sasmodels/compare_many.py @ 9a66e65

Last change on this file was 9a66e65, checked in by Paul Kienzle <pkienzle@…>, 8 years ago

refactor sasmodels to sasview parameter conversion

#!/usr/bin/env python
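"""
Program to compare randomly generated parameter sets for each model against
the sasview implementation, printing CSV output for the parameter sets whose
results differ by more than the acceptable tolerance.
"""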
import sys
import traceback

import numpy as np

from . import core
from .kernelcl import environment
from .compare import (MODELS, randomize_model, suppress_pd, eval_sasview,
                      eval_opencl, eval_ctypes, make_data, get_demo_pars,
                      columnize, constrain_pars, constrain_new_to_old)

def calc_stats(target, value, index):
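    """
    Return summary statistics comparing value against target on the given
    index: the maximum relative error, the 95th percentile relative error,
    and the maximum absolute error and maximum value over the worst 5% of
    points.
    """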
    resid = abs(value-target)[index]
    relerr = resid/target[index]
    srel = np.argsort(relerr)
    #p90 = int(len(relerr)*0.90)
    p95 = int(len(relerr)*0.95)
    maxrel = np.max(relerr)
    rel95 = relerr[srel[p95]]
    maxabs = np.max(resid[srel[p95:]])
    maxval = np.max(value[srel[p95:]])
    return maxrel, rel95, maxabs, maxval

def print_column_headers(pars, parts):
    stats = list('Max rel err|95% rel err|Max abs err above 95% rel|Max value above 95% rel'.split('|'))
    groups = ['']
    for p in parts:
        groups.append(p)
        groups.extend(['']*(len(stats)-1))
    groups.append("Parameters")
    columns = ['Seed'] + stats*len(parts) + list(sorted(pars.keys()))
    print(','.join('"%s"'%c for c in groups))
    print(','.join('"%s"'%c for c in columns))

def compare_instance(name, data, index, N=1, mono=True, cutoff=1e-5,
                     precision='double'):
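    """
    Compare N randomized parameter sets for a single model against the
    sasview implementation, printing one CSV row for each seed whose
    maximum relative error exceeds the acceptable tolerance.
    """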
    model_definition = core.load_model_definition(name)
    pars = get_demo_pars(model_definition)
    header = '\n"Model","%s","Count","%d"'%(name, N)
    if not mono: header += ',"Cutoff",%g'%(cutoff,)
    print(header)

    # Some not very clean macros for evaluating the models and checking the
    # results.  They freely use variables from the current scope, even some
    # which have not been defined yet, complete with abuse of mutable lists
    # to allow them to update values in the current scope since nonlocal
    # declarations are not available in python 2.7.
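    # For example, a closure cannot rebind an outer local in python 2.7
    # (assignment would just create a new local variable), but it can mutate
    # one, so max_diff = [0] below is updated via max_diff[0] = ...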
    def try_model(fn, *args, **kw):
        try:
            result, _ = fn(model_definition, pars_i, data, *args, **kw)
        except KeyboardInterrupt:
            raise
        except Exception:
            traceback.print_exc()
            print("when comparing %s for %d"%(name, seed))
            if hasattr(data, 'qx_data'):
                result = np.NaN*data.data
            else:
                result = np.NaN*data.x
        return result
    def check_model(label, target, value, acceptable):
        stats = calc_stats(target, value, index)
        columns.extend(stats)
        labels.append(label)
        max_diff[0] = max(max_diff[0], stats[0])
        good[0] = good[0] and (stats[0] < acceptable)

    num_good = 0
    first = True
    max_diff = [0]
    for k in range(N):
        print("%s %d"%(name, k))
        pars_i, seed = randomize_model(pars)
        constrain_pars(model_definition, pars_i)
        constrain_new_to_old(model_definition, pars_i)
        if mono: suppress_pd(pars_i)

        good = [True]
        labels = []
        columns = []
        target = try_model(eval_sasview)
        #target = try_model(eval_ctypes, dtype='double', cutoff=0.)
        #target = try_model(eval_ctypes, dtype='longdouble', cutoff=0.)
        if precision == 'single':
            value = try_model(eval_opencl, dtype='single', cutoff=cutoff)
            check_model('GPU single', target, value, 5e-5)
            single_value = value  # remember for single/double comparison
        elif precision == 'double':
            if environment().has_type('double'):
                label = 'GPU double'
                value = try_model(eval_opencl, dtype='double', cutoff=cutoff)
            else:
                label = 'CPU double'
                value = try_model(eval_ctypes, dtype='double', cutoff=cutoff)
            check_model(label, target, value, 5e-14)
            double_value = value  # remember for single/double comparison
        elif precision == 'quad':
            value = try_model(eval_ctypes, dtype='longdouble', cutoff=cutoff)
            check_model('CPU quad', target, value, 5e-14)
        if 0:  # disabled: needs both single_value and double_value in one run
            check_model('single/double', double_value, single_value, 5e-5)

        columns += [v for _, v in sorted(pars_i.items())]
        if first:
            print_column_headers(pars_i, labels)
            first = False
        if good[0]:
            num_good += 1
        else:
            print(("%d,"%seed)+','.join("%g"%v for v in columns))
    print('"good","%d/%d","max diff",%g'%(num_good, N, max_diff[0]))


def print_usage():
    print("usage: compare_many.py MODEL COUNT (1dNQ|2dNQ) (CUTOFF|mono) (single|double|quad)")


def print_models():
    print(columnize(MODELS, indent="  "))


def print_help():
    print_usage()
    print("""\

MODEL is the name of the model, or "all" for all the models in
alphabetical order.

COUNT is the number of randomly generated parameter sets to try. A value
of "10000" is a reasonable check for monodisperse models, or "100" for
polydisperse models.  For a quick check, use "100" and "5" respectively.

NQ is the number of Q values to calculate.  If it starts with "1d", then
it is a 1-dimensional problem, with log spaced Q points from 1e-3 to 1.0.
If it starts with "2d" then it is a 2-dimensional problem, with linearly
spaced Q points from -1.0 to 1.0 in each dimension.  The usual values
are "1d100" for 1-D and "2d32" for 2-D.

CUTOFF is the cutoff value to use for the polydisperse distribution. Weights
below the cutoff will be ignored.  Use "mono" for monodisperse models.  The
choice of polydisperse parameters and the number of points in the distribution
are set in the compare.py defaults for each model.

PRECISION is the floating point precision to use for comparisons: "single",
"double" or "quad".

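For example, a quick monodisperse double precision check of the sphere
model at 100 Q points:

    compare_many.py sphere 100 1d100 mono double
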
Available models:
""")
    print_models()

def main():
    if len(sys.argv) != 6:
        print_help()
        sys.exit(1)

    model = sys.argv[1]
    if model not in MODELS and model != "all":
        print('Bad model %s.  Use "all" or one of:' % model)
        print_models()
        sys.exit(1)
    try:
        count = int(sys.argv[2])
        is2D = sys.argv[3].startswith('2d')
        assert sys.argv[3][1] == 'd'
        Nq = int(sys.argv[3][2:])
        mono = sys.argv[4] == 'mono'
        cutoff = float(sys.argv[4]) if not mono else 0
        precision = sys.argv[5]
    except Exception:
        traceback.print_exc()
        print_usage()
        sys.exit(1)

    data, index = make_data(qmax=1.0, is2D=is2D, Nq=Nq)
    model_list = [model] if model != "all" else MODELS
    for model in model_list:
        compare_instance(model, data, index, N=count, mono=mono,
                         cutoff=cutoff, precision=precision)

if __name__ == "__main__":
    main()