Changeset 1198f90 in sasmodels
- Timestamp:
- May 17, 2018 8:07:02 PM (7 years ago)
- Branches:
- master, core_shell_microgels, magnetic_model, ticket-1257-vesicle-product, ticket_1156, ticket_1265_superball, ticket_822_more_unit_tests
- Children:
- df0d2ca
- Parents:
- 33969b6
- Location:
- sasmodels
- Files:
- 3 edited
Legend:
- Unmodified
- Added
- Removed
-
sasmodels/compare.py
r1fbadb2 r1198f90 645 645 646 646 def make_data(opts): 647 # type: (Dict[str, Any] ) -> Tuple[Data, np.ndarray]647 # type: (Dict[str, Any], float) -> Tuple[Data, np.ndarray] 648 648 """ 649 649 Generate an empty dataset, used with the model to set Q points … … 786 786 base_n, comp_n = opts['count'] 787 787 base_pars, comp_pars = opts['pars'] 788 data = opts['data']788 base_data, comp_data = opts['data'] 789 789 790 790 comparison = comp is not None … … 800 800 print("%s t=%.2f ms, intensity=%.0f" 801 801 % (base.engine, base_time, base_value.sum())) 802 _show_invalid( data, base_value)802 _show_invalid(base_data, base_value) 803 803 except ImportError: 804 804 traceback.print_exc() … … 812 812 print("%s t=%.2f ms, intensity=%.0f" 813 813 % (comp.engine, comp_time, comp_value.sum())) 814 _show_invalid( data, comp_value)814 _show_invalid(base_data, comp_value) 815 815 except ImportError: 816 816 traceback.print_exc() … … 866 866 have_base, have_comp = (base_value is not None), (comp_value is not None) 867 867 base, comp = opts['engines'] 868 data = opts['data']868 base_data, comp_data = opts['data'] 869 869 use_data = (opts['datafile'] is not None) and (have_base ^ have_comp) 870 870 … … 884 884 if have_comp: 885 885 plt.subplot(131) 886 plot_theory( data, base_value, view=view, use_data=use_data, limits=limits)886 plot_theory(base_data, base_value, view=view, use_data=use_data, limits=limits) 887 887 plt.title("%s t=%.2f ms"%(base.engine, base_time)) 888 888 #cbar_title = "log I" … … 891 891 plt.subplot(132) 892 892 if not opts['is2d'] and have_base: 893 plot_theory( data, base_value, view=view, use_data=use_data, limits=limits)894 plot_theory( data, comp_value, view=view, use_data=use_data, limits=limits)893 plot_theory(comp_data, base_value, view=view, use_data=use_data, limits=limits) 894 plot_theory(comp_data, comp_value, view=view, use_data=use_data, limits=limits) 895 895 plt.title("%s t=%.2f ms"%(comp.engine, comp_time)) 896 896 #cbar_title = "log I" … … 908 
908 err[err > cutoff] = cutoff 909 909 #err,errstr = base/comp,"ratio" 910 plot_theory(data, None, resid=err, view=errview, use_data=use_data) 910 # Note: base_data only since base and comp have same q values (though 911 # perhaps different resolution), and we are plotting the difference 912 # at each q 913 plot_theory(base_data, None, resid=err, view=errview, use_data=use_data) 911 914 plt.xscale('log' if view == 'log' and not opts['is2d'] else 'linear') 912 915 plt.legend(['P%d'%(k+1) for k in range(setnum+1)], loc='best') … … 1075 1078 'qmax' : 0.05, 1076 1079 'nq' : 128, 1077 'res' : 0.0,1080 'res' : '0.0', 1078 1081 'noise' : 0.0, 1079 1082 'accuracy' : 'Low', … … 1115 1118 elif arg.startswith('-q='): 1116 1119 opts['qmin'], opts['qmax'] = [float(v) for v in arg[3:].split(':')] 1117 elif arg.startswith('-res='): opts['res'] = float(arg[5:])1120 elif arg.startswith('-res='): opts['res'] = arg[5:] 1118 1121 elif arg.startswith('-noise='): opts['noise'] = float(arg[7:]) 1119 1122 elif arg.startswith('-sets='): opts['sets'] = int(arg[6:]) … … 1173 1176 if opts['qmin'] is None: 1174 1177 opts['qmin'] = 0.001*opts['qmax'] 1175 if opts['datafile'] is not None:1176 data = load_data(os.path.expanduser(opts['datafile']))1177 else:1178 data, _ = make_data(opts)1179 1178 1180 1179 comparison = any(PAR_SPLIT in v for v in values) … … 1216 1215 opts['cutoff'] = [float(opts['cutoff'])]*2 1217 1216 1218 base = make_engine(model_info[0], data, opts['engine'][0], 1217 if PAR_SPLIT in opts['res']: 1218 opts['res'] = [float(k) for k in opts['res'].split(PAR_SPLIT, 2)] 1219 comparison = True 1220 else: 1221 opts['res'] = [float(opts['res'])]*2 1222 1223 if opts['datafile'] is not None: 1224 data = load_data(os.path.expanduser(opts['datafile'])) 1225 else: 1226 # Hack around the fact that make_data doesn't take a pair of resolutions 1227 res = opts['res'] 1228 opts['res'] = res[0] 1229 data0, _ = make_data(opts) 1230 if res[0] != res[1]: 1231 opts['res'] = res[1] 1232 data1, _ = 
make_data(opts) 1233 else: 1234 data1 = data0 1235 opts['res'] = res 1236 data = data0, data1 1237 1238 base = make_engine(model_info[0], data[0], opts['engine'][0], 1219 1239 opts['cutoff'][0], opts['ngauss'][0]) 1220 1240 if comparison: 1221 comp = make_engine(model_info[1], data , opts['engine'][1],1241 comp = make_engine(model_info[1], data[1], opts['engine'][1], 1222 1242 opts['cutoff'][1], opts['ngauss'][1]) 1223 1243 else: -
sasmodels/jitter.py
rb3703f5 r1198f90 774 774 # set small jitter as 0 if multiple pd dims 775 775 dims = sum(v > 0 for v in jitter) 776 limit = [0, 0 , 0.5, 5][dims]776 limit = [0, 0.5, 5][dims] 777 777 jitter = [0 if v < limit else v for v in jitter] 778 778 axes.cla() -
sasmodels/resolution.py
r2d81cfe r1198f90 20 20 MINIMUM_RESOLUTION = 1e-8 21 21 MINIMUM_ABSOLUTE_Q = 0.02 # relative to the minimum q in the data 22 PINHOLE_N_SIGMA = 2.5 # From: Barker & Pedersen 1995 JAC 22 23 23 24 class Resolution(object): … … 65 66 *q_calc* is the list of points to calculate, or None if this should 66 67 be estimated from the *q* and *q_width*. 67 """ 68 def __init__(self, q, q_width, q_calc=None, nsigma=3): 68 69 *nsigma* is the width of the resolution function. Should be 2.5. 70 See :func:`pinhole_resolution` for details. 71 """ 72 def __init__(self, q, q_width, q_calc=None, nsigma=PINHOLE_N_SIGMA): 69 73 #*min_step* is the minimum point spacing to use when computing the 70 74 #underlying model. It should be on the order of … … 88 92 # Build weight matrix from calculated q values 89 93 self.weight_matrix = pinhole_resolution( 90 self.q_calc, self.q, np.maximum(q_width, MINIMUM_RESOLUTION)) 94 self.q_calc, self.q, np.maximum(q_width, MINIMUM_RESOLUTION), 95 nsigma=nsigma) 91 96 self.q_calc = abs(self.q_calc) 92 97 … … 154 159 155 160 156 def pinhole_resolution(q_calc, q, q_width ):157 """161 def pinhole_resolution(q_calc, q, q_width, nsigma=PINHOLE_N_SIGMA): 162 r""" 158 163 Compute the convolution matrix *W* for pinhole resolution 1-D data. 159 164 … … 162 167 *W*, the resolution smearing can be computed using *dot(W,q)*. 163 168 169 Note that resolution is limited to $\pm 2.5 \sigma$.[1] The true resolution 170 function is a broadened triangle, and does not extend over the entire 171 range $(-\infty, +\infty)$. It is important to impose this limitation 172 since some models fall so steeply that the weighted value in gaussian 173 tails would otherwise dominate the integral. 174 164 175 *q_calc* must be increasing. *q_width* must be greater than zero. 176 177 [1] Barker, J. G., and J. S. Pedersen. 1995. Instrumental Smearing Effects 178 in Radially Symmetric Small-Angle Neutron Scattering by Numerical and 179 Analytical Methods. 
Journal of Applied Crystallography 28 (2): 105--14. 180 https://doi.org/10.1107/S0021889894010095. 165 181 """ 166 182 # The current algorithm is a midpoint rectangle rule. In the test case, … … 170 186 cdf = erf((edges[:, None] - q[None, :]) / (sqrt(2.0)*q_width)[None, :]) 171 187 weights = cdf[1:] - cdf[:-1] 188 # Limit q range to +/- 2.5 sigma 189 weights[q_calc[:, None] < (q - nsigma*q_width)[None, :]] = 0. 190 weights[q_calc[:, None] > (q + nsigma*q_width)[None, :]] = 0. 172 191 weights /= np.sum(weights, axis=0)[None, :] 173 192 return weights
Note: See TracChangeset for help on using the changeset viewer.