Changeset [f796469:3fc4097] in sasmodels
Location: sasmodels
Files: 4 edited
sasmodels/compare.py
Diff from r1fbadb2 to r65fbf7c:

@@ -645,5 +645,5 @@
 
 def make_data(opts):
-    # type: (Dict[str, Any]) -> Tuple[Data, np.ndarray]
+    # type: (Dict[str, Any], float) -> Tuple[Data, np.ndarray]
     """
     Generate an empty dataset, used with the model to set Q points
@@ -667,5 +667,8 @@
     if opts['zero']:
         q = np.hstack((0, q))
-    data = empty_data1D(q, resolution=res)
+    # TODO: provide command line control of lambda and Delta lambda/lambda
+    #L, dLoL = 5, 0.14/np.sqrt(6)  # wavelength and 14% triangular FWHM
+    L, dLoL = 0, 0
+    data = empty_data1D(q, resolution=res, L=L, dL=L*dLoL)
     index = slice(None, None)
     return data, index
@@ -786,5 +789,5 @@
     base_n, comp_n = opts['count']
     base_pars, comp_pars = opts['pars']
-    data = opts['data']
+    base_data, comp_data = opts['data']
 
     comparison = comp is not None
@@ -800,5 +803,5 @@
             print("%s t=%.2f ms, intensity=%.0f"
                   % (base.engine, base_time, base_value.sum()))
-            _show_invalid(data, base_value)
+            _show_invalid(base_data, base_value)
         except ImportError:
             traceback.print_exc()
@@ -812,5 +815,5 @@
             print("%s t=%.2f ms, intensity=%.0f"
                   % (comp.engine, comp_time, comp_value.sum()))
-            _show_invalid(data, comp_value)
+            _show_invalid(base_data, comp_value)
         except ImportError:
             traceback.print_exc()
@@ -866,9 +869,10 @@
     have_base, have_comp = (base_value is not None), (comp_value is not None)
     base, comp = opts['engines']
-    data = opts['data']
+    base_data, comp_data = opts['data']
     use_data = (opts['datafile'] is not None) and (have_base ^ have_comp)
 
     # Plot if requested
     view = opts['view']
+    #view = 'log'
     if limits is None:
         vmin, vmax = np.inf, -np.inf
@@ -884,5 +888,5 @@
         if have_comp:
             plt.subplot(131)
-        plot_theory(data, base_value, view=view, use_data=use_data, limits=limits)
+        plot_theory(base_data, base_value, view=view, use_data=use_data, limits=limits)
         plt.title("%s t=%.2f ms"%(base.engine, base_time))
         #cbar_title = "log I"
@@ -891,6 +895,6 @@
         plt.subplot(132)
         if not opts['is2d'] and have_base:
-            plot_theory(data, base_value, view=view, use_data=use_data, limits=limits)
-        plot_theory(data, comp_value, view=view, use_data=use_data, limits=limits)
+            plot_theory(comp_data, base_value, view=view, use_data=use_data, limits=limits)
+        plot_theory(comp_data, comp_value, view=view, use_data=use_data, limits=limits)
         plt.title("%s t=%.2f ms"%(comp.engine, comp_time))
         #cbar_title = "log I"
@@ -908,5 +912,8 @@
             err[err > cutoff] = cutoff
         #err,errstr = base/comp,"ratio"
-        plot_theory(data, None, resid=err, view=errview, use_data=use_data)
+        # Note: base_data only since base and comp have same q values (though
+        # perhaps different resolution), and we are plotting the difference
+        # at each q
+        plot_theory(base_data, None, resid=err, view=errview, use_data=use_data)
         plt.xscale('log' if view == 'log' and not opts['is2d'] else 'linear')
         plt.legend(['P%d'%(k+1) for k in range(setnum+1)], loc='best')
@@ -1075,5 +1082,5 @@
         'qmax'     : 0.05,
         'nq'       : 128,
-        'res'      : 0.0,
+        'res'      : '0.0',
         'noise'    : 0.0,
         'accuracy' : 'Low',
@@ -1115,5 +1122,5 @@
         elif arg.startswith('-q='):
             opts['qmin'], opts['qmax'] = [float(v) for v in arg[3:].split(':')]
-        elif arg.startswith('-res='):   opts['res'] = float(arg[5:])
+        elif arg.startswith('-res='):   opts['res'] = arg[5:]
         elif arg.startswith('-noise='): opts['noise'] = float(arg[7:])
         elif arg.startswith('-sets='):  opts['sets'] = int(arg[6:])
@@ -1173,8 +1180,4 @@
     if opts['qmin'] is None:
         opts['qmin'] = 0.001*opts['qmax']
-    if opts['datafile'] is not None:
-        data = load_data(os.path.expanduser(opts['datafile']))
-    else:
-        data, _ = make_data(opts)
 
     comparison = any(PAR_SPLIT in v for v in values)
@@ -1216,8 +1219,29 @@
     opts['cutoff'] = [float(opts['cutoff'])]*2
 
-    base = make_engine(model_info[0], data, opts['engine'][0],
+    if PAR_SPLIT in opts['res']:
+        opts['res'] = [float(k) for k in opts['res'].split(PAR_SPLIT, 2)]
+        comparison = True
+    else:
+        opts['res'] = [float(opts['res'])]*2
+
+    if opts['datafile'] is not None:
+        data = load_data(os.path.expanduser(opts['datafile']))
+    else:
+        # Hack around the fact that make_data doesn't take a pair of resolutions
+        res = opts['res']
+        opts['res'] = res[0]
+        data0, _ = make_data(opts)
+        if res[0] != res[1]:
+            opts['res'] = res[1]
+            data1, _ = make_data(opts)
+        else:
+            data1 = data0
+        opts['res'] = res
+        data = data0, data1
+
+    base = make_engine(model_info[0], data[0], opts['engine'][0],
                        opts['cutoff'][0], opts['ngauss'][0])
     if comparison:
-        comp = make_engine(model_info[1], data, opts['engine'][1],
+        comp = make_engine(model_info[1], data[1], opts['engine'][1],
                            opts['cutoff'][1], opts['ngauss'][1])
     else:
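With this change -res= accepts either a single value or a PAR_SPLIT-separated pair, so a model can be compared against itself at two different resolutions. A minimal standalone sketch of that parsing (assuming PAR_SPLIT is ',' as used for other paired options; parse_res is a hypothetical helper, not part of the changeset):

    PAR_SPLIT = ','  # assumed separator, as for other paired options

    def parse_res(value):
        # type: (str) -> list
        # Split a -res= command line value into [base, comp] resolutions.
        if PAR_SPLIT in value:
            # "-res=0.01,0.05" forces a base/comp comparison
            return [float(v) for v in value.split(PAR_SPLIT, 2)]
        # "-res=0.05" uses the same resolution for both engines
        return [float(value)]*2

    print(parse_res('0.05'))       # [0.05, 0.05]
    print(parse_res('0.01,0.05'))  # [0.01, 0.05]

As in the changeset, a pair of values sets comparison = True even when only a single model is given.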
sasmodels/data.py
Diff from rd86f0fc to r65fbf7c:

@@ -36,4 +36,5 @@
 
 import numpy as np  # type: ignore
+from numpy import sqrt, sin, cos, pi
 
 # pylint: disable=unused-import
@@ -301,9 +302,12 @@
 
 
-def empty_data1D(q, resolution=0.0):
+def empty_data1D(q, resolution=0.0, L=0., dL=0.):
     # type: (np.ndarray, float) -> Data1D
-    """
+    r"""
     Create empty 1D data using the given *q* as the x value.
 
-    *resolution* dq/q defaults to 5%.
+    rms *resolution* $\Delta q/q$ defaults to 0%.  If wavelength *L* and rms
+    wavelength divergence *dL* are defined, then *resolution* defines
+    rms $\Delta\theta/\theta$ for the lowest *q*, with $\theta$ derived from
+    $q = 4\pi/\lambda \sin(\theta)$.
     """
@@ -313,5 +317,18 @@
     Iq, dIq = None, None
     q = np.asarray(q)
-    data = Data1D(q, Iq, dx=resolution * q, dy=dIq)
+    if L != 0 and resolution != 0:
+        theta = np.arcsin(q*L/(4*pi))
+        dtheta = theta[0]*resolution
+        ## Solving Gaussian error propagation from
+        ##     Dq^2 = (dq/dL)^2 DL^2 + (dq/dtheta)^2 Dtheta^2
+        ## gives
+        ##     (Dq/q)^2 = (DL/L)**2 + (Dtheta/tan(theta))**2
+        ## Take the square root and multiply by q, giving
+        ##     Dq = (4*pi/L) * sqrt((sin(theta)*dL/L)**2 + (cos(theta)*dtheta)**2)
+        dq = (4*pi/L) * sqrt((sin(theta)*dL/L)**2 + (cos(theta)*dtheta)**2)
+    else:
+        dq = resolution * q
+
+    data = Data1D(q, Iq, dx=dq, dy=dIq)
     data.filename = "fake data"
     return data
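As a sanity check on the error propagation above, the following standalone sketch (not part of the changeset; wavelength and resolution values are illustrative only) shows that with dL = 0 the propagated width collapses to resolution*q at the lowest q point, where dtheta is anchored and the small-angle approximations sin(theta) ~ theta, cos(theta) ~ 1 hold:

    import numpy as np
    from numpy import arcsin, sin, cos, sqrt, pi

    q = np.linspace(0.001, 0.5, 5)       # 1/Ang
    L, dL, resolution = 5.0, 0.0, 0.05   # wavelength in Ang, 5% dtheta/theta

    theta = arcsin(q*L/(4*pi))
    dtheta = theta[0]*resolution         # rms divergence anchored at lowest q
    dq = (4*pi/L) * sqrt((sin(theta)*dL/L)**2 + (cos(theta)*dtheta)**2)

    print(dq[0], resolution*q[0])        # both ~5.0e-05 at the lowest q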
sasmodels/jitter.py
Diff from rb3703f5 to r1198f90:

@@ -774,5 +774,5 @@
         # set small jitter as 0 if multiple pd dims
         dims = sum(v > 0 for v in jitter)
-        limit = [0, 0, 0.5, 5][dims]
+        limit = [0, 0.5, 5][dims]
         jitter = [0 if v < limit else v for v in jitter]
         axes.cla()
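For illustration only, here is a hypothetical standalone version of the thresholding this one-line fix tunes (in the changeset the logic runs inline in the jitter drawing code; this sketch only exercises zero, one, or two active dims):

    def clip_small_jitter(jitter):
        # Zero out jitter angles too small to matter, with a threshold
        # that grows with the number of active (nonzero) dimensions.
        dims = sum(v > 0 for v in jitter)
        limit = [0, 0.5, 5][dims]
        return [0 if v < limit else v for v in jitter]

    print(clip_small_jitter([0.8, 0.0, 0.0]))  # [0.8, 0, 0]: one dim, 0.8 >= 0.5
    print(clip_small_jitter([3.0, 7.0, 0.0]))  # [0, 7.0, 0]: two dims, 3 < 5 dropped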
sasmodels/resolution.py
Diff from r2d81cfe to rbbb887d:

@@ -20,4 +20,5 @@
 MINIMUM_RESOLUTION = 1e-8
 MINIMUM_ABSOLUTE_Q = 0.02  # relative to the minimum q in the data
+PINHOLE_N_SIGMA = 2.5  # From: Barker & Pedersen 1995 JAC
 
 class Resolution(object):
@@ -65,6 +66,9 @@
     *q_calc* is the list of points to calculate, or None if this should
     be estimated from the *q* and *q_width*.
-    """
-    def __init__(self, q, q_width, q_calc=None, nsigma=3):
+
+    *nsigma* is the width of the resolution function.  Should be 2.5.
+    See :func:`pinhole_resolution` for details.
+    """
+    def __init__(self, q, q_width, q_calc=None, nsigma=PINHOLE_N_SIGMA):
         #*min_step* is the minimum point spacing to use when computing the
         #underlying model.  It should be on the order of
@@ -82,12 +86,15 @@
 
         # Protect against models which are not defined for very low q.  Limit
-        # the smallest q value evaluated (in absolute) to 0.02*min
+        # the smallest q value evaluated to 0.02*min.  Note that negative q
+        # values are trimmed even for broad resolution.  Although not possible
+        # from the geometry, they may appear since we are using a truncated
+        # gaussian to represent resolution rather than a skew distribution.
         cutoff = MINIMUM_ABSOLUTE_Q*np.min(self.q)
-        self.q_calc = self.q_calc[abs(self.q_calc) >= cutoff]
+        self.q_calc = self.q_calc[self.q_calc >= cutoff]
 
         # Build weight matrix from calculated q values
         self.weight_matrix = pinhole_resolution(
-            self.q_calc, self.q, np.maximum(q_width, MINIMUM_RESOLUTION))
-        self.q_calc = abs(self.q_calc)
+            self.q_calc, self.q, np.maximum(q_width, MINIMUM_RESOLUTION),
+            nsigma=nsigma)
 
     def apply(self, theory):
@@ -101,7 +108,7 @@
     *q* points at which the data is measured.
 
-    *dqx* slit width in qx
+    *qx_width* slit width in qx
 
-    *dqy* slit height in qy
+    *qy_width* slit height in qy
 
     *q_calc* is the list of points to calculate, or None if this should
@@ -154,5 +161,5 @@
 
 
-def pinhole_resolution(q_calc, q, q_width):
-    """
+def pinhole_resolution(q_calc, q, q_width, nsigma=PINHOLE_N_SIGMA):
+    r"""
     Compute the convolution matrix *W* for pinhole resolution 1-D data.
@@ -162,4 +169,15 @@
     *W*, the resolution smearing can be computed using *dot(W,q)*.
 
+    Note that resolution is limited to $\pm 2.5 \sigma$.[1]  The true
+    resolution function is a broadened triangle, and does not extend over the
+    entire range $(-\infty, +\infty)$.  It is important to impose this
+    limitation since some models fall so steeply that the weighted value in
+    gaussian tails would otherwise dominate the integral.
+
     *q_calc* must be increasing.  *q_width* must be greater than zero.
+
+    [1] Barker, J. G., and J. S. Pedersen. 1995. Instrumental Smearing Effects
+    in Radially Symmetric Small-Angle Neutron Scattering by Numerical and
+    Analytical Methods. Journal of Applied Crystallography 28 (2): 105--14.
+    https://doi.org/10.1107/S0021889894010095.
     """
@@ -170,4 +188,10 @@
     cdf = erf((edges[:, None] - q[None, :]) / (sqrt(2.0)*q_width)[None, :])
     weights = cdf[1:] - cdf[:-1]
+    # Limit q range to +/- 2.5 sigma
+    qhigh = q + nsigma*q_width
+    #qlow = q - nsigma*q_width  # linear limits
+    qlow = q*q/qhigh  # log limits
+    weights[q_calc[:, None] < qlow[None, :]] = 0.
+    weights[q_calc[:, None] > qhigh[None, :]] = 0.
     weights /= np.sum(weights, axis=0)[None, :]
     return weights
@@ -494,9 +518,12 @@
 
 
-def gaussian(q, q0, dq):
+def gaussian(q, q0, dq, nsigma=2.5):
     """
-    Return the Gaussian resolution function.
+    Return the truncated Gaussian resolution function.
 
     *q0* is the center, *dq* is the width and *q* are the points to evaluate.
     """
-    return exp(-0.5*((q-q0)/dq)**2)/(sqrt(2*pi)*dq)
+    # Calculate the density of the tails; the resulting gaussian needs to be
+    # scaled by this amount in order to integrate to 1.0
+    two_tail_density = 2 * (1 + erf(-nsigma/sqrt(2)))/2
+    return exp(-0.5*((q-q0)/dq)**2)/(sqrt(2*pi)*dq)/(1-two_tail_density)
@@ -558,5 +585,5 @@
 
 
-def romberg_pinhole_1d(q, q_width, form, pars, nsigma=5):
+def romberg_pinhole_1d(q, q_width, form, pars, nsigma=2.5):
     """
     Romberg integration for pinhole resolution.
@@ -678,4 +705,6 @@
         np.testing.assert_equal(output, self.y)
 
+    # TODO: turn pinhole/slit demos into tests
+
     def test_pinhole(self):
         """
@@ -686,8 +715,11 @@
         theory = 12.0-1000.0*resolution.q_calc
        output = resolution.apply(theory)
+        # Note: answer came from output of previous run.  Non-integer
+        # values at ends come from the fact that q_calc does not
+        # extend beyond q, and so the weights don't balance.
         answer = [
-            10.44785079, 9.84991299, 8.98101708,
-            7.99906585, 6.99998311, 6.00001689,
-            5.00093415, 4.01898292, 3.15008701, 2.55214921,
+            10.47037734, 9.86925860,
+            9., 8., 7., 6., 5., 4.,
+            3.13074140, 2.52962266,
             ]
         np.testing.assert_allclose(output, answer, atol=1e-8)
@@ -732,4 +764,5 @@
         self._compare(q, output, answer, 1e-6)
 
+    @unittest.skip("suppress comparison with old version; pinhole calc changed")
     def test_pinhole(self):
         """
@@ -746,4 +779,5 @@
         self._compare(q, output, answer, 3e-4)
 
+    @unittest.skip("suppress comparison with old version; pinhole calc changed")
     def test_pinhole_romberg(self):
         """
@@ -761,5 +795,5 @@
         #                     2*np.pi/pars['radius']/200)
         #tol = 0.001
-        ## The default 3 sigma and no extra points gets 1%
+        ## The default 2.5 sigma and no extra points gets 1%
         q_calc = None  # type: np.ndarray
         tol = 0.01
@@ -1080,5 +1114,5 @@
 
     if isinstance(resolution, Slit1D):
-        width, height = resolution.dqx, resolution.dqy
+        width, height = resolution.qx_width, resolution.qy_width
         Iq_romb = romberg_slit_1d(resolution.q, width, height, model, pars)
     else:
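Two details above are easy to miss. The log limits qlow = q*q/qhigh keep the truncation window symmetric about q on a log scale, since qlow*qhigh = q**2. And the truncated gaussian must be rescaled by the mass removed from the tails to keep unit area. A quick standalone numeric check of that normalization (a sketch, not part of the changeset):

    import numpy as np
    from scipy.special import erf

    def gaussian(q, q0, dq, nsigma=2.5):
        # Mass in the two tails beyond +/- nsigma; rescale so the
        # truncated gaussian integrates to 1.0 (mirrors the changeset code).
        two_tail_density = 2 * (1 + erf(-nsigma/np.sqrt(2)))/2
        return np.exp(-0.5*((q-q0)/dq)**2)/(np.sqrt(2*np.pi)*dq)/(1-two_tail_density)

    q0, dq, nsigma = 0.1, 0.01, 2.5
    q = np.linspace(q0 - nsigma*dq, q0 + nsigma*dq, 10001)
    print(np.trapz(gaussian(q, q0, dq), q))  # ~1.0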