Changes in / [052d4c5:f89ec96] in sasmodels
Files: 8 edited
doc/guide/magnetism/magnetism.rst
(rbefe905 → r4f5afc9)

@@ -39,8 +39,8 @@
 
 .. math::
-    -- &= (1-u_i)(1-u_f) \\
-    -+ &= (1-u_i)(u_f) \\
-    +- &= (u_i)(1-u_f) \\
-    ++ &= (u_i)(u_f)
+    -- &= ((1-u_i)(1-u_f))^{1/4} \\
+    -+ &= ((1-u_i)(u_f))^{1/4} \\
+    +- &= ((u_i)(1-u_f))^{1/4} \\
+    ++ &= ((u_i)(u_f))^{1/4}
 
 Ideally the experiment would measure the pure spin states independently and
@@ -104,3 +104,2 @@
 | 2015-05-02 Steve King
 | 2017-11-15 Paul Kienzle
-| 2018-06-02 Adam Washington
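Read the restored equations as the weights on the four spin cross sections: u_i and u_f are the incoming and outgoing spin fractions, and each channel gets the fourth root of the corresponding product. A minimal Python sketch of that arithmetic (illustration only; spin_weights is a hypothetical name, not part of the changeset):

    def spin_weights(u_i, u_f):
        """Channel weights from the restored magnetism.rst equations."""
        return {
            '--': ((1 - u_i)*(1 - u_f))**0.25,
            '-+': ((1 - u_i)*u_f)**0.25,
            '+-': (u_i*(1 - u_f))**0.25,
            '++': (u_i*u_f)**0.25,
        }

    # A perfectly polarized beam (u_i = u_f = 1) selects only the ++ channel.
    assert spin_weights(1.0, 1.0) == {'--': 0.0, '-+': 0.0, '+-': 0.0, '++': 1.0}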
doc/guide/plugin.rst
(rf796469 → r7e6bc45e)

@@ -822,5 +822,5 @@
 
 :code:`source = ["lib/Si.c", ...]`
-(`Si.c <https://github.com/SasView/sasmodels/tree/master/sasmodels/models/lib/sas_Si.c>`_)
+(`Si.c <https://github.com/SasView/sasmodels/tree/master/sasmodels/models/lib/Si.c>`_)
 
 sas_3j1x_x(x):
sasmodels/compare.py
(r65fbf7c → r1fbadb2)

@@ -645,5 +645,5 @@
 
 def make_data(opts):
-    # type: (Dict[str, Any], float) -> Tuple[Data, np.ndarray]
+    # type: (Dict[str, Any]) -> Tuple[Data, np.ndarray]
     """
     Generate an empty dataset, used with the model to set Q points
@@ -667,8 +667,5 @@
     if opts['zero']:
         q = np.hstack((0, q))
-    # TODO: provide command line control of lambda and Delta lambda/lambda
-    #L, dLoL = 5, 0.14/np.sqrt(6)  # wavelength and 14% triangular FWHM
-    L, dLoL = 0, 0
-    data = empty_data1D(q, resolution=res, L=L, dL=L*dLoL)
+    data = empty_data1D(q, resolution=res)
     index = slice(None, None)
     return data, index
@@ -789,5 +786,5 @@
     base_n, comp_n = opts['count']
     base_pars, comp_pars = opts['pars']
-    base_data, comp_data = opts['data']
+    data = opts['data']
 
     comparison = comp is not None
@@ -803,5 +800,5 @@
             print("%s t=%.2f ms, intensity=%.0f"
                   % (base.engine, base_time, base_value.sum()))
-            _show_invalid(base_data, base_value)
+            _show_invalid(data, base_value)
         except ImportError:
             traceback.print_exc()
@@ -815,5 +812,5 @@
             print("%s t=%.2f ms, intensity=%.0f"
                   % (comp.engine, comp_time, comp_value.sum()))
-            _show_invalid(base_data, comp_value)
+            _show_invalid(data, comp_value)
         except ImportError:
             traceback.print_exc()
@@ -869,10 +866,9 @@
     have_base, have_comp = (base_value is not None), (comp_value is not None)
     base, comp = opts['engines']
-    base_data, comp_data = opts['data']
+    data = opts['data']
     use_data = (opts['datafile'] is not None) and (have_base ^ have_comp)
 
     # Plot if requested
     view = opts['view']
-    #view = 'log'
     if limits is None:
         vmin, vmax = np.inf, -np.inf
@@ -888,5 +884,5 @@
     if have_comp:
         plt.subplot(131)
-        plot_theory(base_data, base_value, view=view, use_data=use_data, limits=limits)
+        plot_theory(data, base_value, view=view, use_data=use_data, limits=limits)
         plt.title("%s t=%.2f ms"%(base.engine, base_time))
         #cbar_title = "log I"
@@ -895,6 +891,6 @@
         plt.subplot(132)
         if not opts['is2d'] and have_base:
-            plot_theory(comp_data, base_value, view=view, use_data=use_data, limits=limits)
-        plot_theory(comp_data, comp_value, view=view, use_data=use_data, limits=limits)
+            plot_theory(data, base_value, view=view, use_data=use_data, limits=limits)
+        plot_theory(data, comp_value, view=view, use_data=use_data, limits=limits)
         plt.title("%s t=%.2f ms"%(comp.engine, comp_time))
         #cbar_title = "log I"
@@ -912,8 +908,5 @@
         err[err > cutoff] = cutoff
         #err,errstr = base/comp,"ratio"
-        # Note: base_data only since base and comp have same q values (though
-        # perhaps different resolution), and we are plotting the difference
-        # at each q
-        plot_theory(base_data, None, resid=err, view=errview, use_data=use_data)
+        plot_theory(data, None, resid=err, view=errview, use_data=use_data)
         plt.xscale('log' if view == 'log' and not opts['is2d'] else 'linear')
         plt.legend(['P%d'%(k+1) for k in range(setnum+1)], loc='best')
@@ -1082,5 +1075,5 @@
     'qmax'      : 0.05,
     'nq'        : 128,
-    'res'       : '0.0',
+    'res'       : 0.0,
     'noise'     : 0.0,
     'accuracy'  : 'Low',
@@ -1122,5 +1115,5 @@
         elif arg.startswith('-q='):
             opts['qmin'], opts['qmax'] = [float(v) for v in arg[3:].split(':')]
-        elif arg.startswith('-res='):    opts['res'] = arg[5:]
+        elif arg.startswith('-res='):    opts['res'] = float(arg[5:])
         elif arg.startswith('-noise='):  opts['noise'] = float(arg[7:])
         elif arg.startswith('-sets='):   opts['sets'] = int(arg[6:])
@@ -1180,4 +1173,8 @@
     if opts['qmin'] is None:
         opts['qmin'] = 0.001*opts['qmax']
+    if opts['datafile'] is not None:
+        data = load_data(os.path.expanduser(opts['datafile']))
+    else:
+        data, _ = make_data(opts)
 
     comparison = any(PAR_SPLIT in v for v in values)
@@ -1219,29 +1216,8 @@
     opts['cutoff'] = [float(opts['cutoff'])]*2
 
-    if PAR_SPLIT in opts['res']:
-        opts['res'] = [float(k) for k in opts['res'].split(PAR_SPLIT, 2)]
-        comparison = True
-    else:
-        opts['res'] = [float(opts['res'])]*2
-
-    if opts['datafile'] is not None:
-        data = load_data(os.path.expanduser(opts['datafile']))
-    else:
-        # Hack around the fact that make_data doesn't take a pair of resolutions
-        res = opts['res']
-        opts['res'] = res[0]
-        data0, _ = make_data(opts)
-        if res[0] != res[1]:
-            opts['res'] = res[1]
-            data1, _ = make_data(opts)
-        else:
-            data1 = data0
-        opts['res'] = res
-        data = data0, data1
-
-    base = make_engine(model_info[0], data[0], opts['engine'][0],
+    base = make_engine(model_info[0], data, opts['engine'][0],
                        opts['cutoff'][0], opts['ngauss'][0])
     if comparison:
-        comp = make_engine(model_info[1], data[1], opts['engine'][1],
+        comp = make_engine(model_info[1], data, opts['engine'][1],
                            opts['cutoff'][1], opts['ngauss'][1])
     else:
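The removed block is an instance of compare.py's pattern for paired command-line values: if the argument contains the PAR_SPLIT separator it is split into base and comparison halves, otherwise the single value is used for both engines. A minimal sketch of that pattern (parse_pair is a hypothetical helper, and the separator value is an assumption; compare.py defines the real PAR_SPLIT):

    PAR_SPLIT = ','  # assumed separator; compare.py defines the real value

    def parse_pair(value, cast=float):
        """Split 'base,comp' into two values, duplicating a lone value."""
        if PAR_SPLIT in value:
            return [cast(v) for v in value.split(PAR_SPLIT, 2)]
        return [cast(value)]*2

    assert parse_pair('0.02,0.05') == [0.02, 0.05]
    assert parse_pair('0.02') == [0.02, 0.02]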
sasmodels/data.py
(r65fbf7c → rd86f0fc)

@@ -36,5 +36,4 @@
 
 import numpy as np  # type: ignore
-from numpy import sqrt, sin, cos, pi
 
 # pylint: disable=unused-import
@@ -302,12 +301,9 @@
 
 
-def empty_data1D(q, resolution=0.0, L=0., dL=0.):
+def empty_data1D(q, resolution=0.0):
     # type: (np.ndarray, float) -> Data1D
-    r"""
+    """
     Create empty 1D data using the given *q* as the x value.
 
-    rms *resolution* $\Delta q/q$ defaults to 0%. If wavelength *L* and rms
-    wavelength divergence *dL* are defined, then *resolution* defines
-    rms $\Delta \theta/\theta$ for the lowest *q*, with $\theta$ derived from
-    $q = 4\pi/\lambda \sin(\theta)$.
+    *resolution* dq/q defaults to 5%.
     """
@@ -317,18 +313,5 @@
     Iq, dIq = None, None
     q = np.asarray(q)
-    if L != 0 and resolution != 0:
-        theta = np.arcsin(q*L/(4*pi))
-        dtheta = theta[0]*resolution
-        ## Solving Gaussian error propagation from
-        ##   Dq^2 = (dq/dL)^2 DL^2 + (dq/dtheta)^2 Dtheta^2
-        ## gives
-        ##   (Dq/q)^2 = (DL/L)**2 + (Dtheta/tan(theta))**2
-        ## Take the square root and multiply by q, giving
-        ##   Dq = (4*pi/L) * sqrt((sin(theta)*dL/L)**2 + (cos(theta)*dtheta)**2)
-        dq = (4*pi/L) * sqrt((sin(theta)*dL/L)**2 + (cos(theta)*dtheta)**2)
-    else:
-        dq = resolution * q
-
-    data = Data1D(q, Iq, dx=dq, dy=dIq)
+    data = Data1D(q, Iq, dx=resolution * q, dy=dIq)
     data.filename = "fake data"
     return data
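The branch removed from empty_data1D encodes the error propagation spelled out in its comments: with q = (4π/λ)sin(θ), (Δq/q)² = (Δλ/λ)² + (Δθ/tan θ)². A standalone sketch of that calculation, assuming q in 1/Ang and wavelength L in Ang as in the removed code (pinhole_dq is a hypothetical name):

    import numpy as np

    def pinhole_dq(q, resolution, L, dL):
        """Delta q from the wavelength spread dL plus the angular spread set
        by *resolution* (rms dtheta/theta at the lowest q), per the removed
        branch of empty_data1D."""
        theta = np.arcsin(q*L/(4*np.pi))
        dtheta = theta[0]*resolution
        return (4*np.pi/L)*np.sqrt((np.sin(theta)*dL/L)**2
                                   + (np.cos(theta)*dtheta)**2)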
sasmodels/jitter.py
(r1198f90 → rb3703f5)

@@ -774,5 +774,5 @@
     # set small jitter as 0 if multiple pd dims
     dims = sum(v > 0 for v in jitter)
-    limit = [0, 0.5, 5][dims]
+    limit = [0, 0, 0.5, 5][dims]
     jitter = [0 if v < limit else v for v in jitter]
     axes.cla()
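The patch widens the threshold table to four entries so it covers dims = 0..3; with all three jitter angles active, the old three-entry list would raise an IndexError. Roughly, using the patched line:

    jitter = (5.0, 2.0, 0.3)                  # dtheta, dphi, dpsi
    dims = sum(v > 0 for v in jitter)         # 3 active dispersity dimensions
    limit = [0, 0, 0.5, 5][dims]              # one threshold per dims in 0..3
    print([0 if v < limit else v for v in jitter])   # -> [5.0, 0, 0]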
sasmodels/kernel_iq.c
(r7c35fda → rdc6f601)

@@ -83,19 +83,11 @@
   in_spin = clip(in_spin, 0.0, 1.0);
   out_spin = clip(out_spin, 0.0, 1.0);
-  // Previous version of this function took the square root of the weights,
-  // under the assumption that
-  //
+  // Note: sasview 3.1 scaled all slds by sqrt(weight) and assumed that
   //     w*I(q, rho1, rho2, ...) = I(q, sqrt(w)*rho1, sqrt(w)*rho2, ...)
-  //
-  // However, since the weights are applied to the final intensity and
-  // are not interned inside the I(q) function, we want the full
-  // weight and not the square root. Any function using
-  // set_spin_weights as part of calculating an amplitude will need to
-  // manually take that square root, but there is currently no such
-  // function.
-  weight[0] = (1.0-in_spin) * (1.0-out_spin); // dd
-  weight[1] = (1.0-in_spin) * out_spin;       // du
-  weight[2] = in_spin * (1.0-out_spin);       // ud
-  weight[3] = in_spin * out_spin;             // uu
+  // which is likely to be the case for simple models.
+  weight[0] = sqrt((1.0-in_spin) * (1.0-out_spin)); // dd
+  weight[1] = sqrt((1.0-in_spin) * out_spin);       // du.real
+  weight[2] = sqrt(in_spin * (1.0-out_spin));       // ud.real
+  weight[3] = sqrt(in_spin * out_spin);             // uu
   weight[4] = weight[1]; // du.imag
   weight[5] = weight[2]; // ud.imag
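The restored sqrt() relies on the identity quoted in the comment, which holds for any model whose intensity is quadratic in the SLDs. A quick numerical check under that assumption; the form factors below are arbitrary stand-ins, not sasmodels code:

    import numpy as np

    def intensity(q, rho1, rho2):
        # Any model of the form (rho1*F1(q) + rho2*F2(q))**2 is quadratic in
        # the SLDs, so scaling each rho by sqrt(w) scales I(q) by w.
        F1, F2 = np.sinc(q), np.exp(-q**2)
        return (rho1*F1 + rho2*F2)**2

    q, w = np.linspace(0.001, 0.5, 20), 0.3
    assert np.allclose(w*intensity(q, 2.0, 6.0),
                       intensity(q, np.sqrt(w)*2.0, np.sqrt(w)*6.0))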
sasmodels/models/mass_surface_fractal.py
(r7994359 → r2d81cfe)

@@ -39,24 +39,13 @@
 The surface ( $D_s$ ) and mass ( $D_m$ ) fractal dimensions are only
 valid if $0 < surface\_dim < 6$ , $0 < mass\_dim < 6$ , and
-$(surface\_dim + mass\_dim ) < 6$ .
-Older versions of sasview may have the default primary particle radius
-larger than the cluster radius, this was an error, also present in the
-Schmidt review paper below. The primary particle should be the smaller
-as described in the original Hurd et.al. who also point out that
-polydispersity in the primary particle sizes may affect their
-apparent surface fractal dimension.
-
+$(surface\_dim + mass\_dim ) < 6$ .
+
 
 References
 ----------
 
-.. [#] P Schmidt, *J Appl. Cryst.*, 24 (1991) 414-435 Equation(19)
-.. [#] A J Hurd, D W Schaefer, J E Martin, *Phys. Rev. A*,
-   35 (1987) 2361-2364 Equation(2)
+P Schmidt, *J Appl. Cryst.*, 24 (1991) 414-435 Equation(19)
 
-Authorship and Verification
-----------------------------
-
-* **Converted to sasmodels by:** Piotr Rozyczko **Date:** Jan 20, 2016
-* **Last Reviewed by:** Richard Heenan **Date:** May 30, 2018
+A J Hurd, D W Schaefer, J E Martin, *Phys. Rev. A*,
+35 (1987) 2361-2364 Equation(2)
 """
@@ -78,4 +67,5 @@
         rg_primary = rg
         background = background
+        Ref: Schmidt, J Appl Cryst, eq(19), (1991), 24, 414-435
         Hurd, Schaefer, Martin, Phys Rev A, eq(2),(1987),35, 2361-2364
         Note that 0 < Ds< 6 and 0 < Dm < 6.
@@ -88,6 +78,6 @@
     ["fractal_dim_mass", "", 1.8, [0.0, 6.0], "", "Mass fractal dimension"],
     ["fractal_dim_surf", "", 2.3, [0.0, 6.0], "", "Surface fractal dimension"],
-    ["rg_cluster", "Ang", 4000., [0.0, inf], "", "Cluster radius of gyration"],
-    ["rg_primary", "Ang", 86.7, [0.0, inf], "", "Primary particle radius of gyration"],
+    ["rg_cluster", "Ang", 86.7, [0.0, inf], "", "Cluster radius of gyration"],
+    ["rg_primary", "Ang", 4000., [0.0, inf], "", "Primary particle radius of gyration"],
 ]
 # pylint: enable=bad-whitespace, line-too-long
@@ -117,4 +107,4 @@
            fractal_dim_mass=1.8,
            fractal_dim_surf=2.3,
-           rg_cluster=4000.0,
-           rg_primary=86.7)
+           rg_cluster=86.7,
+           rg_primary=4000.0)
@@ -121,7 +111,7 @@
 
 tests = [
 
-    # Accuracy tests based on content in test/utest_other_models.py All except first, changed so rg_cluster is the larger, RKH 30 May 2018
+    # Accuracy tests based on content in test/utest_other_models.py
    [{'fractal_dim_mass': 1.8,
      'fractal_dim_surf': 2.3,
      'rg_cluster': 86.7,
@@ -133,21 +123,21 @@
    [{'fractal_dim_mass': 3.3,
      'fractal_dim_surf': 1.0,
-     'rg_cluster': 4000.0,
-     'rg_primary': 90.0,
-     }, 0.001, 0.0932516614456],
+     'rg_cluster': 90.0,
+     'rg_primary': 4000.0,
+     }, 0.001, 0.18562699016],
 
    [{'fractal_dim_mass': 1.3,
-     'fractal_dim_surf': 2.0,
-     'rg_cluster': 2000.0,
-     'rg_primary': 90.0,
+     'fractal_dim_surf': 1.0,
+     'rg_cluster': 90.0,
+     'rg_primary': 2000.0,
      'background': 0.8,
-     }, 0.001, 1.28296431786],
+     }, 0.001, 1.16539753641],
 
    [{'fractal_dim_mass': 2.3,
-     'fractal_dim_surf': 3.1,
-     'rg_cluster': 1000.0,
-     'rg_primary': 30.0,
+     'fractal_dim_surf': 1.0,
+     'rg_cluster': 90.0,
+     'rg_primary': 1000.0,
      'scale': 10.0,
      'background': 0.0,
-     }, 0.051, 0.00333804044899],
+     }, 0.051, 0.000169548800377],
 ]
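Each tests entry pairs a parameter dict with a q value in 1/Ang and the expected I(q); the reverted values above put the larger radius of gyration back on rg_primary. Reading the second entry (the suite is typically run via python -m sasmodels.model_test dll mass_surface_fractal, though the exact invocation is an assumption here):

    # Second (reverted) test entry: parameters, q in 1/Ang, expected I(q).
    pars, q, expected = ({'fractal_dim_mass': 3.3, 'fractal_dim_surf': 1.0,
                          'rg_cluster': 90.0, 'rg_primary': 4000.0},
                         0.001, 0.18562699016)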
sasmodels/resolution.py
(r0b9c6df → r2d81cfe)

@@ -20,5 +20,4 @@
 MINIMUM_RESOLUTION = 1e-8
 MINIMUM_ABSOLUTE_Q = 0.02  # relative to the minimum q in the data
-PINHOLE_N_SIGMA = 2.5  # From: Barker & Pedersen 1995 JAC
 
 class Resolution(object):
@@ -66,9 +65,6 @@
     *q_calc* is the list of points to calculate, or None if this should
     be estimated from the *q* and *q_width*.
-
-    *nsigma* is the width of the resolution function. Should be 2.5.
-    See :func:`pinhole_resolution` for details.
-    """
-    def __init__(self, q, q_width, q_calc=None, nsigma=PINHOLE_N_SIGMA):
+    """
+    def __init__(self, q, q_width, q_calc=None, nsigma=3):
         #*min_step* is the minimum point spacing to use when computing the
         #underlying model. It should be on the order of
@@ -86,15 +82,12 @@
 
         # Protect against models which are not defined for very low q. Limit
-        # the smallest q value evaluated to 0.02*min. Note that negative q
-        # values are trimmed even for broad resolution. Although not possible
-        # from the geometry, they may appear since we are using a truncated
-        # gaussian to represent resolution rather than a skew distribution.
+        # the smallest q value evaluated (in absolute) to 0.02*min
         cutoff = MINIMUM_ABSOLUTE_Q*np.min(self.q)
-        self.q_calc = self.q_calc[self.q_calc >= cutoff]
+        self.q_calc = self.q_calc[abs(self.q_calc) >= cutoff]
 
         # Build weight matrix from calculated q values
         self.weight_matrix = pinhole_resolution(
-            self.q_calc, self.q, np.maximum(q_width, MINIMUM_RESOLUTION),
-            nsigma=nsigma)
+            self.q_calc, self.q, np.maximum(q_width, MINIMUM_RESOLUTION))
+        self.q_calc = abs(self.q_calc)
 
     def apply(self, theory):
@@ -108,7 +101,7 @@
     *q* points at which the data is measured.
 
-    *qx_width* slit width in qx
-
-    *qy_width* slit height in qy
+    *dqx* slit width in qx
+
+    *dqy* slit height in qy
 
     *q_calc* is the list of points to calculate, or None if this should
@@ -161,5 +154,5 @@
 
 
-def pinhole_resolution(q_calc, q, q_width, nsigma=PINHOLE_N_SIGMA):
-    r"""
+def pinhole_resolution(q_calc, q, q_width):
+    """
     Compute the convolution matrix *W* for pinhole resolution 1-D data.
@@ -169,15 +162,4 @@
     *W*, the resolution smearing can be computed using *dot(W,q)*.
 
-    Note that resolution is limited to $\pm 2.5 \sigma$.[1] The true resolution
-    function is a broadened triangle, and does not extend over the entire
-    range $(-\infty, +\infty)$. It is important to impose this limitation
-    since some models fall so steeply that the weighted value in gaussian
-    tails would otherwise dominate the integral.
-
     *q_calc* must be increasing. *q_width* must be greater than zero.
-
-    [1] Barker, J. G., and J. S. Pedersen. 1995. Instrumental Smearing Effects
-    in Radially Symmetric Small-Angle Neutron Scattering by Numerical and
-    Analytical Methods. Journal of Applied Crystallography 28 (2): 105--14.
-    https://doi.org/10.1107/S0021889894010095.
     """
@@ -188,10 +170,4 @@
     cdf = erf((edges[:, None] - q[None, :]) / (sqrt(2.0)*q_width)[None, :])
     weights = cdf[1:] - cdf[:-1]
-    # Limit q range to +/- 2.5 sigma
-    qhigh = q + nsigma*q_width
-    #qlow = q - nsigma*q_width  # linear limits
-    qlow = q*q/qhigh  # log limits
-    weights[q_calc[:, None] < qlow[None, :]] = 0.
-    weights[q_calc[:, None] > qhigh[None, :]] = 0.
     weights /= np.sum(weights, axis=0)[None, :]
     return weights
@@ -518,12 +494,9 @@
 
 
-def gaussian(q, q0, dq, nsigma=2.5):
-    """
-    Return the truncated Gaussian resolution function.
+def gaussian(q, q0, dq):
+    """
+    Return the Gaussian resolution function.
 
     *q0* is the center, *dq* is the width and *q* are the points to evaluate.
     """
-    # Calculate the density of the tails; the resulting gaussian needs to be
-    # scaled by this amount in ordere to integrate to 1.0
-    two_tail_density = 2 * (1 + erf(-nsigma/sqrt(2)))/2
-    return exp(-0.5*((q-q0)/dq)**2)/(sqrt(2*pi)*dq)/(1-two_tail_density)
+    return exp(-0.5*((q-q0)/dq)**2)/(sqrt(2*pi)*dq)
@@ -585,5 +558,5 @@
 
 
-def romberg_pinhole_1d(q, q_width, form, pars, nsigma=2.5):
+def romberg_pinhole_1d(q, q_width, form, pars, nsigma=5):
     """
     Romberg integration for pinhole resolution.
@@ -705,7 +678,4 @@
         np.testing.assert_equal(output, self.y)
 
-    # TODO: turn pinhole/slit demos into tests
-
-    @unittest.skip("suppress comparison with old version; pinhole calc changed")
     def test_pinhole(self):
         """
@@ -716,11 +686,8 @@
         theory = 12.0-1000.0*resolution.q_calc
         output = resolution.apply(theory)
-        # Note: answer came from output of previous run. Non-integer
-        # values at ends come from the fact that q_calc does not
-        # extend beyond q, and so the weights don't balance.
         answer = [
-            10.47037734, 9.86925860,
-            9., 8., 7., 6., 5., 4.,
-            3.13074140, 2.52962266,
+            10.44785079, 9.84991299, 8.98101708,
+            7.99906585, 6.99998311, 6.00001689,
+            5.00093415, 4.01898292, 3.15008701, 2.55214921,
             ]
         np.testing.assert_allclose(output, answer, atol=1e-8)
@@ -765,5 +732,4 @@
         self._compare(q, output, answer, 1e-6)
 
-    @unittest.skip("suppress comparison with old version; pinhole calc changed")
     def test_pinhole(self):
         """
@@ -780,4 +746,3 @@
         self._compare(q, output, answer, 3e-4)
 
-    @unittest.skip("suppress comparison with old version; pinhole calc changed")
     def test_pinhole_romberg(self):
@@ -796,5 +761,5 @@
         #    2*np.pi/pars['radius']/200)
         #tol = 0.001
-        ## The default 2.5 sigma and no extra points gets 1%
+        ## The default 3 sigma and no extra points gets 1%
         q_calc = None  # type: np.ndarray
         tol = 0.01
@@ -1115,5 +1080,5 @@
 
     if isinstance(resolution, Slit1D):
-        width, height = resolution.qx_width, resolution.qy_width
+        width, height = resolution.dqx, resolution.dqy
         Iq_romb = romberg_slit_1d(resolution.q, width, height, model, pars)
     else:
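The removed gaussian() variant renormalizes for the mass cut off at ±nsigma: the two tails hold 2Φ(−nsigma) = 1 + erf(−nsigma/√2) of the total probability, exactly the two_tail_density term above, so dividing by one minus it restores unit area. A self-contained sketch under that reading (the explicit clipping here stands in for the weight trimming that pinhole_resolution performed):

    import numpy as np
    from scipy.special import erf

    def truncated_gaussian(q, q0, dq, nsigma=2.5):
        """Gaussian clipped at +/- nsigma and rescaled to integrate to 1."""
        two_tail_density = 1 + erf(-nsigma/np.sqrt(2))
        g = np.exp(-0.5*((q - q0)/dq)**2)/(np.sqrt(2*np.pi)*dq)
        return np.where(abs(q - q0) <= nsigma*dq, g/(1 - two_tail_density), 0.0)

    # The clipped curve still integrates to ~1 over its support.
    q = np.linspace(-5, 5, 20001)
    total = truncated_gaussian(q, 0.0, 1.0).sum()*(q[1] - q[0])
    assert abs(total - 1.0) < 1e-3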