Changeset 3fc4097 in sasmodels
- Timestamp: Jun 10, 2018 8:41:02 PM
- Branches: master, core_shell_microgels, magnetic_model, ticket-1257-vesicle-product, ticket_1156, ticket_1265_superball, ticket_822_more_unit_tests
- Children: 0b9c6df
- Parents: f796469 (diff), bbb887d (diff)
- git-author: Paul Butler <butlerpd@…> (06/10/18 20:41:02)
- git-committer: GitHub <noreply@…> (06/10/18 20:41:02)

Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Files: 8 edited
sasmodels/compare.py
(r1fbadb2 → r65fbf7c)

@@ -646 +646 @@
 def make_data(opts):
-    # type: (Dict[str, Any]) -> Tuple[Data, np.ndarray]
+    # type: (Dict[str, Any], float) -> Tuple[Data, np.ndarray]
     """
     Generate an empty dataset, used with the model to set Q points
@@ -667 +667 @@
     if opts['zero']:
         q = np.hstack((0, q))
-    data = empty_data1D(q, resolution=res)
+    # TODO: provide command line control of lambda and Delta lambda/lambda
+    #L, dLoL = 5, 0.14/np.sqrt(6)  # wavelength and 14% triangular FWHM
+    L, dLoL = 0, 0
+    data = empty_data1D(q, resolution=res, L=L, dL=L*dLoL)
     index = slice(None, None)
     return data, index
@@ -786 +789 @@
     base_n, comp_n = opts['count']
     base_pars, comp_pars = opts['pars']
-    data = opts['data']
+    base_data, comp_data = opts['data']

     comparison = comp is not None
@@ -800 +803 @@
             print("%s t=%.2f ms, intensity=%.0f"
                   % (base.engine, base_time, base_value.sum()))
-        _show_invalid(data, base_value)
+        _show_invalid(base_data, base_value)
     except ImportError:
         traceback.print_exc()
@@ -812 +815 @@
             print("%s t=%.2f ms, intensity=%.0f"
                   % (comp.engine, comp_time, comp_value.sum()))
-        _show_invalid(data, comp_value)
+        _show_invalid(base_data, comp_value)
     except ImportError:
         traceback.print_exc()
@@ -866 +869 @@
     have_base, have_comp = (base_value is not None), (comp_value is not None)
     base, comp = opts['engines']
-    data = opts['data']
+    base_data, comp_data = opts['data']
     use_data = (opts['datafile'] is not None) and (have_base ^ have_comp)

     # Plot if requested
     view = opts['view']
+    #view = 'log'
     if limits is None:
         vmin, vmax = np.inf, -np.inf
@@ -884 +888 @@
         if have_comp:
             plt.subplot(131)
-        plot_theory(data, base_value, view=view, use_data=use_data, limits=limits)
+        plot_theory(base_data, base_value, view=view, use_data=use_data, limits=limits)
         plt.title("%s t=%.2f ms"%(base.engine, base_time))
         #cbar_title = "log I"
@@ -891 +895 @@
             plt.subplot(132)
         if not opts['is2d'] and have_base:
-            plot_theory(data, base_value, view=view, use_data=use_data, limits=limits)
-        plot_theory(data, comp_value, view=view, use_data=use_data, limits=limits)
+            plot_theory(comp_data, base_value, view=view, use_data=use_data, limits=limits)
+        plot_theory(comp_data, comp_value, view=view, use_data=use_data, limits=limits)
         plt.title("%s t=%.2f ms"%(comp.engine, comp_time))
         #cbar_title = "log I"
@@ -908 +912 @@
             err[err > cutoff] = cutoff
         #err,errstr = base/comp,"ratio"
-        plot_theory(data, None, resid=err, view=errview, use_data=use_data)
+        # Note: base_data only since base and comp have same q values (though
+        # perhaps different resolution), and we are plotting the difference
+        # at each q
+        plot_theory(base_data, None, resid=err, view=errview, use_data=use_data)
         plt.xscale('log' if view == 'log' and not opts['is2d'] else 'linear')
         plt.legend(['P%d'%(k+1) for k in range(setnum+1)], loc='best')
@@ -1075 +1082 @@
        'qmax'     : 0.05,
        'nq'       : 128,
-       'res'      : 0.0,
+       'res'      : '0.0',
        'noise'    : 0.0,
        'accuracy' : 'Low',
@@ -1115 +1122 @@
        elif arg.startswith('-q='):
            opts['qmin'], opts['qmax'] = [float(v) for v in arg[3:].split(':')]
-       elif arg.startswith('-res='): opts['res'] = float(arg[5:])
+       elif arg.startswith('-res='): opts['res'] = arg[5:]
        elif arg.startswith('-noise='): opts['noise'] = float(arg[7:])
        elif arg.startswith('-sets='): opts['sets'] = int(arg[6:])
@@ -1173 +1180 @@
    if opts['qmin'] is None:
        opts['qmin'] = 0.001*opts['qmax']
-   if opts['datafile'] is not None:
-       data = load_data(os.path.expanduser(opts['datafile']))
-   else:
-       data, _ = make_data(opts)

    comparison = any(PAR_SPLIT in v for v in values)
@@ -1216 +1219 @@
        opts['cutoff'] = [float(opts['cutoff'])]*2

-   base = make_engine(model_info[0], data, opts['engine'][0],
+   if PAR_SPLIT in opts['res']:
+       opts['res'] = [float(k) for k in opts['res'].split(PAR_SPLIT, 2)]
+       comparison = True
+   else:
+       opts['res'] = [float(opts['res'])]*2
+
+   if opts['datafile'] is not None:
+       data = load_data(os.path.expanduser(opts['datafile']))
+   else:
+       # Hack around the fact that make_data doesn't take a pair of resolutions
+       res = opts['res']
+       opts['res'] = res[0]
+       data0, _ = make_data(opts)
+       if res[0] != res[1]:
+           opts['res'] = res[1]
+           data1, _ = make_data(opts)
+       else:
+           data1 = data0
+       opts['res'] = res
+       data = data0, data1
+
+   base = make_engine(model_info[0], data[0], opts['engine'][0],
                       opts['cutoff'][0], opts['ngauss'][0])
    if comparison:
-       comp = make_engine(model_info[1], data, opts['engine'][1],
+       comp = make_engine(model_info[1], data[1], opts['engine'][1],
                           opts['cutoff'][1], opts['ngauss'][1])
    else:
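The `-res=` option now follows the same single-value-or-pair convention as the other comparison options. A minimal sketch of that pattern, outside the changeset: the helper name `parse_pair` is hypothetical, and the separator `PAR_SPLIT` is defined in compare.py and assumed here to be ','.

    # Sketch of the one-value-or-pair option pattern used by -res= above.
    PAR_SPLIT = ','   # assumption; the real constant lives in compare.py

    def parse_pair(value, cast=float):
        """Return (base, comp) from 'a,b', or (v, v) from a single 'v'."""
        if PAR_SPLIT in value:
            lo, hi = value.split(PAR_SPLIT, 2)[:2]
            return cast(lo), cast(hi)
        return (cast(value),)*2

    print(parse_pair('0.0'))        # (0.0, 0.0)   -> same resolution for both engines
    print(parse_pair('0.01,0.05'))  # (0.01, 0.05) -> forces comparison = True

With a pair, compare.py builds two datasets (data0, data1) so the base and comparison engines can be smeared with different resolutions while sharing the same q values.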
sasmodels/data.py
(rd86f0fc → r65fbf7c)

@@ -36 +36 @@

 import numpy as np  # type: ignore
+from numpy import sqrt, sin, cos, pi

 # pylint: disable=unused-import
@@ -301 +302 @@


-def empty_data1D(q, resolution=0.0):
+def empty_data1D(q, resolution=0.0, L=0., dL=0.):
     # type: (np.ndarray, float) -> Data1D
-    """
+    r"""
     Create empty 1D data using the given *q* as the x value.

-    *resolution* dq/q defaults to 5%.
+    rms *resolution* $\Delta q/q$ defaults to 0%. If wavelength *L* and rms
+    wavelength divergence *dL* are defined, then *resolution* defines
+    rms $\Delta\theta/\theta$ for the lowest *q*, with $\theta$ derived from
+    $q = 4\pi/\lambda \sin(\theta)$.
     """
@@ -313 +317 @@
     Iq, dIq = None, None
     q = np.asarray(q)
-    data = Data1D(q, Iq, dx=resolution * q, dy=dIq)
+    if L != 0 and resolution != 0:
+        theta = np.arcsin(q*L/(4*pi))
+        dtheta = theta[0]*resolution
+        ## Solving Gaussian error propagation from
+        ##     Dq^2 = (dq/dL)^2 DL^2 + (dq/dtheta)^2 Dtheta^2
+        ## gives
+        ##     (Dq/q)^2 = (DL/L)**2 + (Dtheta/tan(theta))**2
+        ## Take the square root and multiply by q, giving
+        ##     Dq = (4*pi/L) * sqrt((sin(theta)*dL/L)**2 + (cos(theta)*dtheta)**2)
+        dq = (4*pi/L) * sqrt((sin(theta)*dL/L)**2 + (cos(theta)*dtheta)**2)
+    else:
+        dq = resolution * q
+
+    data = Data1D(q, Iq, dx=dq, dy=dIq)
     data.filename = "fake data"
     return data
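The error-propagation comment in the diff can be sanity-checked numerically. A minimal sketch, assuming representative values for the wavelength, the relative wavelength spread dL/L, and the angular resolution at the lowest q (none of these numbers come from the changeset):

    # Check that the absolute form of Dq used in empty_data1D matches the
    # relative form (Dq/q)^2 = (DL/L)^2 + (Dtheta/tan(theta))^2,
    # using q = (4*pi/L)*sin(theta).
    import numpy as np

    q = np.linspace(0.005, 0.5, 50)
    L, dLoL, res = 5.0, 0.14/np.sqrt(6), 0.02  # wavelength, DL/L, Dtheta/theta at qmin
    theta = np.arcsin(q*L/(4*np.pi))
    dtheta = theta[0]*res
    dq_abs = (4*np.pi/L)*np.sqrt((np.sin(theta)*dLoL)**2 + (np.cos(theta)*dtheta)**2)
    dq_rel = q*np.sqrt(dLoL**2 + (dtheta/np.tan(theta))**2)
    assert np.allclose(dq_abs, dq_rel)   # the two forms agree

The identity holds because (4*pi/L)*sin(theta) is exactly q, so each term in the absolute expression is q times the corresponding relative term.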
sasmodels/jitter.py
(rb3703f5 → r1198f90)

@@ -774 +774 @@
         # set small jitter as 0 if multiple pd dims
         dims = sum(v > 0 for v in jitter)
-        limit = [0, 0, 0.5, 5][dims]
+        limit = [0, 0.5, 5][dims]
         jitter = [0 if v < limit else v for v in jitter]
         axes.cla()
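For orientation, a small sketch (not part of the changeset) of what this thresholding does with the new three-entry limit table: jitter below 0.5 is zeroed when one polydispersity dimension is active, and below 5 when two are.

    jitter = [0.3, 6.0, 0.0]            # e.g. (dtheta, dphi, dpsi)
    dims = sum(v > 0 for v in jitter)   # two active dimensions here
    limit = [0, 0.5, 5][dims]           # -> 5
    jitter = [0 if v < limit else v for v in jitter]
    print(jitter)                       # [0, 6.0, 0]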
sasmodels/resolution.py
(r2d81cfe → rbbb887d)

@@ -20 +20 @@
 MINIMUM_RESOLUTION = 1e-8
 MINIMUM_ABSOLUTE_Q = 0.02  # relative to the minimum q in the data
+PINHOLE_N_SIGMA = 2.5  # From: Barker & Pedersen 1995 JAC

 class Resolution(object):
@@ -65 +66 @@
     *q_calc* is the list of points to calculate, or None if this should
     be estimated from the *q* and *q_width*.
-    """
-    def __init__(self, q, q_width, q_calc=None, nsigma=3):
+
+    *nsigma* is the width of the resolution function. Should be 2.5.
+    See :func:`pinhole_resolution` for details.
+    """
+    def __init__(self, q, q_width, q_calc=None, nsigma=PINHOLE_N_SIGMA):
        #*min_step* is the minimum point spacing to use when computing the
        #underlying model. It should be on the order of
@@ -82 +86 @@

         # Protect against models which are not defined for very low q. Limit
-        # the smallest q value evaluated (in absolute) to 0.02*min
+        # the smallest q value evaluated to 0.02*min. Note that negative q
+        # values are trimmed even for broad resolution. Although not possible
+        # from the geometry, they may appear since we are using a truncated
+        # gaussian to represent resolution rather than a skew distribution.
         cutoff = MINIMUM_ABSOLUTE_Q*np.min(self.q)
-        self.q_calc = self.q_calc[abs(self.q_calc) >= cutoff]
+        self.q_calc = self.q_calc[self.q_calc >= cutoff]

         # Build weight matrix from calculated q values
         self.weight_matrix = pinhole_resolution(
-            self.q_calc, self.q, np.maximum(q_width, MINIMUM_RESOLUTION))
-        self.q_calc = abs(self.q_calc)
+            self.q_calc, self.q, np.maximum(q_width, MINIMUM_RESOLUTION),
+            nsigma=nsigma)

     def apply(self, theory):
@@ -101 +108 @@
     *q* points at which the data is measured.

-    *dqx* slit width in qx
-
-    *dqy* slit height in qy
+    *qx_width* slit width in qx
+
+    *qy_width* slit height in qy

     *q_calc* is the list of points to calculate, or None if this should
@@ -156 +163 @@

-def pinhole_resolution(q_calc, q, q_width):
-    """
+def pinhole_resolution(q_calc, q, q_width, nsigma=PINHOLE_N_SIGMA):
+    r"""
     Compute the convolution matrix *W* for pinhole resolution 1-D data.
@@ -162 +169 @@
     *W*, the resolution smearing can be computed using *dot(W,q)*.

+    Note that resolution is limited to $\pm 2.5 \sigma$.[1] The true resolution
+    function is a broadened triangle, and does not extend over the entire
+    range $(-\infty, +\infty)$. It is important to impose this limitation
+    since some models fall so steeply that the weighted value in gaussian
+    tails would otherwise dominate the integral.
+
     *q_calc* must be increasing. *q_width* must be greater than zero.
+
+    [1] Barker, J. G., and J. S. Pedersen. 1995. Instrumental Smearing Effects
+    in Radially Symmetric Small-Angle Neutron Scattering by Numerical and
+    Analytical Methods. Journal of Applied Crystallography 28 (2): 105--14.
+    https://doi.org/10.1107/S0021889894010095.
     """
     # The current algorithm is a midpoint rectangle rule. In the test case,
@@ -170 +188 @@
     cdf = erf((edges[:, None] - q[None, :]) / (sqrt(2.0)*q_width)[None, :])
     weights = cdf[1:] - cdf[:-1]
+    # Limit q range to +/- 2.5 sigma
+    qhigh = q + nsigma*q_width
+    #qlow = q - nsigma*q_width  # linear limits
+    qlow = q*q/qhigh  # log limits
+    weights[q_calc[:, None] < qlow[None, :]] = 0.
+    weights[q_calc[:, None] > qhigh[None, :]] = 0.
     weights /= np.sum(weights, axis=0)[None, :]
     return weights
@@ -496 +520 @@

-def gaussian(q, q0, dq):
-    """
-    Return the Gaussian resolution function.
+def gaussian(q, q0, dq, nsigma=2.5):
+    """
+    Return the truncated Gaussian resolution function.

     *q0* is the center, *dq* is the width and *q* are the points to evaluate.
     """
-    return exp(-0.5*((q-q0)/dq)**2)/(sqrt(2*pi)*dq)
+    # Calculate the density of the tails; the resulting gaussian needs to be
+    # scaled by this amount in order to integrate to 1.0
+    two_tail_density = 2 * (1 + erf(-nsigma/sqrt(2)))/2
+    return exp(-0.5*((q-q0)/dq)**2)/(sqrt(2*pi)*dq)/(1-two_tail_density)
@@ -560 +587 @@

-def romberg_pinhole_1d(q, q_width, form, pars, nsigma=5):
+def romberg_pinhole_1d(q, q_width, form, pars, nsigma=2.5):
     """
     Romberg integration for pinhole resolution.
@@ -678 +705 @@
         np.testing.assert_equal(output, self.y)

+    # TODO: turn pinhole/slit demos into tests
+
     def test_pinhole(self):
         """
@@ -686 +715 @@
         theory = 12.0-1000.0*resolution.q_calc
         output = resolution.apply(theory)
+        # Note: answer came from output of previous run. Non-integer
+        # values at ends come from the fact that q_calc does not
+        # extend beyond q, and so the weights don't balance.
         answer = [
-            10.44785079, 9.84991299, 8.98101708,
-            7.99906585, 6.99998311, 6.00001689,
-            5.00093415, 4.01898292, 3.15008701, 2.55214921,
+            10.47037734, 9.86925860,
+            9., 8., 7., 6., 5., 4.,
+            3.13074140, 2.52962266,
             ]
         np.testing.assert_allclose(output, answer, atol=1e-8)
@@ -732 +764 @@
         self._compare(q, output, answer, 1e-6)

+    @unittest.skip("suppress comparison with old version; pinhole calc changed")
     def test_pinhole(self):
         """
@@ -746 +779 @@
         self._compare(q, output, answer, 3e-4)

+    @unittest.skip("suppress comparison with old version; pinhole calc changed")
     def test_pinhole_romberg(self):
         """
@@ -761 +795 @@
         #                     2*np.pi/pars['radius']/200)
         #tol = 0.001
-        ## The default 3 sigma and no extra points gets 1%
+        ## The default 2.5 sigma and no extra points gets 1%
         q_calc = None  # type: np.ndarray
         tol = 0.01
@@ -1080 +1114 @@

     if isinstance(resolution, Slit1D):
-        width, height = resolution.dqx, resolution.dqy
+        width, height = resolution.qx_width, resolution.qy_width
         Iq_romb = romberg_slit_1d(resolution.q, width, height, model, pars)
     else:
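The tail rescaling in the new `gaussian` can be verified directly: truncating at nsigma = 2.5 discards about 1.24% of the density, and dividing by (1 - two_tail_density) restores unit area over the truncated range. A minimal sketch, outside the changeset, with arbitrary q0 and dq:

    import numpy as np
    from scipy.special import erf

    def gaussian(q, q0, dq, nsigma=2.5):
        # density lost to the two tails beyond +/- nsigma
        two_tail_density = 2 * (1 + erf(-nsigma/np.sqrt(2)))/2
        return np.exp(-0.5*((q-q0)/dq)**2)/(np.sqrt(2*np.pi)*dq)/(1-two_tail_density)

    q0, dq, nsigma = 0.1, 0.01, 2.5
    q = np.linspace(q0 - nsigma*dq, q0 + nsigma*dq, 10001)
    print(np.trapz(gaussian(q, q0, dq), q))  # ~1.0 (would be ~0.9876 without the rescale)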
doc/guide/magnetism/magnetism.rst
(r4f5afc9 → rbefe905)

@@ -39 +39 @@

 .. math::
-    -- &= ((1-u_i)(1-u_f))^{1/4} \\
-    -+ &= ((1-u_i)(u_f))^{1/4} \\
-    +- &= ((u_i)(1-u_f))^{1/4} \\
-    ++ &= ((u_i)(u_f))^{1/4}
+    -- &= (1-u_i)(1-u_f) \\
+    -+ &= (1-u_i)(u_f) \\
+    +- &= (u_i)(1-u_f) \\
+    ++ &= (u_i)(u_f)

 Ideally the experiment would measure the pure spin states independently and
@@ -104 +104 @@
 | 2015-05-02 Steve King
 | 2017-11-15 Paul Kienzle
+| 2018-06-02 Adam Washington
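The corrected weights are plain products of the incoming/outgoing spin fractions, so they form a proper partition. A small illustration in Python (the helper name `spin_weights` is hypothetical, not from the changeset):

    def spin_weights(u_i, u_f):
        # cross-section weights after the 1/4 power is dropped
        return {'--': (1-u_i)*(1-u_f), '-+': (1-u_i)*u_f,
                '+-': u_i*(1-u_f),     '++': u_i*u_f}

    w = spin_weights(0.9, 0.8)
    print(w, sum(w.values()))  # the four weights sum to exactly 1 for any u_i, u_f in [0, 1]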
doc/guide/plugin.rst
(r7e6bc45e → rf796469)

@@ -822 +822 @@

     :code:`source = ["lib/Si.c", ...]`
-    (`Si.c <https://github.com/SasView/sasmodels/tree/master/sasmodels/models/lib/Si.c>`_)
+    (`Si.c <https://github.com/SasView/sasmodels/tree/master/sasmodels/models/lib/sas_Si.c>`_)

     sas_3j1x_x(x):
sasmodels/kernel_iq.c
(rdc6f601 → r7c35fda)

@@ -83 +83 @@
     in_spin = clip(in_spin, 0.0, 1.0);
     out_spin = clip(out_spin, 0.0, 1.0);
-    // Note: sasview 3.1 scaled all slds by sqrt(weight) and assumed that
+    // Previous version of this function took the square root of the weights,
+    // under the assumption that
+    //
     //     w*I(q, rho1, rho2, ...) = I(q, sqrt(w)*rho1, sqrt(w)*rho2, ...)
-    // which is likely to be the case for simple models.
-    weight[0] = sqrt((1.0-in_spin) * (1.0-out_spin)); // dd
-    weight[1] = sqrt((1.0-in_spin) * out_spin);       // du.real
-    weight[2] = sqrt(in_spin * (1.0-out_spin));       // ud.real
-    weight[3] = sqrt(in_spin * out_spin);             // uu
+    //
+    // However, since the weights are applied to the final intensity and
+    // are not interned inside the I(q) function, we want the full
+    // weight and not the square root. Any function using
+    // set_spin_weights as part of calculating an amplitude will need to
+    // manually take that square root, but there is currently no such
+    // function.
+    weight[0] = (1.0-in_spin) * (1.0-out_spin); // dd
+    weight[1] = (1.0-in_spin) * out_spin;       // du
+    weight[2] = in_spin * (1.0-out_spin);       // ud
+    weight[3] = in_spin * out_spin;             // uu
     weight[4] = weight[1]; // du.imag
     weight[5] = weight[2]; // ud.imag
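A quick numerical illustration (not from the changeset) of the comment above: w*I(rho) == I(sqrt(w)*rho) only when I is purely quadratic in the slds; once a solvent contrast offset enters, the sqrt trick breaks, which is why the full weights are now applied to the final intensity. The toy intensity functions below are hypothetical stand-ins for real models.

    from math import isclose, sqrt

    w, rho = 0.3, 2.5

    def I_quadratic(rho):                   # "simple model": I ~ rho^2
        return (100.0*rho)**2

    def I_contrast(rho, rho_solvent=1.0):   # I ~ (rho - rho_solvent)^2
        return (100.0*(rho - rho_solvent))**2

    print(isclose(w*I_quadratic(rho), I_quadratic(sqrt(w)*rho)))  # True
    print(isclose(w*I_contrast(rho), I_contrast(sqrt(w)*rho)))    # False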
sasmodels/models/mass_surface_fractal.py
(r2d81cfe → r7994359)

@@ -39 +39 @@
 The surface ($D_s$) and mass ($D_m$) fractal dimensions are only
 valid if $0 < surface\_dim < 6$, $0 < mass\_dim < 6$, and
-$(surface\_dim + mass\_dim) < 6$.
-
+$(surface\_dim + mass\_dim) < 6$.
+Older versions of sasview may have the default primary particle radius
+larger than the cluster radius; this was an error, also present in the
+Schmidt review paper below. The primary particle should be the smaller,
+as described in the original Hurd et al., who also point out that
+polydispersity in the primary particle sizes may affect their
+apparent surface fractal dimension.
+

 References
 ----------

-P Schmidt, *J Appl. Cryst.*, 24 (1991) 414-435 Equation(19)
+.. [#] P Schmidt, *J Appl. Cryst.*, 24 (1991) 414-435 Equation(19)
+.. [#] A J Hurd, D W Schaefer, J E Martin, *Phys. Rev. A*,
+   35 (1987) 2361-2364 Equation(2)

-A J Hurd, D W Schaefer, J E Martin, *Phys. Rev. A*,
-35 (1987) 2361-2364 Equation(2)
+Authorship and Verification
+----------------------------
+
+* **Converted to sasmodels by:** Piotr Rozyczko **Date:** Jan 20, 2016
+* **Last Reviewed by:** Richard Heenan **Date:** May 30, 2018
 """
@@ -67 +78 @@
         rg_primary = rg
         background = background
-        Ref: Schmidt, J Appl Cryst, eq(19), (1991), 24, 414-435
         Hurd, Schaefer, Martin, Phys Rev A, eq(2),(1987),35, 2361-2364
     Note that 0 < Ds< 6 and 0 < Dm < 6.
@@ -78 +88 @@
    ["fractal_dim_mass", "", 1.8, [0.0, 6.0], "", "Mass fractal dimension"],
    ["fractal_dim_surf", "", 2.3, [0.0, 6.0], "", "Surface fractal dimension"],
-   ["rg_cluster", "Ang", 86.7, [0.0, inf], "", "Cluster radius of gyration"],
-   ["rg_primary", "Ang", 4000., [0.0, inf], "", "Primary particle radius of gyration"],
+   ["rg_cluster", "Ang", 4000., [0.0, inf], "", "Cluster radius of gyration"],
+   ["rg_primary", "Ang", 86.7, [0.0, inf], "", "Primary particle radius of gyration"],
    ]
 # pylint: enable=bad-whitespace, line-too-long
@@ -107 +117 @@
            fractal_dim_mass=1.8,
            fractal_dim_surf=2.3,
-           rg_cluster=86.7,
-           rg_primary=4000.0)
+           rg_cluster=4000.0,
+           rg_primary=86.7)

 tests = [

-    # Accuracy tests based on content in test/utest_other_models.py
-    [{'fractal_dim_mass': 1.8,
+    # Accuracy tests based on content in test/utest_other_models.py All except first, changed so rg_cluster is the larger, RKH 30 May 2018
+    [{'fractal_dim_mass': 1.8,
      'fractal_dim_surf': 2.3,
      'rg_cluster': 86.7,
@@ -123 +133 @@
    [{'fractal_dim_mass': 3.3,
      'fractal_dim_surf': 1.0,
-     'rg_cluster': 90.0,
-     'rg_primary': 4000.0,
-     }, 0.001, 0.18562699016],
+     'rg_cluster': 4000.0,
+     'rg_primary': 90.0,
+     }, 0.001, 0.0932516614456],

    [{'fractal_dim_mass': 1.3,
-     'fractal_dim_surf': 1.0,
-     'rg_cluster': 90.0,
-     'rg_primary': 2000.0,
+     'fractal_dim_surf': 2.0,
+     'rg_cluster': 2000.0,
+     'rg_primary': 90.0,
      'background': 0.8,
-     }, 0.001, 1.16539753641],
+     }, 0.001, 1.28296431786],

    [{'fractal_dim_mass': 2.3,
-     'fractal_dim_surf': 1.0,
-     'rg_cluster': 90.0,
-     'rg_primary': 1000.0,
+     'fractal_dim_surf': 3.1,
+     'rg_cluster': 1000.0,
+     'rg_primary': 30.0,
      'scale': 10.0,
      'background': 0.0,
-     }, 0.051, 0.000169548800377],
+     }, 0.051, 0.00333804044899],
    ]
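Each entry in the `tests` table is [{parameters}, q, expected I(q)]. A hedged sketch of re-checking one of the updated values through the sasmodels direct-call API (load_model / make_kernel / call_kernel, as documented for sasmodels); the exact number depends on having a sasmodels version that includes this changeset.

    import numpy as np
    from sasmodels.core import load_model
    from sasmodels.direct_model import call_kernel

    model = load_model('mass_surface_fractal')
    kernel = model.make_kernel([np.array([0.001])])
    pars = {'fractal_dim_mass': 3.3, 'fractal_dim_surf': 1.0,
            'rg_cluster': 4000.0, 'rg_primary': 90.0}
    Iq = call_kernel(kernel, pars)
    print(Iq)  # expected ~0.0932516614456 per the second test entry above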