# Changeset 7609046 in sasmodels

Ignore:
Timestamp:
Jul 19, 2018 12:47:44 PM (12 months ago)
Branches:
master
Children:
f41027b
Parents:
9ce5bcb (diff), 1d9998c (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
git-author:
Paul Kienzle <pkienzle@…> (07/19/18 12:47:44)
git-committer:
Message:

Merge branch 'master' into ticket-608-user-defined-weights

Files:
1 deleted
22 edited

Unmodified
Removed
• ## .travis.yml

 r335271e env: - PY=2.7 - DEPLOY=True #- DEPLOY=True - os: linux env:
• ## doc/guide/magnetism/magnetism.rst

 r4f5afc9 .. math:: -- &= ((1-u_i)(1-u_f))^{1/4} \\ -+ &= ((1-u_i)(u_f))^{1/4} \\ +- &= ((u_i)(1-u_f))^{1/4} \\ ++ &= ((u_i)(u_f))^{1/4} -- &= (1-u_i)(1-u_f) \\ -+ &= (1-u_i)(u_f) \\ +- &= (u_i)(1-u_f) \\ ++ &= (u_i)(u_f) Ideally the experiment would measure the pure spin states independently and | 2015-05-02 Steve King | 2017-11-15 Paul Kienzle | 2018-06-02 Adam Washington
• ## doc/guide/pd/polydispersity.rst

 r9ce5bcb P(q) = \text{scale} \langle F^* F \rangle / V + \text{background} where $F$ is the scattering amplitude and $\langle\cdot\rangle$ denotes an average over the size distribution. where $F$ is the scattering amplitude and $\langle\cdot\rangle$ denotes an average over the size distribution $f(x; \bar x, \sigma)$, giving .. math:: P(q) = \frac{\text{scale}}{V} \int_\mathbb{R} f(x; \bar x, \sigma) F^2(q, x)\, dx + \text{background} Each distribution is characterized by a center value $\bar x$ or with larger values of $N_\sigma$ required for heavier tailed distributions. The scattering in general falls rapidly with $qr$ so the usual assumption that $G(r - 3\sigma_r)$ is tiny and therefore $f(r - 3\sigma_r)G(r - 3\sigma_r)$ that $f(r - 3\sigma_r)$ is tiny and therefore $f(r - 3\sigma_r)f(r - 3\sigma_r)$ will not contribute much to the average may not hold when particles are large. This, too, will require increasing $N_\sigma$. These are all implemented as *number-average* distributions. .. note:: In 2009 IUPAC decided to introduce the new term 'dispersity' to replace the term 'polydispersity' (see Pure Appl. Chem., (2009), 81(2), 351-353 _ in order to make the terminology describing distributions of properties unambiguous. Throughout the SasView documentation we continue to use the term polydispersity because one of the consequences of the IUPAC change is that orientational polydispersity would not meet their new criteria (which requires dispersity to be dimensionless). Suggested Applications
• ## doc/guide/plugin.rst

 r7e6bc45e :code:source = ["lib/Si.c", ...] (Si.c _) (Si.c _) sas_3j1x_x(x):
• ## example/model_ellipsoid_hayter_msa.py

 r8a5f021 # DEFINE THE MODEL kernel = load_model('ellipsoid*hayter_msa') kernel = load_model('ellipsoid@hayter_msa') pars = dict(scale=6.4, background=0.06, sld=0.33, sld_solvent=2.15, radius_polar=14.0,
• ## sasmodels/compare.py

 raa25fc7 -title="note" adds note to the plot title, after the model name -weights shows weights plots for the polydisperse parameters -profile shows the sld profile if the model has a plottable sld profile === output options === -edit starts the parameter explorer -help/-html shows the model docs instead of running the model === environment variables === -DSAS_MODELPATH=path sets directory containing custom models -DSAS_OPENCL=vendor:device|none sets the target OpenCL device -DXDG_CACHE_HOME=~/.cache sets the pyopencl cache root (linux only) -DSAS_COMPILER=tinycc|msvc|mingw|unix sets the DLL compiler -DSAS_OPENMP=1 turns on OpenMP for the DLLs -DSAS_DLL_PATH=path sets the path to the compiled modules The interpretation of quad precision depends on architecture, and may def make_data(opts): # type: (Dict[str, Any]) -> Tuple[Data, np.ndarray] # type: (Dict[str, Any], float) -> Tuple[Data, np.ndarray] """ Generate an empty dataset, used with the model to set Q points if opts['zero']: q = np.hstack((0, q)) data = empty_data1D(q, resolution=res) # TODO: provide command line control of lambda and Delta lambda/lambda #L, dLoL = 5, 0.14/np.sqrt(6)  # wavelength and 14% triangular FWHM L, dLoL = 0, 0 data = empty_data1D(q, resolution=res, L=L, dL=L*dLoL) index = slice(None, None) return data, index dim = base._kernel.dim weights.plot_weights(model_info, get_mesh(model_info, base_pars, dim=dim)) if opts['show_profile']: import pylab base, comp = opts['engines'] base_pars, comp_pars = opts['pars'] have_base = base._kernel.info.profile is not None have_comp = ( comp is not None and comp._kernel.info.profile is not None and base_pars != comp_pars ) if have_base or have_comp: pylab.figure() if have_base: plot_profile(base._kernel.info, **base_pars) if have_comp: plot_profile(comp._kernel.info, label='comp', **comp_pars) pylab.legend() if opts['plot']: import matplotlib.pyplot as plt return limits def plot_profile(model_info, label='base', **args): # type: (ModelInfo, 
List[Tuple[float, np.ndarray, np.ndarray]]) -> None """ Plot the profile returned by the model profile method. *model_info* defines model parameters, etc. *mesh* is a list of tuples containing (*value*, *dispersity*, *weights*) for each parameter, where (*dispersity*, *weights*) pairs are the distributions to be plotted. """ import pylab args = dict((k, v) for k, v in args.items() if "_pd" not in k and ":" not in k and k not in ("background", "scale", "theta", "phi", "psi")) args = args.copy() args.pop('scale', 1.) args.pop('background', 0.) z, rho = model_info.profile(**args) #pylab.interactive(True) pylab.plot(z, rho, '-', label=label) pylab.grid(True) #pylab.show() def run_models(opts, verbose=False): # type: (Dict[str, Any]) -> Dict[str, Any] base_n, comp_n = opts['count'] base_pars, comp_pars = opts['pars'] data = opts['data'] base_data, comp_data = opts['data'] comparison = comp is not None print("%s t=%.2f ms, intensity=%.0f" % (base.engine, base_time, base_value.sum())) _show_invalid(data, base_value) _show_invalid(base_data, base_value) except ImportError: traceback.print_exc() print("%s t=%.2f ms, intensity=%.0f" % (comp.engine, comp_time, comp_value.sum())) _show_invalid(data, comp_value) _show_invalid(base_data, comp_value) except ImportError: traceback.print_exc() have_base, have_comp = (base_value is not None), (comp_value is not None) base, comp = opts['engines'] data = opts['data'] base_data, comp_data = opts['data'] use_data = (opts['datafile'] is not None) and (have_base ^ have_comp) # Plot if requested view = opts['view'] #view = 'log' if limits is None: vmin, vmax = np.inf, -np.inf if have_comp: plt.subplot(131) plot_theory(data, base_value, view=view, use_data=use_data, limits=limits) plot_theory(base_data, base_value, view=view, use_data=use_data, limits=limits) plt.title("%s t=%.2f ms"%(base.engine, base_time)) #cbar_title = "log I" plt.subplot(132) if not opts['is2d'] and have_base: plot_theory(data, base_value, view=view, use_data=use_data, 
limits=limits) plot_theory(data, comp_value, view=view, use_data=use_data, limits=limits) plot_theory(comp_data, base_value, view=view, use_data=use_data, limits=limits) plot_theory(comp_data, comp_value, view=view, use_data=use_data, limits=limits) plt.title("%s t=%.2f ms"%(comp.engine, comp_time)) #cbar_title = "log I" err[err > cutoff] = cutoff #err,errstr = base/comp,"ratio" plot_theory(data, None, resid=err, view=errview, use_data=use_data) # Note: base_data only since base and comp have same q values (though # perhaps different resolution), and we are plotting the difference # at each q plot_theory(base_data, None, resid=err, view=errview, use_data=use_data) plt.xscale('log' if view == 'log' and not opts['is2d'] else 'linear') plt.legend(['P%d'%(k+1) for k in range(setnum+1)], loc='best') OPTIONS = [ # Plotting 'plot', 'noplot', 'weights', 'plot', 'noplot', 'weights', 'profile', 'linear', 'log', 'q4', 'rel', 'abs', invalid = [o[1:] for o in flags if o[1:] not in NAME_OPTIONS and not any(o.startswith('-%s='%t) for t in VALUE_OPTIONS)] if not (o[1:] in NAME_OPTIONS or any(o.startswith('-%s='%t) for t in VALUE_OPTIONS) or o.startswith('-D'))] if invalid: print("Invalid options: %s"%(", ".join(invalid))) 'qmax'      : 0.05, 'nq'        : 128, 'res'       : 0.0, 'res'       : '0.0', 'noise'     : 0.0, 'accuracy'  : 'Low', 'count'     : '1', 'show_weights' : False, 'show_profile' : False, 'sphere'    : 0, 'ngauss'    : '0', elif arg.startswith('-q='): opts['qmin'], opts['qmax'] = [float(v) for v in arg[3:].split(':')] elif arg.startswith('-res='):      opts['res'] = float(arg[5:]) elif arg.startswith('-res='):      opts['res'] = arg[5:] elif arg.startswith('-noise='):    opts['noise'] = float(arg[7:]) elif arg.startswith('-sets='):     opts['sets'] = int(arg[6:]) elif arg == '-default': opts['use_demo'] = False elif arg == '-weights': opts['show_weights'] = True elif arg == '-profile': opts['show_profile'] = True elif arg == '-html':    opts['html'] = True elif arg 
== '-help':    opts['html'] = True elif arg.startswith('-D'): var, val = arg[2:].split('=') os.environ[var] = val # pylint: enable=bad-whitespace,C0321 if opts['qmin'] is None: opts['qmin'] = 0.001*opts['qmax'] if opts['datafile'] is not None: data = load_data(os.path.expanduser(opts['datafile'])) else: data, _ = make_data(opts) comparison = any(PAR_SPLIT in v for v in values) opts['cutoff'] = [float(opts['cutoff'])]*2 base = make_engine(model_info, data, opts['engine'], if PAR_SPLIT in opts['res']: opts['res'] = [float(k) for k in opts['res'].split(PAR_SPLIT, 2)] comparison = True else: opts['res'] = [float(opts['res'])]*2 if opts['datafile'] is not None: data = load_data(os.path.expanduser(opts['datafile'])) else: # Hack around the fact that make_data doesn't take a pair of resolutions res = opts['res'] opts['res'] = res data0, _ = make_data(opts) if res != res: opts['res'] = res data1, _ = make_data(opts) else: data1 = data0 opts['res'] = res data = data0, data1 base = make_engine(model_info, data, opts['engine'], opts['cutoff'], opts['ngauss']) if comparison: comp = make_engine(model_info, data, opts['engine'], comp = make_engine(model_info, data, opts['engine'], opts['cutoff'], opts['ngauss']) else:
• ## sasmodels/core.py

 r3221de0 if CUSTOM_MODEL_PATH == "": CUSTOM_MODEL_PATH = joinpath(os.path.expanduser("~"), ".sasmodels", "custom_models") if not os.path.isdir(CUSTOM_MODEL_PATH): os.makedirs(CUSTOM_MODEL_PATH) #if not os.path.isdir(CUSTOM_MODEL_PATH): #    os.makedirs(CUSTOM_MODEL_PATH) # TODO: refactor composite model support
• ## sasmodels/data.py

 rd86f0fc import numpy as np  # type: ignore from numpy import sqrt, sin, cos, pi # pylint: disable=unused-import def empty_data1D(q, resolution=0.0): def empty_data1D(q, resolution=0.0, L=0., dL=0.): # type: (np.ndarray, float) -> Data1D """ r""" Create empty 1D data using the given *q* as the x value. *resolution* dq/q defaults to 5%. rms *resolution* $\Delta q/q$ defaults to 0%.  If wavelength *L* and rms wavelength divergence *dL* are defined, then *resolution* defines rms $\Delta \theta/\theta$ for the lowest *q*, with $\theta$ derived from $q = 4\pi/\lambda \sin(\theta)$. """ Iq, dIq = None, None q = np.asarray(q) data = Data1D(q, Iq, dx=resolution * q, dy=dIq) if L != 0 and resolution != 0: theta = np.arcsin(q*L/(4*pi)) dtheta = theta*resolution ## Solving Gaussian error propagation from ##   Dq^2 = (dq/dL)^2 DL^2 + (dq/dtheta)^2 Dtheta^2 ## gives ##   (Dq/q)^2 = (DL/L)**2 + (Dtheta/tan(theta))**2 ## Take the square root and multiply by q, giving ##   Dq = (4*pi/L) * sqrt((sin(theta)*dL/L)**2 + (cos(theta)*dtheta)**2) dq = (4*pi/L) * sqrt((sin(theta)*dL/L)**2 + (cos(theta)*dtheta)**2) else: dq = resolution * q data = Data1D(q, Iq, dx=dq, dy=dIq) data.filename = "fake data" return data # Note: masks merge, so any masked theory points will stay masked, # and the data mask will be added to it. 
mtheory = masked_array(theory, data.mask.copy()) #mtheory = masked_array(theory, data.mask.copy()) theory_x = data.x[~data.mask] mtheory = masked_array(theory) mtheory[~np.isfinite(mtheory)] = masked if view is 'log': mtheory[mtheory <= 0] = masked plt.plot(data.x, scale*mtheory, '-') plt.plot(theory_x, scale*mtheory, '-') all_positive = all_positive and (mtheory > 0).all() some_present = some_present or (mtheory.count() > 0) if use_resid: mresid = masked_array(resid, data.mask.copy()) theory_x = data.x[~data.mask] mresid = masked_array(resid) mresid[~np.isfinite(mresid)] = masked some_present = (mresid.count() > 0) if num_plots > 1: plt.subplot(1, num_plots, use_calc + 2) plt.plot(data.x, mresid, '.') plt.plot(theory_x, mresid, '.') plt.xlabel("$q$/A$^{-1}$") plt.ylabel('residuals')
• ## sasmodels/direct_model.py

 rd18d6dd qmax = getattr(data, 'qmax', np.inf) accuracy = getattr(data, 'accuracy', 'Low') index = ~data.mask & (q >= qmin) & (q <= qmax) index = (data.mask == 0) & (q >= qmin) & (q <= qmax) if data.data is not None: index &= ~np.isnan(data.data) elif self.data_type == 'Iq': index = (data.x >= data.qmin) & (data.x <= data.qmax) mask = getattr(data, 'mask', None) if mask is not None: index &= (mask == 0) if data.y is not None: index &= ~np.isnan(data.y)
• ## sasmodels/jitter.py

 rb3703f5 # set small jitter as 0 if multiple pd dims dims = sum(v > 0 for v in jitter) limit = [0, 0, 0.5, 5][dims] limit = [0, 0.5, 5][dims] jitter = [0 if v < limit else v for v in jitter] axes.cla()
• ## sasmodels/kernel_iq.c

 rdc6f601 in_spin = clip(in_spin, 0.0, 1.0); out_spin = clip(out_spin, 0.0, 1.0); // Note: sasview 3.1 scaled all slds by sqrt(weight) and assumed that // Previous version of this function took the square root of the weights, // under the assumption that // //     w*I(q, rho1, rho2, ...) = I(q, sqrt(w)*rho1, sqrt(w)*rho2, ...) // which is likely to be the case for simple models. weight = sqrt((1.0-in_spin) * (1.0-out_spin)); // dd weight = sqrt((1.0-in_spin) * out_spin);       // du.real weight = sqrt(in_spin * (1.0-out_spin));       // ud.real weight = sqrt(in_spin * out_spin);             // uu // // However, since the weights are applied to the final intensity and // are not interned inside the I(q) function, we want the full // weight and not the square root.  Any function using // set_spin_weights as part of calculating an amplitude will need to // manually take that square root, but there is currently no such // function. weight = (1.0-in_spin) * (1.0-out_spin); // dd weight = (1.0-in_spin) * out_spin;       // du weight = in_spin * (1.0-out_spin);       // ud weight = in_spin * out_spin;             // uu weight = weight; // du.imag weight = weight; // ud.imag
• ## sasmodels/kerneldll.py

 r33969b6 pass # pylint: enable=unused-import if "SAS_DLL_PATH" in os.environ: SAS_DLL_PATH = os.environ["SAS_DLL_PATH"] else: # Assume the default location of module DLLs is in .sasmodels/compiled_models. SAS_DLL_PATH = os.path.join(os.path.expanduser("~"), ".sasmodels", "compiled_models") if "SAS_COMPILER" in os.environ: return CC + [source, "-o", output, "-lm"] # Assume the default location of module DLLs is in .sasmodels/compiled_models. DLL_PATH = os.path.join(os.path.expanduser("~"), ".sasmodels", "compiled_models") ALLOW_SINGLE_PRECISION_DLLS = True return path return joinpath(DLL_PATH, basename) return joinpath(SAS_DLL_PATH, basename) exist yet if it hasn't been compiled. """ return os.path.join(DLL_PATH, dll_name(model_info, dtype)) return os.path.join(SAS_DLL_PATH, dll_name(model_info, dtype)) models are not allowed as DLLs. Set *sasmodels.kerneldll.DLL_PATH* to the compiled dll output path. Set *sasmodels.kerneldll.SAS_DLL_PATH* to the compiled dll output path. Alternatively, set the environment variable *SAS_DLL_PATH*. The default is in ~/.sasmodels/compiled_models. """ if need_recompile: # Make sure the DLL path exists if not os.path.exists(DLL_PATH): os.makedirs(DLL_PATH) if not os.path.exists(SAS_DLL_PATH): os.makedirs(SAS_DLL_PATH) basename = splitext(os.path.basename(dll)) + "_" system_fd, filename = tempfile.mkstemp(suffix=".c", prefix=basename)
• ## sasmodels/models/core_shell_parallelepiped.c

 re077231 // outer integral (with gauss points), integration limits = 0, 1 // substitute d_cos_alpha for sin_alpha d_alpha double outer_sum = 0; //initialize integral for( int i=0; i
• ## sasmodels/models/core_shell_parallelepiped.py

 r97be877 Calculates the form factor for a rectangular solid with a core-shell structure. The thickness and the scattering length density of the shell or "rim" can be different on each (pair) of faces. The thickness and the scattering length density of the shell or "rim" can be different on each (pair) of faces. The three dimensions of the core of the parallelepiped (strictly here a cuboid) may be given in *any* size order as long as the particles are randomly oriented (i.e. take on all possible orientations see notes on 2D below). To avoid multiple fit solutions, especially with Monte-Carlo fit methods, it may be advisable to restrict their ranges. There may be a number of closely similar "best fits", so some trial and error, or fixing of some dimensions at expected values, may help. The form factor is normalized by the particle volume $V$ such that .. math:: I(q) = \text{scale}\frac{\langle f^2 \rangle}{V} + \text{background} I(q) = \frac{\text{scale}}{V} \langle P(q,\alpha,\beta) \rangle + \text{background} where $\langle \ldots \rangle$ is an average over all possible orientations of the rectangular solid. The function calculated is the form factor of the rectangular solid below. The core of the solid is defined by the dimensions $A$, $B$, $C$ such that $A < B < C$. .. image:: img/core_shell_parallelepiped_geometry.jpg of the rectangular solid, and the usual $\Delta \rho^2 \ V^2$ term cannot be pulled out of the form factor term due to the multiple slds in the model. The core of the solid is defined by the dimensions $A$, $B$, $C$ here shown such that $A < B < C$. .. figure:: img/parallelepiped_geometry.jpg Core of the core shell parallelepiped with the corresponding definition of sides. There are rectangular "slabs" of thickness $t_A$ that add to the $A$ dimension (on the $BC$ faces). There are similar slabs on the $AC$ $(=t_B)$ and $AB$ $(=t_C)$ faces. The projection in the $AB$ plane is then .. 
image:: img/core_shell_parallelepiped_projection.jpg The volume of the solid is $(=t_C)$ faces. The projection in the $AB$ plane is .. figure:: img/core_shell_parallelepiped_projection.jpg AB cut through the core-shell parallelepiped showing the cross section of four of the six shell slabs. As can be seen, this model leaves **"gaps"** at the corners of the solid. The total volume of the solid is thus given as .. math:: V = ABC + 2t_ABC + 2t_BAC + 2t_CAB **meaning that there are "gaps" at the corners of the solid.** The intensity calculated follows the :ref:parallelepiped model, with the core-shell intensity being calculated as the square of the sum of the amplitudes of the core and the slabs on the edges. the scattering amplitude is computed for a particular orientation of the core-shell parallelepiped with respect to the scattering vector and then averaged over all possible orientations, where $\alpha$ is the angle between the $z$ axis and the $C$ axis of the parallelepiped, $\beta$ is the angle between projection of the particle in the $xy$ detector plane and the $y$ axis. .. math:: F(Q) amplitudes of the core and the slabs on the edges. The scattering amplitude is computed for a particular orientation of the core-shell parallelepiped with respect to the scattering vector and then averaged over all possible orientations, where $\alpha$ is the angle between the $z$ axis and the $C$ axis of the parallelepiped, and $\beta$ is the angle between the projection of the particle in the $xy$ detector plane and the $y$ axis. .. math:: P(q)=\frac {\int_{0}^{\pi/2}\int_{0}^{\pi/2}F^2(q,\alpha,\beta) \ sin\alpha \ d\alpha \ d\beta} {\int_{0}^{\pi/2} \ sin\alpha \ d\alpha \ d\beta} and ..
math:: F(q,\alpha,\beta) &= (\rho_\text{core}-\rho_\text{solvent}) S(Q_A, A) S(Q_B, B) S(Q_C, C) \\ &+ (\rho_\text{A}-\rho_\text{solvent}) \left[S(Q_A, A+2t_A) - S(Q_A, Q)\right] S(Q_B, B) S(Q_C, C) \\ \left[S(Q_A, A+2t_A) - S(Q_A, A)\right] S(Q_B, B) S(Q_C, C) \\ &+ (\rho_\text{B}-\rho_\text{solvent}) S(Q_A, A) \left[S(Q_B, B+2t_B) - S(Q_B, B)\right] S(Q_C, C) \\ .. math:: S(Q, L) = L \frac{\sin \tfrac{1}{2} Q L}{\tfrac{1}{2} Q L} S(Q_X, L) = L \frac{\sin (\tfrac{1}{2} Q_X L)}{\tfrac{1}{2} Q_X L} and .. math:: Q_A &= \sin\alpha \sin\beta \\ Q_B &= \sin\alpha \cos\beta \\ Q_C &= \cos\alpha Q_A &= q \sin\alpha \sin\beta \\ Q_B &= q \sin\alpha \cos\beta \\ Q_C &= q \cos\alpha where $\rho_\text{core}$, $\rho_\text{A}$, $\rho_\text{B}$ and $\rho_\text{C}$ are the scattering length of the parallelepiped core, and the rectangular are the scattering lengths of the parallelepiped core, and the rectangular slabs of thickness $t_A$, $t_B$ and $t_C$, respectively. $\rho_\text{solvent}$ is the scattering length of the solvent. .. note:: the code actually implements two substitutions: $d(cos\alpha)$ is substituted for -$sin\alpha \ d\alpha$ (note that in the :ref:parallelepiped code this is explicitly implemented with $\sigma = cos\alpha$), and $\beta$ is set to $\beta = u \pi/2$ so that $du = \pi/2 \ d\beta$.  Thus both integrals go from 0 to 1 rather than 0 to $\pi/2$. FITTING NOTES ~~~~~~~~~~~~~ If the scale is set equal to the particle volume fraction, $\phi$, the returned value is the scattered intensity per unit volume, $I(q) = \phi P(q)$. However, **no interparticle interference effects are included in this calculation.** There are many parameters in this model. Hold as many fixed as possible with known values, or you will certainly end up at a solution that is unphysical. The returned value is in units of |cm^-1|, on absolute scale. 
NB: The 2nd virial coefficient of the core_shell_parallelepiped is calculated based on the averaged effective radius $(=\sqrt{(A+2t_A)(B+2t_B)/\pi})$ and length $(C+2t_C)$ values, after appropriately sorting the three dimensions to give an oblate or prolate particle, to give an effective radius, for $S(Q)$ when $P(Q) * S(Q)$ is applied. For 2d data the orientation of the particle is required, described using angles $\theta$, $\phi$ and $\Psi$ as in the diagrams below, for further details of the calculation and angular dispersions see :ref:orientation. The angle $\Psi$ is the rotational angle around the *long_c* axis. For example, $\Psi = 0$ when the *short_b* axis is parallel to the *x*-axis of the detector. For 2d, constraints must be applied during fitting to ensure that the inequality $A < B < C$ is not violated, and hence the correct definition of angles is preserved. The calculation will not report an error, but the results may not be correct. #. There are many parameters in this model. Hold as many fixed as possible with known values, or you will certainly end up at a solution that is unphysical. #. The 2nd virial coefficient of the core_shell_parallelepiped is calculated based on the averaged effective radius $(=\sqrt{(A+2t_A)(B+2t_B)/\pi})$ and length $(C+2t_C)$ values, after appropriately sorting the three dimensions to give an oblate or prolate particle, to give an effective radius for $S(q)$ when $P(q) * S(q)$ is applied. #. For 2d data the orientation of the particle is required, described using angles $\theta$, $\phi$ and $\Psi$ as in the diagrams below, where $\theta$ and $\phi$ define the orientation of the director in the laboratory reference frame of the beam direction ($z$) and detector plane ($x-y$ plane), while the angle $\Psi$ is effectively the rotational angle around the particle $C$ axis. For $\theta = 0$ and $\phi = 0$, $\Psi = 0$ corresponds to the $B$ axis oriented parallel to the y-axis of the detector with $A$ along the x-axis. 
For other $\theta$, $\phi$ values, the order of rotations matters. In particular, the parallelepiped must first be rotated $\theta$ degrees in the $x-z$ plane before rotating $\phi$ degrees around the $z$ axis (in the $x-y$ plane). Applying orientational distribution to the particle orientation (i.e. jitter to one or more of these angles) can get more confusing as jitter is defined **NOT** with respect to the laboratory frame but the particle reference frame. It is thus highly recommended to read :ref:orientation for further details of the calculation and angular dispersions. .. note:: For 2d, constraints must be applied during fitting to ensure that the order of sides chosen is not altered, and hence that the correct definition of angles is preserved. For the default choice shown here, that means ensuring that the inequality $A < B < C$ is not violated. The calculation will not report an error, but the results may not be correct. .. figure:: img/parallelepiped_angle_definition.png Definition of the angles for oriented core-shell parallelepipeds. Note that rotation $\theta$, initially in the $xz$ plane, is carried Note that rotation $\theta$, initially in the $x-z$ plane, is carried out first, then rotation $\phi$ about the $z$ axis, finally rotation $\Psi$ is now around the axis of the cylinder. The neutron or X-ray beam is along the $z$ axis. $\Psi$ is now around the $C$ axis of the particle. The neutron or X-ray beam is along the $z$ axis and the detector defines the $x-y$ plane. .. figure:: img/parallelepiped_angle_projection.png Examples of the angles for oriented core-shell parallelepipeds against the detector plane. Validation ---------- Cross-checked against hollow rectangular prism and rectangular prism for equal thickness overlapping sides, and by Monte Carlo sampling of points within the shape for non-uniform, non-overlapping sides. 
References * **Author:** NIST IGOR/DANSE **Date:** pre 2010 * **Converted to sasmodels by:** Miguel Gonzales **Date:** February 26, 2016 * **Converted to sasmodels by:** Miguel Gonzalez **Date:** February 26, 2016 * **Last Modified by:** Paul Kienzle **Date:** October 17, 2017 * Cross-checked against hollow rectangular prism and rectangular prism for equal thickness overlapping sides, and by Monte Carlo sampling of points within the shape for non-uniform, non-overlapping sides. * **Last Reviewed by:** Paul Butler **Date:** May 24, 2018 - documentation updated """
• ## sasmodels/models/core_shell_sphere.py

 r2d81cfe .. math:: F^2(q) = \frac{3}{V_s}\left[ F(q) = \frac{3}{V_s}\left[ V_c(\rho_c-\rho_s)\frac{\sin(qr_c)-qr_c\cos(qr_c)}{(qr_c)^3} + V_s(\rho_s-\rho_\text{solv})\frac{\sin(qr_s)-qr_s\cos(qr_s)}{(qr_s)^3}
• ## sasmodels/models/guinier.py

 r2d81cfe .. math:: I(q) = \text{scale} \cdot \exp{\left[ \frac{-Q^2R_g^2}{3} \right]} I(q) = \text{scale} \cdot \exp{\left[ \frac{-Q^2 R_g^2 }{3} \right]} + \text{background} .. math:: q = \sqrt{q_x^2 + q_y^2} In scattering, the radius of gyration $R_g$ quantifies the objects's distribution of SLD (not mass density, as in mechanics) from the objects's SLD centre of mass. It is defined by .. math:: R_g^2 = \frac{\sum_i\rho_i\left(r_i-r_0\right)^2}{\sum_i\rho_i} where $r_0$ denotes the object's SLD centre of mass and $\rho_i$ is the SLD at a point $i$. Notice that $R_g^2$ may be negative (since SLD can be negative), which happens when a form factor $P(Q)$ is increasing with $Q$ rather than decreasing. This can occur for core/shell particles, hollow particles, or for composite particles with domains of different SLDs in a solvent with an SLD close to the average match point. (Alternatively, this might be regarded as there being an internal inter-domain "structure factor" within a single particle which gives rise to a peak in the scattering). To specify a negative value of $R_g^2$ in SasView, simply give $R_g$ a negative value ($R_g^2$ will be evaluated as $R_g |R_g|$). Note that the physical radius of gyration, of the exterior of the particle, will still be large and positive. It is only the apparent size from the small $Q$ data that will give a small or negative value of $R_g^2$. References #             ["name", "units", default, [lower, upper], "type","description"], parameters = [["rg", "Ang", 60.0, [0, inf], "", "Radius of Gyration"]] parameters = [["rg", "Ang", 60.0, [-inf, inf], "", "Radius of Gyration"]] Iq = """ double exponent = rg*rg*q*q/3.0; double exponent = fabs(rg)*rg*q*q/3.0; double value = exp(-exponent); return value; # parameters for demo demo = dict(scale=1.0, rg=60.0) demo = dict(scale=1.0,  background=0.001, rg=60.0 ) # parameters for unit tests
• ## sasmodels/models/mass_surface_fractal.py

 r2d81cfe The surface ( $D_s$ ) and mass ( $D_m$ ) fractal dimensions are only valid if $0 < surface\_dim < 6$ , $0 < mass\_dim < 6$ , and $(surface\_dim + mass\_dim ) < 6$ . $(surface\_dim + mass\_dim ) < 6$ . Older versions of sasview may have the default primary particle radius larger than the cluster radius; this was an error, also present in the Schmidt review paper below. The primary particle should be the smaller as described in the original Hurd et al., who also point out that polydispersity in the primary particle sizes may affect their apparent surface fractal dimension. References ---------- P Schmidt, *J Appl. Cryst.*, 24 (1991) 414-435 Equation(19) .. [#] P Schmidt, *J Appl. Cryst.*, 24 (1991) 414-435 Equation(19) .. [#] A J Hurd, D W Schaefer, J E Martin, *Phys. Rev. A*, 35 (1987) 2361-2364 Equation(2) A J Hurd, D W Schaefer, J E Martin, *Phys. Rev. A*, 35 (1987) 2361-2364 Equation(2) Authorship and Verification ---------------------------- * **Converted to sasmodels by:** Piotr Rozyczko **Date:** Jan 20, 2016 * **Last Reviewed by:** Richard Heenan **Date:** May 30, 2018 """ rg_primary    =  rg background   =  background Ref: Schmidt, J Appl Cryst, eq(19), (1991), 24, 414-435 Hurd, Schaefer, Martin, Phys Rev A, eq(2),(1987),35, 2361-2364 Note that 0 < Ds< 6 and 0 < Dm < 6. 
["fractal_dim_mass", "",      1.8, [0.0, 6.0], "", "Mass fractal dimension"], ["fractal_dim_surf", "",      2.3, [0.0, 6.0], "", "Surface fractal dimension"], ["rg_cluster",       "Ang",  86.7, [0.0, inf], "", "Cluster radius of gyration"], ["rg_primary",       "Ang", 4000., [0.0, inf], "", "Primary particle radius of gyration"], ["rg_cluster",       "Ang", 4000., [0.0, inf], "", "Cluster radius of gyration"], ["rg_primary",       "Ang",  86.7, [0.0, inf], "", "Primary particle radius of gyration"], ] # pylint: enable=bad-whitespace, line-too-long fractal_dim_mass=1.8, fractal_dim_surf=2.3, rg_cluster=86.7, rg_primary=4000.0) rg_cluster=4000.0, rg_primary=86.7) tests = [ # Accuracy tests based on content in test/utest_other_models.py [{'fractal_dim_mass':      1.8, # Accuracy tests based on content in test/utest_other_models.py  All except first, changed so rg_cluster is the larger, RKH 30 May 2018 [{'fractal_dim_mass':   1.8, 'fractal_dim_surf':   2.3, 'rg_cluster':   86.7, [{'fractal_dim_mass':      3.3, 'fractal_dim_surf':   1.0, 'rg_cluster':   90.0, 'rg_primary': 4000.0, }, 0.001, 0.18562699016], 'rg_cluster': 4000.0, 'rg_primary':   90.0, }, 0.001, 0.0932516614456], [{'fractal_dim_mass':      1.3, 'fractal_dim_surf':   1.0, 'rg_cluster':   90.0, 'rg_primary': 2000.0, 'fractal_dim_surf':   2.0, 'rg_cluster': 2000.0, 'rg_primary':   90.0, 'background':    0.8, }, 0.001, 1.16539753641], }, 0.001, 1.28296431786], [{'fractal_dim_mass':      2.3, 'fractal_dim_surf':   1.0, 'rg_cluster':   90.0, 'rg_primary': 1000.0, 'fractal_dim_surf':   3.1, 'rg_cluster':  1000.0, 'rg_primary':  30.0, 'scale':        10.0, 'background':    0.0, }, 0.051, 0.000169548800377], }, 0.051, 0.00333804044899], ]
• ## sasmodels/models/parallelepiped.c

 r108e70e inner_total += GAUSS_W[j] * square(si1 * si2); } // now complete change of inner integration variable (1-0)/(1-(-1))= 0.5 inner_total *= 0.5; outer_total += GAUSS_W[i] * inner_total * si * si; } // now complete change of outer integration variable (1-0)/(1-(-1))= 0.5 outer_total *= 0.5;

• ## sasmodels/sasview_model.py

 rd533590 from . import modelinfo from .details import make_kernel_args, dispersion_mesh # Hack: load in any custom distributions # Uses ~/.sasview/weights/*.py unless SASMODELS_WEIGHTS is set in the environ. # Override with weights.load_weights(pattern="/*.py") weights.load_weights() # pylint: disable=unused-import
• ## sasmodels/weights.py

 r3d58247 )) SASMODELS_WEIGHTS = "~/.sasview/weights/*.py" def load_weights(pattern=None): # type: (str) -> None """ Load dispersion distributions matching the given pattern """ import logging import os import os.path import glob import traceback from .custom import load_custom_kernel_module if pattern is None: pattern = os.environ.get("SASMODELS_WEIGHTS", SASMODELS_WEIGHTS) for filename in sorted(glob.glob(os.path.expanduser(pattern))): try: #print("loading weights from", filename) module = load_custom_kernel_module(filename) MODELS[module.Dispersion.type] = module.Dispersion except Exception as exc: logging.error(traceback.format_exc(exc)) def get_weights(disperser, n, width, nsigmas, value, limits, relative):
Note: See TracChangeset for help on using the changeset viewer.