Changeset [6ab64c9:706f466] in sasmodels

Files:
- 2 added
- 9 edited
doc/guide/magnetism/magnetism.rst
(diff r1f058ea → r59485a4)

@@ -77,5 +77,5 @@
 =========== ================================================================
 M0_sld = $D_M M_0$
-Up_theta = $\theta_\mathrm{up}$
+Up_theta = $\theta_{up}$
 M_theta = $\theta_M$
 M_phi = $\phi_M$
doc/guide/pd/polydispersity.rst
(diff r1f058ea → rf8a2baa)

@@ -95,6 +95,6 @@
     \exp\left(-\frac{(x - \bar x)^2}{2\sigma^2}\right)

-where $\bar x$ is the mean of the distribution and *Norm* is a normalization
-factor which is determined during the numerical calculation.
+where $\bar x$ is the mean of the distribution and *Norm* is a normalization factor
+which is determined during the numerical calculation.

 The polydispersity is
@@ -122,6 +122,6 @@
 during the numerical calculation.

-The median value for the distribution will be the value given for the
-respective size parameter, for example, *radius=60*.
+The median value for the distribution will be the value given for the respective
+size parameter, for example, *radius=60*.

 The polydispersity is given by $\sigma$
@@ -208,5 +208,5 @@

 Many commercial Dynamic Light Scattering (DLS) instruments produce a size
-polydispersity parameter, sometimes even given the symbol $p$\ ! This
+polydispersity parameter, sometimes even given the symbol $p$ This
 parameter is defined as the relative standard deviation coefficient of
 variation of the size distribution and is NOT the same as the polydispersity
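For readers following the documentation change above: the *Norm* factor for the Gaussian weight function is fixed numerically rather than analytically. A minimal sketch of how such a normalization can be determined during the numerical calculation, in plain NumPy rather than sasmodels internals (the function name and the 60/6 values are illustrative, echoing the *radius=60* example)::

    import numpy as np

    def gaussian_weights(center, sigma, nsigma=3, npts=35):
        """Sample points and normalized weights for a Gaussian dispersity."""
        x = np.linspace(center - nsigma*sigma, center + nsigma*sigma, npts)
        w = np.exp(-0.5*((x - center)/sigma)**2)
        # Norm is simply the sum of the raw weights over the sampled points
        return x, w/w.sum()

    x, w = gaussian_weights(center=60.0, sigma=6.0)
    assert abs(w.sum() - 1.0) < 1e-12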
doc/guide/resolution.rst
(diff r1f058ea → r30b60d2)

@@ -17,8 +17,8 @@
 resolution contribution into a model calculation/simulation (which by definition
 will be exact) to make it more representative of what has been measured
-experimentally - a process called *smearing*. Sasmodels does the latter.
+experimentally - a process called *smearing*. sasmodels does the latter.

 Both smearing and desmearing rely on functions to describe the resolution
-effect. Sasmodels provides three smearing algorithms:
+effect. sasmodels provides three smearing algorithms:

 * *Slit Smearing*

@@ -99,5 +99,5 @@

 For discrete $q$ values, at the $q$ values of the data points and at the $q$
-values extended up to $q_N = q_i + \Delta q_u$ the smeared
+values extended up to $q_N = q_i + \Delta q_v$ the smeared
 intensity can be approximately calculated as
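The second hunk sits in the slit-smearing discussion, where $\Delta q_v$ is the vertical slit dimension, but the discrete approximation it describes (evaluate the theory on an extended $q$ grid, then take a weighted sum) is easiest to see with pinhole smearing. A rough sketch, assuming sasmodels.resolution.Pinhole1D and using an illustrative 5% dQ/Q and a placeholder $I(q)$::

    import numpy as np
    from sasmodels.resolution import Pinhole1D

    q = np.logspace(-3, -1, 100)      # measurement q grid
    dq = 0.05*q                       # assumed 5% dQ/Q pinhole resolution
    resolution = Pinhole1D(q, dq)     # builds extended q_calc grid and weights
    # placeholder theory evaluated on the extended grid, not on q itself
    Iq_calc = 1.0/(1.0 + (2000.0*resolution.q_calc)**4)
    Iq_smeared = resolution.apply(Iq_calc)   # weighted sum back onto q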
doc/guide/scripting.rst
(diff r4aa5dce → r2e66ef5)

@@ -69,10 +69,4 @@
     $ bumps example/model.py --preview

-Note that bumps and sasmodels are included as part of the SasView
-distribution. On windows, bumps can be called from the cmd prompt
-as follows::
-
-    SasViewCom bumps.cli example/model.py --preview
-
 Using sasmodels directly
 ========================

@@ -111,6 +105,2 @@
     plt.loglog(q, Iq)
     plt.show()
-
-On windows, this can be called from the cmd prompt using sasview as::
-
-    SasViewCom example/cylinder_eval.py
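The tail of the second hunk (plt.loglog(q, Iq)) belongs to the "Using sasmodels directly" example whose Windows-specific SasViewCom notes are removed here. For context, a condensed sketch of that direct-evaluation path, in the spirit of example/cylinder_eval.py (the model choice and q range are illustrative)::

    import numpy as np
    import matplotlib.pyplot as plt
    from sasmodels.core import load_model
    from sasmodels.direct_model import call_kernel

    model = load_model('cylinder')
    q = np.logspace(-3, -1, 200)          # q values in 1/Ang
    kernel = model.make_kernel([q])       # compile kernel for this q grid
    Iq = call_kernel(kernel, dict(radius=200., length=300.))
    plt.loglog(q, Iq)
    plt.show()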
example/oriented_usans.py
(diff r74b0495 → r1cd24b4)

@@ -6,5 +6,5 @@

 # Spherical particle data, not ellipsoids
-sans, usans = load_data('latex_smeared.xml', index='all')
+sans, usans = load_data('../../sasview/sasview/test/1d_data/latex_smeared.xml')
 usans.qmin, usans.qmax = np.min(usans.x), np.max(usans.x)
 usans.mask = (usans.x < 0.0)
example/simul_fit.py
(diff r74b0495 → r1a4d4c0)

@@ -1,2 +1,7 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# To Sasview/documents/scripts
+
 from bumps.names import *
 from sasmodels.core import load_model

@@ -4,58 +9,34 @@
 from sasmodels.data import load_data, plot_data

-# latex data, same sample usans and sans
-# particles radius ~2300, uniform dispersity
-datasets = load_data('latex_smeared.xml', index='all')
-#[print(data) for data in datasets]
-
-# A single sphere model to share between the datasets. We will use
-# FreeVariables below to set the parameters that are independent between
-# the datasets.
-kernel = load_model('sphere')
-pars = dict(scale=0.01, background=0.0, sld=5.0, sld_solvent=0.0, radius=1500.,
-            #radius_pd=0.1, radius_pd_n=35,
-           )
+""" IMPORT THE DATA USED """
+datafiles = ['latex_smeared_out_0.txt', 'latex_smeared_out_1.txt']
+datasets = [load_data(el) for el in datafiles]
+
+for data in datasets:
+    data.qmin = 0.0
+    data.qmax = 10.0
+
+#sphere model
+kernel = load_model('sphere', dtype="single")
+pars = dict(scale=0.01, background=0.0, sld=1.0, sld_solvent=6.0, radius=1500.)
 model = Model(kernel, **pars)
+model.radius.range(0, inf)
+#model.background.range(-inf, inf)
+#model.scale.range(0, inf)
+model.sld.range(-inf, inf)
+model.sld_solvent.range(-inf, inf)

-# radius and polydispersity (if any) are shared
-model.radius.range(0, inf)
-#model.radius_pd.range(0, 1)
-
-# Contrast and dilution are the same for both measurements, but are not
-# separable with a single measurement (i.e., I(q) ~ F(q) contrast^2 Vf),
-# so fit one of scale, sld or solvent sld. With absolute scaling from
-# data reduction, can use the same parameter for both datasets.
-model.scale.range(0, inf)
-#model.sld.range(-inf, inf)
-#model.sld_solvent.range(-inf, inf)
-
-# Background is different for sans and usans so set it as a free variable
-# in the model.
 free = FreeVariables(
-    names=[data.run[0] for data in datasets],
+    names=[data.filename for data in datasets],
     background=model.background,
+    scale=model.scale,
 )
 free.background.range(-inf, inf)
+free.scale.range(0, inf)

-# Note: can access the parameters for the individual models using
-# free.background[0] and free.background[1], setting constraints or
-# ranges as appropriate.
-
-# For more complex systems where different datasets require independent models,
-# separate models can be defined, with parameters tied together using
-# constraint expressions. For example, the following could be used to fit
-# data set 1 to spheres and data set 2 to cylinders of the same volume:
-# model1 = Model(load_model('sphere'))
-# model2 = Model(load_model('cylinder'))
-# model1.sld = model2.sld
-# model1.sld_solvent = model2.sld_solvent
-# model1.scale = model2.scale
-# # set cylinders and spheres to the same volume
-# model1.radius = (3/4*model2.radius**2*model2.length)**(1/3)
-# model1.background.range(0, 2)
-# model2.background.range(0, 2)
-
-# Setup the experiments, sharing the same model across all datasets.
-M = [Experiment(data=data, model=model, name=data.run[0]) for data in datasets]
+M = [Experiment(data=data, model=model) for data in datasets]

 problem = FitProblem(M, freevars=free)
+
+print(problem._parameters)
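As the new header comment suggests, the script is meant to be driven by bumps; the removed comments on the old side also noted that per-dataset values of a FreeVariables parameter remain reachable as free.background[0], free.background[1], and so on. Assuming the two latex_smeared_out_*.txt files sit next to the script, a typical invocation takes the same form the scripting guide uses::

    $ bumps example/simul_fit.py --preview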
sasmodels/bumps_model.py
(diff r74b0495 → r3330bb4)

@@ -133,8 +133,7 @@
     """
     _cache = None  # type: Dict[str, np.ndarray]
-    def __init__(self, data, model, cutoff=1e-5, name=None):
+    def __init__(self, data, model, cutoff=1e-5):
         # type: (Data, Model, float) -> None
         # remember inputs so we can inspect from outside
-        self.name = data.filename if name is None else name
         self.model = model
         self.cutoff = cutoff

@@ -205,7 +204,5 @@
         """
         data, theory, resid = self._data, self.theory(), self.residuals()
-        # TODO: hack to display oriented usans 2-D pattern
-        Iq_calc = self.Iq_calc if isinstance(self.Iq_calc, tuple) else None
-        plot_theory(data, theory, resid, view, Iq_calc=Iq_calc)
+        plot_theory(data, theory, resid, view, Iq_calc=self.Iq_calc)

     def simulate_data(self, noise=None):
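Since Experiment.__init__ loses its name keyword here, callers such as simul_fit.py above now construct experiments from data and model alone. A minimal sketch of the revised call (the model choice, file name, and parameter values are illustrative, not part of the changeset)::

    from sasmodels.core import load_model
    from sasmodels.bumps_model import Model, Experiment
    from sasmodels.data import load_data

    data = load_data('latex_smeared_out_0.txt')   # any 1-D dataset
    model = Model(load_model('sphere'), scale=0.01, background=0.0)
    experiment = Experiment(data=data, model=model, cutoff=1e-5)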
sasmodels/compare.py
(diff rb76191e → r765eb0e)

@@ -533,5 +533,5 @@
             % (pd, n, nsigma, nsigma, pdtype)
     if M0 != 0.:
-        line += " M0:%.3f mtheta:%.1f mphi:%.1f" % (M0, mtheta, mphi)
+        line += " M0:%.3f mphi:%.1f mtheta:%.1f" % (M0, mphi, mtheta)
     return line
sasmodels/data.py
(diff r09141ff → r630156b)

@@ -44,5 +44,5 @@
 Data = Union["Data1D", "Data2D", "SesansData"]

-def load_data(filename, index=0):
+def load_data(filename):
     # type: (str) -> Data
     """

@@ -55,17 +55,21 @@
         filename, indexstr = filename[:-1].split('[')
         index = int(indexstr)
+    else:
+        index = None
     datasets = loader.load(filename)
-    if not datasets:  # None or []
+    if datasets is None:
         raise IOError("Data %r could not be loaded" % filename)
     if not isinstance(datasets, list):
         datasets = [datasets]
-    for data in datasets:
-        if hasattr(data, 'x'):
-            data.qmin, data.qmax = data.x.min(), data.x.max()
-            data.mask = (np.isnan(data.y) if data.y is not None
-                         else np.zeros_like(data.x, dtype='bool'))
-        elif hasattr(data, 'qx_data'):
-            data.mask = ~data.mask
-    return datasets[index] if index != 'all' else datasets
+    if index is None and len(datasets) > 1:
+        raise ValueError("Need to specify filename[index] for multipart data")
+    data = datasets[index if index is not None else 0]
+    if hasattr(data, 'x'):
+        data.qmin, data.qmax = data.x.min(), data.x.max()
+        data.mask = (np.isnan(data.y) if data.y is not None
+                     else np.zeros_like(data.x, dtype='bool'))
+    elif hasattr(data, 'qx_data'):
+        data.mask = ~data.mask
+    return data
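Note the behavioral change: the old index argument (including index='all', which returned a list) is gone, and load_data now always returns a single dataset, selecting parts of a multipart file via a bracketed index parsed from the filename string itself (the filename[:-1].split('[') line above). A usage sketch, assuming latex_smeared.xml is the two-part file used in the examples::

    from sasmodels.data import load_data

    sans = load_data('latex_smeared.xml[0]')    # first part of a multipart file
    usans = load_data('latex_smeared.xml[1]')   # second part
    data = load_data('single_part.dat')         # plain files need no index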