# Changeset eeb772e in sasmodels

Timestamp:
Apr 1, 2019 10:54:03 AM
Parents:
3448301 (diff), 7eb2a4d (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
git-author:
Paul Kienzle <pkienzle@…> (04/01/19 10:54:03)
git-committer:
Message:
Files:
17 edited

• ## sasmodels/mixture.py

rb297ba9

```diff
             combined_pars.append(p)
     parameters = ParameterTable(combined_pars)
+    # Allow for the scenario in which each component has all its PD parameters
+    # active simultaneously.  details.make_details() will throw an error if
+    # too many are used from any one component.
+    parameters.max_pd = sum(part.parameters.max_pd for part in parts)
```
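As a sanity check on the accounting above, a tiny standalone sketch (plain Python with stand-in classes, not the sasmodels API) of why the mixture's polydispersity cap is the sum of its components' caps:

```python
# Stand-in classes for illustration only; in sasmodels these would be
# the parameter tables of the mixture's component models.
class _Table(object):
    def __init__(self, max_pd):
        self.max_pd = max_pd

class _Part(object):
    def __init__(self, max_pd):
        self.parameters = _Table(max_pd)

# e.g. one part with 2 active PD parameters and one with 3
parts = [_Part(2), _Part(3)]
max_pd = sum(part.parameters.max_pd for part in parts)
assert max_pd == 5  # both components may be fully polydisperse at once
```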
• ## sasmodels/modelinfo.py

ra34b811

```diff
     processed.append(parse_parameter(*p))
     partable = ParameterTable(processed)
-    partable.check_angles()
+    partable.check_angles(strict=True)
     return partable
```

```diff
         # properties, such as default=0.0 for structure factor backgrounds.
         self.common_parameters = [Parameter(*p) for p in COMMON_PARAMETERS]
         self.kernel_parameters = parameters
         self._set_vector_lengths()
         self.pd_2d = set(p.name for p in self.call_parameters if p.polydisperse)
+        # Final checks
+        self.check_duplicates()
+        self.check_angles()

     def set_zero_background(self):
         """
```

```diff
         self.defaults = self._get_defaults()

-    def check_angles(self):
+    def check_angles(self, strict=False):
         """
         Check that orientation angles are theta, phi and possibly psi.
+
+        *strict* should be True when checking a parameter table defined
+        in a model file, but False when checking from mixture models, etc.,
+        where the parameters aren't being passed to a calculator directly.
         """
         theta = phi = psi = -1
```

```diff
                 if p.type != 'orientation':
                     raise TypeError("psi must be an orientation parameter")
-            elif p.type == 'orientation':
+            elif strict and p.type == 'orientation':
                 raise TypeError("only theta, phi and psi can be orientation parameters")
         if theta >= 0 and phi >= 0:
             if psi >= 0 and psi != phi+1:
                 raise TypeError("psi must follow phi")
-            if (psi >= 0 and psi != last_par) or (psi < 0 and phi != last_par):
+            # TODO: Why must theta/phi/psi be at the end?  Consistency only?
+            if strict and phi != last_par and psi != last_par:
                 raise TypeError("orientation parameters must appear at the "
                                 "end of the parameter table")
         elif theta >= 0 or phi >= 0 or psi >= 0:
             raise TypeError("oriented shapes must have both theta and phi and maybe psi")

+    def check_duplicates(self):
+        """
+        Check for duplicate parameter names
+        """
+        checked, dups = set(), set()
+        for p in self.call_parameters:
+            if p.id in checked:
+                dups.add(p.id)
+            else:
+                checked.add(p.id)
+        if dups:
+            raise TypeError("Duplicate parameters: {}"
+                            .format(", ".join(sorted(dups))))

     def __getitem__(self, key):
```
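The duplicate check reads the same outside of sasmodels; a minimal standalone sketch, with plain strings standing in for Parameter objects:

```python
# Minimal sketch of the duplicate-parameter check added above, using
# plain strings in place of sasmodels Parameter objects.
def check_duplicates(names):
    checked, dups = set(), set()
    for name in names:
        if name in checked:
            dups.add(name)
        else:
            checked.add(name)
    if dups:
        raise TypeError("Duplicate parameters: {}".format(", ".join(sorted(dups))))

check_duplicates(["radius", "length"])              # fine
try:
    check_duplicates(["radius", "length", "radius"])
except TypeError as exc:
    print(exc)                                      # Duplicate parameters: radius
```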
• ## sasmodels/product.py

r065d77d

To use it, first load form factor P and structure factor S, then create
*make_product_info(P, S)*.

The P@S model is somewhat complicated because there are many special
parameters that need to be handled in particular ways.  Much of the code
is used to figure out what special parameters we have, where to find them
in the P@S model inputs and how to distribute them to the underlying P and
S model calculators.

The parameter packet received by the P@S is a :class:`details.CallDetails`
structure along with a data vector. The CallDetails structure indicates which
parameters are polydisperse, the length of the distribution, and where to
find it in the data vector.  The distributions are ordered from longest to
shortest, with length 1 distributions filling out the distribution set.  That
way the kernel calculator doesn't have to check if it needs another nesting
level since it is always there.  The data vector consists of a list of target
values for the parameters, followed by a concatenation of the distribution
values, and then followed by a concatenation of the distribution weights.
Given the combined details and data for P@S, we must decompose them into
details for P and details for S separately, which unfortunately requires
intimate knowledge of the data structures and tricky code.

The special parameters are:

* *scale* and *background*:
    First two parameters of the value list in each of P, S and P@S.
    When decomposing P@S parameters, ignore *scale* and *background*,
    instead using 1 and 0 for the first two slots of both P and S.
    After calling P and S individually, the results are combined as
    :code:`volfraction*scale*P*S + background`.  The *scale* and
    *background* do not show up in the polydispersity structure so
    they are easy to handle.

* *volfraction*:
    Always the first parameter of S, but it may also be in P. If it is in P,
    then *P.volfraction* is used in the combined P@S list, and
    *S.volfraction* is elided, otherwise *S.volfraction* is used. If we are
    using *volfraction* from P we can treat it like all the other P
    parameters when calling P, but when calling S we need to insert the
    *P.volfraction* into the data vector for S and assign a slot of length 1
    in the distribution. Because we are using the original layout of the
    distribution vectors from P@S, but copying it into private data vectors
    for S and P, we are free to "borrow" a P slot to store the missing
    *S.volfraction* distribution.  We use the *P.volfraction* slot itself but
    any slot will work.

    For hollow shapes, *volfraction* represents the volume fraction of
    material but S needs the volume fraction enclosed by the shape. The
    answer is to scale the user specified volume fraction by the form:shell
    ratio computed from the average form volume and average shell volume
    returned from P.  Use the original *volfraction* divided by
    *shell_volume* to compute the number density, and scale P@S by that to
    get absolute scaling on the final *I(q)*.  The *scale* for P@S should
    therefore usually be one.

* *radius_effective*:
    Always the second parameter of S and always part of P@S, but never in P.
    The value may be calculated using *P.radius_effective()* or it may be
    set to the *radius_effective* value in P@S, depending on
    *radius_effective_mode*.  If part of S, the value may be polydisperse.
    If calculated by P, then it will be the weighted average of effective
    radii computed for the polydisperse shape parameters.
* *structure_factor_mode*:
    If P@S supports beta approximation (i.e., if it has the *Fq* function
    that returns $<F>$ and $<F^2>$), then *structure_factor_mode* will be
    added to the P@S parameters right after the S parameters.  This mode
    may be 0 for the monodisperse approximation or 1 for the beta
    approximation.  We will add more values here as we implement more
    complicated operations, but for now P and S must be computed separately.
    If beta, then we return *I = scale volfrac/volume ( <F^2> + <F>^2 (S-1))
    + background*.  If not beta then return *I = scale/volume P S +
    background*.  In both cases, return the appropriate intermediate values.

* *radius_effective_mode*:
    If P defines the *radius_effective* function (and therefore
    *P.info.radius_effective_modes* is a list of effective radius modes),
    then *radius_effective_mode* will be the final parameter in P@S.  Mode
    will be zero if *radius_effective* is defined by the user using the S
    parameter; any other value and the *radius_effective* parameter will be
    filled in from the value computed in P.  In the latter case, the
    polydispersity information for *S.radius_effective* will need to be
    suppressed, with pd length set to 1, the first value set to the
    effective radius and the first weight set to 1.  Do this after composing
    the S data vector so the inputs are left untouched.

* *regular parameters*:
    The regular P parameters form a block of length *P.info.npars* at the
    start of the data vector (after scale and background).  These will be
    followed by *S.effective_radius*, and *S.volfraction* (if
    *P.volfraction* is absent), and then the regular S parameters.  The P
    and S blocks can be copied as a group into the respective P and S data
    vectors.  We can copy the distribution value and weight vectors
    untouched to both the P and S data vectors since they are referenced by
    offset and length.  We can update the radius_effective slots in the P
    data vector with *P.radius_effective()* if needed.

* *magnetic parameters*:
    For each P parameter that is an SLD there will be a set of three
    magnetic parameters tacked on to P@S after the regular P and S and
    after the special *structure_factor_mode* and *radius_effective_mode*.
    These can be copied as a group after the regular P parameters.  There
    won't be any magnetic S parameters.

```diff
 """
 from __future__ import print_function, division
```

```diff
     if not s_info.parameters.magnetism_index == []:
         raise TypeError("S should not have SLD parameters")
+    if RADIUS_ID in p_info.parameters:
+        raise TypeError("P should not have {}".format(RADIUS_ID))
     p_id, p_name, p_pars = p_info.id, p_info.name, p_info.parameters
     s_id, s_name, s_pars = s_info.id, s_info.name, s_info.parameters

-    # Create list of parameters for the combined model.  If there
-    # are any names in P that overlap with those in S, modify the name in S
-    # to distinguish it.
+    p_has_volfrac = VOLFRAC_ID in p_info.parameters
+    # Create list of parameters for the combined model.  If a name in
+    # S overlaps a name in P, tag the S parameter name to distinguish it.
+    # If the tagged name also collides it will be caught by the parameter
+    # table builder.  Similarly if any special names are abused.  Need the
+    # pairs to create the translation table for random model generation.
     p_set = set(p.id for p in p_pars.kernel_parameters)
-    s_list = [(_tag_parameter(par) if par.id in p_set else par)
-              for par in s_pars.kernel_parameters]
     # Check if still a collision after renaming.  This could happen if for
     # example S has volfrac and P has both volfrac and volfrac_S.
```
```diff
     if any(p.id in p_set for p in s_list):
         raise TypeError("name collision: P has P.name and P.name_S while S has S.name")

-    # make sure effective radius is not a polydisperse parameter in product
-    s_list[0] = copy(s_list[0])
-    s_list[0].polydisperse = False
-    translate_name = dict((old.id, new.id)
-                          for old, new in zip(s_pars.kernel_parameters, s_list))
+    s_pairs = [(par, (_tag_parameter(par) if par.id in p_set else par))
+               for par in s_pars.kernel_parameters
+               # Note: exclude volfraction from s_list if volfraction in p
+               if par.id != VOLFRAC_ID or not p_has_volfrac]
+    s_list = [pair[0] for pair in s_pairs]

     # Build combined parameter table
     combined_pars = p_pars.kernel_parameters + s_list + make_extra_pars(p_info)
     parameters = ParameterTable(combined_pars)
+    # Allow for the scenario in which each component has all its PD parameters
+    # active simultaneously.  details.make_details() will throw an error if
+    # too many are used from any one component.
     parameters.max_pd = p_pars.max_pd + s_pars.max_pd

+    # TODO: does user-defined polydisperse S.radius_effective make sense?
+    # make sure effective radius is not a polydisperse parameter in product
+    #s_list[0] = copy(s_list[0])
+    #s_list[0].polydisperse = False
+    s_translate = {old.id: new.id for old, new in s_pairs}
```

```diff
     def random():
         """Random set of model parameters for product model"""
         combined_pars = p_info.random()
-        s_names = set(par.id for par in s_pars.kernel_parameters)
-        combined_pars.update((translate_name[k], v)
+        combined_pars.update((s_translate[k], v)
                              for k, v in s_info.random().items()
-                             if k in s_names)
+                             if k in s_translate)
         return combined_pars
```

```diff
 def _intermediates(
-        F1,               # type: np.ndarray
-        F2,               # type: np.ndarray
+        F,                # type: np.ndarray
+        Fsq,              # type: np.ndarray
         S,                # type: np.ndarray
         scale,            # type: float
```

```diff
+    # TODO: 2. consider implications if there are intermediate results in P(Q)
         parts = OrderedDict((
-            ("P(Q)", scale*F2),
+            ("P(Q)", scale*Fsq),
             ("S(Q)", S),
-            ("beta(Q)", F1**2 / F2),
-            ("S_eff(Q)", 1 + (F1**2 / F2)*(S-1)),
+            ("beta(Q)", F**2 / Fsq),
+            ("S_eff(Q)", 1 + (F**2 / Fsq)*(S-1)),
-            ("effective_radius", radius_effective),
+            ("radius_effective", radius_effective),
-            # ("I(Q)", scale*(F2 + (F1**2)*(S-1)) + bg),
+            # ("I(Q)", scale*(Fsq + (F**2)*(S-1)) + bg),
         ))
     else:
         parts = OrderedDict((
-            ("P(Q)", scale*F2),
+            ("P(Q)", scale*Fsq),
             ("S(Q)", S),
             ("effective_radius", radius_effective),
```

```diff
         self.results = []  # type: List[np.ndarray]
+        # Find index of volfraction parameter in parameter list
+        for k, p in enumerate(model_info.parameters.call_parameters):
+            if p.id == VOLFRAC_ID:
+                self._volfrac_index = k
+                break
+        else:
+            raise RuntimeError("no %s parameter in %s"%(VOLFRAC_ID, self))

         p_info, s_info = self.info.composition[1]
         p_npars = p_info.parameters.npars
         s_npars = s_info.parameters.npars

+        have_beta_mode = p_info.have_Fq
+        have_er_mode = p_info.radius_effective_modes is not None
+        volfrac_in_p = self._volfrac_index < p_npars + 2  # scale & background

+        # Slices into the details length/offset structure for P@S.
+        # Made complicated by the possibly missing volfraction in S.
+        self._p_detail_slice = slice(0, p_npars)
+        self._s_detail_slice = slice(p_npars, p_npars+s_npars-volfrac_in_p)
+        self._volfrac_in_p = volfrac_in_p

+        # P block from data vector, without scale and background
+        first_p = 2
+        last_p = p_npars + 2
+        self._p_value_slice = slice(first_p, last_p)
+        # radius_effective is the first parameter in S from the data vector.
```
```diff
+        self._er_index = last_p

+        # S block from data vector, without scale, background, volfrac or er.
+        first_s = last_p + 2 - volfrac_in_p
+        last_s = first_s + s_npars - 2
+        self._s_value_slice = slice(first_s, last_s)
+        # S distribution block in S data vector starts after all S values
+        self._s_dist_slice = slice(2 + s_npars, None)

+        # structure_factor_mode is the first parameter after P and S.  Skip
+        # 2 for scale and background, and subtract 1 in case there is no
+        # volfraction in S.
+        self._beta_mode_index = last_s if have_beta_mode else 0
+        # radius_effective_mode is the second parameter after P and S
+        # unless structure_factor_mode isn't available, in which case it
+        # is first.
+        self._er_mode_index = last_s + have_beta_mode if have_er_mode else 0

+        # Magnetic parameters are after everything else.  If they exist,
+        # they will only be for form factor P, not structure factor S.
+        first_mag = last_s + have_beta_mode + have_er_mode
+        mag_pars = 3*p_info.parameters.nmagnetic
+        last_mag = first_mag + (mag_pars + 3 if mag_pars else 0)
+        self._magentic_slice = slice(first_mag, last_mag)

     def Iq(self, call_details, values, cutoff, magnetic):
         # type: (CallDetails, np.ndarray, float, bool) -> np.ndarray
         p_info, s_info = self.info.composition[1]
-        p_npars = p_info.parameters.npars
-        p_length = call_details.length[:p_npars]
-        p_offset = call_details.offset[:p_npars]
-        s_npars = s_info.parameters.npars
-        s_length = call_details.length[p_npars:p_npars+s_npars]
-        s_offset = call_details.offset[p_npars:p_npars+s_npars]

-        # Beta mode parameter is the first parameter after P and S parameters
-        have_beta_mode = p_info.have_Fq
-        beta_mode_offset = 2+p_npars+s_npars
-        beta_mode = (values[beta_mode_offset] > 0) if have_beta_mode else False
-        if beta_mode and self.p_kernel.dim == '2d':
-            raise NotImplementedError("beta not yet supported for 2D")

-        # R_eff type parameter is the second parameter after P and S parameters
-        # unless the model doesn't support beta mode, in which case it is first
-        have_radius_type = p_info.radius_effective_modes is not None
-        #print(p_npars,s_npars)
-        radius_type_offset = 2+p_npars+s_npars + (1 if have_beta_mode else 0)
-        #print(values[radius_type_offset])
-        radius_type = int(values[radius_type_offset]) if have_radius_type else 0

-        # Retrieve the volume fraction, which is the second of the
-        # 'S' parameters in the parameter list, or 2+np in 0-origin,
-        # as well as the scale and background.
-        volfrac = values[3+p_npars]
+        # Retrieve values from the data vector
         scale, background = values[0], values[1]
-        # if there are magnetic parameters, they will only be on the
-        # form factor P, not the structure factor S.
-        nmagnetic = len(self.info.parameters.magnetism_index)
-        if nmagnetic:
-            spin_index = self.info.parameters.npars + 2
-            magnetism = values[spin_index: spin_index+3+3*nmagnetic]
-        else:
-            magnetism = []
+        volfrac = values[self._volfrac_index]
+        er_mode = (int(values[self._er_mode_index])
+                   if self._er_mode_index > 0 else 0)
+        beta_mode = (values[self._beta_mode_index] > 0
+                     if self._beta_mode_index > 0 else False)

+        nvalues = self.info.parameters.nvalues
+        nweights = call_details.num_weights
+        weights = values[nvalues:nvalues + 2*nweights]

+        # Can't do 2d and beta_mode just yet
+        if beta_mode and self.p_kernel.dim == '2d':
+            raise NotImplementedError("beta not yet supported for 2D")

         # Construct the calling parameters for P.
```
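To make the slice arithmetic above concrete, here is a toy sketch with hypothetical sizes (not kernel data): P with 3 parameters and a hardsphere-like S whose only parameters are *radius_effective* and *volfraction*, with *volfraction* held in S:

```python
# Toy layout for a P@S value vector, following the slice arithmetic above.
p_npars, s_npars, volfrac_in_p = 3, 2, False

first_p, last_p = 2, 2 + p_npars            # skip scale and background
p_value_slice = slice(first_p, last_p)      # slice(2, 5) -> P block
er_index = last_p                           # index 5 -> S.radius_effective
first_s = last_p + 2 - volfrac_in_p         # skip er and volfraction slots
s_value_slice = slice(first_s, first_s + s_npars - 2)  # remaining S params

values = ["scale", "background", "P0", "P1", "P2", "r_eff", "volfrac"]
print(values[p_value_slice])   # ['P0', 'P1', 'P2']
print(values[er_index])        # 'r_eff'
print(values[s_value_slice])   # [] (hardsphere-like S has no extra params)
```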
```diff
+        p_length = call_details.length[self._p_detail_slice]
+        p_offset = call_details.offset[self._p_detail_slice]
         p_details = make_details(p_info, p_length, p_offset, nweights)
         p_values = [
             [1., 0.], # scale=1, background=0,
-            values[2:2+p_npars],
-            magnetism,
+            values[self._p_value_slice],
+            values[self._magentic_slice],
             weights]
         spacer = (32 - sum(len(v) for v in p_values)%32)%32
         p_values = np.hstack(p_values).astype(self.p_kernel.dtype)

+        # Call the form factor kernel to compute <F> and <F^2>.
+        # If the model doesn't support Fq the returned <F> will be None.
+        F, Fsq, radius_effective, shell_volume, volume_ratio \
+            = self.p_kernel.Fq(p_details, p_values, cutoff, magnetic, er_mode)

+        # TODO: async call to the GPU

         # Construct the calling parameters for S.
-        if radius_type > 0:
-            # If R_eff comes from form factor, make sure it is monodisperse.
-            # weight is set to 1 later, after the value array is created
+        s_length = call_details.length[self._s_detail_slice]
+        s_offset = call_details.offset[self._s_detail_slice]
+        if self._volfrac_in_p:
+            # Volfrac is in P and missing from S so insert a slot for it.  Say
+            # the distribution is length 1 and use the slot for volfraction
+            # from the P distribution.
+            s_length = np.insert(s_length, 1, 1)
+            s_offset = np.insert(s_offset, 1, p_offset[self._volfrac_index - 2])
+        if er_mode > 0:
+            # If effective_radius comes from P, make sure it is monodisperse.
+            # Weight is set to 1 later, after the value array is created
             s_length[0] = 1
         s_details = make_details(s_info, s_length, s_offset, nweights)
         s_values = [
-            [1., 0.], # scale=1, background=0,
-            values[2+p_npars:2+p_npars+s_npars],
+            [1., # scale=1
+             0., # background=0,
+             values[self._er_index], # S.radius_effective; may be replaced by P
+             0.], # volfraction; will be replaced by volfrac * volume_ratio
+            # followed by S parameters after effective_radius and volfraction
+            values[self._s_value_slice],
             weights,
         ]
         s_values = np.hstack(s_values).astype(self.s_kernel.dtype)

-        # Call the form factor kernel to compute <F> and <F^2>.
-        # If the model doesn't support Fq the returned <F> will be None.
-        F1, F2, radius_effective, shell_volume, volume_ratio = self.p_kernel.Fq(
-            p_details, p_values, cutoff, magnetic, radius_type)

         # Call the structure factor kernel to compute S.
         # Plug R_eff from the form factor into structure factor parameters
         # and scale volume fraction by form:shell volume ratio. These changes
         #print("R_eff=%d:%g, volfrac=%g, volume ratio=%g"
         #      % (radius_type, radius_effective, volfrac, volume_ratio))
-        if radius_type > 0:
+        s_dist = s_values[self._s_dist_slice]
+        if er_mode > 0:
             # set the value to the model R_eff and set the weight to 1
-            s_values[2] = s_values[2+s_npars+s_offset[0]] = radius_effective
-            s_values[2+s_npars+s_offset[0]+nweights] = 1.0
-        s_values[3] = s_values[2+s_npars+s_offset[1]] = volfrac*volume_ratio
+            s_values[2] = s_dist[s_offset[0]] = radius_effective
+            s_dist[s_offset[0]+nweights] = 1.0
+        s_values[3] = s_dist[s_offset[1]] = volfrac*volume_ratio
+        s_dist[s_offset[1]+nweights] = 1.0

         # Call the structure factor kernel to compute S.
         S = self.s_kernel.Iq(s_details, s_values, cutoff, False)
         #print("P", Fsq[:10])
         #print("S", S[:10])
         #print(radius_effective, volfrac*volume_ratio)

         # Combine form factor and structure factor
-        #print("beta", beta_mode, F1, F2, S)
-        PS = F2 + F1**2*(S-1) if beta_mode else F2*S
+        #print("beta", beta_mode, F, Fsq, S)
+        PS = Fsq + F**2*(S-1) if beta_mode else Fsq*S
```
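A small numerical sketch of the combination step above, contrasting the beta approximation with the plain monodisperse product (toy arrays, not kernel output):

```python
import numpy as np

F = np.array([0.9, 0.5, 0.2])      # <F>, from the form factor kernel
Fsq = np.array([1.0, 0.4, 0.1])    # <F^2>
S = np.array([1.2, 1.1, 1.0])      # structure factor

beta_mode = True
PS = Fsq + F**2*(S-1) if beta_mode else Fsq*S

# beta(Q) = <F>^2/<F^2>, so this is equivalent to Fsq*(1 + beta*(S-1)),
# i.e. applying the "effective" structure factor S_eff from _intermediates.
beta = F**2 / Fsq
assert np.allclose(PS, Fsq*(1 + beta*(S-1)))
```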
```diff
-        # Determine overall scale factor. Hollow shapes are weighted by
-        # shell_volume, so that is needed for volume normalization.  For
-        # solid shapes we can use shell_volume as well since it is equal
-        # to form volume.
-        combined_scale = scale*volfrac/shell_volume
-        final_result = combined_scale*PS + background
+        # Determine overall scale factor. Hollow shapes are weighted by
+        # shell_volume, so that is needed for number density estimation.
+        # For solid shapes we can use shell_volume as well since it is
+        # equal to form volume.  If P already has a volfraction parameter,
+        # then assume that it is already on absolute scale, and don't
+        # include volfrac in the combined_scale.
+        combined_scale = scale*(volfrac if not self._volfrac_in_p else 1.0)
+        final_result = combined_scale/shell_volume*PS + background

         # Capture intermediate values so user can see them.  These are
         # the results directly rather than through a lazy evaluator.
         self.results = lambda: _intermediates(
-            F1, F2, S, combined_scale, radius_effective, beta_mode)
+            F, Fsq, S, combined_scale, radius_effective, beta_mode)

         return final_result
```
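A worked sketch of the number-density scaling above, with made-up numbers:

```python
# Made-up values for illustration; in the kernel these come from the
# value vector and from the P kernel's volume calculation.
scale, background = 1.0, 0.001
volfrac, shell_volume = 0.2, 5.0e5   # e.g. shell volume in Ang^3 from P
volfrac_in_p = False                 # volfraction supplied by S here
PS = 0.8                             # combined form*structure intensity

# volfrac is skipped when P itself carries a volfraction parameter,
# since P is then assumed to already be on absolute scale.
combined_scale = scale*(volfrac if not volfrac_in_p else 1.0)
I = combined_scale/shell_volume*PS + background
print(I)
```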
• ## doc/guide/index.rst

rda5536f

```diff
    pd/polydispersity.rst
    resolution.rst
-   plugin.rst
+   fitting_sq.rst
    magnetism/magnetism.rst
    orientation/orientation.rst
    sesans/sans_to_sesans.rst
    sesans/sesans_fitting.rst
+   plugin.rst
    scripting.rst
    refs.rst
```
• ## doc/guide/pd/polydispersity.rst

rd089a00

--------------------------------------------

For some models we can calculate the average intensity for a population of
particles that possess size and/or orientational (ie, angular) distributions.
In SasView we call the former *polydispersity* but use the parameter *PD* to
parameterise both. In other words, the meaning of *PD* in a model depends on
the actual parameter it is being applied to.

The resultant intensity is then normalized by the average particle volume
such that

.. math::

    P(q) = \text{scale} \langle F^* F \rangle / V + \text{background}

where $F$ is the scattering amplitude and $\langle\cdot\rangle$ denotes an
average over the distribution $f(x; \bar x, \sigma)$, giving

.. math::

    P(q) = \frac{\text{scale}}{V} \int_\mathbb{R}
    f(x; \bar x, \sigma) F^2(q, x)\, dx + \text{background}

Each distribution is characterized by a center value $\bar x$ or
$x_\text{med}$, a width parameter $\sigma$ (note this is *not necessarily*
the standard deviation, so read the description carefully), the number of
sigmas $N_\sigma$ to include from the tails of the distribution, and the
number of points used to compute the average. The center of the distribution
is set by the value of the model parameter.

The meaning of a polydispersity parameter *PD* (not to be confused with
molecular weight distributions in polymer science) in a model depends on the
type of parameter it is being applied to. The distribution width applied to
*volume* (ie, shape-describing) parameters is relative to the center value
such that $\sigma = \mathrm{PD} \cdot \bar x$. However, the distribution
width applied to *orientation* (ie, angle-describing) parameters is just
$\sigma = \mathrm{PD}$.

$N_\sigma$ determines how far into the tails to evaluate the distribution.

Users should note that the averaging computation is very intensive. Applying
polydispersion and/or orientational distributions to multiple parameters at
the same time, or increasing the number of points in the distribution, will
require patience! However, the calculations are generally more robust with
more data points or more angles.

*  *Schulz Distribution*
*  *Array Distribution*
*  *User-defined Distributions*

These are all implemented as *number-average* distributions.
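The number-average construction above can be sketched directly in numpy. The following toy example (my own illustration, not sasmodels code) applies a Gaussian width $\sigma = \mathrm{PD} \cdot \bar x$ to the radius of a unit-contrast sphere and normalizes by the average volume:

```python
import numpy as np

def sphere_F(q, r):
    """Scattering amplitude F(q, r) for a homogeneous sphere (contrast=1)."""
    qr = q * r
    V = 4/3 * np.pi * r**3
    return 3 * V * (np.sin(qr) - qr*np.cos(qr)) / qr**3

q = np.linspace(0.001, 0.5, 200)          # 1/Ang
rbar, pd, nsigmas, npts = 50.0, 0.1, 3, 35
sigma = pd * rbar                          # width for a *volume* parameter
r = np.linspace(rbar - nsigmas*sigma, rbar + nsigmas*sigma, npts)
w = np.exp(-0.5*((r - rbar)/sigma)**2)     # Gaussian weights (unnormalized)

F2 = np.array([sphere_F(q, ri)**2 for ri in r])      # npts x nq
Vbar = np.sum(w * 4/3*np.pi*r**3) / np.sum(w)        # number-average volume
Pq = (w[:, None] * F2).sum(axis=0) / w.sum() / Vbar  # <F^2>/<V>
```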
Additional distributions are under consideration.

**Beware: when the Polydispersity & Orientational Distribution panel in
SasView is first opened, the default distribution for all parameters is
the Gaussian Distribution. This may not be suitable. See Suggested
Applications below.**

.. note:: In 2009 IUPAC decided to introduce the new term 'dispersity' to
    replace the term 'polydispersity' (see *Pure Appl. Chem.*, (2009), 81(2),
    351-353) in order to make the terminology describing distributions of
    chemical properties unambiguous. However, these terms are unrelated to
    the proportional size distributions and orientational distributions used
    in SasView models.

If applying polydispersion to parameters describing interfacial thicknesses
or angular orientations, consider using the Gaussian or Boltzmann
distributions.

If applying polydispersion to parameters describing angles, use the Uniform
distribution. Beware of using distributions that are always positive (eg,
the Lognormal) because angles can be negative!

The array distribution provides a very simple means of implementing a user-
defined distribution, but without any fittable parameters. Greater
flexibility is conferred by the user-defined distribution.

.. ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ

User-defined Distributions
^^^^^^^^^^^^^^^^^^^^^^^^^^

You can also define your own distribution by creating a python file defining
a *Distribution* object with a *_weights* method.  The *_weights* method
takes *center*, *sigma*, *lb* and *ub* as arguments, and can access
*self.npts* and *self.nsigmas* from the distribution.  They are interpreted
as follows:

* *center* the value of the shape parameter (for size dispersity) or zero
  if it is an angular dispersity.  This parameter may be fitted.

* *sigma* the width of the distribution, which is the polydispersity
  parameter times the center for size dispersity, or the polydispersity
  parameter alone for angular dispersity.  This parameter may be fitted.

* *lb*, *ub* are the parameter limits (lower & upper bounds) given in the
  model definition file.  For example, a radius parameter has *lb* equal to
  zero.  A volume fraction parameter would have *lb* equal to zero and *ub*
  equal to one.

* *self.nsigmas* the distance to go into the tails when evaluating the
  distribution.  For a two parameter distribution, this value could be
  co-opted to use for the second parameter, though it will not be available
  for fitting.

* *self.npts* the number of points to use when evaluating the distribution.
  The user will adjust this to trade calculation time for accuracy, but the
  distribution code is free to return more or fewer, or use it for the third
  parameter in a three parameter distribution.

As an example, the following code wraps the Laplace distribution from scipy
stats::

    import numpy as np
    from scipy.stats import laplace

    from sasmodels import weights

    class Dispersion(weights.Dispersion):
        r"""
        Laplace distribution

        .. math::

            w(x) = e^{-\sigma |x - \mu|}
        """
        type = "laplace"
        default = dict(npts=35, width=0, nsigmas=3)  # default values
        def _weights(self, center, sigma, lb, ub):
            x = self._linspace(center, sigma, lb, ub)
            wx = laplace.pdf(x, center, sigma)
            return x, wx

You can plot the weights for a given value and width using the following::

    from numpy import inf
    from matplotlib import pyplot as plt
    from sasmodels import weights

    # reload the user-defined weights
    weights.load_weights()
    x, wx = weights.get_weights('laplace', n=35, width=0.1, nsigmas=3,
                                value=50, limits=[0, inf], relative=True)

    # plot the weights
    plt.interactive(True)
    plt.plot(x, wx, 'x')

The *self.nsigmas* and *self.npts* parameters are normally used to control
the accuracy of the distribution integral. The *self._linspace* function
uses them to define the *x* values (along with the *center*, *sigma*, *lb*,
and *ub* which are passed as parameters).  If you repurpose npts or nsigmas
you will need to generate your own *x*.  Be sure to honour the limits *lb*
and *ub*, for example to disallow a negative radius or constrain the volume
fraction to lie between zero and one.

To activate a user-defined distribution, put it in a file such as
*distname.py* in the *SAS_WEIGHTS_PATH* folder.  This is defined with an
environment variable, defaulting to::

    SAS_WEIGHTS_PATH=~/.sasview/weights

The weights path is loaded on startup.  To update the distribution
definition in a running application you will need to enter the following
python commands::

    import sasmodels.weights
    sasmodels.weights.load_weights('path/to/distname.py')

.. ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ

Note about DLS polydispersity
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Several measures of polydispersity abound in Dynamic Light Scattering (DLS)
and it should not be assumed that any of the following can be simply equated
with the polydispersity *PD* parameter used in SasView.

The dimensionless **Polydispersity Index (PI)** is a measure of the width of
the distribution of autocorrelation function decay rates (*not* the
distribution of particle sizes itself, though the two are inversely related)
and is defined by ISO 22412:2017 as

.. math::

    PI = \mu_2 / \bar \Gamma^2

where $\mu_2$ is the second cumulant of the distribution of decay rates and
$\bar \Gamma$ is its intensity-weighted average value.

It may also be expressed as

.. math::

    PI = \sigma^2 / (2 \bar \Gamma^2)

where $\sigma$ is the standard deviation, allowing a
**Relative Polydispersity (RP)** to be defined as

.. math::

    RP = \sigma / \bar \Gamma = \sqrt{2 \cdot PI}

PI values smaller than 0.05 indicate a highly monodisperse system. Values
greater than 0.7 indicate significant polydispersity.

The **size polydispersity P-parameter** is defined as the relative standard
deviation, or coefficient of variation,
.. math::

    P = \sqrt{\nu} / \bar R

where $\nu$ is the variance of the distribution and $\bar R$ is the mean
value of $R$. Here, the product $P \bar R$ is *equal* to the standard
deviation of the Lognormal distribution.
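As an illustrative calculation: a measured PI of 0.05 corresponds to
$RP = \sqrt{2 \times 0.05} \approx 0.32$, that is, the decay-rate
distribution has a width of roughly 32% of its mean value.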
• ## doc/guide/plugin.rst

rdb1d9d5

```diff
 .............

-.. note:: Pure python models do not yet support direct computation of
-   $<F>$ or $<F>^2$. Neither do they support orientational distributions
-   or magnetism (use C models if these are required).
-
 For pure python models, define the *Iq* function::
```

```diff
 is automatically scaled by *form_volume/shell_volume* prior to calling the
 structure factor.
+
+**Note: Pure python models do not yet support direct computation of the**
+**average of $F(q)$ and $F^2(q)$. Neither do they support orientational**
+**distributions or magnetism (use C models if these are required).**

 Embedded C Models
```
• ## sasmodels/compare.py

rb297ba9

```diff
 from . import core
+from . import weights
 from . import kerneldll
 from . import kernelcl
 from .direct_model import DirectModel, get_mesh
 from .generate import FLOAT_RE, set_integration_size
-from .weights import plot_weights
 # pylint: disable=unused-import
```

```diff
 === environment variables ===
-    -DSAS_MODELPATH=path sets directory containing custom models
+    -DSAS_MODELPATH=~/.sasmodels/custom_models sets path to custom models
+    -DSAS_WEIGHTS_PATH=~/.sasview/weights sets path to custom distributions
     -DSAS_OPENCL=vendor:device|cuda:device|none sets the target GPU device
     -DXDG_CACHE_HOME=~/.cache sets the pyopencl cache root (linux only)
     -DSAS_COMPILER=tinycc|msvc|mingw|unix sets the DLL compiler
-    -DSAS_OPENMP=1 turns on OpenMP for the DLLs
-    -DSAS_DLL_PATH=path sets the path to the compiled modules
+    -DSAS_OPENMP=0 set to 1 to turn on OpenMP for the DLLs
+    -DSAS_DLL_PATH=~/.sasmodels/compiled_models sets the DLL cache
```

The interpretation of quad precision depends on architecture, and may

```diff
     model_info = base._kernel.info
     dim = base._kernel.dim
-    plot_weights(model_info, get_mesh(model_info, base_pars, dim=dim))
+    weights.plot_weights(model_info, get_mesh(model_info, base_pars, dim=dim))
     if opts['show_profile']:
         import pylab
```

```diff
     #import pprint; pprint.pprint(model_info)
+    # Hack to load user-defined distributions; run through all parameters
+    # and make sure any pd_type parameter is a defined distribution.
+    if (any(p.endswith('pd_type') and v not in weights.MODELS
+            for p, v in pars.items())
+            or any(p.endswith('pd_type') and v not in weights.MODELS
+                   for p, v in pars2.items())):
+        weights.load_weights()
     if opts['show_pars']:
         if model_info.name != model_info2.name or pars != pars2:
```
• ## sasmodels/kernelcl.py

ra34b811

```diff
 ENV = None
 def reset_environment():
-    # type: () -> None
+    # type: () -> "GpuEnvironment"
     """
-    Call to create a new OpenCL context, such as after a change to SAS_OPENCL.
+    Return a new OpenCL context, such as after a change to SAS_OPENCL.
     """
     global ENV
     ENV = GpuEnvironment() if use_opencl() else None
+    return ENV

 def environment():
```
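A hedged usage sketch of the new return value (assuming sasmodels is installed; pyopencl is optional):

```python
# Pick a new OpenCL target at runtime: change SAS_OPENCL, then rebuild
# the cached environment.  reset_environment() now returns the fresh
# GpuEnvironment, or None when OpenCL is disabled.
import os
from sasmodels import kernelcl

os.environ["SAS_OPENCL"] = "none"   # e.g. turn the GPU path off
env = kernelcl.reset_environment()  # rebuilds module-level ENV and returns it
print(env)                          # None, since OpenCL was disabled
```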
• ## sasmodels/models/__init__.py

r2d81cfe

"""
1D Modeling for SAS

Model definition files
----------------------

The models below are grouped by type.  The list is a snapshot at a particular
time and may be out of date.

Models with pure form factor (all of which define *F(Q)*):

    :mod:`barbell`, :mod:`capped_cylinder`, :mod:`core_multi_shell`,
    :mod:`core_shell_bicelle`, :mod:`core_shell_bicelle_elliptical`,
    :mod:`core_shell_bicelle_elliptical_belt_rough`,
    :mod:`core_shell_cylinder`, :mod:`core_shell_ellipsoid`,
    :mod:`core_shell_parallelepiped`, :mod:`core_shell_sphere`,
    :mod:`cylinder` [limiting conditions (long rods, thin disks) not
    implemented], :mod:`ellipsoid`, :mod:`elliptical_cylinder`,
    :mod:`fuzzy_sphere`, :mod:`hollow_cylinder`,
    :mod:`hollow_rectangular_prism`,
    :mod:`hollow_rectangular_prism_thin_walls`, :mod:`multilayer_vesicle`,
    :mod:`onion`, :mod:`parallelepiped`, :mod:`rectangular_prism`,
    :mod:`sphere`, :mod:`spherical_sld`, :mod:`triaxial_ellipsoid`,
    :mod:`vesicle`

Models with local structure factor:

    :mod:`flexible_cylinder`, :mod:`flexible_cylinder_elliptical`,
    :mod:`linear_pearls`, :mod:`mono_gauss_coil`, :mod:`pearl_necklace`,
    :mod:`poly_gauss_coil`, :mod:`polymer_micelle`, :mod:`pringle`,
    :mod:`raspberry`, :mod:`stacked_disks`, :mod:`star_polymer`

Models with long range structure factor:

    :mod:`binary_hard_sphere`, :mod:`bcc_paracrystal`,
    :mod:`fcc_paracrystal`, :mod:`fractal`, :mod:`fractal_core_shell`,
    :mod:`lamellar`, :mod:`lamellar_hg`, :mod:`lamellar_hg_stack_caille`,
    :mod:`lamellar_stack_caille`, :mod:`lamellar_stack_paracrystal`,
    :mod:`mass_fractal`, :mod:`mass_surface_fractal`, :mod:`rpa`,
    :mod:`sc_paracrystal`, :mod:`surface_fractal`

Models which are pure structure factors:

    :mod:`hardsphere`, :mod:`hayter_msa`, :mod:`squarewell`,
    :mod:`stickyhardsphere`

Other models:

    :mod:`adsorbed_layer`, :mod:`be_polyelectrolyte`, :mod:`broad_peak`,
    :mod:`correlation_length`, :mod:`dab`, :mod:`gauss_lorentz_gel`,
    :mod:`gaussian_peak`, :mod:`gel_fit`, :mod:`guinier_porod`,
    :mod:`guinier`, :mod:`line`, :mod:`lorentz`, :mod:`peak_lorentz`,
    :mod:`polymer_excl_volume`, :mod:`porod`, :mod:`power_law`,
    :mod:`spinodal`, :mod:`teubner_strey`, :mod:`two_lorentzian`,
    :mod:`unified_power_Rg`
"""
• ## sasmodels/models/hardsphere.py

rdb1d9d5

```diff
 Earlier versions of SasView did not incorporate the so-called
 $\beta(q)$ ("beta") correction [1] for polydispersity and non-sphericity.
-This is only available in SasView versions 4.2.2 and higher.
+This is only available in SasView versions 5.0 and higher.

 radius_effective is the effective hard sphere radius.
```
• ## sasmodels/models/hayter_msa.py

rdb1d9d5

```diff
 Earlier versions of SasView did not incorporate the so-called
 $\beta(q)$ ("beta") correction [3] for polydispersity and non-sphericity.
-This is only available in SasView versions 4.2.2 and higher.
+This is only available in SasView versions 5.0 and higher.

 The salt concentration is used to compute the ionic strength of the solution
```

• ## sasmodels/models/squarewell.py

rdb1d9d5

```diff
 Earlier versions of SasView did not incorporate the so-called
 $\beta(q)$ ("beta") correction [2] for polydispersity and non-sphericity.
-This is only available in SasView versions 4.2.2 and higher.
+This is only available in SasView versions 5.0 and higher.

 The well width $(\lambda)$ is defined as multiples of the particle diameter
```
• ## sasmodels/models/stickyhardsphere.py

rdb1d9d5

```diff
 Earlier versions of SasView did not incorporate the so-called
 $\beta(q)$ ("beta") correction [3] for polydispersity and non-sphericity.
-This is only available in SasView versions 4.2.2 and higher.
+This is only available in SasView versions 5.0 and higher.

 In SasView the effective radius may be calculated from the parameters
```
• ## sasmodels/sasview_model.py

ra34b811

```diff
 from . import modelinfo
 from .details import make_kernel_args, dispersion_mesh

+# Hack: load in any custom distributions
+# Uses ~/.sasview/weights/*.py unless SASMODELS_WEIGHTS is set in the environ.
+# Override with weights.load_weights(pattern="<path>/*.py")
+weights.load_weights()
 # pylint: disable=unused-import
```
• ## sasmodels/sesans.py

rb297ba9

```diff
 from numpy import pi  # type: ignore
 from scipy.special import j0

 class SesansTransform(object):
     # transform arrays
-    _H = None  # type: np.ndarray
-    _H0 = None # type: np.ndarray
+    _H = None   # type: np.ndarray
+    _H0 = None  # type: np.ndarray

-    def __init__(self, z, SElength, lam, zaccept, Rmax):
+    def __init__(self, z, SElength, lam, zaccept, Rmax, log_spacing=1.0003):
         # type: (np.ndarray, float, float) -> None
         #import logging; logging.info("creating SESANS transform")
         self.q = z
+        self.log_spacing = log_spacing
         self._set_hankel(SElength, lam, zaccept, Rmax)

     def _set_hankel(self, SElength, lam, zaccept, Rmax):
         # type: (np.ndarray, float, float) -> None
-        # Force float32 arrays, otherwise run into memory problems on some machines
-        SElength = np.asarray(SElength, dtype='float32')
         #Rmax = #value in text box somewhere in FitPage?
+        SElength = np.asarray(SElength)
         q_max = 2*pi / (SElength[1] - SElength[0])
         q_min = 0.1 * 2*pi / (np.size(SElength) * SElength[-1])
-        q = np.arange(q_min, q_max, q_min, dtype='float32')
-        dq = q_min
+        q = np.exp(np.arange(np.log(q_min), np.log(q_max),
+                             np.log(self.log_spacing)))

-        H0 = np.float32(dq/(2*pi)) * q
+        dq = np.diff(q)
+        dq = np.insert(dq, 0, dq[0])

-        repq = np.tile(q, (SElength.size, 1)).T
-        repSE = np.tile(SElength, (q.size, 1))
-        H = np.float32(dq/(2*pi)) * j0(repSE*repq) * repq
+        H0 = dq/(2*pi) * q

-        replam = np.tile(lam, (q.size, 1))
-        reptheta = np.arcsin(repq*replam/2*np.pi)
+        H = np.outer(q, SElength)
+        j0(H, out=H)
+        H *= (dq * q / (2*pi)).reshape((-1, 1))

+        reptheta = np.outer(q, lam/(2*pi))
+        np.arcsin(reptheta, out=reptheta)
         mask = reptheta > zaccept
         H[mask] = 0
```
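The new log-spaced q grid can be illustrated standalone (the values below are assumed for illustration, not taken from the transform):

```python
import numpy as np

q_min, q_max, log_spacing = 1e-4, 1.0, 1.0003
# Geometric grid: each q is log_spacing times the previous one.
q = np.exp(np.arange(np.log(q_min), np.log(q_max), np.log(log_spacing)))
# Step sizes for the quadrature weights; repeat the first step so that
# dq has the same length as q.
dq = np.diff(q)
dq = np.insert(dq, 0, dq[0])
print(q.size, q[1]/q[0])   # ratio of consecutive points == log_spacing
```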
• ## sasmodels/weights.py

rb297ba9

```diff
 ))

+SAS_WEIGHTS_PATH = "~/.sasview/weights"
+
+def load_weights(pattern=None):
+    # type: (str) -> None
+    """
+    Load dispersion distributions matching the given glob pattern
+    """
+    import logging
+    import os
+    import os.path
+    import glob
+    import traceback
+    from .custom import load_custom_kernel_module
+    if pattern is None:
+        path = os.environ.get("SAS_WEIGHTS_PATH", SAS_WEIGHTS_PATH)
+        pattern = os.path.join(path, "*.py")
+    for filename in sorted(glob.glob(os.path.expanduser(pattern))):
+        try:
+            #print("loading weights from", filename)
+            module = load_custom_kernel_module(filename)
+            MODELS[module.Dispersion.type] = module.Dispersion
+        except Exception as exc:
+            logging.error(traceback.format_exc(exc))

 def get_weights(disperser, n, width, nsigmas, value, limits, relative):
```
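A usage sketch tying this to the documentation above (the `laplace` type exists only if a matching file is installed in the weights path):

```python
# Register any custom distributions found in SAS_WEIGHTS_PATH, then
# evaluate one of them, mirroring the plotting example in the docs.
from numpy import inf
from sasmodels import weights

weights.load_weights()   # picks up ~/.sasview/weights/*.py by default
x, wx = weights.get_weights('laplace', n=35, width=0.1, nsigmas=3,
                            value=50, limits=[0, inf], relative=True)
```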