Changes in / [052d4c5:f89ec96] in sasmodels


Files: 8 edited

  • doc/guide/magnetism/magnetism.rst (rbefe905:r4f5afc9)

    @@ -39,8 +39,8 @@
     
     .. math::
    -    -- &= (1-u_i)(1-u_f) \\
    -    -+ &= (1-u_i)(u_f) \\
    -    +- &= (u_i)(1-u_f) \\
    -    ++ &= (u_i)(u_f)
    +    -- &= ((1-u_i)(1-u_f))^{1/4} \\
    +    -+ &= ((1-u_i)(u_f))^{1/4} \\
    +    +- &= ((u_i)(1-u_f))^{1/4} \\
    +    ++ &= ((u_i)(u_f))^{1/4}
     
     Ideally the experiment would measure the pure spin states independently and

    @@ -104,3 +104,2 @@
     | 2015-05-02 Steve King
     | 2017-11-15 Paul Kienzle
    -| 2018-06-02 Adam Washington
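
The first hunk above moves the documented cross-section weights from plain spin fractions to their fourth roots. The underlying fractions are easy to sanity-check: for any incident and final spin-up fractions they must partition the beam. A minimal Python sketch (the function name is ours, not part of sasmodels):

    def spin_weights(u_i, u_f):
        """Intensity fractions for the --, -+, +-, ++ cross sections.

        u_i, u_f are the spin-up fractions before and after the sample.
        """
        dd = (1 - u_i) * (1 - u_f)
        du = (1 - u_i) * u_f
        ud = u_i * (1 - u_f)
        uu = u_i * u_f
        return dd, du, ud, uu

    w = spin_weights(0.8, 0.9)
    assert abs(sum(w) - 1.0) < 1e-12    # the four fractions partition the beam
    fourth_root = [v**0.25 for v in w]  # the form used in the updated docs
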
  • doc/guide/plugin.rst (rf796469:r7e6bc45e)

    @@ -822,5 +822,5 @@
     
             :code:`source = ["lib/Si.c", ...]`
    -        (`Si.c <https://github.com/SasView/sasmodels/tree/master/sasmodels/models/lib/sas_Si.c>`_)
    +        (`Si.c <https://github.com/SasView/sasmodels/tree/master/sasmodels/models/lib/Si.c>`_)
     
         sas_3j1x_x(x):
  • sasmodels/compare.py (r65fbf7c:r1fbadb2)

    @@ -645,5 +645,5 @@
     
     def make_data(opts):
    -    # type: (Dict[str, Any], float) -> Tuple[Data, np.ndarray]
    +    # type: (Dict[str, Any]) -> Tuple[Data, np.ndarray]
         """
         Generate an empty dataset, used with the model to set Q points

    @@ -667,8 +667,5 @@
             if opts['zero']:
                 q = np.hstack((0, q))
    -        # TODO: provide command line control of lambda and Delta lambda/lambda
    -        #L, dLoL = 5, 0.14/np.sqrt(6)  # wavelength and 14% triangular FWHM
    -        L, dLoL = 0, 0
    -        data = empty_data1D(q, resolution=res, L=L, dL=L*dLoL)
    +        data = empty_data1D(q, resolution=res)
             index = slice(None, None)
         return data, index

    @@ -789,5 +786,5 @@
         base_n, comp_n = opts['count']
         base_pars, comp_pars = opts['pars']
    -    base_data, comp_data = opts['data']
    +    data = opts['data']
     
         comparison = comp is not None

    @@ -803,5 +800,5 @@
                 print("%s t=%.2f ms, intensity=%.0f"
                       % (base.engine, base_time, base_value.sum()))
    -        _show_invalid(base_data, base_value)
    +        _show_invalid(data, base_value)
         except ImportError:
             traceback.print_exc()

    @@ -815,5 +812,5 @@
                     print("%s t=%.2f ms, intensity=%.0f"
                           % (comp.engine, comp_time, comp_value.sum()))
    -            _show_invalid(base_data, comp_value)
    +            _show_invalid(data, comp_value)
             except ImportError:
                 traceback.print_exc()

    @@ -869,10 +866,9 @@
         have_base, have_comp = (base_value is not None), (comp_value is not None)
         base, comp = opts['engines']
    -    base_data, comp_data = opts['data']
    +    data = opts['data']
         use_data = (opts['datafile'] is not None) and (have_base ^ have_comp)
     
         # Plot if requested
         view = opts['view']
    -    #view = 'log'
         if limits is None:
             vmin, vmax = np.inf, -np.inf

    @@ -888,5 +884,5 @@
         if have_comp:
             plt.subplot(131)
    -        plot_theory(base_data, base_value, view=view, use_data=use_data, limits=limits)
    +        plot_theory(data, base_value, view=view, use_data=use_data, limits=limits)
         plt.title("%s t=%.2f ms"%(base.engine, base_time))
         #cbar_title = "log I"

    @@ -895,6 +891,6 @@
             plt.subplot(132)
         if not opts['is2d'] and have_base:
    -        plot_theory(comp_data, base_value, view=view, use_data=use_data, limits=limits)
    -    plot_theory(comp_data, comp_value, view=view, use_data=use_data, limits=limits)
    +        plot_theory(data, base_value, view=view, use_data=use_data, limits=limits)
    +    plot_theory(data, comp_value, view=view, use_data=use_data, limits=limits)
         plt.title("%s t=%.2f ms"%(comp.engine, comp_time))
         #cbar_title = "log I"

    @@ -912,8 +908,5 @@
             err[err > cutoff] = cutoff
         #err,errstr = base/comp,"ratio"
    -    # Note: base_data only since base and comp have same q values (though
    -    # perhaps different resolution), and we are plotting the difference
    -    # at each q
    -    plot_theory(base_data, None, resid=err, view=errview, use_data=use_data)
    +    plot_theory(data, None, resid=err, view=errview, use_data=use_data)
         plt.xscale('log' if view == 'log' and not opts['is2d'] else 'linear')
         plt.legend(['P%d'%(k+1) for k in range(setnum+1)], loc='best')

    @@ -1082,5 +1075,5 @@
         'qmax'      : 0.05,
         'nq'        : 128,
    -    'res'       : '0.0',
    +    'res'       : 0.0,
         'noise'     : 0.0,
         'accuracy'  : 'Low',

    @@ -1122,5 +1115,5 @@
         elif arg.startswith('-q='):
             opts['qmin'], opts['qmax'] = [float(v) for v in arg[3:].split(':')]
    -    elif arg.startswith('-res='):      opts['res'] = arg[5:]
    +    elif arg.startswith('-res='):      opts['res'] = float(arg[5:])
         elif arg.startswith('-noise='):    opts['noise'] = float(arg[7:])
         elif arg.startswith('-sets='):     opts['sets'] = int(arg[6:])

    @@ -1180,4 +1173,8 @@
         if opts['qmin'] is None:
             opts['qmin'] = 0.001*opts['qmax']
    +    if opts['datafile'] is not None:
    +        data = load_data(os.path.expanduser(opts['datafile']))
    +    else:
    +        data, _ = make_data(opts)
     
         comparison = any(PAR_SPLIT in v for v in values)

    @@ -1219,29 +1216,8 @@
             opts['cutoff'] = [float(opts['cutoff'])]*2
     
    -    if PAR_SPLIT in opts['res']:
    -        opts['res'] = [float(k) for k in opts['res'].split(PAR_SPLIT, 2)]
    -        comparison = True
    -    else:
    -        opts['res'] = [float(opts['res'])]*2
    -
    -    if opts['datafile'] is not None:
    -        data = load_data(os.path.expanduser(opts['datafile']))
    -    else:
    -        # Hack around the fact that make_data doesn't take a pair of resolutions
    -        res = opts['res']
    -        opts['res'] = res[0]
    -        data0, _ = make_data(opts)
    -        if res[0] != res[1]:
    -            opts['res'] = res[1]
    -            data1, _ = make_data(opts)
    -        else:
    -            data1 = data0
    -        opts['res'] = res
    -        data = data0, data1
    -
    -    base = make_engine(model_info[0], data[0], opts['engine'][0],
    +    base = make_engine(model_info[0], data, opts['engine'][0],
                            opts['cutoff'][0], opts['ngauss'][0])
         if comparison:
    -        comp = make_engine(model_info[1], data[1], opts['engine'][1],
    +        comp = make_engine(model_info[1], data, opts['engine'][1],
                                opts['cutoff'][1], opts['ngauss'][1])
         else:
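
The block removed in the last hunk kept `opts['res']` as a string so that a `PAR_SPLIT`-separated pair could give the base and comparison engines different resolutions. The option-pair pattern in isolation (a sketch; it assumes `PAR_SPLIT` is the comma used for other paired options in compare.py):

    PAR_SPLIT = ','  # assumed pair separator, as for other paired options

    def parse_res(value):
        """Parse a '-res=' argument into (base_res, comp_res)."""
        if PAR_SPLIT in value:
            # e.g. "0.02,0.05": base and comparison get different dq/q
            return tuple(float(v) for v in value.split(PAR_SPLIT, 2))[:2]
        # e.g. "0.02": both engines get the same dq/q
        return (float(value),) * 2

    assert parse_res("0.02") == (0.02, 0.02)
    assert parse_res("0.02,0.05") == (0.02, 0.05)
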
  • sasmodels/data.py (r65fbf7c:rd86f0fc)

    @@ -36,5 +36,4 @@
     
     import numpy as np  # type: ignore
    -from numpy import sqrt, sin, cos, pi
     
     # pylint: disable=unused-import

    @@ -302,13 +301,10 @@
     
     
    -def empty_data1D(q, resolution=0.0, L=0., dL=0.):
    +def empty_data1D(q, resolution=0.0):
         # type: (np.ndarray, float) -> Data1D
    -    r"""
    +    """
         Create empty 1D data using the given *q* as the x value.
     
    -    rms *resolution* $\Delta q/q$ defaults to 0%.  If wavelength *L* and rms
    -    wavelength divergence *dL* are defined, then *resolution* defines
    -    rms $\Delta \theta/\theta$ for the lowest *q*, with $\theta$ derived from
    -    $q = 4\pi/\lambda \sin(\theta)$.
    +    *resolution* dq/q defaults to 5%.
         """
     

    @@ -317,18 +313,5 @@
         Iq, dIq = None, None
         q = np.asarray(q)
    -    if L != 0 and resolution != 0:
    -        theta = np.arcsin(q*L/(4*pi))
    -        dtheta = theta[0]*resolution
    -        ## Solving Gaussian error propagation from
    -        ##   Dq^2 = (dq/dL)^2 DL^2 + (dq/dtheta)^2 Dtheta^2
    -        ## gives
    -        ##   (Dq/q)^2 = (DL/L)**2 + (Dtheta/tan(theta))**2
    -        ## Take the square root and multiply by q, giving
    -        ##   Dq = (4*pi/L) * sqrt((sin(theta)*dL/L)**2 + (cos(theta)*dtheta)**2)
    -        dq = (4*pi/L) * sqrt((sin(theta)*dL/L)**2 + (cos(theta)*dtheta)**2)
    -    else:
    -        dq = resolution * q
    -
    -    data = Data1D(q, Iq, dx=dq, dy=dIq)
    +    data = Data1D(q, Iq, dx=resolution * q, dy=dIq)
         data.filename = "fake data"
         return data
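
The branch removed from `empty_data1D` computed Delta-q from a wavelength L, wavelength spread dL, and an rms Delta-theta/theta at the lowest q, by Gaussian error propagation on q = (4 pi / L) sin(theta). The same calculation as a standalone function (the name is ours; the logic follows the removed lines):

    import numpy as np

    def dq_from_wavelength(q, resolution, L, dL):
        """Delta-q for wavelength L, wavelength spread dL, and rms
        Delta-theta = resolution * theta at the lowest q.

        From Dq^2 = (dq/dL)^2 DL^2 + (dq/dtheta)^2 Dtheta^2 with
        q = (4 pi / L) sin(theta):
            Dq = (4 pi / L) sqrt((sin(theta) dL/L)^2 + (cos(theta) Dtheta)^2)
        """
        theta = np.arcsin(q * L / (4 * np.pi))
        dtheta = theta[0] * resolution
        return (4 * np.pi / L) * np.sqrt(
            (np.sin(theta) * dL / L)**2 + (np.cos(theta) * dtheta)**2)

    q = np.linspace(0.005, 0.5, 5)
    # 14% triangular FWHM, as in the commented-out compare.py defaults
    print(dq_from_wavelength(q, resolution=0.02, L=5.0, dL=5.0 * 0.14 / np.sqrt(6)))
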
  • sasmodels/jitter.py (r1198f90:rb3703f5)

    @@ -774,5 +774,5 @@
             # set small jitter as 0 if multiple pd dims
             dims = sum(v > 0 for v in jitter)
    -        limit = [0, 0.5, 5][dims]
    +        limit = [0, 0, 0.5, 5][dims]
             jitter = [0 if v < limit else v for v in jitter]
             axes.cla()
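
For context on the one-line fix: `dims` counts the active jitter dimensions, 0 through 3, so the old three-entry table appears to index out of range at `dims == 3` and to apply each threshold one tier too early. A quick check of the corrected lookup:

    jitter = [0.2, 0.0, 7.0]           # e.g. (dtheta, dphi, dpsi) widths
    dims = sum(v > 0 for v in jitter)  # two active dimensions here
    limit = [0, 0, 0.5, 5][dims]       # snap-to-zero threshold per tier
    jitter = [0 if v < limit else v for v in jitter]
    assert jitter == [0, 0, 7.0]       # 0.2 < 0.5 snaps to zero
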
  • sasmodels/kernel_iq.c (r7c35fda:rdc6f601)

    @@ -83,19 +83,11 @@
       in_spin = clip(in_spin, 0.0, 1.0);
       out_spin = clip(out_spin, 0.0, 1.0);
    -  // Previous version of this function took the square root of the weights,
    -  // under the assumption that
    -  //
    +  // Note: sasview 3.1 scaled all slds by sqrt(weight) and assumed that
       //     w*I(q, rho1, rho2, ...) = I(q, sqrt(w)*rho1, sqrt(w)*rho2, ...)
    -  //
    -  // However, since the weights are applied to the final intensity and
    -  // are not interned inside the I(q) function, we want the full
    -  // weight and not the square root.  Any function using
    -  // set_spin_weights as part of calculating an amplitude will need to
    -  // manually take that square root, but there is currently no such
    -  // function.
    -  weight[0] = (1.0-in_spin) * (1.0-out_spin); // dd
    -  weight[1] = (1.0-in_spin) * out_spin;       // du
    -  weight[2] = in_spin * (1.0-out_spin);       // ud
    -  weight[3] = in_spin * out_spin;             // uu
    +  // which is likely to be the case for simple models.
    +  weight[0] = sqrt((1.0-in_spin) * (1.0-out_spin)); // dd
    +  weight[1] = sqrt((1.0-in_spin) * out_spin);       // du.real
    +  weight[2] = sqrt(in_spin * (1.0-out_spin));       // ud.real
    +  weight[3] = sqrt(in_spin * out_spin);             // uu
       weight[4] = weight[1]; // du.imag
       weight[5] = weight[2]; // ud.imag
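
The restored comment relies on the identity w*I(q, rho1, rho2, ...) = I(q, sqrt(w)*rho1, sqrt(w)*rho2, ...), which holds exactly when the intensity is quadratic in the slds, i.e. proportional to contrast squared. A numeric check against a toy model (the function below is illustrative, not a sasmodels kernel):

    import numpy as np

    def toy_Iq(q, rho, rho_solvent):
        """Toy model with I(q) proportional to (delta rho)^2."""
        return (rho - rho_solvent)**2 * np.exp(-q**2)  # stand-in form factor

    q = np.linspace(0.01, 0.3, 4)
    w = 0.37  # some spin cross-section weight
    # Scaling *all* slds by sqrt(w) scales the intensity by w, because
    # the contrast, and hence sqrt(I), is linear in the slds.
    np.testing.assert_allclose(
        toy_Iq(q, np.sqrt(w) * 2.0, np.sqrt(w) * 1.0),
        w * toy_Iq(q, 2.0, 1.0))
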
  • sasmodels/models/mass_surface_fractal.py (r7994359:r2d81cfe)

    @@ -39,25 +39,14 @@
         The surface ( $D_s$ ) and mass ( $D_m$ ) fractal dimensions are only
         valid if $0 < surface\_dim < 6$ , $0 < mass\_dim < 6$ , and
    -    $(surface\_dim + mass\_dim ) < 6$ .
    -    Older versions of sasview may have the default primary particle radius
    -    larger than the cluster radius, this was an error, also present in the
    -    Schmidt review paper below. The primary particle should be the smaller
    -    as described in the original Hurd et.al. who also point out that
    -    polydispersity in the primary particle sizes may affect their
    -    apparent surface fractal dimension.
    -
    +    $(surface\_dim + mass\_dim ) < 6$ .
    +
     
     References
     ----------
     
    -.. [#] P Schmidt, *J Appl. Cryst.*, 24 (1991) 414-435 Equation(19)
    -.. [#] A J Hurd, D W Schaefer, J E Martin, *Phys. Rev. A*,
    -   35 (1987) 2361-2364 Equation(2)
    +P Schmidt, *J Appl. Cryst.*, 24 (1991) 414-435 Equation(19)
     
    -Authorship and Verification
    -----------------------------
    -
    -* **Converted to sasmodels by:** Piotr Rozyczko **Date:** Jan 20, 2016
    -* **Last Reviewed by:** Richard Heenan **Date:** May 30, 2018
    +A J Hurd, D W Schaefer, J E Martin, *Phys. Rev. A*,
    +35 (1987) 2361-2364 Equation(2)
     """
     

    @@ -78,4 +67,5 @@
             rg_primary    =  rg
             background   =  background
    +        Ref: Schmidt, J Appl Cryst, eq(19), (1991), 24, 414-435
             Hurd, Schaefer, Martin, Phys Rev A, eq(2),(1987),35, 2361-2364
             Note that 0 < Ds< 6 and 0 < Dm < 6.

    @@ -88,6 +78,6 @@
         ["fractal_dim_mass", "",      1.8, [0.0, 6.0], "", "Mass fractal dimension"],
         ["fractal_dim_surf", "",      2.3, [0.0, 6.0], "", "Surface fractal dimension"],
    -    ["rg_cluster",       "Ang", 4000., [0.0, inf], "", "Cluster radius of gyration"],
    -    ["rg_primary",       "Ang",  86.7, [0.0, inf], "", "Primary particle radius of gyration"],
    +    ["rg_cluster",       "Ang",  86.7, [0.0, inf], "", "Cluster radius of gyration"],
    +    ["rg_primary",       "Ang", 4000., [0.0, inf], "", "Primary particle radius of gyration"],
     ]
     # pylint: enable=bad-whitespace, line-too-long

    @@ -117,11 +107,11 @@
                 fractal_dim_mass=1.8,
                 fractal_dim_surf=2.3,
    -            rg_cluster=4000.0,
    -            rg_primary=86.7)
    +            rg_cluster=86.7,
    +            rg_primary=4000.0)
     
     tests = [
     
    -    # Accuracy tests based on content in test/utest_other_models.py  All except first, changed so rg_cluster is the larger, RKH 30 May 2018
    -    [{'fractal_dim_mass':   1.8,
    +    # Accuracy tests based on content in test/utest_other_models.py
    +    [{'fractal_dim_mass':      1.8,
          'fractal_dim_surf':   2.3,
          'rg_cluster':   86.7,

    @@ -133,21 +123,21 @@
         [{'fractal_dim_mass':      3.3,
           'fractal_dim_surf':   1.0,
    -      'rg_cluster': 4000.0,
    -      'rg_primary':   90.0,
    -     }, 0.001, 0.0932516614456],
    +      'rg_cluster':   90.0,
    +      'rg_primary': 4000.0,
    +     }, 0.001, 0.18562699016],
     
         [{'fractal_dim_mass':      1.3,
    -      'fractal_dim_surf':   2.0,
    -      'rg_cluster': 2000.0,
    -      'rg_primary':   90.0,
    +      'fractal_dim_surf':   1.0,
    +      'rg_cluster':   90.0,
    +      'rg_primary': 2000.0,
           'background':    0.8,
    -     }, 0.001, 1.28296431786],
    +     }, 0.001, 1.16539753641],
     
         [{'fractal_dim_mass':      2.3,
    -      'fractal_dim_surf':   3.1,
    -      'rg_cluster':  1000.0,
    -      'rg_primary':  30.0,
    +      'fractal_dim_surf':   1.0,
    +      'rg_cluster':   90.0,
    +      'rg_primary': 1000.0,
           'scale':        10.0,
           'background':    0.0,
    -     }, 0.051, 0.00333804044899],
    +     }, 0.051, 0.000169548800377],
         ]
  • sasmodels/resolution.py (r0b9c6df:r2d81cfe)

    @@ -20,5 +20,4 @@
     MINIMUM_RESOLUTION = 1e-8
     MINIMUM_ABSOLUTE_Q = 0.02  # relative to the minimum q in the data
    -PINHOLE_N_SIGMA = 2.5 # From: Barker & Pedersen 1995 JAC
     
     class Resolution(object):

    @@ -66,9 +65,6 @@
         *q_calc* is the list of points to calculate, or None if this should
         be estimated from the *q* and *q_width*.
    -
    -    *nsigma* is the width of the resolution function.  Should be 2.5.
    -    See :func:`pinhole_resolution` for details.
    -    """
    -    def __init__(self, q, q_width, q_calc=None, nsigma=PINHOLE_N_SIGMA):
    +    """
    +    def __init__(self, q, q_width, q_calc=None, nsigma=3):
             #*min_step* is the minimum point spacing to use when computing the
             #underlying model.  It should be on the order of

    @@ -86,15 +82,12 @@
     
             # Protect against models which are not defined for very low q.  Limit
    -        # the smallest q value evaluated to 0.02*min.  Note that negative q
    -        # values are trimmed even for broad resolution.  Although not possible
    -        # from the geometry, they may appear since we are using a truncated
    -        # gaussian to represent resolution rather than a skew distribution.
    +        # the smallest q value evaluated (in absolute) to 0.02*min
             cutoff = MINIMUM_ABSOLUTE_Q*np.min(self.q)
    -        self.q_calc = self.q_calc[self.q_calc >= cutoff]
    +        self.q_calc = self.q_calc[abs(self.q_calc) >= cutoff]
     
             # Build weight matrix from calculated q values
             self.weight_matrix = pinhole_resolution(
    -            self.q_calc, self.q, np.maximum(q_width, MINIMUM_RESOLUTION),
    -            nsigma=nsigma)
    +            self.q_calc, self.q, np.maximum(q_width, MINIMUM_RESOLUTION))
    +        self.q_calc = abs(self.q_calc)
     
         def apply(self, theory):

    @@ -108,7 +101,7 @@
         *q* points at which the data is measured.
     
    -    *qx_width* slit width in qx
    -
    -    *qy_width* slit height in qy
    +    *dqx* slit width in qx
    +
    +    *dqy* slit height in qy
     
         *q_calc* is the list of points to calculate, or None if this should

    @@ -161,5 +154,5 @@
     
     
    -def pinhole_resolution(q_calc, q, q_width, nsigma=PINHOLE_N_SIGMA):
    -    r"""
    +def pinhole_resolution(q_calc, q, q_width):
    +    """
         Compute the convolution matrix *W* for pinhole resolution 1-D data.

    @@ -169,16 +162,5 @@
         *W*, the resolution smearing can be computed using *dot(W,q)*.
     
    -    Note that resolution is limited to $\pm 2.5 \sigma$.[1]  The true resolution
    -    function is a broadened triangle, and does not extend over the entire
    -    range $(-\infty, +\infty)$.  It is important to impose this limitation
    -    since some models fall so steeply that the weighted value in gaussian
    -    tails would otherwise dominate the integral.
    -
         *q_calc* must be increasing.  *q_width* must be greater than zero.
    -
    -    [1] Barker, J. G., and J. S. Pedersen. 1995. Instrumental Smearing Effects
    -    in Radially Symmetric Small-Angle Neutron Scattering by Numerical and
    -    Analytical Methods. Journal of Applied Crystallography 28 (2): 105--14.
    -    https://doi.org/10.1107/S0021889894010095.
         """
         # The current algorithm is a midpoint rectangle rule.  In the test case,

    @@ -188,10 +170,4 @@
         cdf = erf((edges[:, None] - q[None, :]) / (sqrt(2.0)*q_width)[None, :])
         weights = cdf[1:] - cdf[:-1]
    -    # Limit q range to +/- 2.5 sigma
    -    qhigh = q + nsigma*q_width
    -    #qlow = q - nsigma*q_width  # linear limits
    -    qlow = q*q/qhigh  # log limits
    -    weights[q_calc[:, None] < qlow[None, :]] = 0.
    -    weights[q_calc[:, None] > qhigh[None, :]] = 0.
         weights /= np.sum(weights, axis=0)[None, :]
         return weights

    @@ -518,14 +494,11 @@
     
     
    -def gaussian(q, q0, dq, nsigma=2.5):
    -    """
    -    Return the truncated Gaussian resolution function.
    +def gaussian(q, q0, dq):
    +    """
    +    Return the Gaussian resolution function.
     
         *q0* is the center, *dq* is the width and *q* are the points to evaluate.
         """
    -    # Calculate the density of the tails; the resulting gaussian needs to be
    -    # scaled by this amount in ordere to integrate to 1.0
    -    two_tail_density = 2 * (1 + erf(-nsigma/sqrt(2)))/2
    -    return exp(-0.5*((q-q0)/dq)**2)/(sqrt(2*pi)*dq)/(1-two_tail_density)
    +    return exp(-0.5*((q-q0)/dq)**2)/(sqrt(2*pi)*dq)

    @@ -585,5 +558,5 @@
     
     
    -def romberg_pinhole_1d(q, q_width, form, pars, nsigma=2.5):
    +def romberg_pinhole_1d(q, q_width, form, pars, nsigma=5):
         """
         Romberg integration for pinhole resolution.

    @@ -705,7 +678,4 @@
             np.testing.assert_equal(output, self.y)
     
    -    # TODO: turn pinhole/slit demos into tests
    -
    -    @unittest.skip("suppress comparison with old version; pinhole calc changed")
         def test_pinhole(self):
             """

    @@ -716,11 +686,8 @@
             theory = 12.0-1000.0*resolution.q_calc
             output = resolution.apply(theory)
    -        # Note: answer came from output of previous run.  Non-integer
    -        # values at ends come from the fact that q_calc does not
    -        # extend beyond q, and so the weights don't balance.
             answer = [
    -            10.47037734, 9.86925860,
    -            9., 8., 7., 6., 5., 4.,
    -            3.13074140, 2.52962266,
    +            10.44785079, 9.84991299, 8.98101708,
    +            7.99906585, 6.99998311, 6.00001689,
    +            5.00093415, 4.01898292, 3.15008701, 2.55214921,
                 ]
             np.testing.assert_allclose(output, answer, atol=1e-8)

    @@ -765,5 +732,4 @@
             self._compare(q, output, answer, 1e-6)
     
    -    @unittest.skip("suppress comparison with old version; pinhole calc changed")
         def test_pinhole(self):
             """

    @@ -780,5 +746,4 @@
             self._compare(q, output, answer, 3e-4)
     
    -    @unittest.skip("suppress comparison with old version; pinhole calc changed")
         def test_pinhole_romberg(self):
             """

    @@ -796,5 +761,5 @@
             #                     2*np.pi/pars['radius']/200)
             #tol = 0.001
    -        ## The default 2.5 sigma and no extra points gets 1%
    +        ## The default 3 sigma and no extra points gets 1%
             q_calc = None  # type: np.ndarray
             tol = 0.01

    @@ -1115,5 +1080,5 @@
     
         if isinstance(resolution, Slit1D):
    -        width, height = resolution.qx_width, resolution.qy_width
    +        width, height = resolution.dqx, resolution.dqy
             Iq_romb = romberg_slit_1d(resolution.q, width, height, model, pars)
         else:
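
The pinhole hunks above remove a +/- 2.5 sigma truncation of the Gaussian resolution (with "log limits" q*q/qhigh on the low-q side) in favour of the plain Gaussian. For reference, the removed logic restated as a self-contained function; `bin_edges` here is a simplified stand-in for the module's own helper:

    import numpy as np
    from scipy.special import erf

    def bin_edges(x):
        """Midpoint bin edges for sorted x, extended at both ends."""
        return np.concatenate([
            [x[0] - 0.5 * (x[1] - x[0])],
            0.5 * (x[1:] + x[:-1]),
            [x[-1] + 0.5 * (x[-1] - x[-2])],
            ])

    def pinhole_weights(q_calc, q, q_width, nsigma=2.5):
        """Resolution matrix W; column j holds the weights of q_calc
        in the smeared value at q[j], truncated at +/- nsigma."""
        edges = bin_edges(q_calc)
        cdf = erf((edges[:, None] - q[None, :])
                  / (np.sqrt(2.0) * q_width)[None, :])
        weights = cdf[1:] - cdf[:-1]
        qhigh = q + nsigma * q_width
        qlow = q * q / qhigh  # the "log limits" from the removed code
        weights[q_calc[:, None] < qlow[None, :]] = 0.
        weights[q_calc[:, None] > qhigh[None, :]] = 0.
        weights /= np.sum(weights, axis=0)[None, :]  # renormalize columns
        return weights

    q = np.linspace(0.01, 0.1, 10)
    W = pinhole_weights(q, q, 0.1 * q)  # 10% dq/q, q_calc == q
    smeared = (12.0 - 1000.0 * q) @ W   # apply resolution: dot(theory, W)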