Changes in [3fc4097:f796469] in sasmodels


Location: sasmodels
Files: 4 edited

  • sasmodels/compare.py

    r65fbf7c → r1fbadb2
    @@ -645 +645 @@
     
     def make_data(opts):
    -    # type: (Dict[str, Any], float) -> Tuple[Data, np.ndarray]
    +    # type: (Dict[str, Any]) -> Tuple[Data, np.ndarray]
         """
         Generate an empty dataset, used with the model to set Q points
     
    @@ -667 +667 @@
             if opts['zero']:
                 q = np.hstack((0, q))
    -        # TODO: provide command line control of lambda and Delta lambda/lambda
    -        #L, dLoL = 5, 0.14/np.sqrt(6)  # wavelength and 14% triangular FWHM
    -        L, dLoL = 0, 0
    -        data = empty_data1D(q, resolution=res, L=L, dL=L*dLoL)
    +        data = empty_data1D(q, resolution=res)
             index = slice(None, None)
         return data, index
     
    @@ -789 +786 @@
         base_n, comp_n = opts['count']
         base_pars, comp_pars = opts['pars']
    -    base_data, comp_data = opts['data']
    +    data = opts['data']
     
         comparison = comp is not None
     
    @@ -803 +800 @@
                 print("%s t=%.2f ms, intensity=%.0f"
                       % (base.engine, base_time, base_value.sum()))
    -        _show_invalid(base_data, base_value)
    +        _show_invalid(data, base_value)
         except ImportError:
             traceback.print_exc()
     
    @@ -815 +812 @@
                     print("%s t=%.2f ms, intensity=%.0f"
                           % (comp.engine, comp_time, comp_value.sum()))
    -            _show_invalid(base_data, comp_value)
    +            _show_invalid(data, comp_value)
            except ImportError:
                traceback.print_exc()
     
    @@ -869 +866 @@
         have_base, have_comp = (base_value is not None), (comp_value is not None)
         base, comp = opts['engines']
    -    base_data, comp_data = opts['data']
    +    data = opts['data']
         use_data = (opts['datafile'] is not None) and (have_base ^ have_comp)
     
         # Plot if requested
         view = opts['view']
    -    #view = 'log'
         if limits is None:
             vmin, vmax = np.inf, -np.inf
     
    @@ -888 +884 @@
             if have_comp:
                 plt.subplot(131)
    -        plot_theory(base_data, base_value, view=view, use_data=use_data, limits=limits)
    +        plot_theory(data, base_value, view=view, use_data=use_data, limits=limits)
             plt.title("%s t=%.2f ms"%(base.engine, base_time))
             #cbar_title = "log I"
     
    @@ -895 +891 @@
                 plt.subplot(132)
             if not opts['is2d'] and have_base:
    -            plot_theory(comp_data, base_value, view=view, use_data=use_data, limits=limits)
    -        plot_theory(comp_data, comp_value, view=view, use_data=use_data, limits=limits)
    +            plot_theory(data, base_value, view=view, use_data=use_data, limits=limits)
    +        plot_theory(data, comp_value, view=view, use_data=use_data, limits=limits)
             plt.title("%s t=%.2f ms"%(comp.engine, comp_time))
             #cbar_title = "log I"
     
    @@ -912 +908 @@
                 err[err > cutoff] = cutoff
             #err,errstr = base/comp,"ratio"
    -        # Note: base_data only since base and comp have same q values (though
    -        # perhaps different resolution), and we are plotting the difference
    -        # at each q
    -        plot_theory(base_data, None, resid=err, view=errview, use_data=use_data)
    +        plot_theory(data, None, resid=err, view=errview, use_data=use_data)
             plt.xscale('log' if view == 'log' and not opts['is2d'] else 'linear')
             plt.legend(['P%d'%(k+1) for k in range(setnum+1)], loc='best')
     
    @@ -1082 +1075 @@
             'qmax'      : 0.05,
             'nq'        : 128,
    -        'res'       : '0.0',
    +        'res'       : 0.0,
             'noise'     : 0.0,
             'accuracy'  : 'Low',
     
    @@ -1122 +1115 @@
             elif arg.startswith('-q='):
                 opts['qmin'], opts['qmax'] = [float(v) for v in arg[3:].split(':')]
    -        elif arg.startswith('-res='):      opts['res'] = arg[5:]
    +        elif arg.startswith('-res='):      opts['res'] = float(arg[5:])
             elif arg.startswith('-noise='):    opts['noise'] = float(arg[7:])
             elif arg.startswith('-sets='):     opts['sets'] = int(arg[6:])
     
    @@ -1180 +1173 @@
         if opts['qmin'] is None:
             opts['qmin'] = 0.001*opts['qmax']
    +    if opts['datafile'] is not None:
    +        data = load_data(os.path.expanduser(opts['datafile']))
    +    else:
    +        data, _ = make_data(opts)
     
         comparison = any(PAR_SPLIT in v for v in values)
     
    @@ -1219 +1216 @@
             opts['cutoff'] = [float(opts['cutoff'])]*2
     
    -    if PAR_SPLIT in opts['res']:
    -        opts['res'] = [float(k) for k in opts['res'].split(PAR_SPLIT, 2)]
    -        comparison = True
    -    else:
    -        opts['res'] = [float(opts['res'])]*2
    -
    -    if opts['datafile'] is not None:
    -        data = load_data(os.path.expanduser(opts['datafile']))
    -    else:
    -        # Hack around the fact that make_data doesn't take a pair of resolutions
    -        res = opts['res']
    -        opts['res'] = res[0]
    -        data0, _ = make_data(opts)
    -        if res[0] != res[1]:
    -            opts['res'] = res[1]
    -            data1, _ = make_data(opts)
    -        else:
    -            data1 = data0
    -        opts['res'] = res
    -        data = data0, data1
    -
    -    base = make_engine(model_info[0], data[0], opts['engine'][0],
    +    base = make_engine(model_info[0], data, opts['engine'][0],
                            opts['cutoff'][0], opts['ngauss'][0])
         if comparison:
    -        comp = make_engine(model_info[1], data[1], opts['engine'][1],
    +        comp = make_engine(model_info[1], data, opts['engine'][1],
                                opts['cutoff'][1], opts['ngauss'][1])
         else:
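
    The block removed above is compare.py's pair convention: options such as
    -res= may carry separate base and comp values joined by PAR_SPLIT, with a
    single value duplicated for both engines.  A minimal sketch of that
    convention, assuming PAR_SPLIT is the comma separator compare.py uses for
    its other paired options (split_pair is an illustrative name, not from
    the source):

        PAR_SPLIT = ','  # assumed; defined elsewhere in compare.py

        def split_pair(value, cast=float):
            """Split 'base,comp' into two values; duplicate a single value."""
            if PAR_SPLIT in value:
                return [cast(v) for v in value.split(PAR_SPLIT)], True
            return [cast(value)]*2, False

        res, comparison = split_pair('0.01,0.05')  # ([0.01, 0.05], True)
        noise, _ = split_pair('0.02')              # ([0.02, 0.02], False)
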
  • sasmodels/data.py

    r65fbf7c → rd86f0fc
    @@ -36 +36 @@
     
     import numpy as np  # type: ignore
    -from numpy import sqrt, sin, cos, pi
     
     # pylint: disable=unused-import
     
    @@ -302 +301 @@
     
     
    -def empty_data1D(q, resolution=0.0, L=0., dL=0.):
    +def empty_data1D(q, resolution=0.0):
         # type: (np.ndarray, float) -> Data1D
    -    r"""
    +    """
         Create empty 1D data using the given *q* as the x value.
     
    -    rms *resolution* $\Delta q/q$ defaults to 0%.  If wavelength *L* and rms
    -    wavelength divergence *dL* are defined, then *resolution* defines
    -    rms $\Delta \theta/\theta$ for the lowest *q*, with $\theta$ derived from
    -    $q = 4\pi/\lambda \sin(\theta)$.
    +    *resolution* dq/q defaults to 5%.
         """
     
     
    @@ -317 +313 @@
         Iq, dIq = None, None
         q = np.asarray(q)
    -    if L != 0 and resolution != 0:
    -        theta = np.arcsin(q*L/(4*pi))
    -        dtheta = theta[0]*resolution
    -        ## Solving Gaussian error propagation from
    -        ##   Dq^2 = (dq/dL)^2 DL^2 + (dq/dtheta)^2 Dtheta^2
    -        ## gives
    -        ##   (Dq/q)^2 = (DL/L)**2 + (Dtheta/tan(theta))**2
    -        ## Take the square root and multiply by q, giving
    -        ##   Dq = (4*pi/L) * sqrt((sin(theta)*dL/L)**2 + (cos(theta)*dtheta)**2)
    -        dq = (4*pi/L) * sqrt((sin(theta)*dL/L)**2 + (cos(theta)*dtheta)**2)
    -    else:
    -        dq = resolution * q
    -
    -    data = Data1D(q, Iq, dx=dq, dy=dIq)
    +    data = Data1D(q, Iq, dx=resolution * q, dy=dIq)
         data.filename = "fake data"
         return data
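
    The branch deleted from empty_data1D derived dq by Gaussian error
    propagation from q = (4*pi/L)*sin(theta).  A self-contained sketch of the
    same calculation, useful for checking the propagation formula in
    isolation (dq_from_wavelength and the sample inputs are illustrative):

        import numpy as np
        from numpy import arcsin, sin, cos, sqrt, pi

        def dq_from_wavelength(q, resolution, L, dL):
            # q = (4*pi/L)*sin(theta), so theta is recovered from q and L
            theta = arcsin(q*L/(4*pi))
            # *resolution* sets the rms dtheta from theta at the lowest q
            dtheta = theta[0]*resolution
            # (Dq/q)^2 = (dL/L)^2 + (dtheta/tan(theta))^2; multiply through by q
            return (4*pi/L) * sqrt((sin(theta)*dL/L)**2 + (cos(theta)*dtheta)**2)

        q = np.linspace(0.005, 0.5, 5)
        # with dL = 0 only the angular term survives
        print(dq_from_wavelength(q, resolution=0.02, L=5.0, dL=0.0))
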
  • sasmodels/jitter.py

    r1198f90 → rb3703f5
    @@ -774 +774 @@
             # set small jitter as 0 if multiple pd dims
             dims = sum(v > 0 for v in jitter)
    -        limit = [0, 0.5, 5][dims]
    +        limit = [0, 0, 0.5, 5][dims]
             jitter = [0 if v < limit else v for v in jitter]
             axes.cla()
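
    The one-line fix above pads the threshold table so it can be indexed by
    dims = 0..3: with the old three-entry table, dims == 3 (all jitter
    dimensions active) raised IndexError.  A toy reproduction (the loop body
    paraphrases the surrounding code):

        for jitter in ([0, 0, 0], [3, 0, 0], [3, 4, 0], [3, 4, 5]):
            dims = sum(v > 0 for v in jitter)  # number of active pd dims
            limit = [0, 0, 0.5, 5][dims]       # old [0, 0.5, 5] fails at dims == 3
            print(dims, limit, [0 if v < limit else v for v in jitter])
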
  • sasmodels/resolution.py

    rbbb887d → r2d81cfe
    @@ -20 +20 @@
     MINIMUM_RESOLUTION = 1e-8
     MINIMUM_ABSOLUTE_Q = 0.02  # relative to the minimum q in the data
    -PINHOLE_N_SIGMA = 2.5 # From: Barker & Pedersen 1995 JAC
     
     class Resolution(object):
     
    @@ -66 +65 @@
         *q_calc* is the list of points to calculate, or None if this should
         be estimated from the *q* and *q_width*.
    -
    -    *nsigma* is the width of the resolution function.  Should be 2.5.
    -    See :func:`pinhole_resolution` for details.
    -    """
    -    def __init__(self, q, q_width, q_calc=None, nsigma=PINHOLE_N_SIGMA):
    +    """
    +    def __init__(self, q, q_width, q_calc=None, nsigma=3):
             #*min_step* is the minimum point spacing to use when computing the
             #underlying model.  It should be on the order of
     
    @@ -86 +82 @@
     
             # Protect against models which are not defined for very low q.  Limit
    -        # the smallest q value evaluated to 0.02*min.  Note that negative q
    -        # values are trimmed even for broad resolution.  Although not possible
    -        # from the geometry, they may appear since we are using a truncated
    -        # gaussian to represent resolution rather than a skew distribution.
    +        # the smallest q value evaluated (in absolute) to 0.02*min
             cutoff = MINIMUM_ABSOLUTE_Q*np.min(self.q)
    -        self.q_calc = self.q_calc[self.q_calc >= cutoff]
    +        self.q_calc = self.q_calc[abs(self.q_calc) >= cutoff]
     
             # Build weight matrix from calculated q values
             self.weight_matrix = pinhole_resolution(
    -            self.q_calc, self.q, np.maximum(q_width, MINIMUM_RESOLUTION),
    -            nsigma=nsigma)
    +            self.q_calc, self.q, np.maximum(q_width, MINIMUM_RESOLUTION))
    +        self.q_calc = abs(self.q_calc)
     
         def apply(self, theory):
     
    @@ -108 +101 @@
         *q* points at which the data is measured.
     
    -    *qx_width* slit width in qx
    -
    -    *qy_width* slit height in qy
    +    *dqx* slit width in qx
    +
    +    *dqy* slit height in qy
     
         *q_calc* is the list of points to calculate, or None if this should
     
    @@ -161 +154 @@
     
     
    -def pinhole_resolution(q_calc, q, q_width, nsigma=PINHOLE_N_SIGMA):
    -    r"""
    +def pinhole_resolution(q_calc, q, q_width):
    +    """
         Compute the convolution matrix *W* for pinhole resolution 1-D data.
    166159 
     
    169162    *W*, the resolution smearing can be computed using *dot(W,q)*. 
    170163 
    171     Note that resolution is limited to $\pm 2.5 \sigma$.[1]  The true resolution 
    172     function is a broadened triangle, and does not extend over the entire 
    173     range $(-\infty, +\infty)$.  It is important to impose this limitation 
    174     since some models fall so steeply that the weighted value in gaussian 
    175     tails would otherwise dominate the integral. 
    176  
    177164    *q_calc* must be increasing.  *q_width* must be greater than zero. 
    178  
    179     [1] Barker, J. G., and J. S. Pedersen. 1995. Instrumental Smearing Effects 
    180     in Radially Symmetric Small-Angle Neutron Scattering by Numerical and 
    181     Analytical Methods. Journal of Applied Crystallography 28 (2): 105--14. 
    182     https://doi.org/10.1107/S0021889894010095. 
    183165    """ 
    184166    # The current algorithm is a midpoint rectangle rule.  In the test case, 
     
    @@ -188 +170 @@
         cdf = erf((edges[:, None] - q[None, :]) / (sqrt(2.0)*q_width)[None, :])
         weights = cdf[1:] - cdf[:-1]
    -    # Limit q range to +/- 2.5 sigma
    -    qhigh = q + nsigma*q_width
    -    #qlow = q - nsigma*q_width  # linear limits
    -    qlow = q*q/qhigh  # log limits
    -    weights[q_calc[:, None] < qlow[None, :]] = 0.
    -    weights[q_calc[:, None] > qhigh[None, :]] = 0.
         weights /= np.sum(weights, axis=0)[None, :]
         return weights
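
    The truncation deleted above can be exercised on its own.  A sketch
    assuming midpoint bin edges for q_calc (the repository's bin-edge helper
    is not shown in this changeset); note the lower cut uses log-symmetric
    limits, qlow = q*q/qhigh, so that q/qlow equals qhigh/q:

        import numpy as np
        from scipy.special import erf

        def truncated_pinhole(q_calc, q, q_width, nsigma=2.5):
            # midpoint bin edges around q_calc (assumed edge construction)
            edges = np.hstack((q_calc[0] - 0.5*(q_calc[1] - q_calc[0]),
                               0.5*(q_calc[1:] + q_calc[:-1]),
                               q_calc[-1] + 0.5*(q_calc[-1] - q_calc[-2])))
            cdf = erf((edges[:, None] - q[None, :]) / (np.sqrt(2.0)*q_width)[None, :])
            weights = cdf[1:] - cdf[:-1]
            qhigh = q + nsigma*q_width  # cut the upper tail at +nsigma
            qlow = q*q/qhigh            # log-symmetric lower cut
            weights[q_calc[:, None] < qlow[None, :]] = 0.
            weights[q_calc[:, None] > qhigh[None, :]] = 0.
            weights /= np.sum(weights, axis=0)[None, :]  # renormalize columns
            return weights

        q = np.array([0.1])
        q_calc = np.linspace(0.01, 0.2, 200)
        W = truncated_pinhole(q_calc, q, q_width=np.array([0.02]))
        print(W.sum(axis=0))  # each column sums to 1 after renormalization
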
     
    @@ -518 +494 @@
     
     
    -def gaussian(q, q0, dq, nsigma=2.5):
    -    """
    -    Return the truncated Gaussian resolution function.
    +def gaussian(q, q0, dq):
    +    """
    +    Return the Gaussian resolution function.
     
         *q0* is the center, *dq* is the width and *q* are the points to evaluate.
         """
    -    # Calculate the density of the tails; the resulting gaussian needs to be
    -    # scaled by this amount in order to integrate to 1.0
    -    two_tail_density = 2 * (1 + erf(-nsigma/sqrt(2)))/2
    -    return exp(-0.5*((q-q0)/dq)**2)/(sqrt(2*pi)*dq)/(1-two_tail_density)
    +    return exp(-0.5*((q-q0)/dq)**2)/(sqrt(2*pi)*dq)
     
     
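
    The factor deleted from gaussian() is what keeps a +/- nsigma truncated
    gaussian normalized: the two tails hold 1 + erf(-nsigma/sqrt(2)) of the
    total mass, so the restricted density is divided by one minus that.  A
    quick numerical check (truncated_gaussian is an illustrative name):

        import numpy as np
        from scipy.special import erf
        from scipy.integrate import quad

        def truncated_gaussian(q, q0, dq, nsigma=2.5):
            # mass in the two tails beyond +/- nsigma
            two_tail_density = 2 * (1 + erf(-nsigma/np.sqrt(2)))/2
            return (np.exp(-0.5*((q - q0)/dq)**2)
                    / (np.sqrt(2*np.pi)*dq) / (1 - two_tail_density))

        q0, dq, nsigma = 0.1, 0.01, 2.5
        area, _ = quad(truncated_gaussian, q0 - nsigma*dq, q0 + nsigma*dq,
                       args=(q0, dq, nsigma))
        print(area)  # ~1.0 thanks to the rescaling
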
     
    @@ -585 +558 @@
     
     
    -def romberg_pinhole_1d(q, q_width, form, pars, nsigma=2.5):
    +def romberg_pinhole_1d(q, q_width, form, pars, nsigma=5):
         """
         Romberg integration for pinhole resolution.
     
    @@ -705 +678 @@
             np.testing.assert_equal(output, self.y)
     
    -    # TODO: turn pinhole/slit demos into tests
    -
         def test_pinhole(self):
             """
     
    @@ -715 +686 @@
             theory = 12.0-1000.0*resolution.q_calc
             output = resolution.apply(theory)
    -        # Note: answer came from output of previous run.  Non-integer
    -        # values at ends come from the fact that q_calc does not
    -        # extend beyond q, and so the weights don't balance.
             answer = [
    -            10.47037734, 9.86925860,
    -            9., 8., 7., 6., 5., 4.,
    -            3.13074140, 2.52962266,
    +            10.44785079, 9.84991299, 8.98101708,
    +            7.99906585, 6.99998311, 6.00001689,
    +            5.00093415, 4.01898292, 3.15008701, 2.55214921,
                 ]
             np.testing.assert_allclose(output, answer, atol=1e-8)
     
    @@ -764 +732 @@
             self._compare(q, output, answer, 1e-6)
     
    -    @unittest.skip("suppress comparison with old version; pinhole calc changed")
         def test_pinhole(self):
             """
     
    @@ -779 +746 @@
             self._compare(q, output, answer, 3e-4)
     
    -    @unittest.skip("suppress comparison with old version; pinhole calc changed")
         def test_pinhole_romberg(self):
             """
     
    @@ -795 +761 @@
             #                     2*np.pi/pars['radius']/200)
             #tol = 0.001
    -        ## The default 2.5 sigma and no extra points gets 1%
    +        ## The default 3 sigma and no extra points gets 1%
             q_calc = None  # type: np.ndarray
             tol = 0.01
     
    @@ -1114 +1080 @@
     
         if isinstance(resolution, Slit1D):
    -        width, height = resolution.qx_width, resolution.qy_width
    +        width, height = resolution.dqx, resolution.dqy
             Iq_romb = romberg_slit_1d(resolution.q, width, height, model, pars)
         else: