Changeset 3fc4097 in sasmodels


Ignore:
Timestamp:
Jun 10, 2018 8:41:02 PM (2 months ago)
Author:
GitHub <noreply@…>
Branches:
master, ESS_GUI, beta_approx, beta_approx_lazy_results, beta_approx_new_R_eff, doc_update, ticket-1104-resolution, ticket-1112, ticket-1142-plugin-reload, ticket-1148-Sq-scale-background, ticket-608-user-defined-weights
Children:
0b9c6df
Parents:
f796469 (diff), bbb887d (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
git-author:
Paul Butler <butlerpd@…> (06/10/18 20:41:02)
git-committer:
GitHub <noreply@…> (06/10/18 20:41:02)
Message:

Merge pull request #66 from SasView/ticket-1102-pinhole

limit pinhole resolution integral to ± 2.5 dq

Files:
8 edited

Legend:

Unmodified
Added
Removed
  • sasmodels/compare.py

    r1fbadb2 r65fbf7c  
    645645 
    646646def make_data(opts): 
    647     # type: (Dict[str, Any]) -> Tuple[Data, np.ndarray] 
     647    # type: (Dict[str, Any], float) -> Tuple[Data, np.ndarray] 
    648648    """ 
    649649    Generate an empty dataset, used with the model to set Q points 
     
    667667        if opts['zero']: 
    668668            q = np.hstack((0, q)) 
    669         data = empty_data1D(q, resolution=res) 
     669        # TODO: provide command line control of lambda and Delta lambda/lambda 
     670        #L, dLoL = 5, 0.14/np.sqrt(6)  # wavelength and 14% triangular FWHM 
     671        L, dLoL = 0, 0 
     672        data = empty_data1D(q, resolution=res, L=L, dL=L*dLoL) 
    670673        index = slice(None, None) 
    671674    return data, index 
     
    786789    base_n, comp_n = opts['count'] 
    787790    base_pars, comp_pars = opts['pars'] 
    788     data = opts['data'] 
     791    base_data, comp_data = opts['data'] 
    789792 
    790793    comparison = comp is not None 
     
    800803            print("%s t=%.2f ms, intensity=%.0f" 
    801804                  % (base.engine, base_time, base_value.sum())) 
    802         _show_invalid(data, base_value) 
     805        _show_invalid(base_data, base_value) 
    803806    except ImportError: 
    804807        traceback.print_exc() 
     
    812815                print("%s t=%.2f ms, intensity=%.0f" 
    813816                      % (comp.engine, comp_time, comp_value.sum())) 
    814             _show_invalid(data, comp_value) 
     817            _show_invalid(base_data, comp_value) 
    815818        except ImportError: 
    816819            traceback.print_exc() 
     
    866869    have_base, have_comp = (base_value is not None), (comp_value is not None) 
    867870    base, comp = opts['engines'] 
    868     data = opts['data'] 
     871    base_data, comp_data = opts['data'] 
    869872    use_data = (opts['datafile'] is not None) and (have_base ^ have_comp) 
    870873 
    871874    # Plot if requested 
    872875    view = opts['view'] 
     876    #view = 'log' 
    873877    if limits is None: 
    874878        vmin, vmax = np.inf, -np.inf 
     
    884888        if have_comp: 
    885889            plt.subplot(131) 
    886         plot_theory(data, base_value, view=view, use_data=use_data, limits=limits) 
     890        plot_theory(base_data, base_value, view=view, use_data=use_data, limits=limits) 
    887891        plt.title("%s t=%.2f ms"%(base.engine, base_time)) 
    888892        #cbar_title = "log I" 
     
    891895            plt.subplot(132) 
    892896        if not opts['is2d'] and have_base: 
    893             plot_theory(data, base_value, view=view, use_data=use_data, limits=limits) 
    894         plot_theory(data, comp_value, view=view, use_data=use_data, limits=limits) 
     897            plot_theory(comp_data, base_value, view=view, use_data=use_data, limits=limits) 
     898        plot_theory(comp_data, comp_value, view=view, use_data=use_data, limits=limits) 
    895899        plt.title("%s t=%.2f ms"%(comp.engine, comp_time)) 
    896900        #cbar_title = "log I" 
     
    908912            err[err > cutoff] = cutoff 
    909913        #err,errstr = base/comp,"ratio" 
    910         plot_theory(data, None, resid=err, view=errview, use_data=use_data) 
     914        # Note: base_data only since base and comp have same q values (though 
     915        # perhaps different resolution), and we are plotting the difference 
     916        # at each q 
     917        plot_theory(base_data, None, resid=err, view=errview, use_data=use_data) 
    911918        plt.xscale('log' if view == 'log' and not opts['is2d'] else 'linear') 
    912919        plt.legend(['P%d'%(k+1) for k in range(setnum+1)], loc='best') 
     
    10751082        'qmax'      : 0.05, 
    10761083        'nq'        : 128, 
    1077         'res'       : 0.0, 
     1084        'res'       : '0.0', 
    10781085        'noise'     : 0.0, 
    10791086        'accuracy'  : 'Low', 
     
    11151122        elif arg.startswith('-q='): 
    11161123            opts['qmin'], opts['qmax'] = [float(v) for v in arg[3:].split(':')] 
    1117         elif arg.startswith('-res='):      opts['res'] = float(arg[5:]) 
     1124        elif arg.startswith('-res='):      opts['res'] = arg[5:] 
    11181125        elif arg.startswith('-noise='):    opts['noise'] = float(arg[7:]) 
    11191126        elif arg.startswith('-sets='):     opts['sets'] = int(arg[6:]) 
     
    11731180    if opts['qmin'] is None: 
    11741181        opts['qmin'] = 0.001*opts['qmax'] 
    1175     if opts['datafile'] is not None: 
    1176         data = load_data(os.path.expanduser(opts['datafile'])) 
    1177     else: 
    1178         data, _ = make_data(opts) 
    11791182 
    11801183    comparison = any(PAR_SPLIT in v for v in values) 
     
    12161219        opts['cutoff'] = [float(opts['cutoff'])]*2 
    12171220 
    1218     base = make_engine(model_info[0], data, opts['engine'][0], 
     1221    if PAR_SPLIT in opts['res']: 
     1222        opts['res'] = [float(k) for k in opts['res'].split(PAR_SPLIT, 2)] 
     1223        comparison = True 
     1224    else: 
     1225        opts['res'] = [float(opts['res'])]*2 
     1226 
     1227    if opts['datafile'] is not None: 
     1228        data = load_data(os.path.expanduser(opts['datafile'])) 
     1229    else: 
     1230        # Hack around the fact that make_data doesn't take a pair of resolutions 
     1231        res = opts['res'] 
     1232        opts['res'] = res[0] 
     1233        data0, _ = make_data(opts) 
     1234        if res[0] != res[1]: 
     1235            opts['res'] = res[1] 
     1236            data1, _ = make_data(opts) 
     1237        else: 
     1238            data1 = data0 
     1239        opts['res'] = res 
     1240        data = data0, data1 
     1241 
     1242    base = make_engine(model_info[0], data[0], opts['engine'][0], 
    12191243                       opts['cutoff'][0], opts['ngauss'][0]) 
    12201244    if comparison: 
    1221         comp = make_engine(model_info[1], data, opts['engine'][1], 
     1245        comp = make_engine(model_info[1], data[1], opts['engine'][1], 
    12221246                           opts['cutoff'][1], opts['ngauss'][1]) 
    12231247    else: 
  • sasmodels/data.py

    rd86f0fc r65fbf7c  
    3636 
    3737import numpy as np  # type: ignore 
     38from numpy import sqrt, sin, cos, pi 
    3839 
    3940# pylint: disable=unused-import 
     
    301302 
    302303 
    303 def empty_data1D(q, resolution=0.0): 
     304def empty_data1D(q, resolution=0.0, L=0., dL=0.): 
    304305    # type: (np.ndarray, float) -> Data1D 
    305     """ 
     306    r""" 
    306307    Create empty 1D data using the given *q* as the x value. 
    307308 
    308     *resolution* dq/q defaults to 5%. 
     309    rms *resolution* $\Delta q/q$ defaults to 0%.  If wavelength *L* and rms 
     310    wavelength divergence *dL* are defined, then *resolution* defines 
     311    rms $\Delta \theta/\theta$ for the lowest *q*, with $\theta$ derived from 
     312    $q = 4\pi/\lambda \sin(\theta)$. 
    309313    """ 
    310314 
     
    313317    Iq, dIq = None, None 
    314318    q = np.asarray(q) 
    315     data = Data1D(q, Iq, dx=resolution * q, dy=dIq) 
     319    if L != 0 and resolution != 0: 
     320        theta = np.arcsin(q*L/(4*pi)) 
     321        dtheta = theta[0]*resolution 
     322        ## Solving Gaussian error propagation from 
     323        ##   Dq^2 = (dq/dL)^2 DL^2 + (dq/dtheta)^2 Dtheta^2 
     324        ## gives 
     325        ##   (Dq/q)^2 = (DL/L)**2 + (Dtheta/tan(theta))**2 
     326        ## Take the square root and multiply by q, giving 
     327        ##   Dq = (4*pi/L) * sqrt((sin(theta)*dL/L)**2 + (cos(theta)*dtheta)**2) 
     328        dq = (4*pi/L) * sqrt((sin(theta)*dL/L)**2 + (cos(theta)*dtheta)**2) 
     329    else: 
     330        dq = resolution * q 
     331 
     332    data = Data1D(q, Iq, dx=dq, dy=dIq) 
    316333    data.filename = "fake data" 
    317334    return data 
  • sasmodels/jitter.py

    rb3703f5 r1198f90  
    774774        # set small jitter as 0 if multiple pd dims 
    775775        dims = sum(v > 0 for v in jitter) 
    776         limit = [0, 0, 0.5, 5][dims] 
     776        limit = [0, 0.5, 5][dims] 
    777777        jitter = [0 if v < limit else v for v in jitter] 
    778778        axes.cla() 
  • sasmodels/resolution.py

    r2d81cfe rbbb887d  
    2020MINIMUM_RESOLUTION = 1e-8 
    2121MINIMUM_ABSOLUTE_Q = 0.02  # relative to the minimum q in the data 
     22PINHOLE_N_SIGMA = 2.5 # From: Barker & Pedersen 1995 JAC 
    2223 
    2324class Resolution(object): 
     
    6566    *q_calc* is the list of points to calculate, or None if this should 
    6667    be estimated from the *q* and *q_width*. 
    67     """ 
    68     def __init__(self, q, q_width, q_calc=None, nsigma=3): 
     68 
     69    *nsigma* is the width of the resolution function.  Should be 2.5. 
     70    See :func:`pinhole_resolution` for details. 
     71    """ 
     72    def __init__(self, q, q_width, q_calc=None, nsigma=PINHOLE_N_SIGMA): 
    6973        #*min_step* is the minimum point spacing to use when computing the 
    7074        #underlying model.  It should be on the order of 
     
    8286 
    8387        # Protect against models which are not defined for very low q.  Limit 
    84         # the smallest q value evaluated (in absolute) to 0.02*min 
     88        # the smallest q value evaluated to 0.02*min.  Note that negative q 
     89        # values are trimmed even for broad resolution.  Although not possible 
     90        # from the geometry, they may appear since we are using a truncated 
     91        # gaussian to represent resolution rather than a skew distribution. 
    8592        cutoff = MINIMUM_ABSOLUTE_Q*np.min(self.q) 
    86         self.q_calc = self.q_calc[abs(self.q_calc) >= cutoff] 
     93        self.q_calc = self.q_calc[self.q_calc >= cutoff] 
    8794 
    8895        # Build weight matrix from calculated q values 
    8996        self.weight_matrix = pinhole_resolution( 
    90             self.q_calc, self.q, np.maximum(q_width, MINIMUM_RESOLUTION)) 
    91         self.q_calc = abs(self.q_calc) 
     97            self.q_calc, self.q, np.maximum(q_width, MINIMUM_RESOLUTION), 
     98            nsigma=nsigma) 
    9299 
    93100    def apply(self, theory): 
     
    101108    *q* points at which the data is measured. 
    102109 
    103     *dqx* slit width in qx 
    104  
    105     *dqy* slit height in qy 
     110    *qx_width* slit width in qx 
     111 
     112    *qy_width* slit height in qy 
    106113 
    107114    *q_calc* is the list of points to calculate, or None if this should 
     
    154161 
    155162 
    156 def pinhole_resolution(q_calc, q, q_width): 
    157     """ 
     163def pinhole_resolution(q_calc, q, q_width, nsigma=PINHOLE_N_SIGMA): 
     164    r""" 
    158165    Compute the convolution matrix *W* for pinhole resolution 1-D data. 
    159166 
     
    162169    *W*, the resolution smearing can be computed using *dot(W,q)*. 
    163170 
     171    Note that resolution is limited to $\pm 2.5 \sigma$.[1]  The true resolution 
     172    function is a broadened triangle, and does not extend over the entire 
     173    range $(-\infty, +\infty)$.  It is important to impose this limitation 
     174    since some models fall so steeply that the weighted value in gaussian 
     175    tails would otherwise dominate the integral. 
     176 
    164177    *q_calc* must be increasing.  *q_width* must be greater than zero. 
     178 
     179    [1] Barker, J. G., and J. S. Pedersen. 1995. Instrumental Smearing Effects 
     180    in Radially Symmetric Small-Angle Neutron Scattering by Numerical and 
     181    Analytical Methods. Journal of Applied Crystallography 28 (2): 105--14. 
     182    https://doi.org/10.1107/S0021889894010095. 
    165183    """ 
    166184    # The current algorithm is a midpoint rectangle rule.  In the test case, 
     
    170188    cdf = erf((edges[:, None] - q[None, :]) / (sqrt(2.0)*q_width)[None, :]) 
    171189    weights = cdf[1:] - cdf[:-1] 
     190    # Limit q range to +/- 2.5 sigma 
     191    qhigh = q + nsigma*q_width 
     192    #qlow = q - nsigma*q_width  # linear limits 
     193    qlow = q*q/qhigh  # log limits 
     194    weights[q_calc[:, None] < qlow[None, :]] = 0. 
     195    weights[q_calc[:, None] > qhigh[None, :]] = 0. 
    172196    weights /= np.sum(weights, axis=0)[None, :] 
    173197    return weights 
     
    494518 
    495519 
    496 def gaussian(q, q0, dq): 
    497     """ 
    498     Return the Gaussian resolution function. 
     520def gaussian(q, q0, dq, nsigma=2.5): 
     521    """ 
     522    Return the truncated Gaussian resolution function. 
    499523 
    500524    *q0* is the center, *dq* is the width and *q* are the points to evaluate. 
    501525    """ 
    502     return exp(-0.5*((q-q0)/dq)**2)/(sqrt(2*pi)*dq) 
     526    # Calculate the density of the tails; the resulting gaussian needs to be 
     527    # scaled by this amount in order to integrate to 1.0 
     528    two_tail_density = 2 * (1 + erf(-nsigma/sqrt(2)))/2 
     529    return exp(-0.5*((q-q0)/dq)**2)/(sqrt(2*pi)*dq)/(1-two_tail_density) 
    503530 
    504531 
     
    558585 
    559586 
    560 def romberg_pinhole_1d(q, q_width, form, pars, nsigma=5): 
     587def romberg_pinhole_1d(q, q_width, form, pars, nsigma=2.5): 
    561588    """ 
    562589    Romberg integration for pinhole resolution. 
     
    678705        np.testing.assert_equal(output, self.y) 
    679706 
     707    # TODO: turn pinhole/slit demos into tests 
     708 
    680709    def test_pinhole(self): 
    681710        """ 
     
    686715        theory = 12.0-1000.0*resolution.q_calc 
    687716        output = resolution.apply(theory) 
     717        # Note: answer came from output of previous run.  Non-integer 
     718        # values at ends come from the fact that q_calc does not 
     719        # extend beyond q, and so the weights don't balance. 
    688720        answer = [ 
    689             10.44785079, 9.84991299, 8.98101708, 
    690             7.99906585, 6.99998311, 6.00001689, 
    691             5.00093415, 4.01898292, 3.15008701, 2.55214921, 
     721            10.47037734, 9.86925860, 
     722            9., 8., 7., 6., 5., 4., 
     723            3.13074140, 2.52962266, 
    692724            ] 
    693725        np.testing.assert_allclose(output, answer, atol=1e-8) 
     
    732764        self._compare(q, output, answer, 1e-6) 
    733765 
     766    @unittest.skip("suppress comparison with old version; pinhole calc changed") 
    734767    def test_pinhole(self): 
    735768        """ 
     
    746779        self._compare(q, output, answer, 3e-4) 
    747780 
     781    @unittest.skip("suppress comparison with old version; pinhole calc changed") 
    748782    def test_pinhole_romberg(self): 
    749783        """ 
     
    761795        #                     2*np.pi/pars['radius']/200) 
    762796        #tol = 0.001 
    763         ## The default 3 sigma and no extra points gets 1% 
     797        ## The default 2.5 sigma and no extra points gets 1% 
    764798        q_calc = None  # type: np.ndarray 
    765799        tol = 0.01 
     
    10801114 
    10811115    if isinstance(resolution, Slit1D): 
    1082         width, height = resolution.dqx, resolution.dqy 
     1116        width, height = resolution.qx_width, resolution.qy_width 
    10831117        Iq_romb = romberg_slit_1d(resolution.q, width, height, model, pars) 
    10841118    else: 
  • doc/guide/magnetism/magnetism.rst

    r4f5afc9 rbefe905  
    3939 
    4040.. math:: 
    41     -- &= ((1-u_i)(1-u_f))^{1/4} \\ 
    42     -+ &= ((1-u_i)(u_f))^{1/4} \\ 
    43     +- &= ((u_i)(1-u_f))^{1/4} \\ 
    44     ++ &= ((u_i)(u_f))^{1/4} 
     41    -- &= (1-u_i)(1-u_f) \\ 
     42    -+ &= (1-u_i)(u_f) \\ 
     43    +- &= (u_i)(1-u_f) \\ 
     44    ++ &= (u_i)(u_f) 
    4545 
    4646Ideally the experiment would measure the pure spin states independently and 
     
    104104| 2015-05-02 Steve King 
    105105| 2017-11-15 Paul Kienzle 
     106| 2018-06-02 Adam Washington 
  • doc/guide/plugin.rst

    r7e6bc45e rf796469  
    822822 
    823823        :code:`source = ["lib/Si.c", ...]` 
    824         (`Si.c <https://github.com/SasView/sasmodels/tree/master/sasmodels/models/lib/Si.c>`_) 
     824        (`Si.c <https://github.com/SasView/sasmodels/tree/master/sasmodels/models/lib/sas_Si.c>`_) 
    825825 
    826826    sas_3j1x_x(x): 
  • sasmodels/kernel_iq.c

    rdc6f601 r7c35fda  
    8383  in_spin = clip(in_spin, 0.0, 1.0); 
    8484  out_spin = clip(out_spin, 0.0, 1.0); 
    85   // Note: sasview 3.1 scaled all slds by sqrt(weight) and assumed that 
     85  // Previous version of this function took the square root of the weights, 
     86  // under the assumption that  
     87  // 
    8688  //     w*I(q, rho1, rho2, ...) = I(q, sqrt(w)*rho1, sqrt(w)*rho2, ...) 
    87   // which is likely to be the case for simple models. 
    88   weight[0] = sqrt((1.0-in_spin) * (1.0-out_spin)); // dd 
    89   weight[1] = sqrt((1.0-in_spin) * out_spin);       // du.real 
    90   weight[2] = sqrt(in_spin * (1.0-out_spin));       // ud.real 
    91   weight[3] = sqrt(in_spin * out_spin);             // uu 
     89  // 
     90  // However, since the weights are applied to the final intensity and 
     91  // are not interned inside the I(q) function, we want the full 
     92  // weight and not the square root.  Any function using 
     93  // set_spin_weights as part of calculating an amplitude will need to 
     94  // manually take that square root, but there is currently no such 
     95  // function. 
     96  weight[0] = (1.0-in_spin) * (1.0-out_spin); // dd 
     97  weight[1] = (1.0-in_spin) * out_spin;       // du 
     98  weight[2] = in_spin * (1.0-out_spin);       // ud 
     99  weight[3] = in_spin * out_spin;             // uu 
    92100  weight[4] = weight[1]; // du.imag 
    93101  weight[5] = weight[2]; // ud.imag 
  • sasmodels/models/mass_surface_fractal.py

    r2d81cfe r7994359  
    3939    The surface ( $D_s$ ) and mass ( $D_m$ ) fractal dimensions are only 
    4040    valid if $0 < surface\_dim < 6$ , $0 < mass\_dim < 6$ , and 
    41     $(surface\_dim + mass\_dim ) < 6$ . 
    42  
     41    $(surface\_dim + mass\_dim ) < 6$ .  
     42    Older versions of sasview may have the default primary particle radius 
     43    larger than the cluster radius, this was an error, also present in the  
     44    Schmidt review paper below. The primary particle should be the smaller  
     45    as described in the original Hurd et al. who also point out that  
     46    polydispersity in the primary particle sizes may affect their  
     47    apparent surface fractal dimension. 
     48     
    4349 
    4450References 
    4551---------- 
    4652 
    47 P Schmidt, *J Appl. Cryst.*, 24 (1991) 414-435 Equation(19) 
     53.. [#] P Schmidt, *J Appl. Cryst.*, 24 (1991) 414-435 Equation(19) 
     54.. [#] A J Hurd, D W Schaefer, J E Martin, *Phys. Rev. A*, 
     55   35 (1987) 2361-2364 Equation(2) 
    4856 
    49 A J Hurd, D W Schaefer, J E Martin, *Phys. Rev. A*, 
    50 35 (1987) 2361-2364 Equation(2) 
     57Authorship and Verification 
     58---------------------------- 
     59 
     60* **Converted to sasmodels by:** Piotr Rozyczko **Date:** Jan 20, 2016 
     61* **Last Reviewed by:** Richard Heenan **Date:** May 30, 2018 
    5162""" 
    5263 
     
    6778        rg_primary    =  rg 
    6879        background   =  background 
    69         Ref: Schmidt, J Appl Cryst, eq(19), (1991), 24, 414-435 
    7080        Hurd, Schaefer, Martin, Phys Rev A, eq(2),(1987),35, 2361-2364 
    7181        Note that 0 < Ds< 6 and 0 < Dm < 6. 
     
    7888    ["fractal_dim_mass", "",      1.8, [0.0, 6.0], "", "Mass fractal dimension"], 
    7989    ["fractal_dim_surf", "",      2.3, [0.0, 6.0], "", "Surface fractal dimension"], 
    80     ["rg_cluster",       "Ang",  86.7, [0.0, inf], "", "Cluster radius of gyration"], 
    81     ["rg_primary",       "Ang", 4000., [0.0, inf], "", "Primary particle radius of gyration"], 
     90    ["rg_cluster",       "Ang", 4000., [0.0, inf], "", "Cluster radius of gyration"], 
     91    ["rg_primary",       "Ang",  86.7, [0.0, inf], "", "Primary particle radius of gyration"], 
    8292] 
    8393# pylint: enable=bad-whitespace, line-too-long 
     
    107117            fractal_dim_mass=1.8, 
    108118            fractal_dim_surf=2.3, 
    109             rg_cluster=86.7, 
    110             rg_primary=4000.0) 
     119            rg_cluster=4000.0, 
     120            rg_primary=86.7) 
    111121 
    112122tests = [ 
    113123 
    114     # Accuracy tests based on content in test/utest_other_models.py 
    115     [{'fractal_dim_mass':      1.8, 
     124    # Accuracy tests based on content in test/utest_other_models.py  All except first, changed so rg_cluster is the larger, RKH 30 May 2018 
     125    [{'fractal_dim_mass':   1.8, 
    116126      'fractal_dim_surf':   2.3, 
    117127      'rg_cluster':   86.7, 
     
    123133    [{'fractal_dim_mass':      3.3, 
    124134      'fractal_dim_surf':   1.0, 
    125       'rg_cluster':   90.0, 
    126       'rg_primary': 4000.0, 
    127      }, 0.001, 0.18562699016], 
     135      'rg_cluster': 4000.0, 
     136      'rg_primary':   90.0, 
     137     }, 0.001, 0.0932516614456], 
    128138 
    129139    [{'fractal_dim_mass':      1.3, 
    130       'fractal_dim_surf':   1.0, 
    131       'rg_cluster':   90.0, 
    132       'rg_primary': 2000.0, 
     140      'fractal_dim_surf':   2.0, 
     141      'rg_cluster': 2000.0, 
     142      'rg_primary':   90.0, 
    133143      'background':    0.8, 
    134      }, 0.001, 1.16539753641], 
     144     }, 0.001, 1.28296431786], 
    135145 
    136146    [{'fractal_dim_mass':      2.3, 
    137       'fractal_dim_surf':   1.0, 
    138       'rg_cluster':   90.0, 
    139       'rg_primary': 1000.0, 
     147      'fractal_dim_surf':   3.1, 
     148      'rg_cluster':  1000.0, 
     149      'rg_primary':  30.0, 
    140150      'scale':        10.0, 
    141151      'background':    0.0, 
    142      }, 0.051, 0.000169548800377], 
     152     }, 0.051, 0.00333804044899], 
    143153    ] 
Note: See TracChangeset for help on using the changeset viewer.