Changeset bf6631a in sasmodels


Ignore:
Timestamp:
Jan 28, 2016 8:42:56 AM (8 years ago)
Author:
piotr
Branches:
master, core_shell_microgels, costrafo411, magnetic_model, release_v0.94, release_v0.95, ticket-1257-vesicle-product, ticket_1156, ticket_1265_superball, ticket_822_more_unit_tests
Children:
4f2478e, 168052c
Parents:
f94d8a2 (diff), 384d114 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge remote-tracking branch 'origin/master'

Files:
5 added
10 edited

Legend:

Unmodified
Added
Removed
  • extra/pylint.rc

    r5ef0633 r37a7252  
    66# Python code to execute, usually for sys.path manipulation such as 
    77# pygtk.require(). 
    8 #init-hook= 
     8#init-hook='import os, sys; sys.path.extend([os.getcwd(),os.path.join(os.getcwd(),"extra")])' 
    99 
    1010# Profiled execution. 
  • sasmodels/__init__.py

    r9890053 r37a7252  
     1""" 
     2sasmodels 
     3========= 
     4 
     5**sasmodels** is a package containing models for small angle neutron and xray 
     6scattering.  Models supported are the one dimensional circular average and 
     7two dimensional oriented patterns.  As well as the form factor calculations 
     8for the individual shapes **sasmodels** also provides automatic shape 
     9polydispersity, angular dispersion and resolution convolution.  SESANS 
     10patterns can be computed for any model. 
     11 
     12Models can be written in python or in C.  C models can run on the GPU if 
     13OpenCL drivers are available.  See :mod:`generate` for details on 
     14defining new models. 
     15""" 
     16 
    117__version__ = "0.9" 
  • sasmodels/bumps_model.py

    rec7e360 r37a7252  
    66arguments to set the initial value for each parameter. 
    77 
    8 :class:`Experiment` combines the *Model* function with a data file loaded by the 
    9 sasview data loader.  *Experiment* takes a *cutoff* parameter controlling 
     8:class:`Experiment` combines the *Model* function with a data file loaded by 
     9the sasview data loader.  *Experiment* takes a *cutoff* parameter controlling 
    1010how far the polydispersity integral extends. 
    1111 
    1212""" 
     13 
     14__all__ = [ 
     15    "Model", "Experiment", 
     16    ] 
    1317 
    1418import warnings 
     
    2024 
    2125# CRUFT: old style bumps wrapper which doesn't separate data and model 
     26# pylint: disable=invalid-name 
    2227def BumpsModel(data, model, cutoff=1e-5, **kw): 
     28    r""" 
     29    Bind a model to data, along with a polydispersity cutoff. 
     30 
     31    *data* is a :class:`data.Data1D`, :class:`data.Data2D` or 
     32    :class:`data.Sesans` object.  Use :func:`data.empty_data1D` or 
     33    :func:`data.empty_data2D` to define $q, \Delta q$ calculation 
     34    points for displaying the SANS curve when there is no measured data. 
     35 
     36    *model* is a runnable module as returned from :func:`core.load_model`. 
     37 
     38    *cutoff* is the polydispersity weight cutoff. 
     39 
     40    Any additional *key=value* pairs are model dependent parameters. 
     41 
     42    Returns an :class:`Experiment` object. 
     43 
     44    Note that the usual Bumps semantics is not fully supported, since 
     45    assigning *M.name = parameter* on the returned experiment object 
     46    does not set that parameter in the model.  Range setting will still 
     47    work as expected though. 
     48 
     49    .. deprecated:: 0.1 
     50        Use :class:`Experiment` instead. 
     51    """ 
    2352    warnings.warn("Use of BumpsModel is deprecated.  Use bumps_model.Experiment instead.") 
     53 
     54    # Create the model and experiment 
    2455    model = Model(model, **kw) 
    2556    experiment = Experiment(data=data, model=model, cutoff=cutoff) 
    26     for k in model._parameter_names: 
    27         setattr(experiment, k, getattr(model, k)) 
     57 
     58    # Copy the model parameters up to the experiment object. 
     59    for k, v in model.parameters().items(): 
     60        setattr(experiment, k, v) 
    2861    return experiment 
    2962 
     63 
    3064def create_parameters(model_info, **kwargs): 
     65    """ 
     66    Generate Bumps parameters from the model info. 
     67 
     68    *model_info* is returned from :func:`generate.model_info` on the 
     69    model definition module. 
     70 
     71    Any additional *key=value* pairs are initial values for the parameters 
     72    to the models.  Uninitialized parameters will use the model default 
     73    value. 
     74 
     75    Returns a dictionary of *{name: Parameter}* containing the bumps 
     76    parameters for each model parameter, and a dictionary of 
     77    *{name: str}* containing the polydispersity distribution types. 
     78    """ 
    3179    # lazy import; this allows the doc builder and nosetests to run even 
    3280    # when bumps is not on the path. 
    3381    from bumps.names import Parameter 
    34  
    35     partype = model_info['partype'] 
    3682 
    3783    pars = {} 
     
    4086        value = kwargs.pop(name, default) 
    4187        pars[name] = Parameter.default(value, name=name, limits=limits) 
    42     for name in partype['pd-2d']: 
     88    for name in model_info['partype']['pd-2d']: 
    4389        for xpart, xdefault, xlimits in [ 
    44             ('_pd', 0., limits), 
    45             ('_pd_n', 35., (0, 1000)), 
    46             ('_pd_nsigma', 3., (0, 10)), 
    47         ]: 
     90                ('_pd', 0., limits), 
     91                ('_pd_n', 35., (0, 1000)), 
     92                ('_pd_nsigma', 3., (0, 10)), 
     93            ]: 
    4894            xname = name + xpart 
    4995            xvalue = kwargs.pop(xname, xdefault) 
     
    5197 
    5298    pd_types = {} 
    53     for name in partype['pd-2d']: 
     99    for name in model_info['partype']['pd-2d']: 
    54100        xname = name + '_pd_type' 
    55101        xvalue = kwargs.pop(xname, 'gaussian') 
     
    63109 
    64110class Model(object): 
     111    """ 
     112    Bumps wrapper for a SAS model. 
     113 
     114    *model* is a runnable module as returned from :func:`core.load_model`. 
     115 
     116    *cutoff* is the polydispersity weight cutoff. 
     117 
     118    Any additional *key=value* pairs are model dependent parameters. 
     119    """ 
    65120    def __init__(self, model, **kwargs): 
    66121        self._sasmodel = model 
    67122        pars, pd_types = create_parameters(model.info, **kwargs) 
    68         for k,v in pars.items(): 
     123        for k, v in pars.items(): 
    69124            setattr(self, k, v) 
    70         for k,v in pd_types.items(): 
     125        for k, v in pd_types.items(): 
    71126            setattr(self, k, v) 
    72127        self._parameter_names = list(pars.keys()) 
     
    75130    def parameters(self): 
    76131        """ 
    77         Return a dictionary of parameters 
     132        Return a dictionary of parameters objects for the parameters, 
     133        excluding polydispersity distribution type. 
    78134        """ 
    79135        return dict((k, getattr(self, k)) for k in self._parameter_names) 
    80136 
    81137    def state(self): 
     138        """ 
     139        Return a dictionary of current values for all the parameters, 
     140        including polydispersity distribution type. 
     141        """ 
    82142        pars = dict((k, getattr(self, k).value) for k in self._parameter_names) 
    83143        pars.update((k, getattr(self, k)) for k in self._pd_type_names) 
     
    85145 
    86146class Experiment(DataMixin): 
    87     """ 
    88     Return a bumps wrapper for a SAS model. 
    89  
    90     *data* is the data to be fitted. 
    91  
    92     *model* is the SAS model from :func:`core.load_model`. 
     147    r""" 
     148    Bumps wrapper for a SAS experiment. 
     149 
     150    *data* is a :class:`data.Data1D`, :class:`data.Data2D` or 
     151    :class:`data.Sesans` object.  Use :func:`data.empty_data1D` or 
     152    :func:`data.empty_data2D` to define $q, \Delta q$ calculation 
     153    points for displaying the SANS curve when there is no measured data. 
     154 
     155    *model* is a :class:`Model` object. 
    93156 
    94157    *cutoff* is the integration cutoff, which avoids computing the 
    95158    the SAS model where the polydispersity weight is low. 
    96159 
    97     Model parameters can be initialized with additional keyword 
    98     arguments, or by assigning to model.parameter_name.value. 
    99  
    100     The resulting bumps model can be used directly in a FitProblem call. 
     160    The resulting model can be used directly in a Bumps FitProblem call. 
    101161    """ 
    102162    def __init__(self, data, model, cutoff=1e-5): 
     
    109169 
    110170    def update(self): 
     171        """ 
     172        Call when model parameters have changed and theory needs to be 
     173        recalculated. 
     174        """ 
    111175        self._cache = {} 
    112176 
    113177    def numpoints(self): 
    114178        """ 
    115             Return the number of points 
     179        Return the number of data points 
    116180        """ 
    117181        return len(self.Iq) 
     
    124188 
    125189    def theory(self): 
     190        """ 
     191        Return the theory corresponding to the model parameters. 
     192 
     193        This method uses lazy evaluation, and requires model.update() to be 
     194        called when the parameters have changed. 
     195        """ 
    126196        if 'theory' not in self._cache: 
    127197            pars = self.model.state() 
     
    130200 
    131201    def residuals(self): 
     202        """ 
     203        Return theory minus data normalized by uncertainty. 
     204        """ 
    132205        #if np.any(self.err ==0): print("zeros in err") 
    133206        return (self.theory() - self.Iq) / self.dIq 
    134207 
    135208    def nllf(self): 
     209        """ 
     210        Return the negative log likelihood of seeing data given the model 
     211        parameters, up to a normalizing constant which depends on the data 
     212        uncertainty. 
     213        """ 
    136214        delta = self.residuals() 
    137215        #if np.any(np.isnan(R)): print("NaN in residuals") 
     
    149227 
    150228    def simulate_data(self, noise=None): 
     229        """ 
     230        Generate simulated data. 
     231        """ 
    151232        Iq = self.theory() 
    152233        self._set_data(Iq, noise) 
    153234 
    154235    def save(self, basename): 
     236        """ 
     237        Save the model parameters and data into a file. 
     238 
     239        Not Implemented. 
     240        """ 
    155241        pass 
    156242 
  • sasmodels/compare.py

    r608e31e rd15a908  
    7272# doc string so that we can display it at run time if there is an error. 
    7373# lin 
    74 __doc__ = __doc__ + """ 
     74__doc__ = (__doc__  # pylint: disable=redefined-builtin 
     75           + """ 
    7576Program description 
    7677------------------- 
    7778 
    78 """ + USAGE 
     79""" 
     80           + USAGE) 
    7981 
    8082 
     
    281283            theory = lambda: smearer.get_value() 
    282284        else: 
    283             theory = lambda: model.evalDistribution([data.qx_data[index], data.qy_data[index]]) 
     285            theory = lambda: model.evalDistribution([data.qx_data[index], 
     286                                                     data.qy_data[index]]) 
    284287    elif smearer is not None: 
    285288        theory = lambda: smearer(model.evalDistribution(data.x)) 
     
    416419        try: 
    417420            base_value, base_time = time_calculation(base, pars, Nbase) 
    418             print("%s t=%.1f ms, intensity=%.0f"%(base.engine, base_time, sum(base_value))) 
     421            print("%s t=%.1f ms, intensity=%.0f" 
     422                  % (base.engine, base_time, sum(base_value))) 
    419423        except ImportError: 
    420424            traceback.print_exc() 
     
    426430        try: 
    427431            comp_value, comp_time = time_calculation(comp, pars, Ncomp) 
    428             print("%s t=%.1f ms, intensity=%.0f"%(comp.engine, comp_time, sum(comp_value))) 
     432            print("%s t=%.1f ms, intensity=%.0f" 
     433                  % (comp.engine, comp_time, sum(comp_value))) 
    429434        except ImportError: 
    430435            traceback.print_exc() 
     
    433438    # Compare, but only if computing both forms 
    434439    if Nbase > 0 and Ncomp > 0: 
    435         #print("speedup %.2g"%(comp_time/base_time)) 
    436         #print("max |base/comp|", max(abs(base_value/comp_value)), "%.15g"%max(abs(base_value)), "%.15g"%max(abs(comp_value))) 
    437         #comp *= max(base_value/comp_value) 
    438440        resid = (base_value - comp_value) 
    439441        relerr = resid/comp_value 
    440         _print_stats("|%s-%s|"%(base.engine, comp.engine) + (" "*(3+len(comp.engine))), 
     442        _print_stats("|%s-%s|" 
     443                     % (base.engine, comp.engine) + (" "*(3+len(comp.engine))), 
    441444                     resid) 
    442         _print_stats("|(%s-%s)/%s|"%(base.engine, comp.engine, comp.engine), 
     445        _print_stats("|(%s-%s)/%s|" 
     446                     % (base.engine, comp.engine, comp.engine), 
    443447                     relerr) 
    444448 
     
    591595    invalid = [o[1:] for o in flags 
    592596               if o[1:] not in NAME_OPTIONS 
    593                    and not any(o.startswith('-%s='%t) for t in VALUE_OPTIONS)] 
     597               and not any(o.startswith('-%s='%t) for t in VALUE_OPTIONS)] 
    594598    if invalid: 
    595599        print("Invalid options: %s"%(", ".join(invalid))) 
     
    597601 
    598602 
     603    # pylint: disable=bad-whitespace 
    599604    # Interpret the flags 
    600605    opts = { 
     
    651656        elif arg == '-sasview': engines.append(arg[1:]) 
    652657        elif arg == '-edit':    opts['explore'] = True 
     658    # pylint: enable=bad-whitespace 
    653659 
    654660    if len(engines) == 0: 
     
    675681    presets = {} 
    676682    for arg in values: 
    677         k,v = arg.split('=',1) 
     683        k, v = arg.split('=', 1) 
    678684        if k not in pars: 
    679685            # extract base name without polydispersity info 
    680686            s = set(p.split('_pd')[0] for p in pars) 
    681             print("%r invalid; parameters are: %s"%(k,", ".join(sorted(s)))) 
     687            print("%r invalid; parameters are: %s"%(k, ", ".join(sorted(s)))) 
    682688            sys.exit(1) 
    683689        presets[k] = float(v) if not k.endswith('type') else v 
     
    697703 
    698704    # Create the computational engines 
    699     data, _index = make_data(opts) 
     705    data, _ = make_data(opts) 
    700706    if n1: 
    701707        base = make_engine(model_definition, data, engines[0], opts['cutoff']) 
     
    707713        comp = None 
    708714 
     715    # pylint: disable=bad-whitespace 
    709716    # Remember it all 
    710717    opts.update({ 
     
    718725        'engines'   : [base, comp], 
    719726    }) 
     727    # pylint: enable=bad-whitespace 
    720728 
    721729    return opts 
    722730 
    723 def main(): 
    724     opts = parse_opts() 
    725     if opts['explore']: 
    726         explore(opts) 
    727     else: 
    728         compare(opts) 
    729  
    730731def explore(opts): 
     732    """ 
     733    Explore the model using the Bumps GUI. 
     734    """ 
    731735    import wx 
    732736    from bumps.names import FitProblem 
     
    734738 
    735739    problem = FitProblem(Explore(opts)) 
    736     isMac = "cocoa" in wx.version() 
     740    is_mac = "cocoa" in wx.version() 
    737741    app = wx.App() 
    738742    frame = AppFrame(parent=None, title="explore") 
    739     if not isMac: frame.Show() 
     743    if not is_mac: frame.Show() 
    740744    frame.panel.set_model(model=problem) 
    741745    frame.panel.Layout() 
    742746    frame.panel.aui.Split(0, wx.TOP) 
    743     if isMac: frame.Show() 
     747    if is_mac: frame.Show() 
    744748    app.MainLoop() 
    745749 
    746750class Explore(object): 
    747751    """ 
    748     Return a bumps wrapper for a SAS model comparison. 
     752    Bumps wrapper for a SAS model comparison. 
     753 
     754    The resulting object can be used as a Bumps fit problem so that 
     755    parameters can be adjusted in the GUI, with plots updated on the fly. 
    749756    """ 
    750757    def __init__(self, opts): 
     
    787794        Return cost. 
    788795        """ 
     796        # pylint: disable=no-self-use 
    789797        return 0.  # No nllf 
    790798 
     
    804812 
    805813 
     814def main(): 
     815    """ 
     816    Main program. 
     817    """ 
     818    opts = parse_opts() 
     819    if opts['explore']: 
     820        explore(opts) 
     821    else: 
     822        compare(opts) 
     823 
    806824if __name__ == "__main__": 
    807825    main() 
  • sasmodels/compare_many.py

    r6458608 rd15a908  
    11#!/usr/bin/env python 
     2""" 
     3Program to compare results from many random parameter sets for a given model. 
     4 
     5The result is a comma separated value (CSV) table that can be redirected 
     6from standard output into a file and loaded into a spreadsheet. 
     7 
     8The models are compared for each parameter set and if the difference is 
     9greater than expected for that precision, the parameter set is labeled 
     10as bad and written to the output, along with the random seed used to 
     11generate that parameter value.  This seed can be used with :mod:`compare` 
     12to reload and display the details of the model. 
     13""" 
    214from __future__ import print_function 
    315 
     
    1426 
    1527def calc_stats(target, value, index): 
     28    """ 
     29    Calculate statistics between the target value and the computed value. 
     30 
     31    *target* and *value* are the vectors being compared, with the 
     32    difference normalized by *target* to get relative error.  Only 
     33    the elements listed in *index* are used, though index may be 
     34    an empty slice defined by *slice(None, None)*. 
     35 
     36    Returns: 
     37 
     38        *maxrel* the maximum relative difference 
     39 
     40        *rel95* the relative difference with the 5% biggest differences ignored 
     41 
     42        *maxabs* the maximum absolute difference for the 5% biggest differences 
     43 
     44        *maxval* the maximum value for the 5% biggest differences 
     45    """ 
    1646    resid = abs(value-target)[index] 
    1747    relerr = resid/target[index] 
    18     srel = np.argsort(relerr) 
     48    sorted_rel_index = np.argsort(relerr) 
    1949    #p90 = int(len(relerr)*0.90) 
    2050    p95 = int(len(relerr)*0.95) 
    2151    maxrel = np.max(relerr) 
    22     rel95 = relerr[srel[p95]] 
    23     maxabs = np.max(resid[srel[p95:]]) 
    24     maxval = np.max(value[srel[p95:]]) 
    25     return maxrel,rel95,maxabs,maxval 
     52    rel95 = relerr[sorted_rel_index[p95]] 
     53    maxabs = np.max(resid[sorted_rel_index[p95:]]) 
     54    maxval = np.max(value[sorted_rel_index[p95:]]) 
     55    return maxrel, rel95, maxabs, maxval 
    2656 
    2757def print_column_headers(pars, parts): 
     58    """ 
     59    Generate column headers for the differences and for the parameters, 
     60    and print them to standard output. 
     61    """ 
    2862    stats = list('Max rel err|95% rel err|Max abs err above 90% rel|Max value above 90% rel'.split('|')) 
    2963    groups = [''] 
     
    3670    print(','.join('"%s"'%c for c in columns)) 
    3771 
     72# Target 'good' value for various precision levels. 
    3873PRECISION = { 
    3974    'fast': 1e-3, 
     
    4883def compare_instance(name, data, index, N=1, mono=True, cutoff=1e-5, 
    4984                     base='sasview', comp='double'): 
    50     is2D = hasattr(data, 'qx_data') 
     85    r""" 
     86    Compare the model under different calculation engines. 
     87 
     88    *name* is the name of the model. 
     89 
     90    *data* is the data object giving $q, \Delta q$ calculation points. 
     91 
     92    *index* is the active set of points. 
     93 
     94    *N* is the number of comparisons to make. 
     95 
     96    *cutoff* is the polydispersity weight cutoff to make the calculation 
     97    a little bit faster. 
     98 
     99    *base* and *comp* are the names of the calculation engines to compare. 
     100    """ 
     101 
     102    is_2d = hasattr(data, 'qx_data') 
    51103    model_definition = core.load_model_definition(name) 
    52104    pars = get_demo_pars(model_definition) 
    53105    header = ('\n"Model","%s","Count","%d","Dimension","%s"' 
    54               % (name, N, "2D" if is2D else "1D")) 
     106              % (name, N, "2D" if is_2d else "1D")) 
    55107    if not mono: header += ',"Cutoff",%g'%(cutoff,) 
    56108    print(header) 
    57109 
    58     if is2D: 
     110    if is_2d: 
    59111        info = generate.make_info(model_definition) 
    60112        partype = info['partype'] 
     
    69121    # declarations are not available in python 2.7. 
    70122    def try_model(fn, pars): 
     123        """ 
     124        Return the model evaluated at *pars*.  If there is an exception, 
     125        print it and return NaN of the right shape. 
     126        """ 
    71127        try: 
    72128            result = fn(**pars) 
     
    82138        return result 
    83139    def check_model(pars): 
     140        """ 
     141        Run the two calculators against *pars*, returning statistics 
     142        on the differences.  See :func:`calc_stats` for the list of stats. 
     143        """ 
    84144        base_value = try_model(calc_base, pars) 
    85145        comp_value = try_model(calc_comp, pars) 
     
    108168        good = [True] 
    109169        columns = check_model(pars_i) 
    110         columns += [v for _,v in sorted(pars_i.items())] 
     170        columns += [v for _, v in sorted(pars_i.items())] 
    111171        if first: 
    112172            labels = [" vs. ".join((calc_base.engine, calc_comp.engine))] 
     
    121181 
    122182def print_usage(): 
     183    """ 
     184    Print the command usage string. 
     185    """ 
    123186    print("usage: compare_many.py MODEL COUNT (1dNQ|2dNQ) (CUTOFF|mono) (single|double|quad)") 
    124187 
    125188 
    126189def print_models(): 
     190    """ 
     191    Print the list of available models in columns. 
     192    """ 
    127193    print(columnize(MODELS, indent="  ")) 
    128194 
    129195 
    130196def print_help(): 
     197    """ 
     198    Print usage string, the option description and the list of available models. 
     199    """ 
    131200    print_usage() 
    132201    print("""\ 
     
    158227 
    159228def main(): 
    160     if len(sys.argv) not in (6,7): 
     229    """ 
     230    Main program. 
     231    """ 
     232    if len(sys.argv) not in (6, 7): 
    161233        print_help() 
    162234        sys.exit(1) 
     
    182254 
    183255    data, index = make_data({'qmax':1.0, 'is2d':is2D, 'nq':Nq, 'res':0., 
    184                               'accuracy': 'Low', 'view':'log'}) 
     256                             'accuracy': 'Low', 'view':'log'}) 
    185257    model_list = [model] if model != "all" else MODELS 
    186258    for model in model_list: 
  • sasmodels/convert.py

    r29da213 rd15a908  
    4949    """ 
    5050    return dict((p, (v*1e6 if p.endswith('sld') else v)) 
    51                 for p,v in pars.items()) 
     51                for p, v in pars.items()) 
    5252 
    5353def convert_model(name, pars): 
     
    5555    Convert model from old style parameter names to new style. 
    5656    """ 
    57     _,_ = name,pars # lint 
     57    _, _ = name, pars # lint 
    5858    raise NotImplementedError 
    5959    # need to load all new models in order to determine old=>new 
     
    6767    """ 
    6868    return dict((p, (v*1e-6 if p.endswith('sld') else v)) 
    69                 for p,v in pars.items()) 
     69                for p, v in pars.items()) 
    7070 
    7171def _remove_pd(pars, key, name): 
  • sasmodels/core.py

    rcde11f0f rd15a908  
    22Core model handling routines. 
    33""" 
    4 __all__ = ["list_models", "load_model_definition",  "precompile_dll", 
    5            "load_model", "make_kernel", "call_kernel", "call_ER", "call_VR" ] 
     4__all__ = [ 
     5    "list_models", "load_model_definition", "precompile_dll", 
     6    "load_model", "make_kernel", "call_kernel", "call_ER", "call_VR", 
     7    ] 
    68 
    79from os.path import basename, dirname, join as joinpath 
     
    6163 
    6264def isstr(s): 
     65    """ 
     66    Return True if *s* is a string-like object. 
     67    """ 
    6368    try: s + '' 
    6469    except: return False 
     
    99104    # source = open(info['name']+'.cl','r').read() 
    100105 
    101     if (platform=="dll" 
     106    if (platform == "dll" 
    102107            or not HAVE_OPENCL 
    103108            or not kernelcl.environment().has_type(dtype)): 
     
    128133    width = pars.get(name+'_pd', 0.0) 
    129134    nsigma = pars.get(name+'_pd_nsigma', 3.0) 
    130     value,weight = weights.get_weights( 
     135    value, weight = weights.get_weights( 
    131136        disperser, npts, width, nsigma, value, limits, relative) 
    132137    return value, weight / np.sum(weight) 
     
    195200                    for name in info['partype']['volume']] 
    196201        value, weight = dispersion_mesh(vol_pars) 
    197         whole,part = VR(*value) 
     202        whole, part = VR(*value) 
    198203        return np.sum(weight*part)/np.sum(weight*whole) 
    199204 
  • sasmodels/data.py

    r013adb7 rd15a908  
    284284                mdata[mdata <= 0] = masked 
    285285            plt.errorbar(data.x/10, scale*mdata, yerr=data.dy, fmt='.') 
    286             all_positive = all_positive and (mdata>0).all() 
     286            all_positive = all_positive and (mdata > 0).all() 
    287287            some_present = some_present or (mdata.count() > 0) 
    288288 
     
    292292            mtheory[~np.isfinite(mtheory)] = masked 
    293293            if view is 'log': 
    294                 mtheory[mtheory<=0] = masked 
     294                mtheory[mtheory <= 0] = masked 
    295295            plt.plot(data.x/10, scale*mtheory, '-', hold=True) 
    296             all_positive = all_positive and (mtheory>0).all() 
     296            all_positive = all_positive and (mtheory > 0).all() 
    297297            some_present = some_present or (mtheory.count() > 0) 
    298298 
     
    396396        plt.title('theory') 
    397397        h = plt.colorbar() 
    398         h.set_label(r'$\log_{10}I(q)$' if view=='log' 
     398        h.set_label(r'$\log_{10}I(q)$' if view == 'log' 
    399399                    else r'$q^4 I(q)$' if view == 'q4' 
    400400                    else '$I(q)$') 
     
    411411        plt.title('residuals') 
    412412        h = plt.colorbar() 
    413         h.set_label('$\Delta I(q)$') 
     413        h.set_label(r'$\Delta I(q)$') 
    414414 
    415415 
  • sasmodels/resolution.py

    r5258859 rfdc538a  
    135135    """ 
    136136    #print("apply shapes", theory.shape, weight_matrix.shape) 
    137     Iq = np.dot(theory[None,:], weight_matrix) 
     137    Iq = np.dot(theory[None, :], weight_matrix) 
    138138    #print("result shape",Iq.shape) 
    139139    return Iq.flatten() 
     
    153153    # neither trapezoid nor Simpson's rule improved the accuracy. 
    154154    edges = bin_edges(q_calc) 
    155     edges[edges<0.0] = 0.0 # clip edges below zero 
    156     G = erf( (edges[:,None] - q[None,:]) / (sqrt(2.0)*q_width)[None,:] ) 
     155    edges[edges < 0.0] = 0.0 # clip edges below zero 
     156    G = erf((edges[:, None] - q[None, :]) / (sqrt(2.0)*q_width)[None, :]) 
    157157    weights = G[1:] - G[:-1] 
    158     weights /= np.sum(weights, axis=0)[None,:] 
     158    weights /= np.sum(weights, axis=0)[None, :] 
    159159    return weights 
    160160 
     
    287287    # The current algorithm is a midpoint rectangle rule. 
    288288    q_edges = bin_edges(q_calc) # Note: requires q > 0 
    289     q_edges[q_edges<0.0] = 0.0 # clip edges below zero 
     289    q_edges[q_edges < 0.0] = 0.0 # clip edges below zero 
    290290    weights = np.zeros((len(q), len(q_calc)), 'd') 
    291291 
     
    306306            #print(qi - h, qi + h) 
    307307            #print(in_x + abs_x) 
    308             weights[i,:] = (in_x + abs_x) * np.diff(q_edges) / (2*h) 
     308            weights[i, :] = (in_x + abs_x) * np.diff(q_edges) / (2*h) 
    309309        else: 
    310310            L = n_height 
    311311            for k in range(-L, L+1): 
    312                 weights[i,:] += _q_perp_weights(q_edges, qi+k*h/L, w) 
    313             weights[i,:] /= 2*L + 1 
     312                weights[i, :] += _q_perp_weights(q_edges, qi+k*h/L, w) 
     313            weights[i, :] /= 2*L + 1 
    314314 
    315315    return weights.T 
     
    358358    log-scaled data. 
    359359    """ 
    360     if len(x) < 2 or (np.diff(x)<0).any(): 
     360    if len(x) < 2 or (np.diff(x) < 0).any(): 
    361361        raise ValueError("Expected bins to be an increasing set") 
    362362    edges = np.hstack([ 
     
    373373    """ 
    374374    step = np.diff(q) 
    375     index = step>max_step 
     375    index = step > max_step 
    376376    if np.any(index): 
    377377        inserts = [] 
    378         for q_i,step_i in zip(q[:-1][index],step[index]): 
     378        for q_i, step_i in zip(q[:-1][index], step[index]): 
    379379            n = np.ceil(step_i/max_step) 
    380             inserts.extend(q_i + np.arange(1,n)*(step_i/n)) 
     380            inserts.extend(q_i + np.arange(1, n)*(step_i/n)) 
    381381        # Extend a couple of fringes beyond the end of the data 
    382         inserts.extend(q[-1] + np.arange(1,8)*max_step) 
    383         q_calc = np.sort(np.hstack((q,inserts))) 
     382        inserts.extend(q[-1] + np.arange(1, 8)*max_step) 
     383        q_calc = np.sort(np.hstack((q, inserts))) 
    384384    else: 
    385385        q_calc = q 
     
    399399    if q_min < q[0]: 
    400400        if q_min <= 0: q_min = q_min*MIN_Q_SCALE_FOR_NEGATIVE_Q_EXTRAPOLATION 
    401         n_low = np.ceil((q[0]-q_min) / (q[1]-q[0])) if q[1]>q[0] else 15 
     401        n_low = np.ceil((q[0]-q_min) / (q[1]-q[0])) if q[1] > q[0] else 15 
    402402        q_low = np.linspace(q_min, q[0], n_low+1)[:-1] 
    403403    else: 
    404404        q_low = [] 
    405405    if q_max > q[-1]: 
    406         n_high = np.ceil((q_max-q[-1]) / (q[-1]-q[-2])) if q[-1]>q[-2] else 15 
     406        n_high = np.ceil((q_max-q[-1]) / (q[-1]-q[-2])) if q[-1] > q[-2] else 15 
    407407        q_high = np.linspace(q[-1], q_max, n_high+1)[1:] 
    408408    else: 
     
    452452        if q_min < 0: q_min = q[0]*MIN_Q_SCALE_FOR_NEGATIVE_Q_EXTRAPOLATION 
    453453        n_low = log_delta_q * (log(q[0])-log(q_min)) 
    454         q_low  = np.logspace(log10(q_min), log10(q[0]), np.ceil(n_low)+1)[:-1] 
     454        q_low = np.logspace(log10(q_min), log10(q[0]), np.ceil(n_low)+1)[:-1] 
    455455    else: 
    456456        q_low = [] 
     
    489489    that make it slow to evaluate but give it good accuracy. 
    490490    """ 
    491     from scipy.integrate import romberg, dblquad 
     491    from scipy.integrate import romberg 
    492492 
    493493    if any(k not in form.info['defaults'] for k in pars.keys()): 
     
    520520            result[i] = r/(2*h) 
    521521        else: 
    522             w_grid = np.linspace(0, w, 21)[None,:] 
    523             h_grid = np.linspace(-h, h, 23)[:,None] 
     522            w_grid = np.linspace(0, w, 21)[None, :] 
     523            h_grid = np.linspace(-h, h, 23)[:, None] 
    524524            u = sqrt((qi+h_grid)**2 + w_grid**2) 
    525525            Iu = np.interp(u, q_calc, Iq) 
    526526            #print(np.trapz(Iu, w_grid, axis=1)) 
    527             Is = np.trapz(np.trapz(Iu, w_grid, axis=1), h_grid[:,0]) 
     527            Is = np.trapz(np.trapz(Iu, w_grid, axis=1), h_grid[:, 0]) 
    528528            result[i] = Is / (2*h*w) 
    529             """ 
    530             r, err = dblquad(_int_wh, -h, h, lambda h: 0., lambda h: w, 
    531                              args=(qi,)) 
    532             result[i] = r/(w*2*h) 
    533             """ 
     529            # from scipy.integrate import dblquad 
     530            # r, err = dblquad(_int_wh, -h, h, lambda h: 0., lambda h: w, 
     531            #                  args=(qi,)) 
     532            # result[i] = r/(w*2*h) 
    534533 
    535534    # r should be [float, ...], but it is [array([float]), array([float]),...] 
     
    553552 
    554553    _fn = lambda q, q0, dq: eval_form(q, form, pars)*gaussian(q, q0, dq) 
    555     r = [romberg(_fn, max(qi-nsigma*dqi,1e-10*q[0]), qi+nsigma*dqi, args=(qi, dqi), 
    556                  divmax=100, vec_func=True, tol=0, rtol=1e-8) 
    557          for qi,dqi in zip(q,q_width)] 
     554    r = [romberg(_fn, max(qi-nsigma*dqi, 1e-10*q[0]), qi+nsigma*dqi, 
     555                 args=(qi, dqi), divmax=100, vec_func=True, tol=0, rtol=1e-8) 
     556         for qi, dqi in zip(q, q_width)] 
    558557    return np.asarray(r).flatten() 
    559558 
     
    595594        theory = self.Iq(resolution.q_calc) 
    596595        output = resolution.apply(theory) 
    597         answer = [ 9.0618, 8.6402, 8.1187, 7.1392, 6.1528, 
    598                    5.5555, 4.5584, 3.5606, 2.5623, 2.0000 ] 
     596        answer = [ 
     597            9.0618, 8.6402, 8.1187, 7.1392, 6.1528, 
     598            5.5555, 4.5584, 3.5606, 2.5623, 2.0000, 
     599            ] 
    599600        np.testing.assert_allclose(output, answer, atol=1e-4) 
    600601 
     
    608609        theory = 1000*self.Iq(resolution.q_calc**4) 
    609610        output = resolution.apply(theory) 
    610         answer = [ 8.85785, 8.43012, 7.92687, 6.94566, 6.03660, 
    611                    5.40363, 4.40655, 3.40880, 2.41058, 2.00000 ] 
     611        answer = [ 
     612            8.85785, 8.43012, 7.92687, 6.94566, 6.03660, 
     613            5.40363, 4.40655, 3.40880, 2.41058, 2.00000, 
     614            ] 
    612615        np.testing.assert_allclose(output, answer, atol=1e-4) 
    613616 
     
    620623        theory = self.Iq(resolution.q_calc) 
    621624        output = resolution.apply(theory) 
    622         answer = [ 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0 ] 
     625        answer = [ 
     626            11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 
     627            ] 
    623628        np.testing.assert_allclose(output, answer, atol=1e-4) 
    624629 
     
    632637        theory = self.Iq(resolution.q_calc) 
    633638        output = resolution.apply(theory) 
    634         answer = [ 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0 ] 
     639        answer = [ 
     640            11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 
     641            ] 
    635642        np.testing.assert_allclose(output, answer, atol=1e-4) 
    636643 
     
    652659        theory = 12.0-1000.0*resolution.q_calc 
    653660        output = resolution.apply(theory) 
    654         answer = [ 10.44785079, 9.84991299, 8.98101708, 
    655                   7.99906585, 6.99998311, 6.00001689, 
    656                   5.00093415, 4.01898292, 3.15008701, 2.55214921] 
     661        answer = [ 
     662            10.44785079, 9.84991299, 8.98101708, 
     663            7.99906585, 6.99998311, 6.00001689, 
     664            5.00093415, 4.01898292, 3.15008701, 2.55214921, 
     665            ] 
    657666        np.testing.assert_allclose(output, answer, atol=1e-8) 
    658667 
     
    783792            } 
    784793        form = load_model('ellipsoid', dtype='double') 
    785         q = np.logspace(log10(4e-5),log10(2.5e-2), 68) 
     794        q = np.logspace(log10(4e-5), log10(2.5e-2), 68) 
    786795        width, height = 0.117, 0. 
    787796        resolution = Slit1D(q, width=width, height=height) 
     
    10711080    #h = 0.0277790 
    10721081    resolution = Slit1D(q, w, h) 
    1073     _eval_demo_1d(resolution, title="(%g,%g) Slit Resolution"%(w,h)) 
     1082    _eval_demo_1d(resolution, title="(%g,%g) Slit Resolution"%(w, h)) 
    10741083 
    10751084def demo(): 
  • sasmodels/sesans.py

    r3c56da87 r384d114  
    1212import numpy as np 
    1313from numpy import pi, exp 
    14  
    1514from scipy.special import jv as besselj 
    1615 
    17 def make_q(q_zmax, Rmax): 
     16def make_q(q_max, Rmax): 
     17    r""" 
     18    Return a $q$ vector suitable for SESANS covering from $2\pi/ (10 R_{\max})$ 
     19    to $q_max$. 
     20    """ 
    1821    q_min = dq = 0.1 * 2*pi / Rmax 
    19     #q_min = 0.00003 
    20     return np.arange(q_min, q_zmax, dq) 
    21  
    22 # TODO: dead code; for now the call to the hankel transform happens in BumpsModel 
    23 class SesansCalculator: 
    24     def __init__(self, kernel, q_zmax, Rmax, SElength, wavelength, thickness): 
    25         self._set_kernel(kernel, q_zmax, Rmax) 
    26         self.SElength = SElength 
    27         self.wavelength = wavelength 
    28         self.thickness = thickness 
    29  
    30     def _set_kernel(self, kernel, q_zmax, Rmax): 
    31         kernel_input = kernel.make_input([make_q(q_zmax, Rmax)]) 
    32         self.sans_calculator = kernel(kernel_input) 
    33  
    34     def __call__(self, pars, pd_pars, cutoff=1e-5): 
    35         Iq = self.sans_calculator(pars, pd_pars, cutoff) 
    36         P = hankel(self.SElength, self.wavelength, self.thickness, self.q, Iq) 
    37         self.Iq = Iq 
    38         return P 
     22    return np.arange(q_min, q_max, dq) 
    3923 
    4024def hankel(SElength, wavelength, thickness, q, Iq): 
    41     """ 
     25    r""" 
    4226    Compute the expected SESANS polarization for a given SANS pattern. 
    4327 
    44     Uses the hankel transform followed by the exponential.  The values 
    45     for zz (or spin echo length, or delta), wavelength and sample thickness 
    46     information should come from the dataset.  *q* should be chosen such 
    47     that the oscillations in *I(q)* are well sampled (e.g., 5*2*pi/d_max). 
     28    Uses the hankel transform followed by the exponential.  The values for *zz* 
     29    (or spin echo length, or delta), wavelength and sample thickness should 
     30    come from the dataset.  $q$ should be chosen such that the oscillations 
     31    in $I(q)$ are well sampled (e.g., $5 \cdot 2 \pi/d_{\max}$). 
    4832 
    49     *SElength* [A] is the set of z points at which to compute the hankel transform 
     33    *SElength* [A] is the set of $z$ points at which to compute the 
     34    Hankel transform 
    5035 
    5136    *wavelength* [m]  is the wavelength of each individual point *zz* 
     
    5338    *thickness* [cm] is the sample thickness. 
    5439 
    55     *q* [A^{-1}] is the set of q points at which the model has been computed. 
    56     These should be equally spaced. 
     40    *q* [A$^{-1}$] is the set of $q$ points at which the model has been 
     41    computed. These should be equally spaced. 
    5742 
    58     *I* [cm^{-1}] is the value of the SANS model at *q* 
     43    *I* [cm$^{-1}$] is the value of the SANS model at *q* 
    5944    """ 
    6045    G = np.zeros(len(SElength), 'd') 
    6146    for i in range(len(SElength)): 
    62         integr = besselj(0,q*SElength[i])*Iq*q 
     47        integr = besselj(0, q*SElength[i])*Iq*q 
    6348        G[i] = np.sum(integr) 
    64     dq=(q[1]-q[0])*1e10   # [m^-1] step size in q, needed for integration 
    65     G *= dq*1e10*2*pi # integr step, conver q into [m**-1] and 2 pi circle integr 
     49 
     50    # [m^-1] step size in q, needed for integration 
     51    dq=(q[1]-q[0])*1e10 
     52 
     53    # integration step, convert q into [m**-1] and 2 pi circle integration 
     54    G *= dq*1e10*2*pi 
     55 
    6656    P = exp(thickness*wavelength**2/(4*pi**2)*(G-G[0])) 
    6757 
Note: See TracChangeset for help on using the changeset viewer.