Changeset aaf75b6 in sasmodels
- Timestamp:
- Mar 30, 2016 2:39:42 PM (9 years ago)
- Branches:
- master, core_shell_microgels, costrafo411, magnetic_model, release_v0.94, release_v0.95, ticket-1257-vesicle-product, ticket_1156, ticket_1265_superball, ticket_822_more_unit_tests
- Children:
- 885ee37
- Parents:
- d0876c9d (diff), 364d8f7 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent. - Files:
-
- 7 added
- 39 edited
Legend:
- Unmodified
- Added
- Removed
-
example/fit.py
r7cf2cfd r1182da5 31 31 model = Model(kernel, 32 32 scale=0.08, 33 r polar=15, requatorial=800,34 sld=.291, s olvent_sld=7.105,33 r_polar=15, r_equatorial=800, 34 sld=.291, sld_solvent=7.105, 35 35 background=0, 36 36 theta=90, phi=phi, 37 37 theta_pd=15, theta_pd_n=40, theta_pd_nsigma=3, 38 r polar_pd=0.222296, rpolar_pd_n=1, rpolar_pd_nsigma=0,39 r equatorial_pd=.000128, requatorial_pd_n=1, requatorial_pd_nsigma=0,38 r_polar_pd=0.222296, r_polar_pd_n=1, r_polar_pd_nsigma=0, 39 r_equatorial_pd=.000128, r_equatorial_pd_n=1, r_equatorial_pd_nsigma=0, 40 40 phi_pd=0, phi_pd_n=20, phi_pd_nsigma=3, 41 41 ) … … 43 43 44 44 # SET THE FITTING PARAMETERS 45 model.r polar.range(15, 1000)46 model.r equatorial.range(15, 1000)45 model.r_polar.range(15, 1000) 46 model.r_equatorial.range(15, 1000) 47 47 model.theta_pd.range(0, 360) 48 48 model.background.range(0,1000) … … 81 81 pars = dict( 82 82 scale=.01, background=35, 83 sld=.291, s olvent_sld=5.77,83 sld=.291, sld_solvent=5.77, 84 84 radius=250, length=178, 85 85 theta=90, phi=phi, … … 105 105 model = Model(kernel, 106 106 scale= .031, radius=19.5, thickness=30, length=22, 107 core_sld=7.105, shell_sld=.291, solvent_sld=7.105,107 sld_core=7.105, sld_shell=.291, sdl_solvent=7.105, 108 108 background=0, theta=0, phi=phi, 109 109 … … 132 132 model = Model(kernel, 133 133 scale=.08, radius=20, cap_radius=40, length=400, 134 sld _capcyl=1, sld_solv=6.3,134 sld=1, sld_solvent=6.3, 135 135 background=0, theta=0, phi=phi, 136 136 radius_pd=.1, radius_pd_n=5, radius_pd_nsigma=3, … … 147 147 model = Model(kernel, 148 148 scale=0.08, req_minor=15, req_major=20, rpolar=500, 149 sld Ell=7.105, solvent_sld=.291,149 sld=7.105, solvent_sld=.291, 150 150 background=5, theta=0, phi=phi, psi=0, 151 151 theta_pd=20, theta_pd_n=40, theta_pd_nsigma=3, -
example/model.py
r15d2285 r1182da5 17 17 model = Model(kernel, 18 18 scale=0.08, 19 r polar=15, requatorial=800,20 sld=.291, s olvent_sld=7.105,19 r_polar=15, r_equatorial=800, 20 sld=.291, sld_solvent=7.105, 21 21 background=0, 22 22 theta=90, phi=0, 23 23 theta_pd=15, theta_pd_n=40, theta_pd_nsigma=3, 24 r polar_pd=0.222296, rpolar_pd_n=1, rpolar_pd_nsigma=0,25 r equatorial_pd=.000128, requatorial_pd_n=1, requatorial_pd_nsigma=0,24 r_polar_pd=0.222296, r_polar_pd_n=1, r_polar_pd_nsigma=0, 25 r_equatorial_pd=.000128, r_equatorial_pd_n=1, r_equatorial_pd_nsigma=0, 26 26 phi_pd=0, phi_pd_n=20, phi_pd_nsigma=3, 27 27 ) 28 28 29 29 # SET THE FITTING PARAMETERS 30 model.r polar.range(15, 1000)31 model.r equatorial.range(15, 1000)30 model.r_polar.range(15, 1000) 31 model.r_equatorial.range(15, 1000) 32 32 model.theta_pd.range(0, 360) 33 33 model.background.range(0,1000) -
sasmodels/bumps_model.py
rd19962c raaf75b6 219 219 """ 220 220 data, theory, resid = self._data, self.theory(), self.residuals() 221 plot_theory(data, theory, resid, view )221 plot_theory(data, theory, resid, view, Iq_calc = self.Iq_calc) 222 222 223 223 def simulate_data(self, noise=None): -
sasmodels/compare.py
rd1c4760 raaf75b6 468 468 data = empty_data2D(np.linspace(-qmax, qmax, nq), resolution=res) 469 469 data.accuracy = opts['accuracy'] 470 set_beam_stop(data, 0.00 4)470 set_beam_stop(data, 0.0004) 471 471 index = ~data.mask 472 472 else: -
sasmodels/data.py
r092cb3c rd6f5da6 41 41 Load data using a sasview loader. 42 42 """ 43 from sas. dataloader.loader import Loader43 from sas.sascalc.dataloader.loader import Loader 44 44 loader = Loader() 45 45 data = loader.load(filename) … … 322 322 323 323 def plot_theory(data, theory, resid=None, view='log', 324 use_data=True, limits=None ):324 use_data=True, limits=None, Iq_calc=None): 325 325 """ 326 326 Plot theory calculation. … … 343 343 _plot_result2D(data, theory, resid, view, use_data, limits=limits) 344 344 else: 345 _plot_result1D(data, theory, resid, view, use_data, limits=limits) 345 _plot_result1D(data, theory, resid, view, use_data, 346 limits=limits, Iq_calc=Iq_calc) 346 347 347 348 … … 366 367 367 368 @protect 368 def _plot_result1D(data, theory, resid, view, use_data, limits=None): 369 def _plot_result1D(data, theory, resid, view, use_data, 370 limits=None, Iq_calc=None): 369 371 """ 370 372 Plot the data and residuals for 1D data. … … 376 378 use_theory = theory is not None 377 379 use_resid = resid is not None 378 num_plots = (use_data or use_theory) + use_resid 380 use_calc = use_theory and Iq_calc is not None 381 num_plots = (use_data or use_theory) + use_calc + use_resid 382 379 383 380 384 scale = data.x**4 if view == 'q4' else 1.0 381 385 382 386 if use_data or use_theory: 387 if num_plots > 1: 388 plt.subplot(1, num_plots, 1) 389 383 390 #print(vmin, vmax) 384 391 all_positive = True … … 399 406 if view is 'log': 400 407 mtheory[mtheory <= 0] = masked 401 plt.plot(data.x /10, scale*mtheory, '-', hold=True)408 plt.plot(data.x, scale*mtheory, '-', hold=True) 402 409 all_positive = all_positive and (mtheory > 0).all() 403 410 some_present = some_present or (mtheory.count() > 0) … … 406 413 plt.ylim(*limits) 407 414 408 if num_plots > 1:409 plt.subplot(1, num_plots, 1)410 415 plt.xscale('linear' if not some_present else view) 411 416 plt.yscale('linear' … … 415 420 plt.ylabel('$I(q)$') 416 421 422 if use_calc: 423 # Only have use_calc if have use_theory 424 plt.subplot(1, num_plots, 2) 425 qx, qy, Iqxy = Iq_calc 426 plt.pcolormesh(qx, qy[qy>0], np.log10(Iqxy[qy>0,:])) 427 plt.xlabel("$q_x$/A$^{-1}$") 428 plt.xlabel("$q_y$/A$^{-1}$") 429 plt.xscale('log') 430 plt.yscale('log') 431 #plt.axis('equal') 432 417 433 if use_resid: 418 434 mresid = masked_array(resid, data.mask.copy()) … … 421 437 422 438 if num_plots > 1: 423 plt.subplot(1, num_plots, (use_data or use_theory) + 1)439 plt.subplot(1, num_plots, use_calc + 2) 424 440 plt.plot(data.x, mresid, '-') 425 441 plt.xlabel("$q$/A$^{-1}$") … … 561 577 plottable = masked_array(image, ~valid | data.mask) 562 578 # Divide range by 10 to convert from angstroms to nanometers 563 xmin, xmax = min(data.qx_data) /10, max(data.qx_data)/10564 ymin, ymax = min(data.qy_data) /10, max(data.qy_data)/10579 xmin, xmax = min(data.qx_data), max(data.qx_data) 580 ymin, ymax = min(data.qy_data), max(data.qy_data) 565 581 if view == 'log': 566 582 vmin, vmax = np.log10(vmin), np.log10(vmax) 567 583 plt.imshow(plottable.reshape(len(data.x_bins), len(data.y_bins)), 568 interpolation='nearest', aspect=1, origin=' upper',584 interpolation='nearest', aspect=1, origin='lower', 569 585 extent=[xmin, xmax, ymin, ymax], vmin=vmin, vmax=vmax) 570 plt.xlabel("$q_x$/ nm$^{-1}$")571 plt.ylabel("$q_y$/ nm$^{-1}$")586 plt.xlabel("$q_x$/A$^{-1}$") 587 plt.ylabel("$q_y$/A$^{-1}$") 572 588 return vmin, vmax 573 589 -
sasmodels/direct_model.py
r60eab2a raaf75b6 25 25 import numpy as np 26 26 27 from .core import make_kernel 27 28 from .core import call_kernel, call_ER_VR 28 29 from . import sesans … … 62 63 elif hasattr(data, 'qx_data'): 63 64 self.data_type = 'Iqxy' 65 elif getattr(data, 'oriented', False): 66 self.data_type = 'Iq-oriented' 64 67 else: 65 68 self.data_type = 'Iq' … … 113 116 and getattr(data, 'dxw', None) is not None): 114 117 res = resolution.Slit1D(data.x[index], 115 width=data.dxh[index],116 height=data.dxw[index])118 qx_width=data.dxw[index], 119 qy_width=data.dxl[index]) 117 120 else: 118 121 res = resolution.Perfect1D(data.x[index]) … … 120 123 #self._theory = np.zeros_like(self.Iq) 121 124 q_vectors = [res.q_calc] 125 q_mono = [] 126 elif self.data_type == 'Iq-oriented': 127 index = (data.x >= data.qmin) & (data.x <= data.qmax) 128 if data.y is not None: 129 index &= ~np.isnan(data.y) 130 Iq = data.y[index] 131 dIq = data.dy[index] 132 else: 133 Iq, dIq = None, None 134 if (getattr(data, 'dxl', None) is None 135 or getattr(data, 'dxw', None) is None): 136 raise ValueError("oriented sample with 1D data needs slit resolution") 137 138 res = resolution2d.Slit2D(data.x[index], 139 qx_width=data.dxw[index], 140 qy_width=data.dxl[index]) 141 q_vectors = res.q_calc 122 142 q_mono = [] 123 143 else: … … 139 159 y = Iq + np.random.randn(*dy.shape) * dy 140 160 self.Iq = y 141 if self.data_type == 'Iq':161 if self.data_type in ('Iq', 'Iq-oriented'): 142 162 self._data.dy[self.index] = dy 143 163 self._data.y[self.index] = y … … 151 171 def _calc_theory(self, pars, cutoff=0.0): 152 172 if self._kernel is None: 153 self._kernel = self._model.make_kernel(self._kernel_inputs) 154 self._kernel_mono = ( 155 self._model.make_kernel(self._kernel_mono_inputs) 156 if self._kernel_mono_inputs else None 157 ) 173 self._kernel = make_kernel(self._model, self._kernel_inputs) # pylint: disable=attribute-dedata_type 174 self._kernel_mono = (make_kernel(self._model, self._kernel_mono_inputs) 175 if self._kernel_mono_inputs else None) 158 176 159 177 Iq_calc = call_kernel(self._kernel, pars, cutoff=cutoff) 160 Iq_mono = (call_kernel(self._kernel_mono, pars, mono=True)161 if self._kernel_mono_inputs else None)178 # TODO: may want to plot the raw Iq for other than oriented usans 179 self.Iq_calc = None 162 180 if self.data_type == 'sesans': 181 Iq_mono = (call_kernel(self._kernel_mono, pars, mono=True) 182 if self._kernel_mono_inputs else None) 163 183 result = sesans.transform(self._data, 164 184 self._kernel_inputs[0], Iq_calc, … … 166 186 else: 167 187 result = self.resolution.apply(Iq_calc) 188 if hasattr(self.resolution, 'nx'): 189 self.Iq_calc = ( 190 self.resolution.qx_calc, self.resolution.qy_calc, 191 np.reshape(Iq_calc, (self.resolution.ny, self.resolution.nx)) 192 ) 168 193 return result 169 194 -
sasmodels/models/adsorbed_layer.py
rc079f50 r1d4d409 98 98 'density_shell': 0.7, 'radius': 500.0, 'volfraction': 0.14, 99 99 'sld_shell': 1.5, 'sld_solvent': 6.3, 'background': 0.0}, 100 [0.0106939, 0. 469418], [73.741, 9.65391e-53]],100 [0.0106939, 0.1], [73.741, 4.51684e-3]], 101 101 ] 102 102 # ADDED by: SMK ON: 16Mar2016 convert from sasview, check vs SANDRA, 18Mar2016 RKH some edits & renaming -
sasmodels/models/core_shell_ellipsoid.py
r65bf704 r27fade8 117 117 source = ["lib/sph_j1c.c", "lib/gfn.c", "lib/gauss76.c", "core_shell_ellipsoid.c"] 118 118 119 def ER(equat_ shell, polar_shell):119 def ER(equat_core, polar_core, equat_shell, polar_shell): 120 120 """ 121 121 Returns the effective radius used in the S*P calculation … … 123 123 import numpy as np 124 124 from .ellipsoid import ER as ellipsoid_ER 125 return ellipsoid_ER( rpolar, equat_shell)125 return ellipsoid_ER(polar_shell, equat_shell) 126 126 127 127 -
sasmodels/models/core_shell_ellipsoid_xt.py
r65bf704 r27fade8 107 107 Returns the effective radius used in the S*P calculation 108 108 """ 109 import numpy as np110 109 from .ellipsoid import ER as ellipsoid_ER 111 return ellipsoid_ER(equat_core*x_core + t_shell*x_polar_shell, equat_shell + t_shell) 110 polar_outer = equat_core*x_core + t_shell*x_polar_shell 111 equat_outer = equat_core + t_shell 112 return ellipsoid_ER(polar_outer, equat_outer) 112 113 113 114 -
sasmodels/models/core_shell_sphere.py
r55b2b232 r3c6d5bc 99 99 100 100 tests = [[{'radius': 20.0, 'thickness': 10.0}, 'ER', 30.0], 101 [{'radius': 20.0, 'thickness': 10.0}, 'VR', 0.703703704], 101 # TODO: VR test suppressed until we sort out new product model 102 # and determine what to do with volume ratio. 103 #[{'radius': 20.0, 'thickness': 10.0}, 'VR', 0.703703704], 102 104 103 105 # The SasView test result was 0.00169, with a background of 0.001 -
sasmodels/models/onion.py
rce896fd raaf75b6 290 290 category = "shape:sphere" 291 291 292 # TODO: n is a volume parameter that is not polydisperse 292 293 293 294 # ["name", "units", default, [lower, upper], "type","description"], … … 364 365 365 366 def ER(core_radius, n, thickness): 366 return np.sum(thickness[:n ], axis=0) + core_radius367 return np.sum(thickness[:n[0]], axis=0) + core_radius 367 368 368 369 def VR(core_radius, n, thickness): 369 return 1.0 370 return 1.0, 1.0 370 371 371 372 demo = { -
sasmodels/models/sphere.py
rd7028dc r364d8f7 86 86 def ER(radius): 87 87 """ 88 88 Return equivalent radius (ER) 89 89 """ 90 90 return radius -
sasmodels/models/squarewell.py
r2f63032 rb812972 55 55 category = "structure-factor" 56 56 structure_factor = True 57 single = False 57 58 58 59 #single = False -
sasmodels/product.py
rce896fd raaf75b6 20 20 VOLFRACTION=3 21 21 22 # TODO: core_shell_sphere model has suppressed the volume ratio calculation 23 # revert it after making VR and ER available at run time as constraints. 22 24 def make_product_info(p_info, s_info): 23 25 """ -
sasmodels/resolution.py
re9b1663d raaf75b6 82 82 self.q_calc = (pinhole_extend_q(q, q_width, nsigma=nsigma) 83 83 if q_calc is None else np.sort(q_calc)) 84 self.weight_matrix = pinhole_resolution( 85 self.q_calc, self.q,np.maximum(q_width, MINIMUM_RESOLUTION))84 self.weight_matrix = pinhole_resolution(self.q_calc, self.q, 85 np.maximum(q_width, MINIMUM_RESOLUTION)) 86 86 87 87 def apply(self, theory): … … 91 91 class Slit1D(Resolution): 92 92 """ 93 Slit aperture with a complicatedresolution function.93 Slit aperture with resolution function. 94 94 95 95 *q* points at which the data is measured. 96 96 97 * qx_width* slit width98 99 * qy_height* slit height97 *dqx* slit width in qx 98 99 *dqy* slit height in qy 100 100 101 101 *q_calc* is the list of points to calculate, or None if this should … … 104 104 The *weight_matrix* is computed by :func:`slit1d_resolution` 105 105 """ 106 def __init__(self, q, width, height=0., q_calc=None):107 # Remember what width/ heightwas used even though we won't need them106 def __init__(self, q, qx_width, qy_width=0., q_calc=None): 107 # Remember what width/dqy was used even though we won't need them 108 108 # after the weight matrix is constructed 109 self. width, self.height = width, height109 self.qx_width, self.qy_width = qx_width, qy_width 110 110 111 111 # Allow independent resolution on each point even though it is not 112 112 # needed in practice. 113 if np.isscalar( width):114 width = np.ones(len(q))*width113 if np.isscalar(qx_width): 114 qx_width = np.ones(len(q))*qx_width 115 115 else: 116 width = np.asarray(width)117 if np.isscalar( height):118 height = np.ones(len(q))*height116 qx_width = np.asarray(qx_width) 117 if np.isscalar(qy_width): 118 qy_width = np.ones(len(q))*qy_width 119 119 else: 120 height = np.asarray(height)120 qy_width = np.asarray(qy_width) 121 121 122 122 self.q = q.flatten() 123 self.q_calc = slit_extend_q(q, width, height) \123 self.q_calc = slit_extend_q(q, qx_width, qy_width) \ 124 124 if q_calc is None else np.sort(q_calc) 125 125 self.weight_matrix = \ 126 slit_resolution(self.q_calc, self.q, width, height)126 slit_resolution(self.q_calc, self.q, qx_width, qy_width) 127 127 128 128 def apply(self, theory): … … 396 396 """ 397 397 q = np.sort(q) 398 if q_min < q[0]:398 if q_min + 2*MINIMUM_RESOLUTION < q[0]: 399 399 if q_min <= 0: q_min = q_min*MIN_Q_SCALE_FOR_NEGATIVE_Q_EXTRAPOLATION 400 400 n_low = np.ceil((q[0]-q_min) / (q[1]-q[0])) if q[1] > q[0] else 15 … … 402 402 else: 403 403 q_low = [] 404 if q_max > q[-1]:404 if q_max - 2*MINIMUM_RESOLUTION > q[-1]: 405 405 n_high = np.ceil((q_max-q[-1]) / (q[-1]-q[-2])) if q[-1] > q[-2] else 15 406 406 q_high = np.linspace(q[-1], q_max, n_high+1)[1:] … … 596 596 Slit smearing with perfect resolution. 597 597 """ 598 resolution = Slit1D(self.x, width=0, height=0, q_calc=self.x)598 resolution = Slit1D(self.x, qx_width=0, qy_width=0, q_calc=self.x) 599 599 theory = self.Iq(resolution.q_calc) 600 600 output = resolution.apply(theory) … … 606 606 Slit smearing with height 0.005 607 607 """ 608 resolution = Slit1D(self.x, width=0, height=0.005, q_calc=self.x)608 resolution = Slit1D(self.x, qx_width=0, qy_width=0.005, q_calc=self.x) 609 609 theory = self.Iq(resolution.q_calc) 610 610 output = resolution.apply(theory) … … 621 621 """ 622 622 q = np.logspace(-4, -1, 10) 623 resolution = Slit1D(q, width=0.2, height=np.inf)623 resolution = Slit1D(q, qx_width=0.2, qy_width=np.inf) 624 624 theory = 1000*self.Iq(resolution.q_calc**4) 625 625 output = resolution.apply(theory) … … 635 635 Slit smearing with width 0.0002 636 636 """ 637 resolution = Slit1D(self.x, width=0.0002, height=0, q_calc=self.x)637 resolution = Slit1D(self.x, qx_width=0.0002, qy_width=0, q_calc=self.x) 638 638 theory = self.Iq(resolution.q_calc) 639 639 output = resolution.apply(theory) … … 648 648 Slit smearing with width > 100*height. 649 649 """ 650 resolution = Slit1D(self.x, width=0.0002, height=0.000001,650 resolution = Slit1D(self.x, qx_width=0.0002, qy_width=0.000001, 651 651 q_calc=self.x) 652 652 theory = self.Iq(resolution.q_calc) … … 773 773 data = np.loadtxt(data_string.split('\n')).T 774 774 q, delta_qv, _, answer = data 775 resolution = Slit1D(q, width=delta_qv, height=0)775 resolution = Slit1D(q, qx_width=delta_qv, qy_width=0) 776 776 output = self._eval_sphere(pars, resolution) 777 777 # TODO: eliminate Igor test since it is too inaccurate to be useful. … … 793 793 q_calc = slit_extend_q(interpolate(q, 2*np.pi/radius/20), 794 794 delta_qv[0], 0.) 795 resolution = Slit1D(q, width=delta_qv, height=0, q_calc=q_calc)795 resolution = Slit1D(q, qx_width=delta_qv, qy_width=0, q_calc=q_calc) 796 796 output = self._eval_sphere(pars, resolution) 797 797 # TODO: relative error should be lower … … 805 805 pars = { 806 806 'scale':0.05, 807 'r polar':500, 'requatorial':15000,808 'sld':6, 's olvent_sld': 1,807 'r_polar':500, 'r_equatorial':15000, 808 'sld':6, 'sld_solvent': 1, 809 809 } 810 810 form = load_model('ellipsoid', dtype='double') 811 811 q = np.logspace(log10(4e-5), log10(2.5e-2), 68) 812 812 width, height = 0.117, 0. 813 resolution = Slit1D(q, width=width, height=height)813 resolution = Slit1D(q, qx_width=width, qy_width=height) 814 814 answer = romberg_slit_1d(q, width, height, form, pars) 815 815 output = resolution.apply(eval_form(resolution.q_calc, form, pars)) … … 837 837 TEST_PARS_PINHOLE_SPHERE = { 838 838 'scale': 1.0, 'background': 0.01, 839 'radius': 60.0, 'sld': 1, 's olvent_sld': 6.3,839 'radius': 60.0, 'sld': 1, 'sld_solvent': 6.3, 840 840 } 841 841 # Q, dQ, I(Q) calculated by NIST Igor SANS package … … 946 946 TEST_PARS_SLIT_SPHERE = { 947 947 'scale': 0.01, 'background': 0.01, 948 'radius': 60000, 'sld': 1, 's olvent_sld': 4,948 'radius': 60000, 'sld': 1, 'sld_solvent': 4, 949 949 } 950 950 # Q dQ I(Q) I_smeared(Q) … … 1047 1047 1048 1048 if name == 'cylinder': 1049 pars = {'length':210, 'radius':500 }1049 pars = {'length':210, 'radius':500, 'background': 0} 1050 1050 elif name == 'teubner_strey': 1051 1051 pars = {'a2':0.003, 'c1':-1e4, 'c2':1e10, 'background':0.312643} … … 1054 1054 elif name == 'ellipsoid': 1055 1055 pars = { 1056 'scale':0.05, 1057 'r polar':500, 'requatorial':15000,1058 'sld':6, 's olvent_sld': 1,1056 'scale':0.05, 'background': 0, 1057 'r_polar':500, 'r_equatorial':15000, 1058 'sld':6, 'sld_solvent': 1, 1059 1059 } 1060 1060 else: … … 1068 1068 1069 1069 if isinstance(resolution, Slit1D): 1070 width, height = resolution. width, resolution.height1070 width, height = resolution.dqx, resolution.dqy 1071 1071 Iq_romb = romberg_slit_1d(resolution.q, width, height, model, pars) 1072 1072 else: -
sasmodels/resolution2d.py
r823e620 rd6f5da6 10 10 from numpy import pi, cos, sin, sqrt 11 11 12 from . import resolution 12 13 from .resolution import Resolution 13 14 … … 20 21 NR = {'xhigh':10, 'high':5, 'med':5, 'low':3} 21 22 NPHI = {'xhigh':20, 'high':12, 'med':6, 'low':4} 23 24 ## Defaults 25 N_SLIT_PERP = {'xhigh':1000, 'high':500, 'med':200, 'low':50} 26 N_SLIT_PERP_DOC = ", ".join("%s=%d"%(name,value) for value,name in 27 sorted((2*v+1,k) for k,v in N_SLIT_PERP.items())) 22 28 23 29 class Pinhole2D(Resolution): … … 167 173 else: 168 174 return theory 175 176 177 class Slit2D(Resolution): 178 """ 179 Slit aperture with resolution function on an oriented sample. 180 181 *q* points at which the data is measured. 182 183 *qx_width* slit width in qx 184 185 *qy_width* slit height in qy; current implementation requires a fixed 186 qy_width for all q points. 187 188 *q_calc* is the list of q points to calculate, or None if this 189 should be estimated from the *q* and *qx_width*. 190 191 *accuracy* determines the number of *qy* points to compute for each *q*. 192 The values are stored in sasmodels.resolution2d.N_SLIT_PERP. The default 193 values are: %s 194 """ 195 __doc__ = __doc__%N_SLIT_PERP_DOC 196 def __init__(self, q, qx_width, qy_width=0., q_calc=None, accuracy='low'): 197 # Remember what q and width was used even though we won't need them 198 # after the weight matrix is constructed 199 self.q, self.qx_width, self.qy_width = q, qx_width, qy_width 200 201 # Allow independent resolution on each qx point even though it is not 202 # needed in practice. Set qy_width to the maximum qy width. 203 if np.isscalar(qx_width): 204 qx_width = np.ones(len(q))*qx_width 205 else: 206 qx_width = np.asarray(qx_width) 207 if not np.isscalar(qy_width): 208 qy_width = np.max(qy_width) 209 210 # Build grid of qx, qy points 211 if q_calc is not None: 212 qx_calc = np.sort(q_calc) 213 else: 214 qx_calc = resolution.pinhole_extend_q(q, qx_width, nsigma=3) 215 qy_min, qy_max = np.log10(np.min(q)), np.log10(qy_width) 216 qy_calc = np.logspace(qy_min, qy_max, N_SLIT_PERP[accuracy]) 217 qy_calc = np.hstack((-qy_calc[::-1], 0, qy_calc)) 218 self.q_calc = [v.flatten() for v in np.meshgrid(qx_calc, qy_calc)] 219 self.qx_calc, self.qy_calc = qx_calc, qy_calc 220 self.nx, self.ny = len(qx_calc), len(qy_calc) 221 self.dy = 2*qy_width/self.ny 222 223 # Build weight matrix for resolution integration 224 if np.any(qx_width > 0): 225 self.weights = resolution.pinhole_resolution(qx_calc, q, 226 np.maximum(qx_width, resolution.MINIMUM_RESOLUTION)) 227 elif len(qx_calc)==len(q) and np.all(qx_calc == q): 228 self.weights = None 229 else: 230 raise ValueError("Slit2D fails with q_calc != q") 231 232 def apply(self, theory): 233 Iq = np.trapz(theory.reshape(self.ny, self.nx), axis=0, x=self.qy_calc) 234 if self.weights is not None: 235 Iq = resolution.apply_resolution_matrix(self.weights, Iq) 236 return Iq 237 -
doc/developer/index.rst
r56fc97a rb85be2d 3 3 *************************** 4 4 5 .. toctree:: 6 :numbered: 4 7 :maxdepth: 4 5 8 9 calculator.rst -
sasmodels/convert.py
r2a3e1f5 rd1c4760 73 73 new model definition end with sld. 74 74 """ 75 print "pars",pars 75 76 return dict((p, (v*1e-6 if p.startswith('sld') or p.endswith('sld') 76 77 else v*1e15 if 'ndensity' in p -
sasmodels/core.py
re79f0a5 rfb5914f 24 24 HAVE_OPENCL = False 25 25 26 try: 27 # Python 3.5 and up 28 from importlib.util import spec_from_file_location, module_from_spec 29 def load_module(fullname, path): 30 spec = spec_from_file_location(fullname, path) 31 module = module_from_spec(spec) 32 spec.loader.exec_module(module) 33 return module 34 except ImportError: 35 # CRUFT: python 2 36 import imp 37 def load_module(fullname, path): 38 module = imp.load_source(fullname, path) 39 return module 40 41 42 26 43 __all__ = [ 27 44 "list_models", "load_model_info", "precompile_dll", 28 "build_model", " make_kernel", "call_kernel", "call_ER_VR",45 "build_model", "call_kernel", "call_ER_VR", 29 46 ] 30 47 … … 51 68 """ 52 69 return build_model(load_model_info(model_name), **kw) 53 54 def load_model_info_from_path(path):55 # Pull off the last .ext if it exists; there may be others56 name = basename(splitext(path)[0])57 58 # Not cleaning name since don't need to be able to reload this59 # model later60 # Should probably turf the model from sys.modules after we are done...61 62 # Placing the model in the 'sasmodels.custom' name space, even63 # though it doesn't actually exist. imp.load_source doesn't seem64 # to care.65 kernel_module = imp.load_source('sasmodels.custom.'+name, path)66 67 # Now that we have the module, we can load it as usual68 model_info = generate.make_model_info(kernel_module)69 return model_info70 70 71 71 def load_model_info(model_name): … … 90 90 return product.make_product_info(P_info, Q_info) 91 91 92 return make_model_by_name(model_name) 93 94 95 def make_model_by_name(model_name): 96 if model_name.endswith('.py'): 97 path = model_name 98 # Pull off the last .ext if it exists; there may be others 99 name = basename(splitext(path)[0]) 100 # Placing the model in the 'sasmodels.custom' name space. 101 from sasmodels import custom 102 kernel_module = load_module('sasmodels.custom.'+name, path) 103 else: 104 from sasmodels import models 105 __import__('sasmodels.models.'+model_name) 106 kernel_module = getattr(models, model_name, None) 92 107 #import sys; print "\n".join(sys.path) 93 __import__('sasmodels.models.'+model_name)94 kernel_module = getattr(models, model_name, None)95 108 return generate.make_model_info(kernel_module) 96 109 … … 167 180 168 181 169 def make_kernel(model, q_vectors): 170 """ 171 Return a computation kernel from the model definition and the q input. 172 """ 173 return model(q_vectors) 174 175 def get_weights(model_info, pars, name): 182 def get_weights(parameter, values): 176 183 """ 177 184 Generate the distribution for parameter *name* given the parameter values … … 181 188 from the *pars* dictionary for parameter value and parameter dispersion. 182 189 """ 183 relative = name in model_info['partype']['pd-rel'] 184 limits = model_info['limits'][name] 185 disperser = pars.get(name+'_pd_type', 'gaussian') 186 value = pars.get(name, model_info['defaults'][name]) 187 npts = pars.get(name+'_pd_n', 0) 188 width = pars.get(name+'_pd', 0.0) 189 nsigma = pars.get(name+'_pd_nsigma', 3.0) 190 value = values.get(parameter.name, parameter.default) 191 relative = parameter.relative_pd 192 limits = parameter.limits 193 disperser = values.get(parameter.name+'_pd_type', 'gaussian') 194 npts = values.get(parameter.name+'_pd_n', 0) 195 width = values.get(parameter.name+'_pd', 0.0) 196 nsigma = values.get(parameter.name+'_pd_nsigma', 3.0) 197 if npts == 0 or width == 0: 198 return [value], [] 190 199 value, weight = weights.get_weights( 191 200 disperser, npts, width, nsigma, value, limits, relative) … … 208 217 def call_kernel(kernel, pars, cutoff=0, mono=False): 209 218 """ 210 Call *kernel* returned from :func:`make_kernel`with parameters *pars*.219 Call *kernel* returned from *model.make_kernel* with parameters *pars*. 211 220 212 221 *cutoff* is the limiting value for the product of dispersion weights used … … 216 225 with an error of about 1%, which is usually less than the measurement 217 226 uncertainty. 218 """ 219 fixed_pars = [pars.get(name, kernel.info['defaults'][name]) 220 for name in kernel.fixed_pars] 227 228 *mono* is True if polydispersity should be set to none on all parameters. 229 """ 230 parameters = kernel.info['parameters'] 221 231 if mono: 222 pd_pars = [( np.array([pars[name]]), np.array([1.0]) ) 223 for name in kernel.pd_pars] 224 else: 225 pd_pars = [get_weights(kernel.info, pars, name) for name in kernel.pd_pars] 226 return kernel(fixed_pars, pd_pars, cutoff=cutoff) 232 active = lambda name: False 233 elif kernel.dim == '1d': 234 active = lambda name: name in parameters.pd_1d 235 elif kernel.dim == '2d': 236 active = lambda name: name in parameters.pd_2d 237 else: 238 active = lambda name: True 239 240 vw_pairs = [(get_weights(p, pars) if active(p.name) 241 else ([pars.get(p.name, p.default)], [])) 242 for p in parameters.call_parameters] 243 244 details, weights, values = build_details(kernel, vw_pairs) 245 return kernel(details, weights, values, cutoff) 246 247 def build_details(kernel, pairs): 248 values, weights = zip(*pairs) 249 if max([len(w) for w in weights]) > 1: 250 details = generate.poly_details(kernel.info, weights) 251 else: 252 details = kernel.info['mono_details'] 253 weights, values = [np.hstack(v) for v in (weights, values)] 254 weights = weights.astype(dtype=kernel.dtype) 255 values = values.astype(dtype=kernel.dtype) 256 return details, weights, values 257 227 258 228 259 def call_ER_VR(model_info, vol_pars): … … 247 278 248 279 249 def call_ER( info, pars):250 """ 251 Call the model ER function using * pars*.252 * info* is either *model.info* if you have a loaded model, or *kernel.info*253 if youhave a model kernel prepared for evaluation.254 """ 255 ER = info.get('ER', None)280 def call_ER(model_info, values): 281 """ 282 Call the model ER function using *values*. *model_info* is either 283 *model.info* if you have a loaded model, or *kernel.info* if you 284 have a model kernel prepared for evaluation. 285 """ 286 ER = model_info.get('ER', None) 256 287 if ER is None: 257 288 return 1.0 258 289 else: 259 vol_pars = [get_weights(info, pars, name) 260 for name in info['partype']['volume']] 290 vol_pars = [get_weights(parameter, values) 291 for parameter in model_info['parameters'] 292 if parameter.type == 'volume'] 261 293 value, weight = dispersion_mesh(vol_pars) 262 294 individual_radii = ER(*value) … … 264 296 return np.sum(weight*individual_radii) / np.sum(weight) 265 297 266 def call_VR( info, pars):298 def call_VR(model_info, values): 267 299 """ 268 300 Call the model VR function using *pars*. … … 270 302 if you have a model kernel prepared for evaluation. 271 303 """ 272 VR = info.get('VR', None)304 VR = model_info.get('VR', None) 273 305 if VR is None: 274 306 return 1.0 275 307 else: 276 vol_pars = [get_weights(info, pars, name) 277 for name in info['partype']['volume']] 308 vol_pars = [get_weights(parameter, values) 309 for parameter in model_info['parameters'] 310 if parameter.type == 'volume'] 278 311 value, weight = dispersion_mesh(vol_pars) 279 312 whole, part = VR(*value) -
sasmodels/generate.py
r9ef9dd9 rce896fd 21 21 22 22 *VR(p1, p2, ...)* returns the volume ratio for core-shell style forms. 23 24 #define INVALID(v) (expr) returns False if v.parameter is invalid 25 for some parameter or other (e.g., v.bell_radius < v.radius). If 26 necessary, the expression can call a function. 23 27 24 28 These functions are defined in a kernel module .py script and an associated … … 65 69 The constructor code will generate the necessary vectors for computing 66 70 them with the desired polydispersity. 67 68 The available kernel parameters are defined as a list, with each parameter69 defined as a sublist with the following elements:70 71 *name* is the name that will be used in the call to the kernel72 function and the name that will be displayed to the user. Names73 should be lower case, with words separated by underscore. If74 acronyms are used, the whole acronym should be upper case.75 76 *units* should be one of *degrees* for angles, *Ang* for lengths,77 *1e-6/Ang^2* for SLDs.78 79 *default value* will be the initial value for the model when it80 is selected, or when an initial value is not otherwise specified.81 82 *limits = [lb, ub]* are the hard limits on the parameter value, used to83 limit the polydispersity density function. In the fit, the parameter limits84 given to the fit are the limits on the central value of the parameter.85 If there is polydispersity, it will evaluate parameter values outside86 the fit limits, but not outside the hard limits specified in the model.87 If there are no limits, use +/-inf imported from numpy.88 89 *type* indicates how the parameter will be used. "volume" parameters90 will be used in all functions. "orientation" parameters will be used91 in *Iqxy* and *Imagnetic*. "magnetic* parameters will be used in92 *Imagnetic* only. If *type* is the empty string, the parameter will93 be used in all of *Iq*, *Iqxy* and *Imagnetic*. "sld" parameters94 can automatically be promoted to magnetic parameters, each of which95 will have a magnitude and a direction, which may be different from96 other sld parameters.97 98 *description* is a short description of the parameter. This will99 be displayed in the parameter table and used as a tool tip for the100 parameter value in the user interface.101 102 71 The kernel module must set variables defining the kernel meta data: 103 72 … … 212 181 from __future__ import print_function 213 182 214 # TODO: identify model files which have changed since loading and reload them.215 216 import sys 183 #TODO: determine which functions are useful outside of generate 184 #__all__ = ["model_info", "make_doc", "make_source", "convert_type"] 185 217 186 from os.path import abspath, dirname, join as joinpath, exists, basename, \ 218 splitext 187 splitext, getmtime 219 188 import re 220 189 import string 221 190 import warnings 222 from collections import namedtuple223 191 224 192 import numpy as np 225 193 226 PARAMETER_FIELDS = ['name', 'units', 'default', 'limits', 'type', 'description'] 227 Parameter = namedtuple('Parameter', PARAMETER_FIELDS) 228 229 #TODO: determine which functions are useful outside of generate 230 #__all__ = ["model_info", "make_doc", "make_source", "convert_type"] 231 232 C_KERNEL_TEMPLATE_PATH = joinpath(dirname(__file__), 'kernel_template.c') 194 from .modelinfo import ModelInfo, Parameter, make_parameter_table, set_demo 195 196 # TODO: identify model files which have changed since loading and reload them. 197 198 TEMPLATE_ROOT = dirname(__file__) 233 199 234 200 F16 = np.dtype('float16') … … 239 205 except TypeError: 240 206 F128 = None 241 242 # Scale and background, which are parameters common to every form factor243 COMMON_PARAMETERS = [244 ["scale", "", 1, [0, np.inf], "", "Source intensity"],245 ["background", "1/cm", 1e-3, [0, np.inf], "", "Source background"],246 ]247 207 248 208 # Conversion from units defined in the parameter table for each model … … 338 298 raise ValueError("%r not found in %s" % (filename, search_path)) 339 299 300 340 301 def model_sources(model_info): 341 302 """ … … 346 307 return [_search(search_path, f) for f in model_info['source']] 347 308 348 # Pragmas for enable OpenCL features. Be sure to protect them so that they 349 # still compile even if OpenCL is not present. 350 _F16_PRAGMA = """\ 351 #if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp16) 352 # pragma OPENCL EXTENSION cl_khr_fp16: enable 353 #endif 354 """ 355 356 _F64_PRAGMA = """\ 357 #if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp64) 358 # pragma OPENCL EXTENSION cl_khr_fp64: enable 359 #endif 360 """ 309 def timestamp(model_info): 310 """ 311 Return a timestamp for the model corresponding to the most recently 312 changed file or dependency. 313 """ 314 source_files = (model_sources(model_info) 315 + model_templates() 316 + [model_info['filename']]) 317 newest = max(getmtime(f) for f in source_files) 318 return newest 361 319 362 320 def convert_type(source, dtype): … … 369 327 if dtype == F16: 370 328 fbytes = 2 371 source = _ F16_PRAGMA + _convert_type(source, "half", "f")329 source = _convert_type(source, "float", "f") 372 330 elif dtype == F32: 373 331 fbytes = 4 … … 375 333 elif dtype == F64: 376 334 fbytes = 8 377 source = _F64_PRAGMA + source # Sourceis already double335 # no need to convert if it is already double 378 336 elif dtype == F128: 379 337 fbytes = 16 … … 418 376 419 377 420 LOOP_OPEN = """\ 421 for (int %(name)s_i=0; %(name)s_i < N%(name)s; %(name)s_i++) { 422 const double %(name)s = loops[2*(%(name)s_i%(offset)s)]; 423 const double %(name)s_w = loops[2*(%(name)s_i%(offset)s)+1];\ 378 _template_cache = {} 379 def load_template(filename): 380 path = joinpath(TEMPLATE_ROOT, filename) 381 mtime = getmtime(path) 382 if filename not in _template_cache or mtime > _template_cache[filename][0]: 383 with open(path) as fid: 384 _template_cache[filename] = (mtime, fid.read(), path) 385 return _template_cache[filename][1] 386 387 def model_templates(): 388 # TODO: fails DRY; templates are listed in two places. 389 # should instead have model_info contain a list of paths 390 return [joinpath(TEMPLATE_ROOT, filename) 391 for filename in ('kernel_header.c', 'kernel_iq.c')] 392 393 394 _FN_TEMPLATE = """\ 395 double %(name)s(%(pars)s); 396 double %(name)s(%(pars)s) { 397 %(body)s 398 } 399 400 424 401 """ 425 def build_polydispersity_loops(pd_pars): 426 """ 427 Build polydispersity loops 428 429 Returns loop opening and loop closing 430 """ 431 depth = 4 432 offset = "" 433 loop_head = [] 434 loop_end = [] 435 for name in pd_pars: 436 subst = {'name': name, 'offset': offset} 437 loop_head.append(indent(LOOP_OPEN % subst, depth)) 438 loop_end.insert(0, (" "*depth) + "}") 439 offset += '+N' + name 440 depth += 2 441 return "\n".join(loop_head), "\n".join(loop_end) 442 443 C_KERNEL_TEMPLATE = None 402 403 def _gen_fn(name, pars, body): 404 """ 405 Generate a function given pars and body. 406 407 Returns the following string:: 408 409 double fn(double a, double b, ...); 410 double fn(double a, double b, ...) { 411 .... 412 } 413 """ 414 par_decl = ', '.join(p.as_function_argument() for p in pars) if pars else 'void' 415 return _FN_TEMPLATE % {'name': name, 'body': body, 'pars': par_decl} 416 417 def _call_pars(prefix, pars): 418 """ 419 Return a list of *prefix.parameter* from parameter items. 420 """ 421 return [p.as_call_reference(prefix) for p in pars] 422 423 _IQXY_PATTERN = re.compile("^((inline|static) )? *(double )? *Iqxy *([(]|$)", 424 flags=re.MULTILINE) 425 def _have_Iqxy(sources): 426 """ 427 Return true if any file defines Iqxy. 428 429 Note this is not a C parser, and so can be easily confused by 430 non-standard syntax. Also, it will incorrectly identify the following 431 as having Iqxy:: 432 433 /* 434 double Iqxy(qx, qy, ...) { ... fill this in later ... } 435 */ 436 437 If you want to comment out an Iqxy function, use // on the front of the 438 line instead. 439 """ 440 for code in sources: 441 if _IQXY_PATTERN.search(code): 442 return True 443 else: 444 return False 445 444 446 def make_source(model_info): 445 447 """ … … 460 462 # for computing volume even if we allow non-disperse volume parameters. 461 463 462 # Load template 463 global C_KERNEL_TEMPLATE 464 if C_KERNEL_TEMPLATE is None: 465 with open(C_KERNEL_TEMPLATE_PATH) as fid: 466 C_KERNEL_TEMPLATE = fid.read() 467 468 # Load additional sources 469 source = [open(f).read() for f in model_sources(model_info)] 470 471 # Prepare defines 472 defines = [] 473 partype = model_info['partype'] 474 pd_1d = partype['pd-1d'] 475 pd_2d = partype['pd-2d'] 476 fixed_1d = partype['fixed-1d'] 477 fixed_2d = partype['fixed-1d'] 478 479 iq_parameters = [p.name 480 for p in model_info['parameters'][2:] # skip scale, background 481 if p.name in set(fixed_1d + pd_1d)] 482 iqxy_parameters = [p.name 483 for p in model_info['parameters'][2:] # skip scale, background 484 if p.name in set(fixed_2d + pd_2d)] 485 volume_parameters = [p.name 486 for p in model_info['parameters'] 487 if p.type == 'volume'] 488 489 # Fill in defintions for volume parameters 490 if volume_parameters: 491 defines.append(('VOLUME_PARAMETERS', 492 ','.join(volume_parameters))) 493 defines.append(('VOLUME_WEIGHT_PRODUCT', 494 '*'.join(p + '_w' for p in volume_parameters))) 495 496 # Generate form_volume function from body only 464 partable = model_info['parameters'] 465 466 # Identify parameters for Iq, Iqxy, Iq_magnetic and form_volume. 467 # Note that scale and volume are not possible types. 468 469 # Load templates and user code 470 kernel_header = load_template('kernel_header.c') 471 kernel_code = load_template('kernel_iq.c') 472 user_code = [open(f).read() for f in model_sources(model_info)] 473 474 # Build initial sources 475 source = [kernel_header] + user_code 476 477 # Make parameters for q, qx, qy so that we can use them in declarations 478 q, qx, qy = [Parameter(name=v) for v in ('q', 'qx', 'qy')] 479 # Generate form_volume function, etc. from body only 497 480 if model_info['form_volume'] is not None: 498 if volume_parameters: 499 vol_par_decl = ', '.join('double ' + p for p in volume_parameters) 500 else: 501 vol_par_decl = 'void' 502 defines.append(('VOLUME_PARAMETER_DECLARATIONS', 503 vol_par_decl)) 504 fn = """\ 505 double form_volume(VOLUME_PARAMETER_DECLARATIONS); 506 double form_volume(VOLUME_PARAMETER_DECLARATIONS) { 507 %(body)s 508 } 509 """ % {'body':model_info['form_volume']} 510 source.append(fn) 511 512 # Fill in definitions for Iq parameters 513 defines.append(('IQ_KERNEL_NAME', model_info['name'] + '_Iq')) 514 defines.append(('IQ_PARAMETERS', ', '.join(iq_parameters))) 515 if fixed_1d: 516 defines.append(('IQ_FIXED_PARAMETER_DECLARATIONS', 517 ', \\\n '.join('const double %s' % p for p in fixed_1d))) 518 if pd_1d: 519 defines.append(('IQ_WEIGHT_PRODUCT', 520 '*'.join(p + '_w' for p in pd_1d))) 521 defines.append(('IQ_DISPERSION_LENGTH_DECLARATIONS', 522 ', \\\n '.join('const int N%s' % p for p in pd_1d))) 523 defines.append(('IQ_DISPERSION_LENGTH_SUM', 524 '+'.join('N' + p for p in pd_1d))) 525 open_loops, close_loops = build_polydispersity_loops(pd_1d) 526 defines.append(('IQ_OPEN_LOOPS', 527 open_loops.replace('\n', ' \\\n'))) 528 defines.append(('IQ_CLOSE_LOOPS', 529 close_loops.replace('\n', ' \\\n'))) 481 pars = partable.form_volume_parameters 482 source.append(_gen_fn('form_volume', pars, model_info['form_volume'])) 530 483 if model_info['Iq'] is not None: 531 defines.append(('IQ_PARAMETER_DECLARATIONS', 532 ', '.join('double ' + p for p in iq_parameters))) 533 fn = """\ 534 double Iq(double q, IQ_PARAMETER_DECLARATIONS); 535 double Iq(double q, IQ_PARAMETER_DECLARATIONS) { 536 %(body)s 537 } 538 """ % {'body':model_info['Iq']} 539 source.append(fn) 540 541 # Fill in definitions for Iqxy parameters 542 defines.append(('IQXY_KERNEL_NAME', model_info['name'] + '_Iqxy')) 543 defines.append(('IQXY_PARAMETERS', ', '.join(iqxy_parameters))) 544 if fixed_2d: 545 defines.append(('IQXY_FIXED_PARAMETER_DECLARATIONS', 546 ', \\\n '.join('const double %s' % p for p in fixed_2d))) 547 if pd_2d: 548 defines.append(('IQXY_WEIGHT_PRODUCT', 549 '*'.join(p + '_w' for p in pd_2d))) 550 defines.append(('IQXY_DISPERSION_LENGTH_DECLARATIONS', 551 ', \\\n '.join('const int N%s' % p for p in pd_2d))) 552 defines.append(('IQXY_DISPERSION_LENGTH_SUM', 553 '+'.join('N' + p for p in pd_2d))) 554 open_loops, close_loops = build_polydispersity_loops(pd_2d) 555 defines.append(('IQXY_OPEN_LOOPS', 556 open_loops.replace('\n', ' \\\n'))) 557 defines.append(('IQXY_CLOSE_LOOPS', 558 close_loops.replace('\n', ' \\\n'))) 484 pars = [q] + partable.iq_parameters 485 source.append(_gen_fn('Iq', pars, model_info['Iq'])) 559 486 if model_info['Iqxy'] is not None: 560 defines.append(('IQXY_PARAMETER_DECLARATIONS', 561 ', '.join('double ' + p for p in iqxy_parameters))) 562 fn = """\ 563 double Iqxy(double qx, double qy, IQXY_PARAMETER_DECLARATIONS); 564 double Iqxy(double qx, double qy, IQXY_PARAMETER_DECLARATIONS) { 565 %(body)s 566 } 567 """ % {'body':model_info['Iqxy']} 568 source.append(fn) 569 570 # Need to know if we have a theta parameter for Iqxy; it is not there 571 # for the magnetic sphere model, for example, which has a magnetic 572 # orientation but no shape orientation. 573 if 'theta' in pd_2d: 574 defines.append(('IQXY_HAS_THETA', '1')) 575 576 #for d in defines: print(d) 577 defines = '\n'.join('#define %s %s' % (k, v) for k, v in defines) 578 sources = '\n\n'.join(source) 579 return C_KERNEL_TEMPLATE % { 580 'DEFINES': defines, 581 'SOURCES': sources, 582 } 583 584 def categorize_parameters(pars): 585 """ 586 Build parameter categories out of the the parameter definitions. 587 588 Returns a dictionary of categories. 589 590 Note: these categories are subject to change, depending on the needs of 591 the UI and the needs of the kernel calling function. 592 593 The categories are as follows: 594 595 * *volume* list of volume parameter names 596 * *orientation* list of orientation parameters 597 * *magnetic* list of magnetic parameters 598 * *<empty string>* list of parameters that have no type info 599 600 Each parameter is in one and only one category. 601 602 The following derived categories are created: 603 604 * *fixed-1d* list of non-polydisperse parameters for 1D models 605 * *pd-1d* list of polydisperse parameters for 1D models 606 * *fixed-2d* list of non-polydisperse parameters for 2D models 607 * *pd-d2* list of polydisperse parameters for 2D models 608 """ 609 partype = { 610 'volume': [], 'orientation': [], 'magnetic': [], 'sld': [], '': [], 611 'fixed-1d': [], 'fixed-2d': [], 'pd-1d': [], 'pd-2d': [], 612 'pd-rel': set(), 613 } 614 615 for p in pars: 616 if p.type == 'volume': 617 partype['pd-1d'].append(p.name) 618 partype['pd-2d'].append(p.name) 619 partype['pd-rel'].add(p.name) 620 elif p.type == 'magnetic': 621 partype['fixed-2d'].append(p.name) 622 elif p.type == 'orientation': 623 partype['pd-2d'].append(p.name) 624 elif p.type in ('', 'sld'): 625 partype['fixed-1d'].append(p.name) 626 partype['fixed-2d'].append(p.name) 627 else: 628 raise ValueError("unknown parameter type %r" % p.type) 629 partype[p.type].append(p.name) 630 631 return partype 632 633 def process_parameters(model_info): 634 """ 635 Process parameter block, precalculating parameter details. 636 """ 637 # convert parameters into named tuples 638 for p in model_info['parameters']: 639 if p[4] == '' and (p[0].startswith('sld') or p[0].endswith('sld')): 640 p[4] = 'sld' 641 # TODO: make sure all models explicitly label their sld parameters 642 #raise ValueError("%s.%s needs to be explicitly set to type 'sld'" %(model_info['id'], p[0])) 643 644 pars = [Parameter(*p) for p in model_info['parameters']] 645 # Fill in the derived attributes 646 model_info['parameters'] = pars 647 partype = categorize_parameters(pars) 648 model_info['limits'] = dict((p.name, p.limits) for p in pars) 649 model_info['partype'] = partype 650 model_info['defaults'] = dict((p.name, p.default) for p in pars) 651 if model_info.get('demo', None) is None: 652 model_info['demo'] = model_info['defaults'] 653 model_info['has_2d'] = partype['orientation'] or partype['magnetic'] 487 pars = [qx, qy] + partable.iqxy_parameters 488 source.append(_gen_fn('Iqxy', pars, model_info['Iqxy'])) 489 490 # Define the parameter table 491 source.append("#define PARAMETER_TABLE \\") 492 source.append("\\\n".join(p.as_definition() 493 for p in partable.kernel_parameters)) 494 495 # Define the function calls 496 if partable.form_volume_parameters: 497 refs = _call_pars("v.", partable.form_volume_parameters) 498 call_volume = "#define CALL_VOLUME(v) form_volume(%s)" % (",".join(refs)) 499 else: 500 # Model doesn't have volume. We could make the kernel run a little 501 # faster by not using/transferring the volume normalizations, but 502 # the ifdef's reduce readability more than is worthwhile. 503 call_volume = "#define CALL_VOLUME(v) 1.0" 504 source.append(call_volume) 505 506 refs = ["q[i]"] + _call_pars("v.", partable.iq_parameters) 507 call_iq = "#define CALL_IQ(q,i,v) Iq(%s)" % (",".join(refs)) 508 if _have_Iqxy(user_code): 509 # Call 2D model 510 refs = ["q[2*i]", "q[2*i+1]"] + _call_pars("v.", partable.iqxy_parameters) 511 call_iqxy = "#define CALL_IQ(q,i,v) Iqxy(%s)" % (",".join(refs)) 512 else: 513 # Call 1D model with sqrt(qx^2 + qy^2) 514 warnings.warn("Creating Iqxy = Iq(sqrt(qx^2 + qy^2))") 515 # still defined:: refs = ["q[i]"] + _call_pars("v", iq_parameters) 516 pars_sqrt = ["sqrt(q[2*i]*q[2*i]+q[2*i+1]*q[2*i+1])"] + refs[1:] 517 call_iqxy = "#define CALL_IQ(q,i,v) Iq(%s)" % (",".join(pars_sqrt)) 518 519 # Fill in definitions for numbers of parameters 520 source.append("#define MAX_PD %s"%partable.max_pd) 521 source.append("#define NPARS %d"%partable.npars) 522 523 # TODO: allow mixed python/opencl kernels? 524 525 # define the Iq kernel 526 source.append("#define KERNEL_NAME %s_Iq"%model_info['name']) 527 source.append(call_iq) 528 source.append(kernel_code) 529 source.append("#undef CALL_IQ") 530 source.append("#undef KERNEL_NAME") 531 532 # define the Iqxy kernel from the same source with different #defines 533 source.append("#define KERNEL_NAME %s_Iqxy"%model_info['name']) 534 source.append(call_iqxy) 535 source.append(kernel_code) 536 source.append("#undef CALL_IQ") 537 source.append("#undef KERNEL_NAME") 538 539 return '\n'.join(source) 540 541 class CoordinationDetails(object): 542 def __init__(self, model_info): 543 parameters = model_info['parameters'] 544 max_pd = parameters.max_pd 545 npars = parameters.npars 546 par_offset = 4*max_pd 547 self.details = np.zeros(par_offset + 3*npars + 4, 'i4') 548 549 # generate views on different parts of the array 550 self._pd_par = self.details[0*max_pd:1*max_pd] 551 self._pd_length = self.details[1*max_pd:2*max_pd] 552 self._pd_offset = self.details[2*max_pd:3*max_pd] 553 self._pd_stride = self.details[3*max_pd:4*max_pd] 554 self._par_offset = self.details[par_offset+0*npars:par_offset+1*npars] 555 self._par_coord = self.details[par_offset+1*npars:par_offset+2*npars] 556 self._pd_coord = self.details[par_offset+2*npars:par_offset+3*npars] 557 558 # theta_par is fixed 559 self.details[-1] = parameters.theta_offset 560 561 @property 562 def ctypes(self): return self.details.ctypes 563 @property 564 def pd_par(self): return self._pd_par 565 @property 566 def pd_length(self): return self._pd_length 567 @property 568 def pd_offset(self): return self._pd_offset 569 @property 570 def pd_stride(self): return self._pd_stride 571 @property 572 def pd_coord(self): return self._pd_coord 573 @property 574 def par_coord(self): return self._par_coord 575 @property 576 def par_offset(self): return self._par_offset 577 @property 578 def num_coord(self): return self.details[-2] 579 @num_coord.setter 580 def num_coord(self, v): self.details[-2] = v 581 @property 582 def total_pd(self): return self.details[-3] 583 @total_pd.setter 584 def total_pd(self, v): self.details[-3] = v 585 @property 586 def num_active(self): return self.details[-4] 587 @num_active.setter 588 def num_active(self, v): self.details[-4] = v 589 590 def show(self): 591 print("total_pd", self.total_pd) 592 print("num_active", self.num_active) 593 print("pd_par", self.pd_par) 594 print("pd_length", self.pd_length) 595 print("pd_offset", self.pd_offset) 596 print("pd_stride", self.pd_stride) 597 print("par_offsets", self.par_offset) 598 print("num_coord", self.num_coord) 599 print("par_coord", self.par_coord) 600 print("pd_coord", self.pd_coord) 601 print("theta par", self.details[-1]) 602 603 def mono_details(model_info): 604 details = CoordinationDetails(model_info) 605 # The zero defaults for monodisperse systems are mostly fine 606 details.par_offset[:] = np.arange(2, len(details.par_offset)+2) 607 return details 608 609 def poly_details(model_info, weights): 610 #print("weights",weights) 611 weights = weights[2:] # Skip scale and background 612 613 # Decreasing list of polydispersity lengths 614 # Note: the reversing view, x[::-1], does not require a copy 615 pd_length = np.array([len(w) for w in weights]) 616 num_active = np.sum(pd_length>1) 617 if num_active > model_info['parameters'].max_pd: 618 raise ValueError("Too many polydisperse parameters") 619 620 pd_offset = np.cumsum(np.hstack((0, pd_length))) 621 idx = np.argsort(pd_length)[::-1][:num_active] 622 par_length = np.array([max(len(w),1) for w in weights]) 623 pd_stride = np.cumprod(np.hstack((1, par_length[idx]))) 624 par_offsets = np.cumsum(np.hstack((2, par_length))) 625 626 details = CoordinationDetails(model_info) 627 details.pd_par[:num_active] = idx 628 details.pd_length[:num_active] = pd_length[idx] 629 details.pd_offset[:num_active] = pd_offset[idx] 630 details.pd_stride[:num_active] = pd_stride[:-1] 631 details.par_offset[:] = par_offsets[:-1] 632 details.total_pd = pd_stride[-1] 633 details.num_active = num_active 634 # Without constraints coordinated parameters are just the pd parameters 635 details.par_coord[:num_active] = idx 636 details.pd_coord[:num_active] = 2**np.arange(num_active) 637 details.num_coord = num_active 638 #details.show() 639 return details 640 641 def constrained_poly_details(model_info, weights, constraints): 642 # Need to find the independently varying pars and sort them 643 # Need to build a coordination list for the dependent variables 644 # Need to generate a constraints function which takes values 645 # and weights, returning par blocks 646 raise NotImplementedError("Can't handle constraints yet") 647 648 649 def create_default_functions(model_info): 650 """ 651 Autogenerate missing functions, such as Iqxy from Iq. 652 653 This only works for Iqxy when Iq is written in python. :func:`make_source` 654 performs a similar role for Iq written in C. 655 """ 656 if callable(model_info['Iq']) and model_info['Iqxy'] is None: 657 partable = model_info['parameters'] 658 if partable.has_2d: 659 raise ValueError("Iqxy model is missing") 660 Iq = model_info['Iq'] 661 def Iqxy(qx, qy, **kw): 662 return Iq(np.sqrt(qx**2 + qy**2), **kw) 663 model_info['Iqxy'] = Iqxy 664 654 665 655 666 def make_model_info(kernel_module): … … 678 689 model variants (e.g., the list of cases) or is None if there are no 679 690 model variants 680 * *defaults* is the *{parameter: value}* table built from the parameter 681 description table. 682 * *limits* is the *{parameter: [min, max]}* table built from the 683 parameter description table. 684 * *partypes* categorizes the model parameters. See 691 * *par_type* categorizes the model parameters. See 685 692 :func:`categorize_parameters` for details. 686 693 * *demo* contains the *{parameter: value}* map used in compare (and maybe … … 699 706 *model_info* blocks for the composition objects. This allows us to 700 707 build complete product and mixture models from just the info. 701 702 708 """ 703 709 # TODO: maybe turn model_info into a class ModelDefinition 704 parameters = COMMON_PARAMETERS + kernel_module.parameters 710 #print("make parameter table", kernel_module.parameters) 711 parameters = make_parameter_table(kernel_module.parameters) 705 712 filename = abspath(kernel_module.__file__) 706 713 kernel_id = splitext(basename(filename))[0] … … 712 719 filename=abspath(kernel_module.__file__), 713 720 name=name, 714 title= kernel_module.title,715 description= kernel_module.description,721 title=getattr(kernel_module, 'title', name+" model"), 722 description=getattr(kernel_module, 'description', 'no description'), 716 723 parameters=parameters, 717 724 composition=None, … … 720 727 single=getattr(kernel_module, 'single', True), 721 728 structure_factor=getattr(kernel_module, 'structure_factor', False), 729 profile_axes=getattr(kernel_module, 'profile_axes', ['x','y']), 722 730 variant_info=getattr(kernel_module, 'invariant_info', None), 723 731 demo=getattr(kernel_module, 'demo', None), … … 727 735 tests=getattr(kernel_module, 'tests', []), 728 736 ) 729 process_parameters(model_info)737 set_demo(model_info, getattr(kernel_module, 'demo', None)) 730 738 # Check for optional functions 731 functions = "ER VR form_volume Iq Iqxy shape sesans".split()739 functions = "ER VR form_volume Iq Iqxy profile sesans".split() 732 740 model_info.update((k, getattr(kernel_module, k, None)) for k in functions) 741 create_default_functions(model_info) 742 # Precalculate the monodisperse parameters 743 # TODO: make this a lazy evaluator 744 # make_model_info is called for every model on sasview startup 745 model_info['mono_details'] = mono_details(model_info) 733 746 return model_info 734 747 … … 782 795 783 796 784 785 797 def demo_time(): 786 798 """ … … 798 810 Program which prints the source produced by the model. 799 811 """ 812 import sys 813 from sasmodels.core import make_model_by_name 800 814 if len(sys.argv) <= 1: 801 815 print("usage: python -m sasmodels.generate modelname") 802 816 else: 803 817 name = sys.argv[1] 804 import sasmodels.models 805 __import__('sasmodels.models.' + name) 806 model = getattr(sasmodels.models, name) 807 model_info = make_model_info(model) 818 model_info = make_model_by_name(name) 808 819 source = make_source(model_info) 809 820 print(source) -
sasmodels/kernelcl.py
r8e0d974 rba32cdd 48 48 harmless, albeit annoying. 49 49 """ 50 from __future__ import print_function 50 51 import os 51 52 import warnings … … 73 74 # of polydisperse parameters. 74 75 MAX_LOOPS = 2048 76 77 78 # Pragmas for enable OpenCL features. Be sure to protect them so that they 79 # still compile even if OpenCL is not present. 80 _F16_PRAGMA = """\ 81 #if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp16) 82 # pragma OPENCL EXTENSION cl_khr_fp16: enable 83 #endif 84 """ 85 86 _F64_PRAGMA = """\ 87 #if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp64) 88 # pragma OPENCL EXTENSION cl_khr_fp64: enable 89 #endif 90 """ 75 91 76 92 … … 142 158 raise RuntimeError("%s not supported for devices"%dtype) 143 159 144 source = generate.convert_type(source, dtype) 160 source_list = [generate.convert_type(source, dtype)] 161 162 if dtype == generate.F16: 163 source_list.insert(0, _F16_PRAGMA) 164 elif dtype == generate.F64: 165 source_list.insert(0, _F64_PRAGMA) 166 145 167 # Note: USE_SINCOS makes the intel cpu slower under opencl 146 168 if context.devices[0].type == cl.device_type.GPU: 147 source = "#define USE_SINCOS\n" + source169 source_list.insert(0, "#define USE_SINCOS\n") 148 170 options = (get_fast_inaccurate_build_options(context.devices[0]) 149 171 if fast else []) 172 source = "\n".join(source_list) 150 173 program = cl.Program(context, source).build(options=options) 151 174 return program … … 220 243 key = "%s-%s-%s"%(name, dtype, fast) 221 244 if key not in self.compiled: 222 #print("compiling",name)245 print("compiling",name) 223 246 dtype = np.dtype(dtype) 224 program = compile_model(self.get_context(dtype), source, dtype, fast) 247 program = compile_model(self.get_context(dtype), 248 str(source), dtype, fast) 225 249 self.compiled[key] = program 226 250 return self.compiled[key] … … 314 338 self.program = None 315 339 316 def __call__(self, q_vectors):340 def make_kernel(self, q_vectors): 317 341 if self.program is None: 318 342 compiler = environment().compile_program 319 self.program = compiler(self.info['name'], self.source, self.dtype,320 self. fast)343 self.program = compiler(self.info['name'], self.source, 344 self.dtype, self.fast) 321 345 is_2d = len(q_vectors) == 2 322 346 kernel_name = generate.kernel_name(self.info, is_2d) … … 365 389 # at this point, so instead using 32, which is good on the set of 366 390 # architectures tested so far. 367 self.q_vectors = [_stretch_input(q, self.dtype, 32) for q in q_vectors] 391 if self.is_2d: 392 # Note: 17 rather than 15 because results is 2 elements 393 # longer than input. 394 width = ((self.nq+17)//16)*16 395 self.q = np.empty((width, 2), dtype=dtype) 396 self.q[:self.nq, 0] = q_vectors[0] 397 self.q[:self.nq, 1] = q_vectors[1] 398 else: 399 # Note: 33 rather than 31 because results is 2 elements 400 # longer than input. 401 width = ((self.nq+33)//32)*32 402 self.q = np.empty(width, dtype=dtype) 403 self.q[:self.nq] = q_vectors[0] 404 self.global_size = [self.q.shape[0]] 368 405 context = env.get_context(self.dtype) 369 self.global_size = [self.q_vectors[0].size]370 406 #print("creating inputs of size", self.global_size) 371 self.q_buffers = [ 372 cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=q) 373 for q in self.q_vectors 374 ] 407 self.q_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR, 408 hostbuf=self.q) 375 409 376 410 def release(self): … … 378 412 Free the memory. 379 413 """ 380 for b in self.q_buffers:381 b.release()382 self.q_buffers = []414 if self.q is not None: 415 self.q.release() 416 self.q = None 383 417 384 418 def __del__(self): … … 406 440 """ 407 441 def __init__(self, kernel, model_info, q_vectors, dtype): 442 max_pd = model_info['max_pd'] 443 npars = len(model_info['parameters'])-2 408 444 q_input = GpuInput(q_vectors, dtype) 445 self.dtype = dtype 446 self.dim = '2d' if q_input.is_2d else '1d' 409 447 self.kernel = kernel 410 448 self.info = model_info 411 self.res = np.empty(q_input.nq, q_input.dtype) 412 dim = '2d' if q_input.is_2d else '1d' 413 self.fixed_pars = model_info['partype']['fixed-' + dim] 414 self.pd_pars = model_info['partype']['pd-' + dim] 449 self.pd_stop_index = 4*max_pd-1 450 # plus three for the normalization values 451 self.result = np.empty(q_input.nq+3, q_input.dtype) 415 452 416 453 # Inputs and outputs for each kernel call … … 418 455 env = environment() 419 456 self.queue = env.get_queue(dtype) 420 self.loops_b = cl.Buffer(self.queue.context, mf.READ_WRITE, 421 2 * MAX_LOOPS * q_input.dtype.itemsize) 422 self.res_b = cl.Buffer(self.queue.context, mf.READ_WRITE, 457 458 # details is int32 data, padded to an 8 integer boundary 459 size = ((max_pd*5 + npars*3 + 2 + 7)//8)*8 460 self.result_b = cl.Buffer(self.queue.context, mf.READ_WRITE, 423 461 q_input.global_size[0] * q_input.dtype.itemsize) 424 self.q_input = q_input 425 426 self._need_release = [ self.loops_b, self.res_b, self.q_input]427 428 def __call__(self, fixed_pars, pd_pars, cutoff):462 self.q_input = q_input # allocated by GpuInput above 463 464 self._need_release = [ self.result_b, self.q_input ] 465 466 def __call__(self, details, weights, values, cutoff): 429 467 real = (np.float32 if self.q_input.dtype == generate.F32 430 468 else np.float64 if self.q_input.dtype == generate.F64 431 469 else np.float16 if self.q_input.dtype == generate.F16 432 470 else np.float32) # will never get here, so use np.float32 433 434 #print "pars", fixed_pars, pd_pars 435 res_bi = self.res_b 436 nq = np.uint32(self.q_input.nq) 437 if pd_pars: 438 cutoff = real(cutoff) 439 loops_N = [np.uint32(len(p[0])) for p in pd_pars] 440 loops = np.hstack(pd_pars) \ 441 if pd_pars else np.empty(0, dtype=self.q_input.dtype) 442 loops = np.ascontiguousarray(loops.T, self.q_input.dtype).flatten() 443 #print("loops",Nloops, loops) 444 445 #import sys; print("opencl eval",pars) 446 #print("opencl eval",pars) 447 if len(loops) > 2 * MAX_LOOPS: 448 raise ValueError("too many polydispersity points") 449 450 loops_bi = self.loops_b 451 cl.enqueue_copy(self.queue, loops_bi, loops) 452 loops_l = cl.LocalMemory(len(loops.data)) 453 #ctx = environment().context 454 #loops_bi = cl.Buffer(ctx, mf.READ_ONLY|mf.COPY_HOST_PTR, hostbuf=loops) 455 dispersed = [loops_bi, loops_l, cutoff] + loops_N 456 else: 457 dispersed = [] 458 fixed = [real(p) for p in fixed_pars] 459 args = self.q_input.q_buffers + [res_bi, nq] + dispersed + fixed 471 assert details.dtype == np.int32 472 assert weights.dtype == real and values.dtype == real 473 474 context = self.queue.context 475 details_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR, 476 hostbuf=details) 477 weights_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR, 478 hostbuf=weights) 479 values_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR, 480 hostbuf=values) 481 482 start, stop = 0, self.details[self.pd_stop_index] 483 args = [ 484 np.uint32(self.q_input.nq), np.uint32(start), np.uint32(stop), 485 self.details_b, self.weights_b, self.values_b, 486 self.q_input.q_b, self.result_b, real(cutoff), 487 ] 460 488 self.kernel(self.queue, self.q_input.global_size, None, *args) 461 cl.enqueue_copy(self.queue, self.res, res_bi) 462 463 return self.res 489 cl.enqueue_copy(self.queue, self.result, self.result_b) 490 [v.release() for v in details_b, weights_b, values_b] 491 492 return self.result[:self.nq] 464 493 465 494 def release(self): -
sasmodels/kerneldll.py
r6ad0e87 rd19962c 50 50 import tempfile 51 51 import ctypes as ct 52 from ctypes import c_void_p, c_int, c_longdouble, c_double, c_float 53 import _ctypes 52 from ctypes import c_void_p, c_int32, c_longdouble, c_double, c_float 54 53 55 54 import numpy as np … … 81 80 COMPILE = "gcc -shared -fPIC -std=c99 -O2 -Wall %(source)s -o %(output)s -lm" 82 81 if "SAS_OPENMP" in os.environ: 83 COMPILE = COMPILE +" -fopenmp"82 COMPILE += " -fopenmp" 84 83 else: 85 84 COMPILE = "cc -shared -fPIC -fopenmp -std=c99 -O2 -Wall %(source)s -o %(output)s -lm" … … 90 89 91 90 92 def dll_ path(model_info, dtype="double"):93 """ 94 Path to the compiled model defined by *model_info*.95 """96 from os.path import join as joinpath, split as splitpath, splitext97 b asename = splitext(splitpath(model_info['filename'])[1])[0]98 if np.dtype(dtype) == generate.F32:99 basename += "32" 100 elif np.dtype(dtype) == generate.F64:101 basename += "64"102 else:103 basename += "128"104 return joinpath(DLL_PATH, basename+'.so')105 91 def dll_name(model_info, dtype): 92 """ 93 Name of the dll containing the model. This is the base file name without 94 any path or extension, with a form such as 'sas_sphere32'. 95 """ 96 bits = 8*dtype.itemsize 97 return "sas_%s%d"%(model_info['id'], bits) 98 99 def dll_path(model_info, dtype): 100 """ 101 Complete path to the dll for the model. Note that the dll may not 102 exist yet if it hasn't been compiled. 103 """ 104 return os.path.join(DLL_PATH, dll_name(model_info, dtype)+".so") 106 105 107 106 def make_dll(source, model_info, dtype="double"): … … 133 132 dtype = generate.F64 # Force 64-bit dll 134 133 135 if dtype == generate.F32: # 32-bit dll136 tempfile_prefix = 'sas_' + model_info['name'] + '32_'137 elif dtype == generate.F64:138 tempfile_prefix = 'sas_' + model_info['name'] + '64_'139 else:140 tempfile_prefix = 'sas_' + model_info['name'] + '128_'141 142 134 source = generate.convert_type(source, dtype) 143 source_files = generate.model_sources(model_info) + [model_info['filename']]135 newest = generate.timestamp(model_info) 144 136 dll = dll_path(model_info, dtype) 145 newest = max(os.path.getmtime(f) for f in source_files)146 137 if not os.path.exists(dll) or os.path.getmtime(dll) < newest: 147 # Replace with a proper temp file148 fid, filename = tempfile.mkstemp(suffix=".c", prefix= tempfile_prefix)138 basename = dll_name(model_info, dtype) + "_" 139 fid, filename = tempfile.mkstemp(suffix=".c", prefix=basename) 149 140 os.fdopen(fid, "w").write(source) 150 141 command = COMPILE%{"source":filename, "output":dll} … … 171 162 return DllModel(filename, model_info, dtype=dtype) 172 163 173 174 IQ_ARGS = [c_void_p, c_void_p, c_int]175 IQXY_ARGS = [c_void_p, c_void_p, c_void_p, c_int]176 177 164 class DllModel(object): 178 165 """ … … 197 184 198 185 def _load_dll(self): 199 Nfixed1d = len(self.info['partype']['fixed-1d'])200 Nfixed2d = len(self.info['partype']['fixed-2d'])201 Npd1d = len(self.info['partype']['pd-1d'])202 Npd2d = len(self.info['partype']['pd-2d'])203 204 186 #print("dll", self.dllpath) 205 187 try: … … 212 194 else c_double if self.dtype == generate.F64 213 195 else c_longdouble) 214 pd_args_1d = [c_void_p, fp] + [c_int]*Npd1d if Npd1d else [] 215 pd_args_2d = [c_void_p, fp] + [c_int]*Npd2d if Npd2d else [] 196 197 # int, int, int, int*, double*, double*, double*, double*, double*, double 198 argtypes = [c_int32]*3 + [c_void_p]*5 + [fp] 216 199 self.Iq = self.dll[generate.kernel_name(self.info, False)] 217 self.Iq.argtypes = IQ_ARGS + pd_args_1d + [fp]*Nfixed1d218 219 200 self.Iqxy = self.dll[generate.kernel_name(self.info, True)] 220 self.Iqxy.argtypes = IQXY_ARGS + pd_args_2d + [fp]*Nfixed2d 221 222 self.release() 201 self.Iq.argtypes = argtypes 202 self.Iqxy.argtypes = argtypes 223 203 224 204 def __getstate__(self): … … 229 209 self.dll = None 230 210 231 def __call__(self, q_vectors):211 def make_kernel(self, q_vectors): 232 212 q_input = PyInput(q_vectors, self.dtype) 233 213 if self.dll is None: self._load_dll() … … 272 252 """ 273 253 def __init__(self, kernel, model_info, q_input): 254 self.kernel = kernel 274 255 self.info = model_info 275 256 self.q_input = q_input 276 self.kernel = kernel 277 self.res = np.empty(q_input.nq, q_input.dtype) 278 dim = '2d' if q_input.is_2d else '1d' 279 self.fixed_pars = model_info['partype']['fixed-' + dim] 280 self.pd_pars = model_info['partype']['pd-' + dim] 281 282 # In dll kernel, but not in opencl kernel 283 self.p_res = self.res.ctypes.data 284 285 def __call__(self, fixed_pars, pd_pars, cutoff): 257 self.dtype = q_input.dtype 258 self.dim = '2d' if q_input.is_2d else '1d' 259 self.result = np.empty(q_input.nq+3, q_input.dtype) 260 261 def __call__(self, details, weights, values, cutoff): 286 262 real = (np.float32 if self.q_input.dtype == generate.F32 287 263 else np.float64 if self.q_input.dtype == generate.F64 288 264 else np.float128) 289 290 nq = c_int(self.q_input.nq) 291 if pd_pars: 292 cutoff = real(cutoff) 293 loops_N = [np.uint32(len(p[0])) for p in pd_pars] 294 loops = np.hstack(pd_pars) 295 loops = np.ascontiguousarray(loops.T, self.q_input.dtype).flatten() 296 p_loops = loops.ctypes.data 297 dispersed = [p_loops, cutoff] + loops_N 298 else: 299 dispersed = [] 300 fixed = [real(p) for p in fixed_pars] 301 args = self.q_input.q_pointers + [self.p_res, nq] + dispersed + fixed 302 #print(pars) 265 assert isinstance(details, generate.CoordinationDetails) 266 assert weights.dtype == real and values.dtype == real 267 268 start, stop = 0, details.total_pd 269 #print("in kerneldll") 270 #print("weights", weights) 271 #print("values", values) 272 args = [ 273 self.q_input.nq, # nq 274 start, # pd_start 275 stop, # pd_stop pd_stride[MAX_PD] 276 details.ctypes.data, # problem 277 weights.ctypes.data, # weights 278 values.ctypes.data, #pars 279 self.q_input.q.ctypes.data, #q 280 self.result.ctypes.data, # results 281 real(cutoff), # cutoff 282 ] 303 283 self.kernel(*args) 304 305 return self.res 284 return self.result[:-3] 306 285 307 286 def release(self): -
sasmodels/kernelpy.py
ra84a0ca r48fbd50 53 53 self.dtype = dtype 54 54 self.is_2d = (len(q_vectors) == 2) 55 self.q_vectors = [np.ascontiguousarray(q, self.dtype) for q in q_vectors] 56 self.q_pointers = [q.ctypes.data for q in self.q_vectors] 55 if self.is_2d: 56 self.q = np.empty((self.nq, 2), dtype=dtype) 57 self.q[:, 0] = q_vectors[0] 58 self.q[:, 1] = q_vectors[1] 59 else: 60 self.q = np.empty(self.nq, dtype=dtype) 61 self.q[:self.nq] = q_vectors[0] 57 62 58 63 def release(self): … … 60 65 Free resources associated with the model inputs. 61 66 """ 62 self.q _vectors = []67 self.q = None 63 68 64 69 class PyKernel(object): -
sasmodels/mixture.py
r72a081d r69aa451 14 14 import numpy as np 15 15 16 from . generate import process_parameters, COMMON_PARAMETERS, Parameter16 from .modelinfo import Parameter, ParameterTable 17 17 18 18 SCALE=0 … … 34 34 35 35 # Build new parameter list 36 pars = COMMON_PARAMETERS +[]36 pars = [] 37 37 for k, part in enumerate(parts): 38 38 # Parameter prefix per model, A_, B_, ... 39 # Note that prefix must also be applied to id and length_control 40 # to support vector parameters 39 41 prefix = chr(ord('A')+k) + '_' 40 for p in part['parameters']: 41 # No background on the individual mixture elements 42 if p.name == 'background': 43 continue 44 # TODO: promote Parameter to a full class 45 # this code knows too much about the implementation! 46 p = list(p) 47 if p[0] == 'scale': # allow model subtraction 48 p[3] = [-np.inf, np.inf] # limits 49 p[0] = prefix+p[0] # relabel parameters with A_, ... 42 pars.append(Parameter(prefix+'scale')) 43 for p in part['parameters'].kernel_pars: 44 p = copy(p) 45 p.name = prefix+p.name 46 p.id = prefix+p.id 47 if p.length_control is not None: 48 p.length_control = prefix+p.length_control 50 49 pars.append(p) 50 partable = ParameterTable(pars) 51 51 52 52 model_info = {} … … 58 58 model_info['docs'] = model_info['title'] 59 59 model_info['category'] = "custom" 60 model_info['parameters'] = par s60 model_info['parameters'] = partable 61 61 #model_info['single'] = any(part['single'] for part in parts) 62 62 model_info['structure_factor'] = False … … 67 67 # Remember the component info blocks so we can build the model 68 68 model_info['composition'] = ('mixture', parts) 69 process_parameters(model_info)70 return model_info71 69 72 70 -
sasmodels/model_test.py
ra84a0ca r48fbd50 51 51 52 52 from .core import list_models, load_model_info, build_model, HAVE_OPENCL 53 from .core import make_kernel,call_kernel, call_ER, call_VR53 from .core import call_kernel, call_ER, call_VR 54 54 from .exception import annotate_exception 55 55 … … 187 187 Qx, Qy = zip(*x) 188 188 q_vectors = [np.array(Qx), np.array(Qy)] 189 kernel = m ake_kernel(model,q_vectors)189 kernel = model.make_kernel(q_vectors) 190 190 actual = call_kernel(kernel, pars) 191 191 else: 192 192 q_vectors = [np.array(x)] 193 kernel = m ake_kernel(model,q_vectors)193 kernel = model.make_kernel(q_vectors) 194 194 actual = call_kernel(kernel, pars) 195 195 -
sasmodels/models/cylinder.c
r26141cb re9b1663d 3 3 double Iqxy(double qx, double qy, double sld, double solvent_sld, 4 4 double radius, double length, double theta, double phi); 5 6 #define INVALID(v) (v.radius<0 || v.length<0) 5 7 6 8 double form_volume(double radius, double length) … … 15 17 double length) 16 18 { 17 // TODO: return NaN if radius<0 or length<0?18 19 // precompute qr and qh to save time in the loop 19 20 const double qr = q*radius; … … 47 48 double phi) 48 49 { 49 // TODO: return NaN if radius<0 or length<0?50 50 double sn, cn; // slots to hold sincos function output 51 51 -
sasmodels/models/gel_fit.c
r30b4ddf r03cac08 1 double form_volume(void); 2 3 double Iq(double q, 4 double guinier_scale, 5 double lorentzian_scale, 6 double gyration_radius, 7 double fractal_exp, 8 double cor_length); 9 10 double Iqxy(double qx, double qy, 11 double guinier_scale, 12 double lorentzian_scale, 13 double gyration_radius, 14 double fractal_exp, 15 double cor_length); 16 17 static double _gel_fit_kernel(double q, 1 static double Iq(double q, 18 2 double guinier_scale, 19 3 double lorentzian_scale, … … 24 8 // Lorentzian Term 25 9 ////////////////////////double a(x[i]*x[i]*zeta*zeta); 26 double lorentzian_term = q*q*cor_length*cor_length;10 double lorentzian_term = square(q*cor_length); 27 11 lorentzian_term = 1.0 + ((fractal_exp + 1.0)/3.0)*lorentzian_term; 28 12 lorentzian_term = pow(lorentzian_term, (fractal_exp/2.0) ); … … 30 14 // Exponential Term 31 15 ////////////////////////double d(x[i]*x[i]*rg*rg); 32 double exp_term = q*q*gyration_radius*gyration_radius;16 double exp_term = square(q*gyration_radius); 33 17 exp_term = exp(-1.0*(exp_term/3.0)); 34 18 … … 37 21 return result; 38 22 } 39 double form_volume(void){40 // Unused, so free to return garbage.41 return NAN;42 }43 44 double Iq(double q,45 double guinier_scale,46 double lorentzian_scale,47 double gyration_radius,48 double fractal_exp,49 double cor_length)50 {51 return _gel_fit_kernel(q,52 guinier_scale,53 lorentzian_scale,54 gyration_radius,55 fractal_exp,56 cor_length);57 }58 59 // Iqxy is never called since no orientation or magnetic parameters.60 double Iqxy(double qx, double qy,61 double guinier_scale,62 double lorentzian_scale,63 double gyration_radius,64 double fractal_exp,65 double cor_length)66 {67 double q = sqrt(qx*qx + qy*qy);68 return _gel_fit_kernel(q,69 guinier_scale,70 lorentzian_scale,71 gyration_radius,72 fractal_exp,73 cor_length);74 }75 -
sasmodels/models/lib/Si.c
re7678b2 rba32cdd 1 // integral of sin(x)/x Taylor series approximated to w/i 0.1% 1 2 double Si(double x); 2 3 3 // integral of sin(x)/x Taylor series approximated to w/i 0.1%4 4 double Si(double x) 5 5 { -
sasmodels/models/lib/polevl.c
r0b05c24 rba32cdd 51 51 */ 52 52 53 double polevl( double x, constant double *coef, int N ) { 53 double polevl( double x, constant double *coef, int N ); 54 double p1evl( double x, constant double *coef, int N ); 55 56 57 double polevl( double x, constant double *coef, int N ) 58 { 54 59 55 60 int i = 0; … … 70 75 */ 71 76 72 double p1evl( double x, constant double *coef, int N ) { 77 double p1evl( double x, constant double *coef, int N ) 78 { 73 79 int i=0; 74 80 double ans = x+coef[i]; -
sasmodels/models/lib/sas_J0.c
ra776125 rba32cdd 49 49 Copyright 1984, 1987, 1989, 2000 by Stephen L. Moshier 50 50 */ 51 52 double sas_J0(double x); 51 53 52 54 /* Note: all coefficients satisfy the relative error criterion … … 177 179 }; 178 180 179 double sas_J0(double x) { 181 double sas_J0(double x) 182 { 180 183 181 184 //Cephes single precission -
sasmodels/models/lib/sas_J1.c
r19b9005 rba32cdd 39 39 Copyright 1984, 1987, 1989, 2000 by Stephen L. Moshier 40 40 */ 41 double sas_J1(double x); 42 double sas_J1c(double x); 41 43 42 44 constant double RPJ1[8] = { … … 135 137 }; 136 138 137 double sas_J1(double x) { 139 double sas_J1(double x) 140 { 138 141 139 142 //Cephes double pression function -
sasmodels/models/lib/sas_JN.c
ra776125 rba32cdd 47 47 Copyright 1984, 1987, 2000 by Stephen L. Moshier 48 48 */ 49 50 49 double sas_JN( int n, double x ); 51 50 52 double sas_JN( int n, double x ) { 51 double sas_JN( int n, double x ) 52 { 53 53 54 54 const double MACHEP = 1.11022302462515654042E-16; -
sasmodels/models/lib/sph_j1c.c
re6f1410 rba32cdd 7 7 * using double precision that are the source. 8 8 */ 9 double sph_j1c(double q); 9 10 10 11 // The choice of the number of terms in the series and the cutoff value for … … 43 44 #endif 44 45 45 double sph_j1c(double q);46 46 double sph_j1c(double q) 47 47 { -
sasmodels/models/lib/sphere_form.c
rad90df9 rba32cdd 1 inline double 2 sphere_volume(double radius) 1 double sphere_volume(double radius); 2 double sphere_form(double q, double radius, double sld, double solvent_sld); 3 4 double sphere_volume(double radius) 3 5 { 4 6 return M_4PI_3*cube(radius); 5 7 } 6 8 7 inline double 8 sphere_form(double q, double radius, double sld, double solvent_sld) 9 double sphere_form(double q, double radius, double sld, double solvent_sld) 9 10 { 10 11 const double fq = sphere_volume(radius) * sph_j1c(q*radius); -
sasmodels/models/lib/wrc_cyl.c
re7678b2 rba32cdd 2 2 Functions for WRC implementation of flexible cylinders 3 3 */ 4 double Sk_WR(double q, double L, double b); 5 6 4 7 static double 5 8 AlphaSquare(double x) … … 363 366 } 364 367 365 double Sk_WR(double q, double L, double b);366 368 double Sk_WR(double q, double L, double b) 367 369 { -
sasmodels/models/onion.c
rfdb1487 rce896fd 4 4 double thickness, double A) 5 5 { 6 const double vol = 4.0/3.0 * M_PI * r * r * r;6 const double vol = M_4PI_3 * cube(r); 7 7 const double qr = q * r; 8 8 const double alpha = A * r/thickness; … … 19 19 double sinqr, cosqr; 20 20 SINCOS(qr, sinqr, cosqr); 21 fun = -3.0 (21 fun = -3.0*( 22 22 ((alphasq - qrsq)*sinqr/qr - 2.0*alpha*cosqr) / sumsq 23 23 - (alpha*sinqr/qr - cosqr) … … 32 32 f_linear(double q, double r, double sld, double slope) 33 33 { 34 const double vol = 4.0/3.0 * M_PI * r * r * r;34 const double vol = M_4PI_3 * cube(r); 35 35 const double qr = q * r; 36 36 const double bes = sph_j1c(qr); … … 52 52 { 53 53 const double bes = sph_j1c(q * r); 54 const double vol = 4.0/3.0 * M_PI * r * r * r;54 const double vol = M_4PI_3 * cube(r); 55 55 return sld * vol * bes; 56 56 } … … 64 64 r += thickness[i]; 65 65 } 66 return 4.0/3.0 * M_PI * r * r * r;66 return M_4PI_3*cube(r); 67 67 } 68 68 69 69 static double 70 Iq(double q, double core_sld, double core_radius, double solvent_sld,71 double n, double in_sld[], double out_sld[], double thickness[],70 Iq(double q, double sld_core, double core_radius, double sld_solvent, 71 double n, double sld_in[], double sld_out[], double thickness[], 72 72 double A[]) 73 73 { 74 74 int i; 75 r = core_radius;76 f = f_constant(q, r, core_sld);75 double r = core_radius; 76 double f = f_constant(q, r, sld_core); 77 77 for (i=0; i<n; i++){ 78 78 const double r0 = r; … … 92 92 } 93 93 } 94 f -= f_constant(q, r, s olvent_sld);95 f2 = f * f * 1.0e-4;94 f -= f_constant(q, r, sld_solvent); 95 const double f2 = f * f * 1.0e-4; 96 96 97 97 return f2; -
sasmodels/models/rpa.py
raad336c re9b1663d 86 86 # ["name", "units", default, [lower, upper], "type","description"], 87 87 parameters = [ 88 ["case_num", CASES, 0, [0, 10], "", "Component organization"],88 ["case_num", "", 1, CASES, "", "Component organization"], 89 89 90 ["Na", "", 1000.0, [1, inf], "", "Degree of polymerization"], 91 ["Phia", "", 0.25, [0, 1], "", "volume fraction"], 92 ["va", "mL/mol", 100.0, [0, inf], "", "specific volume"], 93 ["La", "fm", 10.0, [-inf, inf], "", "scattering length"], 94 ["ba", "Ang", 5.0, [0, inf], "", "segment length"], 95 96 ["Nb", "", 1000.0, [1, inf], "", "Degree of polymerization"], 97 ["Phib", "", 0.25, [0, 1], "", "volume fraction"], 98 ["vb", "mL/mol", 100.0, [0, inf], "", "specific volume"], 99 ["Lb", "fm", 10.0, [-inf, inf], "", "scattering length"], 100 ["bb", "Ang", 5.0, [0, inf], "", "segment length"], 101 102 ["Nc", "", 1000.0, [1, inf], "", "Degree of polymerization"], 103 ["Phic", "", 0.25, [0, 1], "", "volume fraction"], 104 ["vc", "mL/mol", 100.0, [0, inf], "", "specific volume"], 105 ["Lc", "fm", 10.0, [-inf, inf], "", "scattering length"], 106 ["bc", "Ang", 5.0, [0, inf], "", "segment length"], 107 108 ["Nd", "", 1000.0, [1, inf], "", "Degree of polymerization"], 109 ["Phid", "", 0.25, [0, 1], "", "volume fraction"], 110 ["vd", "mL/mol", 100.0, [0, inf], "", "specific volume"], 111 ["Ld", "fm", 10.0, [-inf, inf], "", "scattering length"], 112 ["bd", "Ang", 5.0, [0, inf], "", "segment length"], 90 ["N[4]", "", 1000.0, [1, inf], "", "Degree of polymerization"], 91 ["Phi[4]", "", 0.25, [0, 1], "", "volume fraction"], 92 ["v[4]", "mL/mol", 100.0, [0, inf], "", "specific volume"], 93 ["L[4]", "fm", 10.0, [-inf, inf], "", "scattering length"], 94 ["b[4]", "Ang", 5.0, [0, inf], "", "segment length"], 113 95 114 96 ["Kab", "", -0.0004, [-inf, inf], "", "Interaction parameter"], -
sasmodels/models/spherical_sld.py
re42b0b9 rce896fd 163 163 title = "Sperical SLD intensity calculation" 164 164 description = """ 165 166 167 165 I(q) = 166 background = Incoherent background [1/cm] 167 """ 168 168 category = "sphere-based" 169 169 170 170 # pylint: disable=bad-whitespace, line-too-long 171 171 # ["name", "units", default, [lower, upper], "type", "description"], 172 parameters = [["n_shells", "", 1, [0, 9],"", "number of shells"],173 ["thick_inter[n ]", "Ang", 50, [-inf, inf],"", "intern layer thickness"],174 ["func_inter[n ]", "", 0, [0, 4],"", "Erf:0, RPower:1, LPower:2, RExp:3, LExp:4"],175 ["sld_core", "1e-6/Ang^2", 2.07, [-inf, inf],"", "sld function flat"],176 ["sld_solvent", "1e-6/Ang^2", 1.0, [-inf, inf],"", "sld function solvent"],177 ["sld_flat[n ]", "1e-6/Ang^2", 4.06, [-inf, inf],"", "sld function flat"],178 ["thick_inter[n ]", "Ang", 50.0, [0, inf], "", "intern layer thickness"],179 ["thick_flat[n ]", "Ang", 100.0, [0, inf], "", "flat layer_thickness"],180 ["inter_nu[n ]", "", 2.5, [-inf, inf],"", "steepness parameter"],181 ["npts_inter", "", 35, [0, 35],"", "number of points in each sublayer Must be odd number"],182 ["core_rad", "Ang", 50.0, [0, inf], "", "intern layer thickness"],172 parameters = [["n_shells", "", 1, [0, 9], "", "number of shells"], 173 ["thick_inter[n_shells]", "Ang", 50, [-inf, inf], "", "intern layer thickness"], 174 ["func_inter[n_shells]", "", 0, [0, 4], "", "Erf:0, RPower:1, LPower:2, RExp:3, LExp:4"], 175 ["sld_core", "1e-6/Ang^2", 2.07, [-inf, inf], "", "sld function flat"], 176 ["sld_solvent", "1e-6/Ang^2", 1.0, [-inf, inf], "", "sld function solvent"], 177 ["sld_flat[n_shells]", "1e-6/Ang^2", 4.06, [-inf, inf], "", "sld function flat"], 178 ["thick_inter[n_shells]", "Ang", 50.0, [0, inf], "", "intern layer thickness"], 179 ["thick_flat[n_shells]", "Ang", 100.0, [0, inf], "", "flat layer_thickness"], 180 ["inter_nu[n_shells]", "", 2.5, [-inf, inf], "", "steepness parameter"], 181 ["npts_inter", "", 35, [0, 35], "", "number of points in each sublayer Must be odd number"], 182 ["core_rad", "Ang", 50.0, [0, inf], "", "intern layer thickness"], 183 183 ] 184 184 # pylint: enable=bad-whitespace, line-too-long 185 185 186 #source = ["lib/librefl.c", "lib/sph_j1c.c", "spherical_sld.c"] 186 187 187 def Iq(q, *args, **kw): 188 188 return q 189 189 190 def Iqxy(qx, *args, **kw):191 return qx192 193 194 190 demo = dict( 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 191 n_shells=4, 192 scale=1.0, 193 solvent_sld=1.0, 194 background=0.0, 195 npts_inter=35.0, 196 func_inter_0=0, 197 nu_inter_0=2.5, 198 rad_core_0=50.0, 199 core0_sld=2.07, 200 thick_inter_0=50.0, 201 func_inter_1=0, 202 nu_inter_1=2.5, 203 thick_inter_1=50.0, 204 flat1_sld=4.0, 205 thick_flat_1=100.0, 206 func_inter_2=0, 207 nu_inter_2=2.5, 208 thick_inter_2=50.0, 209 flat2_sld=3.5, 210 thick_flat_2=100.0, 211 func_inter_3=0, 212 nu_inter_3=2.5, 213 thick_inter_3=50.0, 214 flat3_sld=4.0, 215 thick_flat_3=100.0, 216 func_inter_4=0, 217 nu_inter_4=2.5, 218 thick_inter_4=50.0, 219 flat4_sld=3.5, 220 thick_flat_4=100.0, 221 func_inter_5=0, 222 nu_inter_5=2.5, 223 thick_inter_5=50.0, 224 flat5_sld=4.0, 225 thick_flat_5=100.0, 226 func_inter_6=0, 227 nu_inter_6=2.5, 228 thick_inter_6=50.0, 229 flat6_sld=3.5, 230 thick_flat_6=100.0, 231 func_inter_7=0, 232 nu_inter_7=2.5, 233 thick_inter_7=50.0, 234 flat7_sld=4.0, 235 thick_flat_7=100.0, 236 func_inter_8=0, 237 nu_inter_8=2.5, 238 thick_inter_8=50.0, 239 flat8_sld=3.5, 240 thick_flat_8=100.0, 241 func_inter_9=0, 242 nu_inter_9=2.5, 243 thick_inter_9=50.0, 244 flat9_sld=4.0, 245 thick_flat_9=100.0, 246 func_inter_10=0, 247 nu_inter_10=2.5, 248 thick_inter_10=50.0, 249 flat10_sld=3.5, 250 thick_flat_10=100.0 251 ) 256 252 257 253 oldname = "SphereSLDModel" 258 254 oldpars = dict( 259 n_shells="n_shells", 260 scale="scale", 261 npts_inter='npts_inter', 262 solvent_sld='sld_solv', 263 func_inter_0='func_inter0', 264 nu_inter_0='nu_inter0', 265 background='background', 266 rad_core_0='rad_core0', 267 core0_sld='sld_core0', 268 thick_inter_0='thick_inter0', 269 func_inter_1='func_inter1', 270 nu_inter_1='nu_inter1', 271 thick_inter_1='thick_inter1', 272 flat1_sld='sld_flat1', 273 thick_flat_1='thick_flat1', 274 func_inter_2='func_inter2', 275 nu_inter_2='nu_inter2', 276 thick_inter_2='thick_inter2', 277 flat2_sld='sld_flat2', 278 thick_flat_2='thick_flat2', 279 func_inter_3='func_inter3', 280 nu_inter_3='nu_inter3', 281 thick_inter_3='thick_inter3', 282 flat3_sld='sld_flat3', 283 thick_flat_3='thick_flat3', 284 func_inter_4='func_inter4', 285 nu_inter_4='nu_inter4', 286 thick_inter_4='thick_inter4', 287 flat4_sld='sld_flat4', 288 thick_flat_4='thick_flat4', 289 func_inter_5='func_inter5', 290 nu_inter_5='nu_inter5', 291 thick_inter_5='thick_inter5', 292 flat5_sld='sld_flat5', 293 thick_flat_5='thick_flat5', 294 func_inter_6='func_inter6', 295 nu_inter_6='nu_inter6', 296 thick_inter_6='thick_inter6', 297 flat6_sld='sld_flat6', 298 thick_flat_6='thick_flat6', 299 func_inter_7='func_inter7', 300 nu_inter_7='nu_inter7', 301 thick_inter_7='thick_inter7', 302 flat7_sld='sld_flat7', 303 thick_flat_7='thick_flat7', 304 func_inter_8='func_inter8', 305 nu_inter_8='nu_inter8', 306 thick_inter_8='thick_inter8', 307 flat8_sld='sld_flat8', 308 thick_flat_8='thick_flat8', 309 func_inter_9='func_inter9', 310 nu_inter_9='nu_inter9', 311 thick_inter_9='thick_inter9', 312 flat9_sld='sld_flat9', 313 thick_flat_9='thick_flat9', 314 func_inter_10='func_inter10', 315 nu_inter_10='nu_inter10', 316 thick_inter_10='thick_inter10', 317 flat10_sld='sld_flat10', 318 thick_flat_10='thick_flat10') 255 n_shells="n_shells", 256 scale="scale", 257 background='background', 258 npts_inter='npts_inter', 259 solvent_sld='sld_solv', 260 261 func_inter_0='func_inter0', 262 nu_inter_0='nu_inter0', 263 rad_core_0='rad_core0', 264 core0_sld='sld_core0', 265 thick_inter_0='thick_inter0', 266 func_inter_1='func_inter1', 267 nu_inter_1='nu_inter1', 268 thick_inter_1='thick_inter1', 269 flat1_sld='sld_flat1', 270 thick_flat_1='thick_flat1', 271 func_inter_2='func_inter2', 272 nu_inter_2='nu_inter2', 273 thick_inter_2='thick_inter2', 274 flat2_sld='sld_flat2', 275 thick_flat_2='thick_flat2', 276 func_inter_3='func_inter3', 277 nu_inter_3='nu_inter3', 278 thick_inter_3='thick_inter3', 279 flat3_sld='sld_flat3', 280 thick_flat_3='thick_flat3', 281 func_inter_4='func_inter4', 282 nu_inter_4='nu_inter4', 283 thick_inter_4='thick_inter4', 284 flat4_sld='sld_flat4', 285 thick_flat_4='thick_flat4', 286 func_inter_5='func_inter5', 287 nu_inter_5='nu_inter5', 288 thick_inter_5='thick_inter5', 289 flat5_sld='sld_flat5', 290 thick_flat_5='thick_flat5', 291 func_inter_6='func_inter6', 292 nu_inter_6='nu_inter6', 293 thick_inter_6='thick_inter6', 294 flat6_sld='sld_flat6', 295 thick_flat_6='thick_flat6', 296 func_inter_7='func_inter7', 297 nu_inter_7='nu_inter7', 298 thick_inter_7='thick_inter7', 299 flat7_sld='sld_flat7', 300 thick_flat_7='thick_flat7', 301 func_inter_8='func_inter8', 302 nu_inter_8='nu_inter8', 303 thick_inter_8='thick_inter8', 304 flat8_sld='sld_flat8', 305 thick_flat_8='thick_flat8', 306 func_inter_9='func_inter9', 307 nu_inter_9='nu_inter9', 308 thick_inter_9='thick_inter9', 309 flat9_sld='sld_flat9', 310 thick_flat_9='thick_flat9', 311 func_inter_10='func_inter10', 312 nu_inter_10='nu_inter10', 313 thick_inter_10='thick_inter10', 314 flat10_sld='sld_flat10', 315 thick_flat_10='thick_flat10') 319 316 320 317 #TODO: Not working yet -
sasmodels/sasview_model.py
r787be86 rfb5914f 21 21 22 22 from . import core 23 from . import generate23 from . import weights 24 24 25 25 def standard_models(): … … 32 32 33 33 Returns a class that can be used directly as a sasview model.t 34 35 Defaults to using the new name for a model. Setting36 *namestyle='oldname'* will produce a class with a name37 compatible with SasView.38 34 """ 39 35 model_info = core.load_model_info(model_name) … … 56 52 """ 57 53 def __init__(self): 58 self._ kernel = None54 self._model = None 59 55 model_info = self._model_info 56 parameters = model_info['parameters'] 60 57 61 58 self.name = model_info['name'] 62 self.oldname = model_info['oldname']63 59 self.description = model_info['description'] 64 60 self.category = None 65 self.multiplicity_info = None 66 self.is_multifunc = False 61 #self.is_multifunc = False 62 for p in parameters.kernel_parameters: 63 if p.is_control: 64 profile_axes = model_info['profile_axes'] 65 self.multiplicity_info = [ 66 p.limits[1], p.name, p.choices, profile_axes[0] 67 ] 68 break 69 else: 70 self.multiplicity_info = [] 67 71 68 72 ## interpret the parameters … … 71 75 self.params = collections.OrderedDict() 72 76 self.dispersion = dict() 73 partype = model_info['partype'] 74 75 for p in model_info['parameters']: 77 78 self.orientation_params = [] 79 self.magnetic_params = [] 80 self.fixed = [] 81 for p in parameters.user_parameters(): 76 82 self.params[p.name] = p.default 77 83 self.details[p.name] = [p.units] + p.limits 78 79 for name in partype['pd-2d']: 80 self.dispersion[name] = { 81 'width': 0, 82 'npts': 35, 83 'nsigmas': 3, 84 'type': 'gaussian', 85 } 86 87 self.orientation_params = ( 88 partype['orientation'] 89 + [n + '.width' for n in partype['orientation']] 90 + partype['magnetic']) 91 self.magnetic_params = partype['magnetic'] 92 self.fixed = [n + '.width' for n in partype['pd-2d']] 84 if p.polydisperse: 85 self.dispersion[p.name] = { 86 'width': 0, 87 'npts': 35, 88 'nsigmas': 3, 89 'type': 'gaussian', 90 } 91 if p.type == 'orientation': 92 self.orientation_params.append(p.name) 93 self.orientation_params.append(p.name+".width") 94 self.fixed.append(p.name+".width") 95 if p.type == 'magnetic': 96 self.orientation_params.append(p.name) 97 self.magnetic_params.append(p.name) 98 self.fixed.append(p.name+".width") 99 93 100 self.non_fittable = [] 94 101 … … 109 116 def __get_state__(self): 110 117 state = self.__dict__.copy() 111 model_id = self._model_info['id']112 118 state.pop('_kernel') 113 119 # May need to reload model info on set state since it has pointers … … 118 124 def __set_state__(self, state): 119 125 self.__dict__ = state 120 self._ kernel = None126 self._model = None 121 127 122 128 def __str__(self): … … 207 213 def getDispParamList(self): 208 214 """ 209 Return a list of all availableparameters for the model215 Return a list of polydispersity parameters for the model 210 216 """ 211 217 # TODO: fix test so that parameter order doesn't matter 212 ret = ['%s.%s' % (d.lower(), p) 213 for d in self._model_info['partype']['pd-2d'] 214 for p in ('npts', 'nsigmas', 'width')] 218 ret = ['%s.%s' % (p.name.lower(), ext) 219 for p in self._model_info['parameters'].user_parameters() 220 for ext in ('npts', 'nsigmas', 'width') 221 if p.polydisperse] 215 222 #print(ret) 216 223 return ret … … 285 292 # Check whether we have a list of ndarrays [qx,qy] 286 293 qx, qy = qdist 287 partype = self._model_info['partype'] 288 if not partype['orientation'] and not partype['magnetic']: 294 if not self._model_info['parameters'].has_2d: 289 295 return self.calculate_Iq(np.sqrt(qx ** 2 + qy ** 2)) 290 296 else: … … 308 314 to the card for each evaluation. 309 315 """ 310 if self._ kernel is None:311 self._ kernel = core.build_model(self._model_info)316 if self._model is None: 317 self._model = core.build_model(self._model_info, platform='dll') 312 318 q_vectors = [np.asarray(q) for q in args] 313 fn = self._kernel(q_vectors) 314 pars = [self.params[v] for v in fn.fixed_pars] 315 pd_pars = [self._get_weights(p) for p in fn.pd_pars] 316 result = fn(pars, pd_pars, self.cutoff) 317 fn.q_input.release() 318 fn.release() 319 kernel = self._model.make_kernel(q_vectors) 320 pairs = [self._get_weights(p) 321 for p in self._model_info['parameters'].call_parameters] 322 details, weights, values = core.build_details(kernel, pairs) 323 return kernel(details, weights, values, cutoff=self.cutoff) 324 kernel.q_input.release() 325 kernel.release() 319 326 return result 320 327 … … 389 396 def _get_weights(self, par): 390 397 """ 391 Return dispersion weights 392 :param par parameter name 393 """ 394 from . import weights 395 396 relative = self._model_info['partype']['pd-rel'] 397 limits = self._model_info['limits'] 398 dis = self.dispersion[par] 399 value, weight = weights.get_weights( 400 dis['type'], dis['npts'], dis['width'], dis['nsigmas'], 401 self.params[par], limits[par], par in relative) 402 return value, weight / np.sum(weight) 403 398 Return dispersion weights for parameter 399 """ 400 if par.polydisperse: 401 dis = self.dispersion[par.name] 402 value, weight = weights.get_weights( 403 dis['type'], dis['npts'], dis['width'], dis['nsigmas'], 404 self.params[par.name], par.limits, par.relative_pd) 405 return value, weight / np.sum(weight) 406 else: 407 return [self.params[par.name]], [] 408 409 def test_model(): 410 Cylinder = make_class('cylinder') 411 cylinder = Cylinder() 412 return cylinder.evalDistribution([0.1,0.1]) 413 414 if __name__ == "__main__": 415 print("cylinder(0.1,0.1)=%g"%test_model())
Note: See TracChangeset
for help on using the changeset viewer.