source: sasmodels/sasmodels/kernelcl.py @ 300a2f7

[14de349]1"""
[eafc9fa]2GPU driver for C kernels
[14de349]3
4There should be a single GPU environment running on the system.  This
5environment is constructed on the first call to :func:`env`, and the
6same environment is returned on each call.
7
8After retrieving the environment, the next step is to create the kernel.
9This is done with a call to :meth:`GpuEnvironment.make_kernel`, which
10returns the type of data used by the kernel.
11
12Next a :class:`GpuData` object should be created with the correct kind
13of data.  This data object can be used by multiple kernels, for example,
14if the target model is a weighted sum of multiple kernels.  The data
15should include any extra evaluation points required to compute the proper
16data smearing.  This need not match the square grid for 2D data if there
17is an index saying which q points are active.
18
19Together the GpuData, the program, and a device form a :class:`GpuKernel`.
20This kernel is used during fitting, receiving new sets of parameters and
21evaluating them.  The output value is stored in an output buffer on the
22devices, where it can be combined with other structure factors and form
23factors and have instrumental resolution effects applied.
[92da231]24
25In order to use OpenCL for your models, you will need OpenCL drivers for
26your machine.  These should be available from your graphics card vendor.
27Intel provides OpenCL drivers for CPUs as well as their integrated HD
28graphics chipsets.  AMD also provides drivers for Intel CPUs, but as of
29this writing the performance is lacking compared to the Intel drivers.
30NVidia combines drivers for CUDA and OpenCL in one package.  The result
31is a bit messy if you have multiple drivers installed.  You can see which
32drivers are available by starting python and running:
33
34    import pyopencl as cl
35    cl.create_some_context(interactive=True)
36
37Once you have done that, it will show the available drivers which you
38can select.  It will then tell you that you can use these drivers
39automatically by setting the PYOPENCL_CTX environment variable.
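
For example, if the second listed device is the one you want (the device
index and the script name here are only placeholders for illustration):

    PYOPENCL_CTX=1 python your_script.py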

Some graphics cards have multiple devices on the same card.  You cannot
yet use both of them concurrently to evaluate models, but you can run
the program twice using a different device for each session.

OpenCL kernels are compiled when needed by the device driver.  Some
drivers produce compiler output even when there is no error.  You
can see the output by setting PYOPENCL_COMPILER_OUTPUT=1.  It should be
harmless, albeit annoying.
"""
from __future__ import print_function

import os
import warnings
import logging

import numpy as np  # type: ignore

try:
    #raise NotImplementedError("OpenCL not yet implemented for new kernel template")
    import pyopencl as cl  # type: ignore
    # Ask OpenCL for the default context so that we know that one exists
    cl.create_some_context(interactive=False)
except Exception as exc:
    warnings.warn("OpenCL startup failed with ***"
                  + str(exc) + "***; using C compiler instead")
    raise RuntimeError("OpenCL not available")

from pyopencl import mem_flags as mf
from pyopencl.characterize import get_fast_inaccurate_build_options

from . import generate
from .kernel import KernelModel, Kernel

try:
    from typing import Tuple, Callable, Any
    from .modelinfo import ModelInfo
    from .details import CallDetails
except ImportError:
    pass

# CRUFT: pyopencl < 2017.1  (as of June 2016 needs quotes around include path)
def quote_path(v):
    """
    Quote the path if it is not already quoted.

    If v starts with '-', then assume that it is a -I option or similar
    and do not quote it.  This is fragile: -Ipath with a space in the
    path still needs to be quoted.
    """
    return '"'+v+'"' if v and ' ' in v and not v[0] in "\"'-" else v

def fix_pyopencl_include():
    """
    Monkey-patch pyopencl to allow spaces in the include file path.
    """
    import pyopencl as cl
    if hasattr(cl, '_DEFAULT_INCLUDE_OPTIONS'):
        cl._DEFAULT_INCLUDE_OPTIONS = [
            quote_path(v) for v in cl._DEFAULT_INCLUDE_OPTIONS
        ]

fix_pyopencl_include()


# The max loops number is limited by the amount of local memory available
# on the device.  You don't want to make this value too big because it will
# waste resources, nor too small because it may interfere with users trying
# to do their polydispersity calculations.  A value of 1024 should be much
# larger than necessary given that cost grows as npts^k where k is the number
# of polydisperse parameters.
MAX_LOOPS = 2048


# Pragmas for enabling OpenCL features.  Be sure to protect them so that
# they still compile even if OpenCL is not present.
_F16_PRAGMA = """\
#if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp16)
#  pragma OPENCL EXTENSION cl_khr_fp16: enable
#endif
"""

_F64_PRAGMA = """\
#if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp64)
#  pragma OPENCL EXTENSION cl_khr_fp64: enable
#endif
"""

126
[14de349]127ENV = None
128def environment():
[dd7fc12]129    # type: () -> "GpuEnvironment"
[14de349]130    """
131    Returns a singleton :class:`GpuEnvironment`.
132
133    This provides an OpenCL context and one queue per device.
134    """
135    global ENV
136    if ENV is None:
137        ENV = GpuEnvironment()
138    return ENV
139
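# A usage sketch: the environment is created lazily on the first call and
# shared thereafter, so repeated calls are cheap.
#     env = environment()
#     queue = env.get_queue(np.dtype('float32'))
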
def has_type(device, dtype):
    # type: (cl.Device, np.dtype) -> bool
    """
    Return True if the device supports the requested precision.
    """
    if dtype == generate.F32:
        return True
    elif dtype == generate.F64:
        return "cl_khr_fp64" in device.extensions
    elif dtype == generate.F16:
        return "cl_khr_fp16" in device.extensions
    else:
        return False
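
# For example, has_type(device, generate.F64) simply checks the device's
# extension string for cl_khr_fp64, so it returns False on devices without
# double-precision support (common on older integrated GPUs).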

def get_warp(kernel, queue):
    # type: (cl.Kernel, cl.CommandQueue) -> int
    """
    Return the size of an execution batch for *kernel* running on *queue*.
    """
    return kernel.get_work_group_info(
        cl.kernel_work_group_info.PREFERRED_WORK_GROUP_SIZE_MULTIPLE,
        queue.device)

def _stretch_input(vector, dtype, extra=1e-3, boundary=32):
    # type: (np.ndarray, np.dtype, float, int) -> np.ndarray
    """
    Stretch an input vector to the correct boundary.

    Performance on the kernels can drop by a factor of two or more if the
    number of values to compute does not fall on a nice power of two
    boundary.  The trailing additional vector elements are given a
    value of *extra*, and so f(*extra*) will be computed for each of
    them.  The desired values will thus be a subset of the computed array.

    *boundary* should be a power of 2 which is at least 32 for good
    performance on current platforms (as of Jan 2015).  It should
    probably be the max of get_warp(kernel, queue) and
    device.min_data_type_align_size//4.
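
    For example (sizes assumed for illustration): a 1000-point vector with
    *boundary=32* has 1000 % 32 == 8 trailing values, so 24 copies of
    *extra* are appended, giving a padded 1024-point vector.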
178    """
[c85db69]179    remainder = vector.size % boundary
[f5b9a6b]180    if remainder != 0:
181        size = vector.size + (boundary - remainder)
[c85db69]182        vector = np.hstack((vector, [extra] * (size - vector.size)))
[14de349]183    return np.ascontiguousarray(vector, dtype=dtype)


def compile_model(context, source, dtype, fast=False):
    # type: (cl.Context, str, np.dtype, bool) -> cl.Program
    """
    Build a model to run on the gpu.

    Returns the compiled program.  A RuntimeError is raised if the
    requested *dtype* is not supported by every device in the context
    (for example, float64 on devices without the cl_khr_fp64 extension).
    """
    dtype = np.dtype(dtype)
    if not all(has_type(d, dtype) for d in context.devices):
        raise RuntimeError("%s not supported for devices"%dtype)

    source_list = [generate.convert_type(source, dtype)]

    if dtype == generate.F16:
        source_list.insert(0, _F16_PRAGMA)
    elif dtype == generate.F64:
        source_list.insert(0, _F64_PRAGMA)

    # Note: USE_SINCOS makes the intel cpu slower under opencl
    if context.devices[0].type == cl.device_type.GPU:
        source_list.insert(0, "#define USE_SINCOS\n")
    options = (get_fast_inaccurate_build_options(context.devices[0])
               if fast else [])
    source = "\n".join(source_list)
    program = cl.Program(context, source).build(options=options)
    #print("done with "+program)
    return program
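
# A usage sketch (the source string is assumed to come from
# generate.make_source, as elsewhere in this module):
#     context = environment().get_context(np.dtype('float32'))
#     program = compile_model(context, source, np.dtype('float32'), fast=True)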


# for now, this returns one device in the context
# TODO: create a context that contains all devices on all platforms
class GpuEnvironment(object):
    """
    GPU context, with possibly many devices, and one queue per device.
    """
    def __init__(self):
        # type: () -> None
        # find gpu context
        #self.context = cl.create_some_context()

        self.context = None
        if 'PYOPENCL_CTX' in os.environ:
            self._create_some_context()

        if not self.context:
            self.context = _get_default_context()

        # Byte boundary for data alignment
        #self.data_boundary = max(d.min_data_type_align_size
        #                         for d in self.context.devices)
        self.queues = [cl.CommandQueue(context, context.devices[0])
                       for context in self.context]
        self.compiled = {}

    def has_type(self, dtype):
        # type: (np.dtype) -> bool
        """
        Return True if any of the available devices support the given type.
        """
        return any(has_type(d, dtype)
                   for context in self.context
                   for d in context.devices)

    def get_queue(self, dtype):
        # type: (np.dtype) -> cl.CommandQueue
        """
        Return a command queue for the kernels of type dtype.
        """
        for context, queue in zip(self.context, self.queues):
            if all(has_type(d, dtype) for d in context.devices):
                return queue

    def get_context(self, dtype):
        # type: (np.dtype) -> cl.Context
        """
        Return an OpenCL context for the kernels of type dtype.
        """
        for context in self.context:
            if all(has_type(d, dtype) for d in context.devices):
                return context

    def _create_some_context(self):
        # type: () -> cl.Context
        """
        Protected call to cl.create_some_context without interactivity.  Use
        this if PYOPENCL_CTX is set in the environment.  Sets the *context*
        attribute.
        """
        try:
            self.context = [cl.create_some_context(interactive=False)]
        except Exception as exc:
            warnings.warn(str(exc))
            warnings.warn("pyopencl.create_some_context() failed")
            warnings.warn("the environment variable 'PYOPENCL_CTX' might "
                          "not be set correctly")

    def compile_program(self, name, source, dtype, fast, timestamp):
        # type: (str, str, np.dtype, bool, float) -> cl.Program
        """
        Compile the program for the device in the given context.
        """
        # Note: PyOpenCL caches based on md5 hash of source, options and
        # device, so we don't really need to cache things for ourselves.
        # I'll do so anyway just to save some data munging time.
        key = "%s-%s%s"%(name, dtype, ("-fast" if fast else ""))
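        # e.g. "cylinder-float32-fast" for a fast single-precision build
        # (the model name shown is illustrative).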
        # Check timestamp on program
        program, program_timestamp = self.compiled.get(key, (None, np.inf))
        if program_timestamp < timestamp:
            del self.compiled[key]
        if key not in self.compiled:
            context = self.get_context(dtype)
            logging.info("building %s for OpenCL %s", key,
                         context.devices[0].name.strip())
            program = compile_model(self.get_context(dtype),
                                    str(source), dtype, fast)
            self.compiled[key] = (program, timestamp)
        return program

def _get_default_context():
    # type: () -> List[cl.Context]
    """
    Get an OpenCL context, preferring GPU over CPU, and preferring Intel
    drivers over AMD drivers.
    """
    # Note: on mobile devices there is automatic clock scaling if either the
    # CPU or the GPU is underutilized; this probably doesn't affect us, but
    # if it did, it would mean that putting a busy loop on the CPU while the
    # GPU is running may increase throughput.
    #
    # MacBook Pro, base install:
    #     {'Apple': [Intel CPU, NVIDIA GPU]}
    # MacBook Pro, base install:
    #     {'Apple': [Intel CPU, Intel GPU]}
    # 2 x NVIDIA 295 with Intel and NVIDIA opencl drivers installed
    #     {'Intel': [CPU], 'NVIDIA': [GPU, GPU, GPU, GPU]}
    gpu, cpu = None, None
    for platform in cl.get_platforms():
        # AMD provides a much weaker CPU driver than Intel/Apple, so avoid it.
        # If someone has bothered to install the AMD/NVIDIA drivers, prefer
        # them over the integrated graphics driver that may have been supplied
        # with the CPU chipset.
        preferred_cpu = (platform.vendor.startswith('Intel')
                         or platform.vendor.startswith('Apple'))
        preferred_gpu = (platform.vendor.startswith('Advanced')
                         or platform.vendor.startswith('NVIDIA'))
        for device in platform.get_devices():
            if device.type == cl.device_type.GPU:
                # If the existing type is not GPU then it will be CUSTOM
                # or ACCELERATOR, so don't override it.
                if gpu is None or (preferred_gpu and gpu.type == cl.device_type.GPU):
                    gpu = device
            elif device.type == cl.device_type.CPU:
                if cpu is None or preferred_cpu:
                    cpu = device
            else:
                # System has cl.device_type.ACCELERATOR or cl.device_type.CUSTOM;
                # an Intel Phi, for example, registers as an accelerator.
                # Since the user installed a custom device on their system
                # and went through the pain of sorting out OpenCL drivers for
                # it, let's assume they really do want to use it as their
                # primary compute device.
                gpu = device

    # Order the devices by gpu then by cpu; when searching for an available
    # device by data type they will be checked in this order, which means
    # that if the gpu supports double then the cpu will never be used (though
    # we may make it possible to explicitly request the cpu at some point).
    devices = []
    if gpu is not None:
        devices.append(gpu)
    if cpu is not None:
        devices.append(cpu)
    return [cl.Context([d]) for d in devices]


class GpuModel(KernelModel):
    """
    GPU wrapper for a single model.

    *source* and *model_info* are the model source and interface as returned
    from :func:`generate.make_source` and :func:`generate.make_model_info`.

    *dtype* is the desired model precision.  Any numpy dtype for single
    or double precision floats will do, such as 'f', 'float32' or 'single'
    for single and 'd', 'float64' or 'double' for double.  Double precision
    is an optional extension which may not be available on all devices.
    Half precision ('float16', 'half') may be available on some devices.
    Fast precision ('fast') is a loose version of single precision, indicating
    that the compiler is allowed to take shortcuts.
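
    A usage sketch (assuming *source* and *model_info* were produced by the
    :mod:`generate` helpers named above; the q array is illustrative):

        model = GpuModel(source, model_info, dtype=generate.F32)
        kernel = model.make_kernel([q])  # q is a 1-D numpy array
        Iq = kernel(call_details, values, cutoff=1e-5, magnetic=False)
        kernel.release()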
[14de349]376    """
    def __init__(self, source, model_info, dtype=generate.F32, fast=False):
        # type: (Dict[str, str], ModelInfo, np.dtype, bool) -> None
        self.info = model_info
        self.source = source
        self.dtype = dtype
        self.fast = fast
        self.program = None  # delay program creation
        self._kernels = None

    def __getstate__(self):
        # type: () -> Tuple[ModelInfo, str, np.dtype, bool]
        return self.info, self.source, self.dtype, self.fast

    def __setstate__(self, state):
        # type: (Tuple[ModelInfo, str, np.dtype, bool]) -> None
        self.info, self.source, self.dtype, self.fast = state
        self.program = None

    def make_kernel(self, q_vectors):
        # type: (List[np.ndarray]) -> "GpuKernel"
        if self.program is None:
            compile_program = environment().compile_program
            timestamp = generate.timestamp(self.info)
            self.program = compile_program(
                self.info.name,
                self.source['opencl'],
                self.dtype,
                self.fast,
                timestamp)
            variants = ['Iq', 'Iqxy', 'Imagnetic']
            names = [generate.kernel_name(self.info, k) for k in variants]
            kernels = [getattr(self.program, k) for k in names]
            self._kernels = dict((k, v) for k, v in zip(variants, kernels))
        is_2d = len(q_vectors) == 2
        if is_2d:
            kernel = [self._kernels['Iqxy'], self._kernels['Imagnetic']]
        else:
            kernel = [self._kernels['Iq']]*2
        return GpuKernel(kernel, self.dtype, self.info, q_vectors)

    def release(self):
        # type: () -> None
        """
        Free the resources associated with the model.
        """
        if self.program is not None:
            self.program = None

    def __del__(self):
        # type: () -> None
        self.release()

# TODO: check that we don't need a destructor for buffers which go out of scope
class GpuInput(object):
    """
    Make q data available to the gpu.

    *q_vectors* is a list of q vectors, which will be *[q]* for 1-D data,
    and *[qx, qy]* for 2-D data.  Internally, the vectors will be reallocated
    to get the best performance on OpenCL, which may involve shifting and
    stretching the array to better match the memory architecture.  Additional
    points will be evaluated with *q=1e-3*.
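
    For example (sizes assumed for illustration), 1000 1-D points are padded
    to a 1024-point buffer, and the extra points are evaluated at *q=1e-3*.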

    *dtype* is the data type for the q vectors. The data type should be
    set to match that of the kernel, which is an attribute of
    :class:`GpuModel`.  Note that not all kernels support double
    precision, so even if the program was created for double precision,
    the *GpuModel.dtype* may be single precision.

    Call :meth:`release` when complete.  Even if not called directly, the
    buffer will be released when the data object is freed.
    """
    def __init__(self, q_vectors, dtype=generate.F32):
        # type: (List[np.ndarray], np.dtype) -> None
        # TODO: do we ever need double precision q?
        env = environment()
        self.nq = q_vectors[0].size
        self.dtype = np.dtype(dtype)
        self.is_2d = (len(q_vectors) == 2)
        # TODO: stretch input based on get_warp()
        # not doing it now since warp depends on kernel, which is not known
        # at this point, so instead using 32, which is good on the set of
        # architectures tested so far.
        if self.is_2d:
            # Note: 17 rather than 15 because the result is 2 elements
            # longer than the input.
            width = ((self.nq+17)//16)*16
            self.q = np.empty((width, 2), dtype=dtype)
            self.q[:self.nq, 0] = q_vectors[0]
            self.q[:self.nq, 1] = q_vectors[1]
        else:
            # Note: 33 rather than 31 because the result is 2 elements
            # longer than the input.
            width = ((self.nq+33)//32)*32
            self.q = np.empty(width, dtype=dtype)
            self.q[:self.nq] = q_vectors[0]
        self.global_size = [self.q.shape[0]]
        context = env.get_context(self.dtype)
        #print("creating inputs of size", self.global_size)
        self.q_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                             hostbuf=self.q)

    def release(self):
        # type: () -> None
        """
        Free the memory.
        """
        if self.q_b is not None:
            self.q_b.release()
            self.q_b = None

    def __del__(self):
        # type: () -> None
        self.release()

class GpuKernel(Kernel):
    """
    Callable SAS kernel.

    *kernel* is the list of compiled OpenCL kernels to call (the plain and
    magnetic variants)

    *model_info* is the module information

    *q_vectors* is the q vectors at which the kernel should be evaluated

    *dtype* is the kernel precision

    The resulting call method takes the *pars*, a list of values for
    the fixed parameters to the kernel, and *pd_pars*, a list of
    (value, weight) vectors for the polydisperse parameters.  *cutoff*
    determines the integration limits: any points with combined weight
    less than *cutoff* will not be calculated.

    Call :meth:`release` when done with the kernel instance.
    """
    def __init__(self, kernel, dtype, model_info, q_vectors):
        # type: (List[cl.Kernel], np.dtype, ModelInfo, List[np.ndarray]) -> None
        q_input = GpuInput(q_vectors, dtype)
        self.kernel = kernel
        self.info = model_info
        self.dtype = dtype
        self.dim = '2d' if q_input.is_2d else '1d'
        # plus three for the normalization values
        self.result = np.empty(q_input.nq+3, dtype)

        # Inputs and outputs for each kernel call
        # Note: res may be shorter than res_b if global_size != nq
        env = environment()
        self.queue = env.get_queue(dtype)

        self.result_b = cl.Buffer(self.queue.context, mf.READ_WRITE,
                                  q_input.global_size[0] * dtype.itemsize)
        self.q_input = q_input  # allocated by GpuInput above

        self._need_release = [self.result_b, self.q_input]
        self.real = (np.float32 if dtype == generate.F32
                     else np.float64 if dtype == generate.F64
                     else np.float16 if dtype == generate.F16
                     else np.float32)  # will never get here, so use np.float32

    def __call__(self, call_details, values, cutoff, magnetic):
        # type: (CallDetails, np.ndarray, float, bool) -> np.ndarray
        context = self.queue.context
        # Arrange data transfer to card
        details_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                              hostbuf=call_details.buffer)
        values_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                             hostbuf=values)

        kernel = self.kernel[1 if magnetic else 0]
        args = [
            np.uint32(self.q_input.nq), None, None,
            details_b, values_b, self.q_input.q_b, self.result_b,
            self.real(cutoff),
        ]
        #print("Calling OpenCL")
        #call_details.show(values)
        # Call kernel and retrieve results
        last_call = None
        step = 100
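        # Enqueueing the evaluation in chunks of *step* polydispersity points
        # keeps each kernel launch short (the assumed rationale: very long
        # launches can trigger GPU driver watchdog timeouts); each chunk
        # waits on the previous one via *wait_for*.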
        for start in range(0, call_details.num_eval, step):
            stop = min(start + step, call_details.num_eval)
            #print("queuing", start, stop)
            args[1:3] = [np.int32(start), np.int32(stop)]
            last_call = [kernel(self.queue, self.q_input.global_size,
                                None, *args, wait_for=last_call)]
        cl.enqueue_copy(self.queue, self.result, self.result_b)
        #print("result", self.result)

        # Free buffers
        for v in (details_b, values_b):
            if v is not None:
                v.release()

        scale = values[0]/self.result[self.q_input.nq]
        background = values[1]
        #print("scale", scale, values[0], self.result[self.q_input.nq])
        return scale*self.result[:self.q_input.nq] + background
        # return self.result[:self.q_input.nq]

    def release(self):
        # type: () -> None
        """
        Release resources associated with the kernel.
        """
        for v in self._need_release:
            v.release()
        self._need_release = []

    def __del__(self):
        # type: () -> None
        self.release()