source: sasmodels/sasmodels/kernelcl.py @ ef07e95

[14de349]1"""
[eafc9fa]2GPU driver for C kernels
[14de349]3
4There should be a single GPU environment running on the system.  This
5environment is constructed on the first call to :func:`env`, and the
6same environment is returned on each call.
7
8After retrieving the environment, the next step is to create the kernel.
9This is done with a call to :meth:`GpuEnvironment.make_kernel`, which
10returns the type of data used by the kernel.
11
12Next a :class:`GpuData` object should be created with the correct kind
13of data.  This data object can be used by multiple kernels, for example,
14if the target model is a weighted sum of multiple kernels.  The data
15should include any extra evaluation points required to compute the proper
16data smearing.  This need not match the square grid for 2D data if there
17is an index saying which q points are active.
18
19Together the GpuData, the program, and a device form a :class:`GpuKernel`.
20This kernel is used during fitting, receiving new sets of parameters and
21evaluating them.  The output value is stored in an output buffer on the
22devices, where it can be combined with other structure factors and form
23factors and have instrumental resolution effects applied.
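
A minimal sketch of that flow (here *source*, *model_info*, *q*,
*call_details* and *values* are assumed to be supplied by the caller,
e.g. from :mod:`generate` and :mod:`details`):

    from sasmodels import generate
    from sasmodels.kernelcl import environment, GpuModel

    env = environment()                   # shared GPU environment
    model = GpuModel(source, model_info, dtype=generate.F32)
    kernel = model.make_kernel([q])       # use [qx, qy] for 2-D data
    Iq = kernel(call_details, values, cutoff=0., magnetic=False)
    kernel.release()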

In order to use OpenCL for your models, you will need OpenCL drivers for
your machine.  These should be available from your graphics card vendor.
Intel provides OpenCL drivers for CPUs as well as their integrated HD
graphics chipsets.  AMD also provides drivers for Intel CPUs, but as of
this writing the performance is lacking compared to the Intel drivers.
NVidia combines drivers for CUDA and OpenCL in one package.  The result
is a bit messy if you have multiple drivers installed.  You can see which
drivers are available by starting python and running:

    import pyopencl as cl
    cl.create_some_context(interactive=True)

Once you have done that, it will show the available drivers from which
you can select.  It will then tell you that you can use these drivers
automatically by setting the SAS_OPENCL environment variable, which is
equivalent to PYOPENCL_CTX but does not conflict with other pyopencl
programs.

Some graphics cards have multiple devices on the same card.  You cannot
yet use both of them concurrently to evaluate models, but you can run
the program twice using a different device for each session.

OpenCL kernels are compiled when needed by the device driver.  Some
drivers produce compiler output even when there is no error.  You
can see the output by setting PYOPENCL_COMPILER_OUTPUT=1.  It should be
harmless, albeit annoying.
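
A sketch of setting these variables from within python before the first
kernel is built (the device string "0:1" is illustrative; use the value
reported by create_some_context on your system):

    import os
    os.environ["SAS_OPENCL"] = "0:1"              # platform:device selection
    os.environ["PYOPENCL_COMPILER_OUTPUT"] = "1"  # show compiler warnings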
"""
from __future__ import print_function

import os
import warnings
import logging
import time

import numpy as np  # type: ignore

try:
    #raise NotImplementedError("OpenCL not yet implemented for new kernel template")
    import pyopencl as cl  # type: ignore
    # Ask OpenCL for the default context so that we know that one exists
    cl.create_some_context(interactive=False)
except Exception as exc:
    warnings.warn("OpenCL startup failed with ***"
                  + str(exc) + "***; using C compiler instead")
    raise RuntimeError("OpenCL not available")

from pyopencl import mem_flags as mf
from pyopencl.characterize import get_fast_inaccurate_build_options

from . import generate
from .kernel import KernelModel, Kernel

# pylint: disable=unused-import
try:
    from typing import Tuple, Callable, Any
    from .modelinfo import ModelInfo
    from .details import CallDetails
except ImportError:
    pass
# pylint: enable=unused-import

# CRUFT: pyopencl < 2017.1  (as of June 2016 needs quotes around include path)
def quote_path(v):
    """
    Quote the path if it is not already quoted.

    If v starts with '-', then assume that it is a -I option or similar
    and do not quote it.  This is fragile:  -Ipath with space needs to
    be quoted.
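
    For example (illustrative; the last case is the fragile one noted above):

        quote_path('/usr/include')         ->  '/usr/include'
        quote_path('/path with space/inc') ->  '"/path with space/inc"'
        quote_path('-I/path with space')   ->  '-I/path with space'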
    """
    return '"'+v+'"' if v and ' ' in v and not v[0] in "\"'-" else v

def fix_pyopencl_include():
    """
    Monkey patch pyopencl to allow spaces in include file path.
    """
    import pyopencl as cl
    if hasattr(cl, '_DEFAULT_INCLUDE_OPTIONS'):
        cl._DEFAULT_INCLUDE_OPTIONS = [quote_path(v) for v in cl._DEFAULT_INCLUDE_OPTIONS]

fix_pyopencl_include()


# The max loops number is limited by the amount of local memory available
# on the device.  You don't want to make this value too big because it will
# waste resources, nor too small because it may interfere with users trying
# to do their polydispersity calculations.  A value of 2048 should be much
# larger than necessary given that cost grows as npts^k where k is the number
# of polydisperse parameters.
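# For example (illustrative): a model with k = 3 polydisperse parameters
# sampled at npts = 40 points each evaluates 40**3 = 64000 parameter
# combinations for every q value.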
MAX_LOOPS = 2048


# Pragmas for enabling OpenCL features.  Be sure to protect them so that they
# still compile even if OpenCL is not present.
_F16_PRAGMA = """\
#if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp16)
#  pragma OPENCL EXTENSION cl_khr_fp16: enable
#endif
"""

_F64_PRAGMA = """\
#if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp64)
#  pragma OPENCL EXTENSION cl_khr_fp64: enable
#endif
"""


ENV = None
def environment():
    # type: () -> "GpuEnvironment"
    """
    Returns a singleton :class:`GpuEnvironment`.

    This provides an OpenCL context and one queue per device.
    """
    global ENV
    if ENV is None:
        ENV = GpuEnvironment()
    return ENV

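# Example (a sketch): choose the highest precision the shared environment
# supports before requesting a build.
#     env = environment()
#     dtype = generate.F64 if env.has_type(generate.F64) else generate.F32
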
def has_type(device, dtype):
    # type: (cl.Device, np.dtype) -> bool
    """
    Return true if device supports the requested precision.
    """
    if dtype == generate.F32:
        return True
    elif dtype == generate.F64:
        return "cl_khr_fp64" in device.extensions
    elif dtype == generate.F16:
        return "cl_khr_fp16" in device.extensions
    else:
        return False
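
# Example (a sketch): keep only the devices in a context that support the
# requested precision.
#     f64_devices = [d for d in context.devices if has_type(d, generate.F64)]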

def get_warp(kernel, queue):
    # type: (cl.Kernel, cl.CommandQueue) -> int
    """
    Return the size of an execution batch for *kernel* running on *queue*.
    """
    return kernel.get_work_group_info(
        cl.kernel_work_group_info.PREFERRED_WORK_GROUP_SIZE_MULTIPLE,
        queue.device)

def _stretch_input(vector, dtype, extra=1e-3, boundary=32):
    # type: (np.ndarray, np.dtype, float, int) -> np.ndarray
    """
    Stretch an input vector to the correct boundary.

    Performance on the kernels can drop by a factor of two or more if the
    number of values to compute does not fall on a nice power of two
    boundary.  The trailing additional vector elements are given a
    value of *extra*, and so f(*extra*) will be computed for each of
    them.  The returned array will thus be a subset of the computed array.

    *boundary* should be a power of 2 which is at least 32 for good
    performance on current platforms (as of Jan 2015).  It should
    probably be the max of get_warp(kernel, queue) and
    device.min_data_type_align_size//4.
    """
    remainder = vector.size % boundary
    if remainder != 0:
        size = vector.size + (boundary - remainder)
        vector = np.hstack((vector, [extra] * (size - vector.size)))
    return np.ascontiguousarray(vector, dtype=dtype)
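
# Example (illustrative): a 100-point q vector is padded to the next multiple
# of 32, giving 128 points, with the trailing 28 entries set to *extra*.
#     q32 = _stretch_input(np.linspace(0.001, 0.5, 100), np.float32)
#     assert q32.size == 128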


def compile_model(context, source, dtype, fast=False):
    # type: (cl.Context, str, np.dtype, bool) -> cl.Program
    """
    Build a model to run on the gpu.

    Returns the compiled program.  Raises a RuntimeError if the requested
    *dtype* is not supported by all devices in the context.
    """
    dtype = np.dtype(dtype)
    if not all(has_type(d, dtype) for d in context.devices):
        raise RuntimeError("%s not supported for devices"%dtype)

    source_list = [generate.convert_type(source, dtype)]

    if dtype == generate.F16:
        source_list.insert(0, _F16_PRAGMA)
    elif dtype == generate.F64:
        source_list.insert(0, _F64_PRAGMA)

    # Note: USE_SINCOS makes the intel cpu slower under opencl
    if context.devices[0].type == cl.device_type.GPU:
        source_list.insert(0, "#define USE_SINCOS\n")
    options = (get_fast_inaccurate_build_options(context.devices[0])
               if fast else [])
    source = "\n".join(source_list)
    program = cl.Program(context, source).build(options=options)
    #print("done with "+program)
    return program


# for now, this returns one device in the context
# TODO: create a context that contains all devices on all platforms
class GpuEnvironment(object):
    """
    GPU context, with possibly many devices, and one queue per device.
    """
    def __init__(self):
        # type: () -> None
        # find gpu context
        #self.context = cl.create_some_context()

        self.context = None
        if 'SAS_OPENCL' in os.environ:
            # Copy SAS_OPENCL into PYOPENCL_CTX so pyopencl uses it when
            # creating the cl context.
            os.environ["PYOPENCL_CTX"] = os.environ["SAS_OPENCL"]
        if 'PYOPENCL_CTX' in os.environ:
            self._create_some_context()

        if not self.context:
            self.context = _get_default_context()

        # Byte boundary for data alignment
        #self.data_boundary = max(d.min_data_type_align_size
        #                         for d in self.context.devices)
        self.queues = [cl.CommandQueue(context, context.devices[0])
                       for context in self.context]
        self.compiled = {}

    def has_type(self, dtype):
        # type: (np.dtype) -> bool
        """
        Return True if any of the available devices supports the given type.
        """
        return any(has_type(d, dtype)
                   for context in self.context
                   for d in context.devices)

    def get_queue(self, dtype):
        # type: (np.dtype) -> cl.CommandQueue
        """
        Return a command queue for the kernels of type dtype.
        """
        for context, queue in zip(self.context, self.queues):
            if all(has_type(d, dtype) for d in context.devices):
                return queue

    def get_context(self, dtype):
        # type: (np.dtype) -> cl.Context
        """
        Return an OpenCL context for the kernels of type dtype.
        """
        for context in self.context:
            if all(has_type(d, dtype) for d in context.devices):
                return context

    def _create_some_context(self):
        # type: () -> cl.Context
        """
        Protected call to cl.create_some_context without interactivity.  Use
        this if SAS_OPENCL is set in the environment.  Sets the *context*
        attribute.
        """
        try:
            self.context = [cl.create_some_context(interactive=False)]
        except Exception as exc:
            warnings.warn(str(exc))
            warnings.warn("pyopencl.create_some_context() failed")
            warnings.warn("the environment variable 'SAS_OPENCL' might not be set correctly")

    def compile_program(self, name, source, dtype, fast, timestamp):
        # type: (str, str, np.dtype, bool, float) -> cl.Program
        """
        Compile the program for the device in the given context.
        """
        # Note: PyOpenCL caches based on md5 hash of source, options and device
        # so we don't really need to cache things for ourselves.  I'll do so
        # anyway just to save some data munging time.
        tag = generate.tag_source(source)
        key = "%s-%s-%s%s"%(name, dtype, tag, ("-fast" if fast else ""))
        # Check timestamp on program
        program, program_timestamp = self.compiled.get(key, (None, np.inf))
        if program_timestamp < timestamp:
            del self.compiled[key]
        if key not in self.compiled:
            context = self.get_context(dtype)
            logging.info("building %s for OpenCL %s", key,
                         context.devices[0].name.strip())
            program = compile_model(context, str(source), dtype, fast)
            self.compiled[key] = (program, timestamp)
        return program

def _get_default_context():
    # type: () -> List[cl.Context]
    """
    Get an OpenCL context, preferring GPU over CPU, and preferring Intel
    drivers over AMD drivers.
    """
    # Note: on mobile devices there is automatic clock scaling if either the
    # CPU or the GPU is underutilized; probably doesn't affect us, but if
    # it did, it would mean that putting a busy loop on the CPU while the GPU
    # is running may increase throughput.
    #
    # Macbook pro, base install:
    #     {'Apple': [Intel CPU, NVIDIA GPU]}
    # Macbook pro, base install:
    #     {'Apple': [Intel CPU, Intel GPU]}
    # 2 x nvidia 295 with Intel and NVIDIA opencl drivers installed
    #     {'Intel': [CPU], 'NVIDIA': [GPU, GPU, GPU, GPU]}
    gpu, cpu = None, None
    for platform in cl.get_platforms():
        # AMD provides a much weaker CPU driver than Intel/Apple, so avoid it.
        # If someone has bothered to install the AMD/NVIDIA drivers, prefer
        # them over the integrated graphics driver that may have been supplied
        # with the CPU chipset.
        preferred_cpu = (platform.vendor.startswith('Intel')
                         or platform.vendor.startswith('Apple'))
        preferred_gpu = (platform.vendor.startswith('Advanced')
                         or platform.vendor.startswith('NVIDIA'))
        for device in platform.get_devices():
            if device.type == cl.device_type.GPU:
                # If the existing type is not GPU then it will be CUSTOM
                # or ACCELERATOR so don't override it.
                if gpu is None or (preferred_gpu and gpu.type == cl.device_type.GPU):
                    gpu = device
            elif device.type == cl.device_type.CPU:
                if cpu is None or preferred_cpu:
                    cpu = device
            else:
                # System has cl.device_type.ACCELERATOR or cl.device_type.CUSTOM;
                # Intel Phi, for example, registers as an accelerator.
                # Since the user installed a custom device on their system
                # and went through the pain of sorting out OpenCL drivers for
                # it, let's assume they really do want to use it as their
                # primary compute device.
                gpu = device

    # Order the devices by gpu then by cpu; when searching for an available
    # device by data type they will be checked in this order, which means
    # that if the gpu supports double then the cpu will never be used (though
    # we may make it possible to explicitly request the cpu at some point).
    devices = []
    if gpu is not None:
        devices.append(gpu)
    if cpu is not None:
        devices.append(cpu)
    return [cl.Context([d]) for d in devices]


class GpuModel(KernelModel):
    """
    GPU wrapper for a single model.

    *source* and *model_info* are the model source and interface as returned
    from :func:`generate.make_source` and :func:`generate.make_model_info`.

    *dtype* is the desired model precision.  Any numpy dtype for single
    or double precision floats will do, such as 'f', 'float32' or 'single'
    for single and 'd', 'float64' or 'double' for double.  Double precision
    is an optional extension which may not be available on all devices.
    Half precision ('float16', 'half') may be available on some devices.
    Fast precision ('fast') is a loose version of single precision, indicating
    that the compiler is allowed to take shortcuts.
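
    A sketch of typical use (assuming *source* and *model_info* were built
    by :func:`generate.make_source` and :func:`generate.make_model_info`):

        model = GpuModel(source, model_info, dtype=generate.F32)
        kernel = model.make_kernel([q])   # use [qx, qy] for 2-D data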
    """
    def __init__(self, source, model_info, dtype=generate.F32, fast=False):
        # type: (Dict[str, str], ModelInfo, np.dtype, bool) -> None
        self.info = model_info
        self.source = source
        self.dtype = dtype
        self.fast = fast
        self.program = None # delay program creation
        self._kernels = None

    def __getstate__(self):
        # type: () -> Tuple[ModelInfo, str, np.dtype, bool]
        return self.info, self.source, self.dtype, self.fast

    def __setstate__(self, state):
        # type: (Tuple[ModelInfo, str, np.dtype, bool]) -> None
        self.info, self.source, self.dtype, self.fast = state
        self.program = None

    def make_kernel(self, q_vectors):
        # type: (List[np.ndarray]) -> "GpuKernel"
        if self.program is None:
            compile_program = environment().compile_program
            timestamp = generate.ocl_timestamp(self.info)
            self.program = compile_program(
                self.info.name,
                self.source['opencl'],
                self.dtype,
                self.fast,
                timestamp)
            variants = ['Iq', 'Iqxy', 'Imagnetic']
            names = [generate.kernel_name(self.info, k) for k in variants]
            kernels = [getattr(self.program, k) for k in names]
            self._kernels = dict((k, v) for k, v in zip(variants, kernels))
        is_2d = len(q_vectors) == 2
        if is_2d:
            kernel = [self._kernels['Iqxy'], self._kernels['Imagnetic']]
        else:
            kernel = [self._kernels['Iq']]*2
        return GpuKernel(kernel, self.dtype, self.info, q_vectors)

    def release(self):
        # type: () -> None
        """
        Free the resources associated with the model.
        """
        if self.program is not None:
            self.program = None

    def __del__(self):
        # type: () -> None
        self.release()

# TODO: check that we don't need a destructor for buffers which go out of scope
class GpuInput(object):
    """
    Make q data available to the gpu.

    *q_vectors* is a list of q vectors, which will be *[q]* for 1-D data,
    and *[qx, qy]* for 2-D data.  Internally, the vectors will be reallocated
    to get the best performance on OpenCL, which may involve shifting and
    stretching the array to better match the memory architecture.  Additional
    points will be evaluated with *q=1e-3*.

    *dtype* is the data type for the q vectors. The data type should be
    set to match that of the kernel, which is an attribute of
    :class:`GpuModel`.  Note that not all kernels support double
    precision, so even if the program was created for double precision,
    the *GpuModel.dtype* may be single precision.

    Call :meth:`release` when complete.  Even if not called directly, the
    buffer will be released when the data object is freed.
    """
    def __init__(self, q_vectors, dtype=generate.F32):
        # type: (List[np.ndarray], np.dtype) -> None
        # TODO: do we ever need double precision q?
        env = environment()
        self.nq = q_vectors[0].size
        self.dtype = np.dtype(dtype)
        self.is_2d = (len(q_vectors) == 2)
        # TODO: stretch input based on get_warp()
        # not doing it now since warp depends on kernel, which is not known
        # at this point, so instead using 32, which is good on the set of
        # architectures tested so far.
        if self.is_2d:
            # Note: 16 rather than 15 because result is 1 longer than input.
            width = ((self.nq+16)//16)*16
            self.q = np.empty((width, 2), dtype=dtype)
            self.q[:self.nq, 0] = q_vectors[0]
            self.q[:self.nq, 1] = q_vectors[1]
        else:
            # Note: 32 rather than 31 because result is 1 longer than input.
            width = ((self.nq+32)//32)*32
            self.q = np.empty(width, dtype=dtype)
            self.q[:self.nq] = q_vectors[0]
        self.global_size = [self.q.shape[0]]
        context = env.get_context(self.dtype)
        #print("creating inputs of size", self.global_size)
        self.q_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                             hostbuf=self.q)

    def release(self):
        # type: () -> None
        """
        Free the memory.
        """
        if self.q_b is not None:
            self.q_b.release()
            self.q_b = None

    def __del__(self):
        # type: () -> None
        self.release()

class GpuKernel(Kernel):
    """
    Callable SAS kernel.

    *kernel* is the pair of compiled OpenCL kernels to call (the plain
    kernel and the magnetic kernel), as built by :meth:`GpuModel.make_kernel`

    *model_info* is the module information

    *q_vectors* is the q vectors at which the kernel should be evaluated

    *dtype* is the kernel precision

    The resulting call method takes *call_details* describing the
    polydispersity loops, *values* containing the fixed parameter values
    and the (value, weight) vectors for the polydisperse parameters, and
    *cutoff*, which determines the integration limits: any points with
    combined weight less than *cutoff* will not be calculated.

    Call :meth:`release` when done with the kernel instance.
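
    A sketch of one evaluation (here *kernel* came from
    :meth:`GpuModel.make_kernel` and the argument values are illustrative):

        Iq = kernel(call_details, values, cutoff=0., magnetic=False)
        kernel.release()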
    """
    def __init__(self, kernel, dtype, model_info, q_vectors):
        # type: (List[cl.Kernel], np.dtype, ModelInfo, List[np.ndarray]) -> None
        q_input = GpuInput(q_vectors, dtype)
        self.kernel = kernel
        self.info = model_info
        self.dtype = dtype
        self.dim = '2d' if q_input.is_2d else '1d'
        # plus one for the normalization value
        self.result = np.empty(q_input.nq+1, dtype)

        # Inputs and outputs for each kernel call
        # Note: res may be shorter than res_b if global_size != nq
        env = environment()
        self.queue = env.get_queue(dtype)

        self.result_b = cl.Buffer(self.queue.context, mf.READ_WRITE,
                                  q_input.global_size[0] * dtype.itemsize)
        self.q_input = q_input # allocated by GpuInput above

        self._need_release = [self.result_b, self.q_input]
        self.real = (np.float32 if dtype == generate.F32
                     else np.float64 if dtype == generate.F64
                     else np.float16 if dtype == generate.F16
                     else np.float32)  # will never get here, so use np.float32

    def __call__(self, call_details, values, cutoff, magnetic):
        # type: (CallDetails, np.ndarray, float, bool) -> np.ndarray
        context = self.queue.context
        # Arrange data transfer to card
        details_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                              hostbuf=call_details.buffer)
        values_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                             hostbuf=values)

        kernel = self.kernel[1 if magnetic else 0]
        args = [
            np.uint32(self.q_input.nq), None, None,
            details_b, values_b, self.q_input.q_b, self.result_b,
            self.real(cutoff),
        ]
        #print("Calling OpenCL")
        #call_details.show(values)
        # Call kernel and retrieve results
        wait_for = None
        last_nap = time.perf_counter()  # time.clock() was removed in py3.8
        step = 1000000//self.q_input.nq + 1
        for start in range(0, call_details.num_eval, step):
            stop = min(start + step, call_details.num_eval)
            #print("queuing",start,stop)
            args[1:3] = [np.int32(start), np.int32(stop)]
            wait_for = [kernel(self.queue, self.q_input.global_size, None,
                               *args, wait_for=wait_for)]
            if stop < call_details.num_eval:
                # Allow other processes to run
                wait_for[0].wait()
                current_time = time.perf_counter()
                if current_time - last_nap > 0.5:
                    time.sleep(0.05)
                    last_nap = current_time
        cl.enqueue_copy(self.queue, self.result, self.result_b)
        #print("result", self.result)

        # Free buffers
        for v in (details_b, values_b):
            if v is not None:
                v.release()

        pd_norm = self.result[self.q_input.nq]
        scale = values[0]/(pd_norm if pd_norm != 0.0 else 1.0)
        background = values[1]
        #print("scale",scale,values[0],self.result[self.q_input.nq],background)
        return scale*self.result[:self.q_input.nq] + background
        # return self.result[:self.q_input.nq]

    def release(self):
        # type: () -> None
        """
        Release resources associated with the kernel.
        """
        for v in self._need_release:
            v.release()
        self._need_release = []

    def __del__(self):
        # type: () -> None
        self.release()