source: sasmodels/sasmodels/kernelcl.py @ 95f62aa

Last change on this file: 95f62aa, checked in by Paul Kienzle <pkienzle@…>, 6 years ago

store OpenCL objects in environment so they get recreated when the environment is reset

1"""
2GPU driver for C kernels
3
4There should be a single GPU environment running on the system.  This
5environment is constructed on the first call to :func:`env`, and the
6same environment is returned on each call.
7
8After retrieving the environment, the next step is to create the kernel.
9This is done with a call to :meth:`GpuEnvironment.make_kernel`, which
10returns the type of data used by the kernel.
11
12Next a :class:`GpuData` object should be created with the correct kind
13of data.  This data object can be used by multiple kernels, for example,
14if the target model is a weighted sum of multiple kernels.  The data
15should include any extra evaluation points required to compute the proper
16data smearing.  This need not match the square grid for 2D data if there
17is an index saying which q points are active.
18
19Together the GpuData, the program, and a device form a :class:`GpuKernel`.
20This kernel is used during fitting, receiving new sets of parameters and
21evaluating them.  The output value is stored in an output buffer on the
22devices, where it can be combined with other structure factors and form
23factors and have instrumental resolution effects applied.
24
25In order to use OpenCL for your models, you will need OpenCL drivers for
26your machine.  These should be available from your graphics card vendor.
27Intel provides OpenCL drivers for CPUs as well as their integrated HD
28graphics chipsets.  AMD also provides drivers for Intel CPUs, but as of
29this writing the performance is lacking compared to the Intel drivers.
30NVidia combines drivers for CUDA and OpenCL in one package.  The result
31is a bit messy if you have multiple drivers installed.  You can see which
32drivers are available by starting python and running:
33
34    import pyopencl as cl
35    cl.create_some_context(interactive=True)
36
37Once you have done that, it will show the available drivers which you
38can select.  It will then tell you that you can use these drivers
39automatically by setting the SAS_OPENCL environment variable, which is
40PYOPENCL_CTX equivalent but not conflicting with other pyopnecl programs.
41
42Some graphics cards have multiple devices on the same card.  You cannot
43yet use both of them concurrently to evaluate models, but you can run
44the program twice using a different device for each session.
45
46OpenCL kernels are compiled when needed by the device driver.  Some
47drivers produce compiler output even when there is no error.  You
48can see the output by setting PYOPENCL_COMPILER_OUTPUT=1.  It should be
49harmless, albeit annoying.
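
A minimal calling sequence, assuming *q* is a numpy array of q values and
*model_info*, *call_details* and *values* were built with the usual
sasmodels helpers (a sketch only, not a complete recipe):

    from sasmodels import generate
    from sasmodels.kernelcl import GpuModel

    source = generate.make_source(model_info)
    model = GpuModel(source, model_info, dtype=generate.F32)
    kernel = model.make_kernel([q])
    Iq = kernel(call_details, values, cutoff=0., magnetic=False)
    kernel.release()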
50"""
from __future__ import print_function

import os
import warnings
import logging
import time

import numpy as np  # type: ignore


# Attempt to set up OpenCL. This may fail if the pyopencl package is not
# installed or if it is installed but there are no devices available.
try:
    import pyopencl as cl  # type: ignore
    from pyopencl import mem_flags as mf
    from pyopencl.characterize import get_fast_inaccurate_build_options
    # Ask OpenCL for the default context so that we know that one exists
    cl.create_some_context(interactive=False)
    HAVE_OPENCL = True
    OPENCL_ERROR = ""
except Exception as exc:
    HAVE_OPENCL = False
    OPENCL_ERROR = str(exc)

from . import generate
from .generate import F32, F64
from .kernel import KernelModel, Kernel

# pylint: disable=unused-import
try:
    from typing import Tuple, Callable, Any, List, Dict
    from .modelinfo import ModelInfo
    from .details import CallDetails
except ImportError:
    pass
# pylint: enable=unused-import

# CRUFT: pyopencl < 2017.1  (as of June 2016 needs quotes around include path)
def quote_path(v):
    """
    Quote the path if it is not already quoted.

    If *v* starts with '-', then assume that it is a -I option or similar
    and do not quote it.  This is fragile: a -Ipath containing a space
    still needs quoting, but will not get it.
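
    Example::

        >>> quote_path('C:/Program Files/app/include')
        '"C:/Program Files/app/include"'
        >>> quote_path('-IC:/Program Files')
        '-IC:/Program Files'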
    """
    return '"'+v+'"' if v and ' ' in v and not v[0] in "\"'-" else v

def fix_pyopencl_include():
    """
    Monkey-patch pyopencl to allow spaces in the include file path.
    """
    import pyopencl as cl
    if hasattr(cl, '_DEFAULT_INCLUDE_OPTIONS'):
        cl._DEFAULT_INCLUDE_OPTIONS = [quote_path(v) for v in cl._DEFAULT_INCLUDE_OPTIONS]

if HAVE_OPENCL:
    fix_pyopencl_include()

# The max loops number is limited by the amount of local memory available
# on the device.  You don't want to make this value too big because it will
# waste resources, nor too small because it may limit the polydispersity
# calculations users can run.  A value of 2048 should be much larger than
# necessary given that cost grows as npts^k where k is the number of
# polydisperse parameters.
MAX_LOOPS = 2048


# Pragmas for enabling OpenCL features.  Be sure to protect them so that
# they still compile even if OpenCL is not present.
_F16_PRAGMA = """\
#if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp16)
#  pragma OPENCL EXTENSION cl_khr_fp16: enable
#endif
"""

_F64_PRAGMA = """\
#if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp64)
#  pragma OPENCL EXTENSION cl_khr_fp64: enable
#endif
"""

def use_opencl():
    """
    Return True if OpenCL is available and has not been disabled by
    setting SAS_OPENCL=none in the environment.
    """
    return HAVE_OPENCL and os.environ.get("SAS_OPENCL", "").lower() != "none"

ENV = None
def reset_environment():
    """
    Call to create a new OpenCL context, such as after a change to SAS_OPENCL.
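
    For example, to disable OpenCL at runtime and fall back to the C
    kernels::

        os.environ["SAS_OPENCL"] = "none"
        reset_environment()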
    """
    global ENV
    ENV = GpuEnvironment() if use_opencl() else None

def environment():
    # type: () -> "GpuEnvironment"
    """
    Returns a singleton :class:`GpuEnvironment`.

    This provides an OpenCL context and one queue per device.
    """
    if ENV is None:
        if not HAVE_OPENCL:
            raise RuntimeError("OpenCL startup failed with ***"
                               + OPENCL_ERROR + "***; using C compiler instead")
        reset_environment()
        if ENV is None:
            raise RuntimeError("SAS_OPENCL=None in environment")
    return ENV

def has_type(device, dtype):
    # type: (cl.Device, np.dtype) -> bool
    """
    Return True if the device supports the requested precision.
    """
    if dtype == F32:
        return True
    elif dtype == generate.F64:
        return "cl_khr_fp64" in device.extensions
    elif dtype == generate.F16:
        return "cl_khr_fp16" in device.extensions
    else:
        return False

def get_warp(kernel, queue):
    # type: (cl.Kernel, cl.CommandQueue) -> int
    """
    Return the size of an execution batch for *kernel* running on *queue*.
    """
    return kernel.get_work_group_info(
        cl.kernel_work_group_info.PREFERRED_WORK_GROUP_SIZE_MULTIPLE,
        queue.device)

def _stretch_input(vector, dtype, extra=1e-3, boundary=32):
    # type: (np.ndarray, np.dtype, float, int) -> np.ndarray
    """
    Stretch an input vector to the correct boundary.

    Performance on the kernels can drop by a factor of two or more if the
    number of values to compute does not fall on a nice power of two
    boundary.  The trailing additional vector elements are given a
    value of *extra*, and so f(*extra*) will be computed for each of
    them.  The original values form a leading subset of the returned
    array.

    *boundary* should be a power of 2 which is at least 32 for good
    performance on current platforms (as of Jan 2015).  It should
    probably be the max of get_warp(kernel, queue) and
    device.min_data_type_align_size//4.
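
    For example, a 3-element vector stretched to a boundary of 4 gains
    one trailing point with the default *extra* value::

        _stretch_input(np.arange(3.), np.dtype('d'), boundary=4)
        # => array([0., 1., 2., 0.001])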
    """
    remainder = vector.size % boundary
    if remainder != 0:
        size = vector.size + (boundary - remainder)
        vector = np.hstack((vector, [extra] * (size - vector.size)))
    return np.ascontiguousarray(vector, dtype=dtype)


def compile_model(context, source, dtype, fast=False):
    # type: (cl.Context, str, np.dtype, bool) -> cl.Program
    """
    Build a model to run on the gpu.

    Returns the compiled program.

    Raises an error if the desired precision is not available.
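
    A sketch of direct use (normally this is called for you by
    :meth:`GpuEnvironment.compile_program`)::

        program = compile_model(context, source, generate.F64)
        # kernel entry points such as <model>_Iq are attributes of program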
    """
    dtype = np.dtype(dtype)
    if not all(has_type(d, dtype) for d in context.devices):
        raise RuntimeError("%s not supported for devices"%dtype)

    source_list = [generate.convert_type(source, dtype)]

    if dtype == generate.F16:
        source_list.insert(0, _F16_PRAGMA)
    elif dtype == generate.F64:
        source_list.insert(0, _F64_PRAGMA)

    # Note: USE_SINCOS makes the Intel CPU slower under OpenCL
    if context.devices[0].type == cl.device_type.GPU:
        source_list.insert(0, "#define USE_SINCOS\n")
    options = (get_fast_inaccurate_build_options(context.devices[0])
               if fast else [])
    source = "\n".join(source_list)
    program = cl.Program(context, source).build(options=options)
    #print("done with", program)
    return program


# For now, this returns one device in the context.
# TODO: create a context that contains all devices on all platforms.
class GpuEnvironment(object):
    """
    GPU context, with possibly many devices, and one queue per device.

    Because the environment can be reset during a live program (e.g., if the
    user changes the active GPU device in the GUI), everything associated
    with the device context must be cached in the environment and recreated
    if the environment changes.  The *cache* attribute is a simple dictionary
    which holds keys and references to objects, such as compiled kernels and
    allocated buffers.  The running program should check the cache for
    long-lived objects and create them if they are not there.  The program
    should not hold onto cached objects, but instead only keep them active
    for the duration of a function call.  When the environment is destroyed,
    the *release* method for each active cache item is called before the
    environment is freed.  This means that each cl buffer should be in its
    own cache entry.
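
    A sketch of the intended pattern, with *key* and *size* as
    placeholders::

        env = environment()
        if key not in env.cache:
            env.cache[key] = cl.Buffer(env.context[F32], mf.READ_WRITE, size)
        buffer = env.cache[key]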
    """
    def __init__(self):
        # type: () -> None
        # find gpu context
        context_list = _create_some_context()

        # Find a context for F32 and for F64 (maybe the same one).
        # F16 isn't good enough.
        self.context = {}
        for dtype in (F32, F64):
            for context in context_list:
                if has_type(context.devices[0], dtype):
                    self.context[dtype] = context
                    break
            else:
                self.context[dtype] = None

        # Build a queue for each context.  Every OpenCL device supports
        # single precision, so the F32 context will exist if any context
        # was created at all.
        self.queue = {}
        context = self.context[F32]
        self.queue[F32] = cl.CommandQueue(context, context.devices[0])
        if self.context[F64] == self.context[F32]:
            self.queue[F64] = self.queue[F32]
        else:
            context = self.context[F64]
            self.queue[F64] = cl.CommandQueue(context, context.devices[0])

        # Byte boundary for data alignment
        #self.data_boundary = max(context.devices[0].min_data_type_align_size
        #                         for context in self.context.values())

        # Cache for compiled programs, and for items in context
        self.compiled = {}
        self.cache = {}

    def has_type(self, dtype):
        # type: (np.dtype) -> bool
        """
        Return True if all devices support a given type.
        """
        return self.context.get(dtype, None) is not None

    def compile_program(self, name, source, dtype, fast, timestamp):
        # type: (str, str, np.dtype, bool, float) -> cl.Program
        """
        Compile the program for the device in the given context.
        """
        # Note: PyOpenCL caches based on md5 hash of source, options and
        # device, so we don't really need to cache things for ourselves.
        # I'll do so anyway just to save some data munging time.
        tag = generate.tag_source(source)
        key = "%s-%s-%s%s"%(name, dtype, tag, ("-fast" if fast else ""))
        # Check the timestamp on the program and recompile if it is stale.
        program, program_timestamp = self.compiled.get(key, (None, np.inf))
        if program_timestamp < timestamp:
            del self.compiled[key]
        if key not in self.compiled:
            context = self.context[dtype]
            logging.info("building %s for OpenCL %s", key,
                         context.devices[0].name.strip())
            program = compile_model(self.context[dtype],
                                    str(source), dtype, fast)
            self.compiled[key] = (program, timestamp)
        return program

    def free_buffer(self, key):
        """
        Release the cached buffer associated with *key*, if any.
        """
        if key in self.cache:
            self.cache[key].release()
            del self.cache[key]

    def __del__(self):
        for v in self.cache.values():
            release = getattr(v, 'release', lambda: None)
            release()
        self.cache = {}

_CURRENT_ID = 0
def unique_id():
    """
    Return a unique key for use in the environment cache.
    """
    global _CURRENT_ID
    _CURRENT_ID += 1
    return _CURRENT_ID

def _create_some_context():
    # type: () -> List[cl.Context]
    """
    Protected call to cl.create_some_context without interactivity.

    Uses SAS_OPENCL or PYOPENCL_CTX if they are set in the environment,
    otherwise scans for the most appropriate device using
    :func:`_get_default_context`.
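
    For example, *SAS_OPENCL=0:1* typically selects device 1 on platform 0,
    using the same answer syntax as pyopencl's *PYOPENCL_CTX*.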
    """
    if 'SAS_OPENCL' in os.environ:
        # Copy SAS_OPENCL into PYOPENCL_CTX so pyopencl uses it to create
        # the context.
        os.environ["PYOPENCL_CTX"] = os.environ["SAS_OPENCL"]

    if 'PYOPENCL_CTX' in os.environ:
        try:
            return [cl.create_some_context(interactive=False)]
        except Exception as exc:
            warnings.warn(str(exc))
            warnings.warn("pyopencl.create_some_context() failed")
            warnings.warn("the environment variable 'SAS_OPENCL' or "
                          "'PYOPENCL_CTX' might not be set correctly")

    return _get_default_context()

def _get_default_context():
    # type: () -> List[cl.Context]
    """
    Get an OpenCL context, preferring GPU over CPU, and preferring Intel
    drivers over AMD drivers.
    """
    # Note: on mobile devices there is automatic clock scaling if either the
    # CPU or the GPU is underutilized; probably doesn't affect us, but if it
    # did, it would mean that putting a busy loop on the CPU while the GPU
    # is running may increase throughput.
    #
    # Macbook pro, base install:
    #     {'Apple': [Intel CPU, NVIDIA GPU]}
    # Macbook pro, base install:
    #     {'Apple': [Intel CPU, Intel GPU]}
    # 2 x NVIDIA 295 with Intel and NVIDIA OpenCL drivers installed:
    #     {'Intel': [CPU], 'NVIDIA': [GPU, GPU, GPU, GPU]}
    gpu, cpu = None, None
    for platform in cl.get_platforms():
        # AMD provides a much weaker CPU driver than Intel/Apple, so avoid
        # it.  If someone has bothered to install the AMD/NVIDIA drivers,
        # prefer them over the integrated graphics driver that may have been
        # supplied with the CPU chipset.
        preferred_cpu = (platform.vendor.startswith('Intel')
                         or platform.vendor.startswith('Apple'))
        preferred_gpu = (platform.vendor.startswith('Advanced')
                         or platform.vendor.startswith('NVIDIA'))
        for device in platform.get_devices():
            if device.type == cl.device_type.GPU:
                # If the existing type is not GPU then it will be CUSTOM
                # or ACCELERATOR, so don't override it.
                if gpu is None or (preferred_gpu and gpu.type == cl.device_type.GPU):
                    gpu = device
            elif device.type == cl.device_type.CPU:
                if cpu is None or preferred_cpu:
                    cpu = device
            else:
                # System has cl.device_type.ACCELERATOR or cl.device_type.CUSTOM;
                # an Intel Phi, for example, registers as an accelerator.
                # Since the user installed a custom device on their system
                # and went through the pain of sorting out OpenCL drivers for
                # it, let's assume they really do want to use it as their
                # primary compute device.
                gpu = device

    # Order the devices by gpu then by cpu; when searching for an available
    # device by data type they will be checked in this order, which means
    # that if the gpu supports double then the cpu will never be used (though
    # we may make it possible to explicitly request the cpu at some point).
    devices = []
    if gpu is not None:
        devices.append(gpu)
    if cpu is not None:
        devices.append(cpu)
    return [cl.Context([d]) for d in devices]


class GpuModel(KernelModel):
    """
    GPU wrapper for a single model.

    *source* and *model_info* are the model source and interface as returned
    from :func:`generate.make_source` and :func:`generate.make_model_info`.

    *dtype* is the desired model precision.  Any numpy dtype for single
    or double precision floats will do, such as 'f', 'float32' or 'single'
    for single and 'd', 'float64' or 'double' for double.  Double precision
    is an optional extension which may not be available on all devices.
    Half precision ('float16', 'half') may be available on some devices.
    Fast precision ('fast') is a loose version of single precision, indicating
    that the compiler is allowed to take shortcuts.
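
    For example, a single precision model with fast-math shortcuts
    enabled::

        model = GpuModel(source, model_info, dtype=generate.F32, fast=True)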
    """
    def __init__(self, source, model_info, dtype=generate.F32, fast=False):
        # type: (Dict[str,str], ModelInfo, np.dtype, bool) -> None
        self.info = model_info
        self.source = source
        self.dtype = dtype
        self.fast = fast
        self.timestamp = generate.ocl_timestamp(self.info)
        self._cache_key = unique_id()

    def __getstate__(self):
        # type: () -> Tuple[ModelInfo, str, np.dtype, bool]
        return self.info, self.source, self.dtype, self.fast

    def __setstate__(self, state):
        # type: (Tuple[ModelInfo, str, np.dtype, bool]) -> None
        self.info, self.source, self.dtype, self.fast = state

    def make_kernel(self, q_vectors):
        # type: (List[np.ndarray]) -> "GpuKernel"
        return GpuKernel(self, q_vectors)

    @property
    def Iq(self):
        return self.fetch_kernel('Iq')

    def fetch_kernel(self, name):
        # type: (str) -> cl.Kernel
        """
        Fetch the kernel from the environment by name, compiling it if it
        does not already exist.
        """
        gpu = environment()
        key = self._cache_key
        if key not in gpu.cache:
            program = gpu.compile_program(
                self.info.name,
                self.source['opencl'],
                self.dtype,
                self.fast,
                self.timestamp)
            variants = ['Iq', 'Iqxy', 'Imagnetic']
            names = [generate.kernel_name(self.info, k) for k in variants]
            kernels = [getattr(program, k) for k in names]
            data = dict((k, v) for k, v in zip(variants, kernels))
            # keep a handle to program so GC doesn't collect
            data['program'] = program
            gpu.cache[key] = data
        else:
            data = gpu.cache[key]
        return data[name]

# TODO: check that we don't need a destructor for buffers which go out of scope
class GpuInput(object):
    """
    Make q data available to the gpu.

    *q_vectors* is a list of q vectors, which will be *[q]* for 1-D data,
    and *[qx, qy]* for 2-D data.  Internally, the vectors will be reallocated
    to get the best performance on OpenCL, which may involve shifting and
    stretching the array to better match the memory architecture.  Additional
    points will be evaluated with *q=1e-3*.

    *dtype* is the data type for the q vectors.  The data type should be
    set to match that of the kernel, which is an attribute of
    :class:`GpuModel`.  Note that not all kernels support double
    precision, so even if the model was created for double precision,
    the *GpuModel.dtype* may be single precision.

    Call :meth:`release` when complete.  Even if not called directly, the
    buffer will be released when the data object is freed.
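
    For example, a 1-D data set with 100 *q* points is stretched to 128
    values, with the trailing 28 points evaluated at *q=1e-3*.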
    """
    def __init__(self, q_vectors, dtype=generate.F32):
        # type: (List[np.ndarray], np.dtype) -> None
        # TODO: do we ever need double precision q?
        self.nq = q_vectors[0].size
        self.dtype = np.dtype(dtype)
        self.is_2d = (len(q_vectors) == 2)
        # TODO: stretch input based on get_warp()
        # not doing it now since warp depends on kernel, which is not known
        # at this point, so instead using 32, which is good on the set of
        # architectures tested so far.
        if self.is_2d:
            # Note: 16 rather than 15 because the result is 1 longer than
            # the input.
            width = ((self.nq+16)//16)*16
            self.q = np.empty((width, 2), dtype=dtype)
            self.q[:self.nq, 0] = q_vectors[0]
            self.q[:self.nq, 1] = q_vectors[1]
            # Fill the padding with a harmless q value as documented above;
            # np.empty would otherwise leave arbitrary values there.
            self.q[self.nq:, :] = 1e-3
        else:
            # Note: 32 rather than 31 because the result is 1 longer than
            # the input.
            width = ((self.nq+32)//32)*32
            self.q = np.empty(width, dtype=dtype)
            self.q[:self.nq] = q_vectors[0]
            self.q[self.nq:] = 1e-3
        self.global_size = [self.q.shape[0]]
        self._cache_key = unique_id()

    @property
    def q_b(self):
        """Lazy creation of q buffer so it can survive context reset"""
        env = environment()
        key = self._cache_key
        if key not in env.cache:
            context = env.context[self.dtype]
            #print("creating inputs of size", self.global_size)
            buffer = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                               hostbuf=self.q)
            env.cache[key] = buffer
        return env.cache[key]

    def release(self):
        # type: () -> None
        """
        Free the buffer associated with the q value.
        """
        environment().free_buffer(self._cache_key)

    def __del__(self):
        # type: () -> None
        self.release()

class GpuKernel(Kernel):
    """
    Callable SAS kernel.

    *model* is the GpuModel object to call.

    The following attributes are defined:

    *info* is the module information

    *dtype* is the kernel precision

    *dim* is '1d' or '2d'

    *result* is a vector to contain the results of the call

    The kernel is called with *call_details* describing the polydispersity
    loops, a *values* vector holding the parameter values along with the
    (value, weight) pairs for the polydisperse parameters, a *cutoff*
    setting the integration limits (any points with combined weight less
    than *cutoff* will not be calculated), and a *magnetic* flag selecting
    the magnetic variant of the kernel for 2-D data.

    Call :meth:`release` when done with the kernel instance.
    """
    def __init__(self, model, q_vectors):
        # type: (GpuModel, List[np.ndarray]) -> None
        dtype = model.dtype
        self.q_input = GpuInput(q_vectors, dtype)
        self._model = model
        self._as_dtype = (np.float32 if dtype == generate.F32
                          else np.float64 if dtype == generate.F64
                          else np.float16 if dtype == generate.F16
                          else np.float32)  # will never get here, so use np.float32
        self._cache_key = unique_id()

        # attributes accessed from the outside
        self.dim = '2d' if self.q_input.is_2d else '1d'
        self.info = model.info
        self.dtype = model.dtype

        # holding place for the returned value,
        # plus one for the normalization values
        self.result = np.empty(self.q_input.nq+1, dtype)

    @property
    def _result_b(self):
        """Lazy creation of result buffer so it can survive context reset"""
        env = environment()
        key = self._cache_key
        if key not in env.cache:
            context = env.context[self.dtype]
            #print("creating result buffer of size", self.q_input.global_size)
            buffer = cl.Buffer(context, mf.READ_WRITE,
                               self.q_input.global_size[0] * self.dtype.itemsize)
            env.cache[key] = buffer
        return env.cache[key]

    def __call__(self, call_details, values, cutoff, magnetic):
        # type: (CallDetails, np.ndarray, float, bool) -> np.ndarray
        env = environment()
        queue = env.queue[self._model.dtype]
        context = queue.context

        # Arrange data transfer to/from card
        q_b = self.q_input.q_b
        result_b = self._result_b
        details_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                              hostbuf=call_details.buffer)
        values_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                             hostbuf=values)

        name = 'Iq' if self.dim == '1d' else 'Imagnetic' if magnetic else 'Iqxy'
        kernel = self._model.fetch_kernel(name)
        kernel_args = [
            np.uint32(self.q_input.nq), None, None,
            details_b, values_b, q_b, result_b,
            self._as_dtype(cutoff),
        ]
        #print("Calling OpenCL")
        #call_details.show(values)
        # Call kernel and retrieve results
        wait_for = None
        last_nap = time.perf_counter()  # time.clock() was removed in Python 3.8
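        # Break the computation into chunks of roughly one million q
        # evaluations per kernel launch so that a single launch cannot tie
        # up the device (and the GUI) for too long.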
        step = 1000000//self.q_input.nq + 1
        for start in range(0, call_details.num_eval, step):
            stop = min(start + step, call_details.num_eval)
            #print("queuing", start, stop)
            kernel_args[1:3] = [np.int32(start), np.int32(stop)]
            wait_for = [kernel(queue, self.q_input.global_size, None,
                               *kernel_args, wait_for=wait_for)]
            if stop < call_details.num_eval:
                # Allow other processes to run
                wait_for[0].wait()
                current_time = time.perf_counter()
                if current_time - last_nap > 0.5:
                    time.sleep(0.05)
                    last_nap = current_time
        cl.enqueue_copy(queue, self.result, result_b, wait_for=wait_for)
        #print("result", self.result)

        # Free buffers
        for v in (details_b, values_b):
            if v is not None:
                v.release()
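
        # The kernel returns the weighted sum of I(q) in result[:nq] and
        # the total weight (pd_norm) in result[nq]; apply the scale and
        # background here.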
        pd_norm = self.result[self.q_input.nq]
        scale = values[0]/(pd_norm if pd_norm != 0.0 else 1.0)
        background = values[1]
        #print("scale", scale, values[0], self.result[self.q_input.nq], background)
        return scale*self.result[:self.q_input.nq] + background

    def release(self):
        # type: () -> None
        """
        Release resources associated with the kernel.
        """
        environment().free_buffer(self._cache_key)
        self.q_input.release()

    def __del__(self):
        # type: () -> None
        self.release()