source: sasmodels/sasmodels/kernelcl.py @ dd7fc12

Last change dd7fc12, checked in by Paul Kienzle <pkienzle@…>, 8 years ago: fix kerneldll dtype problem; more type hinting

1"""
2GPU driver for C kernels
3
4There should be a single GPU environment running on the system.  This
5environment is constructed on the first call to :func:`env`, and the
6same environment is returned on each call.
7
8After retrieving the environment, the next step is to create the kernel.
9This is done with a call to :meth:`GpuEnvironment.make_kernel`, which
10returns the type of data used by the kernel.
11
12Next a :class:`GpuData` object should be created with the correct kind
13of data.  This data object can be used by multiple kernels, for example,
14if the target model is a weighted sum of multiple kernels.  The data
15should include any extra evaluation points required to compute the proper
16data smearing.  This need not match the square grid for 2D data if there
17is an index saying which q points are active.
18
19Together the GpuData, the program, and a device form a :class:`GpuKernel`.
20This kernel is used during fitting, receiving new sets of parameters and
21evaluating them.  The output value is stored in an output buffer on the
22devices, where it can be combined with other structure factors and form
23factors and have instrumental resolution effects applied.

In order to use OpenCL for your models, you will need OpenCL drivers for
your machine.  These should be available from your graphics card vendor.
Intel provides OpenCL drivers for CPUs as well as their integrated HD
graphics chipsets.  AMD also provides drivers for Intel CPUs, but as of
this writing the performance is lacking compared to the Intel drivers.
NVidia combines drivers for CUDA and OpenCL in one package.  The result
is a bit messy if you have multiple drivers installed.  You can see which
drivers are available by starting python and running:

    import pyopencl as cl
    cl.create_some_context(interactive=True)

Once you have done that, it will show the available drivers which you
can select.  It will then tell you that you can use these drivers
automatically by setting the PYOPENCL_CTX environment variable.

Some graphics cards have multiple devices on the same card.  You cannot
yet use both of them concurrently to evaluate models, but you can run
the program twice using a different device for each session.

OpenCL kernels are compiled when needed by the device driver.  Some
drivers produce compiler output even when there is no error.  You
can see the output by setting PYOPENCL_COMPILER_OUTPUT=1.  It should be
harmless, albeit annoying.
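
For example, a hypothetical session that selects the second listed
device and enables compiler output before importing sasmodels might
start with:

    import os
    os.environ["PYOPENCL_CTX"] = "1"              # device index is a guess
    os.environ["PYOPENCL_COMPILER_OUTPUT"] = "1"  # show compiler messages
    from sasmodels import kernelcl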
49"""
50from __future__ import print_function
51
52import os
53import warnings
54
55import numpy as np  # type: ignore
56
try:
    import pyopencl as cl  # type: ignore
    # Ask OpenCL for the default context so that we know that one exists
    cl.create_some_context(interactive=False)
except Exception as exc:
    warnings.warn(str(exc))
    raise RuntimeError("OpenCL not available")

from pyopencl import mem_flags as mf  # type: ignore
from pyopencl.characterize import get_fast_inaccurate_build_options  # type: ignore

from . import generate
from .kernel import KernelModel, Kernel

try:
    from typing import Tuple, Callable, Any, List
    from .modelinfo import ModelInfo
    from .details import CallDetails
except ImportError:
    pass

# The max loops number is limited by the amount of local memory available
# on the device.  You don't want to make this value too big because it will
# waste resources, nor too small because it may interfere with users trying
# to do their polydispersity calculations.  A value of 2048 should be much
# larger than necessary given that cost grows as npts^k where k is the number
# of polydisperse parameters.
MAX_LOOPS = 2048


# Pragmas to enable OpenCL features.  Be sure to protect them so that they
# still compile even if OpenCL is not present.
_F16_PRAGMA = """\
#if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp16)
#  pragma OPENCL EXTENSION cl_khr_fp16: enable
#endif
"""

_F64_PRAGMA = """\
#if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp64)
#  pragma OPENCL EXTENSION cl_khr_fp64: enable
#endif
"""


ENV = None
def environment():
    # type: () -> "GpuEnvironment"
    """
    Returns a singleton :class:`GpuEnvironment`.

    This provides an OpenCL context and one queue per device.
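
    A minimal usage sketch, assuming at least one OpenCL device is
    available:

        env = environment()
        queue = env.get_queue(generate.F32)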
110    """
111    global ENV
112    if ENV is None:
113        ENV = GpuEnvironment()
114    return ENV
115
def has_type(device, dtype):
    # type: (cl.Device, np.dtype) -> bool
    """
    Return True if the device supports the requested precision.
    """
    if dtype == generate.F32:
        return True
    elif dtype == generate.F64:
        return "cl_khr_fp64" in device.extensions
    elif dtype == generate.F16:
        return "cl_khr_fp16" in device.extensions
    else:
        return False

def get_warp(kernel, queue):
    # type: (cl.Kernel, cl.CommandQueue) -> int
    """
    Return the size of an execution batch for *kernel* running on *queue*.
    """
    return kernel.get_work_group_info(
        cl.kernel_work_group_info.PREFERRED_WORK_GROUP_SIZE_MULTIPLE,
        queue.device)

def _stretch_input(vector, dtype, extra=1e-3, boundary=32):
    # type: (np.ndarray, np.dtype, float, int) -> np.ndarray
    """
    Stretch an input vector to the correct boundary.

    Performance on the kernels can drop by a factor of two or more if the
    number of values to compute does not fall on a nice power of two
    boundary.  The trailing additional vector elements are given a
    value of *extra*, and so f(*extra*) will be computed for each of
    them.  The original values are thus a prefix of the padded array.

    *boundary* should be a power of 2 which is at least 32 for good
    performance on current platforms (as of Jan 2015).  It should
    probably be the max of get_warp(kernel, queue) and
    device.min_data_type_align_size//4.
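
    For example, a 100-element vector is padded out to the next multiple
    of 32, with the tail filled with *extra*:

        padded = _stretch_input(np.arange(100), np.float32, boundary=32)
        assert padded.size == 128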
154    """
155    remainder = vector.size % boundary
156    if remainder != 0:
157        size = vector.size + (boundary - remainder)
158        vector = np.hstack((vector, [extra] * (size - vector.size)))
159    return np.ascontiguousarray(vector, dtype=dtype)
160
161
def compile_model(context, source, dtype, fast=False):
    # type: (cl.Context, str, np.dtype, bool) -> cl.Program
    """
    Build a model to run on the gpu.

    Returns the compiled program.  Raises RuntimeError if the requested
    *dtype* is not supported by all devices in the context; callers that
    want a float32 fallback must arrange it themselves.
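
    A minimal sketch, assuming *context* and generated *source* already
    exist (the kernel name *Iq* here is hypothetical):

        program = compile_model(context, source, np.dtype('float32'))
        kernel = getattr(program, 'Iq')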
170    """
171    dtype = np.dtype(dtype)
172    if not all(has_type(d, dtype) for d in context.devices):
173        raise RuntimeError("%s not supported for devices"%dtype)
174
175    source_list = [generate.convert_type(source, dtype)]
176
177    if dtype == generate.F16:
178        source_list.insert(0, _F16_PRAGMA)
179    elif dtype == generate.F64:
180        source_list.insert(0, _F64_PRAGMA)
181
182    # Note: USE_SINCOS makes the intel cpu slower under opencl
183    if context.devices[0].type == cl.device_type.GPU:
184        source_list.insert(0, "#define USE_SINCOS\n")
185    options = (get_fast_inaccurate_build_options(context.devices[0])
186               if fast else [])
187    source = "\n".join(source_list)
188    program = cl.Program(context, source).build(options=options)
189    return program
190
191
# for now, this returns one device in the context
# TODO: create a context that contains all devices on all platforms
class GpuEnvironment(object):
    """
    GPU context, with possibly many devices, and one queue per device.
    """
    def __init__(self):
        # type: () -> None
        # find gpu context
        #self.context = cl.create_some_context()

        self.context = None
        if 'PYOPENCL_CTX' in os.environ:
            self._create_some_context()

        if not self.context:
            self.context = _get_default_context()

        # Byte boundary for data alignment
        #self.data_boundary = max(d.min_data_type_align_size
        #                         for d in self.context.devices)
        self.queues = [cl.CommandQueue(context, context.devices[0])
                       for context in self.context]
        self.compiled = {}

    def has_type(self, dtype):
        # type: (np.dtype) -> bool
        """
        Return True if any of the contexts support the given type.
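
        For example, to choose double precision when it is available
        anywhere in the environment (a sketch, assuming *env* is the
        :func:`environment` singleton):

            dtype = generate.F64 if env.has_type(generate.F64) else generate.F32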
221        """
222        return any(has_type(d, dtype)
223                   for context in self.context
224                   for d in context.devices)
225
    def get_queue(self, dtype):
        # type: (np.dtype) -> cl.CommandQueue
        """
        Return a command queue for the kernels of type dtype.
        """
        for context, queue in zip(self.context, self.queues):
            if all(has_type(d, dtype) for d in context.devices):
                return queue

    def get_context(self, dtype):
        # type: (np.dtype) -> cl.Context
        """
        Return an OpenCL context for the kernels of type dtype.
        """
        for context, queue in zip(self.context, self.queues):
            if all(has_type(d, dtype) for d in context.devices):
                return context

    def _create_some_context(self):
        # type: () -> None
        """
        Protected call to cl.create_some_context without interactivity.  Use
        this if PYOPENCL_CTX is set in the environment.  Sets the *context*
        attribute.
        """
        try:
            self.context = [cl.create_some_context(interactive=False)]
        except Exception as exc:
            warnings.warn(str(exc))
            warnings.warn("pyopencl.create_some_context() failed")
            warnings.warn("the environment variable 'PYOPENCL_CTX' might not be set correctly")

    def compile_program(self, name, source, dtype, fast=False):
        # type: (str, str, np.dtype, bool) -> cl.Program
        """
        Compile the program for the device in the given context.
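
        A hypothetical call, where "cylinder" stands in for a real model
        name and *source* comes from :func:`generate.make_source`:

            program = env.compile_program("cylinder", source, np.dtype('f'), False)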
262        """
263        key = "%s-%s-%s"%(name, dtype, fast)
264        if key not in self.compiled:
265            print("compiling",name)
266            dtype = np.dtype(dtype)
267            program = compile_model(self.get_context(dtype),
268                                    str(source), dtype, fast)
269            self.compiled[key] = program
270        return self.compiled[key]
271
    def release_program(self, name):
        # type: (str) -> None
        """
        Free memory associated with the program on the device.
        """
        # Programs are cached under a composite "name-dtype-fast" key, so
        # release every compiled variant of the named program.
        for key in [k for k in self.compiled if k.startswith(name + "-")]:
            self.compiled[key].release()
            del self.compiled[key]

def _get_default_context():
    # type: () -> List[cl.Context]
    """
    Get an OpenCL context, preferring GPU over CPU, and preferring Intel
    drivers over AMD drivers.
    """
    # Note: on mobile devices there is automatic clock scaling if either the
    # CPU or the GPU is underutilized; probably doesn't affect us, but if
    # it did, it would mean that putting a busy loop on the CPU while the GPU
    # is running may increase throughput.
    #
    # Macbook pro, base install:
    #     {'Apple': [Intel CPU, NVIDIA GPU]}
    # Macbook pro, base install:
    #     {'Apple': [Intel CPU, Intel GPU]}
    # 2 x nvidia 295 with Intel and NVIDIA opencl drivers installed
    #     {'Intel': [CPU], 'NVIDIA': [GPU, GPU, GPU, GPU]}
    gpu, cpu = None, None
    for platform in cl.get_platforms():
        # AMD provides a much weaker CPU driver than Intel/Apple, so avoid it.
        # If someone has bothered to install the AMD/NVIDIA drivers, prefer
        # them over the integrated graphics driver that may have been supplied
        # with the CPU chipset.
        preferred_cpu = (platform.vendor.startswith('Intel')
                         or platform.vendor.startswith('Apple'))
        preferred_gpu = (platform.vendor.startswith('Advanced')
                         or platform.vendor.startswith('NVIDIA'))
        for device in platform.get_devices():
            if device.type == cl.device_type.GPU:
                # If the existing type is not GPU then it will be CUSTOM or
                # ACCELERATOR, so don't override it.
                if gpu is None or (preferred_gpu and gpu.type == cl.device_type.GPU):
                    gpu = device
            elif device.type == cl.device_type.CPU:
                if cpu is None or preferred_cpu:
                    cpu = device
            else:
                # System has cl.device_type.ACCELERATOR or cl.device_type.CUSTOM
                # Intel Phi, for example, registers as an accelerator.
                # Since the user installed a custom device on their system and
                # went through the pain of sorting out OpenCL drivers for it,
                # let's assume they really do want to use it as their primary
                # compute device.
                gpu = device

    # Order the devices by gpu then by cpu; when searching for an available
    # device by data type they will be checked in this order, which means
    # that if the gpu supports double then the cpu will never be used (though
    # we may make it possible to explicitly request the cpu at some point).
    devices = []
    if gpu is not None:
        devices.append(gpu)
    if cpu is not None:
        devices.append(cpu)
    return [cl.Context([d]) for d in devices]


class GpuModel(KernelModel):
    """
    GPU wrapper for a single model.

    *source* and *model_info* are the model source and interface as returned
    from :func:`generate.make_source` and :func:`generate.make_model_info`.

    *dtype* is the desired model precision.  Any numpy dtype for single
    or double precision floats will do, such as 'f', 'float32' or 'single'
    for single and 'd', 'float64' or 'double' for double.  Double precision
    is an optional extension which may not be available on all devices.
    Half precision ('float16', 'half') may be available on some devices.
    Fast precision ('fast') is a loose version of single precision, indicating
    that the compiler is allowed to take shortcuts.
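
    A hypothetical end-to-end sketch, assuming *source* and *model_info*
    have been produced by :mod:`generate`:

        model = GpuModel(source, model_info, dtype=generate.F32)
        kernel = model.make_kernel([q])  # [qx, qy] for 2-D data
        Iq = kernel(call_details, weights, values, cutoff=0.)
        kernel.release()
        model.release()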
347    """
348    def __init__(self, source, model_info, dtype=generate.F32, fast=False):
349        # type: (str, ModelInfo, np.dtype, bool) -> None
350        self.info = model_info
351        self.source = source
352        self.dtype = dtype
353        self.fast = fast
354        self.program = None # delay program creation
355
356    def __getstate__(self):
357        # type: () -> Tuple[ModelInfo, str, np.dtype, bool]
358        return self.info, self.source, self.dtype, self.fast
359
360    def __setstate__(self, state):
361        # type: (Tuple[ModelInfo, str, np.dtype, bool]) -> None
362        self.info, self.source, self.dtype, self.fast = state
363        self.program = None
364
365    def make_kernel(self, q_vectors):
366        # type: (List[np.ndarray]) -> "GpuKernel"
367        if self.program is None:
368            compiler = environment().compile_program
369            self.program = compiler(self.info.name, self.source,
370                                    self.dtype, self.fast)
371        is_2d = len(q_vectors) == 2
372        kernel_name = generate.kernel_name(self.info, is_2d)
373        kernel = getattr(self.program, kernel_name)
374        return GpuKernel(kernel, self.info, q_vectors)

    def release(self):
        # type: () -> None
        """
        Free the resources associated with the model.
        """
        if self.program is not None:
            environment().release_program(self.info.name)
            self.program = None

    def __del__(self):
        # type: () -> None
        self.release()

# TODO: check that we don't need a destructor for buffers which go out of scope
class GpuInput(object):
    """
    Make q data available to the gpu.

    *q_vectors* is a list of q vectors, which will be *[q]* for 1-D data,
    and *[qx, qy]* for 2-D data.  Internally, the vectors will be reallocated
    to get the best performance on OpenCL, which may involve shifting and
    stretching the array to better match the memory architecture.  Additional
    points will be evaluated with *q=1e-3*.

    *dtype* is the data type for the q vectors. The data type should be
    set to match that of the kernel, which is an attribute of
    :class:`GpuModel`.  Note that not all kernels support double
    precision, so even if the program was created for double precision,
    the *GpuModel.dtype* may be single precision.

    Call :meth:`release` when complete.  Even if not called directly, the
    buffer will be released when the data object is freed.
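
    For example, wrapping a 1-D q vector (this assumes an OpenCL device
    supporting single precision is available):

        q = np.logspace(-3, -1, 200)
        q_input = GpuInput([q], dtype=generate.F32)
        assert q_input.nq == 200 and not q_input.is_2d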
408    """
409    def __init__(self, q_vectors, dtype=generate.F32):
410        # type: (List[np.ndarray], np.dtype) -> None
411        # TODO: do we ever need double precision q?
412        env = environment()
413        self.nq = q_vectors[0].size
414        self.dtype = np.dtype(dtype)
415        self.is_2d = (len(q_vectors) == 2)
416        # TODO: stretch input based on get_warp()
417        # not doing it now since warp depends on kernel, which is not known
418        # at this point, so instead using 32, which is good on the set of
419        # architectures tested so far.
420        if self.is_2d:
421            # Note: 17 rather than 15 because results is 2 elements
422            # longer than input.
423            width = ((self.nq+17)//16)*16
424            self.q = np.empty((width, 2), dtype=dtype)
425            self.q[:self.nq, 0] = q_vectors[0]
426            self.q[:self.nq, 1] = q_vectors[1]
427        else:
428            # Note: 33 rather than 31 because results is 2 elements
429            # longer than input.
430            width = ((self.nq+33)//32)*32
431            self.q = np.empty(width, dtype=dtype)
432            self.q[:self.nq] = q_vectors[0]
433        self.global_size = [self.q.shape[0]]
434        context = env.get_context(self.dtype)
435        #print("creating inputs of size", self.global_size)
436        self.q_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
437                             hostbuf=self.q)
438
    def release(self):
        # type: () -> None
        """
        Free the GPU memory.
        """
        # Release the OpenCL buffer; the host-side numpy array in self.q
        # has no release method.
        if self.q_b is not None:
            self.q_b.release()
            self.q_b = None

    def __del__(self):
        # type: () -> None
        self.release()

class GpuKernel(Kernel):
    """
    Callable SAS kernel.

    *kernel* is the compiled OpenCL kernel to call.

    *model_info* is the module information

    *q_vectors* is the q vectors at which the kernel should be evaluated

    *dtype* is the kernel precision

    The resulting call method takes *call_details* describing the
    polydispersity loops, flattened *weights* and *values* vectors for the
    parameters, and *cutoff*, which determines the integration limits: any
    points with combined weight less than *cutoff* will not be calculated.

    Call :meth:`release` when done with the kernel instance.
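
    A hypothetical call, assuming *call_details*, *weights* and *values*
    have already been constructed (the arrays must match the kernel
    precision):

        weights = np.ascontiguousarray(weights, dtype=kernel.dtype)
        values = np.ascontiguousarray(values, dtype=kernel.dtype)
        Iq = kernel(call_details, weights, values, cutoff=0.)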
471    """
472    def __init__(self, kernel, model_info, q_vectors):
473        # type: (cl.Kernel, ModelInfo, List[np.ndarray]) -> None
474        max_pd = model_info.parameters.max_pd
475        npars = len(model_info.parameters.kernel_parameters)-2
476        q_input = GpuInput(q_vectors, kernel.dtype)
477        self.kernel = kernel
478        self.info = model_info
479        self.dtype = kernel.dtype
480        self.dim = '2d' if q_input.is_2d else '1d'
481        self.pd_stop_index = 4*max_pd-1
482        # plus three for the normalization values
483        self.result = np.empty(q_input.nq+3, q_input.dtype)
484
485        # Inputs and outputs for each kernel call
486        # Note: res may be shorter than res_b if global_size != nq
487        env = environment()
488        self.queue = env.get_queue(kernel.dtype)
489
490        # details is int32 data, padded to an 8 integer boundary
491        size = ((max_pd*5 + npars*3 + 2 + 7)//8)*8
492        self.result_b = cl.Buffer(self.queue.context, mf.READ_WRITE,
493                               q_input.global_size[0] * kernel.dtype.itemsize)
494        self.q_input = q_input # allocated by GpuInput above
495
496        self._need_release = [ self.result_b, self.q_input ]
497
    def __call__(self, call_details, weights, values, cutoff):
        # type: (CallDetails, np.ndarray, np.ndarray, float) -> np.ndarray
        real = (np.float32 if self.q_input.dtype == generate.F32
                else np.float64 if self.q_input.dtype == generate.F64
                else np.float16 if self.q_input.dtype == generate.F16
                else np.float32)  # will never get here, so use np.float32
        assert call_details.dtype == np.int32
        assert weights.dtype == real and values.dtype == real

        context = self.queue.context
        details_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                              hostbuf=call_details)
        weights_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                              hostbuf=weights)
        values_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                             hostbuf=values)

        start, stop = 0, call_details[self.pd_stop_index]
        args = [
            np.uint32(self.q_input.nq), np.uint32(start), np.uint32(stop),
            details_b, weights_b, values_b,
            self.q_input.q_b, self.result_b, real(cutoff),
        ]
        self.kernel(self.queue, self.q_input.global_size, None, *args)
        cl.enqueue_copy(self.queue, self.result, self.result_b)
        for v in (details_b, weights_b, values_b):
            v.release()

        return self.result[:self.q_input.nq]

    def release(self):
        # type: () -> None
        """
        Release resources associated with the kernel.
        """
        for v in self._need_release:
            v.release()
        self._need_release = []

    def __del__(self):
        # type: () -> None
        self.release()