source: sasmodels/sasmodels/kernelcl.py @ 7722b4a

1"""
2GPU driver for C kernels
3
4There should be a single GPU environment running on the system.  This
5environment is constructed on the first call to :func:`env`, and the
6same environment is returned on each call.
7
8After retrieving the environment, the next step is to create the kernel.
9This is done with a call to :meth:`GpuEnvironment.make_kernel`, which
10returns the type of data used by the kernel.
11
12Next a :class:`GpuData` object should be created with the correct kind
13of data.  This data object can be used by multiple kernels, for example,
14if the target model is a weighted sum of multiple kernels.  The data
15should include any extra evaluation points required to compute the proper
16data smearing.  This need not match the square grid for 2D data if there
17is an index saying which q points are active.
18
19Together the GpuData, the program, and a device form a :class:`GpuKernel`.
20This kernel is used during fitting, receiving new sets of parameters and
21evaluating them.  The output value is stored in an output buffer on the
22devices, where it can be combined with other structure factors and form
23factors and have instrumental resolution effects applied.
24
25In order to use OpenCL for your models, you will need OpenCL drivers for
26your machine.  These should be available from your graphics card vendor.
27Intel provides OpenCL drivers for CPUs as well as their integrated HD
28graphics chipsets.  AMD also provides drivers for Intel CPUs, but as of
29this writing the performance is lacking compared to the Intel drivers.
30NVidia combines drivers for CUDA and OpenCL in one package.  The result
31is a bit messy if you have multiple drivers installed.  You can see which
32drivers are available by starting python and running:
33
34    import pyopencl as cl
35    cl.create_some_context(interactive=True)
36
37Once you have done that, it will show the available drivers which you
38can select.  It will then tell you that you can use these drivers
39automatically by setting the PYOPENCL_CTX environment variable.
40
41Some graphics cards have multiple devices on the same card.  You cannot
42yet use both of them concurrently to evaluate models, but you can run
43the program twice using a different device for each session.
44
45OpenCL kernels are compiled when needed by the device driver.  Some
46drivers produce compiler output even when there is no error.  You
47can see the output by setting PYOPENCL_COMPILER_OUTPUT=1.  It should be
48harmless, albeit annoying.
49"""
from __future__ import print_function

import os
import warnings
import logging

import numpy as np  # type: ignore

try:
    #raise NotImplementedError("OpenCL not yet implemented for new kernel template")
    import pyopencl as cl  # type: ignore
    # Ask OpenCL for the default context so that we know that one exists
    cl.create_some_context(interactive=False)
except Exception as exc:
    warnings.warn("OpenCL startup failed with ***"+str(exc)+"***; using C compiler instead")
    raise RuntimeError("OpenCL not available")

from pyopencl import mem_flags as mf
from pyopencl.characterize import get_fast_inaccurate_build_options

from . import generate
from .kernel import KernelModel, Kernel

try:
    from typing import Tuple, Callable, Any, List, Dict
    from .modelinfo import ModelInfo
    from .details import CallDetails
except ImportError:
    pass

# CRUFT: pyopencl < 2017.1  (as of June 2016 needs quotes around include path)
def quote_path(v):
    """
    Quote the path if it is not already quoted.

    If v starts with '-', then assume that it is a -I option or similar
    and do not quote it.  This is fragile: an option such as '-Ipath with
    space' would need quoting but is passed through unchanged.
    """
    return '"'+v+'"' if v and ' ' in v and not v[0] in "\"'-" else v
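
# Examples of the quoting rule above (illustrative):
#     quote_path('C:/Program Files/include')  ->  '"C:/Program Files/include"'
#     quote_path('-I/usr/include')            ->  '-I/usr/include'      # options pass through
#     quote_path('"already quoted"')          ->  '"already quoted"'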

def fix_pyopencl_include():
    import pyopencl as cl
    if hasattr(cl, '_DEFAULT_INCLUDE_OPTIONS'):
        cl._DEFAULT_INCLUDE_OPTIONS = [quote_path(v) for v in cl._DEFAULT_INCLUDE_OPTIONS]

fix_pyopencl_include()


# The max loops number is limited by the amount of local memory available
# on the device.  You don't want to make this value too big because it will
# waste resources, nor too small because it may prevent users from doing
# the polydispersity calculations they need.  A value of 2048 should be much
# larger than necessary given that cost grows as npts^k where k is the number
# of polydisperse parameters.
MAX_LOOPS = 2048


# Pragmas for enabling OpenCL features.  Be sure to protect them so that they
# still compile even if OpenCL is not present.
_F16_PRAGMA = """\
#if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp16)
#  pragma OPENCL EXTENSION cl_khr_fp16: enable
#endif
"""

_F64_PRAGMA = """\
#if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp64)
#  pragma OPENCL EXTENSION cl_khr_fp64: enable
#endif
"""


ENV = None
def environment():
    # type: () -> "GpuEnvironment"
    """
    Returns a singleton :class:`GpuEnvironment`.

    This provides an OpenCL context and one queue per device.
    """
    global ENV
    if ENV is None:
        ENV = GpuEnvironment()
    return ENV

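# A minimal usage sketch (illustrative, assuming model source generated by
# generate.make_source()): the environment caches compiled programs by
# model name, dtype and fast flag.
#
#     env = environment()
#     if env.has_type(generate.F64):
#         program = env.compile_program("cylinder", source, generate.F64)
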
def has_type(device, dtype):
    # type: (cl.Device, np.dtype) -> bool
    """
    Return true if device supports the requested precision.
    """
    if dtype == generate.F32:
        return True
    elif dtype == generate.F64:
        return "cl_khr_fp64" in device.extensions
    elif dtype == generate.F16:
        return "cl_khr_fp16" in device.extensions
    else:
        return False

def get_warp(kernel, queue):
    # type: (cl.Kernel, cl.CommandQueue) -> int
    """
    Return the size of an execution batch for *kernel* running on *queue*.
    """
    return kernel.get_work_group_info(
        cl.kernel_work_group_info.PREFERRED_WORK_GROUP_SIZE_MULTIPLE,
        queue.device)

def _stretch_input(vector, dtype, extra=1e-3, boundary=32):
    # type: (np.ndarray, np.dtype, float, int) -> np.ndarray
    """
    Stretch an input vector to the correct boundary.

    Performance on the kernels can drop by a factor of two or more if the
    number of values to compute does not fall on a nice power of two
    boundary.  The trailing additional vector elements are given a
    value of *extra*, and so f(*extra*) will be computed for each of
    them.  The values of interest are thus a prefix of the computed
    array.

    *boundary* should be a power of 2 which is at least 32 for good
    performance on current platforms (as of Jan 2015).  It should
    probably be the max of get_warp(kernel, queue) and
    device.min_data_type_align_size//4.
    """
    remainder = vector.size % boundary
    if remainder != 0:
        size = vector.size + (boundary - remainder)
        vector = np.hstack((vector, [extra] * (size - vector.size)))
    return np.ascontiguousarray(vector, dtype=dtype)

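# For example, a 100-point q vector padded to the default 32-element
# boundary comes back with 128 elements, the last 28 set to *extra*:
#
#     q = np.linspace(0.001, 0.5, 100)
#     padded = _stretch_input(q, np.float32)   # padded.size == 128
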

def compile_model(context, source, dtype, fast=False):
    # type: (cl.Context, str, np.dtype, bool) -> cl.Program
    """
    Build a model to run on the gpu.

    Returns the compiled program.  Raises RuntimeError if the requested
    precision is not supported by every device in the context, e.g., if
    float64 is requested but some device lacks the cl_khr_fp64 extension.
    """
    dtype = np.dtype(dtype)
    if not all(has_type(d, dtype) for d in context.devices):
        raise RuntimeError("%s not supported for devices"%dtype)

    source_list = [generate.convert_type(source, dtype)]

    if dtype == generate.F16:
        source_list.insert(0, _F16_PRAGMA)
    elif dtype == generate.F64:
        source_list.insert(0, _F64_PRAGMA)

    # Note: USE_SINCOS makes the Intel CPU slower under OpenCL
    if context.devices[0].type == cl.device_type.GPU:
        source_list.insert(0, "#define USE_SINCOS\n")
    options = (get_fast_inaccurate_build_options(context.devices[0])
               if fast else [])
    source = "\n".join(source_list)
    program = cl.Program(context, source).build(options=options)
    #print("done with "+program)
    return program

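# A direct compilation sketch (illustrative; normal code goes through
# GpuEnvironment.compile_program, which adds caching, and *model_info*
# here is assumed to come from generate.make_model_info):
#
#     context = cl.create_some_context(interactive=False)
#     source = generate.make_source(model_info)['opencl']
#     program = compile_model(context, source, generate.F32)
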
# for now, this returns one device in the context
# TODO: create a context that contains all devices on all platforms
class GpuEnvironment(object):
    """
    GPU context, with possibly many devices, and one queue per device.
    """
    def __init__(self):
        # type: () -> None
        # find gpu context
        #self.context = cl.create_some_context()

        self.context = None
        if 'PYOPENCL_CTX' in os.environ:
            self._create_some_context()

        if not self.context:
            self.context = _get_default_context()

        # Byte boundary for data alignment
        #self.data_boundary = max(d.min_data_type_align_size
        #                         for d in self.context.devices)
        self.queues = [cl.CommandQueue(context, context.devices[0])
                       for context in self.context]
        self.compiled = {}

    def has_type(self, dtype):
        # type: (np.dtype) -> bool
        """
        Return True if any device in the environment supports the given type.
        """
        return any(has_type(d, dtype)
                   for context in self.context
                   for d in context.devices)

    def get_queue(self, dtype):
        # type: (np.dtype) -> cl.CommandQueue
        """
        Return a command queue for the kernels of type dtype.
        """
        for context, queue in zip(self.context, self.queues):
            if all(has_type(d, dtype) for d in context.devices):
                return queue

    def get_context(self, dtype):
        # type: (np.dtype) -> cl.Context
        """
        Return an OpenCL context for the kernels of type dtype.
        """
        for context in self.context:
            if all(has_type(d, dtype) for d in context.devices):
                return context

    def _create_some_context(self):
        # type: () -> cl.Context
        """
        Protected call to cl.create_some_context without interactivity.  Use
        this if PYOPENCL_CTX is set in the environment.  Sets the *context*
        attribute.
        """
        try:
            self.context = [cl.create_some_context(interactive=False)]
        except Exception as exc:
            warnings.warn(str(exc))
            warnings.warn("pyopencl.create_some_context() failed")
            warnings.warn("the environment variable 'PYOPENCL_CTX' might not be set correctly")

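    # For example (illustrative): after running cl.create_some_context
    # interactively once and noting the chosen platform/device numbers,
    # the selection can be made automatic from the shell:
    #
    #     PYOPENCL_CTX=0 python fit.py
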
    def compile_program(self, name, source, dtype, fast=False):
        # type: (str, str, np.dtype, bool) -> cl.Program
        """
        Compile the program for the device in the given context.
        """
        key = "%s-%s%s"%(name, dtype, ("-fast" if fast else ""))
        if key not in self.compiled:
            context = self.get_context(dtype)
            logging.info("building %s for OpenCL %s", key,
                         context.devices[0].name.strip())
            program = compile_model(self.get_context(dtype),
                                    str(source), dtype, fast)
            self.compiled[key] = program
        return self.compiled[key]

    def release_program(self, name):
        # type: (str) -> None
        """
        Free memory associated with the program on the device.
        """
        # Programs are cached under "name-dtype[-fast]" keys, so release
        # every variant compiled for this model name.
        for key in [k for k in self.compiled if k.startswith(name + "-")]:
            self.compiled[key].release()
            del self.compiled[key]

def _get_default_context():
    # type: () -> List[cl.Context]
    """
    Get an OpenCL context, preferring GPU over CPU, and preferring Intel
    drivers over AMD drivers.
    """
    # Note: on mobile devices there is automatic clock scaling if either the
    # CPU or the GPU is underutilized; probably doesn't affect us, but if
    # it did, it would mean that putting a busy loop on the CPU while the GPU
    # is running may increase throughput.
    #
    # Macbook pro, base install:
    #     {'Apple': [Intel CPU, NVIDIA GPU]}
    # Macbook pro, base install:
    #     {'Apple': [Intel CPU, Intel GPU]}
    # 2 x nvidia 295 with Intel and NVIDIA opencl drivers installed
    #     {'Intel': [CPU], 'NVIDIA': [GPU, GPU, GPU, GPU]}
    gpu, cpu = None, None
    for platform in cl.get_platforms():
        # AMD provides a much weaker CPU driver than Intel/Apple, so avoid it.
        # If someone has bothered to install the AMD/NVIDIA drivers, prefer
        # them over the integrated graphics driver that may have been supplied
        # with the CPU chipset.
        preferred_cpu = (platform.vendor.startswith('Intel')
                         or platform.vendor.startswith('Apple'))
        preferred_gpu = (platform.vendor.startswith('Advanced')
                         or platform.vendor.startswith('NVIDIA'))
        for device in platform.get_devices():
            if device.type == cl.device_type.GPU:
                # If the existing type is not GPU then it will be CUSTOM
                # or ACCELERATOR so don't override it.
                if gpu is None or (preferred_gpu and gpu.type == cl.device_type.GPU):
                    gpu = device
            elif device.type == cl.device_type.CPU:
                if cpu is None or preferred_cpu:
                    cpu = device
            else:
                # System has cl.device_type.ACCELERATOR or cl.device_type.CUSTOM
                # Intel Phi for example registers as an accelerator.
                # Since the user installed a custom device on their system
                # and went through the pain of sorting out OpenCL drivers for
                # it, let's assume they really do want to use it as their
                # primary compute device.
                gpu = device

    # Order the devices by GPU then by CPU; when searching for an available
    # device by data type they will be checked in this order, which means
    # that if the GPU supports double then the CPU will never be used (though
    # we may make it possible to explicitly request the CPU at some point).
    devices = []
    if gpu is not None:
        devices.append(gpu)
    if cpu is not None:
        devices.append(cpu)
    return [cl.Context([d]) for d in devices]

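# Illustrative check of the resulting device ordering (GPU first, then CPU):
#
#     for context in _get_default_context():
#         print(context.devices[0].name)
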
class GpuModel(KernelModel):
    """
    GPU wrapper for a single model.

    *source* and *model_info* are the model source and interface as returned
    from :func:`generate.make_source` and :func:`generate.make_model_info`.

    *dtype* is the desired model precision.  Any numpy dtype for single
    or double precision floats will do, such as 'f', 'float32' or 'single'
    for single and 'd', 'float64' or 'double' for double.  Double precision
    is an optional extension which may not be available on all devices.
    Half precision ('float16', 'half') may be available on some devices.
    Fast precision ('fast') is a loose version of single precision, indicating
    that the compiler is allowed to take shortcuts.
    """
    def __init__(self, source, model_info, dtype=generate.F32, fast=False):
        # type: (Dict[str, str], ModelInfo, np.dtype, bool) -> None
        self.info = model_info
        self.source = source
        self.dtype = dtype
        self.fast = fast
        self.program = None # delay program creation
        self._kernels = None

    def __getstate__(self):
        # type: () -> Tuple[ModelInfo, str, np.dtype, bool]
        return self.info, self.source, self.dtype, self.fast

    def __setstate__(self, state):
        # type: (Tuple[ModelInfo, str, np.dtype, bool]) -> None
        self.info, self.source, self.dtype, self.fast = state
        self.program = None
        self._kernels = None

    def make_kernel(self, q_vectors):
        # type: (List[np.ndarray]) -> "GpuKernel"
        if self.program is None:
            compile_program = environment().compile_program
            self.program = compile_program(
                self.info.name,
                self.source['opencl'],
                self.dtype,
                self.fast)
            variants = ['Iq', 'Iqxy', 'Imagnetic']
            names = [generate.kernel_name(self.info, k) for k in variants]
            kernels = [getattr(self.program, k) for k in names]
            self._kernels = dict(zip(variants, kernels))
        is_2d = len(q_vectors) == 2
        if is_2d:
            kernel = [self._kernels['Iqxy'], self._kernels['Imagnetic']]
        else:
            kernel = [self._kernels['Iq']]*2
        return GpuKernel(kernel, self.dtype, self.info, q_vectors)

    def release(self):
        # type: () -> None
        """
        Free the resources associated with the model.
        """
        if self.program is not None:
            environment().release_program(self.info.name)
            self.program = None

    def __del__(self):
        # type: () -> None
        self.release()

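# A typical lifecycle sketch (illustrative; *source* and *model_info* are
# assumed to come from generate.make_source / generate.make_model_info,
# and *details*/*values* are the packed parameter set for the call):
#
#     model = GpuModel(source, model_info, dtype=generate.F32)
#     kernel = model.make_kernel([q])          # [qx, qy] for 2-D data
#     Iq = kernel(details, values, cutoff=0., magnetic=False)
#     kernel.release()
#     model.release()
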
# TODO: check that we don't need a destructor for buffers which go out of scope
class GpuInput(object):
    """
    Make q data available to the gpu.

    *q_vectors* is a list of q vectors, which will be *[q]* for 1-D data,
    and *[qx, qy]* for 2-D data.  Internally, the vectors will be reallocated
    to get the best performance on OpenCL, which may involve shifting and
    stretching the array to better match the memory architecture.  Additional
    points will be evaluated with *q=1e-3*.

    *dtype* is the data type for the q vectors. The data type should be
    set to match that of the kernel, which is an attribute of
    :class:`GpuModel`.  Note that not all kernels support double
    precision, so even if the program was created for double precision,
    the *GpuModel.dtype* may be single precision.

    Call :meth:`release` when complete.  Even if not called directly, the
    buffer will be released when the data object is freed.
    """
    def __init__(self, q_vectors, dtype=generate.F32):
        # type: (List[np.ndarray], np.dtype) -> None
        # TODO: do we ever need double precision q?
        env = environment()
        self.nq = q_vectors[0].size
        self.dtype = np.dtype(dtype)
        self.is_2d = (len(q_vectors) == 2)
        # TODO: stretch input based on get_warp()
        # not doing it now since warp depends on kernel, which is not known
        # at this point, so instead using 32, which is good on the set of
        # architectures tested so far.
        if self.is_2d:
            # Note: 17 rather than 15 because the result array is 2 elements
            # longer than the input.
            width = ((self.nq+17)//16)*16
            self.q = np.empty((width, 2), dtype=dtype)
            self.q[:self.nq, 0] = q_vectors[0]
            self.q[:self.nq, 1] = q_vectors[1]
        else:
            # Note: 33 rather than 31 because the result array is 2 elements
            # longer than the input.
            width = ((self.nq+33)//32)*32
            self.q = np.empty(width, dtype=dtype)
            self.q[:self.nq] = q_vectors[0]
        self.global_size = [self.q.shape[0]]
        context = env.get_context(self.dtype)
        #print("creating inputs of size", self.global_size)
        self.q_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                             hostbuf=self.q)

    def release(self):
        # type: () -> None
        """
        Free the memory.
        """
        if self.q_b is not None:
            self.q_b.release()
            self.q_b = None

    def __del__(self):
        # type: () -> None
        self.release()

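# For example, a 1-D data set with nq=100 is padded to
# ((100+33)//32)*32 = 128 slots, leaving room for the two extra result
# elements while keeping the global size on a 32-element boundary.
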
class GpuKernel(Kernel):
    """
    Callable SAS kernel.

    *kernel* is the pair of cl.Kernel objects to call: the plain kernel
    and the magnetic kernel

    *dtype* is the kernel precision

    *model_info* is the module information

    *q_vectors* is the q vectors at which the kernel should be evaluated

    The resulting callable takes *call_details* describing the
    polydispersity loops, a *values* vector containing the parameter
    values and (value, weight) pairs for the polydisperse parameters,
    a *cutoff* determining the integration limits (any points with
    combined weight less than *cutoff* will not be calculated), and a
    *magnetic* flag selecting between the plain and magnetic kernels.

    Call :meth:`release` when done with the kernel instance.
    """
    def __init__(self, kernel, dtype, model_info, q_vectors):
        # type: (List[cl.Kernel], np.dtype, ModelInfo, List[np.ndarray]) -> None
        q_input = GpuInput(q_vectors, dtype)
        self.kernel = kernel
        self.info = model_info
        self.dtype = dtype
        self.dim = '2d' if q_input.is_2d else '1d'
        # plus three for the normalization values
        self.result = np.empty(q_input.nq+3, dtype)

        # Inputs and outputs for each kernel call
        # Note: result may be shorter than result_b if global_size != nq
        env = environment()
        self.queue = env.get_queue(dtype)

        self.result_b = cl.Buffer(self.queue.context, mf.READ_WRITE,
                                  q_input.global_size[0] * dtype.itemsize)
        self.q_input = q_input # allocated by GpuInput above

        self._need_release = [self.result_b, self.q_input]
        self.real = (np.float32 if dtype == generate.F32
                     else np.float64 if dtype == generate.F64
                     else np.float16 if dtype == generate.F16
                     else np.float32)  # will never get here, so use np.float32
    def __call__(self, call_details, values, cutoff, magnetic):
        # type: (CallDetails, np.ndarray, float, bool) -> np.ndarray
        context = self.queue.context
        # Arrange data transfer to card
        details_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                              hostbuf=call_details.buffer)
        values_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                             hostbuf=values)

        kernel = self.kernel[1 if magnetic else 0]
        args = [
            np.uint32(self.q_input.nq), None, None,
            details_b, values_b, self.q_input.q_b, self.result_b,
            self.real(cutoff),
        ]
        #print("Calling OpenCL")
        #print("values",values)
        # Call kernel and retrieve results
        last_call = None
        step = 100
        for start in range(0, call_details.pd_prod, step):
            stop = min(start+step, call_details.pd_prod)
            #print("queuing",start,stop)
            args[1:3] = [np.int32(start), np.int32(stop)]
            last_call = [kernel(self.queue, self.q_input.global_size,
                                None, *args, wait_for=last_call)]
        cl.enqueue_copy(self.queue, self.result, self.result_b)

        # Free buffers
        for v in (details_b, values_b):
            if v is not None: v.release()

        scale = values[0]/self.result[self.q_input.nq]
        background = values[1]
        #print("scale",scale,background)
        return scale*self.result[:self.q_input.nq] + background
        # return self.result[:self.q_input.nq]

    def release(self):
        # type: () -> None
        """
        Release resources associated with the kernel.
        """
        for v in self._need_release:
            v.release()
        self._need_release = []

    def __del__(self):
        # type: () -> None
        self.release()
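
# Note on the returned intensity (derived from __call__ above): the result
# buffer holds nq raw values followed by the normalization totals, so the
# reported intensity is
#
#     I(q) = scale * result[:nq] / result[nq] + background
#
# where scale and background are values[0] and values[1] respectively.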