1"""
2GPU driver for C kernels
3
4TODO: docs are out of date
5
6There should be a single GPU environment running on the system.  This
7environment is constructed on the first call to :func:`env`, and the
8same environment is returned on each call.
9
10After retrieving the environment, the next step is to create the kernel.
11This is done with a call to :meth:`GpuEnvironment.make_kernel`, which
12returns the type of data used by the kernel.
13
14Next a :class:`GpuData` object should be created with the correct kind
15of data.  This data object can be used by multiple kernels, for example,
16if the target model is a weighted sum of multiple kernels.  The data
17should include any extra evaluation points required to compute the proper
18data smearing.  This need not match the square grid for 2D data if there
19is an index saying which q points are active.
20
21Together the GpuData, the program, and a device form a :class:`GpuKernel`.
22This kernel is used during fitting, receiving new sets of parameters and
23evaluating them.  The output value is stored in an output buffer on the
24devices, where it can be combined with other structure factors and form
25factors and have instrumental resolution effects applied.
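
A rough sketch of that flow using the names in the current code (*GpuData*
above corresponds to :class:`GpuInput` below; model loading via
:mod:`sasmodels.core` is assumed):

    from sasmodels import core, generate
    model_info = core.load_model_info("sphere")
    source = generate.make_source(model_info)
    model = GpuModel(source, model_info)
    kernel = model.make_kernel([q])   # q is a 1-D numpy array of q values
    # ... evaluate the kernel during fitting, then free the GPU buffers ...
    kernel.release()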

In order to use OpenCL for your models, you will need OpenCL drivers for
your machine.  These should be available from your graphics card vendor.
Intel provides OpenCL drivers for CPUs as well as their integrated HD
graphics chipsets.  AMD also provides drivers for Intel CPUs, but as of
this writing the performance is lacking compared to the Intel drivers.
NVidia combines drivers for CUDA and OpenCL in one package.  The result
is a bit messy if you have multiple drivers installed.  You can see which
drivers are available by starting python and running:

    import pyopencl as cl
    cl.create_some_context(interactive=True)

Once you have done that, it will show the available drivers from which
you can select one.  It will then tell you that you can use that driver
automatically by setting the SAS_OPENCL environment variable, which is
equivalent to PYOPENCL_CTX but does not conflict with other pyopencl
programs.
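
For example, to select the second device on the first platform before
starting the program (the platform and device numbers here are
illustrative; use whatever the interactive listing shows):

    export SAS_OPENCL=0:1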

Some graphics cards have multiple devices on the same card.  You cannot
yet use both of them concurrently to evaluate models, but you can run
the program twice using a different device for each session.

OpenCL kernels are compiled when needed by the device driver.  Some
drivers produce compiler output even when there is no error.  You
can see the output by setting PYOPENCL_COMPILER_OUTPUT=1.  It should be
harmless, albeit annoying.
"""
from __future__ import print_function

import os
import warnings
import logging
import time

import numpy as np  # type: ignore


# Attempt to setup opencl. This may fail if the pyopencl package is not
# installed or if it is installed but there are no devices available.
try:
    import pyopencl as cl  # type: ignore
    from pyopencl import mem_flags as mf
    from pyopencl.characterize import get_fast_inaccurate_build_options
    # Ask OpenCL for the default context so that we know that one exists
    cl.create_some_context(interactive=False)
    HAVE_OPENCL = True
    OPENCL_ERROR = ""
except Exception as exc:
    HAVE_OPENCL = False
    OPENCL_ERROR = str(exc)

from . import generate
from .generate import F32, F64
from .kernel import KernelModel, Kernel

# pylint: disable=unused-import
try:
    from typing import Tuple, Callable, Any, List, Dict
    from .modelinfo import ModelInfo
    from .details import CallDetails
except ImportError:
    pass
# pylint: enable=unused-import

# CRUFT: pyopencl < 2017.1  (as of June 2016 needs quotes around include path)
def quote_path(v):
    """
    Quote the path if it is not already quoted.

    If v starts with '-', then assume that it is a -I option or similar
    and do not quote it.  This is fragile: a -Ipath containing a space
    would need to be quoted but is not.
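
    For example (illustrative paths)::

        quote_path('C:/Program Files/include')  ->  '"C:/Program Files/include"'
        quote_path('-I/usr/include')            ->  '-I/usr/include'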
98    """
99    return '"'+v+'"' if v and ' ' in v and not v[0] in "\"'-" else v
100
101def fix_pyopencl_include():
102    """
103    Monkey patch pyopencl to allow spaces in include file path.
104    """
105    import pyopencl as cl
106    if hasattr(cl, '_DEFAULT_INCLUDE_OPTIONS'):
107        cl._DEFAULT_INCLUDE_OPTIONS = [quote_path(v) for v in cl._DEFAULT_INCLUDE_OPTIONS]
108
109if HAVE_OPENCL:
110    fix_pyopencl_include()

# The maximum loop count is limited by the amount of local memory available
# on the device.  You don't want to make this value too big because it will
# waste resources, nor too small because it may interfere with users trying
# to do their polydispersity calculations.  A value of 2048 should be much
# larger than necessary given that cost grows as npts^k where k is the number
# of polydisperse parameters.
MAX_LOOPS = 2048
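# For example, a model with k = 2 polydisperse parameters, each sampled at
# npts = 35 points, evaluates 35**2 = 1225 weighted points per q value,
# comfortably below MAX_LOOPS.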


# Pragmas for enabling OpenCL features.  Be sure to protect them so that they
# still compile even if OpenCL is not present.
_F16_PRAGMA = """\
#if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp16)
#  pragma OPENCL EXTENSION cl_khr_fp16: enable
#endif
"""

_F64_PRAGMA = """\
#if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp64)
#  pragma OPENCL EXTENSION cl_khr_fp64: enable
#endif
"""

def use_opencl():
    # type: () -> bool
    """
    Return True if OpenCL is the requested GPU driver for this session.
    """
    sas_opencl = os.environ.get("SAS_OPENCL", "OpenCL").lower()
    return HAVE_OPENCL and sas_opencl != "none" and not sas_opencl.startswith("cuda")
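
# Recognized SAS_OPENCL values, per the test above: "none" disables the GPU
# path, values starting with "cuda" select the CUDA driver instead, and
# anything else (the default "OpenCL", or a PYOPENCL_CTX-style device string
# such as "0:1") selects OpenCL.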

ENV = None
def reset_environment():
    """
    Call to create a new OpenCL context, such as after a change to SAS_OPENCL.
    """
    global ENV
    ENV = GpuEnvironment() if use_opencl() else None

def environment():
    # type: () -> "GpuEnvironment"
    """
    Returns a singleton :class:`GpuEnvironment`.

    This provides an OpenCL context and one queue per device.
    """
    if ENV is None:
        if not HAVE_OPENCL:
            raise RuntimeError("OpenCL startup failed with ***"
                               + OPENCL_ERROR + "***; using C compiler instead")
        reset_environment()
        if ENV is None:
            raise RuntimeError("SAS_OPENCL=None in environment")
    return ENV
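
# Usage sketch: check the singleton environment for double-precision support
# before requesting an F64 kernel.
#     env = environment()
#     dtype = F64 if env.has_type(F64) else F32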

def has_type(device, dtype):
    # type: (cl.Device, np.dtype) -> bool
    """
    Return true if device supports the requested precision.
    """
    if dtype == F32:
        return True
    elif dtype == F64:
        return "cl_khr_fp64" in device.extensions
    else:
        # Not supporting F16 type since it isn't accurate enough
        return False

def get_warp(kernel, queue):
    # type: (cl.Kernel, cl.CommandQueue) -> int
    """
    Return the size of an execution batch for *kernel* running on *queue*.
    """
    return kernel.get_work_group_info(
        cl.kernel_work_group_info.PREFERRED_WORK_GROUP_SIZE_MULTIPLE,
        queue.device)

def compile_model(context, source, dtype, fast=False):
    # type: (cl.Context, str, np.dtype, bool) -> cl.Program
    """
    Build a model to run on the gpu.

    Returns the compiled program.

    Raises an error if the desired precision is not available.
    """
    dtype = np.dtype(dtype)
    if not all(has_type(d, dtype) for d in context.devices):
        raise RuntimeError("%s not supported for devices"%dtype)

    source_list = [generate.convert_type(source, dtype)]

    if dtype == generate.F16:
        source_list.insert(0, _F16_PRAGMA)
    elif dtype == generate.F64:
        source_list.insert(0, _F64_PRAGMA)

    # Note: USE_SINCOS makes the intel cpu slower under opencl
    if context.devices[0].type == cl.device_type.GPU:
        source_list.insert(0, "#define USE_SINCOS\n")
    options = (get_fast_inaccurate_build_options(context.devices[0])
               if fast else [])
    source = "\n".join(source_list)
    program = cl.Program(context, source).build(options=options)
    #print("done with "+program)
    return program
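
# Example (sketch): build a double-precision program in the F64 context,
# assuming the environment reports F64 support.
#     env = environment()
#     program = compile_model(env.context[F64], source, F64)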


# for now, this returns one device in the context
# TODO: create a context that contains all devices on all platforms
class GpuEnvironment(object):
    """
    GPU context, with possibly many devices, and one queue per device.

    Because the environment can be reset during a live program (e.g., if the
    user changes the active GPU device in the GUI), everything associated
    with the device context must be cached in the environment and recreated
    if the environment changes.  The *cache* attribute is a simple dictionary
    which holds keys and references to objects, such as compiled kernels and
    allocated buffers.  The running program should check in the cache for
    long lived objects and create them if they are not there.  The program
    should not hold onto cached objects, but instead only keep them active
    for the duration of a function call.  When the environment is destroyed
    then the *release* method for each active cache item is called before
    the environment is freed.  This means that each cl buffer should be
    in its own cache entry.
    """
    def __init__(self):
        # type: () -> None
        # find gpu context
        context_list = _create_some_context()

        # Find a context for F32 and for F64 (maybe the same one).
        # F16 isn't good enough.
        self.context = {}
        for dtype in (F32, F64):
            for context in context_list:
                if has_type(context.devices[0], dtype):
                    self.context[dtype] = context
                    break
            else:
                self.context[dtype] = None

        # Build a queue for each context
        self.queue = {}
        context = self.context[F32]
        self.queue[F32] = cl.CommandQueue(context, context.devices[0])
        if self.context[F64] == self.context[F32]:
            self.queue[F64] = self.queue[F32]
        else:
            context = self.context[F64]
            self.queue[F64] = cl.CommandQueue(context, context.devices[0])

        # Byte boundary for data alignment
        #self.data_boundary = max(context.devices[0].min_data_type_align_size
        #                         for context in self.context.values())

        # Cache for compiled programs, and for items in context
        self.compiled = {}

    def has_type(self, dtype):
        # type: (np.dtype) -> bool
        """
        Return True if all devices support a given type.
        """
        return self.context.get(dtype, None) is not None

    def compile_program(self, name, source, dtype, fast, timestamp):
        # type: (str, str, np.dtype, bool, float) -> cl.Program
        """
        Compile the program for the device in the given context.
        """
        # Note: PyOpenCL caches based on md5 hash of source, options and device
        # so we don't really need to cache things for ourselves.  I'll do so
        # anyway just to save some data munging time.
        tag = generate.tag_source(source)
        key = "%s-%s-%s%s"%(name, dtype, tag, ("-fast" if fast else ""))
        # Check timestamp on program
        program, program_timestamp = self.compiled.get(key, (None, np.inf))
        if program_timestamp < timestamp:
            del self.compiled[key]
        if key not in self.compiled:
            context = self.context[dtype]
            logging.info("building %s for OpenCL %s", key,
                         context.devices[0].name.strip())
            program = compile_model(self.context[dtype],
                                    str(source), dtype, fast)
            self.compiled[key] = (program, timestamp)
        return program
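
# Example (sketch; the model name is illustrative): fetch a cached or freshly
# built program from the environment.  *timestamp* is the modification time
# of the model source; a cached build older than the source is rebuilt.
#     program = environment().compile_program(
#         "sphere", source, F32, False, timestamp)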

_CURRENT_ID = 0
def _create_some_context():
    # type: () -> List[cl.Context]
    """
    Protected call to cl.create_some_context without interactivity.

    Uses SAS_OPENCL or PYOPENCL_CTX if they are set in the environment,
    otherwise scans for the most appropriate device using
    :func:`_get_default_context`.  Ignore *SAS_OPENCL=OpenCL*, which
    indicates that an OpenCL device should be used without specifying
    which one (and not a CUDA device, or no GPU).
    """
    # Assume we do not get here if SAS_OPENCL is None or CUDA
    sas_opencl = os.environ.get('SAS_OPENCL', 'opencl')
    if sas_opencl.lower() != 'opencl':
        # Use the SAS_OPENCL value as PYOPENCL_CTX when creating the cl context
        os.environ["PYOPENCL_CTX"] = sas_opencl

    if 'PYOPENCL_CTX' in os.environ:
        try:
            return [cl.create_some_context(interactive=False)]
        except Exception as exc:
            warnings.warn(str(exc))
            warnings.warn("pyopencl.create_some_context() failed")
            warnings.warn("the environment variable 'SAS_OPENCL' or "
                          "'PYOPENCL_CTX' might not be set correctly")

    return _get_default_context()

def _get_default_context():
    # type: () -> List[cl.Context]
    """
    Get an OpenCL context, preferring GPU over CPU, and preferring Intel
    drivers over AMD drivers.
    """
    # Note: on mobile devices there is automatic clock scaling if either the
    # CPU or the GPU is underutilized; probably doesn't affect us, but if it
    # did, it would mean that putting a busy loop on the CPU while the GPU
    # is running may increase throughput.
    #
    # Macbook pro, base install:
    #     {'Apple': [Intel CPU, NVIDIA GPU]}
    # Macbook pro, base install:
    #     {'Apple': [Intel CPU, Intel GPU]}
    # 2 x nvidia 295 with Intel and NVIDIA opencl drivers installed
    #     {'Intel': [CPU], 'NVIDIA': [GPU, GPU, GPU, GPU]}
    gpu, cpu = None, None
    for platform in cl.get_platforms():
        # AMD provides a much weaker CPU driver than Intel/Apple, so avoid it.
        # If someone has bothered to install the AMD/NVIDIA drivers, prefer
        # them over the integrated graphics driver that may have been supplied
        # with the CPU chipset.
        preferred_cpu = (platform.vendor.startswith('Intel')
                         or platform.vendor.startswith('Apple'))
        preferred_gpu = (platform.vendor.startswith('Advanced')
                         or platform.vendor.startswith('NVIDIA'))
        for device in platform.get_devices():
            if device.type == cl.device_type.GPU:
                # If the existing type is not GPU then it will be CUSTOM
                # or ACCELERATOR so don't override it.
                if gpu is None or (preferred_gpu and gpu.type == cl.device_type.GPU):
                    gpu = device
            elif device.type == cl.device_type.CPU:
                if cpu is None or preferred_cpu:
                    cpu = device
            else:
                # System has cl.device_type.ACCELERATOR or cl.device_type.CUSTOM
                # Intel Phi for example registers as an accelerator.
                # Since the user installed a custom device on their system
                # and went through the pain of sorting out OpenCL drivers for
                # it, let's assume they really do want to use it as their
                # primary compute device.
                gpu = device

    # order the devices by gpu then by cpu; when searching for an available
    # device by data type they will be checked in this order, which means
    # that if the gpu supports double then the cpu will never be used (though
    # we may make it possible to explicitly request the cpu at some point).
    devices = []
    if gpu is not None:
        devices.append(gpu)
    if cpu is not None:
        devices.append(cpu)
    return [cl.Context([d]) for d in devices]


class GpuModel(KernelModel):
    """
    GPU wrapper for a single model.

    *source* and *model_info* are the model source and interface as returned
    from :func:`generate.make_source` and :func:`generate.make_model_info`.

    *dtype* is the desired model precision.  Any numpy dtype for single
    or double precision floats will do, such as 'f', 'float32' or 'single'
    for single and 'd', 'float64' or 'double' for double.  Double precision
    is an optional extension which may not be available on all devices.
    Half precision ('float16','half') may be available on some devices.
    Fast precision ('fast') is a loose version of single precision, indicating
    that the compiler is allowed to take shortcuts.
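
    For example (sketch; *source* and *model_info* as described above)::

        model = GpuModel(source, model_info, dtype=np.dtype('d'))  # double
        loose = GpuModel(source, model_info, fast=True)  # loose single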
397    """
398    info = None # type: ModelInfo
399    source = "" # type: str
400    dtype = None # type: np.dtype
401    fast = False # type: bool
402    _program = None # type: cl.Program
403    _kernels = None # type: Dict[str, cl.Kernel]

    def __init__(self, source, model_info, dtype=generate.F32, fast=False):
        # type: (Dict[str, str], ModelInfo, np.dtype, bool) -> None
        self.info = model_info
        self.source = source
        self.dtype = dtype
        self.fast = fast

    def __getstate__(self):
        # type: () -> Tuple[ModelInfo, Dict[str, str], np.dtype, bool]
        return self.info, self.source, self.dtype, self.fast

    def __setstate__(self, state):
        # type: (Tuple[ModelInfo, Dict[str, str], np.dtype, bool]) -> None
        self.info, self.source, self.dtype, self.fast = state
        self._program = self._kernels = None

    def make_kernel(self, q_vectors):
        # type: (List[np.ndarray]) -> "GpuKernel"
        return GpuKernel(self, q_vectors)

    def get_function(self, name):
        # type: (str) -> cl.Kernel
        """
        Fetch the kernel from the environment by name, compiling it if it
        does not already exist.
        """
        if self._program is None:
            self._prepare_program()
        return self._kernels[name]

    def _prepare_program(self):
        # type: () -> None
        env = environment()
        timestamp = generate.ocl_timestamp(self.info)
        program = env.compile_program(
            self.info.name,
            self.source['opencl'],
            self.dtype,
            self.fast,
            timestamp)
        variants = ['Iq', 'Iqxy', 'Imagnetic']
        names = [generate.kernel_name(self.info, k) for k in variants]
        handles = [getattr(program, k) for k in names]
        self._kernels = {k: v for k, v in zip(variants, handles)}
        # keep a handle to program so GC doesn't collect
        self._program = program


# TODO: check that we don't need a destructor for buffers which go out of scope
class GpuInput(object):
    """
    Make q data available to the gpu.

    *q_vectors* is a list of q vectors, which will be *[q]* for 1-D data,
    and *[qx, qy]* for 2-D data.  Internally, the vectors will be reallocated
    to get the best performance on OpenCL, which may involve shifting and
    stretching the array to better match the memory architecture.  Additional
    points will be evaluated with *q=1e-3*.

    *dtype* is the data type for the q vectors. The data type should be
    set to match that of the kernel, which is an attribute of
    :class:`GpuModel`.  Note that not all kernels support double
    precision, so even if the program was created for double precision,
    the *GpuModel.dtype* may be single precision.

    Call :meth:`release` when complete.  Even if not called directly, the
    buffer will be released when the data object is freed.
    """
    def __init__(self, q_vectors, dtype=generate.F32):
        # type: (List[np.ndarray], np.dtype) -> None
        # TODO: do we ever need double precision q?
        self.nq = q_vectors[0].size
        self.dtype = np.dtype(dtype)
        self.is_2d = (len(q_vectors) == 2)
        # TODO: stretch input based on get_warp()
        # not doing it now since warp depends on kernel, which is not known
        # at this point, so instead using 32, which is good on the set of
        # architectures tested so far.
        if self.is_2d:
            width = ((self.nq+15)//16)*16
            self.q = np.empty((width, 2), dtype=dtype)
            self.q[:self.nq, 0] = q_vectors[0]
            self.q[:self.nq, 1] = q_vectors[1]
        else:
            width = ((self.nq+31)//32)*32
            self.q = np.empty(width, dtype=dtype)
            self.q[:self.nq] = q_vectors[0]
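        # Example of the padding arithmetic: nq = 100 1-d points pad to
        # width = ((100+31)//32)*32 = 128, so global_size (set below from
        # the padded array) stays a multiple of the 32-point batch.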
        self.global_size = [self.q.shape[0]]
        #print("creating inputs of size", self.global_size)

        # transfer input value to gpu
        env = environment()
        context = env.context[self.dtype]
        self.q_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                             hostbuf=self.q)

    def release(self):
        # type: () -> None
        """
        Free the buffer associated with the q values.
        """
        if self.q_b is not None:
            self.q_b.release()
            self.q_b = None

    def __del__(self):
        # type: () -> None
        self.release()


class GpuKernel(Kernel):
    """
    Callable SAS kernel.

    *model* is the GpuModel object to call.

    The kernel is derived from :class:`Kernel`, providing the
    :meth:`call_kernel` method to evaluate the kernel for a given set of
    parameters.  Because of the need to move the q values to the GPU before
    evaluation, the kernel is instantiated for a particular set of q vectors,
    and can be called many times without transferring q each time.

    Call :meth:`release` when done with the kernel instance.
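
    A minimal sketch (assuming *model* is a compiled :class:`GpuModel` and
    *q* is a 1-d numpy array of q values)::

        kernel = model.make_kernel([q])
        # ... evaluate via the Kernel interface during fitting ...
        kernel.release()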
526    """
527    #: SAS model information structure
528    info = None # type: ModelInfo
529    #: kernel precision
530    dtype = None # type: np.dtype
531    #: kernel dimensions (1d or 2d)
532    dim = "" # type: str
533    #: calculation results, updated after each call to :meth:`_call_kernel`
534    result = None # type: np.ndarray
535
536    def __init__(self, model, q_vectors):
537        # type: (GpuModel, List[np.ndarray]) -> None
538        dtype = model.dtype
539        self.q_input = GpuInput(q_vectors, dtype)
540        self._model = model
541        # F16 isn't sufficient, so don't support it
542        self._as_dtype = np.float64 if dtype == generate.F64 else np.float32
543
544        # attributes accessed from the outside
545        self.dim = '2d' if self.q_input.is_2d else '1d'
546        self.info = model.info
547        self.dtype = model.dtype
548
549        # holding place for the returned value
550        nout = 2 if self.info.have_Fq and self.dim == '1d' else 1
551        extra_q = 4  # total weight, form volume, shell volume and R_eff
552        self.result = np.empty(self.q_input.nq*nout+extra_q, dtype)
553
554        # allocate result value on gpu
555        env = environment()
556        context = env.context[self.dtype]
557        width = ((self.result.size+31)//32)*32 * self.dtype.itemsize
558        self._result_b = cl.Buffer(context, mf.READ_WRITE, width)

    def _call_kernel(self, call_details, values, cutoff, magnetic,
                     effective_radius_type):
        # type: (CallDetails, np.ndarray, float, bool, int) -> None
        env = environment()
        queue = env.queue[self._model.dtype]
        context = queue.context

        # Arrange data transfer to/from card
        details_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                              hostbuf=call_details.buffer)
        values_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                             hostbuf=values)

        name = 'Iq' if self.dim == '1d' else 'Imagnetic' if magnetic else 'Iqxy'
        kernel = self._model.get_function(name)
        kernel_args = [
            np.uint32(self.q_input.nq), None, None,
            details_b, values_b, self.q_input.q_b, self._result_b,
            self._as_dtype(cutoff),
            np.uint32(effective_radius_type),
        ]
        #print("Calling OpenCL")
        #call_details.show(values)
        # Call kernel and retrieve results
        wait_for = None
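        # Break the polydispersity loop into chunks of roughly one million
        # kernel evaluations per enqueue so a single call cannot monopolize
        # the device, napping briefly between chunks so that other processes
        # (such as the GUI) stay responsive.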
        last_nap = time.perf_counter()
        step = 1000000//self.q_input.nq + 1
        for start in range(0, call_details.num_eval, step):
            stop = min(start + step, call_details.num_eval)
            #print("queuing",start,stop)
            kernel_args[1:3] = [np.int32(start), np.int32(stop)]
            wait_for = [kernel(queue, self.q_input.global_size, None,
                               *kernel_args, wait_for=wait_for)]
            if stop < call_details.num_eval:
                # Allow other processes to run
                wait_for[0].wait()
                current_time = time.perf_counter()
                if current_time - last_nap > 0.5:
                    time.sleep(0.001)
                    last_nap = current_time
        cl.enqueue_copy(queue, self.result, self._result_b, wait_for=wait_for)
        #print("result", self.result)

        # Free buffers
        details_b.release()
        values_b.release()

    def release(self):
        # type: () -> None
        """
        Release resources associated with the kernel.
        """
        self.q_input.release()
        if self._result_b is not None:
            self._result_b.release()
            self._result_b = None

    def __del__(self):
        # type: () -> None
        self.release()