source: sasmodels/sasmodels/kernelcl.py @ d42dd4a

1"""
2GPU driver for C kernels
3
4TODO: docs are out of date
5
6There should be a single GPU environment running on the system.  This
7environment is constructed on the first call to :func:`env`, and the
8same environment is returned on each call.
9
10After retrieving the environment, the next step is to create the kernel.
11This is done with a call to :meth:`GpuEnvironment.make_kernel`, which
12returns the type of data used by the kernel.
13
14Next a :class:`GpuData` object should be created with the correct kind
15of data.  This data object can be used by multiple kernels, for example,
16if the target model is a weighted sum of multiple kernels.  The data
17should include any extra evaluation points required to compute the proper
18data smearing.  This need not match the square grid for 2D data if there
19is an index saying which q points are active.
20
21Together the GpuData, the program, and a device form a :class:`GpuKernel`.
22This kernel is used during fitting, receiving new sets of parameters and
23evaluating them.  The output value is stored in an output buffer on the
24devices, where it can be combined with other structure factors and form
25factors and have instrumental resolution effects applied.
26
27In order to use OpenCL for your models, you will need OpenCL drivers for
28your machine.  These should be available from your graphics card vendor.
29Intel provides OpenCL drivers for CPUs as well as their integrated HD
30graphics chipsets.  AMD also provides drivers for Intel CPUs, but as of
31this writing the performance is lacking compared to the Intel drivers.
32NVidia combines drivers for CUDA and OpenCL in one package.  The result
33is a bit messy if you have multiple drivers installed.  You can see which
34drivers are available by starting python and running:
35
36    import pyopencl as cl
37    cl.create_some_context(interactive=True)
38
39Once you have done that, it will show the available drivers which you
40can select.  It will then tell you that you can use these drivers
41automatically by setting the SAS_OPENCL environment variable, which is
42PYOPENCL_CTX equivalent but not conflicting with other pyopnecl programs.
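
For example, to select a specific driver before sasmodels starts (the
platform and device numbering varies from machine to machine):

    import os
    os.environ["SAS_OPENCL"] = "0:1"  # e.g. platform 0, device 1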

Some graphics cards have multiple devices on the same card.  You cannot
yet use both of them concurrently to evaluate models, but you can run
the program twice using a different device for each session.

OpenCL kernels are compiled when needed by the device driver.  Some
drivers produce compiler output even when there is no error.  You
can see the output by setting PYOPENCL_COMPILER_OUTPUT=1.  It should be
harmless, albeit annoying.
"""
from __future__ import print_function

import os
import warnings
import logging
import time

try:  # CRUFT: Python 2 has no time.perf_counter
    from time import perf_counter
except ImportError:
    from time import clock as perf_counter

import numpy as np  # type: ignore


# Attempt to set up OpenCL. This may fail if the pyopencl package is not
# installed or if it is installed but there are no devices available.
try:
    import pyopencl as cl  # type: ignore
    from pyopencl import mem_flags as mf
    from pyopencl.characterize import get_fast_inaccurate_build_options
    # Ask OpenCL for the default context so that we know that one exists
    cl.create_some_context(interactive=False)
    HAVE_OPENCL = True
    OPENCL_ERROR = ""
except Exception as exc:
    HAVE_OPENCL = False
    OPENCL_ERROR = str(exc)

from . import generate
from .generate import F32, F64
from .kernel import KernelModel, Kernel

# pylint: disable=unused-import
try:
    from typing import Tuple, Callable, Any, Dict, List
    from .modelinfo import ModelInfo
    from .details import CallDetails
except ImportError:
    pass
# pylint: enable=unused-import
# CRUFT: pyopencl < 2017.1  (as of June 2016 needs quotes around include path)
def quote_path(v):
    """
    Quote the path if it is not already quoted.

    If v starts with '-', then assume that it is a -I option or similar
    and do not quote it.  This is fragile: -Ipath with space needs to
    be quoted.
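
    For example:

        >>> quote_path('C:/Program Files/include')
        '"C:/Program Files/include"'
        >>> quote_path('-IC:/Program Files/include')  # leading '-' blocks quoting
        '-IC:/Program Files/include'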
98    """
99    return '"'+v+'"' if v and ' ' in v and not v[0] in "\"'-" else v
100
101def fix_pyopencl_include():
102    """
103    Monkey patch pyopencl to allow spaces in include file path.
104    """
105    import pyopencl as cl
106    if hasattr(cl, '_DEFAULT_INCLUDE_OPTIONS'):
107        cl._DEFAULT_INCLUDE_OPTIONS = [quote_path(v) for v in cl._DEFAULT_INCLUDE_OPTIONS]
108
109if HAVE_OPENCL:
110    fix_pyopencl_include()
111
# The max loops number is limited by the amount of local memory available
# on the device.  You don't want to make this value too big because it will
# waste resources, nor too small because it may interfere with users trying
# to do their polydispersity calculations.  The current value of 2048 should
# be much larger than necessary given that cost grows as npts^k where k is
# the number of polydisperse parameters.
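# For example, three polydisperse parameters with 35 points each already
# require 35**3 = 42875 evaluations per q value.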
MAX_LOOPS = 2048


# Pragmas to enable OpenCL features.  Be sure to protect them so that they
# still compile even if OpenCL is not present.
_F16_PRAGMA = """\
#if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp16)
#  pragma OPENCL EXTENSION cl_khr_fp16: enable
#endif
"""

_F64_PRAGMA = """\
#if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp64)
#  pragma OPENCL EXTENSION cl_khr_fp64: enable
#endif
"""

def use_opencl():
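    """
    Return True if OpenCL should be used, i.e., pyopencl loaded successfully
    and SAS_OPENCL is neither "none" nor a CUDA device request.
    """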
    sas_opencl = os.environ.get("SAS_OPENCL", "OpenCL").lower()
    return (HAVE_OPENCL and sas_opencl != "none"
            and not sas_opencl.startswith("cuda"))

ENV = None
def reset_environment():
    """
    Call to create a new OpenCL context, such as after a change to SAS_OPENCL.
    """
    global ENV
    ENV = GpuEnvironment() if use_opencl() else None

def environment():
    # type: () -> "GpuEnvironment"
    """
    Returns a singleton :class:`GpuEnvironment`.

    This provides an OpenCL context and one queue per device.
    """
    if ENV is None:
        if not HAVE_OPENCL:
            raise RuntimeError("OpenCL startup failed with ***"
                               + OPENCL_ERROR + "***; using C compiler instead")
        reset_environment()
        if ENV is None:
            raise RuntimeError("SAS_OPENCL=None in environment")
    return ENV

def has_type(device, dtype):
    # type: (cl.Device, np.dtype) -> bool
    """
    Return true if device supports the requested precision.
    """
    if dtype == F32:
        return True
    elif dtype == generate.F64:
        return "cl_khr_fp64" in device.extensions
    elif dtype == generate.F16:
        return "cl_khr_fp16" in device.extensions
    else:
        return False

def get_warp(kernel, queue):
    # type: (cl.Kernel, cl.CommandQueue) -> int
    """
    Return the size of an execution batch for *kernel* running on *queue*.
    """
    return kernel.get_work_group_info(
        cl.kernel_work_group_info.PREFERRED_WORK_GROUP_SIZE_MULTIPLE,
        queue.device)

def compile_model(context, source, dtype, fast=False):
    # type: (cl.Context, str, np.dtype, bool) -> cl.Program
    """
    Build a model to run on the gpu.

    Returns the compiled program.

    Raises an error if the desired precision is not available.
    """
    dtype = np.dtype(dtype)
    if not all(has_type(d, dtype) for d in context.devices):
        raise RuntimeError("%s not supported for devices" % dtype)

    source_list = [generate.convert_type(source, dtype)]

    if dtype == generate.F16:
        source_list.insert(0, _F16_PRAGMA)
    elif dtype == generate.F64:
        source_list.insert(0, _F64_PRAGMA)

    # Note: USE_SINCOS makes the intel cpu slower under opencl
    if context.devices[0].type == cl.device_type.GPU:
        source_list.insert(0, "#define USE_SINCOS\n")
    options = (get_fast_inaccurate_build_options(context.devices[0])
               if fast else [])
    source = "\n".join(source_list)
    program = cl.Program(context, source).build(options=options)
    #print("done with "+program)
    return program


# For now, this returns one device in the context.
# TODO: create a context that contains all devices on all platforms.
class GpuEnvironment(object):
    """
    GPU context, with possibly many devices, and one queue per device.

    Because the environment can be reset during a live program (e.g., if the
    user changes the active GPU device in the GUI), everything associated
    with the device context must be cached in the environment and recreated
    if the environment changes.  The *cache* attribute is a simple dictionary
    which holds keys and references to objects, such as compiled kernels and
    allocated buffers.  The running program should look in the cache for
    long-lived objects and create them if they are not there.  The program
    should not hold onto cached objects, but instead only keep them active
    for the duration of a function call.  When the environment is destroyed
    then the *release* method for each active cache item is called before
    the environment is freed.  This means that each cl buffer should be
    in its own cache entry.
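
    For example, the lazy-buffer pattern used by :attr:`GpuInput.q_b`
    looks roughly like this (sketch only; *make_buffer* stands in for
    the actual allocation call):

        env = environment()
        if key not in env.cache:
            env.cache[key] = make_buffer()  # hypothetical allocator
        buffer = env.cache[key]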
235    """
236    def __init__(self):
237        # type: () -> None
238        # find gpu context
239        context_list = _create_some_context()
240
241        # Find a context for F32 and for F64 (maybe the same one).
242        # F16 isn't good enough.
243        self.context = {}
244        for dtype in (F32, F64):
245            for context in context_list:
246                if has_type(context.devices[0], dtype):
247                    self.context[dtype] = context
248                    break
249            else:
250                self.context[dtype] = None
251
252        # Build a queue for each context
253        self.queue = {}
254        context = self.context[F32]
255        self.queue[F32] = cl.CommandQueue(context, context.devices[0])
256        if self.context[F64] == self.context[F32]:
257            self.queue[F64] = self.queue[F32]
258        else:
259            context = self.context[F64]
260            self.queue[F64] = cl.CommandQueue(context, context.devices[0])
261
262        # Byte boundary for data alignment
263        #self.data_boundary = max(context.devices[0].min_data_type_align_size
264        #                         for context in self.context.values())
265
266        # Cache for compiled programs, and for items in context
267        self.compiled = {}
268        self.cache = {}
269
    def has_type(self, dtype):
        # type: (np.dtype) -> bool
        """
        Return True if a device is available that supports the given type.
        """
        return self.context.get(dtype, None) is not None

    def compile_program(self, name, source, dtype, fast, timestamp):
        # type: (str, str, np.dtype, bool, float) -> cl.Program
        """
        Compile the program for the device in the given context.
        """
        # Note: PyOpenCL caches based on md5 hash of source, options and
        # device, so we don't really need to cache things for ourselves.
        # I'll do so anyway just to save some data munging time.
        tag = generate.tag_source(source)
        key = "%s-%s-%s%s" % (name, dtype, tag, ("-fast" if fast else ""))
        # Check the program timestamp against the model timestamp.
        program, program_timestamp = self.compiled.get(key, (None, np.inf))
        if program_timestamp < timestamp:
            del self.compiled[key]
        if key not in self.compiled:
            context = self.context[dtype]
            logging.info("building %s for OpenCL %s", key,
                         context.devices[0].name.strip())
            program = compile_model(self.context[dtype],
                                    str(source), dtype, fast)
            self.compiled[key] = (program, timestamp)
        return program

    def free_buffer(self, key):
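        """
        Release the cached buffer associated with *key*, if any.
        """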
        if key in self.cache:
            self.cache[key].release()
            del self.cache[key]

    def __del__(self):
        for v in self.cache.values():
            release = getattr(v, 'release', lambda: None)
            release()
        self.cache = {}

_CURRENT_ID = 0
def unique_id():
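    """
    Return a number unique across calls, used as a cache key that remains
    valid even when the GPU environment is reset.
    """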
    global _CURRENT_ID
    _CURRENT_ID += 1
    return _CURRENT_ID

def _create_some_context():
    # type: () -> List[cl.Context]
    """
    Protected call to cl.create_some_context without interactivity.

    Uses SAS_OPENCL or PYOPENCL_CTX if they are set in the environment,
    otherwise scans for the most appropriate device using
    :func:`_get_default_context`.  The plain value *SAS_OPENCL=OpenCL* is
    ignored: it indicates that an OpenCL device should be used without
    specifying which one (and not a CUDA device, or no GPU).
    """
    # Assume we do not get here if SAS_OPENCL is None or CUDA.
    sas_opencl = os.environ.get('SAS_OPENCL', 'opencl')
    if sas_opencl.lower() != 'opencl':
        # Use the SAS_OPENCL value as PYOPENCL_CTX when creating the context.
        os.environ["PYOPENCL_CTX"] = sas_opencl

    if 'PYOPENCL_CTX' in os.environ:
        try:
            return [cl.create_some_context(interactive=False)]
        except Exception as exc:
            warnings.warn(str(exc))
            warnings.warn("pyopencl.create_some_context() failed")
            warnings.warn("the environment variable 'SAS_OPENCL' or "
                          "'PYOPENCL_CTX' might not be set correctly")

    return _get_default_context()

def _get_default_context():
    # type: () -> List[cl.Context]
    """
    Get an OpenCL context, preferring GPU over CPU.  Prefer Intel or Apple
    drivers for the CPU and discrete AMD/NVIDIA drivers for the GPU.
    """
    # Note: on mobile devices there is automatic clock scaling if either the
    # CPU or the GPU is underutilized; this probably doesn't affect us, but
    # if it did, it would mean that putting a busy loop on the CPU while the
    # GPU is running may increase throughput.
    #
    # Macbook pro, base install:
    #     {'Apple': [Intel CPU, NVIDIA GPU]}
    # Macbook pro, base install:
    #     {'Apple': [Intel CPU, Intel GPU]}
    # 2 x nvidia 295 with Intel and NVIDIA opencl drivers installed
    #     {'Intel': [CPU], 'NVIDIA': [GPU, GPU, GPU, GPU]}
    gpu, cpu = None, None
    for platform in cl.get_platforms():
        # AMD provides a much weaker CPU driver than Intel/Apple, so avoid it.
        # If someone has bothered to install the AMD/NVIDIA drivers, prefer
        # them over the integrated graphics driver that may have been supplied
        # with the CPU chipset.
        preferred_cpu = (platform.vendor.startswith('Intel')
                         or platform.vendor.startswith('Apple'))
        preferred_gpu = (platform.vendor.startswith('Advanced')
                         or platform.vendor.startswith('NVIDIA'))
        for device in platform.get_devices():
            if device.type == cl.device_type.GPU:
                # If the existing type is not GPU then it will be CUSTOM
                # or ACCELERATOR, so don't override it.
                if gpu is None or (preferred_gpu
                                   and gpu.type == cl.device_type.GPU):
                    gpu = device
            elif device.type == cl.device_type.CPU:
                if cpu is None or preferred_cpu:
                    cpu = device
            else:
                # System has cl.device_type.ACCELERATOR or cl.device_type.CUSTOM
                # Intel Phi, for example, registers as an accelerator.
                # Since the user installed a custom device on their system
                # and went through the pain of sorting out OpenCL drivers for
                # it, let's assume they really do want to use it as their
                # primary compute device.
                gpu = device

    # Order the devices by gpu then by cpu; when searching for an available
    # device by data type they will be checked in this order, which means
    # that if the gpu supports double then the cpu will never be used (though
    # we may make it possible to explicitly request the cpu at some point).
    devices = []
    if gpu is not None:
        devices.append(gpu)
    if cpu is not None:
        devices.append(cpu)
    return [cl.Context([d]) for d in devices]


class GpuModel(KernelModel):
    """
    GPU wrapper for a single model.

    *source* and *model_info* are the model source and interface as returned
    from :func:`generate.make_source` and :func:`generate.make_model_info`.

    *dtype* is the desired model precision.  Any numpy dtype for single
    or double precision floats will do, such as 'f', 'float32' or 'single'
    for single and 'd', 'float64' or 'double' for double.  Double precision
    is an optional extension which may not be available on all devices.
    Half precision ('float16', 'half') may be available on some devices.
    Fast precision ('fast') is a loose version of single precision, indicating
    that the compiler is allowed to take shortcuts.
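
    A minimal usage sketch, assuming *source* and *model_info* were built
    with :func:`generate.make_source` and :func:`generate.make_model_info`:

        model = GpuModel(source, model_info, dtype=generate.F32)
        kernel = model.make_kernel([q])  # q is a 1-D numpy array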
415    """
416    def __init__(self, source, model_info, dtype=generate.F32, fast=False):
417        # type: (Dict[str,str], ModelInfo, np.dtype, bool) -> None
418        self.info = model_info
419        self.source = source
420        self.dtype = dtype
421        self.fast = fast
422        self.timestamp = generate.ocl_timestamp(self.info)
423        self._cache_key = unique_id()
424
425    def __getstate__(self):
426        # type: () -> Tuple[ModelInfo, str, np.dtype, bool]
427        return self.info, self.source, self.dtype, self.fast
428
429    def __setstate__(self, state):
430        # type: (Tuple[ModelInfo, str, np.dtype, bool]) -> None
431        self.info, self.source, self.dtype, self.fast = state
432
433    def make_kernel(self, q_vectors):
434        # type: (List[np.ndarray]) -> "GpuKernel"
435        return GpuKernel(self, q_vectors)
436
437    @property
438    def Iq(self):
439        return self._fetch_kernel('Iq')
440
    def fetch_kernel(self, name):
        # type: (str) -> cl.Kernel
        """
        Fetch the kernel from the environment by name, compiling it if it
        does not already exist.
        """
        gpu = environment()
        key = self._cache_key
        if key not in gpu.cache:
            program = gpu.compile_program(
                self.info.name,
                self.source['opencl'],
                self.dtype,
                self.fast,
                self.timestamp)
            variants = ['Iq', 'Iqxy', 'Imagnetic']
            names = [generate.kernel_name(self.info, k) for k in variants]
            kernels = [getattr(program, k) for k in names]
            data = dict(zip(variants, kernels))
            # keep a handle to program so GC doesn't collect
            data['program'] = program
            gpu.cache[key] = data
        else:
            data = gpu.cache[key]
        return data[name]

# TODO: check that we don't need a destructor for buffers which go out of scope
class GpuInput(object):
    """
    Make q data available to the gpu.

    *q_vectors* is a list of q vectors, which will be *[q]* for 1-D data,
    and *[qx, qy]* for 2-D data.  Internally, the vectors will be reallocated
    to get the best performance on OpenCL, which may involve shifting and
    stretching the array to better match the memory architecture.  Additional
    points will be evaluated with *q=1e-3*.

    *dtype* is the data type for the q vectors. The data type should be
    set to match that of the kernel, which is an attribute of
    :class:`GpuModel`.  Note that not all kernels support double
    precision, so even if the program was created for double precision,
    the *GpuModel.dtype* may be single precision.

    Call :meth:`release` when complete.  Even if not called directly, the
    buffer will be released when the data object is freed.
    """
    def __init__(self, q_vectors, dtype=generate.F32):
        # type: (List[np.ndarray], np.dtype) -> None
        # TODO: do we ever need double precision q?
        self.nq = q_vectors[0].size
        self.dtype = np.dtype(dtype)
        self.is_2d = (len(q_vectors) == 2)
        # TODO: stretch input based on get_warp()
        # Not doing it now since warp depends on kernel, which is not known
        # at this point, so instead using 32, which is good on the set of
        # architectures tested so far.
        if self.is_2d:
            width = ((self.nq+15)//16)*16
            # Use np.full rather than np.empty so the padding beyond nq
            # holds the documented value q=1e-3 instead of uninitialized
            # memory.
            self.q = np.full((width, 2), 1e-3, dtype=dtype)
            self.q[:self.nq, 0] = q_vectors[0]
            self.q[:self.nq, 1] = q_vectors[1]
        else:
            width = ((self.nq+31)//32)*32
            self.q = np.full(width, 1e-3, dtype=dtype)
            self.q[:self.nq] = q_vectors[0]
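        # Example padding arithmetic: nq=1000 pads to width=1008 in 2-D
        # (next multiple of 16) and to width=1024 in 1-D (next multiple
        # of 32).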
        self.global_size = [self.q.shape[0]]
        self._cache_key = unique_id()

    @property
    def q_b(self):
        """Lazy creation of q buffer so it can survive context reset."""
        env = environment()
        key = self._cache_key
        if key not in env.cache:
            context = env.context[self.dtype]
            #print("creating inputs of size", self.global_size)
            buffer = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                               hostbuf=self.q)
            env.cache[key] = buffer
        return env.cache[key]

    def release(self):
        # type: () -> None
        """
        Free the buffer associated with the q value.
        """
        environment().free_buffer(self._cache_key)

    def __del__(self):
        # type: () -> None
        self.release()

class GpuKernel(Kernel):
    """
    Callable SAS kernel.

    *model* is the GpuModel object to call.

    The following attributes are defined:

    *info* is the module information

    *dtype* is the kernel precision

    *dim* is '1d' or '2d'

    *result* is a vector to contain the results of the call

    The resulting call method takes the *pars*, a list of values for
    the fixed parameters to the kernel, and *pd_pars*, a list of
    (value, weight) vectors for the polydisperse parameters.  *cutoff*
    determines the integration limits: any points with combined weight
    less than *cutoff* will not be calculated.

    Call :meth:`release` when done with the kernel instance.
    """
    def __init__(self, model, q_vectors):
        # type: (GpuModel, List[np.ndarray]) -> None
        dtype = model.dtype
        self.q_input = GpuInput(q_vectors, dtype)
        self._model = model
        self._as_dtype = (np.float32 if dtype == generate.F32
                          else np.float64 if dtype == generate.F64
                          else np.float16 if dtype == generate.F16
                          else np.float32)  # will never get here, so use np.float32
        self._cache_key = unique_id()

        # Attributes accessed from the outside.
        self.dim = '2d' if self.q_input.is_2d else '1d'
        self.info = model.info
        self.dtype = model.dtype

        # Holding place for the returned value.
        nout = 2 if self.info.have_Fq and self.dim == '1d' else 1
        extra_q = 4  # total weight, form volume, shell volume and R_eff
        self.result = np.empty(self.q_input.nq*nout + extra_q, dtype)
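        # Layout: nq*nout intensity values followed by the four extra
        # values named above.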

    @property
    def _result_b(self):
        """Lazy creation of result buffer so it can survive context reset."""
        env = environment()
        key = self._cache_key
        if key not in env.cache:
            context = env.context[self.dtype]
            width = ((self.result.size+31)//32)*32 * self.dtype.itemsize
            buffer = cl.Buffer(context, mf.READ_WRITE, width)
            env.cache[key] = buffer
        return env.cache[key]

    def _call_kernel(self, call_details, values, cutoff, magnetic,
                     effective_radius_type):
        # type: (CallDetails, np.ndarray, float, bool, int) -> None
        env = environment()
        queue = env.queue[self._model.dtype]
        context = queue.context

        # Arrange data transfer to/from the card.
        q_b = self.q_input.q_b
        result_b = self._result_b
        details_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                              hostbuf=call_details.buffer)
        values_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                             hostbuf=values)
        name = 'Iq' if self.dim == '1d' else 'Imagnetic' if magnetic else 'Iqxy'
        kernel = self._model.fetch_kernel(name)
        kernel_args = [
            np.uint32(self.q_input.nq), None, None,
            details_b, values_b, q_b, result_b,
            self._as_dtype(cutoff),
            np.uint32(effective_radius_type),
        ]
        #print("Calling OpenCL")
        #call_details.show(values)
        # Call the kernel and retrieve the results.
        wait_for = None
        last_nap = perf_counter()  # time.clock() was removed in Python 3.8
        step = 1000000//self.q_input.nq + 1
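        # Each launch evaluates *step* polydispersity points against all
        # nq q values, roughly one million evaluations per call, so a
        # single launch does not monopolize the device.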
        for start in range(0, call_details.num_eval, step):
            stop = min(start + step, call_details.num_eval)
            #print("queuing", start, stop)
            kernel_args[1:3] = [np.int32(start), np.int32(stop)]
            wait_for = [kernel(queue, self.q_input.global_size, None,
                               *kernel_args, wait_for=wait_for)]
            if stop < call_details.num_eval:
                # Allow other processes to run.
                wait_for[0].wait()
                current_time = perf_counter()
                if current_time - last_nap > 0.5:
                    time.sleep(0.001)
                    last_nap = current_time
        cl.enqueue_copy(queue, self.result, result_b, wait_for=wait_for)
        #print("result", self.result)

        # Free the per-call buffers.
        for v in (details_b, values_b):
            if v is not None:
                v.release()

    def release(self):
        # type: () -> None
        """
        Release resources associated with the kernel.
        """
        environment().free_buffer(self._cache_key)
        self.q_input.release()

    def __del__(self):
        # type: () -> None
        self.release()