source: sasmodels/sasmodels/kernelcl.py @ 3221de0

1"""
2GPU driver for C kernels
3
4There should be a single GPU environment running on the system.  This
5environment is constructed on the first call to :func:`env`, and the
6same environment is returned on each call.
7
8After retrieving the environment, the next step is to create the kernel.
9This is done with a call to :meth:`GpuEnvironment.make_kernel`, which
10returns the type of data used by the kernel.
11
12Next a :class:`GpuData` object should be created with the correct kind
13of data.  This data object can be used by multiple kernels, for example,
14if the target model is a weighted sum of multiple kernels.  The data
15should include any extra evaluation points required to compute the proper
16data smearing.  This need not match the square grid for 2D data if there
17is an index saying which q points are active.
18
19Together the GpuData, the program, and a device form a :class:`GpuKernel`.
20This kernel is used during fitting, receiving new sets of parameters and
21evaluating them.  The output value is stored in an output buffer on the
22devices, where it can be combined with other structure factors and form
23factors and have instrumental resolution effects applied.
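
In outline (schematic; the *call_details* and *values* structures are
packed by the sasmodels internals, so their construction is omitted):

    env = environment()
    model = GpuModel(source, model_info, dtype)
    kernel = model.make_kernel([qx, qy])   # or [q] for 1-D data
    Iq = kernel(call_details, values, cutoff, magnetic=False)
    kernel.release()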

In order to use OpenCL for your models, you will need OpenCL drivers for
your machine.  These should be available from your graphics card vendor.
Intel provides OpenCL drivers for CPUs as well as their integrated HD
graphics chipsets.  AMD also provides drivers for Intel CPUs, but as of
this writing the performance is lacking compared to the Intel drivers.
NVidia combines drivers for CUDA and OpenCL in one package.  The result
is a bit messy if you have multiple drivers installed.  You can see which
drivers are available by starting python and running:

    import pyopencl as cl
    cl.create_some_context(interactive=True)

Once you have done that, it will show the available drivers which you
can select.  It will then tell you that you can use these drivers
automatically by setting the SAS_OPENCL environment variable, which is
equivalent to PYOPENCL_CTX but does not conflict with other pyopencl
programs.
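
For example, to select the first device on the first platform without
the interactive prompt (the exact value depends on the drivers installed
on your machine):

    SAS_OPENCL=0:0 python your_script.py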

Some graphics cards have multiple devices on the same card.  You cannot
yet use both of them concurrently to evaluate models, but you can run
the program twice using a different device for each session.

OpenCL kernels are compiled when needed by the device driver.  Some
drivers produce compiler output even when there is no error.  You
can see the output by setting PYOPENCL_COMPILER_OUTPUT=1.  It should be
harmless, albeit annoying.
"""
from __future__ import print_function

import os
import warnings
import logging
import time

import numpy as np  # type: ignore


# Attempt to setup opencl. This may fail if the opencl package is not
# installed or if it is installed but there are no devices available.
try:
    import pyopencl as cl  # type: ignore
    from pyopencl import mem_flags as mf
    from pyopencl.characterize import get_fast_inaccurate_build_options
    # Ask OpenCL for the default context so that we know that one exists
    cl.create_some_context(interactive=False)
    HAVE_OPENCL = True
    OPENCL_ERROR = ""
except Exception as exc:
    HAVE_OPENCL = False
    OPENCL_ERROR = str(exc)

from . import generate
from .kernel import KernelModel, Kernel

# pylint: disable=unused-import
try:
    from typing import Tuple, List, Dict, Callable, Any
    from .modelinfo import ModelInfo
    from .details import CallDetails
except ImportError:
    pass
# pylint: enable=unused-import
# CRUFT: pyopencl < 2017.1  (as of June 2016 needs quotes around include path)
def quote_path(v):
    """
    Quote the path if it is not already quoted.

    If v starts with '-', then assume that it is a -I option or similar
    and do not quote it.  This is fragile: a -Ipath containing a space
    would need to be quoted but will not be.
    """
    return '"'+v+'"' if v and ' ' in v and v[0] not in "\"'-" else v

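# Example (editor's illustration): only values containing a space that do
# not already look quoted or like a flag get wrapped:
#   quote_path('C:/Program Files/include') -> '"C:/Program Files/include"'
#   quote_path('-I/usr/include')           -> '-I/usr/include'
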
def fix_pyopencl_include():
    """
    Monkey patch pyopencl to allow spaces in include file path.
    """
    import pyopencl as cl
    if hasattr(cl, '_DEFAULT_INCLUDE_OPTIONS'):
        cl._DEFAULT_INCLUDE_OPTIONS = [quote_path(v) for v in cl._DEFAULT_INCLUDE_OPTIONS]

if HAVE_OPENCL:
    fix_pyopencl_include()

# The max loops number is limited by the amount of local memory available
# on the device.  You don't want to make this value too big because it will
# waste resources, nor too small because it may interfere with users trying
# to do their polydispersity calculations.  A value of 2048 should be much
# larger than necessary given that cost grows as npts^k where k is the number
# of polydisperse parameters.
MAX_LOOPS = 2048


# Pragmas for enabling OpenCL features.  Be sure to protect them so that
# they still compile even if OpenCL is not present.
_F16_PRAGMA = """\
#if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp16)
#  pragma OPENCL EXTENSION cl_khr_fp16: enable
#endif
"""

_F64_PRAGMA = """\
#if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp64)
#  pragma OPENCL EXTENSION cl_khr_fp64: enable
#endif
"""

def use_opencl():
    """
    Return True if OpenCL is available and has not been disabled by
    setting SAS_OPENCL=none in the environment.
    """
    return HAVE_OPENCL and os.environ.get("SAS_OPENCL", "").lower() != "none"

ENV = None
def reset_environment():
    """
    Call to create a new OpenCL context, such as after a change to SAS_OPENCL.
    """
    global ENV
    ENV = GpuEnvironment() if use_opencl() else None

def environment():
    # type: () -> "GpuEnvironment"
    """
    Returns a singleton :class:`GpuEnvironment`.

    This provides an OpenCL context and one queue per device.
    """
    if not HAVE_OPENCL:
        warnings.warn("OpenCL startup failed with ***"
                      + OPENCL_ERROR + "***; using C compiler instead")
    elif ENV is None:
        reset_environment()
    return ENV

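# Example (editor's illustration): fetch the singleton environment and a
# command queue suitable for single precision kernels:
#   env = environment()
#   queue = env.get_queue(np.dtype('float32'))
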
def has_type(device, dtype):
    # type: (cl.Device, np.dtype) -> bool
    """
    Return true if device supports the requested precision.
    """
    if dtype == generate.F32:
        return True
    elif dtype == generate.F64:
        return "cl_khr_fp64" in device.extensions
    elif dtype == generate.F16:
        return "cl_khr_fp16" in device.extensions
    else:
        return False

def get_warp(kernel, queue):
    # type: (cl.Kernel, cl.CommandQueue) -> int
    """
    Return the size of an execution batch for *kernel* running on *queue*.
    """
    return kernel.get_work_group_info(
        cl.kernel_work_group_info.PREFERRED_WORK_GROUP_SIZE_MULTIPLE,
        queue.device)

def _stretch_input(vector, dtype, extra=1e-3, boundary=32):
    # type: (np.ndarray, np.dtype, float, int) -> np.ndarray
    """
    Stretch an input vector to the correct boundary.

    Performance on the kernels can drop by a factor of two or more if the
    number of values to compute does not fall on a nice power of two
    boundary.  The trailing additional vector elements are given a
    value of *extra*, and so f(*extra*) will be computed for each of
    them.  The returned array will thus be a subset of the computed array.

    *boundary* should be a power of 2 which is at least 32 for good
    performance on current platforms (as of Jan 2015).  It should
    probably be the max of get_warp(kernel, queue) and
    device.min_data_type_align_size//4.
    """
    remainder = vector.size % boundary
    if remainder != 0:
        size = vector.size + (boundary - remainder)
        vector = np.hstack((vector, [extra] * (size - vector.size)))
    return np.ascontiguousarray(vector, dtype=dtype)
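
# Example (editor's illustration): a 100-point vector padded to a
# 32-element boundary grows to 128 points, with the 28 trailing slots
# filled with *extra* so the kernel can evaluate them harmlessly:
#   q = _stretch_input(np.linspace(0.001, 0.5, 100), np.float32)
#   assert q.size == 128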


def compile_model(context, source, dtype, fast=False):
    # type: (cl.Context, str, np.dtype, bool) -> cl.Program
    """
    Build a model to run on the gpu.

    Returns the compiled program.  Raises a RuntimeError if the requested
    *dtype* is not supported by all devices in the context (for example,
    if float64 is requested but some device lacks the cl_khr_fp64
    extension).
    """
    dtype = np.dtype(dtype)
    if not all(has_type(d, dtype) for d in context.devices):
        raise RuntimeError("%s not supported for devices"%dtype)

    source_list = [generate.convert_type(source, dtype)]

    if dtype == generate.F16:
        source_list.insert(0, _F16_PRAGMA)
    elif dtype == generate.F64:
        source_list.insert(0, _F64_PRAGMA)

    # Note: USE_SINCOS makes the intel cpu slower under opencl
    if context.devices[0].type == cl.device_type.GPU:
        source_list.insert(0, "#define USE_SINCOS\n")
    options = (get_fast_inaccurate_build_options(context.devices[0])
               if fast else [])
    source = "\n".join(source_list)
    program = cl.Program(context, source).build(options=options)
    #print("done with "+program)
    return program

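# Example (editor sketch, assuming generated model *source* is in hand):
#   context = environment().get_context(np.dtype('float32'))
#   program = compile_model(context, source, np.dtype('float32'), fast=False)
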

# for now, this returns one device in the context
# TODO: create a context that contains all devices on all platforms
class GpuEnvironment(object):
    """
    GPU context, with possibly many devices, and one queue per device.
    """
    def __init__(self):
        # type: () -> None
        # find gpu context
        #self.context = cl.create_some_context()

        self.context = None
        if 'SAS_OPENCL' in os.environ:
            # Copy SAS_OPENCL into PYOPENCL_CTX so create_some_context
            # picks up the requested device without prompting.
            os.environ["PYOPENCL_CTX"] = os.environ["SAS_OPENCL"]
        if 'PYOPENCL_CTX' in os.environ:
            self._create_some_context()

        if not self.context:
            self.context = _get_default_context()

        # Byte boundary for data alignment
        #self.data_boundary = max(d.min_data_type_align_size
        #                         for d in self.context.devices)
        self.queues = [cl.CommandQueue(context, context.devices[0])
                       for context in self.context]
        self.compiled = {}

    def has_type(self, dtype):
        # type: (np.dtype) -> bool
        """
        Return True if any of the available devices support the given type.
        """
        return any(has_type(d, dtype)
                   for context in self.context
                   for d in context.devices)

    def get_queue(self, dtype):
        # type: (np.dtype) -> cl.CommandQueue
        """
        Return a command queue for the kernels of type dtype.
        """
        for context, queue in zip(self.context, self.queues):
            if all(has_type(d, dtype) for d in context.devices):
                return queue

    def get_context(self, dtype):
        # type: (np.dtype) -> cl.Context
        """
        Return an OpenCL context for the kernels of type dtype.
        """
        for context in self.context:
            if all(has_type(d, dtype) for d in context.devices):
                return context

    def _create_some_context(self):
        # type: () -> cl.Context
        """
        Protected call to cl.create_some_context without interactivity.  Use
        this if SAS_OPENCL is set in the environment.  Sets the *context*
        attribute.
        """
        try:
            self.context = [cl.create_some_context(interactive=False)]
        except Exception as exc:
            warnings.warn(str(exc))
            warnings.warn("pyopencl.create_some_context() failed")
            warnings.warn("the environment variable 'SAS_OPENCL' might not be set correctly")

    def compile_program(self, name, source, dtype, fast, timestamp):
        # type: (str, str, np.dtype, bool, float) -> cl.Program
        """
        Compile the program for the device in the given context.
        """
        # Note: PyOpenCL caches based on md5 hash of source, options and device
        # so we don't really need to cache things for ourselves.  I'll do so
        # anyway just to save some data munging time.
        tag = generate.tag_source(source)
        key = "%s-%s-%s%s"%(name, dtype, tag, ("-fast" if fast else ""))
        # Check timestamp on program
        program, program_timestamp = self.compiled.get(key, (None, np.inf))
        if program_timestamp < timestamp:
            del self.compiled[key]
        if key not in self.compiled:
            context = self.get_context(dtype)
            logging.info("building %s for OpenCL %s", key,
                         context.devices[0].name.strip())
            program = compile_model(self.get_context(dtype),
                                    str(source), dtype, fast)
            self.compiled[key] = (program, timestamp)
        return program

def _get_default_context():
    # type: () -> List[cl.Context]
    """
    Get an OpenCL context, preferring GPU over CPU, and preferring Intel
    drivers over AMD drivers.
    """
    # Note: on mobile devices there is automatic clock scaling if either the
    # CPU or the GPU is underutilized; probably doesn't affect us, but if
    # it did, it would mean that putting a busy loop on the CPU while the GPU
    # is running may increase throughput.
    #
    # Macbook pro, base install:
    #     {'Apple': [Intel CPU, NVIDIA GPU]}
    # Macbook pro, base install:
    #     {'Apple': [Intel CPU, Intel GPU]}
    # 2 x nvidia 295 with Intel and NVIDIA opencl drivers installed
    #     {'Intel': [CPU], 'NVIDIA': [GPU, GPU, GPU, GPU]}
    gpu, cpu = None, None
    for platform in cl.get_platforms():
        # AMD provides a much weaker CPU driver than Intel/Apple, so avoid it.
        # If someone has bothered to install the AMD/NVIDIA drivers, prefer
        # them over the integrated graphics driver that may have been supplied
        # with the CPU chipset.
        preferred_cpu = (platform.vendor.startswith('Intel')
                         or platform.vendor.startswith('Apple'))
        preferred_gpu = (platform.vendor.startswith('Advanced')
                         or platform.vendor.startswith('NVIDIA'))
        for device in platform.get_devices():
            if device.type == cl.device_type.GPU:
                # If the existing type is not GPU then it will be CUSTOM
                # or ACCELERATOR so don't override it.
                if gpu is None or (preferred_gpu and gpu.type == cl.device_type.GPU):
                    gpu = device
            elif device.type == cl.device_type.CPU:
                if cpu is None or preferred_cpu:
                    cpu = device
            else:
                # System has cl.device_type.ACCELERATOR or cl.device_type.CUSTOM
                # Intel Phi, for example, registers as an accelerator.
                # Since the user installed a custom device on their system
                # and went through the pain of sorting out OpenCL drivers for
                # it, let's assume they really do want to use it as their
                # primary compute device.
                gpu = device

    # Order the devices by gpu then by cpu; when searching for an available
    # device by data type they will be checked in this order, which means
    # that if the gpu supports double then the cpu will never be used (though
    # we may make it possible to explicitly request the cpu at some point).
    devices = []
    if gpu is not None:
        devices.append(gpu)
    if cpu is not None:
        devices.append(cpu)
    return [cl.Context([d]) for d in devices]

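# Example (editor's illustration): to inspect the same platform/device
# list that the search above walks through:
#   for platform in cl.get_platforms():
#       print(platform.vendor, [d.name for d in platform.get_devices()])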

class GpuModel(KernelModel):
    """
    GPU wrapper for a single model.

    *source* and *model_info* are the model source and interface as returned
    from :func:`generate.make_source` and :func:`generate.make_model_info`.

    *dtype* is the desired model precision.  Any numpy dtype for single
    or double precision floats will do, such as 'f', 'float32' or 'single'
    for single and 'd', 'float64' or 'double' for double.  Double precision
    is an optional extension which may not be available on all devices.
    Half precision ('float16', 'half') may be available on some devices.
    Fast precision ('fast') is a loose version of single precision, indicating
    that the compiler is allowed to take shortcuts.
    """
    def __init__(self, source, model_info, dtype=generate.F32, fast=False):
        # type: (Dict[str, str], ModelInfo, np.dtype, bool) -> None
        self.info = model_info
        self.source = source
        self.dtype = dtype
        self.fast = fast
        self.program = None # delay program creation
        self._kernels = None

    def __getstate__(self):
        # type: () -> Tuple[ModelInfo, str, np.dtype, bool]
        return self.info, self.source, self.dtype, self.fast

    def __setstate__(self, state):
        # type: (Tuple[ModelInfo, str, np.dtype, bool]) -> None
        self.info, self.source, self.dtype, self.fast = state
        self.program = None

    def make_kernel(self, q_vectors):
        # type: (List[np.ndarray]) -> "GpuKernel"
        if self.program is None:
            compile_program = environment().compile_program
            timestamp = generate.ocl_timestamp(self.info)
            self.program = compile_program(
                self.info.name,
                self.source['opencl'],
                self.dtype,
                self.fast,
                timestamp)
            variants = ['Iq', 'Iqxy', 'Imagnetic']
            names = [generate.kernel_name(self.info, k) for k in variants]
            kernels = [getattr(self.program, k) for k in names]
            self._kernels = dict((k, v) for k, v in zip(variants, kernels))
        is_2d = len(q_vectors) == 2
        if is_2d:
            kernel = [self._kernels['Iqxy'], self._kernels['Imagnetic']]
        else:
            kernel = [self._kernels['Iq']]*2
        return GpuKernel(kernel, self.dtype, self.info, q_vectors)

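    # Example (editor sketch, assuming a GpuModel built from generated
    # source; sasmodels normally does this construction for you):
    #   kernel = model.make_kernel([q])      # one q vector for 1-D data
    #   Iq = kernel(call_details, values, 1e-5, False)
    #   kernel.release()
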
    def release(self):
        # type: () -> None
        """
        Free the resources associated with the model.
        """
        if self.program is not None:
            self.program = None

    def __del__(self):
        # type: () -> None
        self.release()

# TODO: check that we don't need a destructor for buffers which go out of scope
class GpuInput(object):
    """
    Make q data available to the gpu.

    *q_vectors* is a list of q vectors, which will be *[q]* for 1-D data,
    and *[qx, qy]* for 2-D data.  Internally, the vectors will be reallocated
    to get the best performance on OpenCL, which may involve shifting and
    stretching the array to better match the memory architecture.  Additional
    points will be evaluated with *q=1e-3*.

    *dtype* is the data type for the q vectors. The data type should be
    set to match that of the kernel, which is an attribute of
    :class:`GpuModel`.  Note that not all kernels support double
    precision, so even if the program was created for double precision,
    the *GpuModel.dtype* may be single precision.

    Call :meth:`release` when complete.  Even if not called directly, the
    buffer will be released when the data object is freed.
    """
    def __init__(self, q_vectors, dtype=generate.F32):
        # type: (List[np.ndarray], np.dtype) -> None
        # TODO: do we ever need double precision q?
        env = environment()
        self.nq = q_vectors[0].size
        self.dtype = np.dtype(dtype)
        self.is_2d = (len(q_vectors) == 2)
        # TODO: stretch input based on get_warp()
        # not doing it now since warp depends on kernel, which is not known
        # at this point, so instead using 32, which is good on the set of
        # architectures tested so far.
        if self.is_2d:
            # Note: 16 rather than 15 because result is 1 longer than input.
            width = ((self.nq+16)//16)*16
            self.q = np.empty((width, 2), dtype=dtype)
            # Fill the padding with q=1e-3 as promised in the docstring so
            # the extra evaluations are well defined.
            self.q[self.nq:, :] = 1e-3
            self.q[:self.nq, 0] = q_vectors[0]
            self.q[:self.nq, 1] = q_vectors[1]
        else:
            # Note: 32 rather than 31 because result is 1 longer than input.
            width = ((self.nq+32)//32)*32
            self.q = np.empty(width, dtype=dtype)
            self.q[self.nq:] = 1e-3
            self.q[:self.nq] = q_vectors[0]
        self.global_size = [self.q.shape[0]]
        context = env.get_context(self.dtype)
        #print("creating inputs of size", self.global_size)
        self.q_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                             hostbuf=self.q)

    def release(self):
        # type: () -> None
        """
        Free the memory.
        """
        if self.q_b is not None:
            self.q_b.release()
            self.q_b = None

    def __del__(self):
        # type: () -> None
        self.release()

class GpuKernel(Kernel):
    """
    Callable SAS kernel.

    *kernel* is the pair of compiled opencl kernels to call, [plain,
    magnetic], as built by :meth:`GpuModel.make_kernel`

    *model_info* is the module information

    *q_vectors* is the q vectors at which the kernel should be evaluated

    *dtype* is the kernel precision

    The resulting call method takes the *pars*, a list of values for
    the fixed parameters to the kernel, and *pd_pars*, a list of (value, weight)
    vectors for the polydisperse parameters.  *cutoff* determines the
    integration limits: any points with combined weight less than *cutoff*
    will not be calculated.

    Call :meth:`release` when done with the kernel instance.
    """
    def __init__(self, kernel, dtype, model_info, q_vectors):
        # type: (List[cl.Kernel], np.dtype, ModelInfo, List[np.ndarray]) -> None
        q_input = GpuInput(q_vectors, dtype)
        self.kernel = kernel
        self.info = model_info
        self.dtype = dtype
        self.dim = '2d' if q_input.is_2d else '1d'
        # plus one for the polydispersity normalization value
        self.result = np.empty(q_input.nq+1, dtype)

        # Inputs and outputs for each kernel call
        # Note: res may be shorter than res_b if global_size != nq
        env = environment()
        self.queue = env.get_queue(dtype)

        self.result_b = cl.Buffer(self.queue.context, mf.READ_WRITE,
                                  q_input.global_size[0] * dtype.itemsize)
        self.q_input = q_input # allocated by GpuInput above

        self._need_release = [self.result_b, self.q_input]
        self.real = (np.float32 if dtype == generate.F32
                     else np.float64 if dtype == generate.F64
                     else np.float16 if dtype == generate.F16
                     else np.float32)  # will never get here, so use np.float32

    def __call__(self, call_details, values, cutoff, magnetic):
        # type: (CallDetails, np.ndarray, float, bool) -> np.ndarray
        context = self.queue.context
        # Arrange data transfer to card
        details_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                              hostbuf=call_details.buffer)
        values_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                             hostbuf=values)

        kernel = self.kernel[1 if magnetic else 0]
        args = [
            np.uint32(self.q_input.nq), None, None,
            details_b, values_b, self.q_input.q_b, self.result_b,
            self.real(cutoff),
        ]
        #print("Calling OpenCL")
        #call_details.show(values)
        # Call kernel and retrieve results.  The polydispersity loop is
        # chunked so that each enqueue covers roughly one million q
        # evaluations, keeping individual kernel launches short.
        wait_for = None
        last_nap = time.perf_counter()
        step = 1000000//self.q_input.nq + 1
        for start in range(0, call_details.num_eval, step):
            stop = min(start + step, call_details.num_eval)
            #print("queuing",start,stop)
            args[1:3] = [np.int32(start), np.int32(stop)]
            wait_for = [kernel(self.queue, self.q_input.global_size, None,
                               *args, wait_for=wait_for)]
            if stop < call_details.num_eval:
                # Allow other processes to run
                wait_for[0].wait()
                current_time = time.perf_counter()
                if current_time - last_nap > 0.5:
                    time.sleep(0.05)
                    last_nap = current_time
        cl.enqueue_copy(self.queue, self.result, self.result_b)
        #print("result", self.result)

        # Free buffers
        for v in (details_b, values_b):
            if v is not None:
                v.release()

        # Scale by the polydispersity normalization and add the background.
        pd_norm = self.result[self.q_input.nq]
        scale = values[0]/(pd_norm if pd_norm != 0.0 else 1.0)
        background = values[1]
        #print("scale",scale,values[0],self.result[self.q_input.nq],background)
        return scale*self.result[:self.q_input.nq] + background
        # return self.result[:self.q_input.nq]

    def release(self):
        # type: () -> None
        """
        Release resources associated with the kernel.
        """
        for v in self._need_release:
            v.release()
        self._need_release = []

    def __del__(self):
        # type: () -> None
        self.release()