source: sasmodels/sasmodels/kernelcl.py @ e59f60a

Last change: e59f60a, checked in by Paul Kienzle <pkienzle@…>, 6 years ago

py.test needs kernelcl import to succeed even if pyopencl is not available

1"""
2GPU driver for C kernels
3
4There should be a single GPU environment running on the system.  This
5environment is constructed on the first call to :func:`env`, and the
6same environment is returned on each call.
7
8After retrieving the environment, the next step is to create the kernel.
9This is done with a call to :meth:`GpuEnvironment.make_kernel`, which
10returns the type of data used by the kernel.
11
12Next a :class:`GpuData` object should be created with the correct kind
13of data.  This data object can be used by multiple kernels, for example,
14if the target model is a weighted sum of multiple kernels.  The data
15should include any extra evaluation points required to compute the proper
16data smearing.  This need not match the square grid for 2D data if there
17is an index saying which q points are active.
18
19Together the GpuData, the program, and a device form a :class:`GpuKernel`.
20This kernel is used during fitting, receiving new sets of parameters and
21evaluating them.  The output value is stored in an output buffer on the
22devices, where it can be combined with other structure factors and form
23factors and have instrumental resolution effects applied.
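
A minimal sketch of this flow, using the names defined in this module
(the *call_details* and *values* arrays are assumed to have been packed
elsewhere, e.g. by the sasmodels details module):

    from sasmodels import generate
    source = generate.make_source(model_info)
    model = GpuModel(source, model_info, dtype=generate.F32)
    kernel = model.make_kernel([q])  # q is a 1-D numpy array
    Iq = kernel(call_details, values, cutoff=0., magnetic=False)
    kernel.release()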

In order to use OpenCL for your models, you will need OpenCL drivers for
your machine.  These should be available from your graphics card vendor.
Intel provides OpenCL drivers for CPUs as well as their integrated HD
graphics chipsets.  AMD also provides drivers for Intel CPUs, but as of
this writing the performance is lacking compared to the Intel drivers.
NVidia combines drivers for CUDA and OpenCL in one package.  The result
is a bit messy if you have multiple drivers installed.  You can see which
drivers are available by starting python and running:

    import pyopencl as cl
    cl.create_some_context(interactive=True)

Once you have done that, it will show the available drivers which you
can select.  It will then tell you that you can use these drivers
automatically by setting the SAS_OPENCL environment variable, which
serves the same purpose as PYOPENCL_CTX but does not conflict with
other pyopencl programs.
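
For example (an illustrative invocation; the platform:device index and
the script name will vary by system):

    SAS_OPENCL=0:1 python your_script.py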

Some graphics cards have multiple devices on the same card.  You cannot
yet use both of them concurrently to evaluate models, but you can run
the program twice using a different device for each session.

OpenCL kernels are compiled when needed by the device driver.  Some
drivers produce compiler output even when there is no error.  You
can see the output by setting PYOPENCL_COMPILER_OUTPUT=1.  It should be
harmless, albeit annoying.
"""
from __future__ import print_function

import os
import warnings
import logging
import time

import numpy as np  # type: ignore

try:
    if os.environ.get("SAS_OPENCL", "").lower() == "none":
        HAVE_OPENCL = False
    else:
        import pyopencl as cl  # type: ignore
        from pyopencl import mem_flags as mf
        from pyopencl.characterize import get_fast_inaccurate_build_options
        # Ask OpenCL for the default context so that we know that one exists
        cl.create_some_context(interactive=False)
        HAVE_OPENCL = True
except Exception as exc:
    warnings.warn("OpenCL startup failed with ***"
                  + str(exc) + "***; using C compiler instead")
    HAVE_OPENCL = False

from . import generate
from .kernel import KernelModel, Kernel

# pylint: disable=unused-import
try:
    from typing import Tuple, Callable, Any, List, Dict
    from .modelinfo import ModelInfo
    from .details import CallDetails
except ImportError:
    pass
# pylint: enable=unused-import

# CRUFT: pyopencl < 2017.1  (as of June 2016 needs quotes around include path)
def quote_path(v):
    """
    Quote the path if it is not already quoted.

    If v starts with '-', then assume that it is a -I option or similar
    and do not quote it.  This is fragile: a -I option whose path contains
    a space would still need to be quoted.
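
    Illustrative examples of the behavior:

        >>> quote_path('/usr/include')
        '/usr/include'
        >>> quote_path('C:/Program Files/include')
        '"C:/Program Files/include"'
        >>> quote_path('-I/some/path')
        '-I/some/path'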
95    """
96    return '"'+v+'"' if v and ' ' in v and not v[0] in "\"'-" else v

def fix_pyopencl_include():
    """
    Monkey patch pyopencl to allow spaces in include file path.
    """
    import pyopencl as cl
    if hasattr(cl, '_DEFAULT_INCLUDE_OPTIONS'):
        cl._DEFAULT_INCLUDE_OPTIONS = [
            quote_path(v) for v in cl._DEFAULT_INCLUDE_OPTIONS]

if HAVE_OPENCL:
    fix_pyopencl_include()

# The max loops number is limited by the amount of local memory available
# on the device.  You don't want to make this value too big because it will
# waste resources, nor too small because it may interfere with users trying
# to do their polydispersity calculations.  A value of 2048 should be much
# larger than necessary given that cost grows as npts^k where k is the number
# of polydisperse parameters.
MAX_LOOPS = 2048


# Pragmas for enabling OpenCL features.  Be sure to protect them so that
# they still compile even if OpenCL is not present.
_F16_PRAGMA = """\
#if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp16)
#  pragma OPENCL EXTENSION cl_khr_fp16: enable
#endif
"""

_F64_PRAGMA = """\
#if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp64)
#  pragma OPENCL EXTENSION cl_khr_fp64: enable
#endif
"""


ENV = None
def environment():
    # type: () -> "GpuEnvironment"
    """
    Returns a singleton :class:`GpuEnvironment`.

    This provides an OpenCL context and one queue per device.
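
    Illustrative use (names as defined in this module):

        env = environment()                  # same object on every call
        queue = env.get_queue(generate.F32)  # queue for single precision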
    """
    global ENV
    if ENV is None:
        ENV = GpuEnvironment()
    return ENV

def has_type(device, dtype):
    # type: (cl.Device, np.dtype) -> bool
    """
    Return True if the device supports the requested precision.
    """
    if dtype == generate.F32:
        return True
    elif dtype == generate.F64:
        return "cl_khr_fp64" in device.extensions
    elif dtype == generate.F16:
        return "cl_khr_fp16" in device.extensions
    else:
        return False

def get_warp(kernel, queue):
    # type: (cl.Kernel, cl.CommandQueue) -> int
    """
    Return the size of an execution batch for *kernel* running on *queue*.
    """
    return kernel.get_work_group_info(
        cl.kernel_work_group_info.PREFERRED_WORK_GROUP_SIZE_MULTIPLE,
        queue.device)

def _stretch_input(vector, dtype, extra=1e-3, boundary=32):
    # type: (np.ndarray, np.dtype, float, int) -> np.ndarray
    """
    Stretch an input vector to the correct boundary.

    Performance on the kernels can drop by a factor of two or more if the
    number of values to compute does not fall on a nice power of two
    boundary.  The trailing additional vector elements are given a
    value of *extra*, and so f(*extra*) will be computed for each of
    them.  The desired values will thus be a subset of the computed array.

    *boundary* should be a power of 2 which is at least 32 for good
    performance on current platforms (as of Jan 2015).  It should
    probably be the max of get_warp(kernel, queue) and
    device.min_data_type_align_size//4.
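
    For example (illustrative), a 100-element vector padded to a 32-element
    boundary grows to 128 elements, with the trailing 28 entries set to
    *extra*:

        >>> _stretch_input(np.arange(100, dtype='d'), "f").size
        128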
    """
    remainder = vector.size % boundary
    if remainder != 0:
        size = vector.size + (boundary - remainder)
        vector = np.hstack((vector, [extra] * (size - vector.size)))
    return np.ascontiguousarray(vector, dtype=dtype)


def compile_model(context, source, dtype, fast=False):
    # type: (cl.Context, str, np.dtype, bool) -> cl.Program
    """
    Build a model to run on the gpu.

    Returns the compiled program.  Raises RuntimeError if the requested
    *dtype* is not supported by all devices in the context (for example,
    float64 requires the cl_khr_fp64 extension).
    """
    dtype = np.dtype(dtype)
    if not all(has_type(d, dtype) for d in context.devices):
        raise RuntimeError("%s not supported for devices"%dtype)

    source_list = [generate.convert_type(source, dtype)]

    if dtype == generate.F16:
        source_list.insert(0, _F16_PRAGMA)
    elif dtype == generate.F64:
        source_list.insert(0, _F64_PRAGMA)

    # Note: USE_SINCOS makes the intel cpu slower under opencl
    if context.devices[0].type == cl.device_type.GPU:
        source_list.insert(0, "#define USE_SINCOS\n")
    options = (get_fast_inaccurate_build_options(context.devices[0])
               if fast else [])
    source = "\n".join(source_list)
    program = cl.Program(context, source).build(options=options)
    #print("done with "+program)
    return program


# for now, this returns one device in the context
# TODO: create a context that contains all devices on all platforms
class GpuEnvironment(object):
    """
    GPU context, with possibly many devices, and one queue per device.
    """
    def __init__(self):
        # type: () -> None
        # find gpu context
        #self.context = cl.create_some_context()

        self.context = None
        if 'SAS_OPENCL' in os.environ:
            # Copy SAS_OPENCL into PYOPENCL_CTX so pyopencl picks the
            # requested context.
            os.environ["PYOPENCL_CTX"] = os.environ["SAS_OPENCL"]
        if 'PYOPENCL_CTX' in os.environ:
            self._create_some_context()

        if not self.context:
            self.context = _get_default_context()

        # Byte boundary for data alignment
        #self.data_boundary = max(d.min_data_type_align_size
        #                         for d in self.context.devices)
        self.queues = [cl.CommandQueue(context, context.devices[0])
                       for context in self.context]
        self.compiled = {}

    def has_type(self, dtype):
        # type: (np.dtype) -> bool
        """
        Return True if any device in any context supports the given type.
        """
        return any(has_type(d, dtype)
                   for context in self.context
                   for d in context.devices)

    def get_queue(self, dtype):
        # type: (np.dtype) -> cl.CommandQueue
        """
        Return a command queue for the kernels of type dtype.
        """
        for context, queue in zip(self.context, self.queues):
            if all(has_type(d, dtype) for d in context.devices):
                return queue

    def get_context(self, dtype):
        # type: (np.dtype) -> cl.Context
        """
        Return an OpenCL context for the kernels of type dtype.
        """
        for context in self.context:
            if all(has_type(d, dtype) for d in context.devices):
                return context

    def _create_some_context(self):
        # type: () -> None
        """
        Protected call to cl.create_some_context without interactivity.  Use
        this if SAS_OPENCL is set in the environment.  Sets the *context*
        attribute.
        """
        try:
            self.context = [cl.create_some_context(interactive=False)]
        except Exception as exc:
            warnings.warn(str(exc))
            warnings.warn("pyopencl.create_some_context() failed")
            warnings.warn("the environment variable 'SAS_OPENCL' might not be set correctly")

    def compile_program(self, name, source, dtype, fast, timestamp):
        # type: (str, str, np.dtype, bool, float) -> cl.Program
        """
        Compile the program for the device in the given context.
        """
        # Note: PyOpenCL caches based on md5 hash of source, options and
        # device, so we don't really need to cache things for ourselves.
        # I'll do so anyway just to save some data munging time.
        tag = generate.tag_source(source)
        key = "%s-%s-%s%s"%(name, dtype, tag, ("-fast" if fast else ""))
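        # The key has the form "<name>-<dtype>-<tag>[-fast]", e.g.
        # "sphere-float32-abcdef12-fast" (illustrative values).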
        # Check timestamp on program
        program, program_timestamp = self.compiled.get(key, (None, np.inf))
        if program_timestamp < timestamp:
            del self.compiled[key]
        if key not in self.compiled:
            context = self.get_context(dtype)
            logging.info("building %s for OpenCL %s", key,
                         context.devices[0].name.strip())
            program = compile_model(context, str(source), dtype, fast)
            self.compiled[key] = (program, timestamp)
        return program

def _get_default_context():
    # type: () -> List[cl.Context]
    """
    Get an OpenCL context, preferring GPU over CPU, and preferring Intel
    drivers over AMD drivers.
    """
    # Note: on mobile devices there is automatic clock scaling if either the
    # CPU or the GPU is underutilized; probably doesn't affect us, but if it
    # did, it would mean that putting a busy loop on the CPU while the GPU
    # is running may increase throughput.
    #
    # Macbook pro, base install:
    #     {'Apple': [Intel CPU, NVIDIA GPU]}
    # Macbook pro, base install:
    #     {'Apple': [Intel CPU, Intel GPU]}
    # 2 x nvidia 295 with Intel and NVIDIA opencl drivers installed
    #     {'Intel': [CPU], 'NVIDIA': [GPU, GPU, GPU, GPU]}
    gpu, cpu = None, None
    for platform in cl.get_platforms():
        # AMD provides a much weaker CPU driver than Intel/Apple, so avoid it.
        # If someone has bothered to install the AMD/NVIDIA drivers, prefer
        # them over the integrated graphics driver that may have been supplied
        # with the CPU chipset.
        preferred_cpu = (platform.vendor.startswith('Intel')
                         or platform.vendor.startswith('Apple'))
        preferred_gpu = (platform.vendor.startswith('Advanced')
                         or platform.vendor.startswith('NVIDIA'))
        for device in platform.get_devices():
            if device.type == cl.device_type.GPU:
                # If the existing type is not GPU then it will be CUSTOM
                # or ACCELERATOR so don't override it.
                if gpu is None or (preferred_gpu and gpu.type == cl.device_type.GPU):
                    gpu = device
            elif device.type == cl.device_type.CPU:
                if cpu is None or preferred_cpu:
                    cpu = device
            else:
                # System has cl.device_type.ACCELERATOR or cl.device_type.CUSTOM
                # Intel Phi, for example, registers as an accelerator.
                # Since the user installed a custom device on their system
                # and went through the pain of sorting out OpenCL drivers for
                # it, let's assume they really do want to use it as their
                # primary compute device.
                gpu = device

    # Order the devices by gpu then by cpu; when searching for an available
    # device by data type they will be checked in this order, which means
    # that if the gpu supports double then the cpu will never be used (though
    # we may make it possible to explicitly request the cpu at some point).
    devices = []
    if gpu is not None:
        devices.append(gpu)
    if cpu is not None:
        devices.append(cpu)
    return [cl.Context([d]) for d in devices]


class GpuModel(KernelModel):
    """
    GPU wrapper for a single model.

    *source* and *model_info* are the model source and interface as returned
    from :func:`generate.make_source` and :func:`generate.make_model_info`.

    *dtype* is the desired model precision.  Any numpy dtype for single
    or double precision floats will do, such as 'f', 'float32' or 'single'
    for single and 'd', 'float64' or 'double' for double.  Double precision
    is an optional extension which may not be available on all devices.
    Half precision ('float16', 'half') may be available on some devices.
    Fast precision ('fast') is a loose version of single precision, indicating
    that the compiler is allowed to take shortcuts.
    """
    def __init__(self, source, model_info, dtype=generate.F32, fast=False):
        # type: (Dict[str, str], ModelInfo, np.dtype, bool) -> None
        self.info = model_info
        self.source = source
        self.dtype = dtype
        self.fast = fast
        self.program = None  # delay program creation
        self._kernels = None

    def __getstate__(self):
        # type: () -> Tuple[ModelInfo, str, np.dtype, bool]
        return self.info, self.source, self.dtype, self.fast

    def __setstate__(self, state):
        # type: (Tuple[ModelInfo, str, np.dtype, bool]) -> None
        self.info, self.source, self.dtype, self.fast = state
        self.program = None

    def make_kernel(self, q_vectors):
        # type: (List[np.ndarray]) -> "GpuKernel"
        if self.program is None:
            compile_program = environment().compile_program
            timestamp = generate.ocl_timestamp(self.info)
            self.program = compile_program(
                self.info.name,
                self.source['opencl'],
                self.dtype,
                self.fast,
                timestamp)
            variants = ['Iq', 'Iqxy', 'Imagnetic']
            names = [generate.kernel_name(self.info, k) for k in variants]
            kernels = [getattr(self.program, k) for k in names]
            self._kernels = dict((k, v) for k, v in zip(variants, kernels))
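            # The program provides three entry points: Iq for 1-D data,
            # Iqxy for 2-D data, and Imagnetic for 2-D data with magnetism.
            # 1-D data has no magnetic variant, so Iq fills both slots below.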
        is_2d = len(q_vectors) == 2
        if is_2d:
            kernel = [self._kernels['Iqxy'], self._kernels['Imagnetic']]
        else:
            kernel = [self._kernels['Iq']]*2
        return GpuKernel(kernel, self.dtype, self.info, q_vectors)

    def release(self):
        # type: () -> None
        """
        Free the resources associated with the model.
        """
        if self.program is not None:
            self.program = None

    def __del__(self):
        # type: () -> None
        self.release()

# TODO: check that we don't need a destructor for buffers which go out of scope
class GpuInput(object):
    """
    Make q data available to the gpu.

    *q_vectors* is a list of q vectors, which will be *[q]* for 1-D data,
    and *[qx, qy]* for 2-D data.  Internally, the vectors will be reallocated
    to get the best performance on OpenCL, which may involve shifting and
    stretching the array to better match the memory architecture.  Additional
    points will be evaluated with *q=1e-3*.

    *dtype* is the data type for the q vectors.  The data type should be
    set to match that of the kernel, which is an attribute of
    :class:`GpuModel`.  Note that not all kernels support double
    precision, so even if the program was created for double precision,
    the *GpuModel.dtype* may be single precision.

    Call :meth:`release` when complete.  Even if not called directly, the
    buffer will be released when the data object is freed.
    """
    def __init__(self, q_vectors, dtype=generate.F32):
        # type: (List[np.ndarray], np.dtype) -> None
        # TODO: do we ever need double precision q?
        env = environment()
        self.nq = q_vectors[0].size
        self.dtype = np.dtype(dtype)
        self.is_2d = (len(q_vectors) == 2)
        # TODO: stretch input based on get_warp()
        # not doing it now since warp depends on kernel, which is not known
        # at this point, so instead using 32, which is good on the set of
        # architectures tested so far.
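        # Example (illustrative): nq=100 pads to width 112 in 2-D (16
        # boundary) and to width 128 in 1-D (32 boundary); the padding is
        # evaluated by the kernel but discarded when reading back results.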
        if self.is_2d:
            # Note: 16 rather than 15 because result is 1 longer than input.
            width = ((self.nq+16)//16)*16
            self.q = np.empty((width, 2), dtype=dtype)
            self.q[:self.nq, 0] = q_vectors[0]
            self.q[:self.nq, 1] = q_vectors[1]
        else:
            # Note: 32 rather than 31 because result is 1 longer than input.
            width = ((self.nq+32)//32)*32
            self.q = np.empty(width, dtype=dtype)
            self.q[:self.nq] = q_vectors[0]
        self.global_size = [self.q.shape[0]]
        context = env.get_context(self.dtype)
        #print("creating inputs of size", self.global_size)
        self.q_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                             hostbuf=self.q)

    def release(self):
        # type: () -> None
        """
        Free the memory.
        """
        if self.q_b is not None:
            self.q_b.release()
            self.q_b = None

    def __del__(self):
        # type: () -> None
        self.release()

class GpuKernel(Kernel):
    """
    Callable SAS kernel.

    *kernel* is the pair of compiled OpenCL kernels to call, with the
    plain kernel first and the magnetic kernel second.

    *dtype* is the kernel precision.

    *model_info* is the module information.

    *q_vectors* is the q vectors at which the kernel should be evaluated.

    The resulting call method takes *call_details* describing the
    polydispersity loops, *values* holding the parameter values, *cutoff*
    determining the integration limits (any points with combined weight
    less than *cutoff* will not be calculated), and *magnetic* selecting
    the magnetic variant of the kernel.

    Call :meth:`release` when done with the kernel instance.
    """
    def __init__(self, kernel, dtype, model_info, q_vectors):
        # type: (List[cl.Kernel], np.dtype, ModelInfo, List[np.ndarray]) -> None
        q_input = GpuInput(q_vectors, dtype)
        self.kernel = kernel
        self.info = model_info
        self.dtype = dtype
        self.dim = '2d' if q_input.is_2d else '1d'
        # plus one for the normalization value
        self.result = np.empty(q_input.nq+1, dtype)

        # Inputs and outputs for each kernel call
        # Note: res may be shorter than res_b if global_size != nq
        env = environment()
        self.queue = env.get_queue(dtype)

        self.result_b = cl.Buffer(self.queue.context, mf.READ_WRITE,
                                  q_input.global_size[0] * dtype.itemsize)
        self.q_input = q_input  # allocated by GpuInput above

        self._need_release = [self.result_b, self.q_input]
        self.real = (np.float32 if dtype == generate.F32
                     else np.float64 if dtype == generate.F64
                     else np.float16 if dtype == generate.F16
                     else np.float32)  # will never get here, so use np.float32

    def __call__(self, call_details, values, cutoff, magnetic):
        # type: (CallDetails, np.ndarray, float, bool) -> np.ndarray
        context = self.queue.context
        # Arrange data transfer to card
        details_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                              hostbuf=call_details.buffer)
        values_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                             hostbuf=values)

        kernel = self.kernel[1 if magnetic else 0]
        args = [
            np.uint32(self.q_input.nq), None, None,
            details_b, values_b, self.q_input.q_b, self.result_b,
            self.real(cutoff),
        ]
        #print("Calling OpenCL")
        #call_details.show(values)
        # Call kernel and retrieve results
        wait_for = None
        last_nap = time.clock()
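        # Break the polydispersity loop into chunks of roughly one million
        # q-point evaluations per launch so that a single kernel call does
        # not monopolize the device.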
        step = 1000000//self.q_input.nq + 1
        for start in range(0, call_details.num_eval, step):
            stop = min(start + step, call_details.num_eval)
            #print("queuing",start,stop)
            args[1:3] = [np.int32(start), np.int32(stop)]
            wait_for = [kernel(self.queue, self.q_input.global_size, None,
                               *args, wait_for=wait_for)]
            if stop < call_details.num_eval:
                # Allow other processes to run
                wait_for[0].wait()
                current_time = time.clock()
                if current_time - last_nap > 0.5:
                    time.sleep(0.05)
                    last_nap = current_time
        cl.enqueue_copy(self.queue, self.result, self.result_b)
        #print("result", self.result)

        # Free buffers
        for v in (details_b, values_b):
            if v is not None:
                v.release()

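        # values[0] is scale and values[1] is background; the kernel stores
        # the polydispersity weight normalization in the extra slot
        # result[nq], so I(q) = scale*result/pd_norm + background.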
        pd_norm = self.result[self.q_input.nq]
        scale = values[0]/(pd_norm if pd_norm != 0.0 else 1.0)
        background = values[1]
        #print("scale",scale,values[0],self.result[self.q_input.nq],background)
        return scale*self.result[:self.q_input.nq] + background
        # return self.result[:self.q_input.nq]

    def release(self):
        # type: () -> None
        """
        Release resources associated with the kernel.
        """
        for v in self._need_release:
            v.release()
        self._need_release = []

    def __del__(self):
        # type: () -> None
        self.release()