source: sasmodels/sasmodels/kernelcl.py @ f31815d

1"""
2GPU driver for C kernels
3
4There should be a single GPU environment running on the system.  This
5environment is constructed on the first call to :func:`env`, and the
6same environment is returned on each call.
7
8After retrieving the environment, the next step is to create the kernel.
9This is done with a call to :meth:`GpuEnvironment.make_kernel`, which
10returns the type of data used by the kernel.
11
12Next a :class:`GpuData` object should be created with the correct kind
13of data.  This data object can be used by multiple kernels, for example,
14if the target model is a weighted sum of multiple kernels.  The data
15should include any extra evaluation points required to compute the proper
16data smearing.  This need not match the square grid for 2D data if there
17is an index saying which q points are active.
18
19Together the GpuData, the program, and a device form a :class:`GpuKernel`.
20This kernel is used during fitting, receiving new sets of parameters and
21evaluating them.  The output value is stored in an output buffer on the
22devices, where it can be combined with other structure factors and form
23factors and have instrumental resolution effects applied.
24
25In order to use OpenCL for your models, you will need OpenCL drivers for
26your machine.  These should be available from your graphics card vendor.
27Intel provides OpenCL drivers for CPUs as well as their integrated HD
28graphics chipsets.  AMD also provides drivers for Intel CPUs, but as of
29this writing the performance is lacking compared to the Intel drivers.
30NVidia combines drivers for CUDA and OpenCL in one package.  The result
31is a bit messy if you have multiple drivers installed.  You can see which
32drivers are available by starting python and running:
33
34    import pyopencl as cl
35    cl.create_some_context(interactive=True)
36
37Once you have done that, it will show the available drivers which you
38can select.  It will then tell you that you can use these drivers
39automatically by setting the SAS_OPENCL environment variable, which is
40PYOPENCL_CTX equivalent but not conflicting with other pyopnecl programs.
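
For example, to select the second device on the first platform before
sasmodels creates its context (the exact numbers for your system are
shown by the interactive prompt above; "0:1" here is illustrative):

    import os
    os.environ["SAS_OPENCL"] = "0:1"   # platform 0, device 1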

Some graphics cards have multiple devices on the same card.  You cannot
yet use both of them concurrently to evaluate models, but you can run
the program twice using a different device for each session.

OpenCL kernels are compiled when needed by the device driver.  Some
drivers produce compiler output even when there is no error.  You
can see the output by setting PYOPENCL_COMPILER_OUTPUT=1.  It should be
harmless, albeit annoying.
"""
from __future__ import print_function

import os
import warnings
import logging
import time

try:
    from time import perf_counter as clock
except ImportError: # CRUFT: python < 3.3
    import sys
    if sys.platform.count("darwin") > 0:
        from time import time as clock
    else:
        from time import clock

import numpy as np  # type: ignore

# Attempt to setup opencl. This may fail if the opencl package is not
# installed or if it is installed but there are no devices available.
try:
    import pyopencl as cl  # type: ignore
    from pyopencl import mem_flags as mf
    from pyopencl.characterize import get_fast_inaccurate_build_options
    # Ask OpenCL for the default context so that we know that one exists
    cl.create_some_context(interactive=False)
    HAVE_OPENCL = True
    OPENCL_ERROR = ""
except Exception as exc:
    HAVE_OPENCL = False
    OPENCL_ERROR = str(exc)

from . import generate
from .kernel import KernelModel, Kernel

# pylint: disable=unused-import
try:
    # List and Dict are referenced in the type comments below.
    from typing import List, Dict, Tuple, Callable, Any
    from .modelinfo import ModelInfo
    from .details import CallDetails
except ImportError:
    pass
# pylint: enable=unused-import
# CRUFT: pyopencl < 2017.1  (as of June 2016 needs quotes around include path)
def quote_path(v):
    """
    Quote the path if it is not already quoted.

    If v starts with '-', then assume that it is a -I option or similar
    and do not quote it.  This is fragile:  -Ipath with space needs to
    be quoted.
    """
    return '"'+v+'"' if v and ' ' in v and not v[0] in "\"'-" else v
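
# Illustrative behavior (comments only, not executed):
#   quote_path('C:/Program Files/include') => '"C:/Program Files/include"'
#   quote_path('-I/usr/include')           => '-I/usr/include'  (left alone)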

def fix_pyopencl_include():
    """
    Monkey patch pyopencl to allow spaces in include file path.
    """
    import pyopencl as cl
    if hasattr(cl, '_DEFAULT_INCLUDE_OPTIONS'):
        cl._DEFAULT_INCLUDE_OPTIONS = [quote_path(v) for v in cl._DEFAULT_INCLUDE_OPTIONS]

if HAVE_OPENCL:
    fix_pyopencl_include()

# The max loops number is limited by the amount of local memory available
# on the device.  You don't want to make this value too big because it will
# waste resources, nor too small because it may interfere with users trying
# to do their polydispersity calculations.  The value of 2048 should be much
# larger than necessary given that cost grows as npts^k where k is the number
# of polydisperse parameters.
MAX_LOOPS = 2048


# Pragmas for enabling OpenCL features.  Be sure to protect them so that they
# still compile even if OpenCL is not present.
_F16_PRAGMA = """\
#if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp16)
#  pragma OPENCL EXTENSION cl_khr_fp16: enable
#endif
"""

_F64_PRAGMA = """\
#if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp64)
#  pragma OPENCL EXTENSION cl_khr_fp64: enable
#endif
"""

def use_opencl():
    """
    Return True if OpenCL is available and has not been disabled by
    setting SAS_OPENCL=none in the environment.
    """
    return HAVE_OPENCL and os.environ.get("SAS_OPENCL", "").lower() != "none"

ENV = None
def reset_environment():
    """
    Call to create a new OpenCL context, such as after a change to SAS_OPENCL.
    """
    global ENV
    ENV = GpuEnvironment() if use_opencl() else None

def environment():
    # type: () -> "GpuEnvironment"
    """
    Returns a singleton :class:`GpuEnvironment`.

    This provides an OpenCL context and one queue per device.
    """
    if ENV is None:
        if not HAVE_OPENCL:
            raise RuntimeError("OpenCL startup failed with ***"
                               + OPENCL_ERROR + "***; using C compiler instead")
        reset_environment()
        if ENV is None:
            raise RuntimeError("SAS_OPENCL=None in environment")
    return ENV
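
# Typical use, as a sketch (generate.F64 is the double-precision dtype
# used throughout this module):
#   env = environment()
#   if env.has_type(generate.F64):
#       queue = env.get_queue(generate.F64)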

def has_type(device, dtype):
    # type: (cl.Device, np.dtype) -> bool
    """
    Return true if device supports the requested precision.
    """
    if dtype == generate.F32:
        return True
    elif dtype == generate.F64:
        return "cl_khr_fp64" in device.extensions
    elif dtype == generate.F16:
        return "cl_khr_fp16" in device.extensions
    else:
        return False

def get_warp(kernel, queue):
    # type: (cl.Kernel, cl.CommandQueue) -> int
    """
    Return the size of an execution batch for *kernel* running on *queue*.
    """
    return kernel.get_work_group_info(
        cl.kernel_work_group_info.PREFERRED_WORK_GROUP_SIZE_MULTIPLE,
        queue.device)

def _stretch_input(vector, dtype, extra=1e-3, boundary=32):
    # type: (np.ndarray, np.dtype, float, int) -> np.ndarray
    """
    Stretch an input vector to the correct boundary.

    Performance on the kernels can drop by a factor of two or more if the
    number of values to compute does not fall on a nice power of two
    boundary.  The trailing additional vector elements are given a
    value of *extra*, and so f(*extra*) will be computed for each of
    them.  The values of interest are thus a subset of the computed array.

    *boundary* should be a power of 2 which is at least 32 for good
    performance on current platforms (as of Jan 2015).  It should
    probably be the max of get_warp(kernel, queue) and
    device.min_data_type_align_size//4.
    """
    remainder = vector.size % boundary
    if remainder != 0:
        size = vector.size + (boundary - remainder)
        vector = np.hstack((vector, [extra] * (size - vector.size)))
    return np.ascontiguousarray(vector, dtype=dtype)
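
# Illustrative: a 100-element vector is padded up to 128 (the next
# multiple of 32), with the 28 trailing entries set to *extra*:
#   padded = _stretch_input(np.arange(100.), np.float32)
#   assert padded.size == 128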


def compile_model(context, source, dtype, fast=False):
    # type: (cl.Context, str, np.dtype, bool) -> cl.Program
    """
    Build a model to run on the gpu.

    Returns the compiled program.

    Raises an error if the desired precision is not available.
    """
    dtype = np.dtype(dtype)
    if not all(has_type(d, dtype) for d in context.devices):
        raise RuntimeError("%s not supported for devices"%dtype)

    source_list = [generate.convert_type(source, dtype)]

    if dtype == generate.F16:
        source_list.insert(0, _F16_PRAGMA)
    elif dtype == generate.F64:
        source_list.insert(0, _F64_PRAGMA)

    # Note: USE_SINCOS makes the intel cpu slower under opencl
    if context.devices[0].type == cl.device_type.GPU:
        source_list.insert(0, "#define USE_SINCOS\n")
    options = (get_fast_inaccurate_build_options(context.devices[0])
               if fast else [])
    source = "\n".join(source_list)
    program = cl.Program(context, source).build(options=options)
    #print("done with "+program)
    return program
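
# Sketch of direct use (normally reached through GpuModel via
# GpuEnvironment.compile_program; the kernel attribute name below is
# hypothetical and depends on the model):
#   program = compile_model(context, source_text, np.dtype('d'))
#   Iq_kernel = program.sphere_Iq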


# for now, this returns one device in the context
# TODO: create a context that contains all devices on all platforms
class GpuEnvironment(object):
    """
    GPU context, with possibly many devices, and one queue per device.
    """
    def __init__(self):
        # type: () -> None
        # find gpu context
        #self.context = cl.create_some_context()

        self.context = None
        if 'SAS_OPENCL' in os.environ:
            # Use the value of SAS_OPENCL as PYOPENCL_CTX when creating
            # the cl context.
            os.environ["PYOPENCL_CTX"] = os.environ["SAS_OPENCL"]
        if 'PYOPENCL_CTX' in os.environ:
            self._create_some_context()

        if not self.context:
            self.context = _get_default_context()

        # Byte boundary for data alignment
        #self.data_boundary = max(d.min_data_type_align_size
        #                         for d in self.context.devices)
        self.queues = [cl.CommandQueue(context, context.devices[0])
                       for context in self.context]
        self.compiled = {}

    def has_type(self, dtype):
        # type: (np.dtype) -> bool
        """
        Return True if any device in any context supports the given type.
        """
        return any(has_type(d, dtype)
                   for context in self.context
                   for d in context.devices)

    def get_queue(self, dtype):
        # type: (np.dtype) -> cl.CommandQueue
        """
        Return a command queue for the kernels of type dtype.
        """
        for context, queue in zip(self.context, self.queues):
            if all(has_type(d, dtype) for d in context.devices):
                return queue

    def get_context(self, dtype):
        # type: (np.dtype) -> cl.Context
        """
        Return an OpenCL context for the kernels of type dtype.
        """
        for context in self.context:
            if all(has_type(d, dtype) for d in context.devices):
                return context

    def _create_some_context(self):
        # type: () -> cl.Context
        """
        Protected call to cl.create_some_context without interactivity.  Use
        this if SAS_OPENCL is set in the environment.  Sets the *context*
        attribute.
        """
        try:
            self.context = [cl.create_some_context(interactive=False)]
        except Exception as exc:
            warnings.warn(str(exc))
            warnings.warn("pyopencl.create_some_context() failed")
            warnings.warn("the environment variable 'SAS_OPENCL' might not be set correctly")

    def compile_program(self, name, source, dtype, fast, timestamp):
        # type: (str, str, np.dtype, bool, float) -> cl.Program
        """
        Compile the program for the device in the given context.
        """
        # Note: PyOpenCL caches based on md5 hash of source, options and device
        # so we don't really need to cache things for ourselves.  I'll do so
        # anyway just to save some data munging time.
        tag = generate.tag_source(source)
        key = "%s-%s-%s%s"%(name, dtype, tag, ("-fast" if fast else ""))
        # Check timestamp on program
        program, program_timestamp = self.compiled.get(key, (None, np.inf))
        if program_timestamp < timestamp:
            del self.compiled[key]
        if key not in self.compiled:
            context = self.get_context(dtype)
            logging.info("building %s for OpenCL %s", key,
                         context.devices[0].name.strip())
            program = compile_model(context, str(source), dtype, fast)
            self.compiled[key] = (program, timestamp)
        return program

def _get_default_context():
    # type: () -> List[cl.Context]
    """
    Get an OpenCL context, preferring GPU over CPU, and preferring Intel
    drivers over AMD drivers.
    """
    # Note: on mobile devices there is automatic clock scaling if either the
    # CPU or the GPU is underutilized; probably doesn't affect us, but if
    # it did, it would mean that putting a busy loop on the CPU while the GPU
    # is running may increase throughput.
    #
    # Macbook pro, base install:
    #     {'Apple': [Intel CPU, NVIDIA GPU]}
    # Macbook pro, base install:
    #     {'Apple': [Intel CPU, Intel GPU]}
    # 2 x nvidia 295 with Intel and NVIDIA opencl drivers installed
    #     {'Intel': [CPU], 'NVIDIA': [GPU, GPU, GPU, GPU]}
    gpu, cpu = None, None
    for platform in cl.get_platforms():
        # AMD provides a much weaker CPU driver than Intel/Apple, so avoid it.
        # If someone has bothered to install the AMD/NVIDIA drivers, prefer
        # them over the integrated graphics driver that may have been supplied
        # with the CPU chipset.
        preferred_cpu = (platform.vendor.startswith('Intel')
                         or platform.vendor.startswith('Apple'))
        preferred_gpu = (platform.vendor.startswith('Advanced')
                         or platform.vendor.startswith('NVIDIA'))
        for device in platform.get_devices():
            if device.type == cl.device_type.GPU:
                # If the existing type is not GPU then it will be CUSTOM
                # or ACCELERATOR, so don't override it.
                if gpu is None or (preferred_gpu and gpu.type == cl.device_type.GPU):
                    gpu = device
            elif device.type == cl.device_type.CPU:
                if cpu is None or preferred_cpu:
                    cpu = device
            else:
                # System has cl.device_type.ACCELERATOR or cl.device_type.CUSTOM.
                # An Intel Phi, for example, registers as an accelerator.
                # Since the user installed a custom device on their system
                # and went through the pain of sorting out OpenCL drivers for
                # it, let's assume they really do want to use it as their
                # primary compute device.
                gpu = device

    # order the devices by gpu then by cpu; when searching for an available
    # device by data type they will be checked in this order, which means
    # that if the gpu supports double then the cpu will never be used (though
    # we may make it possible to explicitly request the cpu at some point).
    devices = []
    if gpu is not None:
        devices.append(gpu)
    if cpu is not None:
        devices.append(cpu)
    return [cl.Context([d]) for d in devices]


class GpuModel(KernelModel):
    """
    GPU wrapper for a single model.

    *source* and *model_info* are the model source and interface as returned
    from :func:`generate.make_source` and :func:`generate.make_model_info`.

    *dtype* is the desired model precision.  Any numpy dtype for single
    or double precision floats will do, such as 'f', 'float32' or 'single'
    for single and 'd', 'float64' or 'double' for double.  Double precision
    is an optional extension which may not be available on all devices.
    Half precision ('float16','half') may be available on some devices.
    Fast precision ('fast') is a loose version of single precision, indicating
    that the compiler is allowed to take shortcuts.
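
    A sketch of typical use, assuming the usual sasmodels helpers (the
    model name "sphere" is illustrative):

        from sasmodels import core, generate
        model_info = core.load_model_info("sphere")
        source = generate.make_source(model_info)
        model = GpuModel(source, model_info, dtype=generate.F32)
        kernel = model.make_kernel([q])  # q is a numpy array of q values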
407    """
408    def __init__(self, source, model_info, dtype=generate.F32, fast=False):
409        # type: (Dict[str,str], ModelInfo, np.dtype, bool) -> None
410        self.info = model_info
411        self.source = source
412        self.dtype = dtype
413        self.fast = fast
414        self.program = None # delay program creation
415        self._kernels = None
416
417    def __getstate__(self):
418        # type: () -> Tuple[ModelInfo, str, np.dtype, bool]
419        return self.info, self.source, self.dtype, self.fast
420
421    def __setstate__(self, state):
422        # type: (Tuple[ModelInfo, str, np.dtype, bool]) -> None
423        self.info, self.source, self.dtype, self.fast = state
424        self.program = None
425
426    def make_kernel(self, q_vectors):
427        # type: (List[np.ndarray]) -> "GpuKernel"
428        if self.program is None:
429            compile_program = environment().compile_program
430            timestamp = generate.ocl_timestamp(self.info)
431            self.program = compile_program(
432                self.info.name,
433                self.source['opencl'],
434                self.dtype,
435                self.fast,
436                timestamp)
437            variants = ['Iq', 'Iqxy', 'Imagnetic']
438            names = [generate.kernel_name(self.info, k) for k in variants]
439            kernels = [getattr(self.program, k) for k in names]
440            self._kernels = dict((k, v) for k, v in zip(variants, kernels))
441        is_2d = len(q_vectors) == 2
442        if is_2d:
443            kernel = [self._kernels['Iqxy'], self._kernels['Imagnetic']]
444        else:
445            kernel = [self._kernels['Iq']]*2
446        return GpuKernel(kernel, self.dtype, self.info, q_vectors)
447
448    def release(self):
449        # type: () -> None
450        """
451        Free the resources associated with the model.
452        """
453        if self.program is not None:
454            self.program = None
455
456    def __del__(self):
457        # type: () -> None
458        self.release()
459
# TODO: check that we don't need a destructor for buffers which go out of scope
class GpuInput(object):
    """
    Make q data available to the gpu.

    *q_vectors* is a list of q vectors, which will be *[q]* for 1-D data,
    and *[qx, qy]* for 2-D data.  Internally, the vectors will be reallocated
    to get the best performance on OpenCL, which may involve shifting and
    stretching the array to better match the memory architecture.  Additional
    points will be evaluated with *q=1e-3*.

    *dtype* is the data type for the q vectors. The data type should be
    set to match that of the kernel, which is an attribute of
    :class:`GpuModel`.  Note that not all kernels support double
    precision, so even if the program was created for double precision,
    the *GpuModel.dtype* may be single precision.

    Call :meth:`release` when complete.  Even if not called directly, the
    buffer will be released when the data object is freed.
    """
    def __init__(self, q_vectors, dtype=generate.F32):
        # type: (List[np.ndarray], np.dtype) -> None
        # TODO: do we ever need double precision q?
        env = environment()
        self.nq = q_vectors[0].size
        self.dtype = np.dtype(dtype)
        self.is_2d = (len(q_vectors) == 2)
        # TODO: stretch input based on get_warp()
        # not doing it now since warp depends on kernel, which is not known
        # at this point, so instead using 32, which is good on the set of
        # architectures tested so far.
        if self.is_2d:
            # Note: 16 rather than 15 because result is 1 longer than input.
            width = ((self.nq+16)//16)*16
            self.q = np.empty((width, 2), dtype=dtype)
            self.q[:self.nq, 0] = q_vectors[0]
            self.q[:self.nq, 1] = q_vectors[1]
        else:
            # Note: 32 rather than 31 because result is 1 longer than input.
            width = ((self.nq+32)//32)*32
            self.q = np.empty(width, dtype=dtype)
            self.q[:self.nq] = q_vectors[0]
        self.global_size = [self.q.shape[0]]
        context = env.get_context(self.dtype)
        #print("creating inputs of size", self.global_size)
        self.q_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                             hostbuf=self.q)

    def release(self):
        # type: () -> None
        """
        Free the memory.
        """
        if self.q_b is not None:
            self.q_b.release()
            self.q_b = None

    def __del__(self):
        # type: () -> None
        self.release()

class GpuKernel(Kernel):
    """
    Callable SAS kernel.

    *kernel* is the pair of compiled OpenCL kernels to call; the second
    entry is used when *magnetic* is True

    *model_info* is the module information

    *q_vectors* is the q vectors at which the kernel should be evaluated

    *dtype* is the kernel precision

    The resulting call method takes the *pars*, a list of values for
    the fixed parameters to the kernel, and *pd_pars*, a list of
    (value, weight) vectors for the polydisperse parameters.  *cutoff*
    determines the integration limits: any points with combined weight
    less than *cutoff* will not be calculated.

    Call :meth:`release` when done with the kernel instance.
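
    A sketch of a call, assuming *call_details* and *values* have been
    packed from the parameter set (normally done via
    :func:`sasmodels.details.make_kernel_args`; the names here are
    illustrative):

        Iq = kernel(call_details, values, cutoff=0., magnetic=False)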
540    """
541    def __init__(self, kernel, dtype, model_info, q_vectors):
542        # type: (cl.Kernel, np.dtype, ModelInfo, List[np.ndarray]) -> None
543        q_input = GpuInput(q_vectors, dtype)
544        self.kernel = kernel
545        self.info = model_info
546        self.dtype = dtype
547        self.dim = '2d' if q_input.is_2d else '1d'
548        # plus three for the normalization values
549        self.result = np.empty(q_input.nq+1, dtype)
550
551        # Inputs and outputs for each kernel call
552        # Note: res may be shorter than res_b if global_size != nq
553        env = environment()
554        self.queue = env.get_queue(dtype)
555
556        self.result_b = cl.Buffer(self.queue.context, mf.READ_WRITE,
557                                  q_input.global_size[0] * dtype.itemsize)
558        self.q_input = q_input # allocated by GpuInput above
559
560        self._need_release = [self.result_b, self.q_input]
561        self.real = (np.float32 if dtype == generate.F32
562                     else np.float64 if dtype == generate.F64
563                     else np.float16 if dtype == generate.F16
564                     else np.float32)  # will never get here, so use np.float32
565
    def __call__(self, call_details, values, cutoff, magnetic):
        # type: (CallDetails, np.ndarray, float, bool) -> np.ndarray
        context = self.queue.context
        # Arrange data transfer to card
        details_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                              hostbuf=call_details.buffer)
        values_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                             hostbuf=values)

        kernel = self.kernel[1 if magnetic else 0]
        args = [
            np.uint32(self.q_input.nq), None, None,
            details_b, values_b, self.q_input.q_b, self.result_b,
            self.real(cutoff),
        ]
        #print("Calling OpenCL")
        #call_details.show(values)
        # Call kernel and retrieve results
        wait_for = None
        last_nap = clock()
        step = 1000000//self.q_input.nq + 1
        for start in range(0, call_details.num_eval, step):
            stop = min(start + step, call_details.num_eval)
            #print("queuing",start,stop)
            args[1:3] = [np.int32(start), np.int32(stop)]
            wait_for = [kernel(self.queue, self.q_input.global_size, None,
                               *args, wait_for=wait_for)]
            if stop < call_details.num_eval:
                # Allow other processes to run
                wait_for[0].wait()
                current_time = clock()
                if current_time - last_nap > 0.5:
                    time.sleep(0.05)
                    last_nap = current_time
        cl.enqueue_copy(self.queue, self.result, self.result_b)
        #print("result", self.result)

        # Free buffers
        for v in (details_b, values_b):
            if v is not None:
                v.release()

        pd_norm = self.result[self.q_input.nq]
        scale = values[0]/(pd_norm if pd_norm != 0.0 else 1.0)
        background = values[1]
        #print("scale",scale,values[0],self.result[self.q_input.nq],background)
        return scale*self.result[:self.q_input.nq] + background
        # return self.result[:self.q_input.nq]

    def release(self):
        # type: () -> None
        """
        Release resources associated with the kernel.
        """
        for v in self._need_release:
            v.release()
        self._need_release = []

    def __del__(self):
        # type: () -> None
        self.release()