source: sasmodels/sasmodels/kernelcl.py @ 8b31efa

1"""
2GPU driver for C kernels
3
4TODO: docs are out of date
5
6There should be a single GPU environment running on the system.  This
7environment is constructed on the first call to :func:`env`, and the
8same environment is returned on each call.
9
10After retrieving the environment, the next step is to create the kernel.
11This is done with a call to :meth:`GpuEnvironment.make_kernel`, which
12returns the type of data used by the kernel.
13
14Next a :class:`GpuData` object should be created with the correct kind
15of data.  This data object can be used by multiple kernels, for example,
16if the target model is a weighted sum of multiple kernels.  The data
17should include any extra evaluation points required to compute the proper
18data smearing.  This need not match the square grid for 2D data if there
19is an index saying which q points are active.
20
21Together the GpuData, the program, and a device form a :class:`GpuKernel`.
22This kernel is used during fitting, receiving new sets of parameters and
23evaluating them.  The output value is stored in an output buffer on the
24devices, where it can be combined with other structure factors and form
25factors and have instrumental resolution effects applied.
26
27In order to use OpenCL for your models, you will need OpenCL drivers for
28your machine.  These should be available from your graphics card vendor.
29Intel provides OpenCL drivers for CPUs as well as their integrated HD
30graphics chipsets.  AMD also provides drivers for Intel CPUs, but as of
31this writing the performance is lacking compared to the Intel drivers.
32NVidia combines drivers for CUDA and OpenCL in one package.  The result
33is a bit messy if you have multiple drivers installed.  You can see which
34drivers are available by starting python and running:
35
36    import pyopencl as cl
37    cl.create_some_context(interactive=True)
38
This will list the available drivers and let you select one.  It will
then show you how to select that driver automatically by setting the
SAS_OPENCL environment variable, which is equivalent to PYOPENCL_CTX but
does not conflict with other pyopencl programs.

Some graphics cards have multiple devices on the same card.  You cannot
yet use both of them concurrently to evaluate models, but you can run
the program twice using a different device for each session.
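
For example, to pin one session to a particular driver and device (the
0:1 index pair here is hypothetical; use the interactive listing above
to find the indices for your system):

    SAS_OPENCL=0:1 python -m sasmodels.compare cylinder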

OpenCL kernels are compiled when needed by the device driver.  Some
drivers produce compiler output even when there is no error.  You
can see the output by setting PYOPENCL_COMPILER_OUTPUT=1.  It should be
harmless, albeit annoying.
"""
from __future__ import print_function

import os
import warnings
import logging
import time

import numpy as np  # type: ignore


# Attempt to setup opencl. This may fail if the pyopencl package is not
# installed or if it is installed but there are no devices available.
try:
    import pyopencl as cl  # type: ignore
    from pyopencl import mem_flags as mf
    from pyopencl.characterize import get_fast_inaccurate_build_options
    # Ask OpenCL for the default context so that we know that one exists
    cl.create_some_context(interactive=False)
    HAVE_OPENCL = True
    OPENCL_ERROR = ""
except Exception as exc:
    HAVE_OPENCL = False
    OPENCL_ERROR = str(exc)

from . import generate
from .kernel import KernelModel, Kernel

# pylint: disable=unused-import
try:
    from typing import Tuple, Callable, Any, List, Dict
    from .modelinfo import ModelInfo
    from .details import CallDetails
except ImportError:
    pass
# pylint: enable=unused-import

# CRUFT: pyopencl < 2017.1  (as of June 2016 needs quotes around include path)
def quote_path(v):
    """
    Quote the path if it is not already quoted.

    If v starts with '-', then assume that it is a -I option or similar
    and do not quote it.  This is fragile:  -Ipath with space needs to
    be quoted.
    """
    return '"'+v+'"' if v and ' ' in v and v[0] not in "\"'-" else v

def fix_pyopencl_include():
    """
    Monkey patch pyopencl to allow spaces in include file path.
    """
    import pyopencl as cl
    if hasattr(cl, '_DEFAULT_INCLUDE_OPTIONS'):
        cl._DEFAULT_INCLUDE_OPTIONS = [quote_path(v) for v in cl._DEFAULT_INCLUDE_OPTIONS]

if HAVE_OPENCL:
    fix_pyopencl_include()

# The maximum number of loops is limited by the amount of local memory
# available on the device.  You don't want to make this value too big
# because it will waste resources, nor too small because it may prevent
# users from running larger polydispersity calculations.  A value of 2048
# should be much larger than necessary given that cost grows as npts^k
# where k is the number of polydisperse parameters.
MAX_LOOPS = 2048
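# For example, by the npts^k estimate above, a model with three
# polydisperse parameters sampled at 35 points each costs 35**3, or about
# 43,000, kernel evaluations per q point.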


# Pragmas for enabling OpenCL features.  Be sure to protect them so that
# they still compile even if OpenCL is not present.
_F16_PRAGMA = """\
#if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp16)
#  pragma OPENCL EXTENSION cl_khr_fp16: enable
#endif
"""

_F64_PRAGMA = """\
#if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp64)
#  pragma OPENCL EXTENSION cl_khr_fp64: enable
#endif
"""

def use_opencl():
    # type: () -> bool
    """
    Return True if OpenCL is the requested GPU driver.
    """
    env = os.environ.get("SAS_OPENCL", "").lower()
    return HAVE_OPENCL and env != "none" and not env.startswith("cuda")
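
# Example (a sketch, not part of the driver): OpenCL can be turned off for
# a single process by setting the environment variable before the first
# call to environment(), or by calling reset_environment() afterwards:
#
#     import os
#     os.environ["SAS_OPENCL"] = "none"   # use_opencl() now returns False
#     os.environ["SAS_OPENCL"] = "cuda"   # or ask for the CUDA driver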

ENV = None
def reset_environment():
    """
    Call to create a new OpenCL context, such as after a change to SAS_OPENCL.
    """
    global ENV
    ENV = GpuEnvironment() if use_opencl() else None

def environment():
    # type: () -> "GpuEnvironment"
    """
    Returns a singleton :class:`GpuEnvironment`.

    This provides an OpenCL context and one queue per device.
    """
    if ENV is None:
        if not HAVE_OPENCL:
            raise RuntimeError("OpenCL startup failed with ***"
                               + OPENCL_ERROR + "***; using C compiler instead")
        reset_environment()
        if ENV is None:
            raise RuntimeError("SAS_OPENCL=None in environment")
    return ENV

def has_type(device, dtype):
    # type: (cl.Device, np.dtype) -> bool
    """
    Return True if device supports the requested precision.
    """
    if dtype == generate.F32:
        return True
    elif dtype == generate.F64:
        return "cl_khr_fp64" in device.extensions
    elif dtype == generate.F16:
        return "cl_khr_fp16" in device.extensions
    else:
        return False
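
# Example (sketch): scan all platforms for a device that supports double
# precision, using only standard pyopencl calls:
#
#     for platform in cl.get_platforms():
#         for device in platform.get_devices():
#             if has_type(device, generate.F64):
#                 print("fp64 supported on", device.name)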

def get_warp(kernel, queue):
    # type: (cl.Kernel, cl.CommandQueue) -> int
    """
    Return the size of an execution batch for *kernel* running on *queue*.
    """
    return kernel.get_work_group_info(
        cl.kernel_work_group_info.PREFERRED_WORK_GROUP_SIZE_MULTIPLE,
        queue.device)

def compile_model(context, source, dtype, fast=False):
    # type: (cl.Context, str, np.dtype, bool) -> cl.Program
    """
    Build a model to run on the gpu.

    Returns the compiled program.

    Raises an error if the desired precision is not available.
    """
    dtype = np.dtype(dtype)
    if not all(has_type(d, dtype) for d in context.devices):
        raise RuntimeError("%s not supported for devices" % dtype)

    source_list = [generate.convert_type(source, dtype)]

    if dtype == generate.F16:
        source_list.insert(0, _F16_PRAGMA)
    elif dtype == generate.F64:
        source_list.insert(0, _F64_PRAGMA)

    # Note: USE_SINCOS makes the Intel CPU slower under OpenCL
    if context.devices[0].type == cl.device_type.GPU:
        source_list.insert(0, "#define USE_SINCOS\n")
    options = (get_fast_inaccurate_build_options(context.devices[0])
               if fast else [])
    source = "\n".join(source_list)
    program = cl.Program(context, source).build(options=options)
    #print("done with "+program)
    return program
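
# Example (sketch): compile a generated kernel source for single precision
# on whatever context supports it; model_source is hypothetical here.
#
#     context = environment().get_context(generate.F32)
#     program = compile_model(context, model_source, generate.F32)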


# for now, this returns one device in the context
# TODO: create a context that contains all devices on all platforms
class GpuEnvironment(object):
    """
    GPU context, with possibly many devices, and one queue per device.
    """
    def __init__(self):
        # type: () -> None
        # find gpu context
        #self.context = cl.create_some_context()

        self.context = None
        if 'SAS_OPENCL' in os.environ:
            # Set the PyOpenCL environment variable PYOPENCL_CTX
            # from SAS_OPENCL=driver:device.  Ignore the generic
            # SAS_OPENCL=opencl, which is used to select the default
            # OpenCL device.  Don't need to check for "none" or
            # "cuda" since use_opencl() would return False if they
            # were defined, and we wouldn't get here.
            dev_str = os.environ["SAS_OPENCL"]
            if dev_str and dev_str.lower() != "opencl":
                os.environ["PYOPENCL_CTX"] = dev_str

        if 'PYOPENCL_CTX' in os.environ:
            self._create_some_context()

        if not self.context:
            self.context = _get_default_context()

        # Byte boundary for data alignment
        #self.data_boundary = max(d.min_data_type_align_size
        #                         for d in self.context.devices)
        self.queues = [cl.CommandQueue(context, context.devices[0])
                       for context in self.context]
        self.compiled = {}

    def has_type(self, dtype):
        # type: (np.dtype) -> bool
        """
        Return True if any device in any context supports the given type.
256        """
257        return any(has_type(d, dtype)
258                   for context in self.context
259                   for d in context.devices)
260
261    def get_queue(self, dtype):
262        # type: (np.dtype) -> cl.CommandQueue
263        """
264        Return a command queue for the kernels of type dtype.
265        """
266        for context, queue in zip(self.context, self.queues):
267            if all(has_type(d, dtype) for d in context.devices):
268                return queue
269
270    def get_context(self, dtype):
271        # type: (np.dtype) -> cl.Context
272        """
        Return an OpenCL context for the kernels of type dtype.
274        """
275        for context in self.context:
276            if all(has_type(d, dtype) for d in context.devices):
277                return context
278
279    def _create_some_context(self):
280        # type: () -> cl.Context
281        """
282        Protected call to cl.create_some_context without interactivity.  Use
283        this if SAS_OPENCL is set in the environment.  Sets the *context*
284        attribute.
285        """
286        try:
287            self.context = [cl.create_some_context(interactive=False)]
288        except Exception as exc:
289            warnings.warn(str(exc))
290            warnings.warn("pyopencl.create_some_context() failed")
291            warnings.warn("the environment variable 'SAS_OPENCL' might not be set correctly")
292
293    def compile_program(self, name, source, dtype, fast, timestamp):
294        # type: (str, str, np.dtype, bool, float) -> cl.Program
295        """
296        Compile the program for the device in the given context.
297        """
298        # Note: PyOpenCL caches based on md5 hash of source, options and device
299        # so we don't really need to cache things for ourselves.  I'll do so
300        # anyway just to save some data munging time.
301        tag = generate.tag_source(source)
302        key = "%s-%s-%s%s"%(name, dtype, tag, ("-fast" if fast else ""))
303        # Check timestamp on program
304        program, program_timestamp = self.compiled.get(key, (None, np.inf))
305        if program_timestamp < timestamp:
306            del self.compiled[key]
307        if key not in self.compiled:
308            context = self.get_context(dtype)
309            logging.info("building %s for OpenCL %s", key,
310                         context.devices[0].name.strip())
311            program = compile_model(self.get_context(dtype),
312                                    str(source), dtype, fast)
313            self.compiled[key] = (program, timestamp)
314        return program
315
316def _get_default_context():
317    # type: () -> List[cl.Context]
318    """
319    Get an OpenCL context, preferring GPU over CPU, and preferring Intel
320    drivers over AMD drivers.
321    """
    # Note: on mobile devices there is automatic clock scaling if either the
    # CPU or the GPU is underutilized; this probably doesn't affect us, but
    # if it did, it would mean that putting a busy loop on the CPU while the
    # GPU is running might increase throughput.
    #
    # Macbook pro, base install:
    #     {'Apple': [Intel CPU, NVIDIA GPU]}
    # Macbook pro, base install:
    #     {'Apple': [Intel CPU, Intel GPU]}
    # 2 x nvidia 295 with Intel and NVIDIA opencl drivers installed
    #     {'Intel': [CPU], 'NVIDIA': [GPU, GPU, GPU, GPU]}
    gpu, cpu = None, None
    for platform in cl.get_platforms():
        # AMD provides a much weaker CPU driver than Intel/Apple, so avoid it.
        # If someone has bothered to install the AMD/NVIDIA drivers, prefer
        # them over the integrated graphics driver that may have been supplied
        # with the CPU chipset.
        preferred_cpu = (platform.vendor.startswith('Intel')
                         or platform.vendor.startswith('Apple'))
        preferred_gpu = (platform.vendor.startswith('Advanced')
                         or platform.vendor.startswith('NVIDIA'))
        for device in platform.get_devices():
            if device.type == cl.device_type.GPU:
                # If the existing type is not GPU then it will be CUSTOM
                # or ACCELERATOR so don't override it.
                if gpu is None or (preferred_gpu and gpu.type == cl.device_type.GPU):
                    gpu = device
            elif device.type == cl.device_type.CPU:
                if cpu is None or preferred_cpu:
                    cpu = device
            else:
                # System has cl.device_type.ACCELERATOR or cl.device_type.CUSTOM.
                # Intel Phi, for example, registers as an accelerator.  Since
                # the user installed a custom device on their system and went
                # through the pain of sorting out OpenCL drivers for it, let's
                # assume they really do want to use it as their primary
                # compute device.
                gpu = device

    # order the devices by gpu then by cpu; when searching for an available
    # device by data type they will be checked in this order, which means
    # that if the gpu supports double then the cpu will never be used (though
    # we may make it possible to explicitly request the cpu at some point).
    devices = []
    if gpu is not None:
        devices.append(gpu)
    if cpu is not None:
        devices.append(cpu)
    return [cl.Context([d]) for d in devices]


class GpuModel(KernelModel):
    """
    GPU wrapper for a single model.

    *source* and *model_info* are the model source and interface as returned
    from :func:`generate.make_source` and :func:`generate.make_model_info`.

    *dtype* is the desired model precision.  Any numpy dtype for single
    or double precision floats will do, such as 'f', 'float32' or 'single'
    for single and 'd', 'float64' or 'double' for double.  Double precision
    is an optional extension which may not be available on all devices.
    Half precision ('float16', 'half') may be available on some devices.
    Fast precision ('fast') is a loose version of single precision, indicating
    that the compiler is allowed to take shortcuts.
    """
    def __init__(self, source, model_info, dtype=generate.F32, fast=False):
        # type: (Dict[str, str], ModelInfo, np.dtype, bool) -> None
        self.info = model_info
        self.source = source
        self.dtype = dtype
        self.fast = fast
        self.program = None  # delay program creation
        self._kernels = None

    def __getstate__(self):
        # type: () -> Tuple[ModelInfo, str, np.dtype, bool]
        return self.info, self.source, self.dtype, self.fast

    def __setstate__(self, state):
        # type: (Tuple[ModelInfo, str, np.dtype, bool]) -> None
        self.info, self.source, self.dtype, self.fast = state
        self.program = None

    def make_kernel(self, q_vectors):
        # type: (List[np.ndarray]) -> "GpuKernel"
        if self.program is None:
            compile_program = environment().compile_program
            timestamp = generate.ocl_timestamp(self.info)
            self.program = compile_program(
                self.info.name,
                self.source['opencl'],
                self.dtype,
                self.fast,
                timestamp)
            variants = ['Iq', 'Iqxy', 'Imagnetic']
            names = [generate.kernel_name(self.info, k) for k in variants]
            kernels = [getattr(self.program, k) for k in names]
            self._kernels = dict(zip(variants, kernels))
        is_2d = len(q_vectors) == 2
        if is_2d:
            kernel = [self._kernels['Iqxy'], self._kernels['Imagnetic']]
        else:
            kernel = [self._kernels['Iq']]*2
        return GpuKernel(kernel, self.dtype, self.info, q_vectors)

    def release(self):
        # type: () -> None
        """
        Free the resources associated with the model.
        """
        if self.program is not None:
            self.program = None

    def __del__(self):
        # type: () -> None
        self.release()

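# Example (sketch): the usual path from model name to GPU kernel.  The
# helper functions load_model_info and make_source live elsewhere in
# sasmodels; the model name, q values and precision are illustrative.
#
#     from sasmodels.core import load_model_info
#     from sasmodels.generate import make_source
#     info = load_model_info("cylinder")
#     model = GpuModel(make_source(info), info, dtype=generate.F32)
#     kernel = model.make_kernel([np.logspace(-3, 0, 200)])  # 1-D q
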
# TODO: check that we don't need a destructor for buffers which go out of scope
class GpuInput(object):
    """
    Make q data available to the gpu.

    *q_vectors* is a list of q vectors, which will be *[q]* for 1-D data,
    and *[qx, qy]* for 2-D data.  Internally, the vectors will be reallocated
    to get the best performance on OpenCL, which may involve shifting and
    stretching the array to better match the memory architecture.  Additional
    points will be evaluated with *q=1e-3*.

    *dtype* is the data type for the q vectors. The data type should be
    set to match that of the kernel, which is an attribute of
    :class:`GpuProgram`.  Note that not all kernels support double
    precision, so even if the program was created for double precision,
    the *GpuProgram.dtype* may be single precision.

    Call :meth:`release` when complete.  Even if not called directly, the
    buffer will be released when the data object is freed.
    """
    def __init__(self, q_vectors, dtype=generate.F32):
        # type: (List[np.ndarray], np.dtype) -> None
        # TODO: do we ever need double precision q?
        env = environment()
        self.nq = q_vectors[0].size
        self.dtype = np.dtype(dtype)
        self.is_2d = (len(q_vectors) == 2)
        # TODO: stretch input based on get_warp()
        # not doing it now since warp depends on kernel, which is not known
        # at this point, so instead using 32, which is good on the set of
        # architectures tested so far.
        if self.is_2d:
            # Note: 16 rather than 15 because result is 1 longer than input.
            width = ((self.nq+16)//16)*16
            self.q = np.empty((width, 2), dtype=dtype)
            self.q[:self.nq, 0] = q_vectors[0]
            self.q[:self.nq, 1] = q_vectors[1]
        else:
            # Note: 32 rather than 31 because result is 1 longer than input.
            width = ((self.nq+32)//32)*32
            self.q = np.empty(width, dtype=dtype)
            self.q[:self.nq] = q_vectors[0]
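        # Example of the padding arithmetic: 1-D data with nq=100 pads to
        # width=128, the smallest multiple of 32 with room for the nq+1
        # values (the inputs plus the extra result slot), so q[100:128]
        # hold the extra padding points.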
        self.global_size = [self.q.shape[0]]
        context = env.get_context(self.dtype)
        #print("creating inputs of size", self.global_size)
        self.q_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                             hostbuf=self.q)

    def release(self):
        # type: () -> None
        """
        Free the memory.
        """
        if self.q_b is not None:
            self.q_b.release()
            self.q_b = None

    def __del__(self):
        # type: () -> None
        self.release()

class GpuKernel(Kernel):
    """
    Callable SAS kernel.

    *kernel* is the pair of compiled kernels to call, [plain, magnetic]

    *model_info* is the module information

    *q_vectors* is the q vectors at which the kernel should be evaluated

    *dtype* is the kernel precision

    The resulting call method takes the *pars*, a list of values for
    the fixed parameters to the kernel, and *pd_pars*, a list of (value, weight)
    vectors for the polydisperse parameters.  *cutoff* determines the
    integration limits: any points with combined weight less than *cutoff*
    will not be calculated.

    Call :meth:`release` when done with the kernel instance.
    """
    def __init__(self, kernel, dtype, model_info, q_vectors):
        # type: (List[cl.Kernel], np.dtype, ModelInfo, List[np.ndarray]) -> None
        q_input = GpuInput(q_vectors, dtype)
        self.kernel = kernel
        self.info = model_info
        self.dtype = dtype
        self.dim = '2d' if q_input.is_2d else '1d'
        # plus one for the polydispersity normalization value
        self.result = np.empty(q_input.nq+1, dtype)

        # Inputs and outputs for each kernel call
        # Note: res may be shorter than res_b if global_size != nq
        env = environment()
        self.queue = env.get_queue(dtype)

        self.result_b = cl.Buffer(self.queue.context, mf.READ_WRITE,
                                  q_input.global_size[0] * dtype.itemsize)
        self.q_input = q_input  # allocated by GpuInput above

        self._need_release = [self.result_b, self.q_input]
        self.real = (np.float32 if dtype == generate.F32
                     else np.float64 if dtype == generate.F64
                     else np.float16 if dtype == generate.F16
                     else np.float32)  # will never get here, so use np.float32

    def __call__(self, call_details, values, cutoff, magnetic):
        # type: (CallDetails, np.ndarray, float, bool) -> np.ndarray
        context = self.queue.context
        # Arrange data transfer to card
        details_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                              hostbuf=call_details.buffer)
        values_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                             hostbuf=values)

        kernel = self.kernel[1 if magnetic else 0]
        args = [
            np.uint32(self.q_input.nq), None, None,
            details_b, values_b, self.q_input.q_b, self.result_b,
            self.real(cutoff),
        ]
        #print("Calling OpenCL")
        #call_details.show(values)
        # Call kernel and retrieve results
        wait_for = None
        last_nap = time.perf_counter()
        # Break the polydispersity loop into chunks of roughly one million
        # q evaluations per kernel launch so that a single call does not
        # tie up the device for too long.
        step = 1000000//self.q_input.nq + 1
        for start in range(0, call_details.num_eval, step):
            stop = min(start + step, call_details.num_eval)
            #print("queuing", start, stop)
            args[1:3] = [np.int32(start), np.int32(stop)]
            wait_for = [kernel(self.queue, self.q_input.global_size, None,
                               *args, wait_for=wait_for)]
            if stop < call_details.num_eval:
                # Allow other processes to run
                wait_for[0].wait()
                current_time = time.perf_counter()
                if current_time - last_nap > 0.5:
                    time.sleep(0.001)
                    last_nap = current_time
        cl.enqueue_copy(self.queue, self.result, self.result_b)
        #print("result", self.result)

        # Free buffers
        for v in (details_b, values_b):
            if v is not None:
                v.release()

        pd_norm = self.result[self.q_input.nq]
        scale = values[0]/(pd_norm if pd_norm != 0.0 else 1.0)
        background = values[1]
        #print("scale", scale, values[0], self.result[self.q_input.nq], background)
        return scale*self.result[:self.q_input.nq] + background
        # return self.result[:self.q_input.nq]

    def release(self):
        # type: () -> None
        """
        Release resources associated with the kernel.
        """
        for v in self._need_release:
            v.release()
        self._need_release = []

    def __del__(self):
        # type: () -> None
        self.release()