source: sasmodels/sasmodels/kernelcl.py @ 5fd684d

Last change on this file since 5fd684d was a557a99, checked in by wojciech, 8 years ago

A few fixes to make code cleaner and add PYOPENCL_CTX control

1"""
2GPU driver for C kernels
3
4There should be a single GPU environment running on the system.  This
5environment is constructed on the first call to :func:`env`, and the
6same environment is returned on each call.
7
8After retrieving the environment, the next step is to create the kernel.
9This is done with a call to :meth:`GpuEnvironment.make_kernel`, which
10returns the type of data used by the kernel.
11
12Next a :class:`GpuData` object should be created with the correct kind
13of data.  This data object can be used by multiple kernels, for example,
14if the target model is a weighted sum of multiple kernels.  The data
15should include any extra evaluation points required to compute the proper
16data smearing.  This need not match the square grid for 2D data if there
17is an index saying which q points are active.
18
19Together the GpuData, the program, and a device form a :class:`GpuKernel`.
20This kernel is used during fitting, receiving new sets of parameters and
21evaluating them.  The output value is stored in an output buffer on the
22devices, where it can be combined with other structure factors and form
23factors and have instrumental resolution effects applied.
24
25In order to use OpenCL for your models, you will need OpenCL drivers for
26your machine.  These should be available from your graphics card vendor.
27Intel provides OpenCL drivers for CPUs as well as their integrated HD
28graphics chipsets.  AMD also provides drivers for Intel CPUs, but as of
29this writing the performance is lacking compared to the Intel drivers.
30NVidia combines drivers for CUDA and OpenCL in one package.  The result
31is a bit messy if you have multiple drivers installed.  You can see which
32drivers are available by starting python and running:
33
34    import pyopencl as cl
35    cl.create_some_context(interactive=True)
36
37Once you have done that, it will show the available drivers which you
38can select.  It will then tell you that you can use these drivers
39automatically by setting the SAS_OPENCL environment variable, which is
40PYOPENCL_CTX equivalent but not conflicting with other pyopnecl programs.
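
For example, to select the first driver in the list without being
prompted (the value '0' is illustrative; it uses the same syntax as
PYOPENCL_CTX, so use whichever index create_some_context reported for
your system):

    import os
    os.environ['SAS_OPENCL'] = '0'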

Some graphics cards have multiple devices on the same card.  You cannot
yet use both of them concurrently to evaluate models, but you can run
the program twice using a different device for each session.

OpenCL kernels are compiled when needed by the device driver.  Some
drivers produce compiler output even when there is no error.  You
can see the output by setting PYOPENCL_COMPILER_OUTPUT=1.  It should be
harmless, albeit annoying.
"""
from __future__ import print_function

import os
import warnings
import logging

import numpy as np  # type: ignore

try:
    #raise NotImplementedError("OpenCL not yet implemented for new kernel template")
    import pyopencl as cl  # type: ignore
    # Ask OpenCL for the default context so that we know that one exists
    cl.create_some_context(interactive=False)
except Exception as exc:
    warnings.warn("OpenCL startup failed with ***"
                  + str(exc) + "***; using C compiler instead")
    raise RuntimeError("OpenCL not available")

from pyopencl import mem_flags as mf
from pyopencl.characterize import get_fast_inaccurate_build_options

from . import generate
from .kernel import KernelModel, Kernel

try:
    from typing import List, Dict, Tuple, Callable, Any
    from .modelinfo import ModelInfo
    from .details import CallDetails
except ImportError:
    pass

# CRUFT: pyopencl < 2017.1  (as of June 2016 needs quotes around include path)
def quote_path(v):
    """
    Quote the path if it is not already quoted.

    If v starts with '-', then assume that it is a -I option or similar
    and do not quote it.  This is fragile: a -Ipath argument containing a
    space would still need to be quoted.
    """
    return '"'+v+'"' if v and ' ' in v and not v[0] in "\"'-" else v
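
# Illustrative behavior (hypothetical paths):
#     quote_path('C:/Program Files/include')  ->  '"C:/Program Files/include"'
#     quote_path('/usr/include')              ->  '/usr/include'    (no space)
#     quote_path('-I/usr/include')            ->  '-I/usr/include'  (option; untouched)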

def fix_pyopencl_include():
    """
    Monkey patch pyopencl to allow spaces in include file path.
    """
    import pyopencl as cl
    if hasattr(cl, '_DEFAULT_INCLUDE_OPTIONS'):
        cl._DEFAULT_INCLUDE_OPTIONS = [quote_path(v) for v in cl._DEFAULT_INCLUDE_OPTIONS]

fix_pyopencl_include()


# The max loops number is limited by the amount of local memory available
# on the device.  You don't want to make this value too big because it will
# waste resources, nor too small because it may interfere with users trying
# to do their polydispersity calculations.  A value of 2048 should be much
# larger than necessary given that cost grows as npts^k where k is the number
# of polydisperse parameters.
MAX_LOOPS = 2048


# Pragmas to enable optional OpenCL features.  Be sure to protect them so
# that the source still compiles even if OpenCL is not present.
_F16_PRAGMA = """\
#if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp16)
#  pragma OPENCL EXTENSION cl_khr_fp16: enable
#endif
"""

_F64_PRAGMA = """\
#if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp64)
#  pragma OPENCL EXTENSION cl_khr_fp64: enable
#endif
"""


ENV = None
def environment():
    # type: () -> "GpuEnvironment"
    """
    Returns a singleton :class:`GpuEnvironment`.

    This provides an OpenCL context and one queue per device.
    """
    global ENV
    if ENV is None:
        ENV = GpuEnvironment()
    return ENV
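
# Typical access pattern (sketch; GpuEnvironment and its methods are
# defined below):
#     env = environment()
#     if env.has_type(np.dtype('float64')):
#         queue = env.get_queue(np.dtype('float64'))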

def has_type(device, dtype):
    # type: (cl.Device, np.dtype) -> bool
    """
    Return True if the device supports the requested precision.
    """
    if dtype == generate.F32:
        return True
    elif dtype == generate.F64:
        return "cl_khr_fp64" in device.extensions
    elif dtype == generate.F16:
        return "cl_khr_fp16" in device.extensions
    else:
        return False

def get_warp(kernel, queue):
    # type: (cl.Kernel, cl.CommandQueue) -> int
    """
    Return the size of an execution batch for *kernel* running on *queue*.
    """
    return kernel.get_work_group_info(
        cl.kernel_work_group_info.PREFERRED_WORK_GROUP_SIZE_MULTIPLE,
        queue.device)

def _stretch_input(vector, dtype, extra=1e-3, boundary=32):
    # type: (np.ndarray, np.dtype, float, int) -> np.ndarray
    """
    Stretch an input vector to the correct boundary.

    Performance on the kernels can drop by a factor of two or more if the
    number of values to compute does not fall on a nice power of two
    boundary.  The trailing additional vector elements are given a
    value of *extra*, and so f(*extra*) will be computed for each of
    them.  The desired values will thus be a subset of the computed array.

    *boundary* should be a power of 2 which is at least 32 for good
    performance on current platforms (as of Jan 2015).  It should
    probably be the max of get_warp(kernel, queue) and
    device.min_data_type_align_size//4.
    """
    remainder = vector.size % boundary
    if remainder != 0:
        size = vector.size + (boundary - remainder)
        vector = np.hstack((vector, [extra] * (size - vector.size)))
    return np.ascontiguousarray(vector, dtype=dtype)
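
# Worked example (hypothetical size): a 1000-element vector with the default
# boundary of 32 has remainder 8, so 24 copies of *extra* are appended to
# stretch it to 1024 elements.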


def compile_model(context, source, dtype, fast=False):
    # type: (cl.Context, str, np.dtype, bool) -> cl.Program
    """
    Build a model to run on the GPU.

    Returns the compiled program.  Raises RuntimeError if the requested
    precision is not supported by every device in the context (for
    example, if float64 is requested but some device lacks the
    cl_khr_fp64 extension).
    """
    dtype = np.dtype(dtype)
    if not all(has_type(d, dtype) for d in context.devices):
        raise RuntimeError("%s not supported for devices"%dtype)

    source_list = [generate.convert_type(source, dtype)]

    if dtype == generate.F16:
        source_list.insert(0, _F16_PRAGMA)
    elif dtype == generate.F64:
        source_list.insert(0, _F64_PRAGMA)

    # Note: USE_SINCOS makes the intel cpu slower under opencl
    if context.devices[0].type == cl.device_type.GPU:
        source_list.insert(0, "#define USE_SINCOS\n")
    options = (get_fast_inaccurate_build_options(context.devices[0])
               if fast else [])
    source = "\n".join(source_list)
    program = cl.Program(context, source).build(options=options)
    #print("done with "+program)
    return program

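# Sketch of direct use (normally reached via GpuEnvironment.compile_program;
# *source* would be the C model source from generate.make_source):
#     context = environment().get_context(generate.F32)
#     program = compile_model(context, source, generate.F32)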

# for now, this returns one device in the context
# TODO: create a context that contains all devices on all platforms
class GpuEnvironment(object):
    """
    GPU context, with possibly many devices, and one queue per device.
    """
    def __init__(self):
        # type: () -> None
        # find gpu context
        #self.context = cl.create_some_context()

        self.context = None
        if 'SAS_OPENCL' in os.environ:
            # Copy SAS_OPENCL into PYOPENCL_CTX so that pyopencl uses it
            # when creating the cl context
            os.environ["PYOPENCL_CTX"] = os.environ["SAS_OPENCL"]
        if 'PYOPENCL_CTX' in os.environ:
            self._create_some_context()

        if not self.context:
            self.context = _get_default_context()

        # Byte boundary for data alignment
        #self.data_boundary = max(d.min_data_type_align_size
        #                         for d in self.context.devices)
        self.queues = [cl.CommandQueue(context, context.devices[0])
                       for context in self.context]
        self.compiled = {}

    def has_type(self, dtype):
        # type: (np.dtype) -> bool
        """
        Return True if any of the available devices supports the given type.
        """
        return any(has_type(d, dtype)
                   for context in self.context
                   for d in context.devices)

    def get_queue(self, dtype):
        # type: (np.dtype) -> cl.CommandQueue
        """
        Return a command queue for the kernels of type dtype.
        """
        for context, queue in zip(self.context, self.queues):
            if all(has_type(d, dtype) for d in context.devices):
                return queue

    def get_context(self, dtype):
        # type: (np.dtype) -> cl.Context
        """
        Return an OpenCL context for the kernels of type dtype.
        """
        for context in self.context:
            if all(has_type(d, dtype) for d in context.devices):
                return context

    def _create_some_context(self):
        # type: () -> cl.Context
        """
        Protected call to cl.create_some_context without interactivity.  Use
        this if SAS_OPENCL is set in the environment.  Sets the *context*
        attribute.
        """
        try:
            self.context = [cl.create_some_context(interactive=False)]
        except Exception as exc:
            warnings.warn(str(exc))
            warnings.warn("pyopencl.create_some_context() failed")
            warnings.warn("the environment variable 'SAS_OPENCL' might not be set correctly")

    def compile_program(self, name, source, dtype, fast, timestamp):
        # type: (str, str, np.dtype, bool, float) -> cl.Program
        """
        Compile the program for the device in the given context.
        """
        # Note: PyOpenCL caches based on md5 hash of source, options and device
        # so we don't really need to cache things for ourselves.  I'll do so
        # anyway just to save some data munging time.
        key = "%s-%s%s"%(name, dtype, ("-fast" if fast else ""))
        # Check timestamp on program
        program, program_timestamp = self.compiled.get(key, (None, np.inf))
        if program_timestamp < timestamp:
            del self.compiled[key]
        if key not in self.compiled:
            context = self.get_context(dtype)
            logging.info("building %s for OpenCL %s", key,
                         context.devices[0].name.strip())
            program = compile_model(self.get_context(dtype),
                                    str(source), dtype, fast)
            self.compiled[key] = (program, timestamp)
        return program

def _get_default_context():
    # type: () -> List[cl.Context]
    """
    Get an OpenCL context, preferring GPU over CPU, and preferring Intel
    drivers over AMD drivers.
    """
    # Note: on mobile devices there is automatic clock scaling if either the
    # CPU or the GPU is underutilized; probably doesn't affect us, but if
    # it did, it would mean that putting a busy loop on the CPU while the GPU
    # is running may increase throughput.
    #
    # Macbook pro, base install:
    #     {'Apple': [Intel CPU, NVIDIA GPU]}
    # Macbook pro, base install:
    #     {'Apple': [Intel CPU, Intel GPU]}
    # 2 x nvidia 295 with Intel and NVIDIA opencl drivers installed
    #     {'Intel': [CPU], 'NVIDIA': [GPU, GPU, GPU, GPU]}
    gpu, cpu = None, None
    for platform in cl.get_platforms():
        # AMD provides a much weaker CPU driver than Intel/Apple, so avoid it.
        # If someone has bothered to install the AMD/NVIDIA drivers, prefer
        # them over the integrated graphics driver that may have been supplied
        # with the CPU chipset.
        preferred_cpu = (platform.vendor.startswith('Intel')
                         or platform.vendor.startswith('Apple'))
        preferred_gpu = (platform.vendor.startswith('Advanced')
                         or platform.vendor.startswith('NVIDIA'))
        for device in platform.get_devices():
            if device.type == cl.device_type.GPU:
                # If the existing type is not GPU then it will be CUSTOM
                # or ACCELERATOR so don't override it.
                if gpu is None or (preferred_gpu and gpu.type == cl.device_type.GPU):
                    gpu = device
            elif device.type == cl.device_type.CPU:
                if cpu is None or preferred_cpu:
                    cpu = device
            else:
                # System has cl.device_type.ACCELERATOR or cl.device_type.CUSTOM
                # Intel Phi, for example, registers as an accelerator.
                # Since the user installed a custom device on their system
                # and went through the pain of sorting out OpenCL drivers for
                # it, let's assume they really do want to use it as their
                # primary compute device.
                gpu = device

    # order the devices by gpu then by cpu; when searching for an available
    # device by data type they will be checked in this order, which means
    # that if the gpu supports double then the cpu will never be used (though
    # we may make it possible to explicitly request the cpu at some point).
    devices = []
    if gpu is not None:
        devices.append(gpu)
    if cpu is not None:
        devices.append(cpu)
    return [cl.Context([d]) for d in devices]

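# Quick inspection sketch (one context per selected device):
#     for context in _get_default_context():
#         print(context.devices[0].name)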

class GpuModel(KernelModel):
    """
    GPU wrapper for a single model.

    *source* and *model_info* are the model source and interface as returned
    from :func:`generate.make_source` and :func:`generate.make_model_info`.

    *dtype* is the desired model precision.  Any numpy dtype for single
    or double precision floats will do, such as 'f', 'float32' or 'single'
    for single and 'd', 'float64' or 'double' for double.  Double precision
    is an optional extension which may not be available on all devices.
    Half precision ('float16', 'half') may be available on some devices.
    Fast precision ('fast') is a loose version of single precision, indicating
    that the compiler is allowed to take shortcuts.
    """
    def __init__(self, source, model_info, dtype=generate.F32, fast=False):
        # type: (Dict[str, str], ModelInfo, np.dtype, bool) -> None
        self.info = model_info
        self.source = source
        self.dtype = dtype
        self.fast = fast
        self.program = None # delay program creation
        self._kernels = None

    def __getstate__(self):
        # type: () -> Tuple[ModelInfo, str, np.dtype, bool]
        return self.info, self.source, self.dtype, self.fast

    def __setstate__(self, state):
        # type: (Tuple[ModelInfo, str, np.dtype, bool]) -> None
        self.info, self.source, self.dtype, self.fast = state
        self.program = None
        self._kernels = None

    def make_kernel(self, q_vectors):
        # type: (List[np.ndarray]) -> "GpuKernel"
        if self.program is None:
            compile_program = environment().compile_program
            timestamp = generate.ocl_timestamp(self.info)
            self.program = compile_program(
                self.info.name,
                self.source['opencl'],
                self.dtype,
                self.fast,
                timestamp)
            variants = ['Iq', 'Iqxy', 'Imagnetic']
            names = [generate.kernel_name(self.info, k) for k in variants]
            kernels = [getattr(self.program, k) for k in names]
            self._kernels = dict((k, v) for k, v in zip(variants, kernels))
        is_2d = len(q_vectors) == 2
        if is_2d:
            kernel = [self._kernels['Iqxy'], self._kernels['Imagnetic']]
        else:
            kernel = [self._kernels['Iq']]*2
        return GpuKernel(kernel, self.dtype, self.info, q_vectors)

    def release(self):
        # type: () -> None
        """
        Free the resources associated with the model.
        """
        if self.program is not None:
            self.program = None

    def __del__(self):
        # type: () -> None
        self.release()

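# Sketch of end-to-end use (model_info/source come from generate.make_model_info
# and generate.make_source; call_details/values come from sasmodels.details):
#     model = GpuModel(source, model_info, dtype=generate.F32)
#     kernel = model.make_kernel([np.linspace(0.001, 0.5, 200)])
#     Iq = kernel(call_details, values, cutoff=0., magnetic=False)
#     kernel.release()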
# TODO: check that we don't need a destructor for buffers which go out of scope
class GpuInput(object):
    """
    Make q data available to the gpu.

    *q_vectors* is a list of q vectors, which will be *[q]* for 1-D data,
    and *[qx, qy]* for 2-D data.  Internally, the vectors will be reallocated
    to get the best performance on OpenCL, which may involve shifting and
    stretching the array to better match the memory architecture.  Additional
    points will be evaluated with *q=1e-3*.

    *dtype* is the data type for the q vectors. The data type should be
    set to match that of the kernel, which is an attribute of
    :class:`GpuModel`.  Note that not all kernels support double
    precision, so even if the program was created for double precision,
    the *GpuModel.dtype* may be single precision.

    Call :meth:`release` when complete.  Even if not called directly, the
    buffer will be released when the data object is freed.
    """
    def __init__(self, q_vectors, dtype=generate.F32):
        # type: (List[np.ndarray], np.dtype) -> None
        # TODO: do we ever need double precision q?
        env = environment()
        self.nq = q_vectors[0].size
        self.dtype = np.dtype(dtype)
        self.is_2d = (len(q_vectors) == 2)
        # TODO: stretch input based on get_warp()
        # not doing it now since warp depends on kernel, which is not known
        # at this point, so instead using 32, which is good on the set of
        # architectures tested so far.
        if self.is_2d:
            # Note: 17 rather than 15 because the result vector is 2 elements
            # longer than the input.
            width = ((self.nq+17)//16)*16
            self.q = np.empty((width, 2), dtype=dtype)
            self.q[:self.nq, 0] = q_vectors[0]
            self.q[:self.nq, 1] = q_vectors[1]
        else:
            # Note: 33 rather than 31 because the result vector is 2 elements
            # longer than the input.
            width = ((self.nq+33)//32)*32
            self.q = np.empty(width, dtype=dtype)
            self.q[:self.nq] = q_vectors[0]
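        # Worked example of the padding above (hypothetical size): for
        # nq = 100 1-D points, width = ((100+33)//32)*32 = 128, so the q
        # buffer gains 28 padding slots and the global size remains a
        # multiple of 32 while leaving room for the extra result elements.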
        self.global_size = [self.q.shape[0]]
        context = env.get_context(self.dtype)
        #print("creating inputs of size", self.global_size)
        self.q_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                             hostbuf=self.q)

    def release(self):
        # type: () -> None
        """
        Free the memory.
        """
        if self.q_b is not None:
            self.q_b.release()
            self.q_b = None

    def __del__(self):
        # type: () -> None
        self.release()

class GpuKernel(Kernel):
    """
    Callable SAS kernel.

    *kernel* is the pair of compiled OpenCL kernels to call: the plain
    kernel and the magnetic kernel (for 1-D data these are the same).

    *model_info* is the module information

    *q_vectors* is the q vectors at which the kernel should be evaluated

    *dtype* is the kernel precision

    The resulting call method takes *call_details* describing the
    polydispersity loop, *values* holding the parameter values, *cutoff*
    determining the integration limits (any points with combined weight
    less than *cutoff* will not be calculated), and *magnetic* selecting
    the magnetic variant of the kernel.

    Call :meth:`release` when done with the kernel instance.
    """
    def __init__(self, kernel, dtype, model_info, q_vectors):
        # type: (List[cl.Kernel], np.dtype, ModelInfo, List[np.ndarray]) -> None
        q_input = GpuInput(q_vectors, dtype)
        self.kernel = kernel
        self.info = model_info
        self.dtype = dtype
        self.dim = '2d' if q_input.is_2d else '1d'
        # plus three for the normalization values
        self.result = np.empty(q_input.nq+3, dtype)

        # Inputs and outputs for each kernel call
        # Note: res may be shorter than res_b if global_size != nq
        env = environment()
        self.queue = env.get_queue(dtype)

        self.result_b = cl.Buffer(self.queue.context, mf.READ_WRITE,
                                  q_input.global_size[0] * dtype.itemsize)
        self.q_input = q_input # allocated by GpuInput above

        self._need_release = [self.result_b, self.q_input]
        self.real = (np.float32 if dtype == generate.F32
                     else np.float64 if dtype == generate.F64
                     else np.float16 if dtype == generate.F16
                     else np.float32)  # will never get here, so use np.float32

    def __call__(self, call_details, values, cutoff, magnetic):
        # type: (CallDetails, np.ndarray, float, bool) -> np.ndarray
        context = self.queue.context
        # Arrange data transfer to card
        details_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                              hostbuf=call_details.buffer)
        values_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                             hostbuf=values)

        kernel = self.kernel[1 if magnetic else 0]
        args = [
            np.uint32(self.q_input.nq), None, None,
            details_b, values_b, self.q_input.q_b, self.result_b,
            self.real(cutoff),
        ]
        #print("Calling OpenCL")
        #call_details.show(values)
        # Call kernel and retrieve results
        last_call = None
        step = 100
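        # Editorial note: the loop below issues the polydispersity
        # evaluations in batches of *step*, each batch waiting on the
        # previous one via *wait_for*.  Keeping individual launches short
        # like this presumably avoids long uninterruptible kernel runs
        # (e.g. driver watchdog timeouts), though the original code does
        # not say so explicitly.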
        for start in range(0, call_details.num_eval, step):
            stop = min(start + step, call_details.num_eval)
            #print("queuing",start,stop)
            args[1:3] = [np.int32(start), np.int32(stop)]
            last_call = [kernel(self.queue, self.q_input.global_size,
                                None, *args, wait_for=last_call)]
        cl.enqueue_copy(self.queue, self.result, self.result_b)
        #print("result", self.result)

        # Free buffers
        for v in (details_b, values_b):
            if v is not None: v.release()

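        # Result layout (from the code below): result[:nq] holds I(q) and
        # result[nq] holds the polydispersity normalization; the remaining
        # slots are the extra elements reserved in __init__ ("plus three
        # for the normalization values").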
        pd_norm = self.result[self.q_input.nq]
        scale = values[0]/(pd_norm if pd_norm != 0.0 else 1.0)
        background = values[1]
        #print("scale",scale,values[0],self.result[self.q_input.nq],background)
        return scale*self.result[:self.q_input.nq] + background
        # return self.result[:self.q_input.nq]

    def release(self):
        # type: () -> None
        """
        Release resources associated with the kernel.
        """
        for v in self._need_release:
            v.release()
        self._need_release = []

    def __del__(self):
        # type: () -> None
        self.release()