source: sasmodels/sasmodels/kernelcl.py @ ba32cdd

Last change on this file was ba32cdd, checked in by Paul Kienzle <pkienzle@…>, 8 years ago:

    fix opencl compile; still doesn't run

1"""
2GPU driver for C kernels
3
4There should be a single GPU environment running on the system.  This
5environment is constructed on the first call to :func:`env`, and the
6same environment is returned on each call.
7
8After retrieving the environment, the next step is to create the kernel.
9This is done with a call to :meth:`GpuEnvironment.make_kernel`, which
10returns the type of data used by the kernel.
11
12Next a :class:`GpuData` object should be created with the correct kind
13of data.  This data object can be used by multiple kernels, for example,
14if the target model is a weighted sum of multiple kernels.  The data
15should include any extra evaluation points required to compute the proper
16data smearing.  This need not match the square grid for 2D data if there
17is an index saying which q points are active.
18
19Together the GpuData, the program, and a device form a :class:`GpuKernel`.
20This kernel is used during fitting, receiving new sets of parameters and
21evaluating them.  The output value is stored in an output buffer on the
22devices, where it can be combined with other structure factors and form
23factors and have instrumental resolution effects applied.
24
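A typical session, sketched here with *model_info* prepared elsewhere via
:func:`generate.make_model_info` and the *details*, *weights* and *values*
vectors packed as described in :class:`GpuKernel` below:

    source = generate.make_source(model_info)
    model = GpuModel(source, model_info, dtype='single')
    kernel = model.make_kernel([q])  # q is a 1-D numpy array
    Iq = kernel(details, weights, values, cutoff=1e-5)
    kernel.release()
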
In order to use OpenCL for your models, you will need OpenCL drivers for
your machine.  These should be available from your graphics card vendor.
Intel provides OpenCL drivers for CPUs as well as their integrated HD
graphics chipsets.  AMD also provides drivers for Intel CPUs, but as of
this writing the performance is lacking compared to the Intel drivers.
NVIDIA combines drivers for CUDA and OpenCL in one package.  The result
is a bit messy if you have multiple drivers installed.  You can see which
drivers are available by starting python and running:

    import pyopencl as cl
    cl.create_some_context(interactive=True)

Once you have done that, it will show the available drivers which you
can select.  It will then tell you that you can use these drivers
automatically by setting the PYOPENCL_CTX environment variable.

Some graphics cards have multiple devices on the same card.  You cannot
yet use both of them concurrently to evaluate models, but you can run
the program twice using a different device for each session.

OpenCL kernels are compiled when needed by the device driver.  Some
drivers produce compiler output even when there is no error.  You
can see the output by setting PYOPENCL_COMPILER_OUTPUT=1.  It should be
harmless, albeit annoying.
"""
from __future__ import print_function
import os
import warnings

import numpy as np

try:
    import pyopencl as cl
    # Ask OpenCL for the default context so that we know that one exists
    cl.create_some_context(interactive=False)
except Exception as exc:
    warnings.warn(str(exc))
    raise RuntimeError("OpenCL not available")

from pyopencl import mem_flags as mf
from pyopencl.characterize import get_fast_inaccurate_build_options

from . import generate

# The max loops number is limited by the amount of local memory available
# on the device.  You don't want to make this value too big because it will
# waste resources, nor too small because it may interfere with users trying
# to do their polydispersity calculations.  A value of 2048 should be much
# larger than necessary given that cost grows as npts^k where k is the number
# of polydisperse parameters.
MAX_LOOPS = 2048


# Pragmas for enabling OpenCL features.  Be sure to protect them so that
# they still compile even if OpenCL is not present.
_F16_PRAGMA = """\
#if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp16)
#  pragma OPENCL EXTENSION cl_khr_fp16: enable
#endif
"""

_F64_PRAGMA = """\
#if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp64)
#  pragma OPENCL EXTENSION cl_khr_fp64: enable
#endif
"""


ENV = None
def environment():
    """
    Returns a singleton :class:`GpuEnvironment`.

    This provides an OpenCL context and one queue per device.
    """
    global ENV
    if ENV is None:
        ENV = GpuEnvironment()
    return ENV

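# Typical use of :func:`environment` (sketch): fetch the singleton and ask
# for a queue in the precision you need; get_queue returns None if no
# device supports that precision.
#     env = environment()
#     queue = env.get_queue(np.dtype('float32'))
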
def has_type(device, dtype):
    """
    Return true if device supports the requested precision.
    """
    if dtype == generate.F32:
        return True
    elif dtype == generate.F64:
        return "cl_khr_fp64" in device.extensions
    elif dtype == generate.F16:
        return "cl_khr_fp16" in device.extensions
    else:
        return False

def get_warp(kernel, queue):
    """
    Return the size of an execution batch for *kernel* running on *queue*.
    """
    return kernel.get_work_group_info(
        cl.kernel_work_group_info.PREFERRED_WORK_GROUP_SIZE_MULTIPLE,
        queue.device)

def _stretch_input(vector, dtype, extra=1e-3, boundary=32):
    """
    Stretch an input vector to the correct boundary.

    Performance on the kernels can drop by a factor of two or more if the
    number of values to compute does not fall on a nice power of two
    boundary.  The trailing additional vector elements are given a
    value of *extra*, and so f(*extra*) will be computed for each of
    them.  The values of interest will thus form a prefix of the
    computed array.

    *boundary* should be a power of 2 which is at least 32 for good
    performance on current platforms (as of Jan 2015).  It should
    probably be the max of get_warp(kernel, queue) and
    device.min_data_type_align_size//4.
    """
    remainder = vector.size % boundary
    if remainder != 0:
        size = vector.size + (boundary - remainder)
        vector = np.hstack((vector, [extra] * (size - vector.size)))
    return np.ascontiguousarray(vector, dtype=dtype)

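# Example (illustrative): a 100-point vector stretched to a 32-point
# boundary grows to 128 points, with the padding set to *extra*:
#     padded = _stretch_input(np.linspace(0.001, 0.5, 100), np.float32)
#     assert padded.size == 128 and padded[-1] == np.float32(1e-3)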

def compile_model(context, source, dtype, fast=False):
    """
    Build a model to run on the gpu.

    Returns the compiled program.  Raises RuntimeError if the requested
    *dtype* is not supported by every device in the context.
    """
    dtype = np.dtype(dtype)
    if not all(has_type(d, dtype) for d in context.devices):
        raise RuntimeError("%s not supported for devices" % dtype)

    source_list = [generate.convert_type(source, dtype)]

    if dtype == generate.F16:
        source_list.insert(0, _F16_PRAGMA)
    elif dtype == generate.F64:
        source_list.insert(0, _F64_PRAGMA)

    # Note: USE_SINCOS makes the intel cpu slower under opencl
    if context.devices[0].type == cl.device_type.GPU:
        source_list.insert(0, "#define USE_SINCOS\n")
    options = (get_fast_inaccurate_build_options(context.devices[0])
               if fast else [])
    source = "\n".join(source_list)
    program = cl.Program(context, source).build(options=options)
    return program

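# A minimal sketch of compiling a model by hand (assumes *source* came
# from generate.make_source and that a suitable context is available):
#     context = environment().get_context(np.dtype('float32'))
#     program = compile_model(context, source, np.dtype('float32'), fast=True)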

# for now, this returns one device in the context
# TODO: create a context that contains all devices on all platforms
class GpuEnvironment(object):
    """
    GPU context, with possibly many devices, and one queue per device.
    """
    def __init__(self):
        # find gpu context
        #self.context = cl.create_some_context()

        # Note: self.context is a list of single-device contexts, ordered
        # by preference (see _get_default_context below).
        self.context = None
        if 'PYOPENCL_CTX' in os.environ:
            self._create_some_context()

        if not self.context:
            self.context = _get_default_context()

        # Byte boundary for data alignment
        #self.data_boundary = max(d.min_data_type_align_size
        #                         for d in self.context.devices)
        self.queues = [cl.CommandQueue(context, context.devices[0])
                       for context in self.context]
        self.compiled = {}

    def has_type(self, dtype):
        """
        Return True if any device in the environment supports the given type.
        """
        dtype = generate.F32 if dtype == 'fast' else np.dtype(dtype)
        return any(has_type(d, dtype)
                   for context in self.context
                   for d in context.devices)

    def get_queue(self, dtype):
        """
        Return a command queue for the kernels of type dtype, or None if
        no device supports that type.
        """
        for context, queue in zip(self.context, self.queues):
            if all(has_type(d, dtype) for d in context.devices):
                return queue

    def get_context(self, dtype):
        """
        Return an OpenCL context for the kernels of type dtype, or None if
        no device supports that type.
        """
        for context, queue in zip(self.context, self.queues):
            if all(has_type(d, dtype) for d in context.devices):
                return context

    def _create_some_context(self):
        """
        Protected call to cl.create_some_context without interactivity.  Use
        this if PYOPENCL_CTX is set in the environment.  Sets the *context*
        attribute.
        """
        try:
            self.context = [cl.create_some_context(interactive=False)]
        except Exception as exc:
            warnings.warn(str(exc))
            warnings.warn("pyopencl.create_some_context() failed")
            warnings.warn("the environment variable 'PYOPENCL_CTX' might "
                          "not be set correctly")

    def compile_program(self, name, source, dtype, fast=False):
        """
        Compile the program for the device in the given context.
        """
        key = "%s-%s-%s" % (name, dtype, fast)
        if key not in self.compiled:
            print("compiling", name)
            dtype = np.dtype(dtype)
            program = compile_model(self.get_context(dtype),
                                    str(source), dtype, fast)
            self.compiled[key] = program
        return self.compiled[key]

    def release_program(self, name):
        """
        Free memory associated with the program on the device.
        """
        # Programs are cached under a compound "name-dtype-fast" key, so
        # release every cached program built from this model name.
        for key in [k for k in self.compiled if k.startswith(name + "-")]:
            self.compiled[key].release()
            del self.compiled[key]

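# Sketch of the compile/release cycle as used by GpuModel below:
#     program = environment().compile_program(name, source, dtype, fast=False)
#     ...
#     environment().release_program(name)
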
def _get_default_context():
    """
    Get an OpenCL context, preferring GPU over CPU, and preferring Intel
    drivers over AMD drivers.
    """
    # Note: on mobile devices there is automatic clock scaling if either the
    # CPU or the GPU is underutilized; probably doesn't affect us, but if it
    # did, it would mean that putting a busy loop on the CPU while the GPU
    # is running may increase throughput.
    #
    # Macbook pro, base install:
    #     {'Apple': [Intel CPU, NVIDIA GPU]}
    # Macbook pro, base install:
    #     {'Apple': [Intel CPU, Intel GPU]}
    # 2 x nvidia 295 with Intel and NVIDIA opencl drivers installed
    #     {'Intel': [CPU], 'NVIDIA': [GPU, GPU, GPU, GPU]}
    gpu, cpu = None, None
    for platform in cl.get_platforms():
        # AMD provides a much weaker CPU driver than Intel/Apple, so avoid
        # it.  If someone has bothered to install the AMD/NVIDIA drivers,
        # prefer them over the integrated graphics driver that may have
        # been supplied with the CPU chipset.
        preferred_cpu = (platform.vendor.startswith('Intel')
                         or platform.vendor.startswith('Apple'))
        preferred_gpu = (platform.vendor.startswith('Advanced')
                         or platform.vendor.startswith('NVIDIA'))
        for device in platform.get_devices():
            if device.type == cl.device_type.GPU:
                # If the existing type is not GPU then it will be CUSTOM or
                # ACCELERATOR, so don't override it.
                if gpu is None or (preferred_gpu and gpu.type == cl.device_type.GPU):
                    gpu = device
            elif device.type == cl.device_type.CPU:
                if cpu is None or preferred_cpu:
                    cpu = device
            else:
                # System has cl.device_type.ACCELERATOR or cl.device_type.CUSTOM.
                # The Intel Phi, for example, registers as an accelerator.
                # Since the user installed a custom device on their system
                # and went through the pain of sorting out OpenCL drivers
                # for it, let's assume they really do want to use it as
                # their primary compute device.
                gpu = device

    # Order the devices gpu first, then cpu; when searching for an available
    # device by data type they will be checked in this order, which means
    # that if the gpu supports double then the cpu will never be used
    # (though we may make it possible to explicitly request the cpu at some
    # point).
    devices = []
    if gpu is not None:
        devices.append(gpu)
    if cpu is not None:
        devices.append(cpu)
    return [cl.Context([d]) for d in devices]
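
# To see what _get_default_context has to choose from on a given machine,
# a quick sketch:
#     for platform in cl.get_platforms():
#         print(platform.vendor, [d.name for d in platform.get_devices()])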


class GpuModel(object):
    """
    GPU wrapper for a single model.

    *source* and *model_info* are the model source and interface as returned
    from :func:`generate.make_source` and :func:`generate.make_model_info`.

    *dtype* is the desired model precision.  Any numpy dtype for single
    or double precision floats will do, such as 'f', 'float32' or 'single'
    for single and 'd', 'float64' or 'double' for double.  Double precision
    is an optional extension which may not be available on all devices.
    Half precision ('float16', 'half') may be available on some devices.
    Fast precision ('fast') is a loose version of single precision, indicating
    that the compiler is allowed to take shortcuts.
    """
    def __init__(self, source, model_info, dtype=generate.F32):
        self.info = model_info
        self.source = source
        self.dtype = generate.F32 if dtype == 'fast' else np.dtype(dtype)
        self.fast = (dtype == 'fast')
        self.program = None  # delay program creation

    def __getstate__(self):
        return self.info, self.source, self.dtype, self.fast

    def __setstate__(self, state):
        self.info, self.source, self.dtype, self.fast = state
        self.program = None

    def make_kernel(self, q_vectors):
        """
        Return a :class:`GpuKernel` for evaluating the model at the given
        q vectors, compiling the program on first use.
        """
        if self.program is None:
            compiler = environment().compile_program
            self.program = compiler(self.info['name'], self.source,
                                    self.dtype, self.fast)
        is_2d = len(q_vectors) == 2
        kernel_name = generate.kernel_name(self.info, is_2d)
        kernel = getattr(self.program, kernel_name)
        return GpuKernel(kernel, self.info, q_vectors, self.dtype)

    def release(self):
        """
        Free the resources associated with the model.
        """
        if self.program is not None:
            environment().release_program(self.info['name'])
            self.program = None

    def __del__(self):
        self.release()

# TODO: check that we don't need a destructor for buffers which go out of scope
class GpuInput(object):
    """
    Make q data available to the gpu.

    *q_vectors* is a list of q vectors, which will be *[q]* for 1-D data,
    and *[qx, qy]* for 2-D data.  Internally, the vectors will be reallocated
    to get the best performance on OpenCL, which may involve shifting and
    stretching the array to better match the memory architecture.  Additional
    points will be evaluated with *q=1e-3*.

    *dtype* is the data type for the q vectors. The data type should be
    set to match that of the kernel, which is an attribute of
    :class:`GpuModel`.  Note that not all kernels support double
    precision, so even if the program was created for double precision,
    the *GpuModel.dtype* may be single precision.

    Call :meth:`release` when complete.  Even if not called directly, the
    buffer will be released when the data object is freed.
    """
    def __init__(self, q_vectors, dtype=generate.F32):
        # TODO: do we ever need double precision q?
        env = environment()
        self.nq = q_vectors[0].size
        self.dtype = np.dtype(dtype)
        self.is_2d = (len(q_vectors) == 2)
        # TODO: stretch input based on get_warp()
        # not doing it now since warp depends on kernel, which is not known
        # at this point, so instead using 32, which is good on the set of
        # architectures tested so far.
        if self.is_2d:
            # Note: 17 rather than 15 because the result is 2 elements
            # longer than the input.
            width = ((self.nq+17)//16)*16
            self.q = np.empty((width, 2), dtype=dtype)
            self.q[:self.nq, 0] = q_vectors[0]
            self.q[:self.nq, 1] = q_vectors[1]
        else:
            # Note: 33 rather than 31 because the result is 2 elements
            # longer than the input.
            width = ((self.nq+33)//32)*32
            self.q = np.empty(width, dtype=dtype)
            self.q[:self.nq] = q_vectors[0]
        self.global_size = [self.q.shape[0]]
        context = env.get_context(self.dtype)
        #print("creating inputs of size", self.global_size)
        self.q_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                             hostbuf=self.q)

    def release(self):
        """
        Free the GPU memory used by the q buffer.
        """
        # Note: release the device buffer (q_b), not the host array (q).
        if self.q_b is not None:
            self.q_b.release()
            self.q_b = None

    def __del__(self):
        self.release()
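
# Sketch of direct use of GpuInput (normally GpuKernel creates it for you):
#     q_input = GpuInput([np.logspace(-3, -1, 200)], dtype=generate.F32)
#     ...  # q_input.q_b is the on-device buffer passed to the kernel
#     q_input.release()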

class GpuKernel(object):
    """
    Callable SAS kernel.

    *kernel* is the compiled OpenCL kernel to call.

    *model_info* is the module information

    *q_vectors* is the q vectors at which the kernel should be evaluated

    *dtype* is the kernel precision

    The resulting call method takes *details*, an int32 vector describing
    the polydispersity loops, *weights* and *values*, the packed
    (value, weight) vectors for the parameters, and *cutoff*, which
    determines the integration limits: any points with combined weight
    less than *cutoff* will not be calculated.

    Call :meth:`release` when done with the kernel instance.
    """
    def __init__(self, kernel, model_info, q_vectors, dtype):
        max_pd = model_info['max_pd']
        npars = len(model_info['parameters'])-2
        q_input = GpuInput(q_vectors, dtype)
        self.dtype = dtype
        self.dim = '2d' if q_input.is_2d else '1d'
        self.kernel = kernel
        self.info = model_info
        self.pd_stop_index = 4*max_pd-1
        # plus three for the normalization values
        self.result = np.empty(q_input.nq+3, q_input.dtype)

        # Inputs and outputs for each kernel call
        # Note: res may be shorter than res_b if global_size != nq
        env = environment()
        self.queue = env.get_queue(dtype)

        # details is int32 data, padded to an 8 integer boundary
        size = ((max_pd*5 + npars*3 + 2 + 7)//8)*8  # currently unused
        self.result_b = cl.Buffer(self.queue.context, mf.READ_WRITE,
                                  q_input.global_size[0] * q_input.dtype.itemsize)
        self.q_input = q_input  # allocated by GpuInput above

        self._need_release = [self.result_b, self.q_input]

    def __call__(self, details, weights, values, cutoff):
        real = (np.float32 if self.q_input.dtype == generate.F32
                else np.float64 if self.q_input.dtype == generate.F64
                else np.float16 if self.q_input.dtype == generate.F16
                else np.float32)  # will never get here, so use np.float32
        assert details.dtype == np.int32
        assert weights.dtype == real and values.dtype == real

        context = self.queue.context
        details_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                              hostbuf=details)
        weights_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                              hostbuf=weights)
        values_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                             hostbuf=values)

        start, stop = 0, details[self.pd_stop_index]
        args = [
            np.uint32(self.q_input.nq), np.uint32(start), np.uint32(stop),
            details_b, weights_b, values_b,
            self.q_input.q_b, self.result_b, real(cutoff),
        ]
        self.kernel(self.queue, self.q_input.global_size, None, *args)
        cl.enqueue_copy(self.queue, self.result, self.result_b)
        for v in (details_b, weights_b, values_b):
            v.release()

        return self.result[:self.q_input.nq]

    def release(self):
        """
        Release resources associated with the kernel.
        """
        for v in self._need_release:
            v.release()
        self._need_release = []

    def __del__(self):
        self.release()