source: sasmodels/sasmodels/kernelcl.py @ a5b8477

Last change on this file since a5b8477 was a5b8477, checked in by Paul Kienzle <pkienzle@…>, 8 years ago

update docs to work with the new ModelInfo/ParameterTable classes

1"""
2GPU driver for C kernels
3
4There should be a single GPU environment running on the system.  This
5environment is constructed on the first call to :func:`env`, and the
6same environment is returned on each call.
7
8After retrieving the environment, the next step is to create the kernel.
9This is done with a call to :meth:`GpuEnvironment.make_kernel`, which
10returns the type of data used by the kernel.
11
12Next a :class:`GpuData` object should be created with the correct kind
13of data.  This data object can be used by multiple kernels, for example,
14if the target model is a weighted sum of multiple kernels.  The data
15should include any extra evaluation points required to compute the proper
16data smearing.  This need not match the square grid for 2D data if there
17is an index saying which q points are active.
18
19Together the GpuData, the program, and a device form a :class:`GpuKernel`.
20This kernel is used during fitting, receiving new sets of parameters and
21evaluating them.  The output value is stored in an output buffer on the
22devices, where it can be combined with other structure factors and form
23factors and have instrumental resolution effects applied.
24
25In order to use OpenCL for your models, you will need OpenCL drivers for
26your machine.  These should be available from your graphics card vendor.
27Intel provides OpenCL drivers for CPUs as well as their integrated HD
28graphics chipsets.  AMD also provides drivers for Intel CPUs, but as of
29this writing the performance is lacking compared to the Intel drivers.
30NVidia combines drivers for CUDA and OpenCL in one package.  The result
31is a bit messy if you have multiple drivers installed.  You can see which
32drivers are available by starting python and running:
33
34    import pyopencl as cl
35    cl.create_some_context(interactive=True)
36
37Once you have done that, it will show the available drivers which you
38can select.  It will then tell you that you can use these drivers
39automatically by setting the PYOPENCL_CTX environment variable.
40
41Some graphics cards have multiple devices on the same card.  You cannot
42yet use both of them concurrently to evaluate models, but you can run
43the program twice using a different device for each session.
44
45OpenCL kernels are compiled when needed by the device driver.  Some
46drivers produce compiler output even when there is no error.  You
47can see the output by setting PYOPENCL_COMPILER_OUTPUT=1.  It should be
48harmless, albeit annoying.
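
A typical workflow, shown here as a rough sketch: it assumes that *source*
and *model_info* have already been produced by :func:`generate.make_source`
and :func:`generate.make_model_info`, and that OpenCL is available (at this
revision the import guard below disables it):

    import numpy as np
    from sasmodels.kernelcl import GpuModel

    model = GpuModel(source, model_info, dtype='single')
    q = np.linspace(0.001, 0.5, 200)
    kernel = model.make_kernel([q])      # use [qx, qy] for 2-D data
    # Iq = kernel(call_details, weights, values, cutoff=1e-5)
    kernel.release()
    model.release()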
49"""
50from __future__ import print_function
51
52import os
53import warnings
54
55import numpy as np  # type: ignore
56
57try:
58    raise NotImplementedError("OpenCL not yet implemented for new kernel template")
59    import pyopencl as cl  # type: ignore
60    # Ask OpenCL for the default context so that we know that one exists
61    cl.create_some_context(interactive=False)
62except Exception as exc:
63    warnings.warn(str(exc))
64    raise RuntimeError("OpenCL not available")
65
66from pyopencl import mem_flags as mf  # type: ignore
67from pyopencl.characterize import get_fast_inaccurate_build_options  # type: ignore
68
69from . import generate
70from .kernel import KernelModel, Kernel
71
72try:
73    from typing import Tuple, Callable, Any
74    from .modelinfo import ModelInfo
75    from .details import CallDetails
76except ImportError:
77    pass
78
79# The max loops number is limited by the amount of local memory available
80# on the device.  You don't want to make this value too big because it will
81# waste resources, nor too small because it may interfere with users trying
82# to do their polydispersity calculations.  A value of 1024 should be much
83# larger than necessary given that cost grows as npts^k where k is the number
84# of polydisperse parameters.
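# For example, three polydisperse parameters sampled at 35 points each
# already imply 35^3 = 42875 weighted evaluations per q value.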
MAX_LOOPS = 2048


# Pragmas for enabling OpenCL features.  Be sure to protect them so that
# they still compile even if OpenCL is not present.
_F16_PRAGMA = """\
#if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp16)
#  pragma OPENCL EXTENSION cl_khr_fp16: enable
#endif
"""

_F64_PRAGMA = """\
#if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp64)
#  pragma OPENCL EXTENSION cl_khr_fp64: enable
#endif
"""


ENV = None
def environment():
    """
    Returns a singleton :class:`GpuEnvironment`.

    This provides an OpenCL context and one queue per device.
    """
    global ENV
    if ENV is None:
        ENV = GpuEnvironment()
    return ENV

def has_type(device, dtype):
    """
    Return True if *device* supports the requested precision.
    """
    if dtype == generate.F32:
        return True
    elif dtype == generate.F64:
        return "cl_khr_fp64" in device.extensions
    elif dtype == generate.F16:
        return "cl_khr_fp16" in device.extensions
    else:
        return False

def get_warp(kernel, queue):
    """
    Return the size of an execution batch for *kernel* running on *queue*.
    """
    return kernel.get_work_group_info(
        cl.kernel_work_group_info.PREFERRED_WORK_GROUP_SIZE_MULTIPLE,
        queue.device)

def _stretch_input(vector, dtype, extra=1e-3, boundary=32):
    """
    Stretch an input vector to the correct boundary.

    Performance on the kernels can drop by a factor of two or more if the
    number of values to compute does not fall on a nice power of two
    boundary.  The trailing additional vector elements are given a
    value of *extra*, and so f(*extra*) will be computed for each of
    them.  The values of interest will thus be a subset of the computed
    array.

    *boundary* should be a power of 2 which is at least 32 for good
    performance on current platforms (as of Jan 2015).  It should
    probably be the max of get_warp(kernel, queue) and
    device.min_data_type_align_size//4.
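
    For example, a 40-element vector is padded out to the next multiple
    of 32:

        >>> v = np.arange(40.)
        >>> _stretch_input(v, np.float32, boundary=32).shape
        (64,)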
150    """
151    remainder = vector.size % boundary
152    if remainder != 0:
153        size = vector.size + (boundary - remainder)
154        vector = np.hstack((vector, [extra] * (size - vector.size)))
155    return np.ascontiguousarray(vector, dtype=dtype)
156
157
158def compile_model(context, source, dtype, fast=False):
159    """
160    Build a model to run on the gpu.
161
162    Returns the compiled program and its type.  The returned type will
163    be float32 even if the desired type is float64 if any of the
164    devices in the context do not support the cl_khr_fp64 extension.
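
    A minimal usage sketch, assuming an OpenCL *context* and generated
    model *source* are already in hand:

        program = compile_model(context, source, np.dtype('float32'))
        # individual kernels are attributes of the program, looked up by
        # name as in GpuModel.make_kernel: getattr(program, kernel_name)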
165    """
166    dtype = np.dtype(dtype)
167    if not all(has_type(d, dtype) for d in context.devices):
168        raise RuntimeError("%s not supported for devices"%dtype)
169
170    source_list = [generate.convert_type(source, dtype)]
171
172    if dtype == generate.F16:
173        source_list.insert(0, _F16_PRAGMA)
174    elif dtype == generate.F64:
175        source_list.insert(0, _F64_PRAGMA)
176
177    # Note: USE_SINCOS makes the intel cpu slower under opencl
178    if context.devices[0].type == cl.device_type.GPU:
179        source_list.insert(0, "#define USE_SINCOS\n")
180    options = (get_fast_inaccurate_build_options(context.devices[0])
181               if fast else [])
182    source = "\n".join(source_list)
183    program = cl.Program(context, source).build(options=options)
184    return program
185
186
187# for now, this returns one device in the context
188# TODO: create a context that contains all devices on all platforms
189class GpuEnvironment(object):
190    """
191    GPU context, with possibly many devices, and one queue per device.
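
    A sketch of typical use, going through the :func:`environment`
    singleton rather than constructing this class directly:

        env = environment()
        if env.has_type(np.dtype('float64')):
            context = env.get_context(np.dtype('float64'))
            queue = env.get_queue(np.dtype('float64'))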
192    """
193    def __init__(self):
194        # find gpu context
195        #self.context = cl.create_some_context()
196
197        self.context = None
198        if 'PYOPENCL_CTX' in os.environ:
199            self._create_some_context()
200
201        if not self.context:
202            self.context = _get_default_context()
203
204        # Byte boundary for data alignment
205        #self.data_boundary = max(d.min_data_type_align_size
206        #                         for d in self.context.devices)
207        self.queues = [cl.CommandQueue(context, context.devices[0])
208                       for context in self.context]
209        self.compiled = {}
210
211    def has_type(self, dtype):
212        """
213        Return True if all devices support a given type.
214        """
215        dtype = generate.F32 if dtype == 'fast' else np.dtype(dtype)
216        return any(has_type(d, dtype)
217                   for context in self.context
218                   for d in context.devices)
219
220    def get_queue(self, dtype):
221        """
222        Return a command queue for the kernels of type dtype.
223        """
224        for context, queue in zip(self.context, self.queues):
225            if all(has_type(d, dtype) for d in context.devices):
226                return queue
227
228    def get_context(self, dtype):
229        """
230        Return a OpenCL context for the kernels of type dtype.
231        """
232        for context, queue in zip(self.context, self.queues):
233            if all(has_type(d, dtype) for d in context.devices):
234                return context
235
236    def _create_some_context(self):
237        """
238        Protected call to cl.create_some_context without interactivity.  Use
239        this if PYOPENCL_CTX is set in the environment.  Sets the *context*
240        attribute.
241        """
242        try:
243            self.context = [cl.create_some_context(interactive=False)]
244        except Exception as exc:
245            warnings.warn(str(exc))
246            warnings.warn("pyopencl.create_some_context() failed")
247            warnings.warn("the environment variable 'PYOPENCL_CTX' might not be set correctly")
248
    def compile_program(self, name, source, dtype, fast=False):
        """
        Compile the program for the device in the given context.
        """
        key = "%s-%s-%s"%(name, dtype, fast)
        if key not in self.compiled:
            print("compiling", name)
            dtype = np.dtype(dtype)
            program = compile_model(self.get_context(dtype),
                                    str(source), dtype, fast)
            self.compiled[key] = program
        return self.compiled[key]

    def release_program(self, name):
        """
        Free memory associated with the program on the device.
        """
        # Programs are keyed by name, dtype and fast flag, so release every
        # compiled variant of the named model.
        for key in [k for k in self.compiled if k.startswith(name + "-")]:
            self.compiled[key].release()
            del self.compiled[key]

def _get_default_context():
    """
    Get an OpenCL context, preferring GPU over CPU, and preferring Intel
    drivers over AMD drivers.
    """
    # Note: on mobile devices there is automatic clock scaling if either the
    # CPU or the GPU is underutilized; probably doesn't affect us, but if
    # it did, it would mean that putting a busy loop on the CPU while the GPU
    # is running may increase throughput.
    #
    # Macbook pro, base install:
    #     {'Apple': [Intel CPU, NVIDIA GPU]}
    # Macbook pro, base install:
    #     {'Apple': [Intel CPU, Intel GPU]}
    # 2 x nvidia 295 with Intel and NVIDIA opencl drivers installed
    #     {'Intel': [CPU], 'NVIDIA': [GPU, GPU, GPU, GPU]}
    gpu, cpu = None, None
    for platform in cl.get_platforms():
        # AMD provides a much weaker CPU driver than Intel/Apple, so avoid it.
        # If someone has bothered to install the AMD/NVIDIA drivers, prefer
        # them over the integrated graphics driver that may have been supplied
        # with the CPU chipset.
        preferred_cpu = platform.vendor.startswith('Intel') or platform.vendor.startswith('Apple')
        preferred_gpu = platform.vendor.startswith('Advanced') or platform.vendor.startswith('NVIDIA')
        for device in platform.get_devices():
            if device.type == cl.device_type.GPU:
                # If the existing type is not GPU then it will be CUSTOM or
                # ACCELERATOR, so don't override it.
                if gpu is None or (preferred_gpu and gpu.type == cl.device_type.GPU):
                    gpu = device
            elif device.type == cl.device_type.CPU:
                if cpu is None or preferred_cpu:
                    cpu = device
            else:
                # System has cl.device_type.ACCELERATOR or cl.device_type.CUSTOM
                # Intel Phi for example registers as an accelerator.
                # Since the user installed a custom device on their system and
                # went through the pain of sorting out OpenCL drivers for it,
                # let's assume they really do want to use it as their primary
                # compute device.
                gpu = device

    # Order the devices by gpu then by cpu; when searching for an available
    # device by data type they will be checked in this order, which means
    # that if the gpu supports double then the cpu will never be used (though
    # we may make it possible to explicitly request the cpu at some point).
    devices = []
    if gpu is not None:
        devices.append(gpu)
    if cpu is not None:
        devices.append(cpu)
    return [cl.Context([d]) for d in devices]


class GpuModel(KernelModel):
    """
    GPU wrapper for a single model.

    *source* and *model_info* are the model source and interface as returned
    from :func:`generate.make_source` and :func:`generate.make_model_info`.

    *dtype* is the desired model precision.  Any numpy dtype for single
    or double precision floats will do, such as 'f', 'float32' or 'single'
    for single and 'd', 'float64' or 'double' for double.  Double precision
    is an optional extension which may not be available on all devices.
    Half precision ('float16', 'half') may be available on some devices.
    Fast precision ('fast') is a loose version of single precision, indicating
    that the compiler is allowed to take shortcuts.
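
    For example, the same model may be compiled at several precisions
    (a sketch, assuming *source* and *model_info* are already in hand):

        single = GpuModel(source, model_info, dtype='single')
        double = GpuModel(source, model_info, dtype='double')
        loose = GpuModel(source, model_info, dtype='fast')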
335    """
336    def __init__(self, source, model_info, dtype=generate.F32):
337        self.info = model_info
338        self.source = source
339        self.dtype = generate.F32 if dtype == 'fast' else np.dtype(dtype)
340        self.fast = (dtype == 'fast')
341        self.program = None # delay program creation
342
343    def __getstate__(self):
344        return self.info, self.source, self.dtype, self.fast
345
346    def __setstate__(self, state):
347        self.info, self.source, self.dtype, self.fast = state
348        self.program = None
349
350    def make_kernel(self, q_vectors):
351        if self.program is None:
352            compiler = environment().compile_program
353            self.program = compiler(self.info.name, self.source,
354                                    self.dtype, self.fast)
355        is_2d = len(q_vectors) == 2
356        kernel_name = generate.kernel_name(self.info, is_2d)
357        kernel = getattr(self.program, kernel_name)
358        return GpuKernel(kernel, self.info, q_vectors, self.dtype)
359
360    def release(self):
361        """
362        Free the resources associated with the model.
363        """
364        if self.program is not None:
365            environment().release_program(self.info.name)
366            self.program = None
367
368    def __del__(self):
369        self.release()
370
371# TODO: check that we don't need a destructor for buffers which go out of scope
372class GpuInput(object):
373    """
374    Make q data available to the gpu.
375
376    *q_vectors* is a list of q vectors, which will be *[q]* for 1-D data,
377    and *[qx, qy]* for 2-D data.  Internally, the vectors will be reallocated
378    to get the best performance on OpenCL, which may involve shifting and
379    stretching the array to better match the memory architecture.  Additional
380    points will be evaluated with *q=1e-3*.
381
382    *dtype* is the data type for the q vectors. The data type should be
383    set to match that of the kernel, which is an attribute of
384    :class:`GpuProgram`.  Note that not all kernels support double
385    precision, so even if the program was created for double precision,
386    the *GpuProgram.dtype* may be single precision.
387
388    Call :meth:`release` when complete.  Even if not called directly, the
389    buffer will be released when the data object is freed.
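
    A sketch of direct use (normally :class:`GpuKernel` constructs this
    object for you):

        q = np.linspace(0.001, 0.5, 200)
        q_input = GpuInput([q], dtype=np.float32)
        # ... pass q_input.q_b and q_input.global_size to a kernel ...
        q_input.release()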
390    """
391    def __init__(self, q_vectors, dtype=generate.F32):
392        # TODO: do we ever need double precision q?
393        env = environment()
394        self.nq = q_vectors[0].size
395        self.dtype = np.dtype(dtype)
396        self.is_2d = (len(q_vectors) == 2)
397        # TODO: stretch input based on get_warp()
398        # not doing it now since warp depends on kernel, which is not known
399        # at this point, so instead using 32, which is good on the set of
400        # architectures tested so far.
401        if self.is_2d:
402            # Note: 17 rather than 15 because results is 2 elements
403            # longer than input.
404            width = ((self.nq+17)//16)*16
405            self.q = np.empty((width, 2), dtype=dtype)
406            self.q[:self.nq, 0] = q_vectors[0]
407            self.q[:self.nq, 1] = q_vectors[1]
408        else:
409            # Note: 33 rather than 31 because results is 2 elements
410            # longer than input.
411            width = ((self.nq+33)//32)*32
412            self.q = np.empty(width, dtype=dtype)
413            self.q[:self.nq] = q_vectors[0]
414        self.global_size = [self.q.shape[0]]
415        context = env.get_context(self.dtype)
416        #print("creating inputs of size", self.global_size)
417        self.q_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
418                             hostbuf=self.q)
419
    def release(self):
        """
        Free the memory.
        """
        # Release the OpenCL buffer rather than the host-side numpy array.
        if self.q_b is not None:
            self.q_b.release()
            self.q_b = None

    def __del__(self):
        self.release()

class GpuKernel(Kernel):
    """
    Callable SAS kernel.

    *kernel* is the compiled OpenCL kernel to call, as returned from the
    program built for the model

    *model_info* is the module information

    *q_vectors* is the q vectors at which the kernel should be evaluated

    *dtype* is the kernel precision

    The resulting call method takes *call_details* describing the
    polydispersity loops, the flattened *weights* and *values* arrays for
    the parameters, and a *cutoff*: any points with combined weight less
    than *cutoff* will not be calculated.

    Call :meth:`release` when done with the kernel instance.
450    """
451    def __init__(self, kernel, model_info, q_vectors):
452        # type: (KernelModel, ModelInfo, List[np.ndarray]) -> None
453        max_pd = model_info.parameters.max_pd
454        npars = len(model_info.parameters.kernel_parameters)-2
455        q_input = GpuInput(q_vectors, kernel.dtype)
456        self.kernel = kernel
457        self.info = model_info
458        self.dtype = kernel.dtype
459        self.dim = '2d' if q_input.is_2d else '1d'
460        self.pd_stop_index = 4*max_pd-1
461        # plus three for the normalization values
462        self.result = np.empty(q_input.nq+3, q_input.dtype)
463
464        # Inputs and outputs for each kernel call
465        # Note: res may be shorter than res_b if global_size != nq
466        env = environment()
467        self.queue = env.get_queue(kernel.dtype)
468
469        # details is int32 data, padded to an 8 integer boundary
470        size = ((max_pd*5 + npars*3 + 2 + 7)//8)*8
471        self.result_b = cl.Buffer(self.queue.context, mf.READ_WRITE,
472                               q_input.global_size[0] * kernel.dtype.itemsize)
473        self.q_input = q_input # allocated by GpuInput above
474
475        self._need_release = [ self.result_b, self.q_input ]
476
    def __call__(self, call_details, weights, values, cutoff):
        # type: (CallDetails, np.ndarray, np.ndarray, float) -> np.ndarray
        real = (np.float32 if self.q_input.dtype == generate.F32
                else np.float64 if self.q_input.dtype == generate.F64
                else np.float16 if self.q_input.dtype == generate.F16
                else np.float32)  # will never get here, so use np.float32
        assert call_details.dtype == np.int32
        assert weights.dtype == real and values.dtype == real

        context = self.queue.context
        details_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                              hostbuf=call_details)
        weights_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                              hostbuf=weights)
        values_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                             hostbuf=values)

        start, stop = 0, call_details[self.pd_stop_index]
        args = [
            np.uint32(self.q_input.nq), np.uint32(start), np.uint32(stop),
            details_b, weights_b, values_b,
            self.q_input.q_b, self.result_b, real(cutoff),
        ]
        self.kernel(self.queue, self.q_input.global_size, None, *args)
        cl.enqueue_copy(self.queue, self.result, self.result_b)
        [v.release() for v in (details_b, weights_b, values_b)]

        return self.result[:self.q_input.nq]

    def release(self):
        """
        Release resources associated with the kernel.
        """
        for v in self._need_release:
            v.release()
        self._need_release = []

    def __del__(self):
        self.release()