source: sasmodels/sasmodels/kernelcl.py @ 7ae2b7f

1"""
2GPU driver for C kernels
3
4There should be a single GPU environment running on the system.  This
5environment is constructed on the first call to :func:`env`, and the
6same environment is returned on each call.
7
8After retrieving the environment, the next step is to create the kernel.
9This is done with a call to :meth:`GpuEnvironment.make_kernel`, which
10returns the type of data used by the kernel.
11
12Next a :class:`GpuData` object should be created with the correct kind
13of data.  This data object can be used by multiple kernels, for example,
14if the target model is a weighted sum of multiple kernels.  The data
15should include any extra evaluation points required to compute the proper
16data smearing.  This need not match the square grid for 2D data if there
17is an index saying which q points are active.
18
19Together the GpuData, the program, and a device form a :class:`GpuKernel`.
20This kernel is used during fitting, receiving new sets of parameters and
21evaluating them.  The output value is stored in an output buffer on the
22devices, where it can be combined with other structure factors and form
23factors and have instrumental resolution effects applied.
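
A minimal sketch of the workflow, assuming *source* and *model_info* have
already been built by :mod:`generate`, *q* is a numpy array of scattering
vectors, and the call details, weights and values are placeholders for the
inputs prepared by the fitting framework:

    model = GpuModel(source, model_info, dtype="single")
    kernel = model.make_kernel([q])     # use [qx, qy] for 2-D data
    Iq = kernel(call_details, weights, values, cutoff=1e-5)
    kernel.release()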

In order to use OpenCL for your models, you will need OpenCL drivers for
your machine.  These should be available from your graphics card vendor.
Intel provides OpenCL drivers for CPUs as well as their integrated HD
graphics chipsets.  AMD also provides drivers for Intel CPUs, but as of
this writing their performance lags behind that of the Intel drivers.
NVIDIA combines drivers for CUDA and OpenCL in one package.  The result
is a bit messy if you have multiple drivers installed.  You can see which
drivers are available by starting python and running:

    import pyopencl as cl
    cl.create_some_context(interactive=True)

This will list the available drivers and let you select one.  It will
then tell you how to select that driver automatically by setting the
PYOPENCL_CTX environment variable.

Some graphics cards have multiple devices on the same card.  You cannot
yet use both of them concurrently to evaluate models, but you can run
the program twice using a different device for each session.

OpenCL kernels are compiled when needed by the device driver.  Some
drivers produce compiler output even when there is no error.  You
can see the output by setting PYOPENCL_COMPILER_OUTPUT=1.  It should be
harmless, albeit annoying.
"""
from __future__ import print_function
import os
import warnings

import numpy as np  # type: ignore

try:
    import pyopencl as cl  # type: ignore
    # Ask OpenCL for the default context so that we know that one exists
    cl.create_some_context(interactive=False)
except Exception as exc:
    warnings.warn(str(exc))
    raise RuntimeError("OpenCL not available")

from pyopencl import mem_flags as mf  # type: ignore
from pyopencl.characterize import get_fast_inaccurate_build_options  # type: ignore

from . import generate
from .kernel import KernelModel, Kernel

# The max loops number is limited by the amount of local memory available
# on the device.  You don't want to make this value too big because it will
# waste resources, nor too small because it may limit the polydispersity
# calculations users can perform.  A value of 2048 should be much larger
# than necessary given that cost grows as npts^k where k is the number
# of polydisperse parameters.
MAX_LOOPS = 2048


# Pragmas to enable OpenCL features.  Be sure to protect them so that they
# still compile even if OpenCL is not present.
_F16_PRAGMA = """\
#if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp16)
#  pragma OPENCL EXTENSION cl_khr_fp16: enable
#endif
"""

_F64_PRAGMA = """\
#if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp64)
#  pragma OPENCL EXTENSION cl_khr_fp64: enable
#endif
"""


ENV = None
def environment():
    """
    Returns a singleton :class:`GpuEnvironment`.

    This provides an OpenCL context and one queue per device.
    """
    global ENV
    if ENV is None:
        ENV = GpuEnvironment()
    return ENV

def has_type(device, dtype):
    """
    Return True if the device supports the requested precision.
    """
    if dtype == generate.F32:
        return True
    elif dtype == generate.F64:
        return "cl_khr_fp64" in device.extensions
    elif dtype == generate.F16:
        return "cl_khr_fp16" in device.extensions
    else:
        return False

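# Example (a sketch, not part of the driver): choose the best precision a
# device supports, falling back to single precision when the cl_khr_fp64
# extension is missing.  *device* here is whichever pyopencl device you
# happen to be targeting.
#
#     device = cl.get_platforms()[0].get_devices()[0]
#     dtype = generate.F64 if has_type(device, generate.F64) else generate.F32
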
def get_warp(kernel, queue):
    """
    Return the size of an execution batch for *kernel* running on *queue*.
    """
    return kernel.get_work_group_info(
        cl.kernel_work_group_info.PREFERRED_WORK_GROUP_SIZE_MULTIPLE,
        queue.device)

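# Example (a sketch): the _stretch_input docstring below suggests computing
# the padding boundary from the warp size and the device alignment.
#
#     boundary = max(get_warp(kernel, queue),
#                    queue.device.min_data_type_align_size // 4)
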
def _stretch_input(vector, dtype, extra=1e-3, boundary=32):
    """
    Stretch an input vector to the correct boundary.

    Performance on the kernels can drop by a factor of two or more if the
    number of values to compute does not fall on a nice power of two
    boundary.  The trailing additional vector elements are given a
    value of *extra*, and so f(*extra*) will be computed for each of
    them.  The values of interest will thus be a subset of the computed
    array.

    *boundary* should be a power of 2 which is at least 32 for good
    performance on current platforms (as of Jan 2015).  It should
    probably be the max of get_warp(kernel, queue) and
    device.min_data_type_align_size//4.
    """
    remainder = vector.size % boundary
    if remainder != 0:
        size = vector.size + (boundary - remainder)
        vector = np.hstack((vector, [extra] * (size - vector.size)))
    return np.ascontiguousarray(vector, dtype=dtype)

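# Example (a sketch): a 100-point vector is padded up to the next multiple
# of 32, with the trailing 28 entries set to *extra* so that f(extra) is
# computed for them and later discarded.
#
#     q = np.linspace(0.001, 0.5, 100)
#     padded = _stretch_input(q, np.float32)   # padded.size == 128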

def compile_model(context, source, dtype, fast=False):
    """
    Build a model to run on the gpu.

    Returns the compiled program.  A RuntimeError is raised if any of the
    devices in the context do not support the requested precision, such as
    float64 on devices without the cl_khr_fp64 extension.
    """
    dtype = np.dtype(dtype)
    if not all(has_type(d, dtype) for d in context.devices):
        raise RuntimeError("%s not supported for devices"%dtype)

    source_list = [generate.convert_type(source, dtype)]

    if dtype == generate.F16:
        source_list.insert(0, _F16_PRAGMA)
    elif dtype == generate.F64:
        source_list.insert(0, _F64_PRAGMA)

    # Note: USE_SINCOS makes the intel cpu slower under opencl
    if context.devices[0].type == cl.device_type.GPU:
        source_list.insert(0, "#define USE_SINCOS\n")
    options = (get_fast_inaccurate_build_options(context.devices[0])
               if fast else [])
    source = "\n".join(source_list)
    program = cl.Program(context, source).build(options=options)
    return program

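# Example (a sketch): compile double-precision source for the environment's
# default context, with fast-math relaxations enabled.  *source* is a
# placeholder for the C source produced by generate.make_source().
#
#     context = environment().get_context(generate.F64)
#     program = compile_model(context, source, generate.F64, fast=True)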

# for now, this returns one device in the context
# TODO: create a context that contains all devices on all platforms
class GpuEnvironment(object):
    """
    GPU context, with possibly many devices, and one queue per device.
    """
    def __init__(self):
        # find gpu context
        #self.context = cl.create_some_context()

        self.context = None
        if 'PYOPENCL_CTX' in os.environ:
            self._create_some_context()

        if not self.context:
            self.context = _get_default_context()

        # Byte boundary for data alignment
        #self.data_boundary = max(d.min_data_type_align_size
        #                         for d in self.context.devices)
        self.queues = [cl.CommandQueue(context, context.devices[0])
                       for context in self.context]
        self.compiled = {}

    def has_type(self, dtype):
        """
        Return True if any device in the environment supports the given type.
        """
        dtype = generate.F32 if dtype == 'fast' else np.dtype(dtype)
        return any(has_type(d, dtype)
                   for context in self.context
                   for d in context.devices)

    def get_queue(self, dtype):
        """
        Return a command queue for the kernels of type dtype.
        """
        for context, queue in zip(self.context, self.queues):
            if all(has_type(d, dtype) for d in context.devices):
                return queue

    def get_context(self, dtype):
        """
        Return an OpenCL context for the kernels of type dtype.
        """
        for context, queue in zip(self.context, self.queues):
            if all(has_type(d, dtype) for d in context.devices):
                return context

    def _create_some_context(self):
        """
        Protected call to cl.create_some_context without interactivity.  Use
        this if PYOPENCL_CTX is set in the environment.  Sets the *context*
        attribute.
        """
        try:
            self.context = [cl.create_some_context(interactive=False)]
        except Exception as exc:
            warnings.warn(str(exc))
            warnings.warn("pyopencl.create_some_context() failed")
            warnings.warn("the environment variable 'PYOPENCL_CTX' might not be set correctly")

    def compile_program(self, name, source, dtype, fast=False):
        """
        Compile the program for the device in the given context.
        """
        key = "%s-%s-%s"%(name, dtype, fast)
        if key not in self.compiled:
            print("compiling", name)
            dtype = np.dtype(dtype)
            program = compile_model(self.get_context(dtype),
                                    str(source), dtype, fast)
            self.compiled[key] = program
        return self.compiled[key]

    def release_program(self, name):
        """
        Free memory associated with the program on the device.
        """
        # Programs are cached by "name-dtype-fast" key, so release every
        # variant compiled for this model name.
        for key in [k for k in self.compiled if k.startswith(name + "-")]:
            self.compiled[key].release()
            del self.compiled[key]

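# Example (a sketch): programs are cached by (name, dtype, fast), so the
# second call returns the cached program without recompiling.  "cylinder"
# and *source* are placeholders for a model name and its generated source.
#
#     env = environment()
#     program = env.compile_program("cylinder", source, generate.F32)
#     same = env.compile_program("cylinder", source, generate.F32)
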
def _get_default_context():
    """
    Get an OpenCL context, preferring GPU over CPU, and preferring Intel
    drivers over AMD drivers.
    """
    # Note: on mobile devices there is automatic clock scaling if either the
    # CPU or the GPU is underutilized; probably doesn't affect us, but if
    # it did, it would mean that putting a busy loop on the CPU while the GPU
    # is running may increase throughput.
    #
    # Macbook pro, base install:
    #     {'Apple': [Intel CPU, NVIDIA GPU]}
    # Macbook pro, base install:
    #     {'Apple': [Intel CPU, Intel GPU]}
    # 2 x nvidia 295 with Intel and NVIDIA opencl drivers installed
    #     {'Intel': [CPU], 'NVIDIA': [GPU, GPU, GPU, GPU]}
    gpu, cpu = None, None
    for platform in cl.get_platforms():
        # AMD provides a much weaker CPU driver than Intel/Apple, so avoid it.
        # If someone has bothered to install the AMD/NVIDIA drivers, prefer
        # them over the integrated graphics driver that may have been supplied
        # with the CPU chipset.
        preferred_cpu = (platform.vendor.startswith('Intel')
                         or platform.vendor.startswith('Apple'))
        preferred_gpu = (platform.vendor.startswith('Advanced')
                         or platform.vendor.startswith('NVIDIA'))
        for device in platform.get_devices():
            if device.type == cl.device_type.GPU:
                # If the existing type is not GPU then it will be CUSTOM or
                # ACCELERATOR, so don't override it.
                if gpu is None or (preferred_gpu and gpu.type == cl.device_type.GPU):
                    gpu = device
            elif device.type == cl.device_type.CPU:
                if cpu is None or preferred_cpu:
                    cpu = device
            else:
                # System has cl.device_type.ACCELERATOR or cl.device_type.CUSTOM.
                # Intel Phi, for example, registers as an accelerator.  Since
                # the user installed a custom device on their system and went
                # through the pain of sorting out OpenCL drivers for it, let's
                # assume they really do want to use it as their primary
                # compute device.
                gpu = device

    # Order the devices by gpu then by cpu; when searching for an available
    # device by data type they will be checked in this order, which means
    # that if the gpu supports double then the cpu will never be used (though
    # we may make it possible to explicitly request the cpu at some point).
    devices = []
    if gpu is not None:
        devices.append(gpu)
    if cpu is not None:
        devices.append(cpu)
    return [cl.Context([d]) for d in devices]


class GpuModel(KernelModel):
    """
    GPU wrapper for a single model.

    *source* and *model_info* are the model source and interface as returned
    from :func:`generate.make_source` and :func:`generate.make_model_info`.

    *dtype* is the desired model precision.  Any numpy dtype for single
    or double precision floats will do, such as 'f', 'float32' or 'single'
    for single and 'd', 'float64' or 'double' for double.  Double precision
    is an optional extension which may not be available on all devices.
    Half precision ('float16', 'half') may be available on some devices.
    Fast precision ('fast') is a loose version of single precision, indicating
    that the compiler is allowed to take shortcuts.
    """
    def __init__(self, source, model_info, dtype=generate.F32):
        self.info = model_info
        self.source = source
        self.dtype = generate.F32 if dtype == 'fast' else np.dtype(dtype)
        self.fast = (dtype == 'fast')
        self.program = None  # delay program creation

    def __getstate__(self):
        return self.info, self.source, self.dtype, self.fast

    def __setstate__(self, state):
        self.info, self.source, self.dtype, self.fast = state
        self.program = None

    def make_kernel(self, q_vectors):
        if self.program is None:
            compiler = environment().compile_program
            self.program = compiler(self.info.name, self.source,
                                    self.dtype, self.fast)
        is_2d = len(q_vectors) == 2
        kernel_name = generate.kernel_name(self.info, is_2d)
        kernel = getattr(self.program, kernel_name)
        return GpuKernel(kernel, self.info, q_vectors, self.dtype)

    def release(self):
        """
        Free the resources associated with the model.
        """
        if self.program is not None:
            environment().release_program(self.info.name)
            self.program = None

    def __del__(self):
        self.release()

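# Example (a sketch): a single GpuModel compiles once and can hand out both
# 1-D and 2-D kernels.  *q*, *qx* and *qy* are placeholder numpy arrays.
#
#     model = GpuModel(source, model_info, dtype='float32')
#     kernel_1d = model.make_kernel([q])
#     kernel_2d = model.make_kernel([qx, qy])
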
# TODO: check that we don't need a destructor for buffers which go out of scope
class GpuInput(object):
    """
    Make q data available to the gpu.

    *q_vectors* is a list of q vectors, which will be *[q]* for 1-D data,
    and *[qx, qy]* for 2-D data.  Internally, the vectors will be reallocated
    to get the best performance on OpenCL, which may involve shifting and
    stretching the array to better match the memory architecture.  Additional
    points will be evaluated with *q=1e-3*.

    *dtype* is the data type for the q vectors. The data type should be
    set to match that of the kernel, which is an attribute of
    :class:`GpuModel`.  Note that not all kernels support double
    precision, so even if the program was created for double precision,
    the *GpuModel.dtype* may be single precision.

    Call :meth:`release` when complete.  Even if not called directly, the
    buffer will be released when the data object is freed.
    """
    def __init__(self, q_vectors, dtype=generate.F32):
        # TODO: do we ever need double precision q?
        env = environment()
        self.nq = q_vectors[0].size
        self.dtype = np.dtype(dtype)
        self.is_2d = (len(q_vectors) == 2)
        # TODO: stretch input based on get_warp()
        # not doing it now since warp depends on kernel, which is not known
        # at this point, so instead using 32, which is good on the set of
        # architectures tested so far.
        if self.is_2d:
            # Note: 17 rather than 15 because the result vector is 2 elements
            # longer than the input.
            width = ((self.nq+17)//16)*16
            self.q = np.empty((width, 2), dtype=dtype)
            self.q[:self.nq, 0] = q_vectors[0]
            self.q[:self.nq, 1] = q_vectors[1]
        else:
            # Note: 33 rather than 31 because the result vector is 2 elements
            # longer than the input.
            width = ((self.nq+33)//32)*32
            self.q = np.empty(width, dtype=dtype)
            self.q[:self.nq] = q_vectors[0]
        self.global_size = [self.q.shape[0]]
        context = env.get_context(self.dtype)
        #print("creating inputs of size", self.global_size)
        self.q_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                             hostbuf=self.q)

    def release(self):
        """
        Free the device memory.
        """
        # Release the OpenCL buffer, not the host-side numpy array.
        if self.q_b is not None:
            self.q_b.release()
            self.q_b = None

    def __del__(self):
        self.release()

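# Example (a sketch): 1-D q data is padded to a multiple of 32 values, so
# 100 q points yield a global size of 128 work items.
#
#     q_input = GpuInput([np.linspace(0.001, 0.5, 100)], generate.F32)
#     q_input.global_size   # [128]
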
class GpuKernel(Kernel):
    """
    Callable SAS kernel.

    *kernel* is the compiled OpenCL kernel to call.

    *model_info* is the module information

    *q_vectors* is the q vectors at which the kernel should be evaluated

    *dtype* is the kernel precision

    The resulting call method takes the *pars*, a list of values for
    the fixed parameters to the kernel, and *pd_pars*, a list of
    (value, weight) vectors for the polydisperse parameters.  *cutoff*
    determines the integration limits: any points with combined weight
    less than *cutoff* will not be calculated.

    Call :meth:`release` when done with the kernel instance.
    """
    def __init__(self, kernel, model_info, q_vectors, dtype):
        max_pd = model_info.max_pd
        q_input = GpuInput(q_vectors, dtype)
        self.dtype = dtype
        self.dim = '2d' if q_input.is_2d else '1d'
        self.kernel = kernel
        self.info = model_info
        self.pd_stop_index = 4*max_pd-1
        # plus three for the normalization values
        self.result = np.empty(q_input.nq+3, q_input.dtype)

        # Inputs and outputs for each kernel call.  The details, weights and
        # values buffers are created per call in :meth:`__call__`.
        # Note: result may be shorter than result_b if global_size != nq.
        env = environment()
        self.queue = env.get_queue(dtype)

        # Allocate enough room for the padded q vector or for the result
        # vector plus its normalization values, whichever is larger.
        result_size = max(q_input.global_size[0], q_input.nq+3)
        self.result_b = cl.Buffer(self.queue.context, mf.READ_WRITE,
                                  result_size * q_input.dtype.itemsize)
        self.q_input = q_input  # allocated by GpuInput above

        self._need_release = [self.result_b, self.q_input]

    def __call__(self, call_details, weights, values, cutoff):
        real = (np.float32 if self.q_input.dtype == generate.F32
                else np.float64 if self.q_input.dtype == generate.F64
                else np.float16 if self.q_input.dtype == generate.F16
                else np.float32)  # will never get here, so use np.float32
        assert call_details.dtype == np.int32
        assert weights.dtype == real and values.dtype == real

        context = self.queue.context
        details_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                              hostbuf=call_details)
        weights_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                              hostbuf=weights)
        values_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                             hostbuf=values)

        start, stop = 0, call_details[self.pd_stop_index]
        args = [
            np.uint32(self.q_input.nq), np.uint32(start), np.uint32(stop),
            details_b, weights_b, values_b,
            self.q_input.q_b, self.result_b, real(cutoff),
        ]
        self.kernel(self.queue, self.q_input.global_size, None, *args)
        cl.enqueue_copy(self.queue, self.result, self.result_b)
        for v in (details_b, weights_b, values_b):
            v.release()

        return self.result[:self.q_input.nq]

    def release(self):
        """
        Release resources associated with the kernel.
        """
        for v in self._need_release:
            v.release()
        self._need_release = []

    def __del__(self):
        self.release()