source: sasmodels/sasmodels/kernelcl.py @ 445d1c0

Last change to this file was 445d1c0, checked in by Paul Kienzle <pkienzle@…>, 8 years ago

restrict polydispersity checks in bumps to 1d parameters for 1d data

1"""
2GPU driver for C kernels
3
4There should be a single GPU environment running on the system.  This
5environment is constructed on the first call to :func:`env`, and the
6same environment is returned on each call.
7
8After retrieving the environment, the next step is to create the kernel.
9This is done with a call to :meth:`GpuEnvironment.make_kernel`, which
10returns the type of data used by the kernel.
11
12Next a :class:`GpuData` object should be created with the correct kind
13of data.  This data object can be used by multiple kernels, for example,
14if the target model is a weighted sum of multiple kernels.  The data
15should include any extra evaluation points required to compute the proper
16data smearing.  This need not match the square grid for 2D data if there
17is an index saying which q points are active.
18
19Together the GpuData, the program, and a device form a :class:`GpuKernel`.
20This kernel is used during fitting, receiving new sets of parameters and
21evaluating them.  The output value is stored in an output buffer on the
22devices, where it can be combined with other structure factors and form
23factors and have instrumental resolution effects applied.
24
25In order to use OpenCL for your models, you will need OpenCL drivers for
26your machine.  These should be available from your graphics card vendor.
27Intel provides OpenCL drivers for CPUs as well as their integrated HD
28graphics chipsets.  AMD also provides drivers for Intel CPUs, but as of
29this writing the performance is lacking compared to the Intel drivers.
30NVidia combines drivers for CUDA and OpenCL in one package.  The result
31is a bit messy if you have multiple drivers installed.  You can see which
32drivers are available by starting python and running:
33
34    import pyopencl as cl
35    cl.create_some_context(interactive=True)
36
37Once you have done that, it will show the available drivers which you
38can select.  It will then tell you that you can use these drivers
39automatically by setting the PYOPENCL_CTX environment variable.
40
41Some graphics cards have multiple devices on the same card.  You cannot
42yet use both of them concurrently to evaluate models, but you can run
43the program twice using a different device for each session.
44
45OpenCL kernels are compiled when needed by the device driver.  Some
46drivers produce compiler output even when there is no error.  You
47can see the output by setting PYOPENCL_COMPILER_OUTPUT=1.  It should be
48harmless, albeit annoying.
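
For example, to select a device without the interactive prompt and show
any compiler messages in one shot (the index 0 and the script name are
only illustrative; use the indices that create_some_context reports on
your machine):

    PYOPENCL_CTX=0 PYOPENCL_COMPILER_OUTPUT=1 python fit_script.py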
49"""
50import os
51import warnings
52
53import numpy as np
54
55try:
56    import pyopencl as cl
57    # Ask OpenCL for the default context so that we know that one exists
58    cl.create_some_context(interactive=False)
59except Exception as exc:
60    warnings.warn(str(exc))
61    raise RuntimeError("OpenCL not available")
62
63from pyopencl import mem_flags as mf
64from pyopencl.characterize import get_fast_inaccurate_build_options
65
66from . import generate
67
68# The max loops number is limited by the amount of local memory available
69# on the device.  You don't want to make this value too big because it will
70# waste resources, nor too small because it may interfere with users trying
71# to do their polydispersity calculations.  A value of 1024 should be much
72# larger than necessary given that cost grows as npts^k where k is the number
73# of polydisperse parameters.
74MAX_LOOPS = 2048
75
76
77# Pragmas for enable OpenCL features.  Be sure to protect them so that they
78# still compile even if OpenCL is not present.
79_F16_PRAGMA = """\
80#if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp16)
81#  pragma OPENCL EXTENSION cl_khr_fp16: enable
82#endif
83"""
84
85_F64_PRAGMA = """\
86#if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp64)
87#  pragma OPENCL EXTENSION cl_khr_fp64: enable
88#endif
89"""
90
91
92ENV = None
def environment():
    """
    Returns a singleton :class:`GpuEnvironment`.

    This provides an OpenCL context and one queue per device.
    """
    global ENV
    if ENV is None:
        ENV = GpuEnvironment()
    return ENV

def has_type(device, dtype):
    """
    Return True if the device supports the requested precision.
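
    For example, to fall back to single precision when double is not
    available (a sketch)::

        dtype = generate.F64 if has_type(device, generate.F64) else generate.F32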
    """
    if dtype == generate.F32:
        return True
    elif dtype == generate.F64:
        return "cl_khr_fp64" in device.extensions
    elif dtype == generate.F16:
        return "cl_khr_fp16" in device.extensions
    else:
        return False

def get_warp(kernel, queue):
    """
    Return the size of an execution batch for *kernel* running on *queue*.
    """
    return kernel.get_work_group_info(
        cl.kernel_work_group_info.PREFERRED_WORK_GROUP_SIZE_MULTIPLE,
        queue.device)

def _stretch_input(vector, dtype, extra=1e-3, boundary=32):
    """
    Stretch an input vector to the correct boundary.

    Performance on the kernels can drop by a factor of two or more if the
    number of values to compute does not fall on a nice power of two
    boundary.  The trailing additional vector elements are given a
    value of *extra*, and so f(*extra*) will be computed for each of
    them.  The values of interest are thus a prefix of the computed array.

    *boundary* should be a power of 2 which is at least 32 for good
    performance on current platforms (as of Jan 2015).  It should
    probably be the max of get_warp(kernel, queue) and
    device.min_data_type_align_size//4.
    """
    remainder = vector.size % boundary
    if remainder != 0:
        size = vector.size + (boundary - remainder)
        vector = np.hstack((vector, [extra] * (size - vector.size)))
    return np.ascontiguousarray(vector, dtype=dtype)


def compile_model(context, source, dtype, fast=False):
    """
    Build a model to run on the gpu.

    Returns the compiled program.  Raises RuntimeError if the requested
    *dtype* is not supported by every device in the context; for example,
    float64 requires the cl_khr_fp64 extension.
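
    A usage sketch, assuming *source* holds the generated model source::

        env = environment()
        dtype = np.dtype('d')
        program = compile_model(env.get_context(dtype), source, dtype)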
    """
    dtype = np.dtype(dtype)
    if not all(has_type(d, dtype) for d in context.devices):
        raise RuntimeError("%s not supported for devices"%dtype)

    source_list = [generate.convert_type(source, dtype)]

    if dtype == generate.F16:
        source_list.insert(0, _F16_PRAGMA)
    elif dtype == generate.F64:
        source_list.insert(0, _F64_PRAGMA)

    # Note: USE_SINCOS makes the intel cpu slower under opencl
    if context.devices[0].type == cl.device_type.GPU:
        source_list.insert(0, "#define USE_SINCOS\n")
    options = (get_fast_inaccurate_build_options(context.devices[0])
               if fast else [])
    # Join the assembled source fragments, not the original source string,
    # otherwise the join is applied character by character.
    source = "\n".join(source_list)
    program = cl.Program(context, source).build(options=options)
    return program


# for now, this returns one device in the context
# TODO: create a context that contains all devices on all platforms
class GpuEnvironment(object):
    """
    GPU context, with possibly many devices, and one queue per device.
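
    A typical interaction, sketched::

        env = environment()              # shared singleton
        if env.has_type(np.dtype('d')):
            queue = env.get_queue(np.dtype('d'))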
    """
    def __init__(self):
        # find gpu context
        #self.context = cl.create_some_context()

        # self.context is a list of contexts, one per usable device
        self.context = None
        if 'PYOPENCL_CTX' in os.environ:
            self._create_some_context()

        if not self.context:
            self.context = _get_default_context()

        # Byte boundary for data alignment
        #self.data_boundary = max(d.min_data_type_align_size
        #                         for d in self.context.devices)
        self.queues = [cl.CommandQueue(context, context.devices[0])
                       for context in self.context]
        self.compiled = {}

    def has_type(self, dtype):
        """
        Return True if any of the available devices support the given type.
        """
        dtype = generate.F32 if dtype == 'fast' else np.dtype(dtype)
        return any(has_type(d, dtype)
                   for context in self.context
                   for d in context.devices)

    def get_queue(self, dtype):
        """
        Return a command queue for the kernels of type dtype.
        """
        for context, queue in zip(self.context, self.queues):
            if all(has_type(d, dtype) for d in context.devices):
                return queue

    def get_context(self, dtype):
        """
        Return an OpenCL context for the kernels of type dtype.
        """
        for context, queue in zip(self.context, self.queues):
            if all(has_type(d, dtype) for d in context.devices):
                return context

    def _create_some_context(self):
        """
        Protected call to cl.create_some_context without interactivity.  Use
        this if PYOPENCL_CTX is set in the environment.  Sets the *context*
        attribute.
        """
        try:
            self.context = [cl.create_some_context(interactive=False)]
        except Exception as exc:
            warnings.warn(str(exc))
            warnings.warn("pyopencl.create_some_context() failed")
            warnings.warn("the environment variable 'PYOPENCL_CTX' might not be set correctly")

    def compile_program(self, name, source, dtype, fast=False):
        """
        Compile the program for the device in the given context.
        """
        key = "%s-%s-%s"%(name, dtype, fast)
        if key not in self.compiled:
            #print("compiling", name)
            dtype = np.dtype(dtype)
            program = compile_model(self.get_context(dtype), source, dtype, fast)
            self.compiled[key] = program
        return self.compiled[key]

    def release_program(self, name):
        """
        Free memory associated with the program on the device.
        """
        # Programs are stored under a name-dtype-fast key, so release
        # every compiled variant of the named model.
        for key in [k for k in self.compiled if k.startswith(name + "-")]:
            self.compiled[key].release()
            del self.compiled[key]

def _get_default_context():
    """
    Get an OpenCL context, preferring GPU over CPU, and preferring Intel
    drivers over AMD drivers.
    """
    # Note: on mobile devices there is automatic clock scaling if either the
    # CPU or the GPU is underutilized; probably doesn't affect us, but if
    # it did, it would mean that putting a busy loop on the CPU while the GPU
    # is running may increase throughput.
    #
    # Macbook pro, base install:
    #     {'Apple': [Intel CPU, NVIDIA GPU]}
    # Macbook pro, base install:
    #     {'Apple': [Intel CPU, Intel GPU]}
    # 2 x nvidia 295 with Intel and NVIDIA opencl drivers installed
    #     {'Intel': [CPU], 'NVIDIA': [GPU, GPU, GPU, GPU]}
    gpu, cpu = None, None
    for platform in cl.get_platforms():
        # AMD provides a much weaker CPU driver than Intel/Apple, so avoid
        # it.  If someone has bothered to install the AMD/NVIDIA drivers,
        # prefer them over the integrated graphics driver that may have
        # been supplied with the CPU chipset.
        preferred_cpu = (platform.vendor.startswith('Intel')
                         or platform.vendor.startswith('Apple'))
        preferred_gpu = (platform.vendor.startswith('Advanced')
                         or platform.vendor.startswith('NVIDIA'))
        for device in platform.get_devices():
            if device.type == cl.device_type.GPU:
                # If the existing type is not GPU then it will be CUSTOM
                # or ACCELERATOR, so don't override it.
                if gpu is None or (preferred_gpu and gpu.type == cl.device_type.GPU):
                    gpu = device
            elif device.type == cl.device_type.CPU:
                if cpu is None or preferred_cpu:
                    cpu = device
            else:
                # System has cl.device_type.ACCELERATOR or cl.device_type.CUSTOM;
                # an Intel Phi, for example, registers as an accelerator.
                # Since the user installed a custom device on their system
                # and went through the pain of sorting out OpenCL drivers
                # for it, let's assume they really do want to use it as
                # their primary compute device.
                gpu = device

    # Order the devices gpu first, then cpu; when searching for an available
    # device by data type they will be checked in this order, which means
    # that if the gpu supports double then the cpu will never be used (though
    # we may make it possible to explicitly request the cpu at some point).
    devices = []
    if gpu is not None:
        devices.append(gpu)
    if cpu is not None:
        devices.append(cpu)
    return [cl.Context([d]) for d in devices]


class GpuModel(object):
    """
    GPU wrapper for a single model.

    *source* and *model_info* are the model source and interface as returned
    from :func:`generate.make_source` and :func:`generate.make_model_info`.

    *dtype* is the desired model precision.  Any numpy dtype for single
    or double precision floats will do, such as 'f', 'float32' or 'single'
    for single and 'd', 'float64' or 'double' for double.  Double precision
    is an optional extension which may not be available on all devices.
    Half precision ('float16', 'half') may be available on some devices.
    Fast precision ('fast') is a loose version of single precision, indicating
    that the compiler is allowed to take shortcuts.
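
    For example, building a single precision model (a sketch; *q* and
    *details* are placeholders for the caller's data)::

        model = GpuModel(source, model_info, dtype='single')
        kernel = model.make_calculator([q], details)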
    """
    def __init__(self, source, model_info, dtype=generate.F32):
        self.info = model_info
        self.source = source
        self.dtype = generate.F32 if dtype == 'fast' else np.dtype(dtype)
        self.fast = (dtype == 'fast')
        self.program = None # delay program creation

    def __getstate__(self):
        return self.info, self.source, self.dtype, self.fast

    def __setstate__(self, state):
        self.info, self.source, self.dtype, self.fast = state
        self.program = None

    def make_calculator(self, q_vectors, details):
        """
        Compile the program if necessary and return a :class:`GpuKernel`
        bound to the given *q_vectors*.
        """
        if self.program is None:
            compiler = environment().compile_program
            self.program = compiler(self.info['name'], self.source,
                                    self.dtype, self.fast)
        is_2d = len(q_vectors) == 2
        kernel_name = generate.kernel_name(self.info, is_2d)
        kernel = getattr(self.program, kernel_name)
        return GpuKernel(kernel, self.info, q_vectors, details, self.dtype)

    def release(self):
        """
        Free the resources associated with the model.
        """
        if self.program is not None:
            environment().release_program(self.info['name'])
            self.program = None

    def __del__(self):
        self.release()

# TODO: check that we don't need a destructor for buffers which go out of scope
class GpuInput(object):
    """
    Make q data available to the gpu.

    *q_vectors* is a list of q vectors, which will be *[q]* for 1-D data,
    and *[qx, qy]* for 2-D data.  Internally, the vectors will be reallocated
    to get the best performance on OpenCL, which may involve shifting and
    stretching the array to better match the memory architecture.  Additional
    points will be evaluated with *q=1e-3*.

    *dtype* is the data type for the q vectors.  The data type should be
    set to match that of the kernel, which is an attribute of
    :class:`GpuModel`.  Note that not all devices support double
    precision, so even if the program was created for double precision,
    the *GpuModel.dtype* may be single precision.

    Call :meth:`release` when complete.  Even if not called directly, the
    buffer will be released when the data object is freed.
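
    A construction sketch for 1-D data (values are illustrative)::

        q = np.linspace(0.001, 0.5, 200)
        data = GpuInput([q], dtype=generate.F32)   # use [qx, qy] for 2-D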
    """
    def __init__(self, q_vectors, dtype=generate.F32):
        # TODO: do we ever need double precision q?
        env = environment()
        self.nq = q_vectors[0].size
        self.dtype = np.dtype(dtype)
        self.is_2d = (len(q_vectors) == 2)
        # TODO: stretch input based on get_warp()
        # not doing it now since warp depends on kernel, which is not known
        # at this point, so instead using 32, which is good on the set of
        # architectures tested so far.
        if self.is_2d:
            # Note: 17 rather than 15 because the result is 2 elements
            # longer than the input.
            width = ((self.nq+17)//16)*16
            self.q = np.empty((width, 2), dtype=dtype)
            self.q[:self.nq, 0] = q_vectors[0]
            self.q[:self.nq, 1] = q_vectors[1]
        else:
            # Note: 33 rather than 31 because the result is 2 elements
            # longer than the input.
            width = ((self.nq+33)//32)*32
            self.q = np.empty(width, dtype=dtype)
            self.q[:self.nq] = q_vectors[0]
        self.global_size = [self.q.shape[0]]
        context = env.get_context(self.dtype)
        #print("creating inputs of size", self.global_size)
        # COPY_HOST_PTR initiates transfer as necessary?
        self.q_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                             hostbuf=self.q)

    def release(self):
        """
        Free the device memory.
        """
        # Release the OpenCL buffer rather than the host numpy array,
        # which has no release method.
        if self.q_b is not None:
            self.q_b.release()
            self.q_b = None

    def __del__(self):
        self.release()

class GpuKernel(object):
    """
    Callable SAS kernel.

    *kernel* is the compiled OpenCL kernel to call

    *model_info* is the module information

    *q_vectors* is the q vectors at which the kernel should be evaluated

    *details* is the int32 vector of polydispersity details for the call

    *dtype* is the kernel precision

    The resulting call method takes *weights*, the vector of polydispersity
    weights, and *values*, the vector of parameter values, both with the
    kernel's real type.  *cutoff* determines the integration limits: any
    points with combined weight less than *cutoff* will not be calculated.

    Call :meth:`release` when done with the kernel instance.
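
    A call sketch (names are illustrative; *weights* and *values* must be
    arrays of the kernel's real type)::

        Iq = kernel(weights, values, cutoff=1e-5)
        kernel.release()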
    """
    def __init__(self, kernel, model_info, q_vectors, details, dtype):
        if details.dtype != np.int32:
            raise TypeError("details must be an int32 array")

        # Read from model_info here; self.info is not assigned until below.
        max_pd = model_info['max_pd']
        npars = len(model_info['parameters'])-2
        q_input = GpuInput(q_vectors, dtype)
        self.dtype = dtype
        self.dim = '2d' if q_input.is_2d else '1d'
        self.kernel = kernel
        self.info = model_info
        self.details = details
        self.pd_stop_index = 4*max_pd-1
        # plus three for the normalization values
        self.result = np.empty(q_input.nq+3, q_input.dtype)

        # Inputs and outputs for each kernel call
        # Note: res may be shorter than res_b if global_size != nq
        env = environment()
        self.queue = env.get_queue(dtype)

        # details is int32 data, padded to a 32 integer boundary
        self.details_b = cl.Buffer(self.queue.context,
                                   mf.READ_ONLY | mf.COPY_HOST_PTR,
                                   hostbuf=details)
        # weights and values are real-valued; cl.Buffer sizes are given in
        # bytes, so scale the element counts by the item size
        num_weights = int(np.sum(details[max_pd:2*max_pd]))
        size = num_weights * q_input.dtype.itemsize
        self.weights_b = cl.Buffer(self.queue.context, mf.READ_ONLY, size)
        size = (num_weights + npars) * q_input.dtype.itemsize
        self.values_b = cl.Buffer(self.queue.context, mf.READ_ONLY, size)
        self.result_b = cl.Buffer(self.queue.context, mf.READ_WRITE,
                               q_input.global_size[0] * q_input.dtype.itemsize)
        self.q_input = q_input # allocated by GpuInput above

        self._need_release = [
            self.details_b, self.weights_b, self.values_b, self.result_b,
            self.q_input,
        ]

    def __call__(self, weights, values, cutoff):
        real = (np.float32 if self.q_input.dtype == generate.F32
                else np.float64 if self.q_input.dtype == generate.F64
                else np.float16 if self.q_input.dtype == generate.F16
                else np.float32)  # will never get here, so use np.float32

        if weights.dtype != real or values.dtype != real:
            raise TypeError("numeric type does not match the kernel type")

        cl.enqueue_copy(self.queue, self.weights_b, weights)
        cl.enqueue_copy(self.queue, self.values_b, values)

        args = [
            np.uint32(self.q_input.nq),
            np.uint32(0),
            np.uint32(self.details[self.pd_stop_index]),
            self.details_b,
            self.weights_b,
            self.values_b,
            self.q_input.q_b,
            self.result_b,
            real(cutoff),
        ]
        self.kernel(self.queue, self.q_input.global_size, None, *args)
        cl.enqueue_copy(self.queue, self.result, self.result_b)

        # nq lives on the input object, not on the kernel itself
        return self.result[:self.q_input.nq]

    def release(self):
        """
        Release resources associated with the kernel.
        """
        for v in self._need_release:
            v.release()
        self._need_release = []

    def __del__(self):
        self.release()