source: sasmodels/sasmodels/kernelcl.py @ 821a9c6

Last change on this file was 821a9c6, checked in by Paul Kienzle <pkienzle@…>, 8 years ago

log the compile commands

1"""
2GPU driver for C kernels
3
4There should be a single GPU environment running on the system.  This
5environment is constructed on the first call to :func:`env`, and the
6same environment is returned on each call.
7
8After retrieving the environment, the next step is to create the kernel.
9This is done with a call to :meth:`GpuEnvironment.make_kernel`, which
10returns the type of data used by the kernel.
11
12Next a :class:`GpuData` object should be created with the correct kind
13of data.  This data object can be used by multiple kernels, for example,
14if the target model is a weighted sum of multiple kernels.  The data
15should include any extra evaluation points required to compute the proper
16data smearing.  This need not match the square grid for 2D data if there
17is an index saying which q points are active.
18
19Together the GpuData, the program, and a device form a :class:`GpuKernel`.
20This kernel is used during fitting, receiving new sets of parameters and
21evaluating them.  The output value is stored in an output buffer on the
22devices, where it can be combined with other structure factors and form
23factors and have instrumental resolution effects applied.
24
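A minimal usage sketch (illustrative only; it assumes *source* and
*model_info* have already been produced by :func:`generate.make_source`
and :func:`generate.make_model_info`, whose call signatures are not shown
here, and the q values below are placeholders):

    import numpy as np
    from sasmodels.kernelcl import GpuModel

    model = GpuModel(source, model_info, dtype='single')
    q = np.linspace(0.001, 0.5, 200)
    kernel = model.make_kernel([q])       # use [qx, qy] for 2-D data
    # Iq = kernel(fixed_pars, pd_pars, cutoff=1e-5)
    kernel.release()
    model.release()
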
In order to use OpenCL for your models, you will need OpenCL drivers for
your machine.  These should be available from your graphics card vendor.
Intel provides OpenCL drivers for CPUs as well as their integrated HD
graphics chipsets.  AMD also provides drivers for Intel CPUs, but as of
this writing the performance is lacking compared to the Intel drivers.
NVidia combines drivers for CUDA and OpenCL in one package.  The result
is a bit messy if you have multiple drivers installed.  You can see which
drivers are available by starting python and running:

    import pyopencl as cl
    cl.create_some_context(interactive=True)

Once you have done that, it will show the available drivers which you
can select.  It will then tell you that you can use these drivers
automatically by setting the PYOPENCL_CTX environment variable.

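For example, a sketch of selecting the context without the interactive
prompt (the value '0' is only a placeholder; use whatever the prompt
above reports for your system), set before this module is imported:

    import os
    os.environ['PYOPENCL_CTX'] = '0'
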
Some graphics cards have multiple devices on the same card.  You cannot
yet use both of them concurrently to evaluate models, but you can run
the program twice using a different device for each session.

OpenCL kernels are compiled when needed by the device driver.  Some
drivers produce compiler output even when there is no error.  You
can see the output by setting PYOPENCL_COMPILER_OUTPUT=1.  It should be
harmless, albeit annoying.
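
For example, from python before the model is compiled:

    import os
    os.environ['PYOPENCL_COMPILER_OUTPUT'] = '1'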
49"""
50import os
51import warnings
52import logging
53
54import numpy as np
55
56try:
57    import pyopencl as cl
58    # Ask OpenCL for the default context so that we know that one exists
59    cl.create_some_context(interactive=False)
60except Exception as exc:
61    warnings.warn(str(exc))
62    raise RuntimeError("OpenCL not available")
63
64from pyopencl import mem_flags as mf
65from pyopencl.characterize import get_fast_inaccurate_build_options
66
67from . import generate
68
# The max loops number is limited by the amount of local memory available
# on the device.  You don't want to make this value too big because it will
# waste resources, nor too small because it may interfere with users trying
# to do their polydispersity calculations.  A value of 2048 should be much
# larger than necessary given that cost grows as npts^k where k is the number
# of polydisperse parameters.
MAX_LOOPS = 2048


ENV = None
def environment():
    """
    Returns a singleton :class:`GpuEnvironment`.

    This provides an OpenCL context and one queue per device.
    """
    global ENV
    if ENV is None:
        ENV = GpuEnvironment()
    return ENV

def has_type(device, dtype):
    """
    Return true if device supports the requested precision.
    """
    if dtype == generate.F32:
        return True
    elif dtype == generate.F64:
        return "cl_khr_fp64" in device.extensions
    elif dtype == generate.F16:
        return "cl_khr_fp16" in device.extensions
    else:
        return False

def get_warp(kernel, queue):
    """
    Return the size of an execution batch for *kernel* running on *queue*.
    """
    return kernel.get_work_group_info(
        cl.kernel_work_group_info.PREFERRED_WORK_GROUP_SIZE_MULTIPLE,
        queue.device)

def _stretch_input(vector, dtype, extra=1e-3, boundary=32):
    """
    Stretch an input vector to the correct boundary.

    Performance on the kernels can drop by a factor of two or more if the
    number of values to compute does not fall on a nice power of two
    boundary.  The trailing additional vector elements are given a
    value of *extra*, and so f(*extra*) will be computed for each of
    them.  The returned array will thus be a subset of the computed array.

    *boundary* should be a power of 2 which is at least 32 for good
    performance on current platforms (as of Jan 2015).  It should
    probably be the max of get_warp(kernel,queue) and
    device.min_data_type_align_size//4.
    """
    remainder = vector.size % boundary
    if remainder != 0:
        size = vector.size + (boundary - remainder)
        vector = np.hstack((vector, [extra] * (size - vector.size)))
    return np.ascontiguousarray(vector, dtype=dtype)

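# Illustrative example (not part of the module API): padding a 100-point
# q vector to the next multiple of 32 appends f(q=1e-3) evaluation points.
#
#     q = np.linspace(0.001, 0.5, 100)
#     padded = _stretch_input(q, np.float32, boundary=32)
#     # padded.size == 128; padded[100:] are all 1e-3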

def compile_model(context, source, dtype, fast=False):
    """
    Build a model to run on the gpu.

    Returns the compiled program.  Raises RuntimeError if any device in
    the context does not support the requested *dtype* (for example, the
    cl_khr_fp64 extension is needed for float64).
    """
    dtype = np.dtype(dtype)
    if not all(has_type(d, dtype) for d in context.devices):
        raise RuntimeError("%s not supported for devices" % dtype)

    source = generate.convert_type(source, dtype)
    # Note: USE_SINCOS makes the intel cpu slower under opencl
    if context.devices[0].type == cl.device_type.GPU:
        source = "#define USE_SINCOS\n" + source
    options = (get_fast_inaccurate_build_options(context.devices[0])
               if fast else [])
    program = cl.Program(context, source).build(options=options)
    #print("done with "+program)
    return program

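# Illustrative sketch of direct use (normally GpuEnvironment.compile_program
# calls this, caching one program per model name and dtype):
#
#     env = environment()
#     context = env.get_context(generate.F32)
#     program = compile_model(context, source, generate.F32, fast=False)
#     # individual kernels are then looked up by name, e.g.
#     # kernel = getattr(program, kernel_name)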

# for now, this returns one device in the context
# TODO: create a context that contains all devices on all platforms
class GpuEnvironment(object):
    """
    GPU context, with possibly many devices, and one queue per device.
    """
    def __init__(self):
        # find gpu context
        #self.context = cl.create_some_context()

        self.context = None
        if 'PYOPENCL_CTX' in os.environ:
            self._create_some_context()

        if not self.context:
            self.context = _get_default_context()

        # Byte boundary for data alignment
        #self.data_boundary = max(d.min_data_type_align_size
        #                         for d in self.context.devices)
        self.queues = [cl.CommandQueue(context, context.devices[0])
                       for context in self.context]
        self.compiled = {}

    def has_type(self, dtype):
        """
        Return True if any device in the environment supports the given type.
        """
        dtype = generate.F32 if dtype == 'fast' else np.dtype(dtype)
        return any(has_type(d, dtype)
                   for context in self.context
                   for d in context.devices)

    def get_queue(self, dtype):
        """
        Return a command queue for the kernels of type dtype.
        """
        for context, queue in zip(self.context, self.queues):
            if all(has_type(d, dtype) for d in context.devices):
                return queue

    def get_context(self, dtype):
        """
        Return an OpenCL context for the kernels of type dtype.
        """
        for context, queue in zip(self.context, self.queues):
            if all(has_type(d, dtype) for d in context.devices):
                return context

    def _create_some_context(self):
        """
        Protected call to cl.create_some_context without interactivity.  Use
        this if PYOPENCL_CTX is set in the environment.  Sets the *context*
        attribute.
        """
        try:
            self.context = [cl.create_some_context(interactive=False)]
        except Exception as exc:
            warnings.warn(str(exc))
            warnings.warn("pyopencl.create_some_context() failed")
            warnings.warn("the environment variable 'PYOPENCL_CTX' might not"
                          " be set correctly")

    def compile_program(self, name, source, dtype, fast=False):
        """
        Compile the program for the device in the given context.
        """
        key = "%s-%s%s" % (name, dtype, ("-fast" if fast else ""))
        if key not in self.compiled:
            context = self.get_context(dtype)
            logging.info("building %s for OpenCL %s"
                         % (key, context.devices[0].name))
            program = compile_model(context, source, np.dtype(dtype), fast)
            self.compiled[key] = program
        return self.compiled[key]

    def release_program(self, name):
        """
        Free memory associated with the program on the device.
        """
        # Programs are cached under "<name>-<dtype>[-fast]" keys, so release
        # every compiled variant of the named model.
        for key in [k for k in self.compiled if k.startswith(name + "-")]:
            self.compiled[key].release()
            del self.compiled[key]

def _get_default_context():
    """
    Get an OpenCL context, preferring GPU over CPU, and preferring Intel
    drivers over AMD drivers.
    """
    # Note: on mobile devices there is automatic clock scaling if either the
    # CPU or the GPU is underutilized; probably doesn't affect us, but if it
    # did, it would mean that putting a busy loop on the CPU while the GPU
    # is running may increase throughput.
    #
    # Macbook pro, base install:
    #     {'Apple': [Intel CPU, NVIDIA GPU]}
    # Macbook pro, base install:
    #     {'Apple': [Intel CPU, Intel GPU]}
    # 2 x nvidia 295 with Intel and NVIDIA opencl drivers installed
    #     {'Intel': [CPU], 'NVIDIA': [GPU, GPU, GPU, GPU]}
    gpu, cpu = None, None
    for platform in cl.get_platforms():
        # AMD provides a much weaker CPU driver than Intel/Apple, so avoid it.
        # If someone has bothered to install the AMD/NVIDIA drivers, prefer
        # them over the integrated graphics driver that may have been supplied
        # with the CPU chipset.
        preferred_cpu = (platform.vendor.startswith('Intel')
                         or platform.vendor.startswith('Apple'))
        preferred_gpu = (platform.vendor.startswith('Advanced')
                         or platform.vendor.startswith('NVIDIA'))
        for device in platform.get_devices():
            if device.type == cl.device_type.GPU:
                # If the existing type is not GPU then it will be CUSTOM
                # or ACCELERATOR, so don't override it.
                if gpu is None or (preferred_gpu and gpu.type == cl.device_type.GPU):
                    gpu = device
            elif device.type == cl.device_type.CPU:
                if cpu is None or preferred_cpu:
                    cpu = device
            else:
                # System has cl.device_type.ACCELERATOR or cl.device_type.CUSTOM;
                # the Intel Phi, for example, registers as an accelerator.
                # Since the user installed a custom device on their system and
                # went through the pain of sorting out OpenCL drivers for it,
                # let's assume they really do want to use it as their primary
                # compute device.
                gpu = device

    # Order the devices by gpu then by cpu; when searching for an available
    # device by data type they will be checked in this order, which means
    # that if the gpu supports double then the cpu will never be used (though
    # we may make it possible to explicitly request the cpu at some point).
    devices = []
    if gpu is not None:
        devices.append(gpu)
    if cpu is not None:
        devices.append(cpu)
    return [cl.Context([d]) for d in devices]


class GpuModel(object):
    """
    GPU wrapper for a single model.

    *source* and *model_info* are the model source and interface as returned
    from :func:`generate.make_source` and :func:`generate.make_model_info`.

    *dtype* is the desired model precision.  Any numpy dtype for single
    or double precision floats will do, such as 'f', 'float32' or 'single'
    for single and 'd', 'float64' or 'double' for double.  Double precision
    is an optional extension which may not be available on all devices.
    Half precision ('float16', 'half') may be available on some devices.
    Fast precision ('fast') is a loose version of single precision, indicating
    that the compiler is allowed to take shortcuts.
    """
    def __init__(self, source, model_info, dtype=generate.F32):
        self.info = model_info
        self.source = source
        self.dtype = generate.F32 if dtype == 'fast' else np.dtype(dtype)
        self.fast = (dtype == 'fast')
        self.program = None  # delay program creation

    def __getstate__(self):
        return self.info, self.source, self.dtype, self.fast

    def __setstate__(self, state):
        self.info, self.source, self.dtype, self.fast = state
        self.program = None

    def make_kernel(self, q_vectors):
        """
        Return a :class:`GpuKernel` which evaluates the model at *q_vectors*.
        """
        if self.program is None:
            compiler = environment().compile_program
            self.program = compiler(self.info['name'], self.source, self.dtype,
                                    self.fast)
        is_2d = len(q_vectors) == 2
        kernel_name = generate.kernel_name(self.info, is_2d)
        kernel = getattr(self.program, kernel_name)
        return GpuKernel(kernel, self.info, q_vectors, self.dtype)

    def release(self):
        """
        Free the resources associated with the model.
        """
        if self.program is not None:
            environment().release_program(self.info['name'])
            self.program = None

    def __del__(self):
        self.release()

# TODO: check that we don't need a destructor for buffers which go out of scope
class GpuInput(object):
    """
    Make q data available to the gpu.

    *q_vectors* is a list of q vectors, which will be *[q]* for 1-D data,
    and *[qx, qy]* for 2-D data.  Internally, the vectors will be reallocated
    to get the best performance on OpenCL, which may involve shifting and
    stretching the array to better match the memory architecture.  Additional
    points will be evaluated with *q=1e-3*.

    *dtype* is the data type for the q vectors.  The data type should be
    set to match that of the kernel, which is an attribute of
    :class:`GpuModel`.  Note that not all kernels support double
    precision, so even if the program was created for double precision,
    the *GpuModel.dtype* may be single precision.

    Call :meth:`release` when complete.  Even if not called directly, the
    buffer will be released when the data object is freed.
    """
    def __init__(self, q_vectors, dtype=generate.F32):
        # TODO: do we ever need double precision q?
        env = environment()
        self.nq = q_vectors[0].size
        self.dtype = np.dtype(dtype)
        self.is_2d = (len(q_vectors) == 2)
        # TODO: stretch input based on get_warp()
        # not doing it now since warp depends on kernel, which is not known
        # at this point, so instead using 32, which is good on the set of
        # architectures tested so far.
        self.q_vectors = [_stretch_input(q, self.dtype, boundary=32)
                          for q in q_vectors]
        context = env.get_context(self.dtype)
        self.global_size = [self.q_vectors[0].size]
        #print("creating inputs of size", self.global_size)
        self.q_buffers = [
            cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=q)
            for q in self.q_vectors
        ]

    def release(self):
        """
        Free the memory.
        """
        for b in self.q_buffers:
            b.release()
        self.q_buffers = []

    def __del__(self):
        self.release()

class GpuKernel(object):
    """
    Callable SAS kernel.

    *kernel* is the compiled OpenCL kernel to call, as retrieved from the
    program by :meth:`GpuModel.make_kernel`

    *model_info* is the module information

    *q_vectors* is the q vectors at which the kernel should be evaluated

    *dtype* is the kernel precision

    The resulting call method takes *fixed_pars*, a list of values for
    the fixed parameters to the kernel, and *pd_pars*, a list of (value, weight)
    vectors for the polydisperse parameters.  *cutoff* determines the
    integration limits: any points with combined weight less than *cutoff*
    will not be calculated.

    Call :meth:`release` when done with the kernel instance.
    """
    def __init__(self, kernel, model_info, q_vectors, dtype):
        q_input = GpuInput(q_vectors, dtype)
        self.kernel = kernel
        self.info = model_info
        self.res = np.empty(q_input.nq, q_input.dtype)
        dim = '2d' if q_input.is_2d else '1d'
        self.fixed_pars = model_info['partype']['fixed-' + dim]
        self.pd_pars = model_info['partype']['pd-' + dim]

        # Inputs and outputs for each kernel call
        # Note: res may be shorter than res_b if global_size != nq
        env = environment()
        self.queue = env.get_queue(dtype)
        self.loops_b = cl.Buffer(self.queue.context, mf.READ_WRITE,
                                 2 * MAX_LOOPS * q_input.dtype.itemsize)
        self.res_b = cl.Buffer(self.queue.context, mf.READ_WRITE,
                               q_input.global_size[0] * q_input.dtype.itemsize)
        self.q_input = q_input

        self._need_release = [self.loops_b, self.res_b, self.q_input]

    def __call__(self, fixed_pars, pd_pars, cutoff):
        real = (np.float32 if self.q_input.dtype == generate.F32
                else np.float64 if self.q_input.dtype == generate.F64
                else np.float16 if self.q_input.dtype == generate.F16
                else np.float32)  # will never get here, so use np.float32

        #print("pars", fixed_pars, pd_pars)
        res_bi = self.res_b
        nq = np.uint32(self.q_input.nq)
        if pd_pars:
            cutoff = real(cutoff)
            loops_N = [np.uint32(len(p[0])) for p in pd_pars]
            # Interleave the (value, weight) vectors into a single flat array.
            loops = np.hstack(pd_pars)
            loops = np.ascontiguousarray(loops.T, self.q_input.dtype).flatten()
            #print("loops", loops_N, loops)

            #import sys; print("opencl eval", pars)
            #print("opencl eval", pars)
            if len(loops) > 2 * MAX_LOOPS:
                raise ValueError("too many polydispersity points")

            loops_bi = self.loops_b
            cl.enqueue_copy(self.queue, loops_bi, loops)
            loops_l = cl.LocalMemory(len(loops.data))
            #ctx = environment().context
            #loops_bi = cl.Buffer(ctx, mf.READ_ONLY|mf.COPY_HOST_PTR, hostbuf=loops)
            dispersed = [loops_bi, loops_l, cutoff] + loops_N
        else:
            dispersed = []
        fixed = [real(p) for p in fixed_pars]
        args = self.q_input.q_buffers + [res_bi, nq] + dispersed + fixed
        self.kernel(self.queue, self.q_input.global_size, None, *args)
        cl.enqueue_copy(self.queue, self.res, res_bi)

        return self.res

    def release(self):
        """
        Release resources associated with the kernel.
        """
        for v in self._need_release:
            v.release()
        self._need_release = []

    def __del__(self):
        self.release()
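
# Illustrative call sketch (the parameter values and the single dispersed
# parameter below are placeholders; the actual lists are defined by the
# model's 'partype' tables):
#
#     kernel = model.make_kernel([q])              # see GpuModel above
#     values = np.linspace(10., 30., 21)
#     weights = np.exp(-0.5*((values - 20.)/3.)**2)
#     Iq = kernel([1.0, 20.0, 4.0, 1.0], [(values, weights)], cutoff=1e-5)
#     kernel.release()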