source: sasmodels/sasmodels/kernelcl.py @ c094758

Last change on this file: c094758, checked in by Paul Kienzle <pkienzle@…>, 8 years ago

fix autogen 2d plots (mask passes 0 not 1)

1"""
2GPU driver for C kernels
3
4There should be a single GPU environment running on the system.  This
5environment is constructed on the first call to :func:`env`, and the
6same environment is returned on each call.
7
8After retrieving the environment, the next step is to create the kernel.
9This is done with a call to :meth:`GpuEnvironment.make_kernel`, which
10returns the type of data used by the kernel.
11
12Next a :class:`GpuData` object should be created with the correct kind
13of data.  This data object can be used by multiple kernels, for example,
14if the target model is a weighted sum of multiple kernels.  The data
15should include any extra evaluation points required to compute the proper
16data smearing.  This need not match the square grid for 2D data if there
17is an index saying which q points are active.
18
19Together the GpuData, the program, and a device form a :class:`GpuKernel`.
20This kernel is used during fitting, receiving new sets of parameters and
21evaluating them.  The output value is stored in an output buffer on the
22devices, where it can be combined with other structure factors and form
23factors and have instrumental resolution effects applied.
24
25In order to use OpenCL for your models, you will need OpenCL drivers for
26your machine.  These should be available from your graphics card vendor.
27Intel provides OpenCL drivers for CPUs as well as their integrated HD
28graphics chipsets.  AMD also provides drivers for Intel CPUs, but as of
29this writing the performance is lacking compared to the Intel drivers.
30NVidia combines drivers for CUDA and OpenCL in one package.  The result
31is a bit messy if you have multiple drivers installed.  You can see which
32drivers are available by starting python and running:
33
34    import pyopencl as cl
35    cl.create_some_context(interactive=True)
36
37Once you have done that, it will show the available drivers which you
38can select.  It will then tell you that you can use these drivers
39automatically by setting the PYOPENCL_CTX environment variable.
40
41Some graphics cards have multiple devices on the same card.  You cannot
42yet use both of them concurrently to evaluate models, but you can run
43the program twice using a different device for each session.
44
45OpenCL kernels are compiled when needed by the device driver.  Some
46drivers produce compiler output even when there is no error.  You
47can see the output by setting PYOPENCL_COMPILER_OUTPUT=1.  It should be
48harmless, albeit annoying.
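
A rough sketch of this workflow (here *q*, *fixed_pars* and *pd_pars* are
placeholder values supplied by the caller, and *source* and *model_info*
come from :func:`generate.make_source` and :func:`generate.make_model_info`):

    # source and model_info describe the model to be compiled
    model = GpuModel(source, model_info, dtype='single')
    kernel = model([q])                   # use [qx, qy] for 2-D data
    Iq = kernel(fixed_pars, pd_pars, cutoff=1e-5)
    kernel.release()
    model.release()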
49"""
50import os
51import warnings
52
53import numpy as np
54
55try:
56    import pyopencl as cl
57    # Ask OpenCL for the default context so that we know that one exists
58    cl.create_some_context(interactive=False)
59except Exception as exc:
60    warnings.warn(str(exc))
61    raise RuntimeError("OpenCL not available")
62
63from pyopencl import mem_flags as mf
64from pyopencl.characterize import get_fast_inaccurate_build_options
65
66from . import generate
67
# The max loops number is limited by the amount of local memory available
# on the device.  You don't want to make this value too big because it will
# waste resources, nor too small because it may interfere with users trying
# to do their polydispersity calculations.  A value of 2048 should be much
# larger than necessary given that cost grows as npts^k where k is the number
# of polydisperse parameters.
MAX_LOOPS = 2048


ENV = None
def environment():
    """
    Returns a singleton :class:`GpuEnvironment`.

    This provides one or more OpenCL contexts, with one queue per context.
    """
    global ENV
    if ENV is None:
        ENV = GpuEnvironment()
    return ENV

def has_type(device, dtype):
    """
    Return true if device supports the requested precision.
    """
    if dtype == generate.F32:
        return True
    elif dtype == generate.F64:
        return "cl_khr_fp64" in device.extensions
    elif dtype == generate.F16:
        return "cl_khr_fp16" in device.extensions
    else:
        return False

def get_warp(kernel, queue):
    """
    Return the size of an execution batch for *kernel* running on *queue*.
    """
    return kernel.get_work_group_info(
        cl.kernel_work_group_info.PREFERRED_WORK_GROUP_SIZE_MULTIPLE,
        queue.device)

def _stretch_input(vector, dtype, extra=1e-3, boundary=32):
    """
    Stretch an input vector to the correct boundary.

    Performance on the kernels can drop by a factor of two or more if the
    number of values to compute does not fall on a nice power of two
    boundary.  The trailing additional vector elements are given a
    value of *extra*, and so f(*extra*) will be computed for each of
    them.  The values of interest are thus a subset of the computed array.

    *boundary* should be a power of 2 which is at least 32 for good
    performance on current platforms (as of Jan 2015).  It should
    probably be the max of get_warp(kernel, queue) and
    device.min_data_type_align_size//4.
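
    For example (a sketch of the padding behaviour), a 1000-point vector
    padded to a 32-value boundary grows to 1024 points, with the final 24
    entries set to *extra*:

        q = np.linspace(0.001, 0.5, 1000)
        padded = _stretch_input(q, "f")
        assert padded.size == 1024
        assert padded[-1] == np.float32(1e-3)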
124    """
125    remainder = vector.size % boundary
126    if remainder != 0:
127        size = vector.size + (boundary - remainder)
128        vector = np.hstack((vector, [extra] * (size - vector.size)))
129    return np.ascontiguousarray(vector, dtype=dtype)
130
131
def compile_model(context, source, dtype, fast=False):
    """
    Build a model to run on the gpu.

    Returns the compiled program.  Raises a RuntimeError if the requested
    *dtype* is not supported by all of the devices in the context (for
    example, float64 on a device without the cl_khr_fp64 extension).
    """
    dtype = np.dtype(dtype)
    if not all(has_type(d, dtype) for d in context.devices):
        raise RuntimeError("%s not supported for devices"%dtype)

    source = generate.convert_type(source, dtype)
    # Note: USE_SINCOS makes the intel cpu slower under opencl
    if context.devices[0].type == cl.device_type.GPU:
        source = "#define USE_SINCOS\n" + source
    options = (get_fast_inaccurate_build_options(context.devices[0])
               if fast else [])
    program = cl.Program(context, source).build(options=options)
    return program

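# A minimal sketch of direct use (normally this is reached through
# GpuEnvironment.compile_program; *source* is assumed to hold generated
# OpenCL kernel code):
#
#     context = environment().get_context(generate.F32)
#     program = compile_model(context, source, generate.F32, fast=True)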

# for now, this returns one device in the context
# TODO: create a context that contains all devices on all platforms
class GpuEnvironment(object):
    """
    GPU environment, consisting of one or more OpenCL contexts, with one
    command queue per context.
    """
    def __init__(self):
        # find gpu context
        #self.context = cl.create_some_context()

        self.context = None
        if 'PYOPENCL_CTX' in os.environ:
            self._create_some_context()

        if not self.context:
            self.context = _get_default_context()

        # Byte boundary for data alignment
        #self.data_boundary = max(d.min_data_type_align_size
        #                         for d in self.context.devices)
        self.queues = [cl.CommandQueue(context, context.devices[0])
                       for context in self.context]
        self.compiled = {}

    def has_type(self, dtype):
        """
        Return True if any device in the environment supports the given type.
        """
        dtype = generate.F32 if dtype == 'fast' else np.dtype(dtype)
        return any(has_type(d, dtype)
                   for context in self.context
                   for d in context.devices)

    def get_queue(self, dtype):
        """
        Return a command queue for the kernels of type dtype.
        """
        for context, queue in zip(self.context, self.queues):
            if all(has_type(d, dtype) for d in context.devices):
                return queue

    def get_context(self, dtype):
        """
        Return an OpenCL context for the kernels of type dtype.
        """
        for context, queue in zip(self.context, self.queues):
            if all(has_type(d, dtype) for d in context.devices):
                return context

    def _create_some_context(self):
        """
        Protected call to cl.create_some_context without interactivity.  Use
        this if PYOPENCL_CTX is set in the environment.  Sets the *context*
        attribute.
        """
        try:
            self.context = [cl.create_some_context(interactive=False)]
        except Exception as exc:
            warnings.warn(str(exc))
            warnings.warn("pyopencl.create_some_context() failed")
            warnings.warn("the environment variable 'PYOPENCL_CTX' might not be set correctly")

    def compile_program(self, name, source, dtype, fast=False):
        """
        Compile the program for the devices in the context matching *dtype*.
        """
        key = "%s-%s-%s"%(name, dtype, fast)
        if key not in self.compiled:
            #print("compiling", name)
            dtype = np.dtype(dtype)
            program = compile_model(self.get_context(dtype), source, dtype, fast)
            self.compiled[key] = program
        return self.compiled[key]

    def release_program(self, name):
        """
        Free memory associated with the program on the device.
        """
        # Programs are cached under a "name-dtype-fast" key, so release every
        # compiled variant of the named model.
        for key in [k for k in self.compiled if k.startswith(name + "-")]:
            self.compiled[key].release()
            del self.compiled[key]

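# A small sketch of how the environment is typically used (hypothetical
# model name and *source*; programs are cached by (name, dtype, fast), so
# the second call returns the already-compiled program):
#
#     env = environment()
#     if env.has_type(generate.F64):
#         program = env.compile_program("my_model", source, generate.F64)
#         assert program is env.compile_program("my_model", source, generate.F64)
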
def _get_default_context():
    """
    Get an OpenCL context, preferring GPU over CPU.  For the CPU, prefer
    the Intel/Apple drivers over AMD; for the GPU, prefer the vendor
    AMD/NVIDIA drivers over an integrated graphics driver.
    """
    # Note: on mobile devices there is automatic clock scaling if either the
    # CPU or the GPU is underutilized; probably doesn't affect us, but if
    # it did, it would mean that putting a busy loop on the CPU while the GPU
    # is running may increase throughput.
    #
    # Macbook pro, base install:
    #     {'Apple': [Intel CPU, NVIDIA GPU]}
    # Macbook pro, base install:
    #     {'Apple': [Intel CPU, Intel GPU]}
    # 2 x nvidia 295 with Intel and NVIDIA opencl drivers installed
    #     {'Intel': [CPU], 'NVIDIA': [GPU, GPU, GPU, GPU]}
    gpu, cpu = None, None
    for platform in cl.get_platforms():
        # AMD provides a much weaker CPU driver than Intel/Apple, so avoid it.
        # If someone has bothered to install the AMD/NVIDIA drivers, prefer
        # them over the integrated graphics driver that may have been supplied
        # with the CPU chipset.
        preferred_cpu = platform.vendor.startswith('Intel') or platform.vendor.startswith('Apple')
        preferred_gpu = platform.vendor.startswith('Advanced') or platform.vendor.startswith('NVIDIA')
        for device in platform.get_devices():
            if device.type == cl.device_type.GPU:
                # If the existing type is not GPU then it will be CUSTOM or
                # ACCELERATOR, so don't override it.
                if gpu is None or (preferred_gpu and gpu.type == cl.device_type.GPU):
                    gpu = device
            elif device.type == cl.device_type.CPU:
                if cpu is None or preferred_cpu:
                    cpu = device
            else:
                # System has cl.device_type.ACCELERATOR or cl.device_type.CUSTOM;
                # an Intel Phi, for example, registers as an accelerator.  Since
                # the user installed a custom device on their system and went
                # through the pain of sorting out OpenCL drivers for it, let's
                # assume they really do want to use it as their primary compute
                # device.
                gpu = device

    # Order the devices by gpu then cpu; when searching for an available
    # device by data type they will be checked in this order, which means
    # that if the gpu supports double then the cpu will never be used (though
    # we may make it possible to explicitly request the cpu at some point).
    devices = []
    if gpu is not None:
        devices.append(gpu)
    if cpu is not None:
        devices.append(cpu)
    return [cl.Context([d]) for d in devices]


class GpuModel(object):
    """
    GPU wrapper for a single model.

    *source* and *model_info* are the model source and interface as returned
    from :func:`generate.make_source` and :func:`generate.make_model_info`.

    *dtype* is the desired model precision.  Any numpy dtype for single
    or double precision floats will do, such as 'f', 'float32' or 'single'
    for single and 'd', 'float64' or 'double' for double.  Double precision
    is an optional extension which may not be available on all devices.
    Half precision ('float16', 'half') may be available on some devices.
    Fast precision ('fast') is a loose version of single precision, indicating
    that the compiler is allowed to take shortcuts.
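
    For example, to fall back to single precision when double is not
    available (a sketch):

        dtype = 'double' if environment().has_type('double') else 'single'
        model = GpuModel(source, model_info, dtype=dtype)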
301    """
302    def __init__(self, source, model_info, dtype=generate.F32):
303        self.info = model_info
304        self.source = source
305        self.dtype = generate.F32 if dtype == 'fast' else np.dtype(dtype)
306        self.fast = (dtype == 'fast')
307        self.program = None # delay program creation
308
309    def __getstate__(self):
310        return self.info, self.source, self.dtype, self.fast
311
312    def __setstate__(self, state):
313        self.info, self.source, self.dtype, self.fast = state
314        self.program = None
315
316    def __call__(self, q_vectors):
317        if self.program is None:
318            compiler = environment().compile_program
319            self.program = compiler(self.info['name'], self.source, self.dtype,
320                                    self.fast)
321        is_2d = len(q_vectors) == 2
322        kernel_name = generate.kernel_name(self.info, is_2d)
323        kernel = getattr(self.program, kernel_name)
324        return GpuKernel(kernel, self.info, q_vectors, self.dtype)
325
326    def release(self):
327        """
328        Free the resources associated with the model.
329        """
330        if self.program is not None:
331            environment().release_program(self.info['name'])
332            self.program = None
333
334    def __del__(self):
335        self.release()
336
# TODO: check that we don't need a destructor for buffers which go out of scope
class GpuInput(object):
    """
    Make q data available to the gpu.

    *q_vectors* is a list of q vectors, which will be *[q]* for 1-D data,
    and *[qx, qy]* for 2-D data.  Internally, the vectors will be reallocated
    to get the best performance on OpenCL, which may involve shifting and
    stretching the array to better match the memory architecture.  Additional
    points will be evaluated with *q=1e-3*.

    *dtype* is the data type for the q vectors.  The data type should be
    set to match that of the kernel, which is an attribute of
    :class:`GpuModel`.  Note that not all kernels support double
    precision, so even if the model was created for double precision,
    the *GpuModel.dtype* may be single precision.

    Call :meth:`release` when complete.  Even if not called directly, the
    buffer will be released when the data object is freed.
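
    For example, for 2-D data on a flattened detector grid (a sketch, with
    *dtype* matching the compiled model):

        qx, qy = np.meshgrid(np.linspace(-0.5, 0.5, 128),
                             np.linspace(-0.5, 0.5, 128))
        q_input = GpuInput([qx.flatten(), qy.flatten()], dtype=model.dtype)
        ...
        q_input.release()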
356    """
357    def __init__(self, q_vectors, dtype=generate.F32):
358        # TODO: do we ever need double precision q?
359        env = environment()
360        self.nq = q_vectors[0].size
361        self.dtype = np.dtype(dtype)
362        self.is_2d = (len(q_vectors) == 2)
363        # TODO: stretch input based on get_warp()
364        # not doing it now since warp depends on kernel, which is not known
365        # at this point, so instead using 32, which is good on the set of
366        # architectures tested so far.
367        self.q_vectors = [_stretch_input(q, self.dtype, 32) for q in q_vectors]
368        context = env.get_context(self.dtype)
369        self.global_size = [self.q_vectors[0].size]
370        #print("creating inputs of size", self.global_size)
371        self.q_buffers = [
372            cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=q)
373            for q in self.q_vectors
374        ]
375
376    def release(self):
377        """
378        Free the memory.
379        """
380        for b in self.q_buffers:
381            b.release()
382        self.q_buffers = []
383
384    def __del__(self):
385        self.release()
386
class GpuKernel(object):
    """
    Callable SAS kernel.

    *kernel* is the compiled OpenCL kernel to call, as retrieved from the
    compiled program

    *model_info* is the module information

    *q_vectors* is the q vectors at which the kernel should be evaluated

    *dtype* is the kernel precision

    The resulting call method takes *fixed_pars*, a list of values for
    the fixed parameters to the kernel, and *pd_pars*, a list of (value, weight)
    vectors for the polydisperse parameters.  *cutoff* determines the
    integration limits: any points with combined weight less than *cutoff*
    will not be calculated.

    Call :meth:`release` when done with the kernel instance.
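
    For example, evaluating with a single polydisperse radius parameter
    (a sketch; *fixed_pars* and the parameter order are determined by
    *model_info['partype']*):

        radius = np.linspace(40., 60., 21)
        weight = np.exp(-0.5*((radius - 50.)/5.)**2)
        Iq = kernel(fixed_pars, [(radius, weight)], cutoff=1e-5)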
406    """
407    def __init__(self, kernel, model_info, q_vectors, dtype):
408        q_input = GpuInput(q_vectors, dtype)
409        self.kernel = kernel
410        self.info = model_info
411        self.res = np.empty(q_input.nq, q_input.dtype)
412        dim = '2d' if q_input.is_2d else '1d'
413        self.fixed_pars = model_info['partype']['fixed-' + dim]
414        self.pd_pars = model_info['partype']['pd-' + dim]
415
416        # Inputs and outputs for each kernel call
417        # Note: res may be shorter than res_b if global_size != nq
418        env = environment()
419        self.queue = env.get_queue(dtype)
420        self.loops_b = cl.Buffer(self.queue.context, mf.READ_WRITE,
421                                 2 * MAX_LOOPS * q_input.dtype.itemsize)
422        self.res_b = cl.Buffer(self.queue.context, mf.READ_WRITE,
423                               q_input.global_size[0] * q_input.dtype.itemsize)
424        self.q_input = q_input
425
426        self._need_release = [self.loops_b, self.res_b, self.q_input]
427
    def __call__(self, fixed_pars, pd_pars, cutoff=1e-5):
        real = (np.float32 if self.q_input.dtype == generate.F32
                else np.float64 if self.q_input.dtype == generate.F64
                else np.float16 if self.q_input.dtype == generate.F16
                else np.float32)  # will never get here, so use np.float32

        #print("pars", fixed_pars, pd_pars)
        res_bi = self.res_b
        nq = np.uint32(self.q_input.nq)
        if pd_pars:
            cutoff = real(cutoff)
            loops_N = [np.uint32(len(p[0])) for p in pd_pars]
            loops = np.hstack(pd_pars)
            loops = np.ascontiguousarray(loops.T, self.q_input.dtype).flatten()
            #print("loops", loops_N, loops)

            #import sys; print("opencl eval", fixed_pars, pd_pars)
            #print("opencl eval", fixed_pars, pd_pars)
            if len(loops) > 2 * MAX_LOOPS:
                raise ValueError("too many polydispersity points")

            loops_bi = self.loops_b
            cl.enqueue_copy(self.queue, loops_bi, loops)
            loops_l = cl.LocalMemory(len(loops.data))
            #ctx = environment().context
            #loops_bi = cl.Buffer(ctx, mf.READ_ONLY|mf.COPY_HOST_PTR, hostbuf=loops)
            dispersed = [loops_bi, loops_l, cutoff] + loops_N
        else:
            dispersed = []
        fixed = [real(p) for p in fixed_pars]
        args = self.q_input.q_buffers + [res_bi, nq] + dispersed + fixed
        self.kernel(self.queue, self.q_input.global_size, None, *args)
        cl.enqueue_copy(self.queue, self.res, res_bi)

        return self.res

    def release(self):
        """
        Release resources associated with the kernel.
        """
        for v in self._need_release:
            v.release()
        self._need_release = []

    def __del__(self):
        self.release()