source: sasmodels/sasmodels/kernelcl.py @ 5d316e9

Last change on this file: 5d316e9, checked in by Paul Kienzle <pkienzle@…>, 8 years ago

support fast and loose single precision and half precision

"""
GPU support through OpenCL

There should be a single GPU environment running on the system.  This
environment is constructed on the first call to :func:`env`, and the
same environment is returned on each call.

After retrieving the environment, the next step is to create the kernel.
This is done with a call to :meth:`GpuEnvironment.make_kernel`, which
returns the type of data used by the kernel.

Next a :class:`GpuData` object should be created with the correct kind
of data.  This data object can be used by multiple kernels, for example,
if the target model is a weighted sum of multiple kernels.  The data
should include any extra evaluation points required to compute the proper
data smearing.  This need not match the square grid for 2D data if there
is an index saying which q points are active.

Together the GpuData, the program, and a device form a :class:`GpuKernel`.
This kernel is used during fitting, receiving new sets of parameters and
evaluating them.  The output value is stored in an output buffer on the
devices, where it can be combined with other structure factors and form
factors and have instrumental resolution effects applied.

In order to use OpenCL for your models, you will need OpenCL drivers for
your machine.  These should be available from your graphics card vendor.
Intel provides OpenCL drivers for CPUs as well as their integrated HD
graphics chipsets.  AMD also provides drivers for Intel CPUs, but as of
this writing the performance is lacking compared to the Intel drivers.
NVidia combines drivers for CUDA and OpenCL in one package.  The result
is a bit messy if you have multiple drivers installed.  You can see which
drivers are available by starting python and running:

    import pyopencl as cl
    cl.create_some_context(interactive=True)

Once you have done that, it will show the available drivers which you
can select.  It will then tell you that you can use these drivers
automatically by setting the PYOPENCL_CTX environment variable.

Some graphics cards have multiple devices on the same card.  You cannot
yet use both of them concurrently to evaluate models, but you can run
the program twice using a different device for each session.

OpenCL kernels are compiled when needed by the device driver.  Some
drivers produce compiler output even when there is no error.  You
can see the output by setting PYOPENCL_COMPILER_OUTPUT=1.  It should be
harmless, albeit annoying.
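
For example, to pin the session to a particular device and echo any
compiler messages, set the variables before the OpenCL context is
created (a sketch; the index "0:1" is only an illustration, so use
whatever index the interactive prompt reported for your hardware):

    import os
    os.environ["PYOPENCL_CTX"] = "0:1"            # platform 0, device 1
    os.environ["PYOPENCL_COMPILER_OUTPUT"] = "1"   # show build messages
    import sasmodels.kernelcl                      # module checks for a context on import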
"""
import os
import warnings

import numpy as np

try:
    import pyopencl as cl
    # Ask OpenCL for the default context so that we know that one exists
    cl.create_some_context(interactive=False)
except Exception as exc:
    warnings.warn(str(exc))
    raise RuntimeError("OpenCL not available")

from pyopencl import mem_flags as mf
from pyopencl.characterize import get_fast_inaccurate_build_options

from . import generate

# The max loops number is limited by the amount of local memory available
# on the device.  You don't want to make this value too big because it will
# waste resources, nor too small because it may interfere with users trying
# to do their polydispersity calculations.  A value of 2048 should be much
# larger than necessary given that cost grows as npts^k where k is the number
# of polydisperse parameters.
MAX_LOOPS = 2048


ENV = None
def environment():
    """
    Returns a singleton :class:`GpuEnvironment`.

    This provides an OpenCL context and one queue per device.
    """
    global ENV
    if ENV is None:
        ENV = GpuEnvironment()
    return ENV

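
# Example (an illustrative sketch, not part of the original module): the
# singleton environment can be queried for supported precisions before
# choosing a dtype for a model:
#
#     env = environment()
#     dtype = np.float64 if env.has_type(np.float64) else np.float32
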
def has_type(device, dtype):
    """
    Return true if device supports the requested precision.
    """
    if dtype == generate.F32:
        return True
    elif dtype == generate.F64:
        return "cl_khr_fp64" in device.extensions
    elif dtype == generate.F16:
        return "cl_khr_fp16" in device.extensions
    else:
        return False

def get_warp(kernel, queue):
    """
    Return the size of an execution batch for *kernel* running on *queue*.
    """
    return kernel.get_work_group_info(
        cl.kernel_work_group_info.PREFERRED_WORK_GROUP_SIZE_MULTIPLE,
        queue.device)

def _stretch_input(vector, dtype, extra=1e-3, boundary=32):
    """
    Stretch an input vector to the correct boundary.

    Performance on the kernels can drop by a factor of two or more if the
    number of values to compute does not fall on a nice power of two
    boundary.   The trailing additional vector elements are given a
    value of *extra*, and so f(*extra*) will be computed for each of
    them.  The returned array will thus be a subset of the computed array.

    *boundary* should be a power of 2 which is at least 32 for good
    performance on current platforms (as of Jan 2015).  It should
    probably be the max of get_warp(kernel,queue) and
    device.min_data_type_align_size//4.
    """
    remainder = vector.size % boundary
    if remainder != 0:
        size = vector.size + (boundary - remainder)
        vector = np.hstack((vector, [extra] * (size - vector.size)))
    return np.ascontiguousarray(vector, dtype=dtype)


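# Example (an illustrative sketch): a 100-point q vector is padded up to the
# next multiple of 32; the 28 padding slots are filled with the *extra* value
# so that evaluating them is harmless.
#
#     q = np.linspace(0.001, 0.5, 100)
#     padded = _stretch_input(q, np.float32, boundary=32)
#     assert padded.size == 128 and padded[-1] == np.float32(1e-3)
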
def compile_model(context, source, dtype, fast=False):
    """
    Build a model to run on the gpu.

    Returns the compiled program.  Raises a RuntimeError if the requested
    precision (for example, float64 via the cl_khr_fp64 extension) is not
    supported by every device in the context.
    """
    dtype = np.dtype(dtype)
    if not all(has_type(d, dtype) for d in context.devices):
        raise RuntimeError("%s not supported for devices"%dtype)

    source = generate.convert_type(source, dtype)
    # Note: USE_SINCOS makes the intel cpu slower under opencl
    if context.devices[0].type == cl.device_type.GPU:
        source = "#define USE_SINCOS\n" + source
    options = (get_fast_inaccurate_build_options(context.devices[0])
               if fast else [])
    program = cl.Program(context, source).build(options=options)
    return program


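# Example (an illustrative sketch; *model_source* stands in for kernel source
# text produced by the generate module for some model):
#
#     program = compile_model(environment().context, model_source,
#                             dtype=np.float32, fast=True)
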
def make_result(self, size):
    self.res = np.empty(size, dtype=self.dtype)
    self.res_b = cl.Buffer(self.program.context, mf.READ_WRITE, self.res.nbytes)
    return self.res, self.res_b


# for now, this returns one device in the context
# TODO: create a context that contains all devices on all platforms
class GpuEnvironment(object):
    """
    GPU context, with possibly many devices, and one queue per device.
    """
    def __init__(self):
        # find gpu context
        #self.context = cl.create_some_context()

        self.context = None
        if 'PYOPENCL_CTX' in os.environ:
            self._create_some_context()

        if not self.context:
            self.context = _get_default_context()

        # Byte boundary for data alignment
        #self.data_boundary = max(d.min_data_type_align_size
        #                         for d in self.context.devices)
        self.queues = [cl.CommandQueue(self.context, d)
                       for d in self.context.devices]
        self.compiled = {}

    def has_type(self, dtype):
        dtype = np.dtype(dtype)
        return all(has_type(d, dtype) for d in self.context.devices)

    def _create_some_context(self):
        try:
            self.context = cl.create_some_context(interactive=False)
        except Exception as exc:
            warnings.warn(str(exc))
            warnings.warn("pyopencl.create_some_context() failed")
            warnings.warn("the environment variable 'PYOPENCL_CTX' might not be set correctly")

    def compile_program(self, name, source, dtype, fast=False):
        if name not in self.compiled:
            #print("compiling",name)
            self.compiled[name] = compile_model(self.context, source, dtype,
                                                fast)
        return self.compiled[name]

    def release_program(self, name):
        if name in self.compiled:
            self.compiled[name].release()
            del self.compiled[name]

def _get_default_context():
    default = None
    for platform in cl.get_platforms():
        for device in platform.get_devices():
            if device.type == cl.device_type.GPU:
                return cl.Context([device])
            if default is None:
                default = device

    if not default:
        raise RuntimeError("OpenCL device not found")

    return cl.Context([default])


class GpuModel(object):
    """
    GPU wrapper for a single model.

    *source* and *info* are the model source and interface as returned
    from :func:`gen.make`.

    *dtype* is the desired model precision.  Any numpy dtype for single
    or double precision floats will do, such as 'f', 'float32' or 'single'
    for single and 'd', 'float64' or 'double' for double.  Double precision
    is an optional extension which may not be available on all devices.

    *fast* is True if fast inaccurate math is acceptable (40% speed increase).
    """
    def __init__(self, source, info, dtype=generate.F32, fast=False):
        self.info = info
        self.source = source
        self.dtype = np.dtype(dtype)
        self.fast = fast
        self.program = None # delay program creation

    def __getstate__(self):
        state = self.__dict__.copy()
        state['program'] = None
        return state

    def __setstate__(self, state):
        self.__dict__ = state.copy()

    def __call__(self, q_input):
        if self.dtype != q_input.dtype:
            raise TypeError("data is %s kernel is %s"
                            % (q_input.dtype, self.dtype))
        if self.program is None:
            compiler = environment().compile_program
            self.program = compiler(self.info['name'], self.source, self.dtype,
                                    self.fast)
        kernel_name = generate.kernel_name(self.info, q_input.is_2D)
        kernel = getattr(self.program, kernel_name)
        return GpuKernel(kernel, self.info, q_input)

    def release(self):
        if self.program is not None:
            environment().release_program(self.info['name'])
            self.program = None

    def make_input(self, q_vectors):
        """
        Make q input vectors available to the model.

        Note that each model needs its own q vector even in the case of
        mixture models because some models may be OpenCL, some may be
        ctypes and some may be pure python.
        """
        return GpuInput(q_vectors, dtype=self.dtype)

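
# Example (an illustrative sketch; *model_source* and *model_info* stand in
# for the source/interface pair produced by the generate module, and the
# parameter lists are placeholders):
#
#     model = GpuModel(model_source, model_info, dtype='single')
#     q_input = model.make_input([np.logspace(-3, -0.3, 200)])
#     kernel = model(q_input)
#     Iq = kernel(fixed_pars, pd_pars, cutoff=1e-5)
#     kernel.release()
#     q_input.release()
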
# TODO: check that we don't need a destructor for buffers which go out of scope
class GpuInput(object):
    """
    Make q data available to the gpu.

    *q_vectors* is a list of q vectors, which will be *[q]* for 1-D data,
    and *[qx, qy]* for 2-D data.  Internally, the vectors will be reallocated
    to get the best performance on OpenCL, which may involve shifting and
    stretching the array to better match the memory architecture.  Additional
    points will be evaluated with *q=1e-3*.

    *dtype* is the data type for the q vectors. The data type should be
    set to match that of the kernel, which is an attribute of
    :class:`GpuModel`.  Note that not all kernels support double
    precision, so even if the program was created for double precision,
    the *GpuModel.dtype* may be single precision.

    Call :meth:`release` when complete.  Even if not called directly, the
    buffer will be released when the data object is freed.
    """
    def __init__(self, q_vectors, dtype=generate.F32):
        env = environment()
        self.nq = q_vectors[0].size
        self.dtype = np.dtype(dtype)
        self.is_2D = (len(q_vectors) == 2)
        # TODO: stretch input based on get_warp()
        # not doing it now since warp depends on kernel, which is not known
        # at this point, so instead using 32, which is good on the set of
        # architectures tested so far.
        self.q_vectors = [_stretch_input(q, self.dtype, boundary=32)
                          for q in q_vectors]
        self.q_buffers = [
            cl.Buffer(env.context, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=q)
            for q in self.q_vectors
        ]
        self.global_size = [self.q_vectors[0].size]

    def release(self):
        for b in self.q_buffers:
            b.release()
        self.q_buffers = []

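# Example (an illustrative sketch): wrapping 1-D and 2-D q values so that a
# float32 model can evaluate them.
#
#     q1d = GpuInput([np.linspace(0.001, 0.5, 200)], dtype=generate.F32)
#     qx, qy = np.meshgrid(np.linspace(-0.5, 0.5, 64),
#                          np.linspace(-0.5, 0.5, 64))
#     q2d = GpuInput([qx.flatten(), qy.flatten()], dtype=generate.F32)
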
class GpuKernel(object):
    """
    Callable SAS kernel.

    *kernel* is the compiled OpenCL kernel to call.

    *info* is the module information.

    *q_input* is the :class:`GpuInput` q vectors at which the kernel should be
    evaluated.

    The resulting call method takes *fixed_pars*, a list of values for
    the fixed parameters to the kernel, and *pd_pars*, a list of (value, weight)
    vectors for the polydisperse parameters.  *cutoff* determines the
    integration limits: any points with combined weight less than *cutoff*
    will not be calculated.

    Call :meth:`release` when done with the kernel instance.
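
    For example (an illustrative sketch; the fixed parameter values and
    their order are placeholders that must match the model's parameter
    lists):

        radius = np.linspace(40., 60., 21)
        weight = np.exp(-0.5*((radius - 50.)/5.)**2)
        Iq = kernel([1.0, 0.001], [(radius, weight)], cutoff=1e-5)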
    """
    def __init__(self, kernel, info, q_input):
        self.q_input = q_input
        self.kernel = kernel
        self.info = info
        self.res = np.empty(q_input.nq, q_input.dtype)
        dim = '2d' if q_input.is_2D else '1d'
        self.fixed_pars = info['partype']['fixed-' + dim]
        self.pd_pars = info['partype']['pd-' + dim]

        # Inputs and outputs for each kernel call
        # Note: res may be shorter than res_b if global_size != nq
        env = environment()
        self.loops_b = [cl.Buffer(env.context, mf.READ_WRITE,
                                  2 * MAX_LOOPS * q_input.dtype.itemsize)
                        for _ in env.queues]
        self.res_b = [cl.Buffer(env.context, mf.READ_WRITE,
                                q_input.global_size[0] * q_input.dtype.itemsize)
                      for _ in env.queues]


    def __call__(self, fixed_pars, pd_pars, cutoff=1e-5):
        real = (np.float32 if self.q_input.dtype == generate.F32
                else np.float64 if self.q_input.dtype == generate.F64
                else np.float16 if self.q_input.dtype == generate.F16
                else np.float32)  # will never get here, so use np.float32

        device_num = 0
        queuei = environment().queues[device_num]
        res_bi = self.res_b[device_num]
        nq = np.uint32(self.q_input.nq)
        if pd_pars:
            cutoff = real(cutoff)
            loops_N = [np.uint32(len(p[0])) for p in pd_pars]
            loops = np.hstack(pd_pars) \
                if pd_pars else np.empty(0, dtype=self.q_input.dtype)
            loops = np.ascontiguousarray(loops.T, self.q_input.dtype).flatten()
            #print("loops",Nloops, loops)

            #import sys; print("opencl eval",pars)
            #print("opencl eval",pars)
            if len(loops) > 2 * MAX_LOOPS:
                raise ValueError("too many polydispersity points")

            loops_bi = self.loops_b[device_num]
            cl.enqueue_copy(queuei, loops_bi, loops)
            loops_l = cl.LocalMemory(len(loops.data))
            #ctx = environment().context
            #loops_bi = cl.Buffer(ctx, mf.READ_ONLY|mf.COPY_HOST_PTR, hostbuf=loops)
            dispersed = [loops_bi, loops_l, cutoff] + loops_N
        else:
            dispersed = []
        fixed = [real(p) for p in fixed_pars]
        args = self.q_input.q_buffers + [res_bi, nq] + dispersed + fixed
        self.kernel(queuei, self.q_input.global_size, None, *args)
        cl.enqueue_copy(queuei, self.res, res_bi)

        return self.res

    def release(self):
        for b in self.loops_b:
            b.release()
        self.loops_b = []
        for b in self.res_b:
            b.release()
        self.res_b = []

    def __del__(self):
        self.release()