source: sasmodels/sasmodels/kernelcl.py @ 6e5b2a7

Last change on this file was 6e5b2a7, checked in by ajj, 8 years ago:

ticket 363: Allow other processes to run during long computations (#9)


1"""
2GPU driver for C kernels
3
4There should be a single GPU environment running on the system.  This
5environment is constructed on the first call to :func:`env`, and the
6same environment is returned on each call.
7
8After retrieving the environment, the next step is to create the kernel.
9This is done with a call to :meth:`GpuEnvironment.make_kernel`, which
10returns the type of data used by the kernel.
11
12Next a :class:`GpuData` object should be created with the correct kind
13of data.  This data object can be used by multiple kernels, for example,
14if the target model is a weighted sum of multiple kernels.  The data
15should include any extra evaluation points required to compute the proper
16data smearing.  This need not match the square grid for 2D data if there
17is an index saying which q points are active.
18
19Together the GpuData, the program, and a device form a :class:`GpuKernel`.
20This kernel is used during fitting, receiving new sets of parameters and
21evaluating them.  The output value is stored in an output buffer on the
22devices, where it can be combined with other structure factors and form
23factors and have instrumental resolution effects applied.
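
A rough sketch of the full flow, assuming *model_source* and *model_info*
come from :func:`generate.make_source` and :func:`generate.make_model_info`,
and *call_details* and *values* are packed in the form that
:class:`details.CallDetails` describes (all names here are illustrative):

    model = GpuModel(model_source, model_info, dtype=np.dtype('f'))
    kernel = model.make_kernel([q])    # use [qx, qy] for 2-D data
    Iq = kernel(call_details, values, cutoff=0., magnetic=False)
    kernel.release()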

In order to use OpenCL for your models, you will need OpenCL drivers for
your machine.  These should be available from your graphics card vendor.
Intel provides OpenCL drivers for CPUs as well as their integrated HD
graphics chipsets.  AMD also provides drivers for Intel CPUs, but as of
this writing the performance is lacking compared to the Intel drivers.
NVidia combines drivers for CUDA and OpenCL in one package.  The result
is a bit messy if you have multiple drivers installed.  You can see which
drivers are available by starting python and running:

    import pyopencl as cl
    cl.create_some_context(interactive=True)

Once you have done that, it will show the available drivers which you
can select.  It will then tell you that you can use these drivers
automatically by setting the SAS_OPENCL environment variable, which is
equivalent to PYOPENCL_CTX but does not conflict with other pyopencl
programs.
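
For example, to pin the device choice for a session before importing
sasmodels (the value "0:1", meaning platform 0, device 1, is purely
illustrative; use whatever create_some_context reports on your machine):

    import os
    os.environ['SAS_OPENCL'] = '0:1'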

Some graphics cards have multiple devices on the same card.  You cannot
yet use both of them concurrently to evaluate models, but you can run
the program twice using a different device for each session.

OpenCL kernels are compiled when needed by the device driver.  Some
drivers produce compiler output even when there is no error.  You
can see the output by setting PYOPENCL_COMPILER_OUTPUT=1.  It should be
harmless, albeit annoying.
"""
from __future__ import print_function

import os
import warnings
import logging
import time

import numpy as np  # type: ignore

try:
    #raise NotImplementedError("OpenCL not yet implemented for new kernel template")
    import pyopencl as cl  # type: ignore
    # Ask OpenCL for the default context so that we know that one exists
    cl.create_some_context(interactive=False)
except Exception as exc:
    warnings.warn("OpenCL startup failed with ***"
                  + str(exc) + "***; using C compiler instead")
    raise RuntimeError("OpenCL not available")

from pyopencl import mem_flags as mf
from pyopencl.characterize import get_fast_inaccurate_build_options

from . import generate
from .kernel import KernelModel, Kernel

try:
    from typing import List, Dict, Tuple, Callable, Any
    from .modelinfo import ModelInfo
    from .details import CallDetails
except ImportError:
    pass

# CRUFT: pyopencl < 2017.1  (as of June 2016 needs quotes around include path)
def quote_path(v):
    """
    Quote the path if it is not already quoted.

    If v starts with '-', then assume that it is a -I option or similar
    and do not quote it.  This is fragile:  -Ipath with space needs to
    be quoted.
    """
    return '"'+v+'"' if v and ' ' in v and not v[0] in "\"'-" else v
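# For example (a sketch of the intended behaviour):
#     quote_path('C:/Program Files/include')  ->  '"C:/Program Files/include"'
#     quote_path('-I/usr/include')            ->  '-I/usr/include'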

def fix_pyopencl_include():
    """
    Monkey patch pyopencl to allow spaces in include file path.
    """
    import pyopencl as cl
    if hasattr(cl, '_DEFAULT_INCLUDE_OPTIONS'):
        cl._DEFAULT_INCLUDE_OPTIONS = [quote_path(v) for v in cl._DEFAULT_INCLUDE_OPTIONS]

fix_pyopencl_include()


# The max loops number is limited by the amount of local memory available
# on the device.  You don't want to make this value too big because it will
# waste resources, nor too small because it may interfere with users trying
# to do their polydispersity calculations.  A value of 2048 should be much
# larger than necessary given that cost grows as npts^k where k is the number
# of polydisperse parameters.
MAX_LOOPS = 2048


# Pragmas to enable OpenCL features.  Be sure to protect them so that they
# still compile even if OpenCL is not present.
_F16_PRAGMA = """\
#if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp16)
#  pragma OPENCL EXTENSION cl_khr_fp16: enable
#endif
"""

_F64_PRAGMA = """\
#if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp64)
#  pragma OPENCL EXTENSION cl_khr_fp64: enable
#endif
"""


ENV = None
def environment():
    # type: () -> "GpuEnvironment"
    """
    Returns a singleton :class:`GpuEnvironment`.

    This provides an OpenCL context and one queue per device.
    """
    global ENV
    if ENV is None:
        ENV = GpuEnvironment()
    return ENV
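# Typical use (a sketch; the precision constants live in generate):
#     env = environment()
#     queue = env.get_queue(generate.F32)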

def has_type(device, dtype):
    # type: (cl.Device, np.dtype) -> bool
    """
    Return true if device supports the requested precision.
    """
    if dtype == generate.F32:
        return True
    elif dtype == generate.F64:
        return "cl_khr_fp64" in device.extensions
    elif dtype == generate.F16:
        return "cl_khr_fp16" in device.extensions
    else:
        return False
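# A quick survey of double-precision support across the visible devices
# (a sketch, assuming the pyopencl import above succeeded):
#     for platform in cl.get_platforms():
#         for device in platform.get_devices():
#             print(device.name, has_type(device, generate.F64))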

def get_warp(kernel, queue):
    # type: (cl.Kernel, cl.CommandQueue) -> int
    """
    Return the size of an execution batch for *kernel* running on *queue*.
    """
    return kernel.get_work_group_info(
        cl.kernel_work_group_info.PREFERRED_WORK_GROUP_SIZE_MULTIPLE,
        queue.device)

def _stretch_input(vector, dtype, extra=1e-3, boundary=32):
    # type: (np.ndarray, np.dtype, float, int) -> np.ndarray
    """
    Stretch an input vector to the correct boundary.

    Performance on the kernels can drop by a factor of two or more if the
    number of values to compute does not fall on a nice power of two
    boundary.   The trailing additional vector elements are given a
    value of *extra*, and so f(*extra*) will be computed for each of
    them.  The returned array will thus be a subset of the computed array.

    *boundary* should be a power of 2 which is at least 32 for good
    performance on current platforms (as of Jan 2015).  It should
    probably be the max of get_warp(kernel,queue) and
    device.min_data_type_align_size//4.
    """
    remainder = vector.size % boundary
    if remainder != 0:
        size = vector.size + (boundary - remainder)
        vector = np.hstack((vector, [extra] * (size - vector.size)))
    return np.ascontiguousarray(vector, dtype=dtype)
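# For example, a 100-point vector on the default 32-element boundary is
# padded to 128 elements, with the trailing entries set to *extra*:
#     q = np.linspace(0.001, 0.5, 100)
#     padded = _stretch_input(q, np.dtype('float32'))
#     assert padded.size == 128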


def compile_model(context, source, dtype, fast=False):
    # type: (cl.Context, str, np.dtype, bool) -> cl.Program
    """
    Build a model to run on the gpu.

    Returns the compiled program.  Raises RuntimeError if any device in
    the context does not support the requested *dtype* (for example, if
    the cl_khr_fp64 extension is missing for float64).
    """
    dtype = np.dtype(dtype)
    if not all(has_type(d, dtype) for d in context.devices):
        raise RuntimeError("%s not supported for devices"%dtype)

    source_list = [generate.convert_type(source, dtype)]

    if dtype == generate.F16:
        source_list.insert(0, _F16_PRAGMA)
    elif dtype == generate.F64:
        source_list.insert(0, _F64_PRAGMA)

    # Note: USE_SINCOS makes the intel cpu slower under opencl
    if context.devices[0].type == cl.device_type.GPU:
        source_list.insert(0, "#define USE_SINCOS\n")
    options = (get_fast_inaccurate_build_options(context.devices[0])
               if fast else [])
    source = "\n".join(source_list)
    program = cl.Program(context, source).build(options=options)
    #print("done with "+program)
    return program
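# Hypothetical use, assuming *ctx* is a cl.Context and *src* is the
# generated C source for a model:
#     program = compile_model(ctx, src, np.dtype('float32'), fast=False)
#     kernel = getattr(program, name)   # name from generate.kernel_name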


# for now, this returns one device in the context
# TODO: create a context that contains all devices on all platforms
class GpuEnvironment(object):
    """
    GPU context, with possibly many devices, and one queue per device.
    """
    def __init__(self):
        # type: () -> None
        # find gpu context
        #self.context = cl.create_some_context()

        self.context = None
        if 'SAS_OPENCL' in os.environ:
            # Copy SAS_OPENCL into PYOPENCL_CTX so pyopencl uses it to
            # create the context
            os.environ["PYOPENCL_CTX"] = os.environ["SAS_OPENCL"]
        if 'PYOPENCL_CTX' in os.environ:
            self._create_some_context()

        if not self.context:
            self.context = _get_default_context()

        # Byte boundary for data alignment
        #self.data_boundary = max(d.min_data_type_align_size
        #                         for d in self.context.devices)
        self.queues = [cl.CommandQueue(context, context.devices[0])
                       for context in self.context]
        self.compiled = {}

    def has_type(self, dtype):
        # type: (np.dtype) -> bool
        """
        Return True if any device in any context supports the given type.
        """
        return any(has_type(d, dtype)
                   for context in self.context
                   for d in context.devices)

    def get_queue(self, dtype):
        # type: (np.dtype) -> cl.CommandQueue
        """
        Return a command queue for the kernels of type dtype.
        """
        for context, queue in zip(self.context, self.queues):
            if all(has_type(d, dtype) for d in context.devices):
                return queue

    def get_context(self, dtype):
        # type: (np.dtype) -> cl.Context
        """
        Return an OpenCL context for the kernels of type dtype.
        """
        for context in self.context:
            if all(has_type(d, dtype) for d in context.devices):
                return context

    def _create_some_context(self):
        # type: () -> cl.Context
        """
        Protected call to cl.create_some_context without interactivity.  Use
        this if SAS_OPENCL is set in the environment.  Sets the *context*
        attribute.
        """
        try:
            self.context = [cl.create_some_context(interactive=False)]
        except Exception as exc:
            warnings.warn(str(exc))
            warnings.warn("pyopencl.create_some_context() failed")
            warnings.warn("the environment variable 'SAS_OPENCL' might not be set correctly")

    def compile_program(self, name, source, dtype, fast, timestamp):
        # type: (str, str, np.dtype, bool, float) -> cl.Program
        """
        Compile the program for the device in the given context.
        """
        # Note: PyOpenCL caches based on md5 hash of source, options and device
        # so we don't really need to cache things for ourselves.  I'll do so
        # anyway just to save some data munging time.
        key = "%s-%s%s"%(name, dtype, ("-fast" if fast else ""))
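        # e.g. key == "cylinder-float32" or "cylinder-float32-fast";
        # the model name here is just an illustration.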
        # Check timestamp on program
        program, program_timestamp = self.compiled.get(key, (None, np.inf))
        if program_timestamp < timestamp:
            del self.compiled[key]
        if key not in self.compiled:
            context = self.get_context(dtype)
            logging.info("building %s for OpenCL %s", key,
                         context.devices[0].name.strip())
            program = compile_model(self.get_context(dtype),
                                    str(source), dtype, fast)
            self.compiled[key] = (program, timestamp)
        return program

def _get_default_context():
    # type: () -> List[cl.Context]
    """
    Get an OpenCL context, preferring GPU over CPU, and preferring Intel
    drivers over AMD drivers.
    """
    # Note: on mobile devices there is automatic clock scaling if either the
    # CPU or the GPU is underutilized; probably doesn't affect us, but if
    # it did, it would mean that putting a busy loop on the CPU while the GPU
    # is running may increase throughput.
    #
    # Macbook pro, base install:
    #     {'Apple': [Intel CPU, NVIDIA GPU]}
    # Macbook pro, base install:
    #     {'Apple': [Intel CPU, Intel GPU]}
    # 2 x nvidia 295 with Intel and NVIDIA opencl drivers installed
    #     {'Intel': [CPU], 'NVIDIA': [GPU, GPU, GPU, GPU]}
    gpu, cpu = None, None
    for platform in cl.get_platforms():
        # AMD provides a much weaker CPU driver than Intel/Apple, so avoid it.
        # If someone has bothered to install the AMD/NVIDIA drivers, prefer
        # them over the integrated graphics driver that may have been supplied
        # with the CPU chipset.
        preferred_cpu = (platform.vendor.startswith('Intel')
                         or platform.vendor.startswith('Apple'))
        preferred_gpu = (platform.vendor.startswith('Advanced')
                         or platform.vendor.startswith('NVIDIA'))
        for device in platform.get_devices():
            if device.type == cl.device_type.GPU:
                # If the existing type is not GPU then it will be CUSTOM
                # or ACCELERATOR, so don't override it.
                if gpu is None or (preferred_gpu and gpu.type == cl.device_type.GPU):
                    gpu = device
            elif device.type == cl.device_type.CPU:
                if cpu is None or preferred_cpu:
                    cpu = device
            else:
                # System has cl.device_type.ACCELERATOR or cl.device_type.CUSTOM;
                # the Intel Phi, for example, registers as an accelerator.
                # Since the user installed a custom device on their system
                # and went through the pain of sorting out OpenCL drivers for
                # it, let's assume they really do want to use it as their
                # primary compute device.
                gpu = device

    # order the devices by gpu then by cpu; when searching for an available
    # device by data type they will be checked in this order, which means
    # that if the gpu supports double then the cpu will never be used (though
    # we may make it possible to explicitly request the cpu at some point).
    devices = []
    if gpu is not None:
        devices.append(gpu)
    if cpu is not None:
        devices.append(cpu)
    return [cl.Context([d]) for d in devices]
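# For example, a machine with an NVIDIA GPU and an Intel CPU yields
# [cl.Context([gpu]), cl.Context([cpu])], so double precision falls back
# to the CPU only when the GPU lacks cl_khr_fp64.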


class GpuModel(KernelModel):
    """
    GPU wrapper for a single model.

    *source* and *model_info* are the model source and interface as returned
    from :func:`generate.make_source` and :func:`generate.make_model_info`.

    *dtype* is the desired model precision.  Any numpy dtype for single
    or double precision floats will do, such as 'f', 'float32' or 'single'
    for single and 'd', 'float64' or 'double' for double.  Double precision
    is an optional extension which may not be available on all devices.
    Half precision ('float16','half') may be available on some devices.
    Fast precision ('fast') is a loose version of single precision, indicating
    that the compiler is allowed to take shortcuts.
    """
    def __init__(self, source, model_info, dtype=generate.F32, fast=False):
        # type: (Dict[str,str], ModelInfo, np.dtype, bool) -> None
        self.info = model_info
        self.source = source
        self.dtype = dtype
        self.fast = fast
        self.program = None # delay program creation
        self._kernels = None

    def __getstate__(self):
        # type: () -> Tuple[ModelInfo, str, np.dtype, bool]
        return self.info, self.source, self.dtype, self.fast

    def __setstate__(self, state):
        # type: (Tuple[ModelInfo, str, np.dtype, bool]) -> None
        self.info, self.source, self.dtype, self.fast = state
        self.program = None

    def make_kernel(self, q_vectors):
        # type: (List[np.ndarray]) -> "GpuKernel"
        if self.program is None:
            compile_program = environment().compile_program
            timestamp = generate.ocl_timestamp(self.info)
            self.program = compile_program(
                self.info.name,
                self.source['opencl'],
                self.dtype,
                self.fast,
                timestamp)
            variants = ['Iq', 'Iqxy', 'Imagnetic']
            names = [generate.kernel_name(self.info, k) for k in variants]
            kernels = [getattr(self.program, k) for k in names]
            self._kernels = dict((k, v) for k, v in zip(variants, kernels))
        is_2d = len(q_vectors) == 2
        if is_2d:
            kernel = [self._kernels['Iqxy'], self._kernels['Imagnetic']]
        else:
            kernel = [self._kernels['Iq']]*2
        return GpuKernel(kernel, self.dtype, self.info, q_vectors)

    def release(self):
        # type: () -> None
        """
        Free the resources associated with the model.
        """
        if self.program is not None:
            self.program = None

    def __del__(self):
        # type: () -> None
        self.release()

# TODO: check that we don't need a destructor for buffers which go out of scope
class GpuInput(object):
    """
    Make q data available to the gpu.

    *q_vectors* is a list of q vectors, which will be *[q]* for 1-D data,
    and *[qx, qy]* for 2-D data.  Internally, the vectors will be reallocated
    to get the best performance on OpenCL, which may involve shifting and
    stretching the array to better match the memory architecture.  Additional
    points will be evaluated with *q=1e-3*.

    *dtype* is the data type for the q vectors. The data type should be
    set to match that of the kernel, which is an attribute of
    :class:`GpuModel`.  Note that not all kernels support double
    precision, so even if the program was created for double precision,
    the *GpuModel.dtype* may be single precision.

    Call :meth:`release` when complete.  Even if not called directly, the
    buffer will be released when the data object is freed.
    """
    def __init__(self, q_vectors, dtype=generate.F32):
        # type: (List[np.ndarray], np.dtype) -> None
        # TODO: do we ever need double precision q?
        env = environment()
        self.nq = q_vectors[0].size
        self.dtype = np.dtype(dtype)
        self.is_2d = (len(q_vectors) == 2)
        # TODO: stretch input based on get_warp()
        # not doing it now since warp depends on kernel, which is not known
        # at this point, so instead using 32, which is good on the set of
        # architectures tested so far.
        if self.is_2d:
            # Note: 17 rather than 15 because the result is 2 elements
            # longer than the input.
            width = ((self.nq+17)//16)*16
            self.q = np.empty((width, 2), dtype=dtype)
            self.q[:self.nq, 0] = q_vectors[0]
            self.q[:self.nq, 1] = q_vectors[1]
        else:
            # Note: 33 rather than 31 because the result is 2 elements
            # longer than the input.
            width = ((self.nq+33)//32)*32
            self.q = np.empty(width, dtype=dtype)
            self.q[:self.nq] = q_vectors[0]
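        # e.g. nq=1000 in 1-D gives width ((1000+33)//32)*32 == 1024, which
        # is 32-aligned and leaves room for the two extra result slots.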
        self.global_size = [self.q.shape[0]]
        context = env.get_context(self.dtype)
        #print("creating inputs of size", self.global_size)
        self.q_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                             hostbuf=self.q)

    def release(self):
        # type: () -> None
        """
        Free the memory.
        """
        if self.q_b is not None:
            self.q_b.release()
            self.q_b = None

    def __del__(self):
        # type: () -> None
        self.release()

class GpuKernel(Kernel):
    """
    Callable SAS kernel.

    *kernel* is the pair of cl.Kernel variants to call, [plain, magnetic]

    *model_info* is the module information

    *q_vectors* is the q vectors at which the kernel should be evaluated

    *dtype* is the kernel precision

    The resulting kernel is called with *call_details* describing the
    polydispersity loops, a *values* vector holding the parameter values
    and the (value, weight) pairs for the polydisperse parameters, and
    *cutoff*, which determines the integration limits: any points with
    combined weight less than *cutoff* will not be calculated.  A
    *magnetic* flag selects the magnetic variant of the kernel.

    Call :meth:`release` when done with the kernel instance.
    """
    def __init__(self, kernel, dtype, model_info, q_vectors):
        # type: (List[cl.Kernel], np.dtype, ModelInfo, List[np.ndarray]) -> None
        q_input = GpuInput(q_vectors, dtype)
        self.kernel = kernel
        self.info = model_info
        self.dtype = dtype
        self.dim = '2d' if q_input.is_2d else '1d'
        # plus three for the normalization values
        self.result = np.empty(q_input.nq+3, dtype)

        # Inputs and outputs for each kernel call
        # Note: res may be shorter than res_b if global_size != nq
        env = environment()
        self.queue = env.get_queue(dtype)

        self.result_b = cl.Buffer(self.queue.context, mf.READ_WRITE,
                                  q_input.global_size[0] * dtype.itemsize)
        self.q_input = q_input # allocated by GpuInput above

        self._need_release = [self.result_b, self.q_input]
        self.real = (np.float32 if dtype == generate.F32
                     else np.float64 if dtype == generate.F64
                     else np.float16 if dtype == generate.F16
                     else np.float32)  # will never get here, so use np.float32

    def __call__(self, call_details, values, cutoff, magnetic):
        # type: (CallDetails, np.ndarray, float, bool) -> np.ndarray
        context = self.queue.context
        # Arrange data transfer to card
        details_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                              hostbuf=call_details.buffer)
        values_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                             hostbuf=values)

        kernel = self.kernel[1 if magnetic else 0]
        args = [
            np.uint32(self.q_input.nq), None, None,
            details_b, values_b, self.q_input.q_b, self.result_b,
            self.real(cutoff),
        ]
        #print("Calling OpenCL")
        #call_details.show(values)
        # Call kernel and retrieve results
        wait_for = None
        last_nap = time.clock()
        step = 1000000//self.q_input.nq + 1
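        # e.g. with nq = 1000 q values, step = 1000000//1000 + 1 = 1001
        # polydispersity points per launch, so each enqueue covers roughly
        # a million evaluations before checking whether to nap.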
        for start in range(0, call_details.num_eval, step):
            stop = min(start + step, call_details.num_eval)
            #print("queuing",start,stop)
            args[1:3] = [np.int32(start), np.int32(stop)]
            wait_for = [kernel(self.queue, self.q_input.global_size, None,
                               *args, wait_for=wait_for)]
            if stop < call_details.num_eval:
                # Allow other processes to run
                wait_for[0].wait()
                current_time = time.clock()
                if current_time - last_nap > 0.5:
                    time.sleep(0.05)
                    last_nap = current_time
        cl.enqueue_copy(self.queue, self.result, self.result_b)
        #print("result", self.result)

        # Free buffers
        for v in (details_b, values_b):
            if v is not None: v.release()

        pd_norm = self.result[self.q_input.nq]
        scale = values[0]/(pd_norm if pd_norm != 0.0 else 1.0)
        background = values[1]
        #print("scale",scale,values[0],self.result[self.q_input.nq],background)
        return scale*self.result[:self.q_input.nq] + background
        # return self.result[:self.q_input.nq]

    def release(self):
        # type: () -> None
        """
        Release resources associated with the kernel.
        """
        for v in self._need_release:
            v.release()
        self._need_release = []

    def __del__(self):
        # type: () -> None
        self.release()