"""
GPU driver for C kernels (with CUDA)

To select cuda, use SAS_OPENCL=cuda, or SAS_OPENCL=cuda:n for a particular
device number. If no device number is specified, then look for CUDA_DEVICE=n
or a file ~/.cuda-device containing n for the device number. Otherwise, try
all available device numbers.

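For example, to run on the second CUDA device you can set the variable in
the shell (the script name is just an illustration):

    SAS_OPENCL=cuda:1 python fit_script.py

or set it from within Python before the environment is created:

    import os
    os.environ["SAS_OPENCL"] = "cuda:1"
    # If an environment already exists, call reset_environment() to rebuild it.
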
TODO: docs are out of date

There should be a single GPU environment running on the system. This
environment is constructed on the first call to :func:`environment`, and the
same environment is returned on each call.

After retrieving the environment, the next step is to create the kernel.
This is done with a call to :meth:`GpuModel.make_kernel`, which returns a
kernel object bound to a particular set of q values.

The q values are held in a :class:`GpuInput` object. This data object can
be used by multiple kernels, for example, if the target model is a weighted
sum of multiple kernels. The data should include any extra evaluation points
required to compute the proper data smearing. This need not match the square
grid for 2D data if there is an index saying which q points are active.

Together the GpuInput, the program, and a device form a :class:`GpuKernel`.
This kernel is used during fitting, receiving new sets of parameters and
evaluating them. The output value is stored in an output buffer on the
device, where it can be combined with other structure factors and form
factors and have instrumental resolution effects applied.

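A rough sketch of that flow using the classes below (the model loading step
is illustrative; *source* and *model_info* come from
:func:`generate.make_source` and :func:`generate.make_model_info`):

    source = generate.make_source(model_info)
    model = GpuModel(source, model_info, dtype=generate.F32)
    kernel = model.make_kernel([q])   # q is a 1-D numpy array of q values
    # ... evaluate via the Kernel interface during fitting ...
    kernel.release()
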
In order to use OpenCL for your models, you will need OpenCL drivers for
your machine. These should be available from your graphics card vendor.
Intel provides OpenCL drivers for CPUs as well as their integrated HD
graphics chipsets. AMD also provides drivers for Intel CPUs, but as of
this writing the performance is lacking compared to the Intel drivers.
NVidia combines drivers for CUDA and OpenCL in one package. The result
is a bit messy if you have multiple drivers installed. You can see which
drivers are available by starting python and running:

    import pyopencl as cl
    cl.create_some_context(interactive=True)

Once you have done that, it will show the available drivers which you
can select. It will then tell you that you can use these drivers
automatically by setting the SAS_OPENCL environment variable, which is
equivalent to PYOPENCL_CTX but does not conflict with other pyopencl
programs.

Some graphics cards have multiple devices on the same card. You cannot
yet use both of them concurrently to evaluate models, but you can run
the program twice using a different device for each session.

OpenCL kernels are compiled when needed by the device driver. Some
drivers produce compiler output even when there is no error. You
can see the output by setting PYOPENCL_COMPILER_OUTPUT=1. It should be
harmless, albeit annoying.
"""
from __future__ import print_function

import os
import logging
import time
import re
import atexit

import numpy as np  # type: ignore


# Attempt to setup CUDA. This may fail if the pycuda package is not
# installed or if it is installed but there are no devices available.
try:
    import pycuda.driver as cuda  # type: ignore
    from pycuda.compiler import SourceModule
    from pycuda.tools import make_default_context, clear_context_caches
    # Ask CUDA for the default context (so that we know that one exists)
    # then immediately throw it away in case the user doesn't want it.
    # Note: cribbed from pycuda.autoinit
    cuda.init()
    context = make_default_context()
    context.pop()
    clear_context_caches()
    del context
    HAVE_CUDA = True
    CUDA_ERROR = ""
except Exception as exc:
    HAVE_CUDA = False
    CUDA_ERROR = str(exc)

from . import generate
from .kernel import KernelModel, Kernel

# pylint: disable=unused-import
try:
    from typing import Tuple, Callable, Any, Dict, List
    from .modelinfo import ModelInfo
    from .details import CallDetails
except ImportError:
    pass
# pylint: enable=unused-import

# The max loops number is limited by the amount of local memory available
# on the device. You don't want to make this value too big because it will
# waste resources, nor too small because it may interfere with users trying
# to do their polydispersity calculations. The value used here should be
# much larger than necessary given that cost grows as npts^k where k is the
# number of polydisperse parameters.
MAX_LOOPS = 2048


def use_cuda():
    # type: () -> bool
    """Returns True if CUDA is the default compute engine."""
    sas_opencl = os.environ.get("SAS_OPENCL", "CUDA").lower()
    return HAVE_CUDA and sas_opencl.startswith("cuda")


ENV = None
def reset_environment():
    """
    Call to create a new CUDA context, such as after a change to SAS_OPENCL.
    """
    global ENV
    # Free any previously allocated context.
    if ENV is not None and ENV.context is not None:
        ENV.release()
    ENV = GpuEnvironment() if use_cuda() else None


def environment():
    # type: () -> "GpuEnvironment"
    """
    Returns a singleton :class:`GpuEnvironment`.

    This provides a CUDA context for the selected device.
    """
    if ENV is None:
        if not HAVE_CUDA:
            raise RuntimeError("CUDA startup failed with ***"
                               + CUDA_ERROR + "***; using C compiler instead")
        reset_environment()
        if ENV is None:
            raise RuntimeError("SAS_OPENCL=None in environment")
    return ENV


# pytest is not freeing ENV, so make sure it gets freed.
atexit.register(lambda: ENV.release() if ENV is not None else None)


def has_type(dtype):
    # type: (np.dtype) -> bool
    """
    Return True if the device supports the requested precision.
    """
    # Assume the NVIDIA card supports 32-bit and 64-bit floats.
    # TODO: Check if pycuda supports F16.
    return dtype in (generate.F32, generate.F64)


FUNCTION_PATTERN = re.compile(r"""^
    (?P<space>\s*)                        # Initial space.
    (?P<qualifiers>^(?:\s*\b\w+\b\s*)+)   # One or more qualifiers before function.
    (?P<function>\s*\b\w+\b\s*[(])        # Function name plus open parens.
    """, re.VERBOSE|re.MULTILINE)

MARKED_PATTERN = re.compile(r"""
    \b(return|else|kernel|device|__device__)\b
    """, re.VERBOSE|re.MULTILINE)


def _add_device_tag(match):
    # type: (None) -> str
    # Note: Should be re.Match, but that isn't a simple type.
    """
    Replace qualifiers with __device__ qualifiers if needed.
    """
    qualifiers = match.group("qualifiers")
    if MARKED_PATTERN.search(qualifiers):
        start, end = match.span()
        return match.string[start:end]
    else:
        function = match.group("function")
        space = match.group("space")
        return "".join((space, "__device__ ", qualifiers, function))


def mark_device_functions(source):
    # type: (str) -> str
    """
    Mark all function declarations as __device__ functions (except kernel).
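
    For example, a plain C declaration such as the following (an illustrative
    line, not taken from any particular model)::

        static double square(double x) { return x*x; }

    is rewritten as::

        __device__ static double square(double x) { return x*x; }

    Declarations whose leading qualifiers already contain __device__, device
    or kernel (or lines starting with return/else, per MARKED_PATTERN) are
    left unchanged.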
| 191 | """ |
---|
| 192 | return FUNCTION_PATTERN.sub(_add_device_tag, source) |
---|
| 193 | |
---|
[3199b17] | 194 | |
---|
def show_device_functions(source):
    # type: (str) -> str
    """
    Show all discovered function declarations, but don't change any.
    """
    for match in FUNCTION_PATTERN.finditer(source):
        print(match.group('qualifiers').replace('\n', r'\n'),
              match.group('function'), '(')
    return source


def compile_model(source, dtype, fast=False):
    # type: (str, np.dtype, bool) -> SourceModule
    """
    Build a model to run on the gpu.

    Returns the compiled program (a pycuda :class:`SourceModule`). A
    :class:`RuntimeError` is raised if the requested precision is not
    supported by the device.
    """
    dtype = np.dtype(dtype)
    if not has_type(dtype):
        raise RuntimeError("%s not supported for devices"%dtype)

    source_list = [generate.convert_type(source, dtype)]

    source_list.insert(0, "#define USE_SINCOS\n")
    source = "\n".join(source_list)
    #source = show_device_functions(source)
    source = mark_device_functions(source)
    #with open('/tmp/kernel.cu', 'w') as fd: fd.write(source)
    #print(source)
    #options = ['--verbose', '-E']
    options = ['--use_fast_math'] if fast else None
    program = SourceModule(source, no_extern_c=True, options=options) #, include_dirs=[...])

    #print("done with "+program)
    return program


# For now, this returns one device in the context.
# TODO: Create a context that contains all devices on all platforms.
class GpuEnvironment(object):
    """
    GPU context for CUDA.
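
    A specific device can also be requested directly (a sketch; normally the
    device is chosen through SAS_OPENCL as described in the module
    docstring)::

        env = GpuEnvironment(devnum=0)   # push a context for CUDA device 0
        # ... compile and run kernels ...
        env.release()                    # pop the context when done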
    """
    context = None  # type: cuda.Context
    def __init__(self, devnum=None):
        # type: (int) -> None
        env = os.environ.get("SAS_OPENCL", "").lower()
        if devnum is None and env.startswith("cuda:"):
            devnum = int(env[5:])

        # Set the global context to the particular device number if one is
        # given, otherwise use the default context. Perhaps this will be set
        # by an environment variable within autoinit.
        if devnum is not None:
            self.context = cuda.Device(devnum).make_context()
        else:
            self.context = make_default_context()

        ## Byte boundary for data alignment.
        #self.data_boundary = max(d.min_data_type_align_size
        #                         for d in self.context.devices)

        # Cache for compiled programs, and for items in context.
        self.compiled = {}

    def release(self):
        """Free the CUDA device associated with this context."""
        if self.context is not None:
            self.context.pop()
            self.context = None

    def __del__(self):
        self.release()

    def has_type(self, dtype):
        # type: (np.dtype) -> bool
        """
        Return True if all devices support a given type.
        """
        return has_type(dtype)

| 279 | def compile_program(self, name, source, dtype, fast, timestamp): |
---|
| 280 | # type: (str, str, np.dtype, bool, float) -> cl.Program |
---|
| 281 | """ |
---|
| 282 | Compile the program for the device in the given context. |
---|
| 283 | """ |
---|
[3199b17] | 284 | # Note: PyCuda (probably) caches but I'll do so as well just to |
---|
| 285 | # save some data munging time. |
---|
[0db7dbd] | 286 | tag = generate.tag_source(source) |
---|
| 287 | key = "%s-%s-%s%s"%(name, dtype, tag, ("-fast" if fast else "")) |
---|
[3199b17] | 288 | # Check timestamp on program. |
---|
[0db7dbd] | 289 | program, program_timestamp = self.compiled.get(key, (None, np.inf)) |
---|
| 290 | if program_timestamp < timestamp: |
---|
| 291 | del self.compiled[key] |
---|
| 292 | if key not in self.compiled: |
---|
| 293 | logging.info("building %s for CUDA", key) |
---|
| 294 | program = compile_model(str(source), dtype, fast) |
---|
| 295 | self.compiled[key] = (program, timestamp) |
---|
| 296 | return program |
---|
| 297 | |
---|
[3199b17] | 298 | |
---|
class GpuModel(KernelModel):
    """
    GPU wrapper for a single model.

    *source* and *model_info* are the model source and interface as returned
    from :func:`generate.make_source` and :func:`generate.make_model_info`.

    *dtype* is the desired model precision. Any numpy dtype for single
    or double precision floats will do, such as 'f', 'float32' or 'single'
    for single and 'd', 'float64' or 'double' for double. Double precision
    is an optional extension which may not be available on all devices.
    Half precision ('float16', 'half') may be available on some devices.
    Fast precision ('fast') is a loose version of single precision, indicating
    that the compiler is allowed to take shortcuts.
    """
    info = None  # type: ModelInfo
    source = ""  # type: str
    dtype = None  # type: np.dtype
    fast = False  # type: bool
    _program = None  # type: SourceModule
    _kernels = None  # type: Dict[str, cuda.Function]

    def __init__(self, source, model_info, dtype=generate.F32, fast=False):
        # type: (Dict[str, str], ModelInfo, np.dtype, bool) -> None
        self.info = model_info
        self.source = source
        self.dtype = dtype
        self.fast = fast

    def __getstate__(self):
        # type: () -> Tuple[ModelInfo, str, np.dtype, bool]
        return self.info, self.source, self.dtype, self.fast

    def __setstate__(self, state):
        # type: (Tuple[ModelInfo, str, np.dtype, bool]) -> None
        self.info, self.source, self.dtype, self.fast = state
        self._program = self._kernels = None

    def make_kernel(self, q_vectors):
        # type: (List[np.ndarray]) -> "GpuKernel"
        return GpuKernel(self, q_vectors)

    def get_function(self, name):
        # type: (str) -> cuda.Function
        """
        Fetch the kernel from the environment by name, compiling it if it
        does not already exist.
        """
        if self._program is None:
            self._prepare_program()
        return self._kernels[name]

    def _prepare_program(self):
        # type: () -> None
        env = environment()
        timestamp = generate.ocl_timestamp(self.info)
        program = env.compile_program(
            self.info.name,
            self.source['opencl'],
            self.dtype,
            self.fast,
            timestamp)
        variants = ['Iq', 'Iqxy', 'Imagnetic']
        names = [generate.kernel_name(self.info, k) for k in variants]
        functions = [program.get_function(k) for k in names]
        self._kernels = {k: v for k, v in zip(variants, functions)}
        # Keep a handle to program so GC doesn't collect.
        self._program = program


# TODO: Check that we don't need a destructor for buffers which go out of scope.
class GpuInput(object):
    """
    Make q data available to the gpu.

    *q_vectors* is a list of q vectors, which will be *[q]* for 1-D data,
    and *[qx, qy]* for 2-D data. Internally, the vectors will be reallocated
    to get the best performance on the GPU, which may involve shifting and
    stretching the array to better match the memory architecture. Additional
    points will be evaluated with *q=1e-3*.

    *dtype* is the data type for the q vectors. The data type should be
    set to match that of the kernel, which is an attribute of
    :class:`GpuModel`. Note that not all kernels support double
    precision, so even if the program was created for double precision,
    the *GpuModel.dtype* may be single precision.

    Call :meth:`release` when complete. Even if not called directly, the
    buffer will be released when the data object is freed.
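
    For example, assuming a CUDA context has already been created via
    :func:`environment` (the q values are illustrative)::

        q = np.linspace(0.001, 0.5, 1000)
        data = GpuInput([q], dtype=generate.F32)
        assert data.nq == 1000
        assert data.q.shape == (1024,)   # padded up to a multiple of 32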
| 388 | """ |
---|
| 389 | def __init__(self, q_vectors, dtype=generate.F32): |
---|
| 390 | # type: (List[np.ndarray], np.dtype) -> None |
---|
[3199b17] | 391 | # TODO: Do we ever need double precision q? |
---|
[0db7dbd] | 392 | self.nq = q_vectors[0].size |
---|
| 393 | self.dtype = np.dtype(dtype) |
---|
| 394 | self.is_2d = (len(q_vectors) == 2) |
---|
[3199b17] | 395 | # TODO: Stretch input based on get_warp(). |
---|
| 396 | # Not doing it now since warp depends on kernel, which is not known |
---|
[0db7dbd] | 397 | # at this point, so instead using 32, which is good on the set of |
---|
| 398 | # architectures tested so far. |
---|
| 399 | if self.is_2d: |
---|
[3199b17] | 400 | width = ((self.nq+15)//16)*16 |
---|
[0db7dbd] | 401 | self.q = np.empty((width, 2), dtype=dtype) |
---|
| 402 | self.q[:self.nq, 0] = q_vectors[0] |
---|
| 403 | self.q[:self.nq, 1] = q_vectors[1] |
---|
| 404 | else: |
---|
[3199b17] | 405 | width = ((self.nq+31)//32)*32 |
---|
[0db7dbd] | 406 | self.q = np.empty(width, dtype=dtype) |
---|
| 407 | self.q[:self.nq] = q_vectors[0] |
---|
| 408 | self.global_size = [self.q.shape[0]] |
---|
| 409 | #print("creating inputs of size", self.global_size) |
---|
[7126c04] | 410 | |
---|
[3199b17] | 411 | # Transfer input value to GPU. |
---|
[0db7dbd] | 412 | self.q_b = cuda.to_device(self.q) |
---|
| 413 | |
---|
    def release(self):
        # type: () -> None
        """
        Free the buffer associated with the q value.
        """
        if self.q_b is not None:
            self.q_b.free()
            self.q_b = None

    def __del__(self):
        # type: () -> None
        self.release()


class GpuKernel(Kernel):
    """
    Callable SAS kernel.

    *model* is the GpuModel object to call.

    The kernel is derived from :class:`Kernel`, providing the
    :meth:`call_kernel` method to evaluate the kernel for a given set of
    parameters. Because of the need to move the q values to the GPU before
    evaluation, the kernel is instantiated for a particular set of q vectors,
    and can be called many times without transferring q each time.

    Call :meth:`release` when done with the kernel instance.
    """
    #: SAS model information structure.
    info = None  # type: ModelInfo
    #: Kernel precision.
    dtype = None  # type: np.dtype
    #: Kernel dimensions (1d or 2d).
    dim = ""  # type: str
    #: Calculation results, updated after each call to :meth:`_call_kernel`.
    result = None  # type: np.ndarray

    def __init__(self, model, q_vectors):
        # type: (GpuModel, List[np.ndarray]) -> None
        dtype = model.dtype
        self.q_input = GpuInput(q_vectors, dtype)
        self._model = model

        # Attributes accessed from the outside.
        self.dim = '2d' if self.q_input.is_2d else '1d'
        self.info = model.info
        self.dtype = dtype

        # Converter to translate input to target type.
        self._as_dtype = np.float64 if dtype == generate.F64 else np.float32

        # Holding place for the returned value.
        nout = 2 if self.info.have_Fq and self.dim == '1d' else 1
        extra_q = 4  # Total weight, form volume, shell volume and R_eff.
        self.result = np.empty(self.q_input.nq*nout + extra_q, dtype)

        # Allocate result value on GPU.
        width = ((self.result.size+31)//32)*32 * self.dtype.itemsize
        self._result_b = cuda.mem_alloc(width)

    def _call_kernel(self, call_details, values, cutoff, magnetic,
                     effective_radius_type):
        # type: (CallDetails, np.ndarray, float, bool, int) -> np.ndarray

        # Arrange data transfer to card.
        details_b = cuda.to_device(call_details.buffer)
        values_b = cuda.to_device(values)

        # Setup kernel function and arguments.
        name = 'Iq' if self.dim == '1d' else 'Imagnetic' if magnetic else 'Iqxy'
        kernel = self._model.get_function(name)
        kernel_args = [
            np.uint32(self.q_input.nq),  # Number of inputs.
            None,  # Placeholder for pd_start.
            None,  # Placeholder for pd_stop.
            details_b,  # Problem definition.
            values_b,  # Parameter values.
            self.q_input.q_b,  # Q values.
            self._result_b,  # Result storage.
            self._as_dtype(cutoff),  # Probability cutoff.
            np.uint32(effective_radius_type),  # R_eff mode.
        ]
        grid = partition(self.q_input.nq)

        # Call kernel and retrieve results.
        #print("Calling CUDA")
        #call_details.show(values)
        last_nap = time.perf_counter()  # time.clock() was removed in Python 3.8
        step = 100000000//self.q_input.nq + 1
        #step = 1000000000
        for start in range(0, call_details.num_eval, step):
            stop = min(start + step, call_details.num_eval)
            #print("queuing", start, stop)
            kernel_args[1:3] = [np.int32(start), np.int32(stop)]
            kernel(*kernel_args, **grid)
            if stop < call_details.num_eval:
                sync()
                # Allow other processes to run.
                current_time = time.perf_counter()
                if current_time - last_nap > 0.5:
                    time.sleep(0.001)
                    last_nap = current_time
        sync()
        cuda.memcpy_dtoh(self.result, self._result_b)
        #print("result", self.result)

        details_b.free()
        values_b.free()

    def release(self):
        # type: () -> None
        """
        Release resources associated with the kernel.
        """
        self.q_input.release()
        if self._result_b is not None:
            self._result_b.free()
            self._result_b = None

    def __del__(self):
        # type: () -> None
        self.release()


def sync():
    """
    Overview:
        Waits for operations in the current context to complete.

    Note: Maybe context.synchronize() is sufficient.
    """
    # Create an event with which to synchronize.
    done = cuda.Event()

    # Schedule an event trigger on the GPU.
    done.record()

    # Make sure we don't hog resource while waiting to sync.
    while not done.query():
        time.sleep(0.01)

    # Block until the GPU executes the kernel.
    done.synchronize()

    # Clean up the event; I don't think they can be reused.
    del done


def partition(n):
    '''
    Overview:
        Auto grids the thread blocks to achieve some level of calculation
        efficiency.
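
    Example (illustrative): n = 1,000,000 points with blocksize 32 gives
    num_blocks = 31250, which fits in a single grid row, so the result is
    dict(block=(32, 1, 1), grid=(31250, 1)).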
    '''
    max_gx, max_gy = 65535, 65535
    blocksize = 32
    #max_gx, max_gy = 5, 65536
    #blocksize = 3
    block = (blocksize, 1, 1)
    num_blocks = (n+blocksize-1)//blocksize
    if num_blocks < max_gx:
        grid = (num_blocks, 1)
    else:
        gx = max_gx
        # Use integer division so that the grid dimensions stay ints.
        gy = (num_blocks + max_gx - 1)//max_gx
        if gy >= max_gy:
            raise ValueError("vector is too large")
        grid = (gx, gy)
    #print("block", block, "grid", grid)
    #print("waste", block[0]*block[1]*block[2]*grid[0]*grid[1] - n)
    return dict(block=block, grid=grid)
---|