"""
GPU support through OpenCL

There should be a single GPU environment running on the system. This
environment is constructed on the first call to :func:`environment`, and
the same environment is returned on each call.

After retrieving the environment, the next step is to create the model.
This is done by constructing a :class:`GpuModel` from the model source
and interface; the model records the data type used by its kernels and
compiles the program on first use.

Next a :class:`GpuInput` object should be created with the correct kind
of data. This data object can be used by multiple kernels, for example,
if the target model is a weighted sum of multiple kernels. The data
should include any extra evaluation points required to compute the proper
data smearing. This need not match the square grid for 2D data if there
is an index saying which q points are active.

Together the GpuInput, the program, and a device form a :class:`GpuKernel`.
This kernel is used during fitting, receiving new sets of parameters and
evaluating them. The output value is stored in an output buffer on the
devices, where it can be combined with other structure factors and form
factors and have instrumental resolution effects applied.
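
A minimal sketch of the workflow, assuming *source* and *info* have been
produced by the model generator and *q* is a numpy array of scattering
vectors:

    model = GpuModel(source, info)        # defaults to single precision
    q_input = model.make_input([q])       # use [qx, qy] for 2-D data
    kernel = model(q_input)               # compiles the program on first use
    result = kernel(fixed_pars, pd_pars)  # evaluate at the q points
    kernel.release()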

In order to use OpenCL for your models, you will need OpenCL drivers for
your machine. These should be available from your graphics card vendor.
Intel provides OpenCL drivers for CPUs as well as their integrated HD
graphics chipsets. AMD also provides drivers for Intel CPUs, but as of
this writing the performance is lacking compared to the Intel drivers.
NVidia combines drivers for CUDA and OpenCL in one package. The result
is a bit messy if you have multiple drivers installed. You can see which
drivers are available by starting python and running:

    import pyopencl as cl
    cl.create_some_context(interactive=True)

Once you have done that, it will show the available drivers which you
can select. It will then tell you that you can use these drivers
automatically by setting the PYOPENCL_CTX environment variable.
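
For example, if the interactive prompt reports that choice 0 is the
driver you want, it can be selected without prompting by setting the
variable before the context is created (shown here from within python;
it can equally be set in the shell):

    import os
    os.environ['PYOPENCL_CTX'] = '0'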

Some graphics cards have multiple devices on the same card. You cannot
yet use both of them concurrently to evaluate models, but you can run
the program twice using a different device for each session.

OpenCL kernels are compiled when needed by the device driver. Some
drivers produce compiler output even when there is no error. You
can see the output by setting PYOPENCL_COMPILER_OUTPUT=1. It should be
harmless, albeit annoying.
"""
import os
import warnings

import numpy as np

try:
    import pyopencl as cl
    # Ask OpenCL for the default context so that we know that one exists
    cl.create_some_context(interactive=False)
except Exception as exc:
    warnings.warn(str(exc))
    raise RuntimeError("OpenCL not available")

from pyopencl import mem_flags as mf
from pyopencl.characterize import get_fast_inaccurate_build_options

from . import generate

# The max loops number is limited by the amount of local memory available
# on the device. You don't want to make this value too big because it will
# waste resources, nor too small because it may interfere with users trying
# to do their polydispersity calculations. The current value of 2048 should
# be much larger than necessary given that cost grows as npts^k where k is
# the number of polydisperse parameters.
MAX_LOOPS = 2048


ENV = None
def environment():
    """
    Returns a singleton :class:`GpuEnvironment`.

    This provides an OpenCL context and one queue per device.
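
    A short usage sketch (queue 0 is simply the first device in the
    context):

        env = environment()    # same GpuEnvironment on every call
        queue = env.queues[0]  # command queue for the first device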
    """
    global ENV
    if ENV is None:
        ENV = GpuEnvironment()
    return ENV

def has_type(device, dtype):
    """
    Return true if device supports the requested precision.
    """
    if dtype == generate.F32:
        return True
    elif dtype == generate.F64:
        return "cl_khr_fp64" in device.extensions
    elif dtype == generate.F16:
        return "cl_khr_fp16" in device.extensions
    else:
        return False

def get_warp(kernel, queue):
    """
    Return the size of an execution batch for *kernel* running on *queue*.
    """
    return kernel.get_work_group_info(
        cl.kernel_work_group_info.PREFERRED_WORK_GROUP_SIZE_MULTIPLE,
        queue.device)

def _stretch_input(vector, dtype, extra=1e-3, boundary=32):
    """
    Stretch an input vector to the correct boundary.

    Performance on the kernels can drop by a factor of two or more if the
    number of values to compute does not fall on a nice power of two
    boundary. The trailing additional vector elements are given a
    value of *extra*, and so f(*extra*) will be computed for each of
    them. The desired values will thus be a subset of the computed array.

    *boundary* should be a power of 2 which is at least 32 for good
    performance on current platforms (as of Jan 2015). It should
    probably be the max of get_warp(kernel, queue) and
    device.min_data_type_align_size//4.
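
    For example, a vector of 100 elements with the default boundary of 32
    is padded to 128 elements, with the last 28 set to *extra*:

        q = np.arange(100, dtype='d')
        padded = _stretch_input(q, 'd')
        assert padded.size == 128 and padded[-1] == 1e-3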
    """
    remainder = vector.size % boundary
    if remainder != 0:
        size = vector.size + (boundary - remainder)
        vector = np.hstack((vector, [extra] * (size - vector.size)))
    return np.ascontiguousarray(vector, dtype=dtype)


def compile_model(context, source, dtype, fast=False):
    """
    Build a model to run on the gpu.

    Returns the compiled program. A RuntimeError is raised if the
    requested precision is not supported by all devices in the context
    (for example, if double precision is requested but a device lacks
    the cl_khr_fp64 extension); there is no silent fallback to float32.
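
    A hedged sketch of direct use, with *source* a generated model string;
    normally this is called indirectly via
    :meth:`GpuEnvironment.compile_program`, which caches the result:

        env = environment()
        program = compile_model(env.context, source, generate.F32)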
    """
    dtype = np.dtype(dtype)
    if not all(has_type(d, dtype) for d in context.devices):
        raise RuntimeError("%s not supported for devices" % dtype)

    source = generate.convert_type(source, dtype)
    # Note: USE_SINCOS makes the intel cpu slower under opencl
    if context.devices[0].type == cl.device_type.GPU:
        source = "#define USE_SINCOS\n" + source
    options = (get_fast_inaccurate_build_options(context.devices[0])
               if fast else [])
    program = cl.Program(context, source).build(options=options)
    return program


# Note: this helper is not referenced anywhere in this module; it expects
# to be bound as a method of an object with *dtype* and *program*
# attributes (note the *self* parameter).
def make_result(self, size):
    self.res = np.empty(size, dtype=self.dtype)
    self.res_b = cl.Buffer(self.program.context, mf.READ_WRITE, self.res.nbytes)
    return self.res, self.res_b


# for now, this returns one device in the context
# TODO: create a context that contains all devices on all platforms
class GpuEnvironment(object):
    """
    GPU context, with possibly many devices, and one queue per device.
    """
    def __init__(self):
        # find gpu context
        #self.context = cl.create_some_context()

        self.context = None
        if 'PYOPENCL_CTX' in os.environ:
            self._create_some_context()

        if not self.context:
            self.context = _get_default_context()

        # Byte boundary for data alignment
        #self.data_boundary = max(d.min_data_type_align_size
        #                         for d in self.context.devices)
        self.queues = [cl.CommandQueue(self.context, d)
                       for d in self.context.devices]
        self.compiled = {}

    def has_type(self, dtype):
        dtype = generate.F32 if dtype == 'fast' else np.dtype(dtype)
        return all(has_type(d, dtype) for d in self.context.devices)

    def _create_some_context(self):
        try:
            self.context = cl.create_some_context(interactive=False)
        except Exception as exc:
            warnings.warn(str(exc))
            warnings.warn("pyopencl.create_some_context() failed")
            warnings.warn("the environment variable 'PYOPENCL_CTX' might not be set correctly")

    def compile_program(self, name, source, dtype, fast=False):
        key = "%s-%s-%s" % (name, dtype, fast)
        if key not in self.compiled:
            #print("compiling", name)
            dtype = np.dtype(dtype)
            program = compile_model(self.context, source, dtype, fast)
            self.compiled[key] = program
        return self.compiled[key]

    def release_program(self, name):
        # Compiled programs are stored under "<name>-<dtype>-<fast>" keys,
        # so release every compiled variant of the named program.
        for key in [k for k in self.compiled if k.startswith(name + "-")]:
            self.compiled[key].release()
            del self.compiled[key]

def _get_default_context():
    default = None
    for platform in cl.get_platforms():
        for device in platform.get_devices():
            if device.type == cl.device_type.GPU:
                return cl.Context([device])
            if default is None:
                default = device

    if not default:
        raise RuntimeError("OpenCL device not found")

    return cl.Context([default])


class GpuModel(object):
    """
    GPU wrapper for a single model.

    *source* and *info* are the model source and interface as returned
    from :func:`generate.make`.

    *dtype* is the desired model precision. Any numpy dtype for single
    or double precision floats will do, such as 'f', 'float32' or 'single'
    for single and 'd', 'float64' or 'double' for double. Double precision
    is an optional extension which may not be available on all devices.
    Half precision ('float16', 'half') may be available on some devices.
    Fast precision ('fast') is a loose version of single precision, indicating
    that the compiler is allowed to take shortcuts.
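
    For example, to wrap a generated model in double precision (assuming
    *source* and *info* come from :func:`generate.make`):

        model = GpuModel(source, info, dtype='double')
        # compilation is deferred until the model is first called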
    """
    def __init__(self, source, info, dtype=generate.F32):
        self.info = info
        self.source = source
        self.dtype = generate.F32 if dtype == 'fast' else np.dtype(dtype)
        self.fast = (dtype == 'fast')
        self.program = None  # delay program creation

    def __getstate__(self):
        state = self.__dict__.copy()
        state['program'] = None
        return state

    def __setstate__(self, state):
        self.__dict__ = state.copy()

    def __call__(self, q_input):
        if self.dtype != q_input.dtype:
            raise TypeError("data is %s, kernel is %s"
                            % (q_input.dtype, self.dtype))
        if self.program is None:
            compiler = environment().compile_program
            self.program = compiler(self.info['name'], self.source, self.dtype,
                                    self.fast)
        kernel_name = generate.kernel_name(self.info, q_input.is_2D)
        kernel = getattr(self.program, kernel_name)
        return GpuKernel(kernel, self.info, q_input)

    def release(self):
        if self.program is not None:
            environment().release_program(self.info['name'])
            self.program = None

    def make_input(self, q_vectors):
        """
        Make q input vectors available to the model.

        Note that each model needs its own q vector even in the case of
        mixture models because some models may be OpenCL, some may be
        ctypes and some may be pure python.
        """
        return GpuInput(q_vectors, dtype=self.dtype)

# TODO: check that we don't need a destructor for buffers which go out of scope
class GpuInput(object):
    """
    Make q data available to the gpu.

    *q_vectors* is a list of q vectors, which will be *[q]* for 1-D data,
    and *[qx, qy]* for 2-D data. Internally, the vectors will be reallocated
    to get the best performance on OpenCL, which may involve shifting and
    stretching the array to better match the memory architecture. Additional
    points will be evaluated with *q=1e-3*.

    *dtype* is the data type for the q vectors. The data type should be
    set to match that of the kernel, which is an attribute of
    :class:`GpuModel`. Note that not all kernels support double
    precision, so even if the program was created for double precision,
    the *GpuModel.dtype* may be single precision.

    Call :meth:`release` when complete. Even if not called directly, the
    buffer will be released when the data object is freed.
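
    A minimal sketch for 1-D data (assuming *model* is a :class:`GpuModel`
    and the q range is illustrative):

        q = np.linspace(0.001, 0.5, 200)
        q_input = model.make_input([q])
        # ... use q_input with one or more kernels ...
        q_input.release()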
    """
    def __init__(self, q_vectors, dtype=generate.F32):
        env = environment()
        self.nq = q_vectors[0].size
        self.dtype = np.dtype(dtype)
        self.is_2D = (len(q_vectors) == 2)
        # TODO: stretch input based on get_warp()
        # not doing it now since warp depends on kernel, which is not known
        # at this point, so instead using 32, which is good on the set of
        # architectures tested so far.
        self.q_vectors = [_stretch_input(q, self.dtype, boundary=32)
                          for q in q_vectors]
        self.q_buffers = [
            cl.Buffer(env.context, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=q)
            for q in self.q_vectors
        ]
        self.global_size = [self.q_vectors[0].size]

    def release(self):
        for b in self.q_buffers:
            b.release()
        self.q_buffers = []

class GpuKernel(object):
    """
    Callable SAS kernel.

    *kernel* is the compiled OpenCL kernel to call.

    *info* is the module information.

    *q_input* is the :class:`GpuInput` q vectors at which the kernel
    should be evaluated.

    The resulting call method takes *fixed_pars*, a list of values for
    the fixed parameters to the kernel, and *pd_pars*, a list of
    (value, weight) vectors for the polydisperse parameters. *cutoff*
    determines the integration limits: any points with combined weight
    less than *cutoff* will not be calculated.

    Call :meth:`release` when done with the kernel instance.
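
    A sketch of a call with one polydisperse parameter (the values and
    weights here are illustrative; the parameter order comes from the
    model info):

        pd_radius = np.linspace(40., 60., 35)
        pd_weight = np.exp(-0.5*((pd_radius - 50.)/5.)**2)
        result = kernel(fixed_pars, [(pd_radius, pd_weight)])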
    """
    def __init__(self, kernel, info, q_input):
        self.q_input = q_input
        self.kernel = kernel
        self.info = info
        self.res = np.empty(q_input.nq, q_input.dtype)
        dim = '2d' if q_input.is_2D else '1d'
        self.fixed_pars = info['partype']['fixed-' + dim]
        self.pd_pars = info['partype']['pd-' + dim]

        # Inputs and outputs for each kernel call
        # Note: res may be shorter than res_b if global_size != nq
        env = environment()
        self.loops_b = [cl.Buffer(env.context, mf.READ_WRITE,
                                  2 * MAX_LOOPS * q_input.dtype.itemsize)
                        for _ in env.queues]
        self.res_b = [cl.Buffer(env.context, mf.READ_WRITE,
                                q_input.global_size[0] * q_input.dtype.itemsize)
                      for _ in env.queues]

    def __call__(self, fixed_pars, pd_pars, cutoff=1e-5):
        real = (np.float32 if self.q_input.dtype == generate.F32
                else np.float64 if self.q_input.dtype == generate.F64
                else np.float16 if self.q_input.dtype == generate.F16
                else np.float32)  # will never get here, so use np.float32

        device_num = 0
        queuei = environment().queues[device_num]
        res_bi = self.res_b[device_num]
        nq = np.uint32(self.q_input.nq)
        if pd_pars:
            cutoff = real(cutoff)
            loops_N = [np.uint32(len(p[0])) for p in pd_pars]
            loops = np.hstack(pd_pars)
            loops = np.ascontiguousarray(loops.T, self.q_input.dtype).flatten()
            #print("loops", loops_N, loops)

            if len(loops) > 2 * MAX_LOOPS:
                raise ValueError("too many polydispersity points")

            loops_bi = self.loops_b[device_num]
            cl.enqueue_copy(queuei, loops_bi, loops)
            loops_l = cl.LocalMemory(len(loops.data))
            #ctx = environment().context
            #loops_bi = cl.Buffer(ctx, mf.READ_ONLY|mf.COPY_HOST_PTR, hostbuf=loops)
            dispersed = [loops_bi, loops_l, cutoff] + loops_N
        else:
            dispersed = []
        fixed = [real(p) for p in fixed_pars]
        args = self.q_input.q_buffers + [res_bi, nq] + dispersed + fixed
        self.kernel(queuei, self.q_input.global_size, None, *args)
        cl.enqueue_copy(queuei, self.res, res_bi)

        return self.res

    def release(self):
        for b in self.loops_b:
            b.release()
        self.loops_b = []
        for b in self.res_b:
            b.release()
        self.res_b = []

    def __del__(self):
        self.release()