1 | """ |
---|
2 | GPU driver for C kernels |
---|
3 | |
---|
4 | There should be a single GPU environment running on the system. This |
---|
5 | environment is constructed on the first call to :func:`env`, and the |
---|
6 | same environment is returned on each call. |
---|
7 | |
---|
8 | After retrieving the environment, the next step is to create the kernel. |
---|
9 | This is done with a call to :meth:`GpuEnvironment.make_kernel`, which |
---|
10 | returns the type of data used by the kernel. |
---|
11 | |
---|
12 | Next a :class:`GpuData` object should be created with the correct kind |
---|
13 | of data. This data object can be used by multiple kernels, for example, |
---|
14 | if the target model is a weighted sum of multiple kernels. The data |
---|
15 | should include any extra evaluation points required to compute the proper |
---|
16 | data smearing. This need not match the square grid for 2D data if there |
---|
17 | is an index saying which q points are active. |
---|
18 | |
---|
19 | Together the GpuData, the program, and a device form a :class:`GpuKernel`. |
---|
20 | This kernel is used during fitting, receiving new sets of parameters and |
---|
21 | evaluating them. The output value is stored in an output buffer on the |
---|
22 | devices, where it can be combined with other structure factors and form |
---|
23 | factors and have instrumental resolution effects applied. |
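
A minimal sketch of the flow (*model_info* and *source* as returned by
:func:`generate.make_model_info` and :func:`generate.make_source`;
building *call_details* and *values* is normally handled by
:mod:`sasmodels.details`)::

    import numpy as np
    from sasmodels import generate

    model = GpuModel(source, model_info, dtype=generate.F32)
    q = np.logspace(-3, -1, 200)
    kernel = model.make_kernel([q])  # use [qx, qy] for 2-D data
    Iq = kernel(call_details, values, cutoff=0., magnetic=False)
    kernel.release()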

In order to use OpenCL for your models, you will need OpenCL drivers for
your machine. These should be available from your graphics card vendor.
Intel provides OpenCL drivers for CPUs as well as their integrated HD
graphics chipsets. AMD also provides drivers for Intel CPUs, but as of
this writing the performance is lacking compared to the Intel drivers.
NVIDIA combines drivers for CUDA and OpenCL in one package. The result
is a bit messy if you have multiple drivers installed. You can see which
drivers are available by starting python and running::

    import pyopencl as cl
    cl.create_some_context(interactive=True)

Once you have done that, it will show the available drivers from which
you can select. It will then tell you that you can use these drivers
automatically by setting the SAS_OPENCL environment variable, which is
equivalent to PYOPENCL_CTX but does not conflict with other pyopencl
programs.
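
For example, to select a driver without the interactive prompt (the
device string "0" here is hypothetical; use one of the choices reported
by the interactive context above, or "none" to disable OpenCL)::

    import os
    os.environ["SAS_OPENCL"] = "0"  # e.g., platform 0; "0:1" for device 1

Set SAS_OPENCL before the first call to :func:`environment`, or call
:func:`reset_environment` after changing it.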

Some graphics cards have multiple devices on the same card. You cannot
yet use both of them concurrently to evaluate models, but you can run
the program twice using a different device for each session.

OpenCL kernels are compiled when needed by the device driver. Some
drivers produce compiler output even when there is no error. You
can see the output by setting PYOPENCL_COMPILER_OUTPUT=1. It should be
harmless, albeit annoying.
"""
from __future__ import print_function

import os
import warnings
import logging
import time

try:
    # CRUFT: time.clock() was removed in python 3.8; wall-clock time is
    # also more appropriate than CPU time for pacing the polling naps
    # in GpuKernel.__call__.
    from time import perf_counter as clock
except ImportError:  # python < 3.3
    from time import clock

import numpy as np  # type: ignore


# Attempt to set up OpenCL. This may fail if the pyopencl package is not
# installed or if it is installed but there are no devices available.
try:
    import pyopencl as cl  # type: ignore
    from pyopencl import mem_flags as mf
    from pyopencl.characterize import get_fast_inaccurate_build_options
    # Ask OpenCL for the default context so that we know that one exists.
    cl.create_some_context(interactive=False)
    HAVE_OPENCL = True
    OPENCL_ERROR = ""
except Exception as exc:
    HAVE_OPENCL = False
    OPENCL_ERROR = str(exc)

from . import generate
from .kernel import KernelModel, Kernel

# pylint: disable=unused-import
try:
    from typing import Tuple, Callable, Any, List, Dict
    from .modelinfo import ModelInfo
    from .details import CallDetails
except ImportError:
    pass
# pylint: enable=unused-import

# CRUFT: pyopencl < 2017.1 (as of June 2016) needs quotes around the include path.
def quote_path(v):
    """
    Quote the path if it is not already quoted.

    If v starts with '-', then assume that it is a -I option or similar
    and do not quote it. This is fragile: -Ipath with space needs to
    be quoted.
    """
    return '"'+v+'"' if v and ' ' in v and not v[0] in "\"'-" else v
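
# Example (sketch): paths containing spaces are quoted; anything starting
# with '-' (e.g. a -I option) or already quoted passes through unchanged.
#     quote_path('C:/Program Files/include') -> '"C:/Program Files/include"'
#     quote_path('-I/usr/include')           -> '-I/usr/include'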

def fix_pyopencl_include():
    """
    Monkey patch pyopencl to allow spaces in include file path.
    """
    import pyopencl as cl
    if hasattr(cl, '_DEFAULT_INCLUDE_OPTIONS'):
        cl._DEFAULT_INCLUDE_OPTIONS = [
            quote_path(v) for v in cl._DEFAULT_INCLUDE_OPTIONS]

if HAVE_OPENCL:
    fix_pyopencl_include()

# The max loops number is limited by the amount of local memory available
# on the device. You don't want to make this value too big because it will
# waste resources, nor too small because it may interfere with users trying
# to do their polydispersity calculations. A value of 2048 should be much
# larger than necessary given that cost grows as npts^k where k is the
# number of polydisperse parameters.
MAX_LOOPS = 2048


# Pragmas to enable OpenCL features. Be sure to protect them so that they
# still compile even if OpenCL is not present.
_F16_PRAGMA = """\
#if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp16)
# pragma OPENCL EXTENSION cl_khr_fp16: enable
#endif
"""

_F64_PRAGMA = """\
#if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp64)
# pragma OPENCL EXTENSION cl_khr_fp64: enable
#endif
"""

def use_opencl():
    # type: () -> bool
    """
    Return True if OpenCL is available and SAS_OPENCL is not set to "none".
    """
    return HAVE_OPENCL and os.environ.get("SAS_OPENCL", "").lower() != "none"

ENV = None
def reset_environment():
    """
    Call to create a new OpenCL context, such as after a change to SAS_OPENCL.
    """
    global ENV
    ENV = GpuEnvironment() if use_opencl() else None

def environment():
    # type: () -> "GpuEnvironment"
    """
    Returns a singleton :class:`GpuEnvironment`.

    This provides an OpenCL context and one queue per device.
    """
    if ENV is None:
        if not HAVE_OPENCL:
            raise RuntimeError("OpenCL startup failed with ***"
                               + OPENCL_ERROR + "***; using C compiler instead")
        reset_environment()
        if ENV is None:
            raise RuntimeError("SAS_OPENCL=None in environment")
    return ENV
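
# Example (sketch): fetch the shared environment and ask for a queue
# matching a given precision; get_queue() returns None when no device
# supports the requested dtype.
#     env = environment()
#     queue = env.get_queue(np.dtype('float32'))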

def has_type(device, dtype):
    # type: (cl.Device, np.dtype) -> bool
    """
    Return true if device supports the requested precision.
    """
    if dtype == generate.F32:
        return True
    elif dtype == generate.F64:
        return "cl_khr_fp64" in device.extensions
    elif dtype == generate.F16:
        return "cl_khr_fp16" in device.extensions
    else:
        return False

def get_warp(kernel, queue):
    # type: (cl.Kernel, cl.CommandQueue) -> int
    """
    Return the size of an execution batch for *kernel* running on *queue*.
    """
    return kernel.get_work_group_info(
        cl.kernel_work_group_info.PREFERRED_WORK_GROUP_SIZE_MULTIPLE,
        queue.device)

def _stretch_input(vector, dtype, extra=1e-3, boundary=32):
    # type: (np.ndarray, np.dtype, float, int) -> np.ndarray
    """
    Stretch an input vector to the correct boundary.

    Performance on the kernels can drop by a factor of two or more if the
    number of values to compute does not fall on a nice power of two
    boundary. The trailing additional vector elements are given a
    value of *extra*, and so f(*extra*) will be computed for each of
    them. The returned array will thus be a subset of the computed array.

    *boundary* should be a power of 2 which is at least 32 for good
    performance on current platforms (as of Jan 2015). It should
    probably be the max of get_warp(kernel, queue) and
    device.min_data_type_align_size//4.
    """
    remainder = vector.size % boundary
    if remainder != 0:
        size = vector.size + (boundary - remainder)
        vector = np.hstack((vector, [extra] * (size - vector.size)))
    return np.ascontiguousarray(vector, dtype=dtype)
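
# Example: a 100-element vector is padded up to the next multiple of 32,
# so _stretch_input(np.arange(100.), np.dtype('float32')) has size 128,
# with the trailing 28 entries set to the default *extra* value 1e-3.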


def compile_model(context, source, dtype, fast=False):
    # type: (cl.Context, str, np.dtype, bool) -> cl.Program
    """
    Build a model to run on the gpu.

    Returns the compiled program. Raises RuntimeError if the requested
    *dtype* is not supported by all devices in the context (for example,
    float64 requires the cl_khr_fp64 extension).
    """
    dtype = np.dtype(dtype)
    if not all(has_type(d, dtype) for d in context.devices):
        raise RuntimeError("%s not supported for devices"%dtype)

    source_list = [generate.convert_type(source, dtype)]

    if dtype == generate.F16:
        source_list.insert(0, _F16_PRAGMA)
    elif dtype == generate.F64:
        source_list.insert(0, _F64_PRAGMA)

    # Note: USE_SINCOS makes the intel cpu slower under opencl.
    if context.devices[0].type == cl.device_type.GPU:
        source_list.insert(0, "#define USE_SINCOS\n")
    options = (get_fast_inaccurate_build_options(context.devices[0])
               if fast else [])
    source = "\n".join(source_list)
    program = cl.Program(context, source).build(options=options)
    #print("done with", program)
    return program


# For now, this returns one device in the context.
# TODO: create a context that contains all devices on all platforms
class GpuEnvironment(object):
    """
    GPU context, with possibly many devices, and one queue per device.
    """
    def __init__(self):
        # type: () -> None
        # Find the GPU context.
        #self.context = cl.create_some_context()

        self.context = None
        if 'SAS_OPENCL' in os.environ:
            # Copy SAS_OPENCL into PYOPENCL_CTX so pyopencl can create
            # the context from it.
            os.environ["PYOPENCL_CTX"] = os.environ["SAS_OPENCL"]
        if 'PYOPENCL_CTX' in os.environ:
            self._create_some_context()

        if not self.context:
            self.context = _get_default_context()

        # Byte boundary for data alignment.
        #self.data_boundary = max(d.min_data_type_align_size
        #                         for d in self.context.devices)
        self.queues = [cl.CommandQueue(context, context.devices[0])
                       for context in self.context]
        self.compiled = {}

    def has_type(self, dtype):
        # type: (np.dtype) -> bool
        """
        Return True if any context has a device supporting the given type.
        """
        return any(has_type(d, dtype)
                   for context in self.context
                   for d in context.devices)

    def get_queue(self, dtype):
        # type: (np.dtype) -> cl.CommandQueue
        """
        Return a command queue for the kernels of type dtype, or None if
        no device supports that type.
        """
        for context, queue in zip(self.context, self.queues):
            if all(has_type(d, dtype) for d in context.devices):
                return queue

    def get_context(self, dtype):
        # type: (np.dtype) -> cl.Context
        """
        Return an OpenCL context for the kernels of type dtype, or None
        if no device supports that type.
        """
        for context in self.context:
            if all(has_type(d, dtype) for d in context.devices):
                return context

    def _create_some_context(self):
        # type: () -> None
        """
        Protected call to cl.create_some_context without interactivity. Use
        this if SAS_OPENCL is set in the environment. Sets the *context*
        attribute.
        """
        try:
            self.context = [cl.create_some_context(interactive=False)]
        except Exception as exc:
            warnings.warn(str(exc))
            warnings.warn("pyopencl.create_some_context() failed")
            warnings.warn("the environment variable 'SAS_OPENCL' might not be set correctly")

    def compile_program(self, name, source, dtype, fast, timestamp):
        # type: (str, str, np.dtype, bool, float) -> cl.Program
        """
        Compile the program for the device in the given context.
        """
        # Note: PyOpenCL caches based on md5 hash of source, options and
        # device, so we don't really need to cache things for ourselves.
        # I'll do so anyway just to save some data munging time.
        tag = generate.tag_source(source)
        key = "%s-%s-%s%s"%(name, dtype, tag, ("-fast" if fast else ""))
        # Check timestamp on program.
        program, program_timestamp = self.compiled.get(key, (None, np.inf))
        if program_timestamp < timestamp:
            del self.compiled[key]
        if key not in self.compiled:
            context = self.get_context(dtype)
            logging.info("building %s for OpenCL %s", key,
                         context.devices[0].name.strip())
            program = compile_model(self.get_context(dtype),
                                    str(source), dtype, fast)
            self.compiled[key] = (program, timestamp)
        return program

def _get_default_context():
    # type: () -> List[cl.Context]
    """
    Get an OpenCL context, preferring GPU over CPU, and preferring Intel
    drivers over AMD drivers.
    """
    # Note: on mobile devices there is automatic clock scaling if either the
    # CPU or the GPU is underutilized; probably doesn't affect us, but if it
    # did, it would mean that putting a busy loop on the CPU while the GPU
    # is running may increase throughput.
    #
    # MacBook Pro, base install:
    #     {'Apple': [Intel CPU, NVIDIA GPU]}
    # MacBook Pro, base install:
    #     {'Apple': [Intel CPU, Intel GPU]}
    # 2 x NVIDIA 295 with Intel and NVIDIA opencl drivers installed:
    #     {'Intel': [CPU], 'NVIDIA': [GPU, GPU, GPU, GPU]}
    gpu, cpu = None, None
    for platform in cl.get_platforms():
        # AMD provides a much weaker CPU driver than Intel/Apple, so avoid
        # it. If someone has bothered to install the AMD/NVIDIA drivers,
        # prefer them over the integrated graphics driver that may have
        # been supplied with the CPU chipset.
        preferred_cpu = (platform.vendor.startswith('Intel')
                         or platform.vendor.startswith('Apple'))
        preferred_gpu = (platform.vendor.startswith('Advanced')
                         or platform.vendor.startswith('NVIDIA'))
        for device in platform.get_devices():
            if device.type == cl.device_type.GPU:
                # If the existing type is not GPU then it will be CUSTOM
                # or ACCELERATOR, so don't override it.
                if gpu is None or (preferred_gpu and gpu.type == cl.device_type.GPU):
                    gpu = device
            elif device.type == cl.device_type.CPU:
                if cpu is None or preferred_cpu:
                    cpu = device
            else:
                # System has cl.device_type.ACCELERATOR or cl.device_type.CUSTOM;
                # an Intel Phi, for example, registers as an accelerator.
                # Since the user installed a custom device on their system
                # and went through the pain of sorting out OpenCL drivers for
                # it, let's assume they really do want to use it as their
                # primary compute device.
                gpu = device

    # Order the devices by gpu then by cpu; when searching for an available
    # device by data type they will be checked in this order, which means
    # that if the gpu supports double then the cpu will never be used (though
    # we may make it possible to explicitly request the cpu at some point).
    devices = []
    if gpu is not None:
        devices.append(gpu)
    if cpu is not None:
        devices.append(cpu)
    return [cl.Context([d]) for d in devices]


class GpuModel(KernelModel):
    """
    GPU wrapper for a single model.

    *source* and *model_info* are the model source and interface as returned
    from :func:`generate.make_source` and :func:`generate.make_model_info`.

    *dtype* is the desired model precision. Any numpy dtype for single
    or double precision floats will do, such as 'f', 'float32' or 'single'
    for single and 'd', 'float64' or 'double' for double. Double precision
    is an optional extension which may not be available on all devices.
    Half precision ('float16', 'half') may be available on some devices.
    Fast precision ('fast') is a loose version of single precision, indicating
    that the compiler is allowed to take shortcuts.
    """
    def __init__(self, source, model_info, dtype=generate.F32, fast=False):
        # type: (Dict[str, str], ModelInfo, np.dtype, bool) -> None
        self.info = model_info
        self.source = source
        self.dtype = dtype
        self.fast = fast
        self.program = None  # delay program creation
        self._kernels = None

    def __getstate__(self):
        # type: () -> Tuple[ModelInfo, str, np.dtype, bool]
        return self.info, self.source, self.dtype, self.fast

    def __setstate__(self, state):
        # type: (Tuple[ModelInfo, str, np.dtype, bool]) -> None
        self.info, self.source, self.dtype, self.fast = state
        self.program = None
        self._kernels = None

    def make_kernel(self, q_vectors):
        # type: (List[np.ndarray]) -> "GpuKernel"
        if self.program is None:
            compile_program = environment().compile_program
            timestamp = generate.ocl_timestamp(self.info)
            self.program = compile_program(
                self.info.name,
                self.source['opencl'],
                self.dtype,
                self.fast,
                timestamp)
            variants = ['Iq', 'Iqxy', 'Imagnetic']
            names = [generate.kernel_name(self.info, k) for k in variants]
            kernels = [getattr(self.program, k) for k in names]
            self._kernels = dict((k, v) for k, v in zip(variants, kernels))
        is_2d = len(q_vectors) == 2
        if is_2d:
            kernel = [self._kernels['Iqxy'], self._kernels['Imagnetic']]
        else:
            kernel = [self._kernels['Iq']]*2
        return GpuKernel(kernel, self.dtype, self.info, q_vectors)

    def release(self):
        # type: () -> None
        """
        Free the resources associated with the model.
        """
        if self.program is not None:
            self.program = None

    def __del__(self):
        # type: () -> None
        self.release()

# TODO: Check that we don't need a destructor for buffers which go out of scope.
class GpuInput(object):
    """
    Make q data available to the gpu.

    *q_vectors* is a list of q vectors, which will be *[q]* for 1-D data,
    and *[qx, qy]* for 2-D data. Internally, the vectors will be reallocated
    to get the best performance on OpenCL, which may involve shifting and
    stretching the array to better match the memory architecture. The array
    is padded to a multiple of 32 for 1-D data (16 for 2-D); values computed
    for the padding points are ignored.

    *dtype* is the data type for the q vectors. The data type should be
    set to match that of the kernel, which is an attribute of
    :class:`GpuModel`. Note that not all kernels support double
    precision, so even if the program was created for double precision,
    the *GpuModel.dtype* may be single precision.

    Call :meth:`release` when complete. Even if not called directly, the
    buffer will be released when the data object is freed.
    """
    def __init__(self, q_vectors, dtype=generate.F32):
        # type: (List[np.ndarray], np.dtype) -> None
        # TODO: Do we ever need double precision q?
        env = environment()
        self.nq = q_vectors[0].size
        self.dtype = np.dtype(dtype)
        self.is_2d = (len(q_vectors) == 2)
        # TODO: Stretch input based on get_warp().
        # Not doing it now since warp depends on kernel, which is not known
        # at this point, so instead using 32, which is good on the set of
        # architectures tested so far.
        if self.is_2d:
            # Note: 16 rather than 15 because the result is 1 longer than
            # the input.
            width = ((self.nq+16)//16)*16
            self.q = np.empty((width, 2), dtype=dtype)
            self.q[:self.nq, 0] = q_vectors[0]
            self.q[:self.nq, 1] = q_vectors[1]
        else:
            # Note: 32 rather than 31 because the result is 1 longer than
            # the input.
            width = ((self.nq+32)//32)*32
            self.q = np.empty(width, dtype=dtype)
            self.q[:self.nq] = q_vectors[0]
        self.global_size = [self.q.shape[0]]
        context = env.get_context(self.dtype)
        #print("creating inputs of size", self.global_size)
        self.q_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                             hostbuf=self.q)

    def release(self):
        # type: () -> None
        """
        Free the memory.
        """
        if self.q_b is not None:
            self.q_b.release()
            self.q_b = None

    def __del__(self):
        # type: () -> None
        self.release()

class GpuKernel(Kernel):
    """
    Callable SAS kernel.

    *kernel* is the pair of compiled cl.Kernel objects to call, as built
    by :meth:`GpuModel.make_kernel`: the regular kernel and the magnetic
    kernel (the same kernel twice for 1-D data).

    *model_info* is the module information

    *q_vectors* is the q vectors at which the kernel should be evaluated

    *dtype* is the kernel precision

    The resulting call method takes *call_details* describing the
    polydispersity loops, *values* holding the parameter values, *cutoff*
    determining the integration limits (any points with combined weight
    less than *cutoff* will not be calculated), and *magnetic* selecting
    between the regular and magnetic kernels.

    Call :meth:`release` when done with the kernel instance.
    """
    def __init__(self, kernel, dtype, model_info, q_vectors):
        # type: (List[cl.Kernel], np.dtype, ModelInfo, List[np.ndarray]) -> None
        q_input = GpuInput(q_vectors, dtype)
        self.kernel = kernel
        self.info = model_info
        self.dtype = dtype
        self.dim = '2d' if q_input.is_2d else '1d'
        # Plus one for the polydispersity normalization value.
        self.result = np.empty(q_input.nq+1, dtype)

        # Inputs and outputs for each kernel call.
        # Note: res may be shorter than res_b if global_size != nq.
        env = environment()
        self.queue = env.get_queue(dtype)

        self.result_b = cl.Buffer(self.queue.context, mf.READ_WRITE,
                                  q_input.global_size[0] * dtype.itemsize)
        self.q_input = q_input  # allocated by GpuInput above

        self._need_release = [self.result_b, self.q_input]
        self.real = (np.float32 if dtype == generate.F32
                     else np.float64 if dtype == generate.F64
                     else np.float16 if dtype == generate.F16
                     else np.float32)  # will never get here, so use np.float32

    def __call__(self, call_details, values, cutoff, magnetic):
        # type: (CallDetails, np.ndarray, float, bool) -> np.ndarray
        context = self.queue.context
        # Arrange data transfer to card.
        details_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                              hostbuf=call_details.buffer)
        values_b = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                             hostbuf=values)

        kernel = self.kernel[1 if magnetic else 0]
        args = [
            np.uint32(self.q_input.nq), None, None,
            details_b, values_b, self.q_input.q_b, self.result_b,
            self.real(cutoff),
        ]
        #print("Calling OpenCL")
        #call_details.show(values)
        # Call the kernel and retrieve the results.
        wait_for = None
        last_nap = clock()
        step = 1000000//self.q_input.nq + 1
        for start in range(0, call_details.num_eval, step):
            stop = min(start + step, call_details.num_eval)
            #print("queuing", start, stop)
            args[1:3] = [np.int32(start), np.int32(stop)]
            wait_for = [kernel(self.queue, self.q_input.global_size, None,
                               *args, wait_for=wait_for)]
            if stop < call_details.num_eval:
                # Allow other processes to run.
                wait_for[0].wait()
                current_time = clock()
                if current_time - last_nap > 0.5:
                    time.sleep(0.05)
                    last_nap = current_time
        cl.enqueue_copy(self.queue, self.result, self.result_b)
        #print("result", self.result)

        # Free the transfer buffers.
        for v in (details_b, values_b):
            if v is not None:
                v.release()

        pd_norm = self.result[self.q_input.nq]
        scale = values[0]/(pd_norm if pd_norm != 0.0 else 1.0)
        background = values[1]
        #print("scale", scale, values[0], self.result[self.q_input.nq], background)
        return scale*self.result[:self.q_input.nq] + background
        # return self.result[:self.q_input.nq]

    def release(self):
        # type: () -> None
        """
        Release resources associated with the kernel.
        """
        for v in self._need_release:
            v.release()
        self._need_release = []

    def __del__(self):
        # type: () -> None
        self.release()