我有一个程序来计算给定数据点(pos)对其余数据的潜力/力。它最初是用Cython编写的,我尝试使用PyOpenCL(在我的2013 Macbook Pro上将设备设置为Intel(R)Core(TM)i7-4750HQ CPU @ 2.00GHz)希望提高速度,但结果实际上很多比Cython慢。此外,Cython版本是双精度,而CL只是浮动。结果证实是相同的。
ipython笔记本如下,对于2mil x 2数据,PyOpenCL需要176ms而Cython只使用82ms。有没有办法优化和减少开销?非常感谢
from __future__ import division
import numpy as np
import pyopencl as cl
import pyopencl.array
import math
import time
%load_ext pyopencl.ipython_ext
%load_ext Cython
%pylab inline
# prepare data
datad = np.random.rand(2000000,2)-[0.5, 0.5] # Double
data = datad.astype(np.float32)
N, dim = data.shape[0], data.shape[1]
sigma = 0.04
i = 2
pos = np.array(data[i,:]) # float
posd = np.array(datad[i,:]) #double
dt = 0.005
resistant = 0.9995
kernelsource = """
__kernel void forceFinder(
const int N,
const int dim,
const float sigma,
__global float* datacl,
__global float* poscl,
__global float* res)
{
int i = get_global_id(0); // Global id;
float f_sum ;
int k;
float sigma2 = sigma * sigma;
if (i < N) {
f_sum = 0.0;
for (k = 0; k < dim; k++)
{
f_sum += (poscl[k] - datacl[i * dim + k]) * (poscl[k] - datacl[i * dim + k]);
}
for (k = 0; k < dim; k++)
{
res[i * dim + k] = (datacl[i * dim + k] - poscl[k]) * exp(-f_sum/sigma2)/sigma2;
}
}
}
"""
# Setup PyOpenCl
platform = cl.get_platforms()[0]
device = platform.get_devices()[0] # Get the GPU ID
ctx = cl.Context([device]) # Tell CL to use GPU
queue = cl.CommandQueue(ctx) # Create a command queue for the target device.
program = cl.Program(ctx, kernelsource).build()
size = N * dim
datacl = data.reshape((size,))
rescl = np.empty(size).astype(np.float32)
rescl.fill(0.0)
datacl_buf = cl.Buffer(ctx, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf = datacl)
pos_buf = cl.Buffer(ctx, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf = pos)
rescl_buf = cl.Buffer(ctx, cl.mem_flags.WRITE_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf = rescl)
forceFinder = program.forceFinder
forceFinder.set_scalar_arg_dtypes([np.uint32, np.uint32, np.float32, None, None, None])
globalrange = (N, dim)
localrange = None
# Run CL
t0 = time.time()
forceFinder(queue, globalrange, localrange, N, dim, sigma, datacl_buf, \
pos_buf, rescl_buf)
queue.finish()
cl.enqueue_copy(queue, rescl, rescl_buf)
result = rescl.reshape((N,dim))
t1 = time.time()
print (t1-t0)
# Now Cython
%%cython
cimport numpy as np
import numpy as np
from libc.stdlib cimport rand, malloc, free
from libc.math cimport exp
def PTSM(np.ndarray[np.float64_t, ndim = 1] position, np.ndarray[np.float64_t, ndim = 2] X,\
double sigma=0.25,\
double dt=0.01, double r=0.99, int Nsamp=1000):
cdef int N, dim
N, dim = X.shape[0], X.shape[1]
cdef int i,j, steps # These 3 are for iterations.
cdef double sigma2
cdef double force1p_sum
cdef double *force = <double *>malloc(dim * sizeof(double))
sigma2 = sigma * sigma
#--------------------
# Force
# for steps in range(Nsamp):
for i in range(dim):
force[i] = 0
for j in range (N):
for i in range (dim):
force1p_sum += (position[i] - X[j,i]) * (position[i] - X[j,i])
for i in range (dim):
force[i] += ( X[j,i] - position[i]) * exp(- force1p_sum /sigma2) / sigma2
force1p_sum = 0
resultForce = np.zeros(dim)
for i in range(dim):
resultForce[i] = force[i]
free(force)
return resultForce
t0 = time.time()
f = PTSM(posd, datad, sigma, dt, resistant)
t1 = time.time()
print (t1 - t0)
答案 0 :(得分:2)
您的全球范围是globalrange = (N, dim)
。在内部,您只使用get_global_id(0)
并在for循环中循环dim
。
如此有效地使用N*dim*dim
vs N*dim
,一个额外的暗淡维度操作不影响输出(2个内部线程正在执行相同的工作并将相同的内容写入输出)。这是有道理的:176ms vs 82ms
几乎翻倍。使用这两种方法可以获得相同的硬件和相同的设备利用率,因此看似合乎逻辑。
此外,还有一些优化:
我不会在复制之前使用queue.finish()
。因为这会导致CL设备的隐式阻止。
此:
f_sum += (poscl[k] - datacl[i * dim + k]) * (poscl[k] - datacl[i * dim + k]);
f_sum += pown(poscl[k] - datacl[i * dim + k]), 2);
更改数据形状以使其具有合并访问权限。目前,每个工作项在小步幅中访问i*k
的矩阵。虽然以昏暗的市长顺序形成的矩阵可以提供合并的访问。将其从i*k
更改为k*i
。
poscl
应该是常量且只读。
您应该先计算poscl-datacl
。将其存储在专用阵列中。然后在2个循环中使用它。避免额外的全局读取。
模式代码(未测试):注意:我没有添加矩阵排序更改。
# prepare data
datad = np.random.rand(2000000,2)-[0.5, 0.5] # Double
data = datad.astype(np.float32)
N, dim = data.shape[0], data.shape[1]
sigma = 0.04
i = 2
pos = np.array(data[i,:]) # float
posd = np.array(datad[i,:]) #double
dt = 0.005
resistant = 0.9995
kernelsource = """
__kernel void forceFinder(
const int N,
const int dim,
const float sigma,
__global float* datacl,
__constant float* poscl,
__global float* res)
{
int i = get_global_id(0); // Global id;
float f_sum ;
int k;
float sigma2 = sigma * sigma;
if (i < N) {
f_sum = 0.0;
float t[2]; //INCREASE TO THE MAX "DIM" POSSIBLE
for (k = 0; k < dim; k++){
t = poscl[k] - datacl[i * dim + k];
}
for (k = 0; k < dim; k++){
f_sum += pown(t,2);
}
for (k = 0; k < dim; k++){
res[i * dim + k] = (-t) * exp(-f_sum/sigma2)/sigma2;
}
}
}
"""
# Setup PyOpenCl
platform = cl.get_platforms()[0]
device = platform.get_devices()[0] # Get the GPU ID
ctx = cl.Context([device]) # Tell CL to use GPU
queue = cl.CommandQueue(ctx) # Create a command queue for the target device.
program = cl.Program(ctx, kernelsource).build()
size = N * dim
datacl = data.reshape((size,))
rescl = np.empty(size).astype(np.float32)
rescl.fill(0.0)
datacl_buf = cl.Buffer(ctx, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf = datacl)
pos_buf = cl.Buffer(ctx, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf = pos)
rescl_buf = cl.Buffer(ctx, cl.mem_flags.WRITE_ONLY, rescl.nbytes)
forceFinder = program.forceFinder
forceFinder.set_scalar_arg_dtypes([np.uint32, np.uint32, np.float32, None, None, None])
globalrange = (N, 1)
localrange = None
# Run CL
t0 = time.time()
forceFinder(queue, globalrange, localrange, N, dim, sigma, datacl_buf, \
pos_buf, rescl_buf)
cl.enqueue_copy(queue, rescl, rescl_buf)
queue.finish()
result = rescl.reshape((N,dim))
t1 = time.time()
print (t1-t0)