Given the apparent lack of a 2D histogram for CUDA (none that I could find... pointers welcome), I'm trying to implement one myself with pyCUDA.
Here's what the histogram looks like (computed with Numpy):
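The image itself isn't reproduced here, but a minimal sketch of the NumPy computation I'm comparing against might look like the following (the array names and the point count N are my stand-ins; the bin counts, channel count, and range match the kernel arguments below):

import numpy as np

# Hypothetical inputs: N points with 4 weight channels each
N, num_chans = 100_000, 4
x = np.random.uniform(-10, 10, N).astype(np.float32)
y = np.random.uniform(-10, 10, N).astype(np.float32)
w = np.random.rand(N, num_chans).astype(np.float32)

# One weighted 2D histogram per channel, 50x50 bins over [-10, 10)^2.
# Passing (y, x) puts the y bins on the first axis, matching the
# (ybin * xres + xbin) layout used in the kernel.
hist = np.stack(
    [np.histogram2d(y, x, bins=50, range=[[-10, 10], [-10, 10]],
                    weights=w[:, c])[0]
     for c in range(num_chans)],
    axis=-1,
)  # shape (50, 50, 4): (ybin, xbin, channel)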
Here's what I have so far:
code = '''
__global__ void histogram2d(const float *in_x, const float *in_y, const float *in_w, float *out) {{
    int start = blockIdx.x * blockDim.x + threadIdx.x;
    float *block_out = &out[{xres} * {yres} * {num_chans} * blockIdx.x];
    for(int i = 0; i < {length}; i++) {{
        float x = in_x[start + i];
        float y = in_y[start + i];
        int w_idx = (start + i) * {num_chans};
        int xbin = (int) (((x - {xmin}) / {xptp}) * {xres});
        int ybin = (int) (((y - {ymin}) / {yptp}) * {yres});
        if (0 <= xbin && xbin < {xres} && 0 <= ybin && ybin < {yres}) {{
            for(int c = 0; c < {num_chans}; c++) {{
                atomicAdd(&block_out[(ybin * {xres} + xbin) * {num_chans} + c], in_w[w_idx + c]);
            }}
        }}
    }}
}}
'''.format(**args)
With my current arguments substituted in, that renders to:
__global__ void histogram2d(const float *in_x, const float *in_y, const float *in_w, float *out) {
    int start = blockIdx.x * blockDim.x + threadIdx.x;
    float *block_out = &out[50 * 50 * 4 * blockIdx.x];
    for(int i = 0; i < 100; i++) {
        float x = in_x[start + i];
        float y = in_y[start + i];
        int w_idx = (start + i) * 4;
        int xbin = (int) (((x - -10.0) / 20.0) * 50);
        int ybin = (int) (((y - -10.0) / 20.0) * 50);
        if (0 <= xbin && xbin < 50 && 0 <= ybin && ybin < 50) {
            for(int c = 0; c < 4; c++) {
                atomicAdd(&block_out[(ybin * 50 + xbin) * 4 + c], in_w[w_idx + c]);
            }
        }
    }
}
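For context, this is roughly how I compile and launch the kernel from Python (a simplified sketch; the launch configuration and input arrays are placeholders here, the real values are in the notebook):

import numpy as np
import pycuda.autoinit
import pycuda.driver as drv
from pycuda.compiler import SourceModule

mod = SourceModule(code)
histogram2d = mod.get_function("histogram2d")

num_blocks, block_dim = 64, 64  # assumed launch configuration
# One 50*50*4 output slab per block, zero-initialized since the
# kernel accumulates into it with atomicAdd
out = np.zeros(num_blocks * 50 * 50 * 4, dtype=np.float32)

histogram2d(
    drv.In(in_x), drv.In(in_y), drv.In(in_w), drv.InOut(out),
    block=(block_dim, 1, 1), grid=(num_blocks, 1),
)

# Sum the per-block partial histograms into the final (50, 50, 4) result
hist_gpu = out.reshape(num_blocks, 50, 50, 4).sum(axis=0)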
Something seems to be wrong with the indexing, but I haven't done much raw CUDA before, so I can't tell what it is. This is what I believe to be the equivalent Python:
def slow_hist(in_x, in_y, in_w, out, blockx, blockdimx, threadx):
    start = blockx * blockdimx + threadx
    # per-block offset into out, mirroring block_out in the kernel
    block_out_addr = args['xres'] * args['yres'] * args['num_chans'] * blockx
    for i in range(args['length']):
        x = in_x[start + i]
        y = in_y[start + i]
        w_idx = (start + i) * args['num_chans']
        xbin = int(((x - args['xmin']) / args['xptp']) * args['xres'])
        ybin = int(((y - args['ymin']) / args['yptp']) * args['yres'])
        if 0 <= xbin < args['xres'] and 0 <= ybin < args['yres']:
            for c in range(args['num_chans']):
                out[block_out_addr + (ybin * args['xres'] + xbin) * args['num_chans'] + c] += in_w[w_idx + c]
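To compare against the kernel I run this emulation sequentially over every (block, thread) pair in the launch (num_blocks and block_dim here are the same stand-ins as above; the start + i indexing mirrors the kernel exactly, including whatever is wrong with it):

# Emulate the whole launch on the CPU, one (block, thread) pair at a time
out_cpu = np.zeros(num_blocks * args['xres'] * args['yres'] * args['num_chans'],
                   dtype=np.float32)
for blockx in range(num_blocks):
    for threadx in range(block_dim):
        slow_hist(in_x, in_y, in_w, out_cpu, blockx, block_dim, threadx)

# Collapse the per-block slabs into one (yres, xres, num_chans) histogram
hist_cpu = out_cpu.reshape(num_blocks, args['yres'], args['xres'],
                           args['num_chans']).sum(axis=0)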
All of the code, including these images, is visible at the Github page of this notebook (this cell is at the bottom).
What am I doing wrong in this CUDA code? I've tried lots of small tweaks (shifting the atomicAdd addresses by 1, 4, 8, 16, transposing the output, and so on), but it seems I'm missing something subtle, probably in how the pointer arithmetic works. Any help would be greatly appreciated.