I am launching a kernel with a single linear block of 512 threads. Associated with each thread are six double-precision values (two 3-element vectors) that I want to store in shared memory, for a total of 512 * 6 * 8 = 24576 bytes. I want to create pointers into the middle of the shared array so that each thread's vectors line up, like this:
__global__ void my_kernel(double *global_data) {
    extern __shared__ double shr[];
    int id = threadIdx.x;
    double *X = &shr[id * 6];
    double *Y = &shr[id * 6 + 3];
    // Some arithmetic to set X[0:3] and Y[0:3]
    // Now I have a small for loop to compute something for each thread
    for (int i = 0; i < 3; i++) {
        for (int j = 0; j < 3; j++) {
            // Some computations involving the X and Y vectors
        }
    }
}
My problem is with accessing the values in X and Y via the loop indices. I cannot explain the following behaviour during the first loop iteration:
(cuda-gdb) cuda thread
thread (0,0,0)
(cuda-gdb) p shr[0]
$1 = 0.62293193093894383
(cuda-gdb) p &shr[0]
$2 = (@shared double *) 0x0
(cuda-gdb) p X[0]
$3 = 0.62293193093894383
(cuda-gdb) p &X[0]
$4 = (@generic double *) 0x1000000
(cuda-gdb) p X
$5 = (@generic double * @register) 0x1000000
So far, this all seems normal to me. But then:
(cuda-gdb) p i == 0
$7 = true
(cuda-gdb) p X[i]
Error: Failed to read global memory at address 0x0 on device 0 sm 0 warp 0 lane 0 (error=7).
Why can I access X[0], but not X[i], when i == 0?
Edit: Here is a complete working example that demonstrates my problem:
import pycuda.gpuarray as gpuarray
import pycuda.driver as cuda
import pycuda.autoinit
import numpy as np
from pycuda.compiler import SourceModule
from math import pi
mydat = np.arange(12).astype(np.float64)
mydat_gpu = gpuarray.to_gpu(mydat)
mod = SourceModule("""
__global__ void my_kernel(double *mydat) {
    extern __shared__ double shr[];

    int id = threadIdx.x;

    double *X = &shr[(id * 6)];
    double *Y = &shr[(id * 6) + 3];

    X[0] = mydat[0];
    X[1] = mydat[1];
    X[2] = mydat[2];
    Y[0] = mydat[3];
    Y[1] = mydat[4];
    Y[2] = mydat[5];


    __syncthreads();

    double result;

    for (int i = 0; i < 3; i++) {
        result += X[i] + Y[i];
    }
}
""")
my_kernel = mod.get_function("my_kernel")
blk = (1,1,1)
grd = (1,1,1)
my_kernel(mydat_gpu, grid=grd, block=blk, shared=(8*6))
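(For the full 512-thread configuration described at the top, the launch would presumably look like the line below; the block and shared-memory sizes are just the arithmetic from the opening paragraph, not something I have run:)

my_kernel(mydat_gpu, grid=(1,1,1), block=(512,1,1), shared=512*6*8)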
At this point I start up a debugging session:
cuda-gdb --args python -m pycuda.debug minimal_working_example.py
(cuda-gdb) b my_kernel
Function "my_kernel" not defined.
Make breakpoint pending on future shared library load? (y or [n]) y
Breakpoint 1 (my_kernel) pending.
(cuda-gdb) run
[Switching focus to CUDA kernel 0, grid 1, block (0,0,0), thread (0,0,0), device 0, sm 0, warp 0, lane 0]
Breakpoint 1, my_kernel(double * @generic)<<<(1,1,1),(1,1,1)>>> (mydat=0x13034a0000)
at kernel.cu:5
5 int id = threadIdx.x;
(cuda-gdb) n
7 double *X = &shr[(id * 6)];
(cuda-gdb) p id
$1 = 0
(cuda-gdb) p id * 6
$2 = 0
(cuda-gdb) n
8 double *Y = &shr[(id * 6) + 3];
(cuda-gdb) p (id * 6) + 3
$3 = 3
(cuda-gdb) n
10 X[0] = mydat[0];
(cuda-gdb) n
11 X[1] = mydat[1];
(cuda-gdb) n
12 X[2] = mydat[2];
(cuda-gdb) n
13 Y[0] = mydat[3];
(cuda-gdb) n
14 Y[1] = mydat[4];
(cuda-gdb) n
15 Y[2] = mydat[5];
(cuda-gdb) p X
$4 = (@generic double * @register) 0x1000000
(cuda-gdb) p X[0]
$5 = 0
(cuda-gdb) p X[1]
$6 = 1
(cuda-gdb) p Y[0]
$7 = 3
(cuda-gdb) p Y[1]
$8 = 4
(cuda-gdb) n
18 __syncthreads();
(cuda-gdb) n
22 for (int i = 0; i < 3; i++) {
(cuda-gdb) n
23 result += X[i] + Y[i];
(cuda-gdb) p i
$9 = 0
(cuda-gdb) p X[0]
$10 = 0
(cuda-gdb) p X[i]
Error: Failed to read global memory at address 0x0 on device 0 sm 0 warp 0 lane 0 (error=7).
Answer 0 (score: 0)
All that is happening here is that you are stepping through source instructions which haven't been compiled into the running kernel. The variables you are trying to inspect have already gone out of scope, and the debugger can no longer show them to you.

This is due to aggressive optimization in the device code compiler. In your example, the summation loop produces no output that affects a write to global or shared memory, so the compiler simply eliminates it. When stepping through optimized code, the source debugger does its best to show a 1:1 relationship between the source and the executing code, but it is not always possible, and the somewhat confusing results you are seeing are the consequence.
You can confirm this for yourself by compiling the kernel code to PTX with nvcc and inspecting the emitted code.
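A command along these lines emits the PTX (the architecture flag here is illustrative; match it to your GPU):

nvcc -arch=sm_52 -ptx kernel.cu -o kernel.ptx

The result: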
// .globl _Z9my_kernelPd
.visible .entry _Z9my_kernelPd(
.param .u64 _Z9my_kernelPd_param_0
)
{
.reg .b32 %r<3>;
.reg .f64 %fd<7>;
.reg .b64 %rd<6>;
ld.param.u64 %rd1, [_Z9my_kernelPd_param_0];
cvta.to.global.u64 %rd2, %rd1;
mov.u32 %r1, %tid.x;
mul.lo.s32 %r2, %r1, 6;
mul.wide.s32 %rd3, %r2, 8;
mov.u64 %rd4, shr;
add.s64 %rd5, %rd4, %rd3;
ld.global.nc.f64 %fd1, [%rd2];
ld.global.nc.f64 %fd2, [%rd2+8];
ld.global.nc.f64 %fd3, [%rd2+16];
ld.global.nc.f64 %fd4, [%rd2+24];
ld.global.nc.f64 %fd5, [%rd2+32];
ld.global.nc.f64 %fd6, [%rd2+40];
st.shared.f64 [%rd5], %fd1;
st.shared.f64 [%rd5+8], %fd2;
st.shared.f64 [%rd5+16], %fd3;
st.shared.f64 [%rd5+24], %fd4;
st.shared.f64 [%rd5+32], %fd5;
st.shared.f64 [%rd5+40], %fd6;
bar.sync 0;
ret;
}
You can see that the last PTX instruction before the ret is bar.sync, which is the instruction the __syncthreads() device function emits. The loop performing the summation is not present.
If I modify your source like this:
__global__ void my_kernel2(double *mydat, double *out) {
    extern __shared__ double shr[];
    int id = threadIdx.x;

    double *X = &shr[(id * 6)];
    double *Y = &shr[(id * 6) + 3];

    X[0] = mydat[0];
    X[1] = mydat[1];
    X[2] = mydat[2];
    Y[0] = mydat[3];
    Y[1] = mydat[4];
    Y[2] = mydat[5];
    __syncthreads();

    double result;
    for (int i = 0; i < 3; i++) {
        result += X[i] + Y[i];
    }
    *out = result;
}
so that result is now stored to global memory, and compile it to PTX:
.visible .entry _Z10my_kernel2PdS_(
.param .u64 _Z10my_kernel2PdS__param_0,
.param .u64 _Z10my_kernel2PdS__param_1
)
{
.reg .b32 %r<3>;
.reg .f64 %fd<20>;
.reg .b64 %rd<8>;
ld.param.u64 %rd3, [_Z10my_kernel2PdS__param_0];
ld.param.u64 %rd2, [_Z10my_kernel2PdS__param_1];
cvta.to.global.u64 %rd4, %rd3;
mov.u32 %r1, %tid.x;
mul.lo.s32 %r2, %r1, 6;
mul.wide.s32 %rd5, %r2, 8;
mov.u64 %rd6, shr;
add.s64 %rd1, %rd6, %rd5;
ld.global.f64 %fd1, [%rd4];
ld.global.f64 %fd2, [%rd4+8];
ld.global.f64 %fd3, [%rd4+16];
ld.global.f64 %fd4, [%rd4+24];
ld.global.f64 %fd5, [%rd4+32];
ld.global.f64 %fd6, [%rd4+40];
st.shared.f64 [%rd1], %fd1;
st.shared.f64 [%rd1+8], %fd2;
st.shared.f64 [%rd1+16], %fd3;
st.shared.f64 [%rd1+24], %fd4;
st.shared.f64 [%rd1+32], %fd5;
st.shared.f64 [%rd1+40], %fd6;
bar.sync 0;
ld.shared.f64 %fd7, [%rd1];
ld.shared.f64 %fd8, [%rd1+24];
add.f64 %fd9, %fd7, %fd8;
add.f64 %fd10, %fd9, %fd11;
ld.shared.f64 %fd12, [%rd1+8];
ld.shared.f64 %fd13, [%rd1+32];
add.f64 %fd14, %fd12, %fd13;
add.f64 %fd15, %fd10, %fd14;
ld.shared.f64 %fd16, [%rd1+16];
ld.shared.f64 %fd17, [%rd1+40];
add.f64 %fd18, %fd16, %fd17;
add.f64 %fd19, %fd15, %fd18;
cvta.to.global.u64 %rd7, %rd2;
st.global.f64 [%rd7], %fd19;
ret;
}
You can see that the (unrolled) summation loop is now present in the PTX, and the debugger behaviour should be much closer to what you expect when you try it.
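For completeness, a minimal sketch of how the modified kernel might be driven from the PyCUDA side, assuming my_kernel2 has been compiled into its own SourceModule (the mod2 and out_gpu names here are illustrative, not from your example):

out_gpu = gpuarray.zeros(1, dtype=np.float64)    # one double to receive the result
my_kernel2 = mod2.get_function("my_kernel2")     # mod2: SourceModule containing my_kernel2
my_kernel2(mydat_gpu, out_gpu, grid=(1,1,1), block=(1,1,1), shared=8*6)
# Note: result is never initialized in the kernel, so the value read back
# includes whatever the register happened to hold before the loop.
print(out_gpu.get())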
As suggested in the comments, because of the complications that compiler optimization introduces, you should never waste time trying to analyze any code which doesn't alter block or global state.
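As an aside, if you need source-level stepping to track the source exactly, the usual approach is to disable device code optimization with nvcc's -G flag, which PyCUDA can pass through to nvcc (kernel_source here stands in for your kernel string):

mod = SourceModule(kernel_source, options=["-G"])

Bear in mind that -G changes the generated code substantially, so it is for debugging only.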