I am using CUDA to compute out = C(b(A(in))), where the functions A and C are convolutions and b is an element-wise function. A toy example:
#define N 1000

__device__ float b(float d_in){ return min(d_in + 10.0f, 100.0f); }

__global__ void bA(float *d_in, float *d_out){
    const int x = threadIdx.x + blockIdx.x * blockDim.x;
    if (x >= N) return;
    // replicate boundary
    int x_left = max(x-1, 0); int x_right = min(x+1, N-1);
    d_out[x] = b( d_in[x_left] + d_in[x] + d_in[x_right] );
}

__global__ void C(float *d_in, float *d_out){
    const int x = threadIdx.x + blockIdx.x * blockDim.x;
    if (x >= N) return;
    // replicate boundary
    int x_left = max(x-1, 0); int x_right = min(x+1, N-1);
    d_out[x] = d_in[x_left] + d_in[x] + d_in[x_right];
}

void myfunc(float *d_data, float *d_temp){
    dim3 threads(256);
    dim3 blocks( (N + threads.x - 1) / threads.x ); // divide up
    // kernels that I would like to merge into one:
    bA<<<blocks, threads>>>(d_data, d_temp);
    C <<<blocks, threads>>>(d_temp, d_data);
}
Computing it this way requires an additional variable d_temp, which I would like to avoid. I therefore want to merge these kernels into a single one, i.e. one kernel that computes C(b(A(in))).

One difficulty is how to store the temporary result of b(A(in)) and then perform the convolution C(). I have tried shared memory, but I am lost on how to load the temporary result b(A(in)) into shared memory. For example:
#define BLOCK_SIZE 32

__global__ void CbA(float *d_in, float *d_out){
    const int x = threadIdx.x + blockIdx.x * blockDim.x;
    if (x >= N) return;
    // replicate boundary
    int x_left = max(x-1, 0); int x_right = min(x+1, N-1);
    // temp result for b(A(in))
    float temp = b( d_in[x_left] + d_in[x] + d_in[x_right] );
    // shared memory for convolution (stencil size of 3)
    __shared__ float shmem[BLOCK_SIZE+2];
    // load center part to shared memory
    shmem[threadIdx.x+1] = temp;
    // but how to load boundary parts from temp to shmem?
    // ...
    __syncthreads();
    // perform function C()
    // ...
}
Any suggestions or hints would be highly appreciated.
Answer 0 (score: 2)
First, a comment on

// load center part to shared memory
shmem[threadIdx.x+1] = temp;

I would call that storing to shared memory rather than loading...
Apart from that, some ideas:

You could let every thread of a block compute one value of b(A(in)) while each block produces only blockDim.x - 2 outputs of C, so that the block also computes the two halo values it needs. Of course, you have to take this into account when calculating x (const int x = threadIdx.x + blockIdx.x * (blockDim.x-2);) and invoke the kernel with more threads/blocks. When performing C(), two threads of every block will be idle, but that should not have a big impact.
Here is the kernel. It is easier to understand if you try to visualize the flow of the computation.
__global__ void CbA(float *d_in, float *d_out)
{
    const int x = threadIdx.x + blockIdx.x * (blockDim.x - 2);
    // each thread computes b(A(in)) at position x-1, clamped for the
    // replicate boundary, so shmem also covers the block's halo
    const int xs = min(max(x - 1, 0), N - 1);
    int x_left = max(xs - 1, 0); int x_right = min(xs + 1, N - 1);
    float temp = b( d_in[x_left] + d_in[xs] + d_in[x_right] );

    __shared__ float shmem[BLOCK_SIZE]; // = 256
    shmem[threadIdx.x] = temp;
    // note: no early return before this point, since __syncthreads()
    // must be reached by every thread of the block
    __syncthreads();

    // perform C(); the two outermost threads only supplied halo values
    if (threadIdx.x > 0 && threadIdx.x < blockDim.x - 1 && x - 1 < N)
        d_out[x-1] = shmem[threadIdx.x-1] + shmem[threadIdx.x] + shmem[threadIdx.x+1];
}
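Since each block now produces only blockDim.x - 2 outputs, the kernel needs more blocks than before. A minimal host-side sketch (myfunc_fused is my name, not from the question; note that the output buffer must be distinct from the input, because blocks read elements that belong to their neighbours' output ranges):

void myfunc_fused(float *d_in, float *d_out)
{
    dim3 threads(BLOCK_SIZE); // = 256
    dim3 blocks( (N + threads.x - 3) / (threads.x - 2) ); // ceil(N / (threads.x - 2))
    CbA<<<blocks, threads>>>(d_in, d_out);
}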
Alternatively, you could keep your original indexing and let a single thread per block compute the two boundary values of b(A()). But then only 1 of the 32 threads of a warp does that computation; in the worst case the whole SM runs at a 1/32 ratio during this additional computation.
...
// but how to load boundary parts from temp to shmem?
if (threadIdx.x == 0)
{
    {   // left halo: b(A(in)) at the element just before the block (clamped)
        const int x = max((int)(blockIdx.x * blockDim.x) - 1, 0);
        int x_left = max(x-1, 0); int x_right = min(x+1, N-1);
        float temp = b( d_in[x_left] + d_in[x] + d_in[x_right] );
        shmem[0] = temp;
    }
    {   // right halo: b(A(in)) at the element just after the block (clamped)
        const int x = min((int)((blockIdx.x + 1) * blockDim.x), N-1);
        int x_left = max(x-1, 0); int x_right = min(x+1, N-1);
        float temp = b( d_in[x_left] + d_in[x] + d_in[x_right] );
        shmem[blockDim.x+1] = temp;
    }
}
// perform function C()
...
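Assembled into a complete kernel, this variant would look roughly like the sketch below. The clamping of out-of-range threads to N-1 is my addition, so that no thread returns before __syncthreads(); also, BLOCK_SIZE here must match the blockDim.x of the launch (256 in myfunc), not the 32 from the question's define.

__global__ void CbA(float *d_in, float *d_out)
{
    const int x  = threadIdx.x + blockIdx.x * blockDim.x;
    const int xc = min(x, N - 1); // clamp instead of returning early
    __shared__ float shmem[BLOCK_SIZE + 2];

    // center part: temp result for b(A(in))
    int x_left = max(xc - 1, 0); int x_right = min(xc + 1, N - 1);
    shmem[threadIdx.x + 1] = b( d_in[x_left] + d_in[xc] + d_in[x_right] );

    // boundary parts, computed by thread 0 only
    if (threadIdx.x == 0)
    {
        const int xl = max((int)(blockIdx.x * blockDim.x) - 1, 0);
        int l = max(xl - 1, 0); int r = min(xl + 1, N - 1);
        shmem[0] = b( d_in[l] + d_in[xl] + d_in[r] );

        const int xr = min((int)((blockIdx.x + 1) * blockDim.x), N - 1);
        l = max(xr - 1, 0); r = min(xr + 1, N - 1);
        shmem[blockDim.x + 1] = b( d_in[l] + d_in[xr] + d_in[r] );
    }
    __syncthreads();

    // perform function C()
    if (x < N)
        d_out[x] = shmem[threadIdx.x] + shmem[threadIdx.x + 1] + shmem[threadIdx.x + 2];
}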
Finally: (at least in your simplified example) temp is the result of a very cheap computation. So it may be best to simply compute all the values needed to perform C() locally in each thread:
__global__ void CbA(float *d_in, float *d_out)
{
    const int x = threadIdx.x + blockIdx.x * blockDim.x;
    if (x >= N) return;

    // compute b(A(in)) at positions x-1, x and x+1,
    // clamped for the replicate boundary
    float temp[3];
    for (int i = 0; i < 3; ++i)
    {
        int xc = min(max(x - 1 + i, 0), N - 1);
        int x_left = max(xc - 1, 0); int x_right = min(xc + 1, N - 1);
        temp[i] = b( d_in[x_left] + d_in[xc] + d_in[x_right] );
    }

    // perform function C()
    d_out[x] = temp[0] + temp[1] + temp[2];
}
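The host side for this last variant stays exactly as in your myfunc, just without d_temp (a sketch; myfunc_local is my name):

void myfunc_local(float *d_in, float *d_out)
{
    dim3 threads(256);
    dim3 blocks( (N + threads.x - 1) / threads.x ); // divide up, as in myfunc
    CbA<<<blocks, threads>>>(d_in, d_out);
}

The price is that every b(A(in)) value is computed three times, by three neighbouring threads, but there is no shared memory and no __syncthreads(); since temp is cheap here, that trade-off is likely favourable.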