Hey, I'm trying to write a kernel that basically does the following in C:

float sum = 0.0;
for (int i = 0; i < N; i++) {
    sum += valueArray[i] * valueArray[i];
}
sum += sum / N;
At the moment I have this in the kernel, but it is not giving the correct value.
int i0 = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = i0; i < N; i += blockDim.x * gridDim.x) {
    *d_sum += d_valueArray[i] * d_valueArray[i];
}
*d_sum = __fdividef(*d_sum, N);
The code used to call the kernel is:
kernelName<<<64, 128>>>(N, d_valueArray, d_sum);
cudaMemcpy(&sum, d_sum, sizeof(float), cudaMemcpyDeviceToHost);
I think each thread is computing a partial sum, but the final divide statement does not take into account the value accumulated across all of the threads. Is each thread producing its own final value for d_sum?

Does anyone know how I could do this in an efficient way? Maybe by using shared memory between the threads? I'm new to GPU programming. Cheers.
Answer 0 (score: 2)
You are updating d_sum from multiple threads.

Take a look at the following SDK sample:

http://developer.download.nvidia.com/compute/cuda/sdk/website/samples.html

Here is the code from that sample. Note that it is a two-step process: each thread block sums into shared memory and calls __syncthreads before the final result is accumulated.
#define ACCUM_N 1024

// Elsewhere in the sample, IMUL is defined as a fast 24-bit multiply:
#define IMUL(a, b) __mul24(a, b)

__global__ void scalarProdGPU(
    float *d_C,
    float *d_A,
    float *d_B,
    int vectorN,
    int elementN
){
    // Accumulators cache
    __shared__ float accumResult[ACCUM_N];

    ////////////////////////////////////////////////////////////////////////////
    // Cycle through every pair of vectors,
    // taking into account that vector counts can be different
    // from the total number of thread blocks
    ////////////////////////////////////////////////////////////////////////////
    for (int vec = blockIdx.x; vec < vectorN; vec += gridDim.x) {
        int vectorBase = IMUL(elementN, vec);
        int vectorEnd  = vectorBase + elementN;

        ////////////////////////////////////////////////////////////////////////
        // Each accumulator cycles through vectors with
        // stride equal to the total number of accumulators ACCUM_N.
        // At this stage ACCUM_N is only preferred to be a multiple of warp size
        // to meet memory coalescing alignment constraints.
        ////////////////////////////////////////////////////////////////////////
        for (int iAccum = threadIdx.x; iAccum < ACCUM_N; iAccum += blockDim.x) {
            float sum = 0;

            for (int pos = vectorBase + iAccum; pos < vectorEnd; pos += ACCUM_N)
                sum += d_A[pos] * d_B[pos];

            accumResult[iAccum] = sum;
        }

        ////////////////////////////////////////////////////////////////////////
        // Perform tree-like reduction of accumulators' results.
        // ACCUM_N has to be a power of two at this stage
        ////////////////////////////////////////////////////////////////////////
        for (int stride = ACCUM_N / 2; stride > 0; stride >>= 1) {
            __syncthreads();
            for (int iAccum = threadIdx.x; iAccum < stride; iAccum += blockDim.x)
                accumResult[iAccum] += accumResult[stride + iAccum];
        }

        if (threadIdx.x == 0) d_C[vec] = accumResult[0];
    }
}
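For the question as posted (sum of valueArray[i] * valueArray[i] over N elements, then the final per-N step), the same two-step idea can be applied directly: each block reduces its partial sums in shared memory, writes exactly one value per block, and the handful of per-block results are combined at the end. The sketch below is a minimal illustration only; the kernel name sumOfSquaresKernel, the d_partialSums buffer, and the assumption that the block size is 128 (matching the <<<64, 128>>> launch above) are illustrative, not from the original post.

__global__ void sumOfSquaresKernel(int N, const float *d_valueArray, float *d_partialSums)
{
    __shared__ float cache[128];   // one slot per thread; assumes blockDim.x == 128

    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    float local = 0.0f;

    // Grid-stride loop: each thread accumulates its own partial sum of squares.
    for (int i = tid; i < N; i += blockDim.x * gridDim.x)
        local += d_valueArray[i] * d_valueArray[i];

    cache[threadIdx.x] = local;
    __syncthreads();

    // Tree reduction within the block (requires blockDim.x to be a power of two).
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (threadIdx.x < stride)
            cache[threadIdx.x] += cache[threadIdx.x + stride];
        __syncthreads();
    }

    // Only one thread per block writes, so no two threads race on the same address.
    if (threadIdx.x == 0)
        d_partialSums[blockIdx.x] = cache[0];
}

On the host you would then copy back one partial sum per block and finish the reduction (and the division by N) on the CPU, for example:

sumOfSquaresKernel<<<64, 128>>>(N, d_valueArray, d_partialSums);
cudaMemcpy(h_partialSums, d_partialSums, 64 * sizeof(float), cudaMemcpyDeviceToHost);

float sum = 0.0f;
for (int b = 0; b < 64; b++)
    sum += h_partialSums[b];
sum /= N;   // or whatever final per-N step your original loop intends

This keeps d_sum from being written by many threads at once, which is what makes the posted kernel produce wrong values.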