我有以下问题。我已经实现了几种不同的并行约简算法,如果我每个内核只减少一个值,它们都能正常工作。但现在我需要减少几个(21),我只是不知道为什么它有时候工作,有时候不工作。
执行的步骤是:
以下是完整的代码,您只需复制粘贴即可运行。
#include <stdio.h>
#include <math.h>
#include <cuda_runtime.h>
// switch the compiler flag if you don't have the sdk's helper_cuda.h file
#if 1
#include "helper_cuda.h"
#else
#define checkCudaErrors(val) (val)
#define getLastCudaError(msg)
#endif
#ifdef __CDT_PARSER__
#define __global__
#define __device__
#define __shared__
#define __host__
#endif
// compute sum of val over num threads
// Compute the sum of `val` over all blockDim.x threads of the block.
// Preconditions: blockDim.x == 256 (the unroll below is hard-coded for it),
// reductionSpace points to at least 256 floats of shared memory, and
// localId == threadIdx.x. Must be called by every thread of the block.
// Returns the block-wide sum (same value in every thread).
__device__ float localSum(const float& val, volatile float* reductionSpace, const uint& localId)
{
reductionSpace[localId] = val; // stage every thread's contribution in shared mem
__syncthreads();
// Fully unrolled tree reduction for 256 threads.
if (localId < 128) reductionSpace[localId] += reductionSpace[localId + 128];
__syncthreads();
if (localId < 64) reductionSpace[localId] += reductionSpace[localId + 64];
__syncthreads();
// Final warp (32 threads): on pre-Volta GPUs the lanes executed in lockstep,
// but with independent thread scheduling (Volta+) an explicit __syncwarp()
// is required between dependent steps — implicit warp synchrony no longer holds.
if (localId < 32)
{
reductionSpace[localId] += reductionSpace[localId + 32]; __syncwarp();
reductionSpace[localId] += reductionSpace[localId + 16]; __syncwarp();
reductionSpace[localId] += reductionSpace[localId + 8]; __syncwarp();
reductionSpace[localId] += reductionSpace[localId + 4]; __syncwarp();
reductionSpace[localId] += reductionSpace[localId + 2]; __syncwarp();
reductionSpace[localId] += reductionSpace[localId + 1];
}
// Barrier before the broadcast read: guarantees lane 0 has finished the last
// add, and makes it safe for the caller to immediately reuse reductionSpace.
__syncthreads();
return reductionSpace[0];
}
// Sums 21 per-thread products over all n threads of the grid into od[0..20].
// Launch: 1D grid, blockDim.x == 256, dynamic shared memory of
// blockDim.x * sizeof(float) (scratch space for localSum()).
__global__ void d_kernel(float* od, int n)
{
extern __shared__ float reductionSpace[]; // blockDim.x floats, reused per reduction
int g_idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int linId = threadIdx.x;
__shared__ float partialSums[21]; // per-block sums, one per (i,j) pair
float tmp[6] =
{ 0, 0, 0, 0, 0, 0 };
// Threads beyond n contribute zeros so the reduction stays block-uniform.
// (Computations are simplified to constants — the behaviour is unchanged.)
if (g_idx < n)
{
tmp[0] = 1.0f;
tmp[1] = 1.0f;
tmp[2] = 1.0f;
tmp[3] = 1.0f;
tmp[4] = 1.0f;
tmp[5] = 1.0f;
}
int c = 0;
// 21 reductions: all upper-triangular products tmp[i]*tmp[j], j >= i.
for (int i = 0; i < 6; ++i)
{
for (int j = i; j < 6; ++j, ++c)
{
float res = tmp[i] * tmp[j];
// Every thread must call localSum() (it contains __syncthreads()).
float blockSum = localSum(res, reductionSpace, linId);
// Store from a single thread: all threads return the same value, but
// having 256 writers to one address is a (benign) race that
// compute-sanitizer's racecheck flags. One writer is race-free.
if (linId == 0)
partialSums[c] = blockSum;
}
}
__syncthreads(); // make thread 0's partialSums stores visible to threads 0..20
// Accumulate this block's 21 partial sums into the global result.
if (linId < 21)
{
atomicAdd(&od[linId], partialSums[linId]);
}
}
// Repro driver: runs the kernel 200 times and checks that every one of the
// 21 sums equals w*h (each of the n threads contributes exactly 1.0f).
int main()
{
const int w = 320;
const int h = 240;
const int n = w * h;
// ------------------------------------------------------------------------------------
float *d_out;
checkCudaErrors(cudaMalloc(&d_out, 21 * sizeof(float)));
float* h_out = new float[21];
const int dimBlock = 256; // must stay 256: localSum()'s unroll assumes it
const int dimGrid = (n - 1) / dimBlock + 1; // ceil(n / dimBlock)
const int sharedMemSize = dimBlock * sizeof(float); // scratch for localSum()
printf("w: %d\n", w);
printf("h: %d\n", h);
printf("dimBlock: %d\n", dimBlock);
printf("dimGrid: %d\n", dimGrid);
printf("sharedMemSize: %d\n", sharedMemSize);
int failcounter = 0;
const float target = (float) n;
int c = 0;
// ------------------------------------------------------------------------------------
// run the kernel 200 times
for (int run = 0; run < 200; ++run)
{
checkCudaErrors(cudaMemset(d_out, 0, 21 * sizeof(float)));
d_kernel<<<dimGrid, dimBlock, sharedMemSize>>>(d_out, n);
getLastCudaError("d_kernel");
// Blocking copy also synchronizes with the kernel before reading results.
checkCudaErrors(cudaMemcpy(h_out, d_out, 21 * sizeof(float), cudaMemcpyDeviceToHost));
// Check the output against the target value. Since every thread contributes
// 1.0f, each sum must equal the element count w*h == n.
bool failed = false;
for (int i = 0; i < 21; ++i)
{
// fabsf, not abs: the integer abs() would truncate the float difference
// and defeat the 0.01f tolerance.
if (fabsf(h_out[i] - target) > 0.01f)
{
++failcounter;
failed = true;
}
}
// On failure, dump all 21 values in the triangular layout used by the kernel.
if (failed)
{
c = 0;
for (int i = 0; i < 6; ++i)
{
for (int j = i; j < 6; ++j, ++c)
{
printf("%10.7f ", h_out[c]);
}
printf("\n");
}
}
}
printf("failcounter: %d\n", failcounter);
// ------------------------------------------------------------------------------------
delete[] h_out;
checkCudaErrors(cudaFree(d_out));
// ------------------------------------------------------------------------------------
return 0;
}
一些意见:
BlockSize 始终 256 - 因此 localSum()中的展开循环会检查正确的threadIds。 就像开头提到的那样,在200次运行中它有时完全正确,有时只有2个值是错误的,有时150左右是错误的。
并且它不需要具有浮点精度的任何东西,因为只有 1x1 被乘以并存储在d_kernel()中的变量 res 中。我可以清楚地看到,有时只是某些线程或块没有开始,但我不知道为什么。 :/
仅仅从结果来看,显然存在某种竞争条件,但我根本看不出问题。
有谁知道问题出在哪里?
我现在测试了很多东西,我发现它一定和 BlockSize 有关。如果我将其缩小为 <= 64 并相应地更改 localSum(),则所有内容始终按预期工作。
但那对我来说根本没有意义?!除了使用共享内存进行正常的并行缩减之外,我仍然没有做任何其他事情,唯一的区别就是每个线程执行21次。
现在我完全糊涂了。问题出在展开的循环!!或者更确切地说,是 warp 的同步问题。以下 localSum() 代码有效:
// compute sum of val over num threads
// Block-wide sum of `val` via a classic strided tree reduction in shared memory.
// Requires blockDim.x to be a power of two; every thread of the block must call it.
__device__ float localSum(const float& val, volatile float* reductionSpace, const uint& localId)
{
reductionSpace[localId] = val; // stage this thread's contribution
__syncthreads();
// Halve the active range each pass until a single element remains.
unsigned int stride = blockDim.x >> 1;
while (stride != 0)
{
if (localId < stride)
{
reductionSpace[localId] += reductionSpace[localId + stride];
}
// Barrier after every pass; the final one also orders the read of element 0.
__syncthreads();
stride >>= 1;
}
return reductionSpace[0];
}
但是如果我展开最后一个 warp 并且不在线程之间进行同步,我有时会在 2000 次运行中再次得到 2 或 3 个错误结果。因此,以下代码不能正常工作:
// compute sum of val over num threads
// KNOWN-BROKEN variant (kept to illustrate the bug discussed in the post):
// it occasionally returns wrong sums. Per the accepted answer below, the
// missing __syncthreads() before the return means threads other than those
// in the last warp may read reductionSpace[0] before lane 0 has finished.
// compute sum of val over num threads
__device__ float localSum(const float& val, volatile float* reductionSpace, const uint& localId)
{
reductionSpace[localId] = val; // load data into shared mem
__syncthreads();
// Strided tree reduction down to the last 64 elements, with a barrier per pass.
for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1)
{
if (localId < s)
{
reductionSpace[localId] += reductionSpace[localId + s];
}
__syncthreads();
}
// Final warp unrolled, relying on implicit warp-synchronous execution.
if (localId < 32)
{
reductionSpace[localId] += reductionSpace[localId + 32];
reductionSpace[localId] += reductionSpace[localId + 16];
reductionSpace[localId] += reductionSpace[localId + 8];
reductionSpace[localId] += reductionSpace[localId + 4];
reductionSpace[localId] += reductionSpace[localId + 2];
reductionSpace[localId] += reductionSpace[localId + 1];
}
// BUG: no __syncthreads() here — threads with localId >= 32 can race ahead
// and read reductionSpace[0] before the final warp has written it.
return reductionSpace[0];
}
但这是为什么呢?CUDA 不是同时执行一个 warp(32 个线程),因而不需要 __syncthreads() 吗?!
我不需要有人在这里发布我的工作代码,但我真的要求有很多经验和CUDA编程知识的人在这里描述我的潜在问题。或者至少给我一个提示。
答案(得分:1):
解决方案非常简单,我几乎羞于启齿。我太盲目了,到处寻找,却偏偏没注意最明显的地方。localSum() 中的 return 语句之前缺少一个简单的 __syncthreads()。因为最后一个 warp 本身是同步执行的,但不能保证 threadID 为 0 的那个线程已经完成……这么愚蠢的错误,我竟然一直没有看到。
抱歉所有麻烦.. :))