CUDA memory allocation problems with large images

Date: 2013-02-01 09:03:17

Tags: c++ cuda

I have a function that builds a histogram from an image (a sequential version was given as homework):

CImg< unsigned char > histogramImage = CImg< unsigned char >(BAR_WIDTH * HISTOGRAM_SIZE, HISTOGRAM_SIZE, 1, 1);
unsigned int *histogram;
histogram = (unsigned int *)malloc(HISTOGRAM_SIZE * sizeof(unsigned int));
memset(reinterpret_cast< void * >(histogram), 0, HISTOGRAM_SIZE * sizeof(unsigned int));

cudaMemset(gpuImage, 0, grayImage.width() * grayImage.height() * sizeof(unsigned char));

cuda_err = cudaMemcpy(gpuImage, grayImage, grayImage.width() * grayImage.height() * sizeof(unsigned char), cudaMemcpyHostToDevice);
if (cuda_err != cudaSuccess)
{
    std::cout << "ERROR: Failed cudaMemcpy" << std::endl;
    return -1;
}

unsigned int *gpuhistogram;
cuda_err = cudaMalloc((void **)(&gpuhistogram), HISTOGRAM_SIZE * sizeof(unsigned int));
if (cuda_err != cudaSuccess)
{
    std::cout << "ERROR: Failed cudaMalloc" << std::endl;
}
cudaMemset(gpuhistogram, 0, HISTOGRAM_SIZE * sizeof(unsigned int));

histogram1D(gpuImage, histogramImage, grayImage.width(), grayImage.height(), gpuhistogram, HISTOGRAM_SIZE, BAR_WIDTH, total, gridSize, blockSize);

cuda_err = cudaMemcpy(histogram, gpuhistogram, HISTOGRAM_SIZE * sizeof(unsigned int), cudaMemcpyDeviceToHost);
if (cuda_err != cudaSuccess)
{
    std::cout << "ERROR: Failed cudaMemcpy" << std::endl;
}

That calls:

void histogram1D(unsigned char *grayImage, unsigned char *histogramImage, const int width, const int height, unsigned int *histogram, const unsigned int HISTOGRAM_SIZE, const unsigned int BAR_WIDTH, NSTimer &timer, dim3 grid_size, dim3 block_size) {

    NSTimer kernelTime = NSTimer("kernelTime", false, false);

    kernelTime.start();
    histo <<< grid_size, block_size >>> (grayImage, histogram, width);
    cudaDeviceSynchronize();
    kernelTime.stop();

    cout << fixed << setprecision(6);
    cout << "histogram1D (kernel): \t\t" << kernelTime.getElapsed() << " seconds." << endl;
}

The kernel function is:

__global__ void histo(unsigned char *inputImage, unsigned int *histogram, int width)
{
    int x = threadIdx.x + (blockIdx.x * blockDim.x);
    int y = threadIdx.y + (blockIdx.y * blockDim.y);

    unsigned int index = static_cast< unsigned int >(inputImage[(y * width) + x]);
    atomicAdd(&histogram[index], 1);
}

The problem I'm running into: when I call it with images ranging from 1024x1024 up to 3543x2480, it works. However, for an image of 8192x8192, the values in *histogram are all 0 when the function returns. My experiments seem to point at the memory allocation for *gpuhistogram (shouldn't an unsigned int be big enough?), because the sequential version of this works. How can this be solved? Any ideas?
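For reference, an out-of-range launch configuration makes the kernel fail silently: the launch returns an error code that the code above never checks, and the output buffer is simply left untouched. A minimal sketch of such a check, reusing grid_size and block_size from the code above:

// Minimal sketch: check both the launch itself and the kernel execution.
histo <<< grid_size, block_size >>> (grayImage, histogram, width);

cudaError_t launch_err = cudaGetLastError(); // launch-configuration errors
if (launch_err != cudaSuccess)
{
    std::cout << "ERROR: kernel launch failed: " << cudaGetErrorString(launch_err) << std::endl;
}

cudaError_t sync_err = cudaDeviceSynchronize(); // errors during execution
if (sync_err != cudaSuccess)
{
    std::cout << "ERROR: kernel execution failed: " << cudaGetErrorString(sync_err) << std::endl;
}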

2 Answers:

Answer 0 (score: 0)

  1. Check your card. From Wikipedia (see the sketch after this list):

     Technical specifications, by compute capability (version) 1.0 / 1.1 / 1.2 / 1.3 / 2.x / 3.0 / 3.5:
     - Maximum dimensionality of a grid of thread blocks: 2 (1.x), 3 (2.x and later)
     - Maximum x-, y-, or z-dimension of a grid of thread blocks: 65535 (up to 2.x), 2^31 - 1 (3.x)

  2. I suspect your histogram will be worse than the CPU code; try using something like shared memory and assume 256 values. The trick is to use #bins threads per block (256 threads per block). I don't want to cut into the author's income, so see CUDA by Example (2010).
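For point 1, a rough sketch of the arithmetic, assuming a 1D launch with 256 threads per block (the question does not show how gridSize is computed): an 8192x8192 image needs 8192 * 8192 / 256 = 262144 blocks, which exceeds the 65535 grid x-dimension limit on compute capability <= 2.x devices. A hypothetical check against the device limit:

// Hypothetical check of a 1D launch against the device's grid limit.
// 8192 * 8192 pixels at 256 threads per block needs 262144 blocks,
// more than the 65535 maximum grid x-dimension on compute capability <= 2.x.
int threadsPerBlock = 256;
long long blocks = (8192LL * 8192LL + threadsPerBlock - 1) / threadsPerBlock;

cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
if (blocks > prop.maxGridSize[0])
{
    std::cout << "Grid too large: " << blocks << " blocks, device limit is " << prop.maxGridSize[0] << std::endl;
}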

Answer 1 (score: 0)

Just wanted to add; following Mikhail's answer, this is what I'm doing now:

void histogram1D(unsigned char *grayImage, unsigned char *histogramImage, const int width, const int height, unsigned int *histogram, const unsigned int HISTOGRAM_SIZE, const unsigned int BAR_WIDTH, NSTimer &timer, dim3 grid_size, dim3 block_size) {

    NSTimer kernelTime = NSTimer("kernelTime", false, false);

    kernelTime.start();
    // Kernel
    histo <<< 15 * 2, 256 >>> (grayImage, histogram, width, height); // 15 is the number of blocks for my device
    //cudaDeviceSynchronize(); // I get slow results with this; figured it's not necessary since the kernel threads are synced.
    kernelTime.stop();

    cout << fixed << setprecision(6);
    cout << "histogram1D (kernel): \t\t" << kernelTime.getElapsed() * 1000 << " milliseconds." << endl;
}
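As a side note, kernel launches are asynchronous, so without the cudaDeviceSynchronize() the host timer above only measures the launch overhead, not the kernel itself. A sketch of the same timing done with CUDA events, which wait for the device work to finish:

// Sketch: timing the kernel with CUDA events instead of a host timer.
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);

cudaEventRecord(start, 0);
histo <<< 15 * 2, 256 >>> (grayImage, histogram, width, height);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);             // wait for the kernel to finish

float ms = 0.0f;
cudaEventElapsedTime(&ms, start, stop); // elapsed time in milliseconds
cout << "histogram1D (kernel): \t\t" << ms << " milliseconds." << endl;

cudaEventDestroy(start);
cudaEventDestroy(stop);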

Kernel code:

__global__ void histo(unsigned char *inputImage, unsigned int *histogram, int width, int height)
{
    // One shared sub-histogram per block; assumes blockDim.x == 256.
    __shared__ unsigned int temp[256];
    temp[threadIdx.x] = 0;

    __syncthreads();

    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int offset = blockDim.x * gridDim.x; // grid-stride step over all pixels (was blockDim.y, which over-counts)
    while (i < width * height)
    {
        atomicAdd(&temp[inputImage[i]], 1);
        i += offset;
    }

    __syncthreads();
    // Merge this block's sub-histogram into the global histogram.
    atomicAdd(&(histogram[threadIdx.x]), temp[threadIdx.x]);
}