Particle cell counting in CUDA (1D and 2D histogram generation)

Date: 2012-08-31 09:49:53

Tags: sorting cuda atomic race-condition particle-system

This __global__ function increments counters to compute how many particles fall inside each cell.

__global__ void Set_Nc_GPU_0831(int *nc, int *index, SP DSMC)
{
    // --- Global thread index: one thread per particle
    int tidx = threadIdx.x + blockDim.x * blockIdx.x;

    // --- index[tidx] is the cell containing particle tidx; note that there is
    //     no bounds check, so the launch must exactly cover the particle count
    atomicAdd(&nc[index[tidx]], 1);
}

I am using atomic operations, which are slow, so I would like to replace the atomic function with some other function or algorithm.

Is there another way to modify this simple __global__ function?

2 Answers:

Answer 0 (score: 3):

This is a late answer, provided to remove this question from the unanswered list.

You have already recognized that counting how many particles fall within each cell is equivalent to constructing a histogram. Histogram construction is a well-studied problem. Shane Cook's book (CUDA Programming) contains a good discussion of this topic. Furthermore, the CUDA samples include a histogram example, histogram construction by CUDA Thrust is also possible, and the CUDA Programming Blog contains more insight.
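
As a side note on the Thrust route: since the cell index of each particle is already available in your code, one alternative that avoids atomics altogether is to sort the cell indices and then count runs of equal keys with a reduction. Below is a minimal sketch of this idea; the names d_cell_indices and d_counts are illustrative assumptions, not taken from your code. Whether this beats plain atomics depends on the architecture; as the timings below show, on Kepler the atomics version is hard to beat.

#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/reduce.h>
#include <thrust/fill.h>
#include <thrust/scatter.h>
#include <thrust/pair.h>
#include <thrust/iterator/constant_iterator.h>

// --- Illustrative sketch: per-cell particle counts without atomics.
//     d_cell_indices holds the cell index of each particle (it is sorted
//     in place); d_counts must be pre-sized to the number of cells.
void count_particles_per_cell(thrust::device_vector<int> &d_cell_indices,
                              thrust::device_vector<int> &d_counts)
{
    // --- Sorting brings the particles belonging to the same cell together
    thrust::sort(d_cell_indices.begin(), d_cell_indices.end());

    // --- Each run of equal cell indices collapses to the pair
    //     (cell index, number of particles in that cell)
    int num_cells = d_counts.size();
    thrust::device_vector<int> d_cells(num_cells);
    thrust::device_vector<int> d_run_counts(num_cells);
    typedef thrust::device_vector<int>::iterator IntIter;
    thrust::pair<IntIter, IntIter> run_end =
        thrust::reduce_by_key(d_cell_indices.begin(), d_cell_indices.end(),
                              thrust::constant_iterator<int>(1),
                              d_cells.begin(), d_run_counts.begin());

    // --- Scatter the run counts into a dense count array (empty cells stay 0)
    thrust::fill(d_counts.begin(), d_counts.end(), 0);
    int num_runs = run_end.first - d_cells.begin();
    thrust::scatter(d_run_counts.begin(), d_run_counts.begin() + num_runs,
                    d_cells.begin(), d_counts.begin());
}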

Below, I am providing code that compares four different versions of histogram calculation:

  1. CPU;
  2. GPU with atomics (basically, your approach);
  3. GPU with atomics in shared memory, with a final summation of the partial histograms (basically, the approach proposed by Paul R);
  4. GPU with CUDA Thrust.

If you run the code for the typical 10MB of data on a Kepler K20c, you get the following timings:

    1. CPU = 83ms;
    2. GPU with atomics = 15.8ms;
    3. GPU with atomics in shared memory = 17.7ms;
    4. GPU with CUDA Thrust = 40ms.

As you can see, and surprisingly so, your "brute force" solution is the fastest. This can be justified since, for the most recent architectures (your post is dated August 2012, when Kepler had not yet been released, at least in Europe), atomic operations are very fast.

Here is the code:

      #include <stdio.h>
      #include <stdlib.h>
      #include <time.h>
      
      #include <thrust/device_vector.h>
      #include <thrust/host_vector.h>
      #include <thrust/sort.h>
      #include <thrust/generate.h>
      #include <thrust/adjacent_difference.h>
      #include <thrust/binary_search.h>
      #include <thrust/iterator/counting_iterator.h>
      
      #define SIZE (100*1024*1024) // 100 MB
      
      /**********************************************/
      /* FUNCTION TO GENERATE RANDOM UNSIGNED CHARS */
      /**********************************************/
      unsigned char* big_random_block(int size) {
          unsigned char *data = (unsigned char*)malloc(size);
          for (int i=0; i<size; i++)
              data[i] = rand();
          return data;
      }
      
      /***************************************/
      /* GPU HISTOGRAM CALCULATION VERSION 1 */
      /***************************************/
      __global__ void histo_kernel1(unsigned char *buffer, long size, unsigned int *histo ) {
      
          // --- Grid-stride loop: the launched threads need not cover the whole data size
          int i = threadIdx.x + blockIdx.x * blockDim.x;
          int stride = blockDim.x * gridDim.x;
          while (i < size) {
              atomicAdd(&histo[buffer[i]], 1);
              i += stride;
          }
      }
      
      /***************************************/
      /* GPU HISTOGRAM CALCULATION VERSION 2 */
      /***************************************/
      __global__ void histo_kernel2(unsigned char *buffer, long size, unsigned int *histo ) {
      
          // --- Allocating and initializing shared memory to store partial histograms
          __shared__ unsigned int temp[256];
          temp[threadIdx.x] = 0;
          __syncthreads();
      
          // --- Grid-stride loop: the launched threads need not cover the whole data size
          int i = threadIdx.x + blockIdx.x * blockDim.x;
          int offset = blockDim.x * gridDim.x;
          while (i < size)
          {
              atomicAdd(&temp[buffer[i]], 1);
              i += offset;
          }
          __syncthreads();
      
          // --- Summing the partial histograms: each of the 256 threads per block
          //     adds its bin of the shared-memory histogram to the global one
          atomicAdd(&(histo[threadIdx.x]), temp[threadIdx.x]);
      }
      
      /********************/
      /* CUDA ERROR CHECK */
      /********************/
      #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
      inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
      {
          if (code != cudaSuccess) 
          {
              fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
              if (abort) exit(code);
          }
      }
      
      /********/
      /* MAIN */
      /********/
      int main(){
      
          // --- Generating an array of SIZE unsigned chars
          unsigned char *buffer = (unsigned char*)big_random_block(SIZE);
      
          /********************/
          /* CPU COMPUTATIONS */
          /********************/
      
          // --- Allocating host memory space and initializing the host-side histogram
          unsigned int histo[256];
          for (int i=0; i<256; i++) histo [i] = 0;
      
          clock_t start_CPU, stop_CPU;
      
          // --- Histogram calculation on the host
          start_CPU = clock();
          for (int i=0; i<SIZE; i++) histo [buffer[i]]++;
          stop_CPU = clock();
          float elapsedTime = (float)(stop_CPU - start_CPU) / (float)CLOCKS_PER_SEC * 1000.0f;
          printf("Time to generate (CPU): %3.1f ms\n", elapsedTime);
      
          // --- Indirect check of the result
          long histoCount = 0;
          for (int i=0; i<256; i++) { histoCount += histo[i]; }
          printf("Histogram Sum: %ld\n", histoCount);
      
          /********************/
          /* GPU COMPUTATIONS */
          /********************/
      
          // --- Initializing the device-side data
          unsigned char *dev_buffer;
          gpuErrchk(cudaMalloc((void**)&dev_buffer,SIZE));
          gpuErrchk(cudaMemcpy(dev_buffer, buffer, SIZE, cudaMemcpyHostToDevice));
      
          // --- Allocating device memory space for the device-side histogram
          unsigned int *dev_histo;
          gpuErrchk(cudaMalloc((void**)&dev_histo,256*sizeof(unsigned int)));
      
          // --- GPU timing
          cudaEvent_t start, stop;
          gpuErrchk(cudaEventCreate(&start));
          gpuErrchk(cudaEventCreate(&stop));
      
          // --- ATOMICS
          // --- Histogram calculation on the device - 2x the number of multiprocessors gives best timing
          gpuErrchk(cudaEventRecord(start,0));
          gpuErrchk(cudaMemset(dev_histo,0,256*sizeof(int)));
          cudaDeviceProp prop;
          gpuErrchk(cudaGetDeviceProperties(&prop,0));
          int blocks = prop.multiProcessorCount;
          histo_kernel1<<<blocks*2,256>>>(dev_buffer, SIZE, dev_histo);
      
          gpuErrchk(cudaMemcpy(histo,dev_histo,256*sizeof(int),cudaMemcpyDeviceToHost));
          gpuErrchk(cudaEventRecord(stop,0));
          gpuErrchk(cudaEventSynchronize(stop));
          gpuErrchk(cudaEventElapsedTime(&elapsedTime,start,stop));
          printf("Time to generate (GPU):  %3.1f ms\n", elapsedTime);
      
          histoCount = 0;
          for (int i=0; i<256; i++) {
              histoCount += histo[i];
          }
          printf( "Histogram Sum:  %ld\n", histoCount );
      
          // --- Check the correctness of the results via the host
          for (int i=0; i<SIZE; i++) histo[buffer[i]]--;
          for (int i=0; i<256; i++) {
              if (histo[i] != 0) printf( "Failure at %d!  Off by %d\n", i, histo[i] );
          }
      
          // --- ATOMICS IN SHARED MEMORY
          // --- Histogram calculation on the device - 2x the number of multiprocessors gives best timing
          gpuErrchk(cudaEventRecord(start,0));
          gpuErrchk(cudaMemset(dev_histo,0,256*sizeof(int)));
          gpuErrchk(cudaGetDeviceProperties(&prop,0));
          blocks = prop.multiProcessorCount;
          histo_kernel2<<<blocks*2,256>>>(dev_buffer, SIZE, dev_histo);
      
          gpuErrchk(cudaMemcpy(histo,dev_histo,256*sizeof(int),cudaMemcpyDeviceToHost));
          gpuErrchk(cudaEventRecord(stop,0));
          gpuErrchk(cudaEventSynchronize(stop));
          gpuErrchk(cudaEventElapsedTime(&elapsedTime,start,stop));
          printf("Time to generate (GPU):  %3.1f ms\n", elapsedTime);
      
          histoCount = 0;
          for (int i=0; i<256; i++) {
              histoCount += histo[i];
          }
          printf( "Histogram Sum:  %ld\n", histoCount );
      
          // --- Check the correctness of the results via the host
          for (int i=0; i<SIZE; i++) histo[buffer[i]]--;
          for (int i=0; i<256; i++) {
              if (histo[i] != 0) printf( "Failure at %d!  Off by %d\n", i, histo[i] );
          }
      
          // --- CUDA THRUST
      
          gpuErrchk(cudaEventRecord(start,0));
      
          // --- Wrapping dev_buffer raw pointer with a device_ptr and initializing a device_vector with it
          thrust::device_ptr<unsigned char> dev_ptr(dev_buffer);
          thrust::device_vector<unsigned char> dev_buffer_thrust(dev_ptr, dev_ptr + SIZE);
      
          // --- Sorting data to bring equal elements together
          thrust::sort(dev_buffer_thrust.begin(), dev_buffer_thrust.end());
      
          // --- The number of histogram bins is equal to the maximum value plus one
          int num_bins = dev_buffer_thrust.back() + 1;
      
          // --- Resize histogram storage
          thrust::device_vector<int> d_histogram;
          d_histogram.resize(num_bins);
      
          // --- Find the end of each bin of values
          thrust::counting_iterator<int> search_begin(0);
          thrust::upper_bound(dev_buffer_thrust.begin(), dev_buffer_thrust.end(),
                          search_begin, search_begin + num_bins,
                          d_histogram.begin());
      
          // --- Compute the histogram by taking differences of the cumulative histogram
          thrust::adjacent_difference(d_histogram.begin(), d_histogram.end(),
                                  d_histogram.begin());
      
          thrust::host_vector<int> h_histogram(d_histogram);
          gpuErrchk(cudaEventRecord(stop,0));
          gpuErrchk(cudaEventSynchronize(stop));
          gpuErrchk(cudaEventElapsedTime(&elapsedTime,start,stop));
          printf("Time to generate (GPU):  %3.1f ms\n", elapsedTime);
      
          histoCount = 0;
          for (int i=0; i<256; i++) {
              histoCount += h_histogram[i];
          }
          printf( "Histogram Sum:  %ld\n", histoCount );
      
          // --- Check the correctness of the results via the host
          for (int i=0; i<SIZE; i++) h_histogram[buffer[i]]--;
          for (int i=0; i<256; i++) {
              if (h_histogram[i] != 0) printf( "Failure at %d!  Off by %d\n", i, h_histogram[i] );
          }
      
          gpuErrchk(cudaEventDestroy(start));
          gpuErrchk(cudaEventDestroy(stop));
          gpuErrchk(cudaFree(dev_histo));
          gpuErrchk(cudaFree(dev_buffer));
      
          free(buffer);
      
          getchar();
      
      }
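
For completeness, a possible way to build and run the code above (the file name is an assumption; choose the -arch flag matching your GPU, e.g. sm_35 for a Kepler K20c) is:

    nvcc -O3 -arch=sm_35 histogram_1d.cu -o histogram_1d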
      

Answer 1 (score: 2):

Following George's comment, in this answer I am dealing with the 2D case, in which the particles do not lie on a line but on a portion of a plane.

Actually, the 2D case requires only minor modifications to the code above. The only thing to do is to define the particles' x and y coordinates and a 2D 256 x 256 histogram; each particle then contributes to the flattened bin y * 256 + x, as the kernel below shows.

The code below provides a complete example.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/generate.h>
#include <thrust/adjacent_difference.h>
#include <thrust/binary_search.h>

#define SIZE (100*1024*1024) // 100 MB

/********************/
/* CUDA ERROR CHECK */
/********************/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess) 
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

/**********************************************/
/* FUNCTION TO GENERATE RANDOM UNSIGNED CHARS */
/**********************************************/
unsigned char* big_random_block(int size) {
    unsigned char *data = (unsigned char*)malloc(size);
    for (int i=0; i<size; i++)
        data[i] = rand();
    return data;
}

/********************************/
/* GPU HISTOGRAM CALCULATION 2D */
/********************************/
__global__ void histo_kernel2(unsigned char *dev_x_coord, unsigned char *dev_y_coord, unsigned int *histo, unsigned int size) {

    // --- Grid-stride loop: the launched threads need not cover the whole data size
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int offset = blockDim.x * gridDim.x;
    while (i < size)
    {
        atomicAdd(&histo[dev_y_coord[i] * 256 + dev_x_coord[i]], 1);
        i += offset;
    }
}

/********/
/* MAIN */
/********/
int main(){

    // --- Generating x- and y- coordinates of the particles
    unsigned char *x_coord = (unsigned char*)big_random_block(SIZE);
    unsigned char *y_coord = (unsigned char*)big_random_block(SIZE);

    /********************/
    /* CPU COMPUTATIONS */
    /********************/

    // --- Allocating host memory space and initializing the host-side histogram
    unsigned int *histo = (unsigned int*)malloc(256 * 256 * sizeof(unsigned int));
    for (int i=0; i < 256 * 256; i++) histo [i] = 0;

    clock_t start_CPU, stop_CPU;

    // --- Histogram calculation on the host
    start_CPU = clock();
    for (int i=0; i < SIZE; i++) histo[y_coord[i] * 256 + x_coord[i]]++;
    stop_CPU = clock();
    float elapsedTime = (float)(stop_CPU - start_CPU) / (float)CLOCKS_PER_SEC * 1000.0f;
    printf("Time to generate (CPU): %3.1f ms\n", elapsedTime);

    // --- Indirect check of the result
    long histoCount = 0;
    for (int i=0; i < 256 * 256; i++) { histoCount += histo[i]; }
    printf("Histogram Sum: %ld\n", histoCount);

    /********************/
    /* GPU COMPUTATIONS */
    /********************/

    // --- Initializing the device-side data
    unsigned char *dev_x_coord, *dev_y_coord;
    gpuErrchk(cudaMalloc((void**)&dev_x_coord,SIZE));
    gpuErrchk(cudaMalloc((void**)&dev_y_coord,SIZE));
    gpuErrchk(cudaMemcpy(dev_x_coord, x_coord, SIZE, cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(dev_y_coord, y_coord, SIZE, cudaMemcpyHostToDevice));

    // --- Allocating device memory space for the device-side histogram
    unsigned int *dev_histo;
    gpuErrchk(cudaMalloc((void**)&dev_histo,256*256*sizeof(unsigned int)));

    // --- GPU timing
    cudaEvent_t start, stop;
    gpuErrchk(cudaEventCreate(&start));
    gpuErrchk(cudaEventCreate(&stop));

    // --- ATOMICS
    gpuErrchk(cudaEventRecord(start,0));
    gpuErrchk(cudaMemset(dev_histo,0,256*256*sizeof(unsigned int)));
    cudaDeviceProp prop;
    gpuErrchk(cudaGetDeviceProperties(&prop,0));

    int blocks = prop.multiProcessorCount;
    histo_kernel2<<<blocks*2,256>>>(dev_x_coord, dev_y_coord, dev_histo, SIZE);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());

    gpuErrchk(cudaMemcpy(histo, dev_histo, 256 * 256 * sizeof(unsigned int),cudaMemcpyDeviceToHost));
    gpuErrchk(cudaEventRecord(stop,0));
    gpuErrchk(cudaEventSynchronize(stop));
    gpuErrchk(cudaEventElapsedTime(&elapsedTime,start,stop));
    printf("Time to generate (GPU):  %3.1f ms\n", elapsedTime);

    histoCount = 0;
    for (int i=0; i<256 * 256; i++) {
        histoCount += histo[i];
    }
    printf( "Histogram Sum:  %ld\n", histoCount );

    // --- Check the correctness of the results via the host
    for (int i=0; i<SIZE; i++) histo[y_coord[i] * 256 + x_coord[i]]--;
    for (int i=0; i<256*256; i++) {
        if (histo[i] != 0) printf( "Failure at %d!  Off by %d\n", i, histo[i] );
    }

}
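
For what it's worth, the 2D case can also be handled without atomics along the lines of the Thrust version in the previous answer: flatten each particle's (x, y) pair into a single key and build a histogram of the keys. The sketch below is an illustration under that assumption; the function and functor names are mine, not taken from the post.

#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/sort.h>
#include <thrust/binary_search.h>
#include <thrust/adjacent_difference.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/tuple.h>

// --- Illustrative functor: flattens an (x, y) pair into the bin key
//     key = y * 256 + x, matching the indexing of the atomics kernel above
struct flatten_keys_functor {
    __host__ __device__
    int operator()(const thrust::tuple<unsigned char, unsigned char> &t) const {
        return thrust::get<1>(t) * 256 + thrust::get<0>(t);
    }
};

// --- Sketch: 2D histogram without atomics, following the same
//     sort / upper_bound / adjacent_difference recipe as the 1D Thrust version
void histogram_2d_thrust(thrust::device_vector<unsigned char> &d_x,
                         thrust::device_vector<unsigned char> &d_y,
                         thrust::device_vector<int> &d_histogram)
{
    const int num_bins = 256 * 256;

    // --- Flatten each particle's (x, y) coordinates into a single key
    thrust::device_vector<int> d_keys(d_x.size());
    thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(d_x.begin(), d_y.begin())),
                      thrust::make_zip_iterator(thrust::make_tuple(d_x.end(),   d_y.end())),
                      d_keys.begin(), flatten_keys_functor());

    // --- Sorting brings equal keys (i.e., equal cells) together
    thrust::sort(d_keys.begin(), d_keys.end());

    // --- Cumulative histogram via vectorized searches, then differences
    d_histogram.resize(num_bins);
    thrust::counting_iterator<int> search_begin(0);
    thrust::upper_bound(d_keys.begin(), d_keys.end(),
                        search_begin, search_begin + num_bins,
                        d_histogram.begin());
    thrust::adjacent_difference(d_histogram.begin(), d_histogram.end(),
                                d_histogram.begin());
}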