GPU reduction code only runs once

Date: 2016-06-28 09:59:24

Tags: cuda reduction

I have been using the code example provided by Robert Crovella in:

thrust::max_element slow in comparison cublasIsamax - More efficient implementation?

It is a very fast reduction code. I modified it to also return the index of the maximum in the float input array. When I use it in my code, it only executes correctly once. If I try to call the routine again it does not find a new maximum; it just returns the previous maximum. Is there something about the volatile global memory the routine uses that needs to be reset before it can be called again?

#include <cuda.h>  
#include <cublas_v2.h>  
#include <thrust/extrema.h>  
#include <thrust/device_ptr.h>  
#include <thrust/device_vector.h>  
#include <stdio.h>  
#include <stdlib.h>  

#define DSIZE 4096*4  // nTPB should be a power-of-2  
#define nTPB 512  
#define MAX_KERNEL_BLOCKS 30  
#define MAX_BLOCKS ((DSIZE/nTPB)+1)  
#define MIN(a,b) ((a>b)?b:a)  
#define FLOAT_MIN -1.0f  

#include <helper_functions.h>  
#include <helper_cuda.h>  

// this code has been modified to return the index of the max instead of the actual max value - for my application
__device__ volatile float blk_vals[MAX_BLOCKS];  
__device__ volatile int   blk_idxs[MAX_BLOCKS];  
__device__ int   blk_num = 0;  

//template <typename T>  
__global__ void max_idx_kernel(const float *data, const int dsize, int *result){  

  __shared__ volatile float   vals[nTPB];  
  __shared__ volatile int idxs[nTPB];  
  __shared__ volatile int last_block;  
  int idx = threadIdx.x+blockDim.x*blockIdx.x;  
  last_block = 0;  
  float   my_val = FLOAT_MIN;  
  int my_idx = -1;  
  // sweep from global memory  
  while (idx < dsize){  
    if (data[idx] > my_val) {my_val = data[idx]; my_idx = idx;}  
    idx += blockDim.x*gridDim.x;}  
  // populate shared memory  
  vals[threadIdx.x] = my_val;  
  idxs[threadIdx.x] = my_idx;  
  __syncthreads();  
  // sweep in shared memory  
  for (int i = (nTPB>>1); i > 0; i>>=1){  
    if (threadIdx.x < i)  
      if (vals[threadIdx.x] < vals[threadIdx.x + i]) {vals[threadIdx.x] = vals[threadIdx.x+i]; idxs[threadIdx.x] = idxs[threadIdx.x+i]; }  
    __syncthreads();}  
  // perform block-level reduction  
  if (!threadIdx.x){  
    blk_vals[blockIdx.x] = vals[0];  
    blk_idxs[blockIdx.x] = idxs[0];  
    if (atomicAdd(&blk_num, 1) == gridDim.x - 1) // then I am the last block  
      last_block = 1;}  
  __syncthreads();  
  if (last_block){  
    idx = threadIdx.x;  
    my_val = FLOAT_MIN;  
    my_idx = -1;  
    while (idx < gridDim.x){  
      if (blk_vals[idx] > my_val) {my_val = blk_vals[idx]; my_idx = blk_idxs[idx]; }  
      idx += blockDim.x;}  
  // populate shared memory  
    vals[threadIdx.x] = my_val;  
    idxs[threadIdx.x] = my_idx;  
    __syncthreads();  
  // sweep in shared memory  
    for (int i = (nTPB>>1); i > 0; i>>=1){  
      if (threadIdx.x < i)  
        if (vals[threadIdx.x] < vals[threadIdx.x + i]) {vals[threadIdx.x] = vals[threadIdx.x+i]; idxs[threadIdx.x] = idxs[threadIdx.x+i]; }  
      __syncthreads();}  
    if (!threadIdx.x)  
      *result = idxs[0];  
    }  
}  



int main(){  

  int nrElements = DSIZE;  
  float *d_vector, *h_vector;  

  StopWatchInterface *hTimer = NULL;  
  sdkCreateTimer(&hTimer);  
  double gpuTime;  
  int k;  
  int max_index;  
  int *d_max_index;  
  cudaMalloc(&d_max_index, sizeof(int));  


  h_vector = new float[DSIZE];  
  for(k=0; k < 5; k++){  
    for (int i = 0; i < DSIZE; i++) h_vector[i] = rand()/(float)RAND_MAX;  
       h_vector[10+k] = 10;  // create definite max element that changes with each loop iteration   
   cublasHandle_t my_handle;  
   cublasStatus_t my_status = cublasCreate(&my_handle);  
   cudaMalloc(&d_vector, DSIZE*sizeof(float));  
   cudaMemcpy(d_vector, h_vector, DSIZE*sizeof(float), cudaMemcpyHostToDevice);  

       max_index = 0;  
       sdkResetTimer(&hTimer);  
       sdkStartTimer(&hTimer);  
       //d_vector is a pointer on the device pointing to the beginning of the vector, containing nrElements floats.  
       thrust::device_ptr<float> d_ptr = thrust::device_pointer_cast(d_vector);  
       thrust::device_vector<float>::iterator d_it = thrust::max_element(d_ptr, d_ptr + nrElements);  
       max_index = d_it - (thrust::device_vector<float>::iterator)d_ptr;  
       cudaDeviceSynchronize();  
       gpuTime = sdkGetTimerValue(&hTimer);  
       std::cout << "loop: " << k << "  thrust time:   " << gpuTime << " max index: " << max_index << std::endl;  

       max_index = 0;  
       sdkResetTimer(&hTimer);  
       sdkStartTimer(&hTimer);  
       my_status = cublasIsamax(my_handle, DSIZE, d_vector, 1, &max_index);  
       cudaDeviceSynchronize();  
       gpuTime = sdkGetTimerValue(&hTimer);   
       std::cout << "loop: " << k << "  cublas time:   " << gpuTime << " max index: " << max_index-1 << std::endl;  

       max_index = 0;  
       sdkResetTimer(&hTimer);  
       sdkStartTimer(&hTimer);  
       max_idx_kernel<<<MIN(MAX_KERNEL_BLOCKS, ((DSIZE+nTPB-1)/nTPB)), nTPB>>>(d_vector, DSIZE, d_max_index);  
       cudaMemcpy(&max_index, d_max_index, sizeof(int), cudaMemcpyDeviceToHost);  
       gpuTime = sdkGetTimerValue(&hTimer);  
       std::cout << "loop: " << k << "  idx kern time: " << gpuTime << " max index: " << max_index << std::endl;  
       std::cout <<  std::endl;  

  } // end for loop on k  

   cudaFree(d_max_index);  
   cudaFree(d_vector);  

  return 0;  
}  

1 Answer:

Answer 0 (score: 1)

The main issue with reusing this code across multiple loop iterations is the static initialization of the device (global) variable:

__device__ int   blk_num = 0;  

That is fine if you only intend to run the routine once. But if you intend to reuse it, you will need to re-initialize this variable to zero before each call of the kernel.
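To see why, here is a minimal, self-contained sketch of my own (nothing assumed beyond the CUDA runtime): the static initializer of a __device__ variable is applied once, when the module is loaded, not once per kernel launch, so a second launch sees whatever value the previous launch left behind:

#include <cstdio>
#include <cuda_runtime.h>

__device__ int counter = 0;      // initialized once, at module load

__global__ void bump(int *out){
  if (threadIdx.x == 0) *out = atomicAdd(&counter, 1);
}

int main(){
  int *d_out, h_out;
  cudaMalloc(&d_out, sizeof(int));
  for (int k = 0; k < 3; k++){
    bump<<<1, 32>>>(d_out);
    cudaMemcpy(&h_out, d_out, sizeof(int), cudaMemcpyDeviceToHost);
    printf("launch %d saw counter = %d\n", k, h_out);  // prints 0, 1, 2 - not 0 each time
  }
  cudaFree(d_out);
  return 0;
}

This mirrors what happens to blk_num: each launch increments it by gridDim.x, so after the first launch no block ever satisfies the "I am the last block" test again, and *result is never updated.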

We can fix this by adding an explicit initialization of this variable to zero before each call of the reduction kernel:

cudaMemcpyToSymbol(blk_num, &max_index, sizeof(int));

(I am using max_index here simply because it is a convenient host int variable that has just been set to zero.)

That is the only change needed to get the code "working".
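As an aside, here is a sketch of an alternative (not required, and not part of the change above): have the last block re-arm the counter itself, so no host-side reset is needed before each launch. This is safe because the last block only runs after every other block has already done its atomicAdd on the counter. A minimal, self-contained illustration of that pattern (the names here are mine, not from the code above):

#include <cstdio>
#include <cuda_runtime.h>

__device__ unsigned int retirement_count = 0;

__global__ void last_block_demo(int *last_block_id){
  __shared__ bool am_last;
  // ... a block's portion of the reduction would go here ...
  if (threadIdx.x == 0){
    unsigned int prev = atomicAdd(&retirement_count, 1u);
    am_last = (prev == gridDim.x - 1);   // true only for the last block to retire
  }
  __syncthreads();                       // make am_last visible to the whole block
  if (am_last && threadIdx.x == 0){
    *last_block_id = blockIdx.x;         // the last block does the final, global step
    retirement_count = 0;                // re-arm the counter for the next launch
  }
}

int main(){
  int *d_id, h_id;
  cudaMalloc(&d_id, sizeof(int));
  for (int k = 0; k < 3; k++){           // repeated launches, no host-side reset needed
    last_block_demo<<<8, 128>>>(d_id);
    cudaMemcpy(&h_id, d_id, sizeof(int), cudaMemcpyDeviceToHost);
    printf("launch %d: last block to retire was %d\n", k, h_id);
  }
  cudaFree(d_id);
  return 0;
}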

However, the introduction of the loop created some other "issues" that I will point out. These three lines of code:

cublasHandle_t my_handle;  
cublasStatus_t my_status = cublasCreate(&my_handle);  
cudaMalloc(&d_vector, DSIZE*sizeof(float));  

do not belong inside the for-loop on k. That effectively creates a memory leak and unnecessarily re-initializes the cublas library.
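In outline, the structure I have in mind is the sketch below (a sketch only, of my own; note that the explicit cublasDestroy/cudaFree/delete[] cleanup at the end is an addition of mine, which the full listing further down still omits):

#include <cstdlib>
#include <cublas_v2.h>
#include <cuda_runtime.h>

int main(){
  const int DSIZE = 4096*4;
  float *h_vector = new float[DSIZE];
  cublasHandle_t my_handle;
  cublasCreate(&my_handle);                      // create the handle once
  float *d_vector;
  cudaMalloc(&d_vector, DSIZE*sizeof(float));    // allocate the device buffer once

  for (int k = 0; k < 5; k++){                   // reuse the handle and buffer each iteration
    for (int i = 0; i < DSIZE; i++) h_vector[i] = rand()/(float)RAND_MAX;
    cudaMemcpy(d_vector, h_vector, DSIZE*sizeof(float), cudaMemcpyHostToDevice);
    int max_index = 0;
    cublasIsamax(my_handle, DSIZE, d_vector, 1, &max_index);
  }

  cublasDestroy(my_handle);                      // release everything once, after the loop
  cudaFree(d_vector);
  delete[] h_vector;
  return 0;
}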

The following code has those changes, and seems to work for me:

$ cat t1183.cu
#include <cuda.h>
#include <cublas_v2.h>
#include <thrust/extrema.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <stdio.h>
#include <stdlib.h>

#define DSIZE 4096*4  // nTPB should be a power-of-2
#define nTPB 512
#define MAX_KERNEL_BLOCKS 30
#define MAX_BLOCKS ((DSIZE/nTPB)+1)
#define MIN(a,b) ((a>b)?b:a)
#define FLOAT_MIN -1.0f

#include <helper_functions.h>
#include <helper_cuda.h>

// this code has been modified to return the index of the max instead of the actual max value - for my application
__device__ volatile float blk_vals[MAX_BLOCKS];
__device__ volatile int   blk_idxs[MAX_BLOCKS];
__device__ int   blk_num;

//template <typename T>
__global__ void max_idx_kernel(const float *data, const int dsize, int *result){

  __shared__ volatile float   vals[nTPB];
  __shared__ volatile int idxs[nTPB];
  __shared__ volatile int last_block;
  int idx = threadIdx.x+blockDim.x*blockIdx.x;
  last_block = 0;
  float   my_val = FLOAT_MIN;
  int my_idx = -1;
  // sweep from global memory
  while (idx < dsize){
    if (data[idx] > my_val) {my_val = data[idx]; my_idx = idx;}
    idx += blockDim.x*gridDim.x;}
  // populate shared memory
  vals[threadIdx.x] = my_val;
  idxs[threadIdx.x] = my_idx;
  __syncthreads();
  // sweep in shared memory
  for (int i = (nTPB>>1); i > 0; i>>=1){
    if (threadIdx.x < i)
      if (vals[threadIdx.x] < vals[threadIdx.x + i]) {vals[threadIdx.x] = vals[threadIdx.x+i]; idxs[threadIdx.x] = idxs[threadIdx.x+i]; }
    __syncthreads();}
  // perform block-level reduction
  if (!threadIdx.x){
    blk_vals[blockIdx.x] = vals[0];
    blk_idxs[blockIdx.x] = idxs[0];
    if (atomicAdd(&blk_num, 1) == gridDim.x - 1) // then I am the last block
      last_block = 1;}
  __syncthreads();
  if (last_block){
    idx = threadIdx.x;
    my_val = FLOAT_MIN;
    my_idx = -1;
    while (idx < gridDim.x){
      if (blk_vals[idx] > my_val) {my_val = blk_vals[idx]; my_idx = blk_idxs[idx]; }
      idx += blockDim.x;}
  // populate shared memory
    vals[threadIdx.x] = my_val;
    idxs[threadIdx.x] = my_idx;
    __syncthreads();
  // sweep in shared memory
    for (int i = (nTPB>>1); i > 0; i>>=1){
      if (threadIdx.x < i)
        if (vals[threadIdx.x] < vals[threadIdx.x + i]) {vals[threadIdx.x] = vals[threadIdx.x+i]; idxs[threadIdx.x] = idxs[threadIdx.x+i]; }
      __syncthreads();}
    if (!threadIdx.x)
      *result = idxs[0];
    }
}



int main(){

  int nrElements = DSIZE;
  float *d_vector, *h_vector;

  StopWatchInterface *hTimer = NULL;
  sdkCreateTimer(&hTimer);
  double gpuTime;
  int k;
  int max_index;
  int *d_max_index;
  cudaMalloc(&d_max_index, sizeof(int));


  h_vector = new float[DSIZE];
  cublasHandle_t my_handle;
  cublasStatus_t my_status = cublasCreate(&my_handle);
  cudaMalloc(&d_vector, DSIZE*sizeof(float));
  for(k=0; k < 5; k++){
    for (int i = 0; i < DSIZE; i++) h_vector[i] = rand()/(float)RAND_MAX;
       h_vector[10+k] = 10;  // create definite max element that changes with each loop iteration
    cudaMemcpy(d_vector, h_vector, DSIZE*sizeof(float), cudaMemcpyHostToDevice);

    max_index = 0;
    sdkResetTimer(&hTimer);
    sdkStartTimer(&hTimer);
       //d_vector is a pointer on the device pointing to the beginning of the vector, containing nrElements floats.
    thrust::device_ptr<float> d_ptr = thrust::device_pointer_cast(d_vector);
    thrust::device_vector<float>::iterator d_it = thrust::max_element(d_ptr, d_ptr + nrElements);
    max_index = d_it - (thrust::device_vector<float>::iterator)d_ptr;
    cudaDeviceSynchronize();
    gpuTime = sdkGetTimerValue(&hTimer);
    std::cout << "loop: " << k << "  thrust time:   " << gpuTime << " max index: " << max_index << std::endl;

    max_index = 0;
    sdkResetTimer(&hTimer);
    sdkStartTimer(&hTimer);
    my_status = cublasIsamax(my_handle, DSIZE, d_vector, 1, &max_index);
    cudaDeviceSynchronize();
    gpuTime = sdkGetTimerValue(&hTimer);
    std::cout << "loop: " << k << "  cublas time:   " << gpuTime << " max index: " << max_index-1 << std::endl;

    max_index = 0;
    sdkResetTimer(&hTimer);
    sdkStartTimer(&hTimer);
    cudaMemcpyToSymbol(blk_num, &max_index, sizeof(int));
    max_idx_kernel<<<MIN(MAX_KERNEL_BLOCKS, ((DSIZE+nTPB-1)/nTPB)), nTPB>>>(d_vector, DSIZE, d_max_index);
    cudaMemcpy(&max_index, d_max_index, sizeof(int), cudaMemcpyDeviceToHost);
    gpuTime = sdkGetTimerValue(&hTimer);
    std::cout << "loop: " << k << "  idx kern time: " << gpuTime << " max index: " << max_index << std::endl;
    std::cout <<  std::endl;

  } // end for loop on k

   cudaFree(d_max_index);
   cudaFree(d_vector);

  return 0;
}
$ nvcc -I/usr/local/cuda/samples/common/inc t1183.cu -o t1183 -lcublas
$ cuda-memcheck ./t1183
========= CUDA-MEMCHECK
loop: 0  thrust time:   2.806 max index: 10
loop: 0  cublas time:   0.441 max index: 10
loop: 0  idx kern time: 0.395 max index: 10

loop: 1  thrust time:   1.298 max index: 11
loop: 1  cublas time:   0.419 max index: 11
loop: 1  idx kern time: 0.424 max index: 11

loop: 2  thrust time:   1.303 max index: 12
loop: 2  cublas time:   0.43 max index: 12
loop: 2  idx kern time: 0.419 max index: 12

loop: 3  thrust time:   1.291 max index: 13
loop: 3  cublas time:   0.423 max index: 13
loop: 3  idx kern time: 0.415 max index: 13

loop: 4  thrust time:   1.299 max index: 14
loop: 4  cublas time:   0.423 max index: 14
loop: 4  idx kern time: 0.417 max index: 14

========= ERROR SUMMARY: 0 errors
$