为什么使用 cuBLAS 计算 L2 范数会导致错误?

时间:2015-07-10 18:26:02

标签: cuda thrust gpu-programming cublas

编辑2 包含更完整的程序

编辑1:包含完整的程序

我试图使用cuBLAS计算向量的L2范数。我的代码如下

// Copies an nrows x ncols matrix from device memory to the host and prints it.
// A must be a device pointer; the matrix is stored column-major (element (i,j)
// lives at j*nrows + i). cudaMemcpy is synchronous, so no explicit sync is needed.
void GPU_Print_Matrix(real_t *A, int nrows, int ncols) {
  real_t *hostA = (real_t*)malloc(nrows*ncols * sizeof(real_t));
  // NOTE(review): malloc result is not checked — a failed allocation would
  // hand a null destination to cudaMemcpy.
  CUDA_SAFE_CALL(cudaMemcpy(hostA, A, nrows*ncols * sizeof(real_t), cudaMemcpyDeviceToHost));

  cout << "GPU Matrix of Size: " << nrows << "x" << ncols << endl;
  for (int i = 0; i < nrows; ++i) {
    for (int j = 0; j < ncols; ++j) {
      // Column-major indexing: row i of column j.
      cout << fixed << setprecision(PRINT_PRECISION) << hostA[j*nrows + i] << " ";
    }
    cout << endl;
  }

  free(hostA);
  cout << endl;
}

// Fills vec (on the device) with pseudo-random values in (-initRange, initRange).
// rand() offsets the counting sequence so successive calls yield different data.
// RANDOM and initRange are defined elsewhere in this program — see the full
// listing in the answer below for a concrete definition.
void GPU_Random_Vector(thrust::device_vector <real_t> &vec) {
  thrust::counting_iterator<unsigned int> index_sequence_begin(rand());
  thrust::transform(index_sequence_begin, index_sequence_begin + vec.size(), vec.begin(), RANDOM(-initRange, initRange));
}

// Demo driver: builds a 10-element random device vector, prints it, and
// computes its Euclidean (L2) norm with cuBLAS.
int main(int argc, char *argv[]) {
  // NOTE(review): clock() is a weak seed (low resolution); time-based seeding
  // is used in the corrected listing below.
  srand(clock());
  cout << "# Running NMT" << endl;

  //ParseOpts(argc, argv);

  cublasHandle_t handle;
  CUBLAS_SAFE_CALL(cublasCreate(&handle));

  // Fill a 10-element device vector with uniform random values and print it.
  thrust::device_vector <real_t> x(10);
  GPU_Random_Vector(x);
  GPU_Print_Matrix(thrust::raw_pointer_cast(&x[0]), 10, 1);

  // nrm2: result is written back to the host scalar nrm (pointer mode HOST).
  real_t nrm = 0;
  CUBLAS_SAFE_CALL(cublasXnrm2(handle, 10, thrust::raw_pointer_cast(&x[0]), 1, &nrm));
  cout << "nrm2 = " << nrm << endl;

  // Release cuBLAS resources — the original leaked the handle.
  CUBLAS_SAFE_CALL(cublasDestroy(handle));
  return 0;
}

此处,CUBLAS_SAFE_CALL定义如下

// Checks a cuBLAS call: on any status other than CUBLAS_STATUS_SUCCESS,
// prints the file/line and the raw status code, then aborts the process.
// Wrapped in do { } while (0) so the macro behaves as a single statement
// and is safe inside an un-braced if/else (the bare { } form is not).
#define CUBLAS_SAFE_CALL(call)                                                     \
do {                                                                               \
  const cublasStatus_t stat = (call);                                              \
  if (stat != CUBLAS_STATUS_SUCCESS) {                                             \
    cout << "cuBlas Error: " << __FILE__ << ":" << __LINE__ << endl;               \
    cout << "  Code: " << stat << endl;                                            \
    exit(1);                                                                       \
  }                                                                                \
} while (0)

GPU_Random_Vector 和 GPU_Print_Matrix 已被确认可以正常工作。此外,cublasHandle[singleGPU] 在被调用之前已经初始化。当我运行程序时,得到以下输出:

// GPU_Print_Matrix
GPU Matrix of Size: 10x1
0.0652332678 
0.0747700930 
0.0274266358 
-0.0885794610 
-0.0192640368 
-0.0942506194 
0.0283640027 
-0.0411146656 
-0.0460337885 
-0.0970785618 

cuBlas Error: nmt.cu:2252
  Code: 14

发生了什么?有没有参考资料可以解释 cuBLAS 错误编号的含义?非常感谢。

1 个答案:

答案 0 :(得分:2)

cuBLAS 错误 14 是 CUBLAS_STATUS_INTERNAL_ERROR,通常意味着在 L2 范数调用结束时,内部的设备到主机拷贝失败了。但是,如果没有关于你的代码在做什么的更多背景信息,就不可能说清楚为什么会发生这种情况。

如果把你发布的代码组装并补全成一个完整的演示程序(并顺手修正那个小小的随机数种子错误),像这样:

#include <cstdlib>
#include <ctime>
#include <iomanip>
#include <iostream>

#include <cublas_v2.h>

#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/random.h>
#include <thrust/transform.h>

typedef float real_t;

// Checks a cuBLAS call: on any status other than CUBLAS_STATUS_SUCCESS,
// prints the file/line and the raw status code, then aborts the process.
// Wrapped in do { } while (0) so the macro behaves as a single statement
// and is safe inside an un-braced if/else (the bare { } form is not).
#define CUBLAS_SAFE_CALL(call)                                                     \
do {                                                                               \
  const cublasStatus_t stat = (call);                                              \
  if (stat != CUBLAS_STATUS_SUCCESS) {                                             \
    std::cout << "cuBlas Error: " << __FILE__ << ":" << __LINE__ << std::endl;     \
    std::cout << "  Code: " << stat << std::endl;                                  \
    exit(1);                                                                       \
  }                                                                                \
} while (0)

#define PRINT_PRECISION (6)

// Functor mapping an index n to a deterministic pseudo-random value in [a, b).
// Used with thrust::transform over a counting iterator: each invocation builds
// its own engine and discards n states, so the output is reproducible and
// independent of thread execution order.
struct RANDOM
{
    real_t a, b;   // lower / upper bound of the uniform distribution

    __host__ __device__
    RANDOM(real_t _a=0, real_t _b=1) : a(_a), b(_b) {};

    __host__ __device__
        real_t operator()(const unsigned int n) const
        {
            thrust::default_random_engine rng;
            // Use real_t (not hard-coded float) so the functor stays
            // consistent with the typedef if real_t is retyped to double.
            thrust::uniform_real_distribution<real_t> dist(a, b);
            rng.discard(n);

            return dist(rng);
        }
};

// Copies an nrows x ncols column-major device matrix to the host and prints it
// (element (i,j) lives at j*nrows + i). A must be a device pointer; cudaMemcpy
// is synchronous, so no explicit sync is needed before reading hostA.
void GPU_Print_Matrix(real_t *A, int nrows, int ncols) {
  const size_t bytes = (size_t)nrows * ncols * sizeof(real_t);
  real_t *hostA = (real_t*)malloc(bytes);
  if (hostA == NULL) {
    std::cerr << "GPU_Print_Matrix: host allocation failed" << std::endl;
    return;
  }
  // Check the copy instead of silently printing garbage on failure — the
  // original ignored the cudaError_t return value.
  const cudaError_t err = cudaMemcpy(hostA, A, bytes, cudaMemcpyDeviceToHost);
  if (err != cudaSuccess) {
    std::cerr << "GPU_Print_Matrix: cudaMemcpy failed: "
              << cudaGetErrorString(err) << std::endl;
    free(hostA);
    return;
  }

  std::cout << "GPU Matrix of Size: " << nrows << "x" << ncols << std::endl;
  for (int i = 0; i < nrows; ++i) {
    for (int j = 0; j < ncols; ++j) {
      // Column-major indexing: row i of column j.
      std::cout << std::fixed << std::setprecision(PRINT_PRECISION) << hostA[j*nrows + i] << " ";
    }
    std::cout << std::endl;
  }

  free(hostA);
  std::cout << std::endl;
}

// Fills vec (on the device) with uniform pseudo-random values in
// (-initRange, initRange) via a thrust transform over a counting iterator.
void GPU_Random_Vector(thrust::device_vector <real_t> &vec) {
  const real_t initRange = 10;
  // Offset the index sequence by a host-side random value so repeated calls
  // produce different vectors (each index maps deterministically to a value).
  const unsigned int seed = std::rand();
  thrust::counting_iterator<unsigned int> first(seed);
  thrust::counting_iterator<unsigned int> last = first + vec.size();
  thrust::transform(first, last, vec.begin(), RANDOM(-initRange, initRange));
}

// Demo driver: builds a 10-element random device vector, prints it, and
// computes its Euclidean (L2) norm with cuBLAS (cublasSnrm2 for real_t=float).
int main(int argc, char *argv[]) {
  std::srand(std::time(0));   // time-based seed so each run differs
  std::cout << "# Running NMT" << std::endl;

  cublasHandle_t handle;
  CUBLAS_SAFE_CALL(cublasCreate(&handle));

  // Fill a 10-element device vector with uniform random values and print it.
  thrust::device_vector <real_t> x(10);
  GPU_Random_Vector(x);
  GPU_Print_Matrix(thrust::raw_pointer_cast(&x[0]), 10, 1);

  // nrm2: result is written back to the host scalar nrm (pointer mode HOST).
  real_t nrm = 0;
  CUBLAS_SAFE_CALL(cublasSnrm2(handle, 10, thrust::raw_pointer_cast(&x[0]), 1, &nrm));
  std::cout << "nrm2 = " << nrm << std::endl;

  // Release cuBLAS resources — the original leaked the handle.
  CUBLAS_SAFE_CALL(cublasDestroy(handle));
  return 0;
}

编译并运行如下(CUDA 6.5,如果重要的话):

>nvcc -arch=sm_21 -run runkkari.cu -lcublas
runkkari.cu
   Creating library a.lib and object a.exp
# Running NMT
GPU Matrix of Size: 10x1
-5.712992
8.181723
-0.086308
-6.177320
-5.442665
-2.889552
-1.555665
6.506872
-6.800190
8.024273

nrm2 = 18.196394

它按预期工作。你应该能够编译并运行它来自己确认。因此,我们只能得出结论,你有另一个你未能描述的问题。但也许这有助于缩小可能性列表。