For my first attempt at working with CUDA / cuBLAS, I'm trying to write a simple function that multiplies an MxN matrix (represented as a vector of vectors, std::vector<std::vector<double>>) by an Nx1 vector of ones, in order to get the row-wise sums of the matrix. This uses cublas_gemv() along with a few other basic CUDA operations, so I figured it would be a reasonable starting point.
After working through setup issues and reading/copying example code, here is what I have:
std::vector<double> test(std::vector<std::vector<double>> in)
{
    std::vector<double> out;
    long in_m = in.size();
    long in_n = in[0].size();
    cudaError_t cudaStat;
    cublasStatus_t stat;
    cublasHandle_t handle;
    // This just converts a vector-of-vectors into a col-first array
    double* p_in = vec2d_to_colfirst_array(in);
    double* p_ones = new double[in_n];
    double* p_out = new double[in_m];
    std::fill(p_ones, p_ones + in_n, 1.0);
    double* dev_in;
    double* dev_ones;
    double* dev_out;
    cudaStat = cudaMalloc((void**)&dev_in, in_m * in_n * sizeof(double));
    cudaStat = cudaMalloc((void**)&dev_ones, in_n * sizeof(double));
    cudaStat = cudaMalloc((void**)&dev_out, in_m * sizeof(double));
    stat = cublasCreate(&handle);
    cudaStat = cudaMemcpy(dev_in, p_in, in_m * in_n * sizeof(double), cudaMemcpyHostToDevice);
    cudaStat = cudaMemcpy(dev_ones, p_ones, in_n * sizeof(double), cudaMemcpyHostToDevice);
    double alpha = 1.0;
    double beta = 0.0;
    stat = cublasDgemv(handle, CUBLAS_OP_N, in_m, in_n, &alpha, dev_in, in_m, dev_ones, 1, &beta, dev_ones, 1);
    cudaStat = cudaMemcpy(p_out, dev_out, in_m * sizeof(double), cudaMemcpyDeviceToHost);
    out.assign(p_out, p_out + in_m);
    cudaFree(dev_in);
    cudaFree(dev_ones);
    cudaFree(dev_out);
    cublasDestroy(handle);
    free(p_in);
    free(p_ones);
    free(p_out);
    return out;
}
It doesn't look much different from the samples I've read, so I was hoping it would "just work". However, when I check p_out, the values are all zeros, and I'm certainly not passing in an all-zero in matrix.
I verified that vec2d_to_colfirst_array() does its job correctly, and also that dev_in / dev_ones are populated correctly, by copying the data from the device back to the host and reading it. Maybe the problem lies inside the call to cublasDgemv(), but since I'm new to this (and since the BLAS syntax is not very intuitive compared to Eigen), after much frustration I still can't see what's wrong.
Any help is appreciated!
Answer (score: 2)
The error seems fairly simple. You expect to copy the result from dev_out:
cudaStat = cudaMemcpy(p_out, dev_out, in_m * sizeof(double), cudaMemcpyDeviceToHost);
but you never actually use dev_out in your cublas call:
stat = cublasDgemv(handle, CUBLAS_OP_N, in_m, in_n, &alpha, dev_in, in_m, dev_ones, 1, &beta, dev_ones, 1);
This looks like a simple copy-paste error. If you replace the last instance of dev_ones in the cublas call with dev_out, your code works for me:
stat = cublasDgemv(handle, CUBLAS_OP_N, in_m, in_n, &alpha, dev_in, in_m, dev_ones, 1, &beta, dev_out, 1);
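For reference, Dgemv computes y = alpha * op(A) * x + beta * y, so the final vector argument (together with its increment) is the output vector y. In the original call that slot was dev_ones, which is why dev_out was never written and the values copied back to p_out were all zero.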
Here is a complete worked example with that change:
$ cat t315.cu
#include <vector>
#include <cublas_v2.h>
#include <iostream>
const long idim1 = 8;
const long idim2 = 8;
double* vec2d_to_colfirst_array(std::vector<std::vector<double>> in){
    long dim1 = in.size();
    long dim2 = in[0].size();
    long k = 0;
    double *res = new double[dim1*dim2];
    for (int i = 0; i < dim1; i++)
        for (int j = 0; j < dim2; j++) res[k++] = in[i][j];
    return res;
}
std::vector<double> test(std::vector<std::vector<double>> in)
{
    std::vector<double> out;
    long in_m = in.size();
    long in_n = in[0].size();
    cudaError_t cudaStat;
    cublasStatus_t stat;
    cublasHandle_t handle;
    // This just converts a vector-of-vectors into a col-first array
    double* p_in = vec2d_to_colfirst_array(in);
    double* p_ones = new double[in_n];
    double* p_out = new double[in_m];
    std::fill(p_ones, p_ones + in_n, 1.0);
    double* dev_in;
    double* dev_ones;
    double* dev_out;
    cudaStat = cudaMalloc((void**)&dev_in, in_m * in_n * sizeof(double));
    cudaStat = cudaMalloc((void**)&dev_ones, in_n * sizeof(double));
    cudaStat = cudaMalloc((void**)&dev_out, in_m * sizeof(double));
    stat = cublasCreate(&handle);
    cudaStat = cudaMemcpy(dev_in, p_in, in_m * in_n * sizeof(double), cudaMemcpyHostToDevice);
    cudaStat = cudaMemcpy(dev_ones, p_ones, in_n * sizeof(double), cudaMemcpyHostToDevice);
    double alpha = 1.0;
    double beta = 0.0;
    stat = cublasDgemv(handle, CUBLAS_OP_N, in_m, in_n, &alpha, dev_in, in_m, dev_ones, 1, &beta, dev_out, 1);
    cudaStat = cudaMemcpy(p_out, dev_out, in_m * sizeof(double), cudaMemcpyDeviceToHost);
    out.assign(p_out, p_out + in_m);
    cudaFree(dev_in);
    cudaFree(dev_ones);
    cudaFree(dev_out);
    cublasDestroy(handle);
    free(p_in);
    free(p_ones);
    free(p_out);
    return out;
}
int main(){
    std::vector<double> a(idim2, 1.0);
    std::vector<std::vector<double>> b;
    for (int i = 0; i < idim1; i++) b.push_back(a);
    std::vector<double> c = test(b);
    for (int i = 0; i < c.size(); i++) std::cout << c[i] << ",";
    std::cout << std::endl;
}
$ nvcc -std=c++11 -o t315 t315.cu -lcublas
t315.cu(24): warning: variable "cudaStat" was set but never used
t315.cu(25): warning: variable "stat" was set but never used
$ cuda-memcheck ./t315
========= CUDA-MEMCHECK
8,8,8,8,8,8,8,8,
========= ERROR SUMMARY: 0 errors
$
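As an aside, the compiler warnings above point out that the cudaStat and stat return values are set but never inspected. A minimal sketch of how those status codes could be checked (the CHECK_CUDA / CHECK_CUBLAS macro names are illustrative, not part of the original code, and require <cstdio> and <cstdlib> in addition to the headers already included):

// Abort with a message when a CUDA runtime or cuBLAS call reports an error.
#define CHECK_CUDA(call)                                              \
    do {                                                              \
        cudaError_t err_ = (call);                                    \
        if (err_ != cudaSuccess) {                                    \
            fprintf(stderr, "CUDA error %s at %s:%d\n",               \
                    cudaGetErrorString(err_), __FILE__, __LINE__);    \
            exit(EXIT_FAILURE);                                       \
        }                                                             \
    } while (0)

#define CHECK_CUBLAS(call)                                            \
    do {                                                              \
        cublasStatus_t st_ = (call);                                  \
        if (st_ != CUBLAS_STATUS_SUCCESS) {                           \
            fprintf(stderr, "cuBLAS error %d at %s:%d\n",             \
                    (int)st_, __FILE__, __LINE__);                    \
            exit(EXIT_FAILURE);                                       \
        }                                                             \
    } while (0)

// Usage, e.g.:
//   CHECK_CUDA(cudaMalloc((void**)&dev_in, in_m * in_n * sizeof(double)));
//   CHECK_CUBLAS(cublasCreate(&handle));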
Note that I don't think free() is the correct API to use with memory allocated via new, but that doesn't appear to be the crux of your problem here.
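For completeness, a minimal sketch of the matching cleanup (same pointer names as in the code above) would pair new[] with delete[]:

    // Memory obtained with new[] should be released with delete[], not free():
    delete[] p_in;    // allocated with new double[dim1*dim2] in vec2d_to_colfirst_array()
    delete[] p_ones;  // allocated with new double[in_n]
    delete[] p_out;   // allocated with new double[in_m]

Alternatively, using std::vector<double> for the host staging buffers (and passing .data() to the cudaMemcpy calls) would avoid the manual new/delete pairing entirely.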