我正在尝试改写一个现有软件,它对一种特殊的 CSC 矩阵(每列恰好有 k 个非零元素)使用手工调优的稀疏乘法。我决定改用 cuSPARSE 来完成这项工作,但不幸的是,矩阵乘法在某些情况下需要 7 秒以上,这比代码的 CPU 版本慢得多。(所涉及的最大稀疏矩阵为 19871×1000,最大密集矩阵为 1000×150,nnz = 101000。)
当我尝试在一个自包含的示例中重现这个问题时,只要 nnz != sparse_cols,我总是会遇到"非法内存访问"错误。
经过一番调查后发现,如果我把矩阵的尺寸放大 10 倍,问题就会消失;如果我让矩阵足够小,也不会遇到崩溃。然而,对于大矩阵,稀疏矩阵的密集程度不能超过某个阈值,否则乘法就会产生一堆非法内存访问。以下是重现该问题的代码:
#include <cuda.h>
#include <cusparse.h>
#include <iostream>
#include <stdlib.h>
// Check the result of a CUDA runtime call; on failure, print the error string
// with file/line context and abort.
//  - do { } while (0) turns the macro into a single statement, so it is safe
//    inside an unbraced if/else.
//  - The local variable evaluates the argument exactly once; the original
//    macro re-evaluated `err` in cudaGetErrorString(err), which would have
//    executed the CUDA call a second time on the error path.
#define CALL_CUDA( err ) \
do { \
    cudaError_t err_ = (err); \
    if (err_ != cudaSuccess) \
    {std::cout<<"cuda Error "<< cudaGetErrorString(err_)<<" in "<<__FILE__<<" at line "<<__LINE__<<"\n"; exit(EXIT_FAILURE); }\
} while (0)
int main(){
//cusparse status and handle
cusparseStatus_t status;
cusparseHandle_t handle = 0;
status = cusparseCreate(&handle);
if (status != CUSPARSE_STATUS_SUCCESS){
std::cout << "Error creating handle: " << status << std::endl;
}
//Set matrix description
cusparseMatDescr_t descr; //Describe the matrices
cusparseCreateMatDescr(&descr);
cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO);
//Sparse matrix properties
int sparse_rows = 19871;
int sparse_cols = 1000;
int nnz_new = 101000;
//int nnz_new = 1000; //Works with that value
//Dense matrix properties
int bmat_rows = 1000;
int bmat_cols = 150;
//Generate a special type of sparse matrix that has exactly k nonzero elements in each column in CSC format
float * amat_vals;
CALL_CUDA(cudaMallocHost((void **)&amat_vals, nnz_new*sizeof(float)));
int * amat_idx;
CALL_CUDA(cudaMallocHost((void **)&amat_idx, nnz_new*sizeof(int)));
int * crccolptr;
CALL_CUDA(cudaMallocHost((void **)&crccolptr, (sparse_cols+1)*sizeof(int)));
//Fill in values with random values
for (int i = 0; i < nnz_new; i++){
amat_vals[i] = (float)rand()/(float)RAND_MAX;
}
//Generate indexes for those rows
for (int i = 0; i < nnz_new; i++){
amat_idx[i] = rand() % (sparse_rows - 1);
}
//generate crccolptr
int k = (int)(nnz_new/sparse_cols); //Number of elements per row
for (int i = 0; i < sparse_cols; i++){
crccolptr[i] = k*i;
}
crccolptr[sparse_cols] = nnz_new;
//Generate bmat_array with random floats
float * bmat_array;
CALL_CUDA(cudaMallocHost((void **)&bmat_array, bmat_rows*bmat_cols*sizeof(float)));
for (int i = 0; i < bmat_rows*bmat_cols; i++){
bmat_array[i] = (float)rand()/(float)RAND_MAX;
}
//generate array for output
float * outmatrix_test;
CALL_CUDA(cudaMallocHost((void **)&outmatrix_test, sparse_rows*bmat_cols*sizeof(float)));
//Allocate and copy device memory for sparse matrix
float * cudavals;
int * colIdx;
int * colPtr;
CALL_CUDA(cudaMalloc((void **)&colPtr, (sparse_cols + 1)*sizeof(int)));
CALL_CUDA(cudaMemcpy(colPtr, crccolptr, (sparse_cols + 1)*sizeof(int), cudaMemcpyHostToDevice));
CALL_CUDA(cudaMalloc((void **)&cudavals, nnz_new*sizeof(float)));
CALL_CUDA(cudaMalloc((void **)&colIdx, nnz_new*sizeof(int)));
CALL_CUDA(cudaMemcpy(cudavals, amat_vals, nnz_new*sizeof(float), cudaMemcpyHostToDevice));
CALL_CUDA(cudaMemcpy(colIdx, amat_idx, nnz_new*sizeof(int), cudaMemcpyHostToDevice));
//Allocate and copy device memory for dense matrix
float * B_gpumatrix;
CALL_CUDA(cudaMalloc((void **)&B_gpumatrix, bmat_rows*bmat_cols*sizeof(float)));
CALL_CUDA(cudaMemcpy(B_gpumatrix, bmat_array, bmat_rows*bmat_cols*sizeof(float), cudaMemcpyHostToDevice));
//Allocate output matrix
float * outmatrix_gpu;
CALL_CUDA(cudaMalloc((void **)&outmatrix_gpu, (sparse_rows*bmat_cols)*sizeof(float)));
//sparse_cols is passed as sparse_rows, because we're multiplying a CSC matrix instead of a CSR so we need
// to transpose it and invert the rows and columns.
const float alpha = 1.0;
const float beta = 0.0;
/*
float * outmatrix_gpu2;
CALL_CUDA(cudaMalloc((void **)&outmatrix_gpu2, (sparse_rows*sparse_cols)*sizeof(float)));
cusparseStatus_t mat_mul = cusparseScsc2dense(handle, sparse_rows, sparse_cols, descr, cudavals, colIdx, colPtr, outmatrix_gpu2, sparse_rows);
float * outmatrix_test2;
CALL_CUDA(cudaMallocHost((void **)&outmatrix_test2, sparse_rows*sparse_cols*sizeof(float)));
CALL_CUDA(cudaMemcpy(outmatrix_test2, outmatrix_gpu2, (sparse_rows*sparse_cols)*sizeof(float), cudaMemcpyDeviceToHost));
*/
cusparseStatus_t mat_mul = cusparseScsrmm(handle, //Cusparse handle
CUSPARSE_OPERATION_TRANSPOSE, //Transposing the matrix
sparse_cols, //Number of sparse rows. Since we're using CSC matrix it's the columns.
bmat_cols, //Number of columns of the dense matrix
sparse_rows, //Number of sparse cols, Since we're using CSC matrix it's the rows
nnz_new, //Non zero elements
&alpha, //Pointer to alpha (1.0)
descr, //Description of the matrix
cudavals, //The values vector
colPtr, //The column pointer
colIdx, //The indexes of the sparse matrix
B_gpumatrix, //Dense matrix array
bmat_rows, //ldb - the rows of the dense matrix
&beta, //Pointer to beta. It's 0
outmatrix_gpu, //Pointer to the output matrix
sparse_rows); //ldc - leading dimensions of the output matrix.
if (mat_mul != CUSPARSE_STATUS_SUCCESS){
std::cout << "MULTIPLICATION ERROR: " << mat_mul << std::endl;
}
cudaThreadSynchronize(); //Syncs before copy back to memory should not be necessary
cudaDeviceSynchronize();
//Copy matrix back to host
CALL_CUDA(cudaMemcpy(outmatrix_test, outmatrix_gpu, (sparse_rows*bmat_cols)*sizeof(float), cudaMemcpyDeviceToHost));
CALL_CUDA(cudaFree(outmatrix_gpu));
CALL_CUDA(cudaFree(cudavals));
CALL_CUDA(cudaFree(colPtr));
CALL_CUDA(cudaFree(colIdx));
CALL_CUDA(cudaFree(B_gpumatrix));
CALL_CUDA(cudaFreeHost(crccolptr));
CALL_CUDA(cudaFreeHost(amat_vals));
CALL_CUDA(cudaFreeHost(amat_idx));
CALL_CUDA(cudaFreeHost(bmat_array));
CALL_CUDA(cudaFreeHost(outmatrix_test));
return 1;
}
我相信我生成的是一个有效的稀疏矩阵,因为我可以使用相应的 cuSPARSE 函数把它转换成密集矩阵,而不会触发任何非法内存访问。
在 cuda-memcheck 下运行上述代码时,可以在 cusparseScsrmm 内看到许多非法访问。不使用 cuda-memcheck 运行时,错误会出现在矩阵乘法之后的第一个 CUDA 操作上。
知道我哪里做错了吗?我希望一旦解决了这个崩溃,就能在一个自包含的示例中诊断出(或至少隔离出)那个慢得令人痛苦的矩阵乘法。
编辑:
使用较小的矩阵不会出问题。50×200 的稀疏矩阵在 nnz 大约 1000 以内都能正常工作,但 nnz = 5000 时就会挂起(我在半分钟后把它杀掉了)。把矩阵尺寸增大到 200×500 后,nnz = 5000 立刻就能执行……很奇怪。
EDIT2:
如果我将矩阵的大小增加10倍,则nnz的原始数量有效。
答案 0(得分:2)
这是不明智的:
//Generate indexes for those rows
for (int i = 0; i < nnz_new; i++){
amat_idx[i] = rand() % (sparse_rows - 1);
}
CSR 矩阵格式要求值向量按从左到右、从上到下的顺序存储,因此每一行内的列索引必须递增。你是以随机顺序生成列索引的,而且实际上可能在同一行中生成两个列索引相同的元素,这直接破坏了格式约定。
你的变量命名也让我有些困惑。CSR 是压缩稀疏行(Compressed Sparse Row)格式;由于你使用的是 Scsrmm 函数,因此必须采用 CSR 格式。像 crccolptr 这样的变量名,在我看来对 CSR 格式没有意义。
作为简单的证明点,用以下内容替换上述摘录代码:
//Generate indexes for those rows
// Replacement index generation (proof of concept): instead of random —
// possibly duplicated and unsorted — indices, emit 0,1,2,... within each
// row's segment so the index array satisfies the CSR requirement of
// increasing, duplicate-free indices per row.
int my_idx = 0; // running write position into amat_idx
int j; // declared outside the loops so the tail loop below continues from its last value
for (int i = 0; i < sparse_rows; i++){
//amat_idx[i] = rand() % (sparse_rows - 1);
for (j = 0; j < (nnz_new/sparse_rows); j++)
amat_idx[my_idx++] = j; // strictly increasing within this row's segment
}
// Pad any leftover entries (when nnz_new is not an exact multiple of
// sparse_rows) with further increasing indices.
while (my_idx < nnz_new) amat_idx[my_idx++] = j++;
我相信错误会消失,因为现在的矩阵符合CSR格式预期。