How to call an existing host function from a device function in CUDA

Date: 2014-12-03 21:45:23

Tags: c++ linux cuda cublas

I have seen a similar question here.

However, I could not find the exact answer there, and it was written in 2012. I am trying to call the cublasStatus_t cublasSgbmv(...) function, which is declared in "cublas_v2.h", inside a __global__ function, but I cannot get the dynamic parallelism feature to work. I only have a single source.cu file. However, I have read that I should compile it in a way that separates the device and host functions, and then link those outputs. Does anyone know how to do that, or know a good source that explains it? Thanks in advance.
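To make the pattern clearer, this is roughly what I am attempting (a minimal sketch only, not my real code; the kernel name and parameters are illustrative). The device-side cuBLAS API comes from the libcublas_device library and requires dynamic parallelism, i.e. compute capability 3.5+:

#include <cublas_v2.h>

// Minimal sketch: the thread creates a device-side cuBLAS handle,
// issues a banded matrix-vector multiply, then releases the handle.
__global__ void gbmv_sketch(int m, int n, int kl, int ku,
                            const float* alpha, const float* A, int lda,
                            const float* x, const float* beta, float* y)
{
    cublasHandle_t handle;
    if (cublasCreate(&handle) != CUBLAS_STATUS_SUCCESS)
        return;
    cublasSgbmv(handle, CUBLAS_OP_N, m, n, kl, ku,
                alpha, A, lda, x, 1, beta, y, 1);
    cublasDestroy(handle);
}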

Edit: If you are downvoting, please explain why, so that at least I can understand my mistake.

Edit 2: To be specific, I am using the following code in Source.cu:

#include <iostream> 
#include <vector> 
#include <cuda.h>
#include <cstdio>
#include <stdio.h>
#include <device_launch_parameters.h>
#include <stdlib.h> //srand(), rand()
#include <time.h>
#include <builtin_types.h>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#define IDX2C(i,j,ld) (((j)*(ld))+(i))
#define HEIGHT 4
#define WIDTH 4
#define V 4
#define KL 2
#define KU 1

#define THREADS_PER_BLOCK 512

#pragma comment(lib, "cublas") 
//#pragma comment(lib, "helper_cuda")
using namespace std;

void create_Matrix(int* matrix, int width, int height){
    int i, len;
    len = height * width;
    srand(time(NULL));
    for (i = 0; i < len; i++){
        matrix[i] = rand() % 10 + 1; // generates a number between 1 and 10
    }
}
template <typename T>
void print_vector(T* vector, int len){
    for (int i = 0; i < len; i++)
        cout << vector[i] << " ";
    cout << endl;
}

template <typename T>
void creating_bandedMatrix(T* bandedMatrix, int height, int width, int ku, int kl){
    //fill matrix with zeros at the beginning
    int i, len;
    len = height * width;
    for (i = 0; i < len; i++){
        bandedMatrix[i] = 0; // zero-fill the matrix first
    }
    srand(time(NULL));

    //filling banded diagonal
    int start, end;
    for (int i = 0; i < height; i++){
        start = i - kl;
        if (start < 0)
            start = 0;
        end = i + ku + 1;
        if (end > width)
            end = width;
        for (int j = start; j < end; j++){
            *(bandedMatrix + (i*width) + j) = (float)(rand() % 10 + 1); // random value between 1 and 10
        }
    }
}

template <typename T>
void print_matrix(T* matrix, int width, int height){
    int len = width*height;
    cout << "asdsffffff" << endl;
    for (int i = 0; i < len; i++){
        if (!(i%width))
            cout << endl;
        cout << i << ":" <<matrix[i] << " ";
    }
    cout << endl;
}

template <typename T>
void computeMatrixVectorMultiplication(T* bandedMatrix, T* vector2){
    T row_sum = 0;
    T* bandedHostResult = (T*)malloc(WIDTH * sizeof(T));
    for (int i = 0; i < HEIGHT; i++){
        row_sum = 0;
        for (int j = 0; j < WIDTH; j++){
            row_sum += (*(bandedMatrix + i*WIDTH + j)) * vector2[j];
        }
        bandedHostResult[i] = row_sum;
    }

    // print the result
    cout << "\n\nBanded Host Result...\n";
    print_vector(bandedHostResult, WIDTH);
    free(bandedHostResult); // avoid leaking the temporary result buffer
}

template <typename T>
void fillLapackMatrix(T* lapack_matrix, T* bandedMatrix, int kl, int ku, int banded_w, int banded_h, int lapack_w, int lapack_h){
    int i, j, lapack_i;
    int len = lapack_h * lapack_w;
    // zero-fill the LAPACK-format matrix first
    for (i = 0; i < len; i++){
        lapack_matrix[i] = 0;
    }
    for (i = 0; i < banded_w; i++){
        for (j = 0; j < banded_h; j++){
            lapack_i = ku + i - j;
            *(lapack_matrix + lapack_i*lapack_w + j) = *(bandedMatrix + i*banded_w + j);
        }
    }
}




__global__ void device_cublasSgbmv(int m, int n, int kl, int ku, float* alpha, float* A, int lda, float* B, int ldb, float* R, int ldr, float* beta){

    int index = blockIdx.x * blockDim.x + threadIdx.x;
    // each thread creates its own device-side handle and multiplies one column of B
    cublasHandle_t handle;
    cublasCreate(&handle);
    cublasOperation_t trans = CUBLAS_OP_N;

    if(index < ldr){
        cublasSgbmv(handle, trans, m, n, kl, ku, alpha, A, m, B + index*n, 1, beta, R + index*n, 1);
    }
    cublasDestroy(handle); // release the handle created by this thread
}


void fillNormalMatrix(float* B, int h, int w){
    for(int i = 0; i < h; i++){
        for(int j = 0; j < w; j++){
            B[i*w + j] = 1;
        }
    }
}

int main()
{
    cublasStatus_t status;
    float *A;

    float *x, *y;
    float *dev_x, *dev_y;
    int incx, incy;
    float *dev_A = 0;

    float alpha = 1.0f;
    float beta = 0.0f;
    int matrixSize = WIDTH * HEIGHT;
    int i, j;

    cublasHandle_t handle;

    /* Initialize CUBLAS */
    status = cublasCreate(&handle);

    if (status != CUBLAS_STATUS_SUCCESS)
    {
        fprintf(stderr, "!!!! CUBLAS initialization error\n");
        return EXIT_FAILURE;
    }

    // Allocate host memory for the matrix
    A = (float *)malloc(matrixSize * sizeof(float));

    // Allocate memory for the host vectors
    x = (float *)malloc(WIDTH * sizeof(float));
    y = (float *)malloc(WIDTH * sizeof(float));

    // Fill the matrix with test data
    creating_bandedMatrix(A, WIDTH, HEIGHT, KU, KL);
    cout << "Banded Matrix\n";
    print_matrix(A, WIDTH, HEIGHT);

    // Fill the vectors
    for (i = 0; i < WIDTH; i++){
        x[i] = 1; // (float)(rand() % 10 + 1);
        y[i] = (float)(rand() % 10 + 1);
    }
    cout << "\nvector x...\n";
    print_vector(x, WIDTH);
    //cout << "\nvector y...\n";
    //print_vector(y, WIDTH);

    // Allocate device memory for the matrix
    if (cudaMalloc((void **)&dev_A, matrixSize * sizeof(float)) != cudaSuccess)
    {
        fprintf(stderr, "!!!! device memory allocation error (allocate A)\n");
        return EXIT_FAILURE;
    }

    // Allocate device memory for the vectors
    if (cudaMalloc((void**)&dev_x, WIDTH * sizeof(float)) != cudaSuccess){
        fprintf(stderr, "Device Vector Allocation PROBLEM\n");
        return EXIT_FAILURE;
    }
    if (cudaMalloc((void**)&dev_y, WIDTH * sizeof(float)) != cudaSuccess){
        fprintf(stderr, "Device Vector Allocation PROBLEM\n");
        return EXIT_FAILURE;
    }

    // Initialize the device vectors with the host vectors
    status = cublasSetVector(WIDTH, sizeof(float), x, 1, dev_x, 1);
    if (status != CUBLAS_STATUS_SUCCESS)
    {
        fprintf(stderr, "!!!! device access error (write x vector)\n");
        return EXIT_FAILURE;
    }

    status = cublasSetVector(WIDTH, sizeof(float), y, 1, dev_y, 1);
    if (status != CUBLAS_STATUS_SUCCESS)
    {
        fprintf(stderr, "!!!! device access error (write y vector)\n");
        return EXIT_FAILURE;
    }

    // Initialize the matrix in LAPACK band-storage format
    int lapack_width = WIDTH > HEIGHT ? HEIGHT : WIDTH;
    int lapack_height = KL + KU + 1;
    int lapackSize = lapack_height * lapack_width;
    float* lapack_matrix = (float*)malloc(lapackSize * sizeof(float));
    fillLapackMatrix(lapack_matrix, A, KL, KU, WIDTH, HEIGHT, lapack_width, lapack_height);
    cout << "\n\nLAPACK Matrix\n";
    print_matrix(lapack_matrix, lapack_width, lapack_height);

    // Convert to column-major storage
    float* col = (float*)malloc(lapackSize * sizeof(float));
    for (i = 0; i < WIDTH; i++){
        for (j = 0; j < HEIGHT; j++){
            col[i + WIDTH*j] = lapack_matrix[WIDTH*i + j];
        }
    }

    cout << "Lapack Column Based Matrix\n";
    print_matrix(col, HEIGHT-1, WIDTH);

    //status = cublasSetVector(lapackSize, sizeof(float), A, 1, dev_A, 1);
    cublasSetMatrix(HEIGHT, WIDTH, sizeof(float), col, HEIGHT, dev_A, HEIGHT);

    cublasOperation_t trans = CUBLAS_OP_N;
    incy = incx = 1;

    /////////////////////////  Banded Matrix-Matrix Multiplication  /////////////////////////
    float *B, *dev_B, *dev_R, *R;
    B = (float*)malloc(WIDTH*HEIGHT*sizeof(float));
    R = (float*)malloc(WIDTH*HEIGHT*sizeof(float));
    fillNormalMatrix(B, WIDTH, HEIGHT);

    cudaMalloc((void**)&dev_B, matrixSize*sizeof(*B));
    cudaMalloc((void**)&dev_R, matrixSize*sizeof(*R));

    cublasSetMatrix(HEIGHT, WIDTH, sizeof(*B), B, HEIGHT, dev_B, HEIGHT);

    cout << "Matrix B\n";
    print_matrix(B, HEIGHT, WIDTH);

    // launch one block of 4 threads; each thread handles one column of B
    device_cublasSgbmv<<<1,4>>>(HEIGHT, WIDTH, KL, KU, &alpha, dev_A, WIDTH, dev_B, HEIGHT, dev_R, HEIGHT, &beta);

    cublasGetMatrix(HEIGHT, WIDTH, sizeof(*R), dev_R, WIDTH, R, WIDTH);

    getchar();
    return 0;
}

and I compile it with:

nvcc -gencode=arch=compute_35,code=sm_35 -lcublas -lcudadevrt -O3 Source.cu -o Source.o -dc

g++ Source.o -lcublas -lcudart

Then I get the following:

In function     `__sti____cudaRegisterAll_48_tmpxft_00001f1e_00000000_6_Source_cpp1_ii_ebe2258a()':
tmpxft_00001f1e_00000000-3_lapack_vector.cudafe1.cpp:(.text.startup+0x575): undefined  reference to  `__cudaRegisterLinkedBinary_48_tmpxft_00001f1e_00000000_6_Source_cpp1_ii_ebe2258a'
collect2: error: ld returned 1 exit status

1 Answer:

Answer 0 (score: 0):

You can compile and link the code you have now shown with the following single command:

nvcc -arch=sm_35 -rdc=true -lcublas -lcublas_device -lcudadevrt -o test Source.cu
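Here -rdc=true generates relocatable device code, which both dynamic parallelism and the device-side cuBLAS calls require; -lcublas_device supplies the device-callable cuBLAS implementation, and -lcudadevrt the CUDA device runtime.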

You may see some warnings:

nvlink warning : SM Arch ('sm_35') not found in '/usr/local/cuda/bin/..//lib64/libcublas_device.a:maxwell_sgemm.asm.o'
nvlink warning : SM Arch ('sm_35') not found in '/usr/local/cuda/bin/..//lib64/libcublas_device.a:maxwell_sm50_sgemm.o'
nvlink warning : SM Arch ('sm_35') not found in '/usr/local/cuda/bin/..//lib64/libcublas_device.a:maxwell_sm50_ssyrk.o'

These can be safely ignored.
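For reference, the link error in the question comes from a missing device-link step: when you compile with -dc (separate compilation), an explicit nvcc -dlink pass must produce a device-linked object before the host linker runs, otherwise the __cudaRegisterLinkedBinary_* symbol stays unresolved. If you do want to keep a separate g++ host-link step, the build would look roughly like this (a sketch; output names and the library path are assumptions for a typical Linux install):

nvcc -arch=sm_35 -dc Source.cu -o Source.o
nvcc -arch=sm_35 -dlink Source.o -lcublas_device -lcudadevrt -o Source_dlink.o
g++ Source.o Source_dlink.o -L/usr/local/cuda/lib64 -lcublas -lcublas_device -lcudadevrt -lcudart -o test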