I am having trouble with CUDA when passing a class to a kernel. I have some functions that allocate memory for a class on the GPU, pass it in, and work correctly. However, there is another one that does not, and I noticed this only happens when an array is involved. Here is an example.
Prova1.hh
#ifndef PROVA1_HH
#define PROVA1_HH
#include <cstdio>

class cls {
public:
    int *x, y;

    cls();
    void kernel();
};
#endif
File1.cu
#include "Prova1.hh"
__global__ void kernel1(cls* c){
printf("%d\n", c->y);
c->y=2;
printf("%d\n", c->y);
c->x[0]=0; c->x[1]=1;
printf("%d %d\n", c->x[0], c->x[1]);
}
void cls::kernel(){
cls* dev_c; cudaMalloc(&dev_c, sizeof(cls));
cudaMemcpy(dev_c, this, sizeof(cls), cudaMemcpyHostToDevice);
printf("(%d, %d)\n", x[0], x[1]);
kernel1<<<1, 1>>> (dev_c);
cudaDeviceSynchronize();
cudaMemcpy(this, dev_c, sizeof(cls), cudaMemcpyDeviceToHost);
printf("(%d, %d)\n", x[0], x[1]);
}
cls::cls(){
y=3;
x=(int*) malloc(sizeof(int)*2);
x[0]=1; x[1]=2;
}
File.cu
#include <cstdio>
#include "Prova1.hh"

int main(){
    cls c = cls();
    c.kernel();
    return 0;
}
I am compiling with:
nvcc -std=c++11 -arch=sm_35 -rdc=true -c -o File1.o File1.cu
nvcc -std=c++11 -arch=sm_35 -rdc=true -g -G -o File.out File1.o File.cu
When I simply run it, the output is:
(1, 2)
3
2
(1, 2)
When debugging, I get:
Starting program:
[Thread debugging using libthread_db enabled]
Using host libthread_db library "/lib/aarch64-linux-gnu/libthread_db.so.1".
[New Thread 0x7fb10eb1e0 (LWP 806)]
(1, 2)
CUDA Exception: Warp Illegal Address
The exception was triggered at PC 0x84fa10
Thread 1 "File.out" received signal CUDA_EXCEPTION_14, Warp Illegal Address.
[Switching focus to CUDA kernel 0, grid 1, block (0,0,0), thread (0,0,0), device 0, sm 0, warp 0, lane 0]
0x000000000084fad0 in kernel1(ciao*)<<<(1,1,1),(1,1,1)>>> ()
Does any of you know what mistake I am making?
Answer 0 (score: 1)
There are a number of problems with the code you have posted, but the core source of the error is that you are trying to access a host pointer inside the kernel (x is never allocated on the device, and the values it points to are never copied there either). That obviously can never work unless you use managed memory. You could rework your example into something like this:
#include <cstdio>
#include <cstdlib>

class cls {
public:
    int *x, y;

    __host__ __device__
    cls(int *x_, int y_) : x(x_), y(y_) {}

    void kernel();
};

__global__ void kernel1(cls* c){
    printf("%d\n", c->y);
    c->y = 2;
    printf("%d\n", c->y);
    c->x[0] = 0; c->x[1] = 1;
    printf("%d %d\n", c->x[0], c->x[1]);
}

void cls::kernel(){
    // Allocate device storage for the array and copy its contents over
    int* dev_x;
    cudaMalloc(&dev_x, sizeof(int) * 2);
    cudaMemcpy(dev_x, x, sizeof(int) * 2, cudaMemcpyHostToDevice);

    // Build a host-side copy of the class that holds the *device* pointer,
    // then copy that object to the device
    cls h_dev_c(dev_x, y);
    cls* dev_c;
    cudaMalloc(&dev_c, sizeof(cls));
    cudaMemcpy(dev_c, &h_dev_c, sizeof(cls), cudaMemcpyHostToDevice);

    printf("(%d)\n", y);
    printf("(%d, %d)\n", x[0], x[1]);

    kernel1<<<1, 1>>>(dev_c);
    cudaDeviceSynchronize();

    // Copy the scalar and the array contents back to the host
    cudaMemcpy(&y, &(dev_c->y), sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(x, dev_x, sizeof(int) * 2, cudaMemcpyDeviceToHost);

    printf("(%d)\n", y);
    printf("(%d, %d)\n", x[0], x[1]);
}

int main(){
    int y = 3;
    int* x = (int*) malloc(sizeof(int) * 2);
    x[0] = 1; x[1] = 2;

    cls c(x, y);
    c.kernel();
    return 0;
}
Note that you basically have to build a copy of the class in host memory that holds the device pointer, and then copy that to the device for this to work (this is a very common design pattern for arrays of pointers and for structures and classes containing pointers, although it is almost never recommended, for reasons of both complexity and performance).
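For reference, here is a minimal sketch of the managed-memory alternative mentioned above. It is a variation, not part of the answer's code: it assumes a GPU and CUDA toolkit with unified memory support and omits error checking. Because the object and its array both live in managed memory, no explicit copies are needed:

#include <cstddef>
#include <cstdio>
#include <cuda_runtime.h>

class cls {
public:
    int *x, y;

    // Hypothetical variation: place the object itself in managed memory
    // so the kernel can be handed `this` directly
    void* operator new(std::size_t len) {
        void* ptr;
        cudaMallocManaged(&ptr, len);
        return ptr;
    }
    void operator delete(void* ptr) { cudaFree(ptr); }

    cls() {
        y = 3;
        cudaMallocManaged(&x, sizeof(int) * 2);   // x is visible to host and device
        x[0] = 1; x[1] = 2;
    }
    ~cls() { cudaFree(x); }

    void kernel();
};

__global__ void kernel1(cls* c){
    printf("%d\n", c->y);
    c->y = 2;
    c->x[0] = 0; c->x[1] = 1;
}

void cls::kernel(){
    printf("(%d, %d)\n", x[0], x[1]);
    kernel1<<<1, 1>>>(this);    // no explicit cudaMemcpy needed
    cudaDeviceSynchronize();    // the kernel must finish before the host reads the data
    printf("(%d, %d)\n", x[0], x[1]);
}

int main(){
    cls* c = new cls();         // allocated in managed memory via the overloaded operator new
    c->kernel();
    delete c;
    return 0;
}

Overloading operator new/delete is just one way to get the object itself into managed memory; calling cudaMallocManaged directly for a plain pointer to the object works the same way.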