我试图在 CUDA 中用 tex2D 来表示一个数组……经过数小时的调试后,我注意到 100 万个元素中有 19 个被错误地复制到了纹理中:作为二进制数组,我本应得到 1 的位置却得到了 0。
// Copies h_in (a len*len byte image in ordinary, densely-packed host memory)
// into a pitched device allocation, exposes it as a 2D texture object, runs
// evolve_kernel over it, copies the pitched device result back into the
// densely-packed h_out, and reports any element that did not round-trip.
// Kernel time is measured with CUDA events.
void evolve_gpu( byte* h_in, byte* h_out)
{
    //int SIZE = N * N * N * N * sizeof( float );
    cudaEvent_t start, stop;
    size_t d_in_pitch;
    size_t d_out_pitch;
    int len = 1002;
    checkCudaErrors( cudaEventCreate(&start) );
    checkCudaErrors( cudaEventCreate(&stop) );

    // Allocate the device input/output image arrays (pitched rows).
    unsigned char *d_in = NULL;
    unsigned char *d_out = NULL;
    checkCudaErrors(cudaMallocPitch(&d_in, &d_in_pitch, sizeof(unsigned char)*len, len));
    checkCudaErrors(cudaMallocPitch(&d_out, &d_out_pitch, sizeof(unsigned char)*len, len));

    // Copy the host input image to the device. The host source is densely
    // packed, so its pitch is just the row width in bytes.
    checkCudaErrors(cudaMemcpy2D(d_in, d_in_pitch, h_in, sizeof(unsigned char)*len,
                                 sizeof(unsigned char)*len, len, cudaMemcpyHostToDevice));

    /**************************** TEXTURE CONFIGURATION ******************************/
    cudaResourceDesc resDesc;
    memset(&resDesc, 0, sizeof(resDesc));
    resDesc.resType = cudaResourceTypePitch2D;
    resDesc.res.pitch2D.devPtr = d_in;
    resDesc.res.pitch2D.pitchInBytes = d_in_pitch;    // must be the real device pitch
    resDesc.res.pitch2D.width = len;
    resDesc.res.pitch2D.height = len;
    resDesc.res.pitch2D.desc = cudaCreateChannelDesc<unsigned char>();

    cudaTextureDesc texDesc;
    memset(&texDesc, 0, sizeof(texDesc));
    texDesc.readMode = cudaReadModeElementType;       // return raw bytes, not normalized floats
    texDesc.normalizedCoords = false;                 // integer texel coordinates
    texDesc.addressMode[0] = cudaAddressModeBorder;   // out-of-range reads return 0
    texDesc.addressMode[1] = cudaAddressModeBorder;

    cudaTextureObject_t tex;
    // FIX: the original ignored this call's return status.
    checkCudaErrors(cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL));
    /*********************************************************************************/

    checkCudaErrors( cudaEventRecord(start, NULL) );

    // Launch the CUDA kernel; round the grid up so it covers all len x len pixels.
    dim3 block = dim3(THREADS_X, THREADS_Y);
    dim3 grid = dim3((len+block.x-1)/block.x, (len+block.y-1)/block.y);
    evolve_kernel<<<grid, block>>>( tex, d_out );
    // NOTE(review): d_out is a pitched allocation, yet its pitch is not passed
    // to the kernel — confirm evolve_kernel indexes d_out using d_out_pitch.
    checkCudaErrors( cudaGetLastError() );  // catch launch-configuration errors immediately

    // Copy the device result back to the host.
    // BUG FIX: the destination (h_out) is densely packed, so its pitch is the
    // plain row width in bytes — NOT d_out_pitch. Passing the device pitch as
    // the host pitch skews every row and corrupts the comparison below.
    checkCudaErrors(cudaMemcpy2D(h_out, sizeof(unsigned char)*len,
                                 d_out, d_out_pitch,
                                 sizeof(unsigned char)*len, len,
                                 cudaMemcpyDeviceToHost));

    // Verify the round trip: print every index where output differs from input.
    for (int i = 0; i < len*len; i++){
        if (h_in[i] != h_out[i])
            printf("i = %d\n", i);
    }

    checkCudaErrors( cudaEventRecord(stop, NULL) );
    checkCudaErrors( cudaEventSynchronize(stop) );

    float msec = 0.f;
    checkCudaErrors( cudaEventElapsedTime(&msec, start, stop) );
    printf("Basic version took: %f ms\n", msec);

    // Release device resources (the original leaked the texture object and events).
    checkCudaErrors( cudaDestroyTextureObject(tex) );
    checkCudaErrors( cudaFree(d_in) );
    checkCudaErrors( cudaFree(d_out) );
    checkCudaErrors( cudaEventDestroy(start) );
    checkCudaErrors( cudaEventDestroy(stop) );
}
答案 0(得分:2)
我在您的代码中看到的一个问题是您的设备->主机(device->host)拷贝:
checkCudaErrors(cudaMemcpy2D(h_out, d_out_pitch,
d_out, d_out_pitch,
sizeof(unsigned char)*len, len,
cudaMemcpyDeviceToHost));
参考文档,此 cudaMemcpy2D 调用的第二个参数是目标分配的 pitch(行间距),即本例中 h_out 的 pitch。但 h_out 不太可能是按 pitch 对齐的分配;即便它是,d_out_pitch 也不太可能恰好是它的 pitch。虽然您没有给出完整代码,但假设 h_out 和 h_in 是类似的(普通、无填充的)分配,那么第二个参数应改为 h_out 数组的无填充行宽:
checkCudaErrors(cudaMemcpy2D(h_out, len*sizeof(unsigned char),
                             d_out, d_out_pitch,
                             sizeof(unsigned char)*len, len,
                             cudaMemcpyDeviceToHost));
我还很好奇:你没有把 d_out 的 pitch 传给内核,那内核是如何在 d_out(一个按 pitch 分配的数组)上正常工作的?
evolve_kernel<<<grid, block>>>( tex, d_out );
我原本希望看到这样的调用:
evolve_kernel<<<grid, block>>>( tex, d_out, d_out_pitch);
但你没有展示你的内核代码。
这是一个完整的示例,我围绕您展示的代码创建,修复了上述问题,并进行了一些其他更改以构建示例:
$ cat t648.cu
#include <stdio.h>
#include <helper_cuda.h>
#define THREADS_X 16
#define THREADS_Y 16
const int len = 1002;
typedef unsigned char byte;
// Copies the input texture into the pitched output buffer, one byte per
// thread. Expects a 2D launch whose grid covers at least len x len threads.
// pitch is d_out's row pitch in BYTES; since each element is one byte, it
// can be used directly as a row stride.
__global__ void evolve_kernel(cudaTextureObject_t tex, unsigned char *d_out, size_t pitch ){
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    // The grid over-covers the image; threads past the edge do nothing.
    if (col >= len || row >= len)
        return;
    d_out[row * pitch + col] = tex2D<unsigned char>(tex, col, row);
}
// Round-trips a len*len byte image through the GPU: copies h_in into a
// pitched device allocation, reads it back out through a 2D texture object
// via evolve_kernel into a second pitched allocation, copies that into the
// densely-packed h_out, and prints every index that did not round-trip.
// Kernel time is measured with CUDA events.
void evolve_gpu( byte* h_in, byte* h_out)
{
    //int SIZE = N * N * N * N * sizeof( float );
    cudaEvent_t start, stop;
    size_t d_in_pitch;
    size_t d_out_pitch;
    checkCudaErrors( cudaEventCreate(&start) );
    checkCudaErrors( cudaEventCreate(&stop) );

    // Allocate the device input/output image arrays (pitched rows).
    unsigned char *d_in = NULL;
    unsigned char *d_out = NULL;
    checkCudaErrors(cudaMallocPitch(&d_in, &d_in_pitch, sizeof(unsigned char)*len, len));
    checkCudaErrors(cudaMallocPitch(&d_out, &d_out_pitch, sizeof(unsigned char)*len, len));

    // Copy the host input image to the device; the densely-packed host
    // source has a pitch equal to its row width in bytes.
    checkCudaErrors(cudaMemcpy2D(d_in, d_in_pitch, h_in, sizeof(unsigned char)*len,
                                 sizeof(unsigned char)*len, len, cudaMemcpyHostToDevice));

    /**************************** TEXTURE CONFIGURATION ******************************/
    cudaResourceDesc resDesc;
    memset(&resDesc, 0, sizeof(resDesc));
    resDesc.resType = cudaResourceTypePitch2D;
    resDesc.res.pitch2D.devPtr = d_in;
    resDesc.res.pitch2D.pitchInBytes = d_in_pitch;    // real device pitch, not len
    resDesc.res.pitch2D.width = len;
    resDesc.res.pitch2D.height = len;
    resDesc.res.pitch2D.desc = cudaCreateChannelDesc<unsigned char>();

    cudaTextureDesc texDesc;
    memset(&texDesc, 0, sizeof(texDesc));
    texDesc.readMode = cudaReadModeElementType;       // return raw bytes, not normalized floats
    texDesc.normalizedCoords = false;                 // integer texel coordinates
    texDesc.addressMode[0] = cudaAddressModeBorder;   // out-of-range reads return 0
    texDesc.addressMode[1] = cudaAddressModeBorder;

    cudaTextureObject_t tex;
    // FIX: the original ignored this call's return status.
    checkCudaErrors(cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL));
    /*********************************************************************************/

    checkCudaErrors( cudaEventRecord(start, NULL) );

    // Launch the CUDA kernel; round the grid up so it covers all len x len pixels.
    dim3 block = dim3(THREADS_X, THREADS_Y);
    dim3 grid = dim3((len+block.x-1)/block.x, (len+block.y-1)/block.y);
    evolve_kernel<<<grid, block>>>( tex, d_out, d_out_pitch );
    checkCudaErrors( cudaGetLastError() );  // catch launch-configuration errors immediately

    // Copy the device result back to the host: destination pitch is the
    // dense host row width; source pitch is the device allocation's pitch.
    checkCudaErrors(cudaMemcpy2D(h_out, len*sizeof(unsigned char),
                                 d_out, d_out_pitch,
                                 sizeof(unsigned char)*len, len,
                                 cudaMemcpyDeviceToHost));

    // Verify the round trip: print every index where output differs from input.
    for (int i = 0; i < len*len; i++){
        if (h_in[i] != h_out[i])
            printf("i = %d\n", i);
    }

    checkCudaErrors( cudaEventRecord(stop, NULL) );
    checkCudaErrors( cudaEventSynchronize(stop) );

    float msec = 0.f;
    checkCudaErrors( cudaEventElapsedTime(&msec, start, stop) );
    printf("Basic version took: %f ms\n", msec);

    // Release device resources (the original leaked the texture object and events).
    checkCudaErrors( cudaDestroyTextureObject(tex) );
    checkCudaErrors( cudaFree(d_in) );
    checkCudaErrors( cudaFree(d_out) );
    checkCudaErrors( cudaEventDestroy(start) );
    checkCudaErrors( cudaEventDestroy(stop) );
}
// Driver: fills a len*len host image with a constant test pattern, runs the
// GPU round trip, and relies on evolve_gpu's internal comparison to report
// any mismatched element.
int main(){
    byte *h_data_in, *h_data_out;
    h_data_in  = (byte *)malloc(len*len*sizeof(byte));
    h_data_out = (byte *)malloc(len*len*sizeof(byte));
    // FIX: the original never checked the allocations.
    if (h_data_in == NULL || h_data_out == NULL){
        printf("host allocation failed\n");
        free(h_data_in);
        free(h_data_out);
        return 1;
    }
    for (int i = 0; i < len*len; i++){
        h_data_in[i]  = 3;   // arbitrary non-zero test pattern
        h_data_out[i] = 0;
    }
    evolve_gpu(h_data_in, h_data_out);
    // FIX: the original leaked both host buffers.
    free(h_data_in);
    free(h_data_out);
    return 0;
}
$ nvcc -arch=sm_35 -I/usr/local/cuda/samples/common/inc t648.cu -o t648
$ ./t648
Basic version took: 3.868576 ms
$
它似乎正常工作并通过了您创建的测试。