I wrote some CUDA code, and everything looks great until I try to get the results back from the device:
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cstdlib>
#include <ctime>
#include <iostream>
#define maskSize 3
__constant__ float masks[32*maskSize*maskSize];
__global__ void myConv(float *res, const float* mats, int mSize)
{
extern __shared__ float curr[];
int rSize=maskSize+mSize-1;
int idxmod=(threadIdx.x+maskSize-1) % (mSize+2*maskSize-2); //these two map any value not within (mSize-1,mSize-1) to the boarders for padding.
int idymod=(threadIdx.y+maskSize-1) % (mSize+2*maskSize-2);
if (threadIdx.x < mSize && threadIdx.y < mSize) //put the value of mats in the middle of the curr matrix
curr[(threadIdx.x+ maskSize-1)*(mSize+2*(maskSize-1)) + threadIdx.y + maskSize-1]=mats[mSize*(blockIdx.y*mSize + threadIdx.x) + threadIdx.y];
else //zero padding
if (threadIdx.x < mSize)
curr[threadIdx.x*(mSize+2*(maskSize-1)) +idymod] =0;
else
curr[idxmod*(mSize+2*(maskSize-1)) +threadIdx.y] =0;
__syncthreads();
float tmp=0;
if (threadIdx.x < mSize+maskSize-1 && threadIdx.y < mSize+maskSize-1)
{
#pragma unroll
for (int i=0;i<maskSize;i++)
#pragma unroll
for (int j=0;j<maskSize;j++)
tmp+=curr[(threadIdx.x+i)*(mSize+2*(maskSize-1)) + threadIdx.y+j]*masks[blockIdx.x*maskSize*maskSize +maskSize*i +j];
res[blockIdx.y*rSize*rSize + threadIdx.x*rSize + threadIdx.y]=tmp;
}
}
int main()
{
    int MatSize = 5;
    int bSize = 2000;
    int maskNum = 10;
    int resSize = MatSize + maskSize - 1;

    float* ms = (float *)malloc(maskSize*maskSize*maskNum*sizeof(float));
    float* resPtr = (float *)malloc((MatSize+maskSize-1)*(MatSize+maskSize-1)*bSize*maskNum*sizeof(float));
    for (int i = 0; i < maskSize; i++)
        for (int j = 0; j < maskSize; j++)
            for (int k = 0; k < maskNum; k++)
                ms[k*maskSize*maskSize + j*maskSize + i] = (float)(rand() % 1000)/100;

    float* inp = (float *)malloc(MatSize*MatSize*bSize*sizeof(float));
    for (int i = 0; i < MatSize; i++)
        for (int j = 0; j < MatSize; j++)
            for (int k = 0; k < bSize; k++)
                inp[k*MatSize*MatSize + j*MatSize + i] = (float)(rand() % 500)/100;

    float *cudams, *cudaresPtr, *cudainp;
    cudaMalloc((void **) &cudams, maskSize*maskSize*maskNum*sizeof(float));
    cudaMalloc((void **) &cudaresPtr, (MatSize+maskSize-1)*(MatSize+maskSize-1)*bSize*maskNum*sizeof(float));
    cudaMalloc((void **) &cudainp, MatSize*MatSize*bSize*sizeof(float));

    cudaMemcpy((void *)cudams, (void *)ms, maskSize*maskSize*maskNum*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy((void *)cudainp, (void *)inp, MatSize*MatSize*bSize*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpyToSymbol(masks, (void *)cudams, maskSize*maskSize*maskNum*sizeof(float), 0, cudaMemcpyDeviceToDevice);

    dim3 threadSize(MatSize+2*(maskSize-1), MatSize+2*(maskSize-1));
    dim3 blockSize(1, 1); // for testing purposes. should be dim3 blockSize(maskNum, bSize);
    myConv<<<blockSize, threadSize, (MatSize+2*(maskSize-1))*(MatSize+2*(maskSize-1))>>>(cudaresPtr, cudainp, MatSize);

    cudaMemcpy((void *)resPtr, (const void *)cudaresPtr, (MatSize+maskSize-1)*(MatSize+maskSize-1)*bSize*maskNum*sizeof(float), cudaMemcpyDeviceToHost);
    // The problem is here - the copy back won't work!

    free(inp);
    free(ms);
    free(resPtr);
    return 0;
}
I have put printf calls in various places, used error checking as suggested here, and printed the error strings... I cannot find anything that would prevent the pointer's contents from being copied back to the host.
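(For reference, by "error checking" I mean the usual cudaError_t wrapper, roughly like the minimal sketch below; gpuErrchk/gpuAssert are illustrative names, not my exact code.)

#include <cstdio>
#include "cuda_runtime.h"

#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line)
{
    if (code != cudaSuccess) // print the runtime API error string with its location
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
}
// e.g. gpuErrchk(cudaMemcpy(...)); after each runtime call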
Edit: cuda-memcheck results. If I understand them correctly, there are no errors:
O:\CudaTst> cuda-memcheck CUDA_TST
========= CUDA-MEMCHECK
Time taken: 0.144000 seconds   Error: unable to read error string / log
========= ERROR SUMMARY: 0 errors
Re-ran with -l (leak check) - 0 leaks.
Answer 0 (score: 1):
It looks like you are (at least) launching the kernel with too little dynamically allocated shared memory, which causes a buffer overflow inside the kernel.
The amount of shared memory per block is specified in bytes, so I suspect you want something like this:
size_t shmsz = sizeof(float) * size_t((MatSize+2*(maskSize-1)) *
                                      (MatSize+2*(maskSize-1)));
myConv<<<blockSize, threadSize, shmsz>>>(cudaresPtr, cudainp, MatSize);
Beyond that, I will leave the debugging to you.
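For completeness, here is a rough sketch of checking the launch, the kernel execution, and the copy-back explicitly (reusing the variable names from your code; illustrative only, not a drop-in fix):

// Sketch: explicit launch / execution / copy-back checks (names from the question)
myConv<<<blockSize, threadSize, shmsz>>>(cudaresPtr, cudainp, MatSize);
cudaError_t err = cudaGetLastError();          // launch configuration errors
if (err == cudaSuccess)
    err = cudaDeviceSynchronize();             // errors raised while the kernel ran
if (err == cudaSuccess)
    err = cudaMemcpy(resPtr, cudaresPtr,
                     (MatSize+maskSize-1)*(MatSize+maskSize-1)*bSize*maskNum*sizeof(float),
                     cudaMemcpyDeviceToHost);  // the copy the question says fails
if (err != cudaSuccess)
    std::cerr << "CUDA error: " << cudaGetErrorString(err) << std::endl;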