CUDA Matrix Multiplication writes to the wrong memory locations

Date: 2012-02-11 22:50:31

Tags: c matrix cuda matrix-multiplication

The idea behind the simple program I have been trying to write is to take input from the user to determine how large the matrices to multiply should be:

dd@cuda-Linux:~/Desktop/multi$ ./program
What is the rowSize of a? 33
What is the colSize of a? 33
What is the rowSize of b? 33
What is the colSize of b? 33
Would you like to write the results to a file?(y or n)
y
Creating the random numbers now
Writing Matrix A to file now...
Writing Matrix B to file now...
Starting it on the device
Writing Matrix C to file now...
Finish

However, the problem is in my thread calculations. I can go up to a 32x32 matrix and it will run fine and give me the correct results. But as soon as I run a 33x33, I get results like the following:

[Matrix A] x [Matrix B] = [Matrix C] (linking to them rather than pasting several huge matrices into this post). With matrix C you can see that about halfway through it starts writing the wrong numbers. My graphics card is limited to 1024 threads, which is a 32x32 matrix. Also, when I run a 100x100 matrix, matrix C comes out all zeros.
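
As an aside, the per-block thread limit doesn't have to be assumed; it can be queried at runtime with the standard cudaGetDeviceProperties call. A minimal sketch (assuming <cstdio> and the CUDA runtime headers are available):

    // Query the device's per-block limits instead of hard-coding them
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0); // properties of device 0
    printf("maxThreadsPerBlock = %d\n", prop.maxThreadsPerBlock);
    printf("max block dims = %d x %d x %d\n",
           prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);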

Let mem_size_X be sizeof(float) * size_X, where size_X is the matrix's height * width. For now the height and width have to be the same, hence 32x32. block_size is also just the height, so for a 32x32 matrix the block size corresponds to 32. Host code (the launch):

    float* deviceMatrixA;
    float* deviceMatrixB;
    cudaMalloc((void**) &deviceMatrixA, mem_size_A); // allocate mem_size_A bytes on the device
    cudaMalloc((void**) &deviceMatrixB, mem_size_B);

    // copy the input matrices from the host to the device
    cudaMemcpy(deviceMatrixA, a.elements, mem_size_A, cudaMemcpyHostToDevice);
    cudaMemcpy(deviceMatrixB, b.elements, mem_size_B, cudaMemcpyHostToDevice);

    // allocate the result matrix on the host and the device
    int size_C = c.rowSize * c.colSize;
    int mem_size_C = sizeof(float) * size_C;
    c.elements = (float*) malloc(mem_size_C);

    float* deviceMatrixC;
    cudaMalloc((void**) &deviceMatrixC, mem_size_C);

    // one thread per element of C; note the integer division truncates
    // whenever rowSize/colSize is not a round multiple of block_size
    dim3 threads(block_size, block_size);
    dim3 grid(c.colSize / threads.x, c.rowSize / threads.y);

    // third launch argument: dynamic shared memory for one tile of A
    // plus one tile of B
    matrixMul<<< grid, threads, 2*block_size*block_size*sizeof(float) >>>(deviceMatrixC, deviceMatrixA, deviceMatrixB, a.colSize, b.colSize, block_size);
    cudaThreadSynchronize();
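
Note that nothing in this launch sequence checks whether the launch actually succeeded. A minimal check right after the launch (in the style of the GPUerrchk macro the answer below uses) would be:

    // right after the kernel launch:
    cudaError_t err = cudaGetLastError(); // catches invalid launch configurations
    if (err != cudaSuccess)
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));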

Kernel code:

// CUDA Kernel
__global__ void matrixMul( float* C, float* A, float* B, int wA, int wB, size_t block_size)
{
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    // First and last index of the tiles of A processed by this block,
    // and the strides used to step through the tiles of A and B
    int aBegin = wA * block_size * by;
    int aEnd   = aBegin + wA - 1;
    int aStep  = block_size;

    int bBegin = block_size * bx;
    int bStep  = block_size * wB;

    float Csub=0;

    for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) 
    {
        // Note: every extern __shared__ array aliases the start of the same
        // dynamic shared memory allocation, so As, Bs and smem are all the
        // same pointer; only smem is actually used below
        extern __shared__ float As[];
        extern __shared__ float Bs[];
        extern __shared__ float smem[];

        // Stage one tile of A and one tile of B in shared memory; B's tile
        // lives block_size*block_size floats after A's
        smem[ty*block_size+tx] = A[a + wA * ty + tx];
        smem[block_size*block_size+ty*block_size+tx] = B[b + wB * ty + tx];

        __syncthreads();

        // Multiply the two tiles together, accumulating into Csub
        for (int k = 0; k < block_size; ++k)
            Csub += smem[ty*block_size+k] * smem[block_size*block_size+k*block_size+tx];

        __syncthreads();
    }

    // Write this thread's element of the result to global memory
    int c = wB * block_size * by + block_size * bx;
    C[c + wB * ty + tx] = Csub;
}

Thanks

1 Answer:

Answer 0 (score: 3)

As I told you in an earlier, almost identical question, this matrix multiplication code is only designed to perform computations on matrices whose dimensions are a round multiple of block_size. If you choose block_size = 32, then it can only be used for 32x32, 64x64, 96x96, 128x128, and so on. Nothing you have done with dynamically allocated shared memory changes that.

To verify that this is the case, let's start with a complete, compilable repro case that runs your kernel, checks whether it executed, and compares its output to a simple reference calculation done on the host. This code is your posted kernel, plus the core of your launch parameter calculations. It will accept a size from the command line and then run the case. If the results differ by more than a certain tolerance, an assertion error is raised. Here is the code; it should compile on CUDA 3.0 or later and run on any CUDA-compatible GPU:

#include <assert.h>
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <ctime> // for the time(NULL) seed below

inline void GPUassert(cudaError_t code, const char * file, int line, bool Abort=true)
{
    if (code != 0) {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code),file,line);
        if (Abort) exit(code);
    }       
}

#define GPUerrchk(ans) { GPUassert((ans), __FILE__, __LINE__); }

__global__ void matrixMul( float* C, float* A, float* B, int wA, int wB, size_t block_size)
{
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    int aBegin = wA * block_size * by;
    int aEnd   = aBegin + wA - 1;
    int aStep  = block_size;
    int bBegin = block_size * bx;
    int bStep  = block_size * wB;

    float Csub=0.f;
    for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) 
    {
        extern __shared__ float smem[];

        smem[ty*block_size+tx] = A[a + wA * ty + tx];
        smem[block_size*block_size+ty*block_size+tx]  = B[b + wB * ty + tx];

        __syncthreads();

        for (int k = 0; k < block_size; ++k)
            Csub += smem[ty*block_size+k] * smem[block_size*block_size+k*block_size+tx] ;

        __syncthreads();
    }

    int c = wB * block_size * by + block_size * bx;
    C[c + wB * ty + tx] = Csub;
}

inline float frand(){
    return (float)rand()/(float)RAND_MAX;
}

void matmul(float *C, const float *A, const float *B,  int wA, int wB)
{
    for(int k=0; k<wB; k++) {
        for(int j=0; j<wB; j++) {
            float dotp = 0.f;
            for(int i=0; i<wA; i++) {
                dotp += A[j*wA+i] * B[i*wB+k];
            }
            C[j*wB+k] = dotp;
        }
    }
}

int main(int argc, char ** argv)
{
    int val = 128;

    if ( argc == 2 ) {
        val = atoi(argv[1]);
    }

    int m = val, n = val, mn = m*n;
    size_t sz = size_t(mn) * sizeof(float);

    srand(time(NULL));

    float * A = new float[mn], * B = new float[mn], * C= new float[mn];
    float * A_, * B_, * C_;

    for(int i=0; i<mn; i++) {
        A[i] = frand(); B[i] = frand();
    }

    GPUerrchk( cudaMalloc((void **)&A_, sz) );
    GPUerrchk( cudaMalloc((void **)&B_, sz) );
    GPUerrchk( cudaMalloc((void **)&C_, sz) );

    GPUerrchk( cudaMemcpy(A_, A, sz, cudaMemcpyHostToDevice) );
    GPUerrchk( cudaMemcpy(B_, B, sz, cudaMemcpyHostToDevice) );

    // Launch configuration
    // Note that the input matrix sizes *must* be a round
    // multiple of blocksize for this code to work correctly.
    const int blocksize=16;
    const int shmsz = size_t(2*blocksize*blocksize) * sizeof(float);
    dim3 block=dim3(blocksize,blocksize), grid = dim3(m/block.x,m/block.y); 

    matrixMul<<<grid,block,shmsz>>>(C_,A_,B_,m,n,blocksize);
    GPUerrchk( cudaPeekAtLastError() );

    GPUerrchk( cudaMemcpy(C, C_, sz, cudaMemcpyDeviceToHost) );

    // Verification on host
    float * Cref = new float[mn];
    matmul(Cref,A,B,m,n); 
    const float tol = 5e-5f;
    for(int i=0; i<mn; i++) {
        assert(fabs(C[i]-Cref[i])/C[i] < tol);
    }

    GPUerrchk( cudaThreadExit() ); // CUDA 3.2 compatible

    return 0;
}

Now, let's run this code for different sizes. To verify that the code on the GPU isn't doing anything wrong, I will run it with the cuda-memcheck utility, which can detect out-of-bounds memory accesses. All of the following tests were done on an OS X 10.6 machine with a compute capability 1.2 card and CUDA 3.2, using blocksize=16:

$ nvcc -arch=sm_12 -Xcompiler="-Wall" -Xptxas="-v" -o matmul2 matmul2.cu
ptxas info    : Compiling entry function '_Z9matrixMulPfS_S_iim' for 'sm_12'
ptxas info    : Used 16 registers, 32+16 bytes smem, 4 bytes cmem[1]

First, let's try a case where the matrix is smaller than blocksize:

$ cuda-memcheck ./matmul2 4
========= CUDA-MEMCHECK
GPUassert: invalid configuration argument matmul2.cu 101
========= ERROR SUMMARY: 0 errors

Here the kernel failed to run with an invalid configuration argument error. Why? This:

    dim3 block=dim3(blocksize,blocksize), grid = dim3(m/block.x,m/block.y); 

yields a grid dimension of 0 when m, n < blocksize.
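
A common guard against this (a suggestion here, not something in the original code) is to round the block count up with ceiling division:

    // Ceiling division: a 17x17 input gets a 2x2 grid of 16x16 blocks
    // instead of a 1x1 grid (or a 0x0 grid for inputs below blocksize)
    dim3 grid((m + blocksize - 1) / blocksize, (n + blocksize - 1) / blocksize);

On its own, though, that would only trade the launch error for out-of-bounds accesses with this kernel, because the extra threads still read and write past the real matrices unless the kernel also gains bounds checks (see the sketch at the end of this answer).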

Next, let's try the smallest round multiple of blocksize, in this case 16:

$ cuda-memcheck ./matmul2 16
========= CUDA-MEMCHECK
========= ERROR SUMMARY: 0 errors

That runs without error or assertion failure. Now let's increase the size to 17:

$ cuda-memcheck ./matmul2 17
========= CUDA-MEMCHECK
GPUassert: unspecified launch failure matmul2.cu 103
========= Invalid __global__ read of size 4
=========     at 0x000001f8 in matrixMul
=========     by thread (0,2,0) in block (0,0)
=========     Address 0x001009c8 is out of bounds
=========
========= ERROR SUMMARY: 1 error

We get the expected launch failure, along with the out-of-bounds memory access cuda-memcheck detected. To see concretely why: with m = n = 17 the grid is 17/16 = 1 block, and on the second tile iteration b = blocksize*wB = 272, so thread (0,2,0), for example, reads B[272 + 17*2 + 0] = B[306], beyond the last valid index 17*17 - 1 = 288. Now let's try 64, 96 and 128:

$ cuda-memcheck ./matmul2 64
========= CUDA-MEMCHECK
========= ERROR SUMMARY: 0 errors

$ cuda-memcheck ./matmul2 96
========= CUDA-MEMCHECK
========= ERROR SUMMARY: 0 errors

$ cuda-memcheck ./matmul2 128
========= CUDA-MEMCHECK
========= ERROR SUMMARY: 0 errors

Finally, let's try 129:

$ cuda-memcheck ./matmul2 129
========= CUDA-MEMCHECK
GPUassert: unspecified launch failure matmul2.cu 103
========= Invalid __global__ read of size 4
=========     at 0x000001f8 in matrixMul
=========     by thread (0,1,0) in block (0,0)
=========     Address 0x00120904 is out of bounds
=========
========= ERROR SUMMARY: 1 error

Even if you don't follow why the out-of-bounds errors occur, are you at least now willing to accept that this code really does only work correctly for matrices whose dimensions are round multiples of block_size?
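
For completeness, the usual way to lift the restriction is to combine the ceiling-division grid shown earlier with bounds checks in the kernel. What follows is only a sketch of that idea, not code from the question or this answer; the hA/wA/wB parameters, the zero-padding of out-of-range tile elements, and the guarded final store are all assumptions of the sketch:

__global__ void matrixMulGuarded(float* C, const float* A, const float* B,
                                 int hA, int wA, int wB, int block_size)
{
    // Global row/column of C this thread is responsible for
    int row = blockIdx.y * block_size + threadIdx.y;
    int col = blockIdx.x * block_size + threadIdx.x;
    int tx = threadIdx.x, ty = threadIdx.y;

    extern __shared__ float smem[];            // one tile of A, then one of B
    float* As = smem;
    float* Bs = smem + block_size * block_size;

    float Csub = 0.f;
    // Walk the tiles along the shared dimension, rounding the count up
    for (int t = 0; t < (wA + block_size - 1) / block_size; ++t) {
        int aCol = t * block_size + tx;
        int bRow = t * block_size + ty;
        // Zero-pad any load that falls outside the real matrices, so the
        // extra products contribute nothing to the dot product
        As[ty*block_size+tx] = (row < hA && aCol < wA) ? A[row*wA + aCol] : 0.f;
        Bs[ty*block_size+tx] = (bRow < wA && col < wB) ? B[bRow*wB + col] : 0.f;
        __syncthreads();

        for (int k = 0; k < block_size; ++k)
            Csub += As[ty*block_size+k] * Bs[k*block_size+tx];
        __syncthreads();
    }

    // Only threads mapping to a real element of C write a result
    if (row < hA && col < wB)
        C[row*wB + col] = Csub;
}

Launched with dim3 grid((wB + blocksize - 1)/blocksize, (hA + blocksize - 1)/blocksize) and the same 2*block_size*block_size*sizeof(float) of dynamic shared memory, a version along these lines should pass the cuda-memcheck tests above for arbitrary sizes, at the cost of a couple of extra comparisons per load.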