Is pitched memory allocation more efficient when using 2D arrays?

Asked: 2013-02-05 19:27:46

Tags: c optimization cuda

I am implementing an application in CUDA on a GPU with compute capability 1.3. The application scans a large 2D array for the locations at which a smaller 2D array occurs. So far, both arrays have been allocated with cudaMallocPitch() and transferred with cudaMemcpy2D() to meet the alignment requirements for coalesced memory access.
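For reference, a minimal sketch of that allocation and transfer pattern (the dimensions and variable names here are hypothetical, not my actual code):

#include <cuda_runtime.h>
#include <stdlib.h>

int main()
{
    const int width = 1024, height = 768;    // hypothetical array size
    float *host = (float *)malloc(width * height * sizeof(float));
    float *dev;
    size_t pitch;                            // row stride in bytes, chosen by the runtime

    // Each row starts at an aligned address; rows may be padded at the end.
    cudaMallocPitch(&dev, &pitch, width * sizeof(float), height);

    // 2D copy: source rows are packed, destination rows are pitch bytes apart.
    cudaMemcpy2D(dev, pitch, host, width * sizeof(float),
                 width * sizeof(float), height, cudaMemcpyHostToDevice);

    cudaFree(dev); free(host);
    return 0;
}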

In a first optimization step, I tried to coalesce the accesses to global memory by cooperatively reading the data into shared memory. As a test on the still unoptimized code (which, for example, contains divergent branches and does not coalesce its global memory accesses), I allocated the larger array with plain cudaMalloc() instead and measured a performance increase of up to 50%. How is this possible?
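For context, the cooperative read into shared memory follows a pattern like the sketch below (the tile size, names, and the omitted search logic are illustrative, not my actual kernel):

#define TILE 16

// Sketch: each thread of a TILE x TILE block loads one element, so that
// consecutive threadIdx.x values read consecutive addresses (coalesced).
__global__ void load_tile(const float * __restrict__ g, int Nrows, int Ncols)
{
    __shared__ float tile[TILE][TILE];

    int col = blockIdx.x * TILE + threadIdx.x;
    int row = blockIdx.y * TILE + threadIdx.y;

    if ((row < Nrows) && (col < Ncols))
        tile[threadIdx.y][threadIdx.x] = g[row * Ncols + col];
    __syncthreads();

    // ... the block then works on tile[][] in shared memory ...
}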

2 Answers:

Answer 0: (score: 3)

cudaMallocPitch() ensures that the starting address of each row of a 2-D array (stored row-major) is a multiple of 2^N bytes (N ranges from 7 to 10, depending on the compute capability).
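A quick way to see this on a given device is to print the pitch returned by cudaMallocPitch(). A minimal sketch (the exact alignment is device-dependent):

#include <stdio.h>
#include <cuda_runtime.h>

int main()
{
    float *d;
    size_t pitch;

    // 1000 floats per row = 4000 bytes; the returned pitch is rounded up to
    // the device's row alignment, e.g. 4096 bytes for 512-byte alignment.
    cudaMallocPitch(&d, &pitch, 1000 * sizeof(float), 100);
    printf("requested row width = %zu bytes, pitch = %zu bytes\n",
           1000 * sizeof(float), pitch);

    cudaFree(d);
    return 0;
}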

Whether access is more efficient depends not only on data alignment, but also on your compute capability, the global memory access pattern, and sometimes the cache configuration.
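As an example of the last point, on devices with a configurable L1/shared memory split, the cache balance can be changed, which sometimes affects global memory performance. A sketch of the relevant runtime calls (my_kernel is a placeholder, not code from the question):

#include <cuda_runtime.h>

__global__ void my_kernel() {}    // placeholder kernel

int main()
{
    // Prefer a larger L1 cache over shared memory, device-wide; this is only
    // honored on devices with a configurable L1/shared split.
    cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);

    // The same preference can also be set for an individual kernel.
    cudaFuncSetCacheConfig(my_kernel, cudaFuncCachePreferL1);
    return 0;
}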

This blog post explains the bandwidth reduction caused by misaligned data accesses on early compute capabilities, which may be the answer to your question.

https://developer.nvidia.com/content/how-access-global-memory-efficiently-cuda-cc-kernels

Since performance depends on many factors, you may also have to post your device type and your kernel code to allow further investigation.

Answer 1: (score: 2)

As kangshiyin has already pointed out, the improvement brought by cudaMallocPitch depends on the compute capability, and it is expected to be more significant for early compute capabilities. For the most recent compute capabilities, however, pitched memory allocation does not seem to lead to any relevant speedup.

The code below provides a performance testbed comparing non-pitched and pitched memory usage. In particular, it performs the sum of three (non-pitched or pitched) matrices. Three matrices are summed, rather than one, to emphasize the memory transactions relative to the computation and thus highlight any difference between the non-pitched and pitched allocations. Below are the timing results on a GTX 960 card and on a GT 920M card.

GTX 960

Non-pitched - Time = 3.242208; Memory = 65320000 bytes
Pitched     - Time = 3.150944; Memory = 65433600 bytes

GT 920M

Non-pitched - Time = 20.496799; Memory = 65320000 bytes
Pitched     - Time = 20.418560; Memory = 65433600 bytes

As can be seen, the two versions do not differ significantly on either card. The results above also show the increase in memory footprint caused by the pitched allocation.
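These footprints are consistent with the matrix size used in the code below (Nrows = 7100, Ncols = 2300): the non-pitched buffers take 7100 × 2300 × 4 = 65320000 bytes, while cudaMallocPitch apparently rounds each 9200-byte row up to 9216 bytes, the next multiple of 512 on these cards, giving 7100 × 9216 = 65433600 bytes.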

Here is the code (gpuErrchk, iDivUp, and TimingGPU are helpers from the included Utilities.cuh and TimingGPU.cuh headers):

#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>

#include "Utilities.cuh"
#include "TimingGPU.cuh"

#define BLOCKSIZE_x 16
#define BLOCKSIZE_y 16

/******************/
/* TEST KERNEL 2D */
/******************/
__global__ void test_kernel_2D(float * __restrict__ devPtrA, float * __restrict__ devPtrB, float * __restrict__ devPtrC, const int Nrows, const int Ncols)
{
    int    tidx = blockIdx.x * blockDim.x + threadIdx.x;
    int    tidy = blockIdx.y * blockDim.y + threadIdx.y;

    // Element-wise sum of three row-major matrices in ordinary linear memory
    if ((tidx < Ncols) && (tidy < Nrows)) {
        devPtrA[tidy * Ncols + tidx] = devPtrA[tidy * Ncols + tidx] + devPtrB[tidy * Ncols + tidx] + devPtrC[tidy * Ncols + tidx];
    }
}

/**************************/
/* TEST KERNEL PITCHED 2D */
/**************************/
__global__ void test_kernel_Pitched_2D(float * __restrict__ devPtrA, float * __restrict__ devPtrB, float * __restrict__ devPtrC, const size_t pitchA, const size_t pitchB, const size_t pitchC, const int Nrows, const int Ncols)
{
    int    tidx = blockIdx.x * blockDim.x + threadIdx.x;
    int    tidy = blockIdx.y * blockDim.y + threadIdx.y;

    if ((tidx < Ncols) && (tidy < Nrows))
    {
        // Rows are pitch bytes apart, so compute each row's base address
        // with byte arithmetic before indexing by column
        float *row_a = (float *)((char*)devPtrA + tidy * pitchA);
        float *row_b = (float *)((char*)devPtrB + tidy * pitchB);
        float *row_c = (float *)((char*)devPtrC + tidy * pitchC);
        row_a[tidx] = row_a[tidx] + row_b[tidx] + row_c[tidx];
    }
}

/********/
/* MAIN */
/********/
int main()
{
    const int Nrows = 7100;
    const int Ncols = 2300;

    TimingGPU timerGPU;

    float *hostPtrA = (float *)malloc(Nrows * Ncols * sizeof(float));
    float *hostPtrB = (float *)malloc(Nrows * Ncols * sizeof(float));
    float *hostPtrC = (float *)malloc(Nrows * Ncols * sizeof(float));
    float *devPtrA, *devPtrPitchedA;
    float *devPtrB, *devPtrPitchedB;
    float *devPtrC, *devPtrPitchedC;
    size_t pitchA, pitchB, pitchC;

    for (int i = 0; i < Nrows; i++)
        for (int j = 0; j < Ncols; j++) {
            hostPtrA[i * Ncols + j] = 1.f;
            hostPtrB[i * Ncols + j] = 2.f;
            hostPtrC[i * Ncols + j] = 3.f;
            //printf("row %i column %i value %f \n", i, j, hostPtrA[i * Ncols + j]);
        }

    // --- 2D non-pitched allocation and host->device memcopy
    gpuErrchk(cudaMalloc(&devPtrA, Nrows * Ncols * sizeof(float)));
    gpuErrchk(cudaMalloc(&devPtrB, Nrows * Ncols * sizeof(float)));
    gpuErrchk(cudaMalloc(&devPtrC, Nrows * Ncols * sizeof(float)));
    gpuErrchk(cudaMemcpy(devPtrA, hostPtrA, Nrows * Ncols * sizeof(float), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(devPtrB, hostPtrB, Nrows * Ncols * sizeof(float), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(devPtrC, hostPtrC, Nrows * Ncols * sizeof(float), cudaMemcpyHostToDevice));

    // --- 2D pitched allocation and host->device memcopy
    gpuErrchk(cudaMallocPitch(&devPtrPitchedA, &pitchA, Ncols * sizeof(float), Nrows));
    gpuErrchk(cudaMallocPitch(&devPtrPitchedB, &pitchB, Ncols * sizeof(float), Nrows));
    gpuErrchk(cudaMallocPitch(&devPtrPitchedC, &pitchC, Ncols * sizeof(float), Nrows));
    gpuErrchk(cudaMemcpy2D(devPtrPitchedA, pitchA, hostPtrA, Ncols * sizeof(float), Ncols*sizeof(float), Nrows, cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy2D(devPtrPitchedB, pitchB, hostPtrB, Ncols * sizeof(float), Ncols*sizeof(float), Nrows, cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy2D(devPtrPitchedC, pitchC, hostPtrC, Ncols * sizeof(float), Ncols*sizeof(float), Nrows, cudaMemcpyHostToDevice));

    dim3 gridSize(iDivUp(Ncols, BLOCKSIZE_x), iDivUp(Nrows, BLOCKSIZE_y));
    dim3 blockSize(BLOCKSIZE_x, BLOCKSIZE_y);

    timerGPU.StartCounter();
    test_kernel_2D<<<gridSize, blockSize>>>(devPtrA, devPtrB, devPtrC, Nrows, Ncols);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());
    printf("Non-pitched - Time = %f; Memory = %i bytes \n", timerGPU.GetCounter(), Nrows * Ncols * sizeof(float));

    timerGPU.StartCounter();
    test_kernel_Pitched_2D<<<gridSize, blockSize>>>(devPtrPitchedA, devPtrPitchedB, devPtrPitchedC, pitchA, pitchB, pitchC, Nrows, Ncols);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());
    printf("Pitched - Time = %f; Memory = %i bytes \n", timerGPU.GetCounter(), Nrows * pitchA);

    //gpuErrchk(cudaMemcpy2D(hostPtrA, Ncols * sizeof(float), devPtrPitchedA, pitchA, Ncols * sizeof(float), Nrows, cudaMemcpyDeviceToHost));
    gpuErrchk(cudaMemcpy(hostPtrA, devPtrA, Nrows * Ncols * sizeof(float), cudaMemcpyDeviceToHost));
    gpuErrchk(cudaMemcpy(hostPtrB, devPtrB, Nrows * Ncols * sizeof(float), cudaMemcpyDeviceToHost));
    gpuErrchk(cudaMemcpy(hostPtrC, devPtrC, Nrows * Ncols * sizeof(float), cudaMemcpyDeviceToHost));

    //for (int i = 0; i < Nrows; i++)
    //  for (int j = 0; j < Ncols; j++)
    //      printf("row %i column %i value %f \n", i, j, hostPtrA[i * Ncols + j]);

    return 0;

}