Improving kernel performance by increasing occupancy?

Asked: 2011-10-12 09:15:55

Tags: cuda

Here is the Compute Visual Profiler output for my kernel on a GT 440:

  • Kernel details: Grid size: [100 1 1], Block size: [256 1 1]
  • Register ratio: 0.84375 (27648 / 32768) [35 registers per thread]
  • Shared memory ratio: 0.336914 (16560 / 49152) [5520 bytes per block]
  • Active blocks per SM: 3 (Maximum active blocks per SM: 8)
  • Active threads per SM: 768 (Maximum active threads per SM: 1536)
  • Potential occupancy: 0.5 (24 / 48)
  • Occupancy limiting factor: Registers

Please note the bullets marked in bold. The kernel execution time is 121195 us.

I reduced the number of registers per thread by moving some local variables to shared memory. The Compute Visual Profiler output became:

  • Kernel details: Grid size: [100 1 1], Block size: [256 1 1]
  • Register ratio: 1 (32768 / 32768) [30 registers per thread]
  • Shared memory ratio: 0.451823 (22208 / 49152) [5552 bytes per block]
  • Active blocks per SM: 4 (Maximum active blocks per SM: 8)
  • Active threads per SM: 1024 (Maximum active threads per SM: 1536)
  • Potential occupancy: 0.666667 (32 / 48)
  • Occupancy limiting factor: Registers

So now 4 blocks execute simultaneously on a single SM, versus 3 blocks in the previous version. However, the execution time is 115756 us, which is almost the same! Why? Aren't completely independent blocks executed on different CUDA cores?

2 Answers:

Answer 0 (score: 14):

You are implicitly assuming that higher occupancy automatically translates into higher performance. That is most often not the case.

The NVIDIA architecture needs a certain number of active warps per MP in order to hide the GPU's instruction pipeline latency. On your Fermi-based card, that requirement translates into a minimum occupancy of roughly 30%. Aiming for occupancies higher than that minimum will not necessarily result in higher throughput, because the latency bottleneck may have moved to another part of the GPU. Your entry-level GPU does not have a lot of memory bandwidth, and it is quite possible that 3 blocks per MP is already enough to make your code memory-bandwidth limited, in which case increasing the number of blocks will have no effect on performance (it might even go down because of increased memory controller contention and cache misses).

Further, you say that you spilled variables to shared memory in order to reduce the register footprint of the kernel. On Fermi, shared memory has only about 1000 Gb/s of bandwidth, compared with roughly 8000 Gb/s for registers (see the link below for microbenchmarking results that demonstrate this). So you have moved variables into slower memory, which may also have a negative effect on performance, offsetting whatever benefit the higher occupancy affords.
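To make the register-versus-shared-memory trade-off concrete, here is a minimal sketch of my own (the kernel names, the fixed 256-thread block size, and the volatile qualifier used to force real shared memory traffic are all illustrative assumptions, not code from the question) contrasting an accumulator kept in a register with one manually spilled to shared memory:

__global__ void reg_version(float *d_out, const float *d_in, int n) {
    // The accumulator lives in a register, so every loop iteration
    // reads and writes the register file (~8000 Gb/s class on Fermi).
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid >= n) return;
    float acc = d_in[tid];
    for (int i = 0; i < 1000; i++)
        acc = acc * 1.0001f + 0.5f;
    d_out[tid] = acc;
}

__global__ void smem_version(float *d_out, const float *d_in, int n) {
    // The accumulator has been manually spilled to shared memory
    // (~1000 Gb/s class on Fermi); volatile stops the compiler from
    // caching it back into a register, so every iteration now pays
    // the slower shared memory access.
    volatile __shared__ float acc[256];   // assumes blockDim.x == 256
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid >= n) return;
    acc[threadIdx.x] = d_in[tid];
    for (int i = 0; i < 1000; i++)
        acc[threadIdx.x] = acc[threadIdx.x] * 1.0001f + 0.5f;
    d_out[tid] = acc[threadIdx.x];
}

The second version uses fewer registers per thread, and may therefore allow higher occupancy, yet each iteration runs against slower memory, which is exactly the trade-off described above.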

If you have not already seen it, I highly recommend Vasily Volkov's talk from GTC 2010, "Better Performance at Lower Occupancy" (pdf). It shows how exploiting instruction-level parallelism can sustain high GPU throughput at very, very low occupancy.

Answer 1 (score: 2):

talonmies has already answered your question, so I just want to share some code inspired by the first part of V. Volkov's talk mentioned in the answer above.

Here is the code:

#include <stdio.h>

#define N_ITERATIONS 8192

//#define DEBUG

/********************/
/* CUDA ERROR CHECK */
/********************/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess) 
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

/********************************************************/
/* KERNEL0 - NO INSTRUCTION LEVEL PARALLELISM (ILP = 0) */
/********************************************************/
__global__ void kernel0(int *d_a, int *d_b, int *d_c, unsigned int N) {

    const int tid = threadIdx.x + blockIdx.x * blockDim.x ;

    if (tid < N) {

        int a = d_a[tid];
        int b = d_b[tid];
        int c = d_c[tid];

        for(unsigned int i = 0; i < N_ITERATIONS; i++) {
            a = a * b + c;
        }

        d_a[tid] = a;
    }

}

/*****************************************************/
/* KERNEL1 - INSTRUCTION LEVEL PARALLELISM (ILP = 2) */
/*****************************************************/
__global__ void kernel1(int *d_a, int *d_b, int *d_c, unsigned int N) {

    const int tid = threadIdx.x + blockIdx.x * blockDim.x;

    if (tid < N/2) {

        int a1 = d_a[tid];
        int b1 = d_b[tid];
        int c1 = d_c[tid];

        int a2 = d_a[tid+N/2];
        int b2 = d_b[tid+N/2];
        int c2 = d_c[tid+N/2];

        for(unsigned int i = 0; i < N_ITERATIONS; i++) {
            a1 = a1 * b1 + c1;
            a2 = a2 * b2 + c2;
        }

        d_a[tid]        = a1;
        d_a[tid+N/2]    = a2;
    }

}

/*****************************************************/
/* KERNEL2 - INSTRUCTION LEVEL PARALLELISM (ILP = 4) */
/*****************************************************/
__global__ void kernel2(int *d_a, int *d_b, int *d_c, unsigned int N) {

    const int tid = threadIdx.x + blockIdx.x * blockDim.x;

    if (tid < N/4) {

        int a1 = d_a[tid];
        int b1 = d_b[tid];
        int c1 = d_c[tid];

        int a2 = d_a[tid+N/4];
        int b2 = d_b[tid+N/4];
        int c2 = d_c[tid+N/4];

        int a3 = d_a[tid+N/2];
        int b3 = d_b[tid+N/2];
        int c3 = d_c[tid+N/2];

        int a4 = d_a[tid+3*N/4];
        int b4 = d_b[tid+3*N/4];
        int c4 = d_c[tid+3*N/4];

        for(unsigned int i = 0; i < N_ITERATIONS; i++) {
            a1 = a1 * b1 + c1;
            a2 = a2 * b2 + c2;
            a3 = a3 * b3 + c3;
            a4 = a4 * b4 + c4;
        }

        d_a[tid]        = a1;
        d_a[tid+N/4]    = a2;
        d_a[tid+N/2]    = a3;
        d_a[tid+3*N/4]  = a4;
    }

}

/********/
/* MAIN */
/********/
int main() {

    const int N = 1024;

    int *h_a                = (int*)malloc(N*sizeof(int));
    int *h_a_result_host    = (int*)malloc(N*sizeof(int));
    int *h_a_result_device  = (int*)malloc(N*sizeof(int));
    int *h_b                = (int*)malloc(N*sizeof(int));
    int *h_c                = (int*)malloc(N*sizeof(int));

    for (int i=0; i<N; i++) {
        h_a[i] = 2;
        h_b[i] = 1;
        h_c[i] = 2;
        h_a_result_host[i] = h_a[i];
        for(unsigned int k = 0; k < N_ITERATIONS; k++) {
            h_a_result_host[i] = h_a_result_host[i] * h_b[i] + h_c[i];
        }
    }

    int *d_a; gpuErrchk(cudaMalloc((void**)&d_a, N*sizeof(int)));
    int *d_b; gpuErrchk(cudaMalloc((void**)&d_b, N*sizeof(int)));
    int *d_c; gpuErrchk(cudaMalloc((void**)&d_c, N*sizeof(int)));

    gpuErrchk(cudaMemcpy(d_a, h_a, N*sizeof(int), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_b, h_b, N*sizeof(int), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_c, h_c, N*sizeof(int), cudaMemcpyHostToDevice));

    // --- Creating events for timing
    float time;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    /***********/
    /* KERNEL0 */
    /***********/
    cudaEventRecord(start, 0);
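    // --- kernel0: one block of 1024 threads; 1024 of the 1536 resident threads a Fermi SM supports, i.e. ~66% occupancy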
    kernel0<<<1, N>>>(d_a, d_b, d_c, N);
#ifdef DEBUG
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());
#endif
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("GFlops = %f\n", (1.e-6)*(float)(N*N_ITERATIONS)/time);
    gpuErrchk(cudaMemcpy(h_a_result_device, d_a, N*sizeof(int), cudaMemcpyDeviceToHost));
    for (int i=0; i<N; i++) if(h_a_result_device[i] != h_a_result_host[i]) { printf("Error at i=%i! Host = %i; Device = %i\n", i, h_a_result_host[i], h_a_result_device[i]); return; }

    /***********/
    /* KERNEL1 */
    /***********/
    gpuErrchk(cudaMemcpy(d_a, h_a, N*sizeof(int), cudaMemcpyHostToDevice));
    cudaEventRecord(start, 0);
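    // --- kernel1: one block of 512 threads, each handling two elements (ILP = 2); ~33% occupancy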
    kernel1<<<1, N/2>>>(d_a, d_b, d_c, N);
#ifdef DEBUG
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());
#endif
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("GFlops = %f\n", (1.e-6)*(float)(N*N_ITERATIONS)/time);
    gpuErrchk(cudaMemcpy(h_a_result_device, d_a, N*sizeof(int), cudaMemcpyDeviceToHost));
    for (int i=0; i<N; i++) if(h_a_result_device[i] != h_a_result_host[i]) { printf("Error at i=%i! Host = %i; Device = %i\n", i, h_a_result_host[i], h_a_result_device[i]); return; }

    /***********/
    /* KERNEL2 */
    /***********/
    gpuErrchk(cudaMemcpy(d_a, h_a, N*sizeof(int), cudaMemcpyHostToDevice));
    cudaEventRecord(start, 0);
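    // --- kernel2: one block of 256 threads, each handling four elements (ILP = 4); ~16.7% occupancy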
    kernel2<<<1, N/4>>>(d_a, d_b, d_c, N);
#ifdef DEBUG
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());
#endif
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("GFlops = %f\n", (1.e-6)*(float)(N*N_ITERATIONS)/time);
    gpuErrchk(cudaMemcpy(h_a_result_device, d_a, N*sizeof(int), cudaMemcpyDeviceToHost));
    for (int i=0; i<N; i++) if(h_a_result_device[i] != h_a_result_host[i]) { printf("Error at i=%i! Host = %i; Device = %i\n", i, h_a_result_host[i], h_a_result_device[i]); return; }

    cudaDeviceReset();

    return 0;
}
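For reference, this should build as a single .cu file with something like nvcc -O3 -arch=sm_21 (the architecture flag is my assumption, matching the GT540M's compute capability 2.1).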

On my GeForce GT540M, the results are:

kernel0   GFlops = 21.069281    Occupancy = 66%
kernel1   GFlops = 21.183354    Occupancy = 33%
kernel2   GFlops = 21.224517    Occupancy = 16.7%

The occupancy figures simply reflect a single resident block of 1024, 512, or 256 threads against the Fermi limit of 1536 threads per SM. The takeaway is that kernels with lower occupancy can still exhibit high performance, provided that instruction-level parallelism (ILP) is exploited.