为什么我的cuda C代码单精度不会变得更快?

时间:2016-05-04 04:41:48

标签: c cuda

Fermi 架构 GPU 的单精度计算速度应该是双精度的 2 倍。但是,虽然我把所有的 'double' 声明都改写成了 'float',却没有获得任何加速。是不是我前面哪一步做错了?比如编译选项等?

GPU:特斯拉C2075 操作系统:win7 pro 编译:VS2013(NVCC) CUDA:v.7.5 命令行:nvcc test.cu

我写了测试代码:

#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<time.h>
#include<conio.h>

#include<cuda_runtime.h>
#include<cuda_profiler_api.h> 
#include<device_functions.h>
#include<device_launch_parameters.h>

#define DOUBLE 1

#define MAXI 10

__global__ void Kernel_double(double*a,int nthreadx)
{
    // Multiply each element by the constant 1.0 -- a no-op the compiler is
    // free to optimize away entirely (which is why this benchmark shows no
    // float/double difference; see the answer below).
    // NOTE(review): no bounds check on idx -- assumes the launch exactly
    // covers the array. The host code launches a 2-D (2,2) block but only
    // the .x dimensions are used here, so threads with threadIdx.y > 0
    // redundantly touch the same elements -- confirm that is intended.
    const double factor = 1.e0;
    const int idx = nthreadx * blockIdx.x + threadIdx.x;
    a[idx] = a[idx] * factor;
}
__global__ void Kernel_float(float*a,int nthreadx)
{
    // Single-precision twin of Kernel_double: multiply each element by the
    // constant 1.0f, which the compiler can optimize to nothing.
    // NOTE(review): no bounds check on idx -- assumes the launch exactly
    // covers the array; only the .x launch dimensions are used, so a 2-D
    // block duplicates work across threadIdx.y -- confirm that is intended.
    const float factor = 1.0F;
    const int idx = nthreadx * blockIdx.x + threadIdx.x;
    a[idx] = a[idx] * factor;
}

// Abort with a diagnostic if a CUDA runtime call failed. Kernel launches are
// asynchronous and return no status directly, so launch-configuration errors
// are fetched with cudaGetLastError() and routed through here as well.
static void checkCuda(cudaError_t err, const char* what)
{
    if (err != cudaSuccess) {
        fprintf(stderr, "CUDA error (%s): %s\n", what, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

int main()
{
    // Host array sized by MAXI. The original hard-coded a[10], which would
    // silently overflow if MAXI were ever raised above 10.
#if DOUBLE
    double a[MAXI];
    for(int i=0;i<MAXI;++i){
        a[i]=1.e0;
    }
    double*d_a;
    checkCuda(cudaMalloc((void**)&d_a, sizeof(double)*(MAXI)), "cudaMalloc");
    checkCuda(cudaMemcpy(d_a, a, sizeof(double)*(MAXI), cudaMemcpyHostToDevice), "H2D copy");
#else
    float a[MAXI];
    for(int i=0;i<MAXI;++i){
        a[i]=1.0F;
    }
    float*d_a;
    checkCuda(cudaMalloc((void**)&d_a, sizeof(float)*(MAXI)), "cudaMalloc");
    checkCuda(cudaMemcpy(d_a, a, sizeof(float)*(MAXI), cudaMemcpyHostToDevice), "H2D copy");
#endif

    // 2x2 grid of 2x2 blocks = 16 threads total; the kernels read only the
    // .x dimensions, so this launch is far too small to measure arithmetic
    // throughput -- it mostly times launch + copy overhead.
    dim3 grid(2, 2, 1);
    dim3 block(2, 2, 1);

    clock_t start_clock, end_clock;
    double sec_clock;

    printf("[%d] start\n", __LINE__);
    start_clock = clock();

    // The blocking device-to-host copy inside the loop dominates the measured
    // time; the kernel arithmetic being "benchmarked" is negligible.
    for (int i = 1; i <= 100000; ++i){
#if DOUBLE
        Kernel_double << < grid, block >> > (d_a, 2);
        checkCuda(cudaGetLastError(), "Kernel_double launch");
        checkCuda(cudaMemcpy(a, d_a, sizeof(double)*(MAXI), cudaMemcpyDeviceToHost), "D2H copy");
#else
        Kernel_float << < grid, block >> > (d_a, 2);
        checkCuda(cudaGetLastError(), "Kernel_float launch");
        checkCuda(cudaMemcpy(a, d_a, sizeof(float)*(MAXI), cudaMemcpyDeviceToHost), "D2H copy");
#endif
    }

    end_clock = clock();
    sec_clock = (end_clock - start_clock) / (double)CLOCKS_PER_SEC;
    printf("[%d] %f[s]\n", __LINE__, sec_clock);
    printf("[%d] end\n", __LINE__);

    // Release the device buffer (the original leaked it).
    cudaFree(d_a);

    return 0;
}

1 个答案:

答案 0 :(得分:6)

嗯,经过一些调查后发现:这是因为你只是用常数 1 做乘法,编译器在生成二进制代码时把它优化成了"什么都不做":

enter image description here

相反,如果您对数组进行平方(以防止这种简单的优化),则会得到以下程序集:

enter image description here

并且在以下(简化)代码段中恢复了性能提升,其中我更改了一些内容:

  • 大一些阵列(100M)
  • 使用 blockDim.x,而不是通过函数参数传入线程数
  • 为我的机器使用更好的内核配置(GTX 980)
  • 在堆上分配输入数组而不是堆栈(允许超过1M)

这是代码:

#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<time.h>
#include<conio.h>

#include<cuda_runtime.h>
#include<cuda_profiler_api.h> 
#include<device_functions.h>
#include<device_launch_parameters.h>

#define DOUBLE float

#define ITER 10
#define MAXI 100000000

__global__ void kernel(DOUBLE*a)
{
    // Square every element of a[0..MAXI) in place. Each thread starts at its
    // global index and advances by the total number of launched threads
    // (a grid-stride loop), so any grid/block configuration covers the whole
    // array. Squaring (rather than multiplying by 1) prevents the compiler
    // from optimizing the arithmetic away.
    const int stride = blockDim.x * gridDim.x;
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    while (idx < MAXI)
    {
        const DOUBLE v = a[idx];
        a[idx] = v * v;
        idx += stride;
    }
}

// Abort with a diagnostic when a CUDA runtime call reports failure; launch
// errors are surfaced via cudaGetLastError() and checked the same way.
static void cudaCheckOrDie(cudaError_t err, const char* what)
{
    if (err != cudaSuccess) {
        fprintf(stderr, "CUDA error (%s): %s\n", what, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

int main()
{
    // 100M-element host buffer on the heap -- far too large for the stack.
    // The original never checked malloc's result.
    DOUBLE* a = (DOUBLE*) malloc(MAXI*sizeof(DOUBLE));
    if (a == NULL) {
        fprintf(stderr, "host malloc of %llu bytes failed\n",
                (unsigned long long)MAXI * sizeof(DOUBLE));
        return EXIT_FAILURE;
    }
    for(int i=0;i<MAXI;++i)
    {
        a[i]=(DOUBLE)1.0;
    }
    DOUBLE* d_a;
    cudaCheckOrDie(cudaMalloc((void**)&d_a, sizeof(DOUBLE)*(MAXI)), "cudaMalloc");
    cudaCheckOrDie(cudaMemcpy(d_a, a, sizeof(DOUBLE)*(MAXI), cudaMemcpyHostToDevice), "H2D copy");

    clock_t start_clock, end_clock;
    double sec_clock;

    printf("[%d] start\n", __LINE__);
    start_clock = clock();

    // Launches are asynchronous; queue all ITER kernels back-to-back and
    // synchronize once afterwards so the timing covers actual GPU work.
    for (int i = 1; i <= ITER; ++i){
        kernel <<< 32, 256>>> (d_a);
    }
    cudaCheckOrDie(cudaGetLastError(), "kernel launch");
    cudaCheckOrDie(cudaDeviceSynchronize(), "cudaDeviceSynchronize");

    end_clock = clock();
    cudaCheckOrDie(cudaMemcpy(a, d_a, sizeof(DOUBLE)*(MAXI), cudaMemcpyDeviceToHost), "D2H copy");
    sec_clock = (end_clock - start_clock) / (double)CLOCKS_PER_SEC;
    // Cast CLOCKS_PER_SEC explicitly: it has type clock_t, which is not
    // guaranteed to match %d on every platform.
    printf("[%d] %f/%d[s]\n", __LINE__, sec_clock, (int)CLOCKS_PER_SEC);
    printf("[%d] end\n", __LINE__);

    // Release device and host buffers (the original leaked both).
    cudaFree(d_a);
    free(a);

    return 0;
}

(你会注意到我分配了一个长度为100M的数组来获得可测量的性能。)