CUDA: cascaded summation of all vector elements

时间:2017-01-28 19:14:51

标签: c++ algorithm cuda add atomic

I have implemented a cascaded addition function for a large vector of float values on both the GPU and the CPU. That simply means that all elements of the vector shall be summed up into one result. The CPU algorithm is quite simple and works fine, but the GPU result is always off from the expected value by 35200.

A minimal working example of the algorithm, including the comparison with the CPU, is shown below.

The output is always the following:

CPU Time: 22.760059 ms, bandwidth: 3.514929 GB/s

GPU Time (improved): 12.077088 ms, bandwidth: 6.624114 GB/s
- CPU result does not match GPU result in improved atomic add.
   CPU: 10000000.000000, GPU: 10035200.000000, diff:-35200.000000

I checked it with cuda-memcheck and no errors occurred in that run. I have tried many different things, but none of them worked. It is not due to the inaccuracy of the float data type, because I changed all floats to integers and still got exactly the same result.

Here is my code:

#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#include <chrono>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>

void reductionWithCudaImproved(float *result, const float *input);
__global__ void reductionKernelImproved(float *result, const float *input);
void reductionCPU(float *result, const float *input);

#define SIZE 10000000

#define TILE 32

#define ILP 8
#define BLOCK_X_IMPR (TILE / ILP)
#define BLOCK_Y_IMPR 32
#define BLOCK_COUNT_X_IMPR 100

int main()
{
    int i;
    float *input;
    float resultCPU, resultGPU;
    double cpuTime, cpuBandwidth;

    input = (float*)malloc(SIZE * sizeof(float));
    resultCPU = 0.0;
    resultGPU = 0.0;

    srand((int)time(NULL));

    auto start = std::chrono::high_resolution_clock::now();
    auto end = std::chrono::high_resolution_clock::now();

    for (i = 0; i < SIZE; i++)
        input[i] = 1.0;

    start = std::chrono::high_resolution_clock::now();
    reductionCPU(&resultCPU, input);
    end = std::chrono::high_resolution_clock::now();

    std::chrono::duration<double> diff = end - start;
    cpuTime = (diff.count() * 1000);
    cpuBandwidth = (sizeof(float) * SIZE * 2) / (cpuTime * 1000000);
    printf("CPU Time: %f ms, bandwidth: %f GB/s\n\n", cpuTime, cpuBandwidth);

    reductionWithCudaImproved(&resultGPU, input);

    if (resultCPU != resultGPU)
        printf("- CPU result does not match GPU result in improved atomic add. CPU: %f, GPU: %f, diff:%f\n\n", resultCPU, resultGPU, (resultCPU - resultGPU));
    else
        printf("+ CPU result matches GPU result in improved atomic add. CPU: %f, GPU: %f\n\n", resultCPU, resultGPU);

    return 0;
}

void reductionCPU(float *result, const float *input)
{
    for (int i = 0; i < SIZE; i++)
        *result += input[i];
}

__global__ void reductionKernelImproved(float *result, const float *input)
{
    int i;
    int col = (blockDim.x * blockIdx.x + threadIdx.x) * ILP;
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    int index = row * blockDim.x * BLOCK_COUNT_X_IMPR + col;
    __shared__ float interResult;

    if (threadIdx.x == 0 && threadIdx.y == 0)
        interResult = 0.0;

    __syncthreads();

#pragma unroll ILP
    for (i = 0; i < ILP; i++)
    {
        if (index < SIZE)
        {
            atomicAdd(&interResult, input[index]);
            index++;
        }
    }

    __syncthreads();

    if (threadIdx.x == 0 && threadIdx.y == 0)
        atomicAdd(result, interResult);
}

void reductionWithCudaImproved(float *result, const float *input)
{
    dim3 dim_grid, dim_block;

    float *dev_input = 0;
    float *dev_result = 0;
    cudaEvent_t start, stop;
    float elapsed = 0;
    double gpuBandwidth;

    dim_block.x = BLOCK_X_IMPR;
    dim_block.y = BLOCK_Y_IMPR;
    dim_block.z = 1;

    dim_grid.x = BLOCK_COUNT_X_IMPR;
    dim_grid.y = (int)ceil((float)SIZE / (float)(TILE * dim_block.y* BLOCK_COUNT_X_IMPR));
    dim_grid.z = 1;

    cudaSetDevice(0);

    cudaMalloc((void**)&dev_input, SIZE * sizeof(float));
    cudaMalloc((void**)&dev_result, sizeof(float));
    cudaMemcpy(dev_input, input, SIZE * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_result, result, sizeof(float), cudaMemcpyHostToDevice);

    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    reductionKernelImproved<<<dim_grid, dim_block>>>(dev_result, dev_input);

    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    cudaEventElapsedTime(&elapsed, start, stop);

    gpuBandwidth = (sizeof(float) * SIZE * 2) / (elapsed * 1000000);
    printf("GPU Time (improved): %f ms, bandwidth: %f GB/s\n", elapsed, gpuBandwidth);

    cudaDeviceSynchronize();

    cudaMemcpy(result, dev_result, sizeof(float), cudaMemcpyDeviceToHost);

    cudaFree(dev_input);
    cudaFree(dev_result);

    return;
}

1 Answer:

Answer 0 (score: 3)

I think you have overlapping indices in your kernel:

int col = (blockDim.x * blockIdx.x + threadIdx.x) * ILP;
int row = blockDim.y * blockIdx.y + threadIdx.y;
int index = row * blockDim.x * BLOCK_COUNT_X_IMPR + col;

If I am not mistaken, your blockDim.x = 4 and BLOCK_COUNT_X_IMPR = 100, so each row jumps by 400 indices. However, your col can go up to 400 * 8.

Consider:

blockIdx = (12, 0)
threadIdx = (3, 0)
=> col = (12*4 + 3) * 8 = 408
   row = 0
   index = 408

blockIdx = (0, 0)
threadIdx = (1, 1)
=> col = (0*4 + 1) * 8 = 8
   row = 1
   index = 1 * 400 + 8 = 408
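
Since the question initializes every element to 1.0f, the GPU result is effectively a count of how many atomic adds were performed, so you can make the overlap visible by replaying the index mapping on the host. Here is a quick, self-contained host-side sketch that uses the same constants as the question and enumerates every (block, thread) pair:

#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define SIZE 10000000
#define TILE 32
#define ILP 8
#define BLOCK_X_IMPR (TILE / ILP)
#define BLOCK_Y_IMPR 32
#define BLOCK_COUNT_X_IMPR 100

int main(void)
{
    int grid_y = (int)ceil((float)SIZE / (float)(TILE * BLOCK_Y_IMPR * BLOCK_COUNT_X_IMPR));
    unsigned char *visits = (unsigned char*)calloc(SIZE, 1);
    long long totalAdds = 0, duplicates = 0, untouched = 0;

    /* Enumerate every thread of every block and replay the index
       computation from reductionKernelImproved, including the ILP loop. */
    for (int by = 0; by < grid_y; by++)
        for (int bx = 0; bx < BLOCK_COUNT_X_IMPR; bx++)
            for (int ty = 0; ty < BLOCK_Y_IMPR; ty++)
                for (int tx = 0; tx < BLOCK_X_IMPR; tx++)
                {
                    int col = (BLOCK_X_IMPR * bx + tx) * ILP;
                    int row = BLOCK_Y_IMPR * by + ty;
                    int index = row * BLOCK_X_IMPR * BLOCK_COUNT_X_IMPR + col;
                    for (int i = 0; i < ILP; i++, index++)
                        if (index < SIZE) { visits[index]++; totalAdds++; }
                }

    for (int i = 0; i < SIZE; i++)
    {
        if (visits[i] > 1) duplicates += visits[i] - 1;
        if (visits[i] == 0) untouched++;
    }

    printf("atomic adds: %lld, duplicate visits: %lld, elements never read: %lld\n",
           totalAdds, duplicates, untouched);
    free(visits);
    return 0;
}
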

So I guess you should rewrite your indexing as:

// gridDim.x = BLOCK_COUNT_X_IMPR
int index = row * blockDim.x * gridDim.x * ILP + col;
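
For completeness, here is a sketch of the kernel with only that index line changed and everything else taken unchanged from the question. With gridDim.x = BLOCK_COUNT_X_IMPR = 100, each row of threads now owns a disjoint span of blockDim.x * gridDim.x * ILP = 3200 elements, which matches the largest value col + ILP can reach:

__global__ void reductionKernelImproved(float *result, const float *input)
{
    int i;
    int col = (blockDim.x * blockIdx.x + threadIdx.x) * ILP;
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    // the row stride now covers the full range spanned by col (+ ILP),
    // so indices computed by different rows no longer collide
    int index = row * blockDim.x * gridDim.x * ILP + col;
    __shared__ float interResult;

    if (threadIdx.x == 0 && threadIdx.y == 0)
        interResult = 0.0;

    __syncthreads();

#pragma unroll ILP
    for (i = 0; i < ILP; i++)
    {
        if (index < SIZE)
        {
            atomicAdd(&interResult, input[index]);
            index++;
        }
    }

    __syncthreads();

    if (threadIdx.x == 0 && threadIdx.y == 0)
        atomicAdd(result, interResult);
}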