Understanding concurrent kernel execution in CUDA

Time: 2012-09-10 00:51:13

Tags: concurrency cuda

I am trying to understand how concurrent kernel execution works, so I wrote a simple program to experiment with it. The kernel fills a 2D array using 2 streams. With 1 stream and no concurrency I get the correct result; when I try 2 streams to get concurrency, I get the wrong result. I believe the problem is with the memory transfers, since I am not sure I have those, or the way I launch the kernel, set up correctly. The programming guide has not been enough to clear this up for me. For my purposes, I need Matlab to call the kernel.

As I understand it, the main program will:

  • Allocate pinned memory on the host
  • Allocate the GPU memory needed for a single stream (2 streams = half of the total host memory)
  • Create the streams
  • Loop over the streams
  • Copy a single stream's memory from host to device with cudaMemcpyAsync()
  • Execute the kernel for that stream
  • Copy that stream's memory back to the host with cudaMemcpyAsync()
    • I believe I am doing the right thing by addressing the memory each stream needs with an offset based on the per-stream data size and the stream number (see the sketch after this list).
  • Destroy the streams
  • Free the memory
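
To make the offset step concrete, this is roughly the per-stream addressing I have in mind for the host-to-device copy (a sketch only, assuming the array divides evenly between the streams; the same offset is meant to be used for the copy back into returnedArray):

for (int i = 0; i < numberOfStreams; i++)
{
    // Element offset of stream i's chunk into the full width*height array.
    // It is not multiplied by sizeof(double): pointer arithmetic on a double*
    // is already in elements, so hostArray + offset is the start of the chunk
    // and it spans streamSize elements.
    int offset = i * streamSize;

    cudaMemcpyAsync(deviceArray, hostArray + offset,
                    sizeof(double) * streamSize,
                    cudaMemcpyHostToDevice, stream[i]);
}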

Here is the code I am trying to use.

concurrentKernel.cpp

__global__ void concurrentKernel(int const width, 
                                  int const streamIdx,
                                  double *array)
 {
     int thread = (blockIdx.x * blockDim.x) + threadIdx.x;

     for (int i = 0; i < width; i ++)
     {
        array[thread*width+i] = thread+i*width+1;
//         array[thread*width+i+streamIdx] = thread+i*width+streamIdx*width/2;
     }

 }

concurrentMexFunction.cu

#include <stdio.h>
#include <math.h>
#include "mex.h"

/* Kernel function */
#include "concurrentKernel.cpp"


void mexFunction(int        nlhs,
                 mxArray    *plhs[],
                 int        nrhs,
                 mxArray    *prhs[])
{

    int const numberOfStreams = 2; // set number of streams to use here.
    cudaError_t cudaError;
    int offset;

    int width, height, fullSize, streamSize;
    width = 512;
    height = 512;
    fullSize = height*width;
    streamSize = (int)(fullSize/numberOfStreams);
    mexPrintf("fullSize: %d, streamSize: %d\n",fullSize, streamSize);

    /* Return the populated array */
    double *returnedArray;
    plhs[0] = mxCreateDoubleMatrix(height, width, mxREAL);
    returnedArray = mxGetPr(plhs[0]);

    cudaStream_t stream[numberOfStreams];
    for (int i = 0; i < numberOfStreams; i++)
    {
        cudaStreamCreate(&stream[i]);    
    }

    /* host memory */
    double *hostArray;
    cudaError = cudaMallocHost(&hostArray,sizeof(double)*fullSize);    // full size of array.
    if (cudaError != cudaSuccess) {mexPrintf("hostArray memory allocation failed\n********** Error: %s **********\n",cudaGetErrorString(cudaError)); return; }

    for (int i = 0; i < height; i++)
    {
        for (int j = 0; j < width; j++)
        {
            hostArray[i*width+j] = -1.0;
        }
    }

    /* device memory */
    double *deviceArray;
    cudaError = cudaMalloc( (void **)&deviceArray,sizeof(double)*streamSize);    // size of array for each stream.
    if (cudaError != cudaSuccess) {mexPrintf("deviceArray memory allocation failed\n********** Error: %s **********\n",cudaGetErrorString(cudaError)); return; }


    for (int i = 0; i < numberOfStreams; i++)
    {
        offset = i;//*streamSize;
        mexPrintf("offset: %d, element: %d\n",offset*sizeof(double),offset);

        cudaMemcpyAsync(deviceArray, hostArray+offset, sizeof(double)*streamSize, cudaMemcpyHostToDevice, stream[i]);
        if (cudaError != cudaSuccess) {mexPrintf("deviceArray memory allocation failed\n********** Error: %s **********\n",cudaGetErrorString(cudaError)); return; }

        concurrentKernel<<<1, 512, 0, stream[i]>>>(width, i, deviceArray);

        cudaMemcpyAsync(returnedArray+offset, deviceArray, sizeof(double)*streamSize, cudaMemcpyDeviceToHost, stream[i]);
        if (cudaError != cudaSuccess) {mexPrintf("returnedArray memory allocation failed\n********** Error: %s **********\n",cudaGetErrorString(cudaError)); return; }

        mexPrintf("returnedArray[offset]: %g, [end]: %g\n",returnedArray[offset/sizeof(double)],returnedArray[(i+1)*streamSize-1]);
    }


    for (int i = 0; i < numberOfStreams; i++)
    {
        cudaStreamDestroy(stream[i]);    
    }

    cudaFree(hostArray);
    cudaFree(deviceArray);

}

With 2 streams the result is an array of zeros, which makes me think I am doing something wrong with the memory. Can anyone explain what I am doing wrong? If anyone needs help compiling and running these from Matlab, I can provide the commands.

Update

for (int i = 0; i < numberOfStreams; i++)
{
    offset = i*streamSize;
    mexPrintf("offset: %d, element: %d\n",offset*sizeof(double),offset);

    cudaMemcpyAsync(deviceArray, hostArray+offset, sizeof(double)*streamSize, cudaMemcpyHostToDevice, stream[i]);
    if (cudaError != cudaSuccess) {mexPrintf("deviceArray memory allocation failed\n********** Error: %s **********\n",cudaGetErrorString(cudaError)); return; }

    concurrentKernel<<<1, 512, 0, stream[i]>>>(width, i, deviceArray);


}
cudaDeviceSynchronize();


for (int i = 0; i < numberOfStreams; i++)
{
    offset = i*streamSize;
    mexPrintf("offset: %d, element: %d\n",offset*sizeof(double),offset);

    cudaMemcpyAsync(returnedArray+offset, deviceArray, sizeof(double)*streamSize, cudaMemcpyDeviceToHost, stream[i]);
    if (cudaError != cudaSuccess) {mexPrintf("returnedArray memory allocation failed\n********** Error: %s **********\n",cudaGetErrorString(cudaError)); return; }

    mexPrintf("returnedArray[offset]: %g, [end]: %g\n",returnedArray[offset/sizeof(double)],returnedArray[(i+1)*streamSize-1]);

    cudaStreamDestroy(stream[i]);    
}

1 Answer:

Answer 0 (score: 1)

Also, it looks like you are reusing the deviceArray pointer for the different concurrent streams. Most likely, if the current code runs at all as-is, it is because the false dependencies @Tom mentioned cause the hardware to run the streams sequentially. There should be a separate deviceArray for each stream:

/* device memory */
double *deviceArray[numberOfStreams];
for (int i = 0; i < numberOfStreams; i++)
{
    cudaError = cudaMalloc( (void **)&deviceArray[i],sizeof(double)*streamSize);    // size of array for each stream.
    if (cudaError != cudaSuccess) {mexPrintf("deviceArray memory allocation failed\n********** Error: %s **********\n",cudaGetErrorString(cudaError)); return; }
}

for (int i = 0; i < numberOfStreams; i++)
{
    offset = i*streamSize;    // element offset (not bytes) into the full array
    mexPrintf("offset: %d, element: %d\n",offset*sizeof(double),offset);

    cudaMemcpyAsync(deviceArray[i], hostArray+offset, sizeof(double)*streamSize, cudaMemcpyHostToDevice, stream[i]);
    if (cudaError != cudaSuccess) {mexPrintf("deviceArray memory allocation failed\n********** Error: %s **********\n",cudaGetErrorString(cudaError)); return; }

    concurrentKernel<<<1, 512, 0, stream[i]>>>(width, i, deviceArray[i]); 

    cudaMemcpyAsync(returnedArray+offset, deviceArray[i], sizeof(double)*streamSize,
                    cudaMemcpyDeviceToHost, stream[i]);     
}
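
For completeness, a minimal sketch of the synchronization and cleanup that go with per-stream device buffers; note in particular that memory allocated with cudaMallocHost has to be released with cudaFreeHost, not cudaFree:

/* Wait for all streams to finish before mexFunction returns and Matlab reads
   returnedArray. (returnedArray comes from mxGetPr and is pageable, not pinned,
   so the device-to-host cudaMemcpyAsync may not overlap anyway, but the results
   must still be complete before they are read.) */
cudaDeviceSynchronize();

for (int i = 0; i < numberOfStreams; i++)
{
    cudaStreamDestroy(stream[i]);
    cudaFree(deviceArray[i]);      /* one device buffer per stream */
}
cudaFreeHost(hostArray);           /* pinned memory: cudaFreeHost, not cudaFree */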