Concurrency of one large kernel with multiple small kernels and memory copies (CUDA)

Asked: 2019-07-16 16:15:19

Tags: c++ cuda cuda-streams

I am developing a multi-GPU accelerated flow solver. Currently, I am trying to implement communication hiding. That means that while data is being exchanged, the GPU computes the part of the mesh that is not involved in the communication, and computes the rest of the mesh once the communication is done.

I am trying to solve this with one stream (computeStream) for the long-running kernel (fluxKernel) and another stream (communicationStream) for the different phases of the communication. The computeStream has a very low priority, so that kernels on the communicationStream can interleave with the fluxKernel, even though it uses all resources.

These are the streams I am using:

int priority_high, priority_low;
cudaDeviceGetStreamPriorityRange(&priority_low , &priority_high ) ;
cudaStreamCreateWithPriority (&communicationStream, cudaStreamNonBlocking, priority_high );
cudaStreamCreateWithPriority (&computeStream      , cudaStreamNonBlocking, priority_low  );
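
(A small aside, not from the original post: before relying on stream priorities, it can be worth checking that the device actually supports them; on devices without support, the reported range collapses to a single value. A minimal check, assuming device 0 and that <iostream> is included, might look like this:)

int prioritiesSupported = 0;
cudaDeviceGetAttribute( &prioritiesSupported, cudaDevAttrStreamPrioritiesSupported, 0 );
if( !prioritiesSupported ) std::cerr << "Warning: stream priorities are not supported on this device\n";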

The desired concurrency pattern is as follows:

[Figure: desired concurrency pattern]

Before sending the data via MPI, I need to synchronize the communicationStream to make sure that the data has been completely downloaded before I send it.
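
(A side note, not part of the original question: instead of cudaStreamSynchronize on the whole stream, one could also record an event right after the device-to-host copy and wait on that event before the MPI send. In this setup the two are effectively equivalent, since the copy is the last operation queued on the communicationStream; the sketch below just shows the event-based variant:)

cudaEvent_t downloadDone;
cudaEventCreateWithFlags( &downloadDone, cudaEventDisableTiming );

cudaMemcpyAsync ( ..., ..., ..., cudaMemcpyDeviceToHost, communicationStream );
cudaEventRecord ( downloadDone, communicationStream );
cudaEventSynchronize( downloadDone ); // host blocks until the download has completed

MPI_Isend( ... );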

In the listing below I show the structure of what I am currently doing. First, I start the long-running fluxKernel for the main part of the mesh on the computeStream. Then I launch a sendKernel that collects the data to be sent to the second GPU, and download it to the host (due to hardware limitations I cannot use cuda-aware MPI). The data is then sent non-blocking with MPI_Isend, followed by a blocking receive (MPI_Recv). Once the data has been received, the procedure is reversed: the data is uploaded to the device and scattered back into the main data structure with recvKernel. Finally, the fluxKernel is called for the remaining part of the mesh on the communicationStream.

Note that the kernels before and after the code shown run on the default stream.

{ ... } // Preparations

// Start main part of computation on first stream

fluxKernel<<< ..., ..., 0, computeStream >>>( /* main Part */ );

// Prepare send data

sendKernel<<< ..., ..., 0, communicationStream >>>( ... );

cudaMemcpyAsync ( ..., ..., ..., cudaMemcpyDeviceToHost, communicationStream );
cudaStreamSynchronize( communicationStream );

// MPI Communication

MPI_Isend( ... );
MPI_Recv ( ... );

// Use received data

cudaMemcpyAsync ( ..., ..., ..., cudaMemcpyHostToDevice, communicationStream );

recvKernel<<< ..., ..., 0, communicationStream >>>( ... );

fluxKernel<<< ..., ..., 0, communicationStream >>>( /* remaining Part */ );

{ ... } // Rest of the Computations

I used nvprof and the Visual Profiler to check whether the streams actually execute concurrently. This is the result:

[Figure: profiler timeline, one communication]

I see that the sendKernel (purple), the upload, the MPI communication, and the download run concurrently with the fluxKernel. The recvKernel (red), however, only starts after the other stream has finished. Removing the synchronization does not solve the problem:

[Figure: profiler timeline without the synchronization]

For my real application I have not just one communication, but several. I tested this with two communications as well. The steps are:

sendKernel<<< ..., ..., 0, communicationStream >>>( ... );
cudaMemcpyAsync ( ..., ..., ..., cudaMemcpyDeviceToHost, communicationStream );
cudaStreamSynchronize( communicationStream );
MPI_Isend( ... );

sendKernel<<< ..., ..., 0, communicationStream >>>( ... );
cudaMemcpyAsync ( ..., ..., ..., cudaMemcpyDeviceToHost, communicationStream );
cudaStreamSynchronize( communicationStream );
MPI_Isend( ... );

MPI_Recv ( ... );
cudaMemcpyAsync ( ..., ..., ..., cudaMemcpyHostToDevice, communicationStream );
recvKernel<<< ..., ..., 0, communicationStream >>>( ... );

MPI_Recv ( ... );
cudaMemcpyAsync ( ..., ..., ..., cudaMemcpyHostToDevice, communicationStream );
recvKernel<<< ..., ..., 0, communicationStream >>>( ... );

The result is similar to the one-communication case (above), in the sense that the second kernel call (this time a sendKernel) is delayed until the kernel on the computeStream has finished.

[Figure: profiler timeline, two communications]

So the overall observation is that the second kernel call is delayed, regardless of which kernel it is.

Can you explain why the GPU synchronizes in this way, or how I can get the second kernel on the communicationStream to run concurrently with the computeStream as well?

Thank you very much.

Edit 1: Complete rework of the question


Minimal Reproducible Example

I built a minimal reproducible example. In the end, the code prints int data to the terminal. The correct final value would be 32778 (= (32 * 1024 - 1) + 1 + 10). At the beginning I added an option integer to test 3 different options:

  • 0: the intended version, synchronizing before the CPU modifies the data
  • 1: same as 0, but without the synchronization
  • 2: a dedicated stream for the memcpys, and no synchronization
#include <iostream>

#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>

const int option = 0;

const int numberOfEntities = 2 * 1024 * 1024;
const int smallNumberOfEntities = 32 * 1024;

__global__ void longKernel(float* dataDeviceIn, float* dataDeviceOut, int numberOfEntities)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if(index >= numberOfEntities) return;

    float tmp = dataDeviceIn[index];

#pragma unroll
    for( int i = 0; i < 2000; i++ ) tmp += 1.0;

    dataDeviceOut[index] = tmp;
}

__global__ void smallKernel_1( int* smallDeviceData, int numberOfEntities )
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if(index >= numberOfEntities) return;

    smallDeviceData[index] = index;
}

__global__ void smallKernel_2( int* smallDeviceData, int numberOfEntities )
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if(index >= numberOfEntities) return;

    int value = smallDeviceData[index];

    value += 10;

    smallDeviceData[index] = value;
}


int main(int argc, char **argv)
{
    cudaSetDevice(0);

    float* dataDeviceIn;
    float* dataDeviceOut;

    cudaMalloc( &dataDeviceIn , sizeof(float) * numberOfEntities );
    cudaMalloc( &dataDeviceOut, sizeof(float) * numberOfEntities );

    int* smallDataDevice;
    int* smallDataHost;

    cudaMalloc    ( &smallDataDevice, sizeof(int) * smallNumberOfEntities );
    cudaMallocHost( &smallDataHost  , sizeof(int) * smallNumberOfEntities );

    cudaStream_t streamLong;
    cudaStream_t streamSmall;
    cudaStream_t streamCopy;

    int priority_high, priority_low;
    cudaDeviceGetStreamPriorityRange(&priority_low , &priority_high ) ;
    cudaStreamCreateWithPriority (&streamLong , cudaStreamNonBlocking, priority_low  );
    cudaStreamCreateWithPriority (&streamSmall, cudaStreamNonBlocking, priority_high );
    cudaStreamCreateWithPriority (&streamCopy , cudaStreamNonBlocking, priority_high );

    //////////////////////////////////////////////////////////////////////////

    longKernel <<< numberOfEntities / 32, 32, 0, streamLong >>> (dataDeviceIn, dataDeviceOut, numberOfEntities);

    //////////////////////////////////////////////////////////////////////////

    smallKernel_1 <<< smallNumberOfEntities / 32, 32, 0 , streamSmall >>> (smallDataDevice, smallNumberOfEntities);

    if( option <= 1 ) cudaMemcpyAsync( smallDataHost, smallDataDevice, sizeof(int) * smallNumberOfEntities, cudaMemcpyDeviceToHost, streamSmall );
    if( option == 2 ) cudaMemcpyAsync( smallDataHost, smallDataDevice, sizeof(int) * smallNumberOfEntities, cudaMemcpyDeviceToHost, streamCopy  );

    if( option == 0 ) cudaStreamSynchronize( streamSmall );

    // some CPU modification of data
    for( int i = 0; i < smallNumberOfEntities; i++ ) smallDataHost[i] += 1;

    if( option <= 1 ) cudaMemcpyAsync( smallDataDevice, smallDataHost, sizeof(int) * smallNumberOfEntities, cudaMemcpyHostToDevice, streamSmall );
    if( option == 2 ) cudaMemcpyAsync( smallDataDevice, smallDataHost, sizeof(int) * smallNumberOfEntities, cudaMemcpyHostToDevice, streamCopy  );

    smallKernel_2 <<< smallNumberOfEntities / 32, 32, 0 , streamSmall >>> (smallDataDevice, smallNumberOfEntities);

    //////////////////////////////////////////////////////////////////////////

    cudaDeviceSynchronize();

    cudaMemcpy( smallDataHost, smallDataDevice, sizeof(int) * smallNumberOfEntities, cudaMemcpyDeviceToHost );

    for( int i = 0; i < smallNumberOfEntities; i++ ) std::cout << smallDataHost[i] << "\n";

    return 0;
}

With this code I see the same behavior as described above:

Option 0 (correct results): [Figure: profiler timeline, option 0]

Option 1 (wrong results, the +1 from the CPU is missing): [Figure: profiler timeline, option 1]

Option 2 (completely wrong results, all values are 10, download happens before smallKernel_1): [Figure: profiler timeline, option 2]


Solution:

Running option 0 under Linux (as suggested in Robert's answer) produces the expected behavior! [Figure: profiler timeline, option 0 on Linux]

1 Answer:

Answer 0 (score: 1)

Here is how I would try to accomplish this:

  1. Use a high-priority/low-priority stream arrangement, as you suggest.
  2. Only 2 streams are needed.
  3. Make sure to pin the host memory, to allow compute/copy overlap.
  4. Since you don't intend to use cuda-aware MPI, your MPI transactions are purely host activity. Therefore we can use a stream callback to insert this host activity into the high-priority stream.
  5. To allow the high-priority kernels to easily insert themselves among the low-priority kernel's blocks, I chose a grid-stride-loop design for the high-priority copy kernels, but a non-grid-stride-loop design for the low-priority kernel. We want the low-priority kernel to have a larger number of blocks, so that blocks are launching and retiring all the time, easily allowing the GPU block scheduler to insert high-priority blocks as they become available.
  6. The work issuance for each "frame" does not use any synchronize calls of any kind. I use one cudaDeviceSynchronize() per loop/frame to break (separate) the processing of one frame from the next. The arrangement of activities within a frame is handled entirely by CUDA stream semantics, which enforce serialization for activities that depend on each other but allow concurrency for activities that do not.

Here is a sample code that implements these ideas:

#include <iostream>
#include <unistd.h>
#include <cstdio>

#define cudaCheckErrors(msg) \
    do { \
        cudaError_t __err = cudaGetLastError(); \
        if (__err != cudaSuccess) { \
            fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
                msg, cudaGetErrorString(__err), \
                __FILE__, __LINE__); \
            fprintf(stderr, "*** FAILED - ABORTING\n"); \
            exit(1); \
        } \
    } while (0)

typedef double mt;
const int nTPB = 512;
const size_t ds = 100ULL*1048576;
const size_t bs = 1048576ULL;
const int  my_intensity = 1;
const int loops = 4;
const size_t host_func_delay_us = 100;
const int max_blocks = 320; // chosen based on GPU, could use runtime calls to set this via cudaGetDeviceProperties

template <typename T>
__global__ void fluxKernel(T * __restrict__ d, const size_t n, const int intensity){

  size_t idx = ((size_t)blockDim.x) * blockIdx.x + threadIdx.x;
  if (idx < n){
    T temp = d[idx];
    for (int i = 0; i < intensity; i++)
      temp = sin(temp);  // just some dummy code to simulate "real work"
    d[idx] = temp;
    }
}

template <typename T>
__global__ void sendKernel(const T * __restrict__ d, const size_t n, T * __restrict__ b){

  for (size_t idx = ((size_t)blockDim.x) * blockIdx.x + threadIdx.x; idx < n; idx += ((size_t)blockDim.x)*gridDim.x)
    b[idx] = d[idx];
}

template <typename T>
__global__ void recvKernel(const T * __restrict__ b, const size_t n, T * __restrict__ d){

  for (size_t idx = ((size_t)blockDim.x) * blockIdx.x + threadIdx.x; idx < n; idx += ((size_t)blockDim.x)*gridDim.x)
    d[idx] = b[idx];
}

void CUDART_CB MyCallback(cudaStream_t stream, cudaError_t status, void *data){
    printf("Loop %lu callback\n", (size_t)data);
    usleep(host_func_delay_us); // simulate: this is where non-cuda-aware MPI calls would go, operating on h_buf
}
int main(){

  // get the range of stream priorities for this device
  int priority_high, priority_low;
  cudaDeviceGetStreamPriorityRange(&priority_low, &priority_high);
  // create streams with highest and lowest available priorities
  cudaStream_t st_high, st_low;
  cudaStreamCreateWithPriority(&st_high, cudaStreamNonBlocking, priority_high);
  cudaStreamCreateWithPriority(&st_low, cudaStreamNonBlocking, priority_low);
  // allocations
  mt *h_buf, *d_buf, *d_data;
  cudaMalloc(&d_data, ds*sizeof(d_data[0]));
  cudaMalloc(&d_buf, bs*sizeof(d_buf[0]));
  cudaHostAlloc(&h_buf, bs*sizeof(h_buf[0]), cudaHostAllocDefault);
  cudaCheckErrors("setup error");
  // main processing loop
  for (unsigned long i = 0; i < loops; i++){
    // issue low-priority
    fluxKernel<<<((ds-bs)+nTPB)/nTPB, nTPB,0,st_low>>>(d_data+bs, ds-bs, my_intensity);
    // issue high-priority
    sendKernel<<<max_blocks,nTPB,0,st_high>>>(d_data, bs, d_buf);
    cudaMemcpyAsync(h_buf, d_buf, bs*sizeof(h_buf[0]), cudaMemcpyDeviceToHost, st_high);
    cudaStreamAddCallback(st_high, MyCallback, (void*)i, 0);
    cudaMemcpyAsync(d_buf, h_buf, bs*sizeof(h_buf[0]), cudaMemcpyHostToDevice, st_high);
    recvKernel<<<max_blocks,nTPB,0,st_high>>>(d_buf, bs, d_data);
    fluxKernel<<<((bs)+nTPB)/nTPB, nTPB,0,st_high>>>(d_data, bs, my_intensity);
    cudaDeviceSynchronize();
    cudaCheckErrors("loop error");
    }
  return 0;
}

Here is the Visual Profiler timeline output (on Linux, on a Tesla V100):

[Figure: Visual Profiler timeline]

Note that arranging complex concurrency scenarios on Windows WDDM can be quite challenging. I recommend avoiding it, and this answer does not attempt to cover all the challenges there; I suggest using Linux, or a GPU in TCC mode on Windows, for this.

If you try this code on your machine, you may need to adjust some of the constants to make things look like this.
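
(A closing note, not part of the original answer: the comment next to max_blocks in the code above hints that the value could be derived at runtime rather than hard-coded. A minimal sketch using device properties and the occupancy API is shown below; computeMaxBlocks is a hypothetical helper, and it assumes the same sendKernel and block size nTPB used above:)

// Sketch: derive a max_blocks value from device properties instead of hard-coding it.
int computeMaxBlocks(int blockSize){
  cudaDeviceProp prop;
  cudaGetDeviceProperties(&prop, 0);
  int blocksPerSM = 0;
  // occupancy of the grid-stride copy kernel (sendKernel<mt> as defined above)
  cudaOccupancyMaxActiveBlocksPerMultiprocessor(&blocksPerSM, sendKernel<mt>, blockSize, 0);
  return blocksPerSM * prop.multiProcessorCount; // e.g. 4 blocks/SM * 80 SMs = 320 on a Tesla V100
}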