cudaThreadSynchronize() returns error code 6

Time: 2017-01-23 18:33:22

Tags: multithreading parallel-processing cuda reduction

I am trying to run code that uses parallel reduction in CUDA to find the maximum element of an array.

#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#include <stdio.h>
/* a is the array that holds the values and c is the array used to store the maximum in a block */ 
cudaError_t reduce_max(int *a,int *c,int size);

/*The kernel that performs the reduction */
__global__ void global_max(int *d_c, int * d_a)
{
    int myId=threadIdx.x+blockDim.x*blockIdx.x;
    int tid=threadIdx.x;
    for(int s=(blockDim.x)/2; s>0; s>>1)
    {
        if(tid<s)
        {
            d_a[myId]=max(d_a[myId],d_a[myId+s]);
        }
        __syncthreads();
    }
    if(tid==0)
    {
        d_c[blockIdx.x]=d_a[myId];
    }
}

int main()
{
    const int arraySize = 1024;
    int i;
    int a[arraySize];
    for(i=0;i<arraySize;i++)
    {
        a[i]=i;
    }
    int c[arraySize];
    cudaError_t cudaStatus = reduce_max(a,c,arraySize);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "The required operation failed");
        return 1;
    }
    cudaStatus = cudaThreadExit();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaThreadExit failed!");
        return 1;
    }

    return 0;
}

// Helper function that uses CUDA to compute the per-block maxima of the array in parallel.
cudaError_t reduce_max(int *a,int *c,int size)
{
    int *dev_a = 0;
    int *dev_c = 0;
    /*
    dev_a and dev_c are the arrays on the device 
    */
    cudaError_t cudaStatus;
    const dim3 blockSize(64,1,1);
    const dim3 gridSize(size/blockSize.x,1,1);

    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
        goto Error;
    }

    /*Allocating the memory on the device */
    cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }

    cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }

    /*Copying array from host to device */
    cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

    /*Calling the kernel */
    global_max<<<gridSize,blockSize>>>(dev_c, dev_a);

    cudaStatus = cudaThreadSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaThreadSynchronize returned error code %d\n", cudaStatus);
        goto Error;
    }

    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

Error:
    cudaFree(dev_c);
    cudaFree(dev_a);   
    return cudaStatus;
}

But when I execute the code above I get the error: cudaThreadSynchronize returned error code 6.

I can't figure out what the problem is.

1 Answer:

Answer 0 (score: 2)

Your code runs forever. As a result, you are hitting a timeout.
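(As an aside, it is easier to see what error 6 means if you print the error by name instead of as a raw integer. A minimal sketch, assuming only the standard runtime helpers cudaGetErrorName and cudaGetErrorString, and using cudaDeviceSynchronize, which replaces the deprecated cudaThreadSynchronize:)

#include "cuda_runtime.h"
#include <stdio.h>

/* Sketch: report a CUDA error by name and description rather than as a raw code. */
static void report(cudaError_t err, const char *what)
{
    if (err != cudaSuccess) {
        fprintf(stderr, "%s failed: %s (%s)\n",
                what, cudaGetErrorName(err), cudaGetErrorString(err));
    }
}

/* For example, after the kernel launch:
   report(cudaDeviceSynchronize(), "global_max kernel"); */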

This line is broken, and your compiler should be issuing a warning about it:

for(int s=(blockDim.x)/2; s>0; s>>1)

s>>1 does not modify the s variable. I'm quite sure you meant s>>=1, which does modify s. Because s is never modified, your loop runs forever, and that is why you hit the kernel timeout.

Use this instead:

for(int s=(blockDim.x)/2; s>0; s>>=1)
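
For completeness, a minimal sketch of the kernel with just that one-character change applied, everything else kept exactly as in the question:

__global__ void global_max(int *d_c, int *d_a)
{
    int myId = threadIdx.x + blockDim.x * blockIdx.x;
    int tid = threadIdx.x;
    /* Halve the active range each iteration; s >>= 1 actually updates s. */
    for (int s = blockDim.x / 2; s > 0; s >>= 1)
    {
        if (tid < s)
        {
            d_a[myId] = max(d_a[myId], d_a[myId + s]);
        }
        __syncthreads();
    }
    /* Thread 0 of each block writes that block's maximum. */
    if (tid == 0)
    {
        d_c[blockIdx.x] = d_a[myId];
    }
}

Note that d_c then holds one partial maximum per block (gridSize.x values), so after the device-to-host copy you still have to reduce those few values on the host, or launch the kernel again on d_c, to get the single overall maximum.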