Udacity Parallel Programming, unspecified launch failure

Date: 2017-01-23 12:48:35

Tags: cuda

I'm trying to complete homework #3 for the Udacity Parallel Programming course, and I'm running into the following CUDA error:

CUDA error at: student_func.cu:150  
unspecified launch failure cudaGetLastError()

The error occurs when I launch a kernel that is meant to reduce an array.

Here is my CUDA code (the copy_f kernel works fine):

__global__
void copy_f(const float* const d_src,
            float* const d_dst,
            size_t length) {
  size_t pos = blockIdx.x * blockDim.x + threadIdx.x;
  if (pos >= length) {
    return;
  }
  d_dst[pos] = d_src[pos];
}

__global__
void reduce_min(float* const d_buf,
                float* global_min_value,
                size_t length) {
  size_t pos = blockIdx.x * blockDim.x + threadIdx.x;

  while (length > 1) {
    size_t mid = (length + 1) >> 1;
    if (pos >= 2 * mid) {
      return;
    }
    if (pos >= mid) {
      float min_value = 1.f;
      if (pos < length) {
        min_value = d_buf[pos];
      }
      if (d_buf[pos - mid] > min_value) {
        d_buf[pos - mid] = min_value;
      }
    }
    __syncthreads();
    length = mid;
  }
  if (pos == 0) {
    *global_min_value = d_buf[0];
  }
}

And the function that uses it:

float *d_buf_f;
unsigned int *d_hist, *d_buf_ui;

void your_histogram_and_prefixsum(const float* const d_logLuminance,
                                  unsigned int* const d_cdf,
                                  float &min_logLum,
                                  float &max_logLum,
                                  const size_t numRows,
                                  const size_t numCols,
                                  const size_t numBins)
{
  // Step 0: Setting kernels and allocating memory
  const size_t length = numRows * numCols;
  const size_t blockCols = 512;
  const size_t gridCols = (length + blockCols - 1) / blockCols;
  const dim3 blockSize(blockCols, 1, 1);
  const dim3 gridSize(gridCols, 1, 1);

  checkCudaErrors(cudaMalloc(&d_buf_f, sizeof(float) * length));
  checkCudaErrors(cudaMalloc(&d_hist, sizeof(unsigned int) * numBins));
  checkCudaErrors(cudaMalloc(&d_buf_ui, sizeof(unsigned int) * numBins));

  // Step 1: Min and max
  copy_f<<<gridSize, blockSize>>>(d_logLuminance, d_buf_f, length);
  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
  reduce_min<<<gridSize, blockSize>>>(d_buf_f, &min_logLum, length);
  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());

  // Step 5: Releasing memory
  checkCudaErrors(cudaFree(d_buf_f));
  checkCudaErrors(cudaFree(d_hist));
  checkCudaErrors(cudaFree(d_buf_ui));

  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}

1 Answer:

Answer 0 (score: 1)

The cause of the problem is this line:

reduce_min<<<gridSize, blockSize>>>(d_buf_f, &min_logLum, length);

&min_logLum is a bare host address, and using it in a kernel is illegal. It needs to be replaced with a valid address in device memory.
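
A minimal sketch of that fix, assuming a hypothetical device pointer named d_min_logLum (not part of the original code):

float *d_min_logLum;
checkCudaErrors(cudaMalloc(&d_min_logLum, sizeof(float)));

// Pass a device address to the kernel instead of the host address &min_logLum.
reduce_min<<<gridSize, blockSize>>>(d_buf_f, d_min_logLum, length);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());

// Copy the result back into the host variable the caller expects.
checkCudaErrors(cudaMemcpy(&min_logLum, d_min_logLum, sizeof(float),
                           cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(d_min_logLum));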

After fixing that, you should find that the kernel only works correctly when run with a single block. The kernel has design problems that need to be fixed: __syncthreads() only synchronizes threads within one block, so a loop that reduces the whole array across blocks cannot work this way. There is a very good discussion of reduction kernel design in this white paper from NVIDIA, and I recommend reading it.
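
To illustrate the direction that paper takes, here is a sketch under assumptions, not the assignment's reference solution: each block reduces its own tile in shared memory and writes one partial minimum, and a second launch then reduces those partial minima. The names block_min_reduce and d_block_mins are hypothetical, the block size is assumed to be a power of two, and FLT_MAX requires #include <cfloat>.

__global__
void block_min_reduce(const float* const d_in,
                      float* const d_block_mins,
                      size_t length) {
  extern __shared__ float s_data[];
  size_t tid = threadIdx.x;
  size_t pos = blockIdx.x * blockDim.x + threadIdx.x;

  // Load one element per thread; out-of-range threads contribute a neutral value.
  s_data[tid] = (pos < length) ? d_in[pos] : FLT_MAX;
  __syncthreads();

  // Tree reduction within the block: halve the number of active threads each step.
  for (size_t s = blockDim.x / 2; s > 0; s >>= 1) {
    if (tid < s) {
      s_data[tid] = fminf(s_data[tid], s_data[tid + s]);
    }
    __syncthreads();
  }

  // Thread 0 publishes this block's partial minimum.
  if (tid == 0) {
    d_block_mins[blockIdx.x] = s_data[0];
  }
}

It would be launched with the shared-memory size as the third launch parameter, for example block_min_reduce<<<gridSize, blockSize, blockCols * sizeof(float)>>>(d_buf_f, d_block_mins, length); and then run again with a single block over d_block_mins to produce the final minimum.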