在这么多循环之后,cuda计算给出了nan

时间:2017-03-10 21:05:57

标签: c++ cuda

这是我第一次使用 CUDA。我在一个 N×N×N 的网格(N = 128)上运行一些涉及 cufft 和两个简单内核的计算。程序起初运行正常,但在第 4040 到 4050 次循环之间的某个时刻,我的网格点的值变成了 nan。在较小的网格上,失败前能完成更多次循环。这让我怀疑某处存在内存泄漏。我试过运行 cuda-memcheck,但没有报告任何错误。你能看出哪里可能导致这种问题吗?我已将代码精简到最低限度,但它仍然很长,十分抱歉。谢谢你的帮助。

#define _USE_MATH_DEFINES
#include <iostream>
#include <math.h>
#include <cstdlib>

#include <cuda_runtime.h>
#include <cufft.h>
#include <cufftXt.h>
using namespace std;

// Element-wise cube of the real part: data3[i].x = data[i].x^3; the
// imaginary part is zeroed.  One thread per element; the grid may be
// launched with more threads than n, hence the bounds check.
__global__ void Cube (cufftComplex *data, cufftComplex *data3, int n) {

    int i = blockIdx.x * blockDim.x + threadIdx.x;

    if (i < n) {
        float x = data[i].x;
        // x*x*x stays in single precision; pow(data[i].x, 3) promotes to the
        // double-precision pow, which is slower and gains nothing here.
        data3[i].x = x * x * x;
        data3[i].y = 0.0f;
    }
    // NOTE(review): the original trailing __syncthreads() was removed — this
    // kernel uses no shared memory, so the barrier served no purpose.
}

// Spectral-space update: data[i] = (w[i]*data[i] + v[i]*data3[i]) / n, with
// the imaginary part zeroed.  The 1/n factor compensates for cuFFT's
// unnormalized transforms (a forward+inverse round trip scales by n).
// w and v are the precomputed per-wavenumber integration factors from main().
__global__ void Spectral (cufftComplex *data, cufftComplex *data3, float *w, float *v, int n) {

    int i = blockIdx.x * blockDim.x + threadIdx.x;

    if (i < n) {
        data[i].x = (w[i] * data[i].x + data3[i].x * v[i]) / (float)n;
        data[i].y = 0.0f;
    }
    // NOTE(review): the original trailing __syncthreads() was removed — this
    // kernel uses no shared memory, so the barrier served no purpose.
}

float ran();

// Pseudo-spectral time stepper on an N^3 grid:
//   real space: data3 = data^3  ->  FFT(data3), FFT(data)
//   spectral space: data = (w*data + v*data3)/n  ->  inverse FFT(data)
// Improvements over the original: size_t byte counts (int overflows for
// N >= 256), error checking on allocations and cuFFT calls, ceiling-division
// grid sizing, and removal of the per-step cudaDeviceSynchronize() calls
// (all work is issued on the default stream and is therefore already ordered).
int main (int argc, char **argv) {
    float QQ, C;

    // Simulation parameters.
    float tmax = 5000;          // step count; converted to an end time below
    int N = 128;                // grid points per dimension
    int n = N*N*N;              // total number of grid points
    float dn = M_PI/8;          // grid spacing
    float dt = .075;            // time step
    float psi0 = -0.175;        // mean of the initial field
    float r = -0.1;             // model parameter / perturbation amplitude

    tmax *= dt;                 // tmax now holds the simulation end time

    // Byte counts as size_t so the products cannot overflow a 32-bit int.
    size_t mem_size = sizeof(cufftComplex) * (size_t)n;
    size_t float_mem_size = sizeof(float) * (size_t)n;

    // Host buffers.
    cufftComplex *h_data  = (cufftComplex*)malloc(mem_size);
    cufftComplex *h_data3 = (cufftComplex*)malloc(mem_size);
    float *h_w = (float*)malloc(float_mem_size);
    float *h_v = (float*)malloc(float_mem_size);
    if (!h_data || !h_data3 || !h_w || !h_v) {
        cerr << "host allocation failed" << endl;
        return 1;
    }

    // Device buffers; check each allocation instead of assuming success.
    cufftComplex *d_data = NULL;
    cufftComplex *d_data3 = NULL;
    float *d_w = NULL;
    float *d_v = NULL;
    if (cudaMalloc((void**)&d_data, mem_size)  != cudaSuccess ||
        cudaMalloc((void**)&d_data3, mem_size) != cudaSuccess ||
        cudaMalloc(&d_w, float_mem_size)       != cudaSuccess ||
        cudaMalloc(&d_v, float_mem_size)       != cudaSuccess) {
        cerr << "cudaMalloc failed: " << cudaGetErrorString(cudaGetLastError()) << endl;
        return 1;
    }

    // Initial condition: purely real random perturbation around psi0.
    for (int i=0; i<n; i++){
        h_data[i].x = psi0 + r * ran();
        h_data[i].y = 0;
    }

    // Precompute the per-wavenumber integration factors w and v.  nx/ny/nz
    // are folded into [0, N/2] because the discrete spectrum is symmetric
    // about the Nyquist frequency.
    int nx, ny, nz;
    float B = -4 * M_PI * M_PI / ( pow((N*dn),2));
    for (int i=0; i<n; i++){
        nx = (i % N);
        ny = (i / N) % N;
        nz = i / (N * N);

        if (nx > (N / 2)) {
            nx = (N - nx);
        }
        if (ny > (N / 2)) {
            ny = (N - ny);
        }
        if (nz > (N / 2)) {
            nz = (N - nz);
        }

        QQ = B * (pow(nx, 2.0) + pow(ny, 2.0) + pow(nz, 2.0));  // ~ -|k|^2
        C = -r - 2.0 * QQ - pow(QQ, 2.0);

        h_w[i] = exp(QQ * (1.0 - C) * dt);      // exponential integrating factor
        h_v[i] = (h_w[i] - 1.0) / (1.0 - C);    // weight for the nonlinear term
    }

    cudaMemcpy(d_w, h_w, float_mem_size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_v, h_v, float_mem_size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_data, h_data, mem_size, cudaMemcpyHostToDevice);

    cufftHandle plan;
    if (cufftPlan3d(&plan, N, N, N, CUFFT_C2C) != CUFFT_SUCCESS) {
        cerr << "cufftPlan3d failed" << endl;
        return 1;
    }

    // Launch configuration: ceiling division so the final partial block is
    // still launched when n is not a multiple of the block size.
    int threadsPerBlock = (N > 1024) ? 1024 : N;
    int numBlocks = (n + threadsPerBlock - 1) / threadsPerBlock;

    for (float t = 0; t < tmax; t += dt) {

        // data3 = data^3 in real space.
        Cube <<<numBlocks, threadsPerBlock>>> (d_data, d_data3, n);

        // Forward transforms of both fields.
        if (cufftExecC2C(plan, d_data3, d_data3, CUFFT_FORWARD) != CUFFT_SUCCESS ||
            cufftExecC2C(plan, d_data, d_data, CUFFT_FORWARD)   != CUFFT_SUCCESS) {
            cerr << "forward cufftExecC2C failed at t=" << t << endl;
            return 1;
        }

        // Semi-implicit update in spectral space (includes the 1/n
        // normalization that cuFFT omits).
        Spectral <<<numBlocks, threadsPerBlock>>> (d_data, d_data3, d_w, d_v, n);

        // Back to real space.
        if (cufftExecC2C(plan, d_data, d_data, CUFFT_INVERSE) != CUFFT_SUCCESS) {
            cerr << "inverse cufftExecC2C failed at t=" << t << endl;
            return 1;
        }
    }

    // Surface any asynchronous kernel or launch errors from the loop.
    cudaError_t err = cudaDeviceSynchronize();
    if (err == cudaSuccess) err = cudaGetLastError();
    if (err != cudaSuccess) {
        cerr << "CUDA error: " << cudaGetErrorString(err) << endl;
        return 1;
    }

    //check output (should be a number)
    cudaMemcpy(h_data, d_data, mem_size, cudaMemcpyDeviceToHost);
    cout <<h_data[0].x <<endl;

    //clean up
    cufftDestroy(plan);
    cudaFree(d_data);
    cudaFree(d_data3);
    cudaFree(d_w);
    cudaFree(d_v);
    free(h_w);
    free(h_v);
    free(h_data);
    free(h_data3);

    return 0;
}

// Uniform pseudo-random draw in the closed interval [-1, 1], driven by rand().
float ran(){
    float u = static_cast<float>(rand()) / RAND_MAX;  // uniform in [0, 1]
    return u + u - 1.0f;                              // map [0,1] -> [-1,1]
}

2 个答案:

答案 0 :(得分:1)

到目前为止,这是我对代码的检测结果。当我在 my_assert 中启用设备端断言时,它在 nan5 检查点报告 d_data3 的输入检查失败(即该值是 nan)。这表明此前对 d_data3 执行的 cufftExecC2C 调用正在产生 nan 数据。我相信,如果输入无效,FFT 会产生超出范围的结果。

代码经过检测,允许您转储数据并查看数据。您必须修改dump_data以显示您希望看到的内容。

当我运行下面的代码时,它最终打印出来:

4850.14
4851.14
4852.14
4853.14
4854.14
4855.14
4856.14
4857.14
4858.14
4859.14
4860.14
d_data3 output nan check failed
$

因此 nan 首次出现在第 4860 次迭代;由于 d_data3 的输入检查没有失败,nan 是在第 4860 次循环迭代中由 FFT 运算引入 d_data3 的。你需要研究输入和输出数据,看看能否确定原因。问题可能出在生成 d_data3 的 Cube 内核的数据上,或许可以对它做一些修改。例如,既然你在反复对数据取立方,那么它在某个时刻超出 float 的表示范围,是否是合理的?

这是我的检测代码:

#include <iostream>
#include <math.h>
#include <cstdlib>

#include <cuda_runtime.h>
#include <cufft.h>
#include <cufftXt.h>
#include <assert.h>
#include <stdio.h>
using namespace std;

// Debug hook: a single switch point for device/host assertions used by the
// instrumented kernels below.  The assert is deliberately commented out so
// the nan diagnostics print without trapping; uncomment it to make the first
// failing check abort the kernel.
__host__ __device__ void my_assert(bool cond){

  //assert(cond);

}

// Instrumented copy of Cube: cubes the real part of data into data3 and
// zeroes the imaginary part, printing a diagnostic when a nan is seen on the
// input (tag nan1) or produced by the cube itself (tag nan2).
__global__ void Cube (cufftComplex *data, cufftComplex *data3, int n) {

    int i = blockIdx.x * blockDim.x + threadIdx.x;

    if (i<n){
        float temp = data[i].x;
        // nan already present in the input field?
        if (isnan(temp)) {printf("nan1: %d\n", i); my_assert(0);}
        data3[i].x = pow(data[i].x, 3);
        // nan created by the cube (e.g. float overflow in pow)?
        if (isnan(data3[i].x)) {printf("nan2: %d %f\n", i, data[i].x); my_assert(0);}
        data3[i].y = 0;
    }
    __syncthreads();
}

// Instrumented copy of Spectral: performs the spectral-space update
// data = (w*data + v*data3)/n and prints a tagged diagnostic identifying
// which of the four inputs (nan3..nan6) or the computed result (nan7) is nan.
__global__ void Spectral (cufftComplex *data, cufftComplex *data3, float *w, float *v, int n) {

    int i = blockIdx.x * blockDim.x + threadIdx.x;

    if (i<n){
        // Snapshot each operand so nan7 can report all of them together.
        float temp1 = w[i];
        if (isnan(temp1)) {printf("nan3: %d\n", i); my_assert(0);}
        float temp2 = data[i].x;
        if (isnan(temp2)) {printf("nan4: %d\n", i); my_assert(0);}
        float temp3 = data3[i].x;
        if (isnan(temp3)) {printf("nan5: %d\n", i); my_assert(0);}
        float temp4 = v[i];
        if (isnan(temp4)) {printf("nan6: %d\n", i); my_assert(0);}

        data[i].x = (w[i] * data[i].x + data3[i].x * v[i]) / n;
        // Result went nan even though all inputs were finite-or-nan-checked.
        if (isnan(data[i].x)) {printf("nan7: %d, %f, %f, %f, %f, %d\n",i, temp1, temp2, temp3, temp4, n); my_assert(0);}
        data[i].y = 0;
    }
    __syncthreads();
}


// Scans len complex values and raises *res to true if any element has a nan
// real or imaginary part.  The caller must pre-clear *res; only true is ever
// written, so concurrent writes from many threads are benign.
__global__  void nan_kernel(cufftComplex *d, int len, bool *res){
  int i = threadIdx.x + blockDim.x * blockIdx.x;
  if (i >= len)
    return;
  bool bad = isnan(d[i].x) || isnan(d[i].y);
  if (bad)
    *res = true;
}
// Device-resident flag reused by every checknan() call; allocated once in main().
bool *d_nan;

// Returns true if any of the len complex values at device pointer d contain
// a nan component.  Clears the device flag, scans with nan_kernel, and reads
// the flag back (the blocking device-to-host cudaMemcpy also synchronizes
// with the kernel launch, so no explicit sync is needed).
bool checknan(cufftComplex *d, int len){
  bool h_nan = false;
  cudaMemcpy(d_nan, &h_nan, sizeof(bool), cudaMemcpyHostToDevice);
  nan_kernel<<<(len/1024)+1, 1024>>>(d, len, d_nan);
  cudaMemcpy(&h_nan, d_nan, sizeof(bool), cudaMemcpyDeviceToHost);
  return h_nan;
}

// Prints each element's pre-FFT (d1) and post-FFT (d2) complex values side
// by side on stdout; adapt the body to write wherever inspection is most
// convenient (e.g. a file).
void dump_data(cufftComplex *d1, cufftComplex *d2, int len)
{
 std::cout << "input:         output: " << std::endl;
 int i = 0;
 while (i < len) {
   std::cout << d1[i].x << "," << d1[i].y << "  " << d2[i].x << "," << d2[i].y << std::endl;
   ++i;
 }
};

float ran();

// Instrumented driver: identical to the asker's main() except that every
// cuFFT call's status is checked, d_data3 is nan-checked before and after its
// forward FFT, and the offending input/output arrays are dumped when the
// first nan appears.  Code is unchanged from the posted answer by design.
int main (int argc, char **argv) {
    float QQ, C;

    // Simulation parameters (see the question's code for their meaning).
    float tmax = 5000;
    int N = 128;
    int n = N*N*N;
    float dn = M_PI/8;
    float dt = .075;
    float psi0 = -0.175;
    float r = -0.1;

    tmax *= dt;  // tmax now holds the simulation end time

    //setup cuda complex arrays
    int mem_size = sizeof(cufftComplex)*n;
    int float_mem_size = sizeof(float)*n;

    cufftComplex *h_data = (cufftComplex*)malloc(mem_size);
    cufftComplex *d_data;
    cudaMalloc((void**)&d_data, mem_size);

    cufftComplex *h_data3 = (cufftComplex*)malloc(mem_size);
    cufftComplex *d_data3;
    cudaMalloc((void**)&d_data3, mem_size);

    float * h_w = (float*)malloc(float_mem_size);
    float *d_w;
    cudaMalloc(&d_w, float_mem_size);

    float * h_v = (float*)malloc(float_mem_size);
    float *d_v;
    cudaMalloc(&d_v, float_mem_size);

    // Initial condition: purely real random perturbation around psi0.
    for (int i=0; i<n; i++){
      h_data[i].x = psi0 + r * ran();
      h_data[i].y = 0;
    }

    // Precompute spectral integration factors w and v per wave vector;
    // indices are folded into [0, N/2] (spectrum symmetric about Nyquist).
    int nx, ny, nz;
    float B = -4 * M_PI * M_PI / ( pow((N*dn),2));
    for (int i=0; i<n; i++){
        nx = (i % N);
        ny = (i / N) % N;
        nz = i / (N * N);

        if (nx > (N / 2)) {
            nx = (N - nx);
        }
        if (ny > (N / 2)) {
            ny = (N - ny);
        }
        if (nz > (N / 2)) {
            nz = (N - nz);
        }

        QQ = B * (pow(nx, 2.0) + pow(ny, 2.0) + pow(nz, 2.0));
        C = -r - 2.0 * QQ - pow(QQ, 2.0);

        h_w[i] = exp(QQ * (1.0 - C) * dt);
        h_v[i] = (h_w[i] - 1.0) / (1.0 - C);

    }

    cudaMemcpy(d_w, h_w, float_mem_size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_v, h_v, float_mem_size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_data, h_data, mem_size, cudaMemcpyHostToDevice);

    cufftHandle plan;
    cufftPlan3d(&plan, N, N, N, CUFFT_C2C);

    int maxThreads=(N>1024)?1024:N;
    int threadsPerBlock = maxThreads;
    int numBlocks = n/maxThreads;
    // Instrumentation state: cuFFT status, device nan flag, and host-side
    // snapshots of d_data3 before (i3) and after (o3) its forward FFT.
    cufftResult res;
    cudaMalloc(&d_nan, sizeof(bool));
    cufftComplex *i3, *o3;
    i3 = (cufftComplex *)malloc(mem_size);
    o3 = (cufftComplex *)malloc(mem_size);
    std::cout << "start loop" << std::endl;
    for (float t = 0; t < tmax; t += dt) {
      // Print the iteration number (t/dt) so the failing step is visible.
      std::cout << t/dt << std::endl;
      Cube <<<numBlocks, threadsPerBlock>>> (d_data, d_data3, n);
      cudaDeviceSynchronize();
      // Snapshot the FFT input, then verify it is nan-free before transforming.
      cudaMemcpy(i3, d_data3, mem_size, cudaMemcpyDeviceToHost);
      if (checknan(d_data3, n)) {std::cout << "d_data3 input nan check failed" << std::endl; return -1;}
      res = cufftExecC2C(plan, d_data3, d_data3, CUFFT_FORWARD);
      if (res != CUFFT_SUCCESS) {std::cout << "cufft1 error: " << (int)res << " , " << t/dt << std::endl; return 1;}
      cudaDeviceSynchronize();
      // If the output is nan while the input was clean, dump both for study.
      if (checknan(d_data3, n)) {std::cout << "d_data3 output nan check failed" << std::endl; cudaMemcpy(o3, d_data3, mem_size, cudaMemcpyDeviceToHost); dump_data(i3, o3, n); return -1;}

      res = cufftExecC2C(plan, d_data, d_data, CUFFT_FORWARD);
      if (res != CUFFT_SUCCESS) {std::cout << "cufft2 error: " << (int)res << " , " << t/dt << std::endl; return 1;}
      cudaDeviceSynchronize();

      Spectral <<<numBlocks, threadsPerBlock>>> (d_data, d_data3, d_w, d_v, n);
      cudaDeviceSynchronize();

      res = cufftExecC2C(plan, d_data, d_data, CUFFT_INVERSE);
      if (res != CUFFT_SUCCESS) {std::cout << "cufft3 error: " << (int)res << " , " << t/dt << std::endl; return 1;}
      cudaDeviceSynchronize();
    }

    //check output (should be a number)
    cudaMemcpy(h_data, d_data, mem_size, cudaMemcpyDeviceToHost);
    cout <<h_data[0].x <<endl;
    // Surface any asynchronous CUDA error the loop may have left behind.
    cudaError_t cres = cudaGetLastError();
    if (cres != cudaSuccess) std::cout << "cuda error: " << cudaGetErrorString(cres) << std::endl;
    //clean up
    cufftDestroy(plan);
    cudaFree(d_data);
    cudaFree(d_data3);
    cudaFree(d_w);
    cudaFree(d_v);
    free(h_w);
    free(h_v);
    free(h_data);
    free(h_data3);

    return 0;
}

// Draws a uniform pseudo-random float in the closed range [-1, 1] from rand().
float ran(){
    float u = rand() / float(RAND_MAX);  // uniform in [0, 1]
    return 2*u - 1;                      // rescale to [-1, 1]
}

编辑: 在向dump_data添加一些打印输出代码后(见上面的修改),我看到了这一点:

...
4859.14
4860.14
d_data3 output nan check failed
input:         output:
3.37127e+19,0  nan,nan
3.21072e+19,0  nan,nan
2.76453e+19,0  nan,nan
2.13248e+19,0  nan,nan
1.44669e+19,0  nan,nan
8.37214e+18,0  nan,nan
3.93645e+18,0  nan,nan
1.35501e+18,0  nan,nan
2.55741e+17,0  nan,nan
5.96468e+15,0  nan,nan
-1.36656e+16,0  nan,nan
-2.33688e+17,0  nan,nan
-8.37407e+17,0  nan,nan
-1.79915e+18,0  nan,nan
-2.96302e+18,0  nan,nan
-4.11485e+18,0  nan,nan
-5.03876e+18,0  nan,nan
-5.57617e+18,0  nan,nan
-5.65307e+18,0  nan,nan
-5.28957e+18,0  nan,nan
-4.5872e+18,0  nan,nan
-3.68309e+18,0  nan,nan
...

我不是 FFT 专家,但情况可能是:用 float 精度对一个充满大数值的大型数组做 FFT 时,可能会发生溢出。如果你只需要完成 5000 次迭代,而它在第 4860 次才失败,那么把所有数据类型从 float 改为 double 或许就能撑到结束——不过我不清楚你在这里所做计算的数值意义。

最后,请注意 cufft 和 fftw 一样执行的是非归一化变换。这可能是你的数据集中幅值看似不断增长的一个因素。正如前面所说,我并不熟悉你在这里想要实现的算法。

答案 1 :(得分:0)

是否有可能在第 4040 次迭代附近发生了浮点下溢?你在反复对数据取三次方,这让我想到要检查这种可能性。如果不小心,float32 很容易逐步下溢到零。你可以在代码里加一个检查,把数值限制在某个最小的 epsilon 以上,以防止这种情况发生。