CUDA内核中的线程索引超出范围

时间:2019-05-07 09:51:11

标签: indexing cuda gpu dynamic-memory-allocation

我正在运行一个CUDA内核,该内核似乎超出了索引范围,我不知道为什么。我在cuda-memcheck中收到大小写为8的错误。

我尝试更改每个块的块数和线程数,并且只运行所需迭代的一部分。这是一些有用的信息,以及给出错误的可复制示例:

blockSize:128

numBlocks:512

Nvidia GTX 970

#include <iostream>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <vector>
#include <iterator>
#include <cuda_profiler_api.h>
#include <algorithm>
#include <cmath>
#include <numeric>
#include <stdio.h> 
#include <fstream>



__host__ 
int NchooseK(const int &N, const int &K)
{
    int result = 1;
    for (int i = 1; i <= K; i++)
    {
        result *= N - (K - i);
        result /= i;
    }
    return result;
}


__host__
inline int get_flatten_size(const unsigned int N){
    int sum = 0;
    for(int i=1; i<=N ; i++){
        sum +=i*NchooseK(N,i);
    }
    return sum;
}


__host__
std::vector<int> comb(const int &N, const int &K, const int &length)
//void comb(int N, int K, int length)
{
    int k;
    std::vector<int> vec(K);
    std::vector<int> flatten_vec(0);
    std::string bitmask(K, 1); // K leading 1's
    bitmask.resize(N, 0); // N-K trailing 0's

    for (int j = 0; j < length; j++) {
        k = 0;
        for (int i = 0; i < N; ++i) // [0..N-1] integers
        {
            if (bitmask[i]) {
                //std::cout << i << " ";
                vec[k] = i;
                k++;
            }
            //std::cout << std::endl;
        }
        std::prev_permutation(bitmask.begin(), bitmask.end());
        flatten_vec.insert(flatten_vec.end(), vec.begin(),vec.end());
    }
    return flatten_vec;
}

__host__
void get_matrix_indices(const unsigned int N, int *sub_col, int *sub_size, int *cumulative_size)
{
    int size, itterator = 0;    
    cumulative_size[0] = 0;
    std::vector<int> size_i_columns;
    std::vector<int> all_columns(0);

    for(int i=1; i<=N; i++){
        size = NchooseK(N,i);
        size_i_columns = comb(N,i,size);
        for(int j=0; j<size; j++){
            sub_size[itterator]=i;
            cumulative_size[itterator+1]=cumulative_size[itterator]+i;
            itterator++; 
        }
        all_columns.insert(all_columns.end(),size_i_columns.begin(),size_i_columns.end());
    }   
    //sub_col = &all_columns[0];
    for(int i = 0; i < all_columns.size(); i++) sub_col[i] = all_columns[i];
}



__global__
void comb_ols(const unsigned int M, const unsigned int N, int* sub_col, int *sub_size, int* cumulative_size, const unsigned int numberOfCalculations, const unsigned int max_size){

    int size;   
    int start_index;

    int index = blockIdx.x*blockDim.x+threadIdx.x;
    int stride = blockDim.x*gridDim.x;

    double *sub_matrix = new double[M*(1+max_size)];


    for(int i = index; i < numberOfCalculations; i+=stride){    


        size = sub_size[i];
        start_index = cumulative_size[i];             



            for(int j = 0; j < size; j++){
            for(int k  = 0; k<M; k++){

                sub_matrix[k] = 1;


                                        }       
            }
        }


    delete [] sub_matrix;


}

然后是我们的主要功能:

int main() 
{   

    int N = 17;
    int M = 263;

    const unsigned int regressors = N-1;
    const unsigned int numberOfCalculations = (int) (exp2((double) regressors) - 1);
    const unsigned int size_sub_col = get_flatten_size(regressors);

    int blockSize =128;
    int numBlocks = (numberOfCalculations + blockSize-1)/blockSize;

    std::cout << "\nblockSize :" << blockSize;      
    std::cout << "\nnumBlocks :" << numBlocks;      
    std::cout << "\nblockSize*numBlocks :" << blockSize*numBlocks;      


    std::cout << "\nregressors :" << regressors;        
    std::cout << "\nNumberOfCalculations :" << numberOfCalculations;        
    std::cout << "\nsize_sub_col :" << size_sub_col << '\n' ;       


    int *sub_size, *cumulative_size, *sub_columns;

    cudaMallocManaged(&sub_size, numberOfCalculations*sizeof(int));
    cudaMallocManaged(&cumulative_size, (numberOfCalculations+1)*sizeof(int));
    cudaMallocManaged(&sub_columns, size_sub_col*sizeof(int));

    get_matrix_indices(regressors,sub_columns, sub_size, cumulative_size);

    const unsigned int max_size = N*M;







    cudaProfilerStart();
    comb_ols<<<numBlocks, blockSize>>>(M,N,sub_columns, sub_size, cumulative_size, numberOfCalculations, max_size);
    cudaProfilerStop();


    cudaDeviceSynchronize();








    cudaFree(sub_size);
    cudaFree(cumulative_size);
    cudaFree(sub_columns);

    return 0;
}

我看不到为什么线程会尝试访问非法的内存空间。据我了解,矩阵sub_matrix将在每个线程上初始化一次,然后发生并行for循环。因此,每个线程应具有必要的存储空间。我在GPU上分配的内存过多吗?这里如何处理“ new sub_matrix”?

1 个答案:

答案 0 :(得分:2)

如果我正确地阅读了您的代码,则每个线程都尝试分配M * (1 + M*N)双倍数,即263 *(1 + 263 * 17)=‭1,176,136‬双倍数,或每个线程8.97Mb的堆内存。您启动128 * 512个线程。这意味着您需要588Gb的堆空间才能使内核成功运行。

很明显,您的GPU缺少该数量的内存,并且超出范围的内存访问是由于new调用(您可以检查,顺便说一句)中的失败引起的。

可能我建议您所需的堆内存的大小计算中有些错误。否则,您将对GPU遇到一个极其不现实的问题,并需要其他方法。

请注意,即使您设法重新设计内容以将代码限制为可行的malloc堆内存大小,您仍然极有可能需要在运行内核之前将malloc堆的大小调整为合适的大小。 cudaDeviceSetLimit API可以用于此目的。