How can I increase the limit for generating prime numbers with the Sieve of Eratosthenes?

Time: 2014-07-17 03:01:24

Tags: memory-management cuda limit primes

What do I need to change in my program to be able to compute primes up to a higher limit?

  • Currently my algorithm only works with numbers up to 85 million. As I see it, it should work with numbers up to 3 billion.

I am writing my own implementation of the Sieve of Eratosthenes in CUDA and I have hit a wall. So far the algorithm seems to work fine for small numbers (below 85 million).

However, when I try to compute primes up to 100 million, 2 billion, or 3 billion, the system freezes (while the computation is running on the CUDA device). A few seconds later my Linux machine comes back to normal (unfreezes), but the CUDA program crashes with the following error message:

CUDA error at prime.cu:129 code=6(cudaErrorLaunchTimeout) "cudaDeviceSynchronize()"

I have a GTX 780 (3 GB) and I allocate the sieve in a char array, so if I were to compute primes up to 100,000, it would allocate 100,000 bytes on the device.

I figured the GPU could hold up to 3 billion numbers since it has 3 GB of memory; however, it only lets me do 85 million at most (85 million bytes = 0.08 GB).
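
As a sanity check on that assumption, here is a minimal standalone sketch (assuming the GTX 780 is device 0) that asks the CUDA runtime how much device memory is actually free via cudaMemGetInfo; a one-byte-per-number sieve should fit as long as its size stays below the reported free amount:

#include <stdio.h>
#include <cuda_runtime.h>

int main()
{
   size_t freeBytes = 0, totalBytes = 0;
   cudaError_t err = cudaMemGetInfo(&freeBytes, &totalBytes);
   if (err != cudaSuccess)
   {
      printf("cudaMemGetInfo failed: %s\n", cudaGetErrorString(err));
      return 1;
   }
   // the free amount is what actually bounds the sieve size; the driver
   // and the display consume part of the 3 GB
   printf("free: %zu bytes, total: %zu bytes\n", freeBytes, totalBytes);
   return 0;
}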

Here is my prime.cu code:

#include <stdio.h>
#include <helper_cuda.h> // checkCudaErrors() - NVIDIA_CUDA-6.0_Samples/common/inc
// #include <cuda.h>
// #include <cuda_runtime_api.h>
// #include <cuda_runtime.h>

typedef unsigned long long int uint64_t;

/******************************************************************************
* kernel that initializes the 1st couple of values in the primes array.
******************************************************************************/
__global__ static void sieveInitCUDA(char* primes)
{
   primes[0] = 1; // value of 1 means the number is NOT prime
   primes[1] = 1; // numbers "0" and "1" are not prime numbers
}

/******************************************************************************
* kernel for sieving the even numbers starting at 4.
******************************************************************************/
__global__ static void sieveEvenNumbersCUDA(char* primes, uint64_t max)
{
   uint64_t index = blockIdx.x * blockDim.x + threadIdx.x + threadIdx.x + 4;
   if (index < max)
      primes[index] = 1;
}

/******************************************************************************
* kernel for finding prime numbers using the sieve of eratosthenes
* - primes: an array of bools. initially all numbers are set to "0".
*           A "0" value means that the number at that index is prime.
* - max: the max size of the primes array
* - maxRoot: the sqrt of max (the other input). we don't wanna make all threads
*   compute this over and over again, so it's being passed in
******************************************************************************/
__global__ static void sieveOfEratosthenesCUDA(char *primes, uint64_t max,
                                               const uint64_t maxRoot)
{
   // get the starting index, sieve only odds starting at 3
   // 3,5,7,9,11,13...
   /* int index = blockIdx.x * blockDim.x + threadIdx.x + threadIdx.x + 3; */

   // apparently the following indexing usage is faster than the one above. Hmm
   int index = blockIdx.x * blockDim.x + threadIdx.x + 3;

   // make sure index won't go out of bounds, also don't start the execution
   // on numbers that are already composite
   if (index < maxRoot && primes[index] == 0)
   {
      // mark off the composite numbers
      for (int j = index * index; j < max; j += index)
      {
         primes[j] = 1;
      }
   }
}

/******************************************************************************
* checkDevice()
******************************************************************************/
__host__ int checkDevice()
{
   // query the Device and decide on the block size
   int devID = 0; // the default device ID
   cudaError_t error;
   cudaDeviceProp deviceProp;
   error = cudaGetDevice(&devID);
   if (error != cudaSuccess)
   {
      printf("CUDA Device not ready or not supported\n");
      printf("%s: cudaGetDevice returned error code %d, line(%d)\n", __FILE__, error, __LINE__);
      exit(EXIT_FAILURE);
   }

   error = cudaGetDeviceProperties(&deviceProp, devID);
   if (deviceProp.computeMode == cudaComputeModeProhibited || error != cudaSuccess)
   {
      printf("CUDA device ComputeMode is prohibited or failed to getDeviceProperties\n");
      return EXIT_FAILURE;
   }

   // Use a larger block size for Fermi and above (see compute capability)
   return (deviceProp.major < 2) ? 16 : 32;
}

/******************************************************************************
* genPrimesOnDevice
* - inputs: limit - the largest prime that should be computed
*           primes - an array of size [limit], initialized to 0
******************************************************************************/
__host__ void genPrimesOnDevice(char* primes, uint64_t max)
{
   int blockSize = checkDevice();
   if (blockSize == EXIT_FAILURE)
      return;

   char* d_Primes = NULL;
   int sizePrimes = sizeof(char) * max;
   uint64_t maxRoot = sqrt(max);

   // allocate the primes on the device and set them to 0
   checkCudaErrors(cudaMalloc(&d_Primes, sizePrimes));
   checkCudaErrors(cudaMemset(d_Primes, 0, sizePrimes));

   // make sure that there are no errors...
   checkCudaErrors(cudaPeekAtLastError());

   // setup the execution configuration
   dim3 dimBlock(blockSize);
   dim3 dimGrid((maxRoot + dimBlock.x) / dimBlock.x);
   dim3 dimGridEvens(((max + dimBlock.x) / dimBlock.x) / 2);

   //////// debug
   #ifdef DEBUG
   printf("dimBlock(%d, %d, %d)\n", dimBlock.x, dimBlock.y, dimBlock.z);
   printf("dimGrid(%d, %d, %d)\n", dimGrid.x, dimGrid.y, dimGrid.z);
   printf("dimGridEvens(%d, %d, %d)\n", dimGridEvens.x, dimGridEvens.y, dimGridEvens.z);
   #endif

   // call the kernel
   // NOTE: no need to synchronize after each kernel
   // http://stackoverflow.com/a/11889641/2261947
   sieveInitCUDA<<<1, 1>>>(d_Primes); // launch a single thread to initialize
   sieveEvenNumbersCUDA<<<dimGridEvens, dimBlock>>>(d_Primes, max);
   sieveOfEratosthenesCUDA<<<dimGrid, dimBlock>>>(d_Primes, max, maxRoot);

   // check for kernel errors
   checkCudaErrors(cudaPeekAtLastError());
   checkCudaErrors(cudaDeviceSynchronize());

   // copy the results back
   checkCudaErrors(cudaMemcpy(primes, d_Primes, sizePrimes, cudaMemcpyDeviceToHost));

   // no memory leaks
   checkCudaErrors(cudaFree(d_Primes));
}

To test this code:

#include <cstdlib>   // malloc / free
#include <cstring>   // memset
#include <iostream>  // std::cout

int main()
{
   uint64_t max = 85000000; // 85 million
   char* primes = (char*) malloc(max);
   if (primes == NULL) // check that it allocated correctly
      return EXIT_FAILURE;
   memset(primes, 0, max);

   genPrimesOnDevice(primes, max);

   // if you wish to display results:
   for (uint64_t i = 0; i < max; i++)
   {
      if (primes[i] == 0) // a value of '0' means the number is prime
      {
         std::cout << i; // use printf if you are using C
         if ((i + 1) != max)
            std::cout << ", ";
      }
   }

   free(primes);
   return 0;
}

1 Answer:

Answer 0 (score: 2)

This error:

CUDA error at prime.cu:129 code=6(cudaErrorLaunchTimeout) "cudaDeviceSynchronize()"
doesn't necessarily mean anything except that your kernel is taking too long. It is not necessarily a numerical limit or a computational error; it is a limit the system imposes on how long a kernel is allowed to run. Both Linux and Windows can have such watchdog timers.
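
A quick way to confirm whether this time limit applies to a given GPU (a minimal sketch, assuming the GTX 780 is device 0) is to check the kernelExecTimeoutEnabled field reported by cudaGetDeviceProperties:

#include <stdio.h>
#include <cuda_runtime.h>

int main()
{
   cudaDeviceProp prop;
   if (cudaGetDeviceProperties(&prop, 0) != cudaSuccess)
   {
      printf("cudaGetDeviceProperties failed\n");
      return 1;
   }
   // 1 means the driver enforces a run-time limit (watchdog) on kernels,
   // which is typical when the GPU also drives a display
   printf("%s: kernelExecTimeoutEnabled = %d\n",
          prop.name, prop.kernelExecTimeoutEnabled);
   return 0;
}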

If you want to work around this on Linux, have a look at this document.

You didn't mention it, but I assume your GTX 780 is also driving a display. In that case, kernels are subject to a time limit by default. If you can use another device for the display, then reconfigure your machine so that X does not use the GTX 780, as described in the link. If you don't have another GPU for the display, then the only option is to modify the interactivity setting indicated in the linked document if you want to run long-running kernels. In that case, the keyboard/mouse/display will become unresponsive while a kernel is running, and if a kernel runs for too long it can be difficult to recover the machine; a hard reboot may be required. (You could also SSH into the machine and kill the process that is using the GPU for CUDA.)
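
If reconfiguring the display isn't practical, another way to stay under the watchdog, sketched only roughly here and not tested, is to split the marking phase into windows of the sieve array so that no single launch runs long enough to trip the timeout. The sieveWindowCUDA kernel and the window size below are hypothetical adaptations of your sieveOfEratosthenesCUDA; the loop would replace the single sieveOfEratosthenesCUDA launch inside genPrimesOnDevice:

// hypothetical variant of sieveOfEratosthenesCUDA: each launch only marks
// composites that fall inside the window [winLo, winHi) of the sieve array
__global__ static void sieveWindowCUDA(char *primes, uint64_t winLo,
                                       uint64_t winHi, uint64_t maxRoot)
{
   uint64_t p = (uint64_t)blockIdx.x * blockDim.x + threadIdx.x + 3;
   if (p >= maxRoot || primes[p] != 0)
      return;

   // first multiple of p that is >= p*p and >= winLo
   uint64_t start = p * p;
   if (start < winLo)
      start = ((winLo + p - 1) / p) * p;

   for (uint64_t j = start; j < winHi; j += p)
      primes[j] = 1;
}

// inside genPrimesOnDevice, in place of the sieveOfEratosthenesCUDA launch:
const uint64_t window = 64ULL * 1024 * 1024; // sieve bytes per launch; tune so each launch stays short
for (uint64_t lo = 0; lo < max; lo += window)
{
   uint64_t hi = (lo + window < max) ? lo + window : max;
   sieveWindowCUDA<<<dimGrid, dimBlock>>>(d_Primes, lo, hi, maxRoot);
   checkCudaErrors(cudaDeviceSynchronize()); // each window finishes before the next launch
}

The marking is the same as in your kernel (redundantly marking a composite's multiples is harmless); only the range each launch touches is bounded, so each launch can finish before the watchdog fires.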