Problem with large array sizes in CUDA

Time: 2012-02-10 19:43:11

Tags: c memory cuda

I am getting familiar with CUDA by writing a dot-product calculator. I wanted to test it with large arrays for a timing study, comparing two different ways of collecting the vector sum. However, when the array size goes above 1024 I get errors, and I am not sure where the problem is coming from. The card is a GTX 460M with 1.5 GB of memory, and I am also using it to drive the display (this is a laptop). Beyond that, I am not sure where the problem could come from.

Here is the nvcc compile line:

nvcc D:\Research\CUDA\TestCode\test_dotProduct_1.cu --use_fast_math --gpu-architecture sm_13 --compiler-bindir="D:\Programming\VisualStudio\2010express\VC\bin" --machine 32 -o multi_dot.exe

I also seem to have problems compiling for 64-bit, but that is a separate issue.

Here is the output for an array of size 1024:
    HOST CALCULATION:     357389824.000000
    DEV PARA CALCULATION: 357389824.000000
    DEV SERI CALCULATION: 357389824.000000

Here is the output for an array of size 2048:
    HOST CALCULATION:     2861214720.000000
    DEV PARA CALCULATION: -1.#INF00
    DEV SERI CALCULATION: -1.#INF00

Here is my code:

 /*Code for a CUDA test project doing a basic dot product with doubles
*
*
*
*/
 #include <stdio.h>
 #include <cuda.h>


 __global__ void GPU_parallelDotProduct(double *array_a, double *array_b, double *array_c){
     array_c[threadIdx.x] = array_a[threadIdx.x] * array_b[threadIdx.x];
 }

 __global__ void GPU_parallelSumVector(double *vector, double *sum, int base){
    sum[threadIdx.x + blockIdx.x] = vector[blockIdx.x + threadIdx.x * base] +
                                    vector[blockIdx.x + threadIdx.x * base + 1];
 }

__global__ void GPU_serialSumVector(double *vector, double *sum, int dim){
     for(int i = 0; i < dim; ++i){
         sum[0] += vector[i];
     }
}

__host__ void CPU_serialDot(double *first, double *second, double *dot, int dim){
     for(int i=0; i<dim; ++i){
         dot[0] += first[i] * second[i];
     }
 }

__host__ void CPU_serialSetupVector(double *vector, int dim, int incrSize, int start){
     for(int i=0; i<dim; ++i){
         vector[i] = start + i * incrSize;
     }
 }

 int main(){
     //define array size to be used
     //int i,j;
     const int VECTOR_LENGTH = 2048;
           int SUM_BASE      = 2;
           int SUM_ROUNDS    = VECTOR_LENGTH / SUM_BASE;
           int ELEMENT_SIZE  = sizeof(double);
           //   int currentSize   = VECTOR_LENGTH;
     //arrays for dot product
     //host
     double *array_a                  = (double*) malloc(VECTOR_LENGTH * ELEMENT_SIZE);
     double *array_b                  = (double*) malloc(VECTOR_LENGTH * ELEMENT_SIZE);
     double *dev_dot_product_parallel = (double*) malloc(VECTOR_LENGTH * ELEMENT_SIZE);
     double *dev_dot_product_serial   = (double*) malloc(VECTOR_LENGTH * ELEMENT_SIZE);
     double  host_dot_product         = 0.0;

     //fill with values
     CPU_serialSetupVector(array_a, VECTOR_LENGTH, 1, 0);
     CPU_serialSetupVector(array_b, VECTOR_LENGTH, 1, 0);
     CPU_serialDot(array_a, array_b, &host_dot_product, VECTOR_LENGTH);

     //device
     double *dev_array_a;
     double *dev_array_b;
     double *dev_array_c;
     double *dev_dot_serial;
     double *dev_dot_parallel;
     //allocate cuda memory
     cudaMalloc((void**)&dev_array_a,      ELEMENT_SIZE * VECTOR_LENGTH);
     cudaMalloc((void**)&dev_array_b,      ELEMENT_SIZE * VECTOR_LENGTH);
     cudaMalloc((void**)&dev_array_c,      ELEMENT_SIZE * VECTOR_LENGTH);
     cudaMalloc((void**)&dev_dot_parallel, ELEMENT_SIZE * VECTOR_LENGTH);
     cudaMalloc((void**)&dev_dot_serial,   ELEMENT_SIZE * VECTOR_LENGTH);


     //copy to from host to device
     cudaMemcpy(dev_array_a, array_a, ELEMENT_SIZE * VECTOR_LENGTH, cudaMemcpyHostToDevice);
     cudaMemcpy(dev_array_b, array_b, ELEMENT_SIZE * VECTOR_LENGTH, cudaMemcpyHostToDevice);
     cudaMemcpy(dev_dot_parallel, &dev_dot_product_parallel, ELEMENT_SIZE, cudaMemcpyHostToDevice);
     cudaMemcpy(dev_dot_serial, &dev_dot_product_serial, ELEMENT_SIZE, cudaMemcpyHostToDevice);

     //perform CUDA dot product
     GPU_parallelDotProduct<<<1, VECTOR_LENGTH>>>(dev_array_a, dev_array_b, dev_array_c);

     //condense a second vector in serial to compare speed up of tree condensing
     GPU_serialSumVector<<<1,1>>>(dev_array_c, dev_dot_serial, VECTOR_LENGTH);

     //condense vector (parallel)
     for(int i=SUM_ROUNDS; i>1; i/=SUM_BASE){
         GPU_parallelSumVector<<<1,i>>>(dev_array_c, dev_array_c, SUM_BASE);
     }
     GPU_parallelSumVector<<<1,1>>>(dev_array_c, dev_array_c, SUM_BASE);


     //get computed product back to the machine
     cudaMemcpy(dev_dot_product_parallel, dev_array_c, VECTOR_LENGTH * ELEMENT_SIZE, cudaMemcpyDeviceToHost);
     cudaMemcpy(dev_dot_product_serial, dev_dot_serial, VECTOR_LENGTH * ELEMENT_SIZE, cudaMemcpyDeviceToHost);

     FILE *output = fopen("test_dotProduct_1.txt", "w");
     fprintf(output, "HOST CALCULATION:     %f \n", host_dot_product);
     fprintf(output, "DEV PARA CALCULATION: %f \n", dev_dot_product_parallel[0]);
     fprintf(output, "DEV SERI CALCULATION: %f \n", dev_dot_product_serial[0]);
     /*
     fprintf(output, "VALUES OF DEV_ARRAY_C VEC: \n");
     for(int i=0; i<VECTOR_LENGTH; ++i){
        fprintf(output, "value %i is: %f \n", i, dev_dot_product_parallel[i]);
     }
     */
     free(array_a);
     free(array_b);
     //free(host_dot_product);
     cudaFree(dev_array_a);
     cudaFree(dev_array_b);
     cudaFree(dev_array_c);
     cudaFree(dev_dot_parallel);
     cudaFree(dev_dot_serial);

    return(0);
}        

1 Answer:

Answer 0 (score: 6):

The maximum number of threads per block on your card is 1024, which is why you are getting the error (for some older cards the limit is 512). You need to either split the work across multiple block dimensions (again limited to 1024 in the card's x, y, and z directions) or use multiple blocks in the grid.
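
For reference, here is a minimal sketch (not part of the original answer) of what the "multiple blocks in the grid" option could look like for the element-wise multiply kernel, assuming a fixed block size of 256 threads. The kernel and variable names follow the question's code; the added dim parameter, the bounds check, and the block-count calculation are illustrative assumptions:

    // Hypothetical rework of GPU_parallelDotProduct using many blocks, so the
    // vector length is no longer capped by the 1024-threads-per-block limit.
    __global__ void GPU_parallelDotProduct(double *array_a, double *array_b,
                                           double *array_c, int dim){
        int idx = blockIdx.x * blockDim.x + threadIdx.x;  // global element index
        if(idx < dim){                                    // guard the final partial block
            array_c[idx] = array_a[idx] * array_b[idx];
        }
    }

    // Launch with enough 256-thread blocks to cover VECTOR_LENGTH elements.
    int threadsPerBlock = 256;
    int blocks = (VECTOR_LENGTH + threadsPerBlock - 1) / threadsPerBlock;
    GPU_parallelDotProduct<<<blocks, threadsPerBlock>>>(dev_array_a, dev_array_b,
                                                        dev_array_c, VECTOR_LENGTH);

With VECTOR_LENGTH = 2048 this launches 8 blocks of 256 threads, so the launch is no longer bound by the per-block thread cap; the two reduction kernels in the question would need a similar rework before they could be driven with more than 1024 threads per launch.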