Simple CUDA vector search/index program not working correctly

Asked: 2015-11-11 01:07:12

Tags: c++ cuda

I'm running into some problems trying to write a CUDA program. The idea is to add two random arrays and record the indices where the sum is above a certain value. I had the program working, but when I change the threshold and recompile, it looks as though the value never changed. I have a shell script for compiling that deletes the old executable and .o files. I'm new to CUDA, so maybe it's something as simple as a compilation issue, or maybe I'm not freeing memory correctly. I free the arrays on both the device and the host, but I'm not sure how to free the two floats I declare on the device. Any help would be appreciated, thanks in advance.

Specs: NVIDIA GT 220 on the 340.93 driver, CUDA 6.5

Here's kernel.cu:

#include <cuda_runtime.h> 
#include <stdio.h>

// Global counter of how many indices have been stored so far.
__device__ int dev_count = 0;

// Append a matching index to the output array, using an atomic counter
// so that threads from any block can push concurrently.
__device__ void my_push_back(int gg, float *index, int num)
{
int insert_pt = atomicAdd(&dev_count, 1);
  if (insert_pt < num)
  {
  index[insert_pt] = gg;
  }
}

// C = A + B; record every index i whose sum falls strictly between
// dev_value and dev_value2.
__global__ void
vectorAdd(const float *A, const float *B, float *C, float *index, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
float dev_value = 1.990;   // lower threshold
float dev_value2 = 2.000;  // upper threshold

  if (i < numElements)
  {
  C[i] = A[i] + B[i];
  float temp = C[i];
    if(temp > dev_value && temp < dev_value2)
    {
    my_push_back(i, index, numElements);

    }

  }
}


void kernel_wrap(float *h_A, float *h_B, float *h_C, float *h_Index, int numElements)
{
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
size_t size = numElements * sizeof(float);
// Print the vector length to be used, and compute its size


//now allocate memory on device GPU
    // Allocate the device input vector A
    float *d_A = NULL;
    err = cudaMalloc((void **)&d_A, size);

    if (err != cudaSuccess)
    {
    fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
    }

    // Allocate the device input vector B
    float *d_B = NULL;
    err = cudaMalloc((void **)&d_B, size);

    if (err != cudaSuccess)
    {
    fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
    }

    // Allocate the device output vector C
    float *d_C = NULL;
    err = cudaMalloc((void **)&d_C, size);

    if (err != cudaSuccess)
    {
    fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
    }

    // Allocate the device index vector
    float *d_Index = NULL;
    err = cudaMalloc((void **)&d_Index, size);

    if (err != cudaSuccess)
    {
    fprintf(stderr, "Failed to allocate device vector Index (error code %s)!\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
    }

    // Copy the host input vectors A and B in host memory to the device input vectors in
    // device memory
    printf("Copy input data from the host memory to the CUDA device\n");
    err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);

    if (err != cudaSuccess)
    {
    fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
    }

    err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);

    if (err != cudaSuccess)
    {
    fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
    }

// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 256;
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, d_Index, numElements);
err = cudaGetLastError();

if (err != cudaSuccess)
{
    fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
}

//Retrieve data from GPU memory

// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);

if (err != cudaSuccess)
{
    fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
}

    printf("Copy output data from the CUDA device to the host memory\n");
err = cudaMemcpy(h_Index, d_Index, size, cudaMemcpyDeviceToHost);

if (err != cudaSuccess)
{
    fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
}

//Free up memory on GPU

// Free device global memory
err = cudaFree(d_A);

if (err != cudaSuccess)
{
    fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
}

err = cudaFree(d_B);

if (err != cudaSuccess)
{
    fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
}

err = cudaFree(d_C);

if (err != cudaSuccess)
{
    fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
}

 err = cudaFree(d_Index);

if (err != cudaSuccess)
{
    fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
}


// Free host memory



// Reset the device and exit
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice.  It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
err = cudaDeviceReset();

if (err != cudaSuccess)
{
    fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
}

printf("Done\n");

}
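
One detail worth noting about the push-back pattern in kernel.cu, sketched here under the assumption that kernel_wrap might be called more than once in a process: dev_count lives in device global memory and is only zero-initialized when the module is loaded, so the counter would keep growing across launches unless it is reset. Resetting it before the launch, and reading it back afterwards to learn how many indices were actually stored, could look roughly like this (the names zero and h_count are illustrative, not from the original code):

// Hypothetical additions inside kernel_wrap(), using the dev_count symbol
// defined in kernel.cu.
int zero = 0;
// Reset the device-side counter before launching the kernel.
err = cudaMemcpyToSymbol(dev_count, &zero, sizeof(int));
if (err != cudaSuccess)
{
    fprintf(stderr, "Failed to reset dev_count (error code %s)!\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
}

// ... kernel launch and device-to-host copies as above ...

// Read back how many indices were actually recorded; the preceding
// synchronous cudaMemcpy calls guarantee the kernel has finished.
int h_count = 0;
err = cudaMemcpyFromSymbol(&h_count, dev_count, sizeof(int));
if (err != cudaSuccess)
{
    fprintf(stderr, "Failed to read back dev_count (error code %s)!\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
}
printf("Recorded %d matching indices\n", h_count);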

Here's main.cpp:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <iostream>
#include <cstdlib>
#include <fstream>
#include <vector>
#include <algorithm>
#include <sstream>

extern void kernel_wrap(float *h_A, float *h_B, float *h_C, float *h_Index, int numElements);

int main(int argc, char *argv[]){

int numElements = 50000;
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);

//allocate memory for the vectors

// Allocate the host input vector A
float *h_AA = (float *)malloc(size);

// Allocate the host input vector B
float *h_BB = (float *)malloc(size);

// Allocate the host output vector C
float *h_CC = (float *)malloc(size);

float *h_Indexx = (float *)malloc(size);

// Verify that allocations succeeded
if (h_AA == NULL || h_BB == NULL || h_CC == NULL || h_Indexx == NULL)
{
    fprintf(stderr, "Failed to allocate host vectors!\n");
    exit(EXIT_FAILURE);
}

//create initial values for A and B

// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
    h_AA[i] = rand()/(float)RAND_MAX;
    h_BB[i] = rand()/(float)RAND_MAX;
}

    for (int i = 0; i < numElements; ++i)
{
    h_Indexx[i] = -1;
}



kernel_wrap(h_AA, h_BB, h_CC, h_Indexx, numElements);

for (int i = 0; i < numElements; ++i)
{

int temp = h_Indexx[i];
if(temp > 0 && temp <= numElements)
{
float tomp = h_AA[temp];
float tom = h_BB[temp];
float to = h_CC[temp];
std::cout << "\n@ i = "<< temp << " is  " << tomp << " + " << tom << " = " << to;
}
}
free(h_AA);
free(h_BB);
free(h_CC);
free(h_Indexx);




return 0;
}

Here's how I compile:

rm main
rm *.o
g++ -c main.cpp
nvcc -arch=sm_11 -c kernel.cu 
nvcc -o main main.o kernel.o

And sample output, first with dev_value @ 1.99:

@ i = 39948 is 0.998919 + 0.993153 = 1.99207

Then with dev_value @ 1.98:

@ i = 5485 is 0.986223 + 0.995066 = 1.98129

@ i = 1348 is 0.999652 + 0.983039 = 1.98269

@ i = 6921 is 0.992085 + 0.992336 = 1.98442

@ i = 24666 is 0.993531 + 0.994337 = 1.98787

@ i = 27882 is 0.985079 + 0.998244 = 1.98332

@ i = 39948 is 0.998919 + 0.993153 = 1.99207

@ i = 46811 is 0.992908 + 0.993858 = 1.98677

@ i = 47066 is 0.991757 + 0.992284 = 1.98404

Then with dev_value back at 1.99:

@ i = 39948 is 0.998919 + 0.993153 = 1.99207

@ i = 1348 is 0.999652 + 0.983039 = 1.98269

@ i = 6921 is 0.992085 + 0.992336 = 1.98442

@ i = 24666 is 0.993531 + 0.994337 = 1.98787

@ i = 27882 is 0.985079 + 0.998244 = 1.98332

@ i = 39948 is 0.998919 + 0.993153 = 1.99207

@ i = 46811 is 0.992908 + 0.993858 = 1.98677

@ i = 47066 is 0.991757 + 0.992284 = 1.98404

Not sure what's going on. Recompiling doesn't fix the problem, but oddly, if I change the variable name it usually starts working again.

1 Answer:

Answer 0 (score: 1)

As Robert Crovella said:

You never initialize the index array on the device to -1. You set it to -1 on the host side, but you never copy that data to the device. Add an extra host->device cudaMemcpy operation before the kernel launch to copy h_Index to d_Index, and I think your problem will be solved. And when I did that, all of the initcheck errors went away.

That is, I added:

err = cudaMemcpy(d_Index, h_Index, size, cudaMemcpyHostToDevice);
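
In context, that copy would sit in kernel_wrap right after the copy of h_B and before the kernel launch, with the same error-check pattern as the rest of the wrapper (a minimal sketch; the error message wording is mine):

// Copy the host index array (already filled with -1) to the device, so the
// kernel starts from known values instead of whatever cudaMalloc handed back.
err = cudaMemcpy(d_Index, h_Index, size, cudaMemcpyHostToDevice);

if (err != cudaSuccess)
{
    fprintf(stderr, "Failed to copy vector Index from host to device (error code %s)!\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
}

This also fits the original symptom: cudaMalloc does not clear memory, so d_Index could still contain index values left over from an earlier run with a different threshold, which is presumably why the old results appeared to survive a recompile. The initcheck errors mentioned above come from cuda-memcheck's initcheck tool (cuda-memcheck --tool initcheck ./main), which reports reads of uninitialized device memory like this one.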