Why does operating on the array elements corrupt the values?

Asked: 2012-02-11 11:59:50

Tags: memory corruption cuda

I am trying to implement Particle Swarm Optimization on CUDA. I partially initialize the data arrays on the host, then allocate memory on the device, copy the data there, and try to continue the initialization on the GPU.

The problem is that when I try to modify the array elements like this,

__global__ void kernelInit(
    float* X, 
    size_t pitch, 
    int width, 
    float X_high, 
    float X_low
) {
    // Silly, but pretty reliable way to address array elements
    unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int r = tid / width;
    int c = tid % width;
    float* pElement = (float*)((char*)X + r * pitch) + c;
    *pElement = *pElement * (X_high - X_low) - X_low;
    //*pElement = (X_high - X_low) - X_low;
}

it corrupts the values and I get 1.#INF00 as the array elements. When I uncomment the last line *pElement = (X_high - X_low) - X_low; and comment out the one before it, it works as expected: I get values like 15.36 and so on.

I think the problem lies in my memory allocation and copying, and/or in how I address a particular array element. I have read the CUDA manual on both of these topics, but I cannot spot the error: whatever I do with the array element, I still get a corrupted array. For example, *pElement = *pElement * 2 gives unreasonably large results such as 779616...00000000.00000, even though pElement is initially supposed to hold just a float in [0;1].
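For reference, a minimal, self-contained sketch of the pitched allocate / copy / address round trip I believe I am following (independent of my actual program; the scale kernel, sizes, and variable names here are only illustrative):

#include <cstdio>
#include <cuda_runtime.h>

__global__ void scale(float *d, size_t pitch, int width, int height, float k) {
    unsigned tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= (unsigned)(width * height)) return;    // guard against extra threads
    int r = tid / width, c = tid % width;
    float *p = (float *)((char *)d + r * pitch) + c;  // rows are pitch bytes apart
    *p *= k;
}

int main() {
    const int width = 2, height = 64;
    float h[height][width];
    for (int i = 0; i < height; i++)
        for (int j = 0; j < width; j++) h[i][j] = 1.0f;

    float *d = NULL;
    size_t dpitch = 0, hpitch = width * sizeof(float);
    cudaMallocPitch(&d, &dpitch, width * sizeof(float), height);
    cudaMemcpy2D(d, dpitch, h, hpitch, width * sizeof(float), height,
                 cudaMemcpyHostToDevice);

    scale<<<(width * height + 31) / 32, 32>>>(d, dpitch, width, height, 2.0f);

    cudaMemcpy2D(h, hpitch, d, dpitch, width * sizeof(float), height,
                 cudaMemcpyDeviceToHost);
    cudaFree(d);
    printf("%f\n", h[0][0]);   // expected 2.0
    return 0;
}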

Here is the full source. Initialization of the arrays begins in main (at the bottom of the source), then the f1 function does the CUDA work and launches the initialization kernel kernelInit:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>

const unsigned f_n = 3;
const unsigned n = 2;
const unsigned p = 64;

typedef struct {
    unsigned k_max;
    float c1;
    float c2;
    unsigned p;
    float inertia_factor;
    float Ef;
    float X_low[f_n];
    float X_high[f_n];
    float X_min[n][f_n];
} params_t;

typedef void (*kernelWrapperType) (
    float *X,
    float *X_highVec,
    float *V,
    float *X_best,
    float *Y,
    float *Y_best,
    float *X_swarmBest,
    bool &termination,
    const float &inertia,
    const params_t *params,
    const unsigned &f
);

typedef float (*twoArgsFuncType) (
    float x1,
    float x2
);

__global__ void kernelInit(
    float* X,
    size_t pitch,
    int width,
    float X_high,
    float X_low
) {
    // Silly, but pretty reliable way to address array elements
    unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int r = tid / width;
    int c = tid % width;
    float* pElement = (float*)((char*)X + r * pitch) + c;
    *pElement = *pElement * (X_high - X_low) - X_low;
    //*pElement = (X_high - X_low) - X_low;
}

__device__ float kernelF1(
    float x1,
    float x2
) {
    float y = pow(x1, 2.f) + pow(x2, 2.f);
    return y;
}

void f1(
    float *X,
    float *X_highVec,
    float *V,
    float *X_best,
    float *Y,
    float *Y_best,
    float *X_swarmBest,
    bool &termination,
    const float &inertia,
    const params_t *params,
    const unsigned &f
) {
    float *X_d = NULL;
    float *Y_d = NULL;

    unsigned length = n * p;
    const cudaChannelFormatDesc desc = cudaCreateChannelDesc<float4>();

    size_t pitch;
    size_t dpitch;
    cudaError_t err;

    unsigned width = n;
    unsigned height = p;

    err = cudaMallocPitch (&X_d, &dpitch, width * sizeof(float), height);
    pitch = n * sizeof(float);
    err = cudaMemcpy2D(X_d, dpitch, X, pitch, width * sizeof(float), height, cudaMemcpyHostToDevice);

    err = cudaMalloc (&Y_d, sizeof(float) * p);
    err = cudaMemcpy (Y_d, Y, sizeof(float) * p, cudaMemcpyHostToDevice);

    dim3 threads; threads.x = 32;
    dim3 blocks; blocks.x = (length/threads.x) + 1;

    kernelInit<<<threads,blocks>>>(X_d, dpitch, width, params->X_high[f], params->X_low[f]);

    err = cudaMemcpy2D(X, pitch, X_d, dpitch, n*sizeof(float), p, cudaMemcpyDeviceToHost);
    err = cudaFree(X_d);

    err = cudaMemcpy(Y, Y_d, sizeof(float) * p, cudaMemcpyDeviceToHost);
    err = cudaFree(Y_d);
}

float F1(
    float x1,
    float x2
) {
    float y = pow(x1, 2.f) + pow(x2, 2.f);
    return y;
}

/*
 * Generates random float in [0.0; 1.0]
 */
float frand(){
    return (float)rand()/(float)RAND_MAX;
}

/*
 * This is the main routine which declares and initializes the integer vector, moves it to the device,
 * launches kernel, brings the result vector back to host and dumps it on the console.
 */
int main() {
    const params_t params = {
        100,
        0.5,
        0.5,
        p,
        0.98,
        0.01,
        {-5.12, -2.048, -5.12},
        {5.12, 2.048, 5.12},
        {{0, 1, 0}, {0, 1, 0}}
    };

    float X[p][n];
    float X_highVec[n];
    float V[p][n];
    float X_best[p][n];
    float Y[p] = {0};
    float Y_best[p] = {0};
    float X_swarmBest[n];

    kernelWrapperType F_wrapper[f_n] = {&f1, &f1, &f1};
    twoArgsFuncType F[f_n] = {&F1, &F1, &F1};

    for (unsigned f = 0; f < f_n; f++) {
        printf("Optimizing function #%u\n", f);

        srand ( time(NULL) );
        for (unsigned i = 0; i < p; i++)
            for (unsigned j = 0; j < n; j++)
                X[i][j] = X_best[i][j] = frand();
        for (int i = 0; i < n; i++)
            X_highVec[i] = params.X_high[f];
        for (unsigned i = 0; i < p; i++)
            for (unsigned j = 0; j < n; j++)
                V[i][j] = frand();
        for (unsigned i = 0; i < p; i++)
            Y_best[i] = F[f](X[i][0], X[i][1]);
        for (unsigned i = 0; i < n; i++)
            X_swarmBest[i] = params.X_high[f];
        float y_swarmBest = F[f](X_highVec[0], X_highVec[1]);

        bool termination = false;
        float inertia = 1.;

        for (unsigned k = 0; k < params.k_max; k++) {
            F_wrapper[f]((float *)X, X_highVec, (float *)V, (float *)X_best, Y, Y_best, X_swarmBest, termination, inertia, &params, f);
        }

        for (unsigned i = 0; i < p; i++) {
            for (unsigned j = 0; j < n; j++) {
                printf("%f\t", X[i][j]);
            }
            printf("F = %f\n", Y[i]);
        }
        getchar();
    }
}

UPDATE: I tried adding error handling like this

err = cudaMallocPitch (&X_d, &dpitch, width * sizeof(float), height);
if (err != cudaSuccess) {
    fprintf(stderr, cudaGetErrorString(err));
    exit(1);
}

after every API call, but it reports nothing and never returns an error (I still get all the results and the program runs to the end).
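Kernel launches return no status of their own and their failures surface asynchronously, so checking only the malloc/memcpy return codes can miss them. A sketch of a fuller checking pattern (the CHECK macro and dummyKernel are illustrative, not part of the original program):

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CHECK(call)                                                      \
    do {                                                                 \
        cudaError_t e = (call);                                          \
        if (e != cudaSuccess) {                                          \
            fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,           \
                    cudaGetErrorString(e));                              \
            exit(1);                                                     \
        }                                                                \
    } while (0)

__global__ void dummyKernel(float *p) { if (p) p[0] = 1.0f; }

int main() {
    float *d = NULL;
    CHECK(cudaMalloc(&d, sizeof(float)));

    dummyKernel<<<1, 1>>>(d);
    CHECK(cudaGetLastError());        // catches launch-configuration errors
    CHECK(cudaDeviceSynchronize());   // catches errors raised while the kernel ran

    CHECK(cudaFree(d));
    return 0;
}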

1 Answer:

Answer 0 (score: 3):

This is an unnecessarily complicated piece of code for what should be a simple repro case, but this jumps out immediately:

const unsigned n = 2;
const unsigned p = 64;

unsigned length = n * p;

dim3 threads; threads.x = 32;
dim3 blocks; blocks.x = (length/threads.x) + 1;

kernelInit<<<threads,blocks>>>(X_d, dpitch, width, params->X_high[f], params->X_low[f]);

So you are first computing an incorrect number of blocks, and then reversing the order of the blocks-per-grid and threads-per-block arguments in the kernel launch. That may well lead to out-of-bounds memory accesses, either corrupting something in GPU memory or causing an unspecified launch failure, which your error handling might not catch. There is a tool called cuda-memcheck, which has shipped with the toolkit since about CUDA 3.0. If you run it, it will give you valgrind-style reports of memory access violations. You should get into the habit of using it, if you are not already doing so.
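A sketch of what the corrected configuration would presumably look like: the grid size comes first in <<<...>>>, the block count is rounded up with a ceiling division, and the kernel guards against the extra threads (this version assumes a height parameter is added to kernelInit, which is my addition, not the question's code):

__global__ void kernelInit(float *X, size_t pitch, int width, int height,
                           float X_high, float X_low) {
    unsigned tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= (unsigned)(width * height)) return;     // skip out-of-range threads
    int r = tid / width;
    int c = tid % width;
    float *pElement = (float *)((char *)X + r * pitch) + c;
    *pElement = *pElement * (X_high - X_low) - X_low;
}

// host side
unsigned length = n * p;
dim3 threads(32);
dim3 blocks((length + threads.x - 1) / threads.x);     // ceiling division
kernelInit<<<blocks, threads>>>(X_d, dpitch, width, height,
                                params->X_high[f], params->X_low[f]);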

As for the infinite values, isn't that to be expected? Your code starts with values in (0,1) and then does

X[i] = X[i] * (5.12--5.12) - -5.12

100 times, which is roughly equivalent to multiplying by 10^100, followed by

X[i] = X[i] * (2.048--2.048) - -2.048

100 times, which is roughly equivalent to multiplying by 4^100, and finally

X[i] = X[i] * (5.12--5.12) - -5.12

another 100 times. So your results should be of the order of 1E250, which is far larger than about 3.4E38, the rough upper limit of representable numbers in IEEE 754 single precision.
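A quick host-side sanity check of that estimate (purely illustrative, not part of the original program): even a single 100-iteration stage of the update overflows single precision.

#include <cstdio>

int main() {
    float x = 0.5f;                         // a value in (0,1)
    for (int k = 0; k < 100; k++)
        x = x * (5.12f - -5.12f) - -5.12f;  // the same update the kernel applies
    printf("%f\n", x);                      // prints inf: 10.24^100 >> 3.4e38
    return 0;
}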