I am writing a numerical integration program that implements the trapezoidal rule with adaptive step size. Without going into too much detail: the algorithm uses recursion to compute the integral of a hard-coded mathematical function over a given interval to a specified relative tolerance. I have simplified the posted code but kept all the essential points, so some parts may look unnecessary or overly complicated. Here it is:
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cmath>
#include <iostream>
#include <iomanip>
class Integral {
public:
    double value; // the value of the integral
    __device__ __host__ Integral() : value(0) {}
    __device__ __host__ Integral& operator+=(Integral &I);
};
__device__ Integral trapezoid(double a, double b, double tolerance, double fa, double fb);
__device__ __host__ double f(double x); // the integrand function
const int BLOCKS = 1;
const int THREADS = 1;
__global__ void controller(Integral *blockIntegrals, double a, double b, double tolerance) {
    extern __shared__ Integral threadIntegrals[]; // an array of thread-local integrals
    double fa = f(a), fb = f(b);
    threadIntegrals[threadIdx.x] += trapezoid(a, b, tolerance, fa, fb);
    blockIntegrals[blockIdx.x] += threadIntegrals[0];
}
int main() {
    // *************** Input parameters ***************
    double a = 1, b = 800;   // integration bounds
    double tolerance = 1e-7;
    // ************************************************

    cudaError cudaStatus;
    Integral blockIntegrals[BLOCKS]; // an array of total integrals computed by each block
    Integral *devBlockIntegrals;
    cudaStatus = cudaMalloc((void**)&devBlockIntegrals, BLOCKS * sizeof(Integral));
    if (cudaStatus != cudaSuccess)
        std::cout << "cudaMalloc failed!\n";

    double estimate = 0; // a rough 10-point estimate of the whole integral
    double h = (b - a) / 10;
    for (int i = 0; i < 10; i++)
        estimate += f(a + i*h);
    estimate *= h;
    tolerance *= estimate; // compute relative tolerance

    controller<<<BLOCKS, THREADS, THREADS*sizeof(Integral)>>>(devBlockIntegrals, a, b, tolerance);
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess)
        std::cout << "controller launch failed: " << cudaGetErrorString(cudaStatus) << "\n";

    cudaStatus = cudaMemcpy(blockIntegrals, devBlockIntegrals, BLOCKS * sizeof(Integral), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess)
        std::cout << "cudaMemcpy failed: " << cudaGetErrorString(cudaStatus) << "\n";

    Integral result; // final result
    for (int i = 0; i < BLOCKS; i++) // final reduction that sums the results of all blocks
        result += blockIntegrals[i];
    std::cout << "Integral = " << std::setprecision(15) << result.value;

    cudaFree(devBlockIntegrals);
    getchar();
    return 0;
}
__device__ __host__ double f(double x) {
    return log(x);
}
__device__ Integral trapezoid(double a, double b, double tolerance, double fa, double fb) {
    double h = b - a;                     // compute the new step
    double I1 = h*(fa + fb) / 2;          // compute the first integral
    double m = (a + b) / 2;               // find the middle point
    double fm = f(m);                     // function value at the middle point
    h = h / 2;                            // make step two times smaller
    double I2 = h*(0.5*fa + fm + 0.5*fb); // compute the second integral
    Integral I;
    if (abs(I2 - I1) <= tolerance) {      // if tolerance is satisfied
        I.value = I2;
    }
    else {                                // if tolerance is not satisfied
        if (tolerance > 1e-15)            // check that we are not requiring too high precision
            tolerance /= 2;               // request higher precision in every half
        I += trapezoid(a, m, tolerance, fa, fm); // integrate the first half [a, m]
        I += trapezoid(m, b, tolerance, fm, fb); // integrate the second half [m, b]
    }
    return I;
}
__device__ Integral& Integral::operator+=(Integral &I) {
    this->value += I.value;
    return *this;
}
For simplicity, I am using only one thread here. Now, if I run this code, I get the message "cudaMemcpy failed: an illegal memory access was encountered". When I run cuda-memcheck, I get this error:
========= Invalid __local__ write of size 4
========= at 0x00000b18 in C:/Users/User/Desktop/Integrator Stack/Integrator_GPU/kernel.cu:73:controller(Integral*, double, double, double)
========= by thread (0,0,0) in block (0,0,0)
========= Address 0x00fff8ac is out of bounds
It says that the problem is with line 73, which is just

double m = (a + b) / 2;

Am I running out of memory at this point?

If I make the integration interval smaller by changing the right bound from b = 800 to b = 700 in main, the program works fine and gives the correct result.

Why do I get an illegal memory access error when I am simply creating a new variable?

Also, I have a CPU version of this same program that works perfectly, so the computational algorithm is most likely correct.
Answer (score: 1)
"Am I running out of memory at this point?"
Not exactly. My guess is that you are running out of call-stack space as the recursion depth grows. The runtime allocates a preset default for the per-thread call stack, usually around 1 kb (although it can vary with hardware and CUDA version). I don't think it takes very long to exceed that once the recursion depth of this function goes beyond about 16 levels.
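As a very rough back-of-envelope check (the real frame size depends on the compiler, architecture, and register spilling, so treat these numbers purely as an illustration): each call to trapezoid passes five doubles and holds roughly six more doubles in locals, so a frame plausibly costs on the order of 5*8 + 6*8 = 88 bytes plus return-address and spill overhead, say ~128 bytes. With a ~1 kb default stack that allows only about 1024 / 128 ≈ 8 nested calls before the stack is exhausted, which is consistent with the "Invalid __local__ write" that cuda-memcheck reports, since the per-thread stack lives in local memory.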
You can query the exact per-thread stack size with cudaDeviceGetLimit and change it with cudaDeviceSetLimit, which should let you run the code correctly at large recursion depths (see the sketch below). In general, heavily recursive code is not a great idea in CUDA; the compiler and hardware will do a much better job with conventional loops than with deeply recursive code.
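For example, a minimal sketch of the stack-limit approach, placed in main before the kernel launch; the 16 kb figure is just an illustrative guess, not a recommended value, so pick something that covers your worst-case recursion depth:

    size_t stackSize = 0;
    cudaDeviceGetLimit(&stackSize, cudaLimitStackSize);             // query the current per-thread stack size (typically ~1 kb)
    std::cout << "Default stack size per thread: " << stackSize << " bytes\n";

    cudaStatus = cudaDeviceSetLimit(cudaLimitStackSize, 16 * 1024); // raise it to 16 kb per thread (illustrative value)
    if (cudaStatus != cudaSuccess)
        std::cout << "cudaDeviceSetLimit failed: " << cudaGetErrorString(cudaStatus) << "\n";

    controller<<<BLOCKS, THREADS, THREADS*sizeof(Integral)>>>(devBlockIntegrals, a, b, tolerance);

Alternatively, rewriting trapezoid around an explicit work queue (an interval stack processed in a plain loop) avoids relying on the per-thread call stack altogether.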