I am working on a CUDA project, but I have run into some serious problems for which I cannot find a solution.
I implemented the project on my PC (pA) with an NVIDIA Quadro K2000M, and it runs correctly there. However, when I deploy the project on a cluster with an NVIDIA Tesla GPU, and also on another PC (pB) with an NVIDIA GTX 960M, it fails to execute!
The interesting part is that when I run it through the Nsight debugger in Visual Studio on pB (the second PC), it executes without showing the error: unspecified launch failure
Here is the code of the first kernel:
__global__ void calcKernel(float *dev_calcMatrix,
                           int *documentarray,
                           int *documentTermArray,
                           int *distincttermsarray,
                           int *distinctclassarray,
                           int *startingPointOfClassDoc,
                           int *endingPOintOfClassDoc,
                           int sizeOfDistinctClassarray,
                           int sizeOfTerms)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int term = distincttermsarray[index];
    if (index <= sizeOfTerms) {
        for (int i = 0; i < sizeOfDistinctClassarray; i++)
        {
            int save = (index * sizeOfDistinctClassarray) + i;
            bool test = false;
            for (int j = startingPointOfClassDoc[i]; j <= endingPOintOfClassDoc[i]; j++)
            {
                if (term == documentarray[j])
                {
                    printf("%i \t", index);
                    dev_calcMatrix[save] = dev_calcMatrix[save] + documentTermArray[j];
                    //printf("TermArray: documentTermArray[j] %d\n", dev_calcMatrix[save], documentTermArray[j]);
                    test = true;
                }
            }
            if (!test) dev_calcMatrix[save] = 0;
        }
    }
}
Here is the code I use to compute the threads and blocks:
float blockNotFinal = data.sizeOfDistinctTerms / 1024;
int threads = 0;
int blocks = (int)floor(blockNotFinal);
dim3 dimGrid((blocks + 1), 1, 1);
if (data.sizeOfDistinctTerms < 1024)
{
    threads = data.sizeOfDistinctTerms;
}
else
{
    threads = 1024;
}
dim3 dimBlock(threads, 1, 1);
So, I need to create 23,652 threads. What I am doing is 23,652 / 1024 = 23.09. After getting the value 23.09, I round it down to 23 and add 1, giving 24 blocks. So I create 24 blocks * 1024 threads: 24,576 threads.
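(As a side note, the same grid size can be computed without floating-point math by using integer ceiling division. The following is only a sketch of the common idiom, reusing the names from the snippet above; it is not the project's actual code:

const int blockSize = 1024;
int n = data.sizeOfDistinctTerms;               // 23,652 in this example
int blocks = (n + blockSize - 1) / blockSize;   // rounds up: (23652 + 1023) / 1024 = 24
dim3 dimGrid(blocks, 1, 1);
dim3 dimBlock(blockSize, 1, 1);
// Threads with index >= n must still be masked off inside the kernel.

This avoids the float/floor round trip entirely.)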
I know that some threads will be created even though they will not be used, which is why I added this if statement at the beginning of the kernel:
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index <= sizeOfTerms (23,652 is the size)) { .... }
The problem is that I added some printf() calls before the IF statement and inside it.
Before the IF statement, the maximum thread index printed before the crash was 24,479. Inside the IF statement, the maximum thread index printed before the crash was 23,487.
So, from the information above, the thread indices do not reach their maximum. In addition, on the cluster it gives me another error: an illegal memory access was encountered. I know this error usually means an index is out of range, but I allocated the arrays with the same size as the number of threads.
Here is the code where I allocate the memory on the GPU:
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
    goto Error;
}

cout << "\n Allocated GPU buffers";

// Allocate GPU buffers for input and output vectors
cudaStatus = cudaMalloc((void**)&dev_calcMatrix, data.sizeOfDistinctTerms * data.sizeOfDistinctClassarray * sizeof(float));
if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "cudaMalloc failed!");
    goto Error;
}

cudaStatus = cudaMalloc((void**)&dev_probMatrix, data.sizeOfDistinctTerms * data.sizeOfDistinctClassarray * sizeof(float));
if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "cudaMalloc failed!");
    goto Error;
}

cudaStatus = cudaMalloc((void**)&classSummationTerms, data.sizeOfDistinctClassarray * sizeof(int));
if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "cudaMalloc failed!");
    goto Error;
}

cudaStatus = cudaMalloc((void**)&documentarray, data.sizeOfTotalTermsDocsFreq * sizeof(int));
if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "cudaMalloc failed!");
    goto Error;
}

cudaStatus = cudaMalloc((void**)&documentTermArray, data.sizeOfTotalTermsDocsFreq * sizeof(int));
if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "cudaMalloc failed!");
    goto Error;
}

cudaStatus = cudaMalloc((void**)&distincttermsarray, data.sizeOfDistinctTerms * sizeof(int));
if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "cudaMalloc failed!");
    goto Error;
}

cudaStatus = cudaMalloc((void**)&distinctclassarray, data.sizeOfDistinctClassarray * sizeof(int));
if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "cudaMalloc failed!");
    goto Error;
}

cudaStatus = cudaMalloc((void**)&startingPointOfClassDoc, data.sizeOfDistinctClassarray * sizeof(int));
if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "cudaMalloc failed!");
    goto Error;
}

cudaStatus = cudaMalloc((void**)&endingPOintOfClassDoc, data.sizeOfDistinctClassarray * sizeof(int));
if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "cudaMalloc failed!");
    goto Error;
}

cout << "\n Copied input vectors from host to GPU";

// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(documentarray, data.documentarray, data.sizeOfTotalTermsDocsFreq * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "cudaMemcpy failed!");
    goto Error;
}

cudaStatus = cudaMemcpy(documentTermArray, data.documentTermArray, data.sizeOfTotalTermsDocsFreq * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "cudaMemcpy failed!");
    goto Error;
}

cudaStatus = cudaMemcpy(distincttermsarray, data.distincttermsarray, data.sizeOfDistinctTerms * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "cudaMemcpy failed!");
    goto Error;
}

cudaStatus = cudaMemcpy(classSummationTerms, data.classSummationTerms, data.sizeOfDistinctClassarray * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "cudaMemcpy failed!");
    goto Error;
}

cudaStatus = cudaMemcpy(distinctclassarray, data.distinctclassarray, data.sizeOfDistinctClassarray * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "cudaMemcpy failed!");
    goto Error;
}

cudaStatus = cudaMemcpy(startingPointOfClassDoc, data.startingPointOfClassDoc, data.sizeOfDistinctClassarray * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "cudaMemcpy failed!");
    goto Error;
}

cudaStatus = cudaMemcpy(endingPOintOfClassDoc, data.endingPOintOfClassDoc, data.sizeOfDistinctClassarray * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "cudaMemcpy failed!");
    goto Error;
}
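(As a side note, the repeated status checks above can be collapsed into a small helper. The sketch below uses a made-up macro name, CHECK_CUDA, and assumes the enclosing function has the same Error: label used above:

#define CHECK_CUDA(call)                                      \
    do {                                                      \
        cudaError_t err = (call);                             \
        if (err != cudaSuccess) {                             \
            fprintf(stderr, "%s failed: %s\n", #call,         \
                    cudaGetErrorString(err));                 \
            goto Error;                                       \
        }                                                     \
    } while (0)

// Example usage:
CHECK_CUDA(cudaMalloc((void**)&distincttermsarray, data.sizeOfDistinctTerms * sizeof(int)));

This keeps the error handling identical while making the allocation section much shorter.)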
cout << "\n Now we call the CALCKERNL()";
// Launch a kernel on the GPU with one thread for each element.
calcKernel<<<dimGrid, dimBlock>>>(dev_calcMatrix,
                                  documentarray,
                                  documentTermArray,
                                  distincttermsarray,
                                  distinctclassarray,
                                  startingPointOfClassDoc,
                                  endingPOintOfClassDoc,
                                  sizi,
                                  sizeOfTerms);

//// cudaDeviceSynchronize waits for the kernel to finish, and returns
//// any errors encountered during the launch.
//cudaStatus = cudaDeviceSynchronize();
//if (cudaStatus != cudaSuccess) {
//    fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
//    goto Error;
//}

cudaStatus = cudaStreamSynchronize(0);
if (cudaStatus != cudaSuccess) {
    //fprintf(stderr, "calcKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
    cout << "\n Synchronization failed: " << cudaGetErrorString(cudaStatus);
    goto Error;
}

// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "calcKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
    goto Error;
}
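(For reference, a commonly used ordering is to call cudaGetLastError() immediately after the launch, before synchronizing, so that launch-configuration errors are separated from errors raised while the kernel runs. A minimal sketch of that pattern, reusing the names above:

calcKernel<<<dimGrid, dimBlock>>>(/* same arguments as above */);

// Launch errors (e.g. an invalid grid/block configuration) show up here.
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "calcKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
    goto Error;
}

// Errors raised during execution (e.g. an illegal memory access) show up here.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "calcKernel execution failed: %s\n", cudaGetErrorString(cudaStatus));
    goto Error;
})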
Any idea why this is happening?
Answer 0 (score: 1)
This cannot be answered without a Minimal, Complete, and Verifiable example, or at least the complete code. However, the very beginning of your kernel already contains two bugs that can lead to out-of-bounds memory accesses:
int index = blockIdx.x * blockDim.x + threadIdx.x;
int term = distincttermsarray[index];
if (index <= sizeOfTerms) {
First, it is unsafe to use index as an array index before checking that it is within the required range. Second, if sizeOfTerms is the number of array elements, the check needs to be index < sizeOfTerms (not <=).
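Put together, a corrected kernel prologue would look like the sketch below. It only applies the two fixes above and leaves the rest of the kernel unchanged; it has not been tested against the full project:

__global__ void calcKernel(/* ... same parameter list as above ... */)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;

    // Check the bound first, and use '<' because valid indices run
    // from 0 to sizeOfTerms - 1.
    if (index >= sizeOfTerms) return;

    // Only now is it safe to read from the array.
    int term = distincttermsarray[index];

    // ... rest of the kernel body unchanged ...
}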
Answer 1 (score: 0)
An easy way to find and fix this error is to turn on cuda-memcheck, as @tera suggested, and run the code under the CUDA debugger; the debugger should break at the exact moment the error occurs.
My recommendation is Nsight + Visual Studio with TDR turned off, so that it is not a problem if the illegal access takes some time to occur.
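If running under the debugger is inconvenient, cuda-memcheck can also be launched from the command line against the normal binary. A minimal sketch, where app.exe is a placeholder for your actual executable name:

cuda-memcheck app.exe

For each out-of-bounds access it reports the kernel name, the faulting address, and the thread and block coordinates, which usually identifies the bad index directly.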