I wrote a CUDA program that runs 1,000,000 binary searches (upper bound) on 1,000,000 different arrays, each containing 100 elements. For simplicity, all 1,000,000 arrays have been combined/flattened into one large array of 100,000,000 elements. Keep in mind that the search value for each binary search is "canned" (a constant stands in for what will eventually be a random number).
I'm trying to compare the speedup (or slowdown) of my CPU versus my Nvidia graphics card. The code below describes my current program, but the CPU portion keeps coming up as "0" [seconds], and my breakpoints in Visual Studio 2013 seem to be ignored. Any ideas about what's going on? I suspect the Thrust functionality I am (or am not) using may be the culprit.
My code:
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <thrust\random.h>
#include <thrust\generate.h>
#include <thrust\copy.h>
#include <thrust\device_vector.h>
#include <curand_kernel.h>
#include <stdio.h>
#include <time.h>
#include <algorithm>
#include <cstdlib>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}
void fillArrayWithRandom(float* inputArray, int inputN)
{
    for (int i = 0; i < inputN; i++)
    {
        inputArray[i] = (float)rand() / float(RAND_MAX);
    }
}
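// Empty kernel, launched once before timing so that one-time CUDA
// startup/launch overhead is not charged to the measured kernel.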
__global__ void warmUp()
{
}
__host__ __device__ int findTarget(float* inputArray, int startingIndex, int endingIndex, float targetValue)
{
    int length = endingIndex - startingIndex;
    if (length > 1)
    {
        int leftSearchIndex = startingIndex + length / 2 + length % 2;
        int rightSearchIndex = endingIndex;
        float leftSearchValue = inputArray[leftSearchIndex];
        float rightSearchValue = inputArray[rightSearchIndex];
        if (leftSearchValue > targetValue)
        {
            return findTarget(inputArray, startingIndex, leftSearchIndex, targetValue);
        }
        else if (rightSearchValue > targetValue)
        {
            return findTarget(inputArray, leftSearchIndex + 1, rightSearchIndex, targetValue);
        }
        else
        {
            return -1;
        }
    }
    else if (inputArray[startingIndex] > targetValue)
    {
        return startingIndex;
    }
    else if (inputArray[endingIndex] > targetValue)
    {
        return endingIndex;
    }
    else
    {
        return -1;
    }
}
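// One upper-bound search per trajectory; threads cover the trajectories
// in a grid-stride loop. Note that the return value of findTarget is
// never used.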
__global__ void findTargets(float* inputArray, int numSubElements, int numTrajectories)
{
    int tId = threadIdx.x + (blockIdx.x * blockDim.x);
    while (tId < numTrajectories)
    {
        int beginIndex = tId*numSubElements;
        int endIndex = beginIndex + numSubElements - 1;
        float randomNumber = 0.5; //static for simplicity
        float searchVal = inputArray[endIndex] * randomNumber;
        findTarget(inputArray, beginIndex, endIndex, searchVal);
        tId += blockDim.x * gridDim.x;
    }
}
int main()
{
    //Initiate example data
    int numTrajectories = 1000000;
    int numSubElements = 100;
    int totalNumElements = numSubElements*numTrajectories; // Size of vector
    thrust::host_vector<float> rVec(totalNumElements);
    thrust::host_vector<float> rVec2(totalNumElements);
    fillArrayWithRandom((float *) &rVec[0], totalNumElements);
    fillArrayWithRandom((float *) &rVec2[0], totalNumElements);
    thrust::device_vector<float> d_vec = rVec;
    thrust::device_vector<float> o_vec(totalNumElements);
    thrust::inclusive_scan(d_vec.begin(), d_vec.end(), o_vec.begin());
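    // Note: the inclusive scan leaves a non-decreasing sequence in o_vec,
    // but the kernel below searches the unscanned d_vec.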
    //GPU timing
    warmUp <<<1, 1>>>();
    int threadsPerBlock = 1024;
    float time_spent_GPU, time_spent_CPU;
    cudaEvent_t start, stop;
    gpuErrchk(cudaEventCreate(&start));
    gpuErrchk(cudaEventCreate(&stop));
    gpuErrchk(cudaEventRecord(start, 0));
    findTargets <<< (numTrajectories + threadsPerBlock - 1)/threadsPerBlock, threadsPerBlock >>>((float *) thrust::raw_pointer_cast(d_vec.data()), numSubElements, numTrajectories);
    gpuErrchk(cudaDeviceSynchronize());
    gpuErrchk(cudaEventRecord(stop, 0));
    gpuErrchk(cudaEventSynchronize(stop));
    gpuErrchk(cudaEventElapsedTime(&time_spent_GPU, start, stop));
    cudaError_t error = cudaGetLastError();
    if (error != cudaSuccess)
    {
        printf("CUDA error: %s\n", cudaGetErrorString(error));
        exit(-1);
    }
    time_spent_GPU = (double)(time_spent_GPU / 1000); // convert ms to s
    double avg_GPU = time_spent_GPU / numTrajectories;
    printf("Avg. GPU Simulation Time: %.17g [sec/sim]\n", avg_GPU);
    //CPU Timing
    clock_t begin_CPU, end_CPU;
    begin_CPU = clock();
    float* rightPointer = &rVec2[0];
    for (int i = 0; i < numTrajectories; ++i)
    {
        float randomNumber = 0.5; //static for simplicity
        float searchVal = rightPointer[i*numSubElements + numSubElements - 1] * randomNumber;
        findTarget(rightPointer, i*numSubElements, i*numSubElements + numSubElements - 1, searchVal);
    }
    end_CPU = clock();
    time_spent_CPU = (double)(end_CPU - begin_CPU) / CLOCKS_PER_SEC;
    double avg_CPU = time_spent_CPU / numTrajectories;
    printf("Avg. CPU Simulation Time: %.17g [sec/sim]\n", avg_CPU);
    printf("CPU/GPU Timing:%.17gx \n", avg_CPU/avg_GPU);
    return 0;
}
My compiler options/output:
1>------ Build started: Project: speedTest, Configuration: Release Win32 ------
1> Compiling CUDA source file kernel.cu...
1>
1> C:\Users\Owner\Documents\Visual Studio 2013\Projects\speedTest\speedTest>"C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v6.5\bin\nvcc.exe" -gencode=arch=compute_50,code=\"sm_50,compute_50\" --use-local-env --cl-version 2013 -ccbin "C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\bin" -rdc=true -I"C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v6.5\include" -I"C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v6.5\include" --keep-dir Release -maxrregcount=0 --machine 32 --compile -cudart static -DCUB_CDP -DWIN32 -DNDEBUG -D_CONSOLE -D_MBCS -Xcompiler "/EHsc /W3 /nologo /O2 /Zi /MD " -o Release\kernel.cu.obj "C:\Users\Owner\Documents\Visual Studio 2013\Projects\speedTest\speedTest\kernel.cu"
1> kernel.cu
1>C:/Users/Owner/Documents/Visual Studio 2013/Projects/speedTest/speedTest/kernel.cu(124): warning C4244: '=' : conversion from 'double' to 'float', possible loss of data
1>C:/Users/Owner/Documents/Visual Studio 2013/Projects/speedTest/speedTest/kernel.cu(139): warning C4244: '=' : conversion from 'double' to 'float', possible loss of data
1>
1> C:\Users\Owner\Documents\Visual Studio 2013\Projects\speedTest\speedTest>"C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v6.5\bin\nvcc.exe" -dlink -o Release\speedTest.device-link.obj -Xcompiler "/EHsc /W3 /nologo /O2 /Zi /MD " -L"C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v6.5\lib\Win32" cudart.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib cudadevrt.lib -gencode=arch=compute_50,code=sm_50 --machine 32 Release\kernel.cu.obj
1> cudart.lib
1> kernel32.lib
1> user32.lib
1> gdi32.lib
1> winspool.lib
1> comdlg32.lib
1> advapi32.lib
1> shell32.lib
1> ole32.lib
1> oleaut32.lib
1> uuid.lib
1> odbc32.lib
1> odbccp32.lib
1> cudadevrt.lib
1> kernel.cu.obj
1> LINK : /LTCG specified but no code generation required; remove /LTCG from the link command line to improve linker performance
1> speedTest.vcxproj -> C:\Users\Owner\Documents\Visual Studio 2013\Projects\speedTest\Release\speedTest.exe
1> copy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v6.5\bin\cudart*.dll" "C:\Users\Owner\Documents\Visual Studio 2013\Projects\speedTest\Release\"
1> C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v6.5\bin\cudart32_65.dll
1> C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v6.5\bin\cudart64_65.dll
1> 2 file(s) copied.
========== Build: 1 succeeded, 0 failed, 0 up-to-date, 0 skipped ==========
Answer 0 (score: 1)
As talonmies already pointed out, the clock() resolution is in milliseconds, so don't use it. Instead, try Boost.Chrono. Using it directly in CUDA source files is a problem, so put the following in a header (Timer.h):
void startTimer();
void endTimer();
double totalTime();
with the implementation in a .cpp file:
#include "Timer.h"
#include <boost/chrono.hpp>
// Nanoseconds
boost::chrono::high_resolution_clock::time_point start, finish;
void startTimer()
{
start = boost::chrono::high_resolution_clock::now();
}
void endTimer()
{
finish = boost::chrono::high_resolution_clock::now();
}
double totalTime() {
return (finish - start).count() / (1e9);
}
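If you would rather avoid the Boost dependency, the same Timer.h interface can be backed by std::chrono (C++11); the following is a minimal sketch under that assumption, not part of the original answer. (Be aware that VS2013's high_resolution_clock has coarse resolution, improved in VS2015, which is presumably why Boost.Chrono is used here.)

#include "Timer.h"
#include <chrono>

// Standard-library equivalent of the Boost timer above.
static std::chrono::high_resolution_clock::time_point t_start, t_finish;

void startTimer()
{
    t_start = std::chrono::high_resolution_clock::now();
}

void endTimer()
{
    t_finish = std::chrono::high_resolution_clock::now();
}

double totalTime()
{
    // Elapsed seconds between startTimer() and endTimer().
    return std::chrono::duration<double>(t_finish - t_start).count();
}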
Then replace
begin_CPU = clock();
with
startTimer();
and
end_CPU = clock();
with
endTimer();
and
time_spent_CPU = (double)(end_CPU - begin_CPU) / CLOCKS_PER_SEC;
with
time_spent_CPU = totalTime();
This results in:
Avg. GPU Simulation Time: 1.7804799301579521e-010 [sec/sim]
Avg. CPU Simulation Time: 6.4100000264286083e-013 [sec/sim]
CPU/GPU Timing:0.0036001529238579829x
(Note that I'm currently running other things on the GPU, so these numbers are probably not very meaningful.)
Answer 1 (score: 0)
It turns out the compiler was actually smart enough to realize I wasn't doing anything with the result of the host-side "findTarget" routine, so it removed the call from the compiled code entirely; it was never even executed (which explains why significantly increasing the simulation count did nothing and why the breakpoints weren't honored).
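A common way to stop the optimizer from discarding the call is to make its result observable, e.g. by accumulating the returned indices into a checksum that gets printed. Here is a minimal sketch of how the CPU loop could be adjusted (hypothetical, not from the original post):

long long checksum = 0; // consuming the results defeats dead-code elimination
for (int i = 0; i < numTrajectories; ++i)
{
    float randomNumber = 0.5; //static for simplicity
    float searchVal = rightPointer[i*numSubElements + numSubElements - 1] * randomNumber;
    checksum += findTarget(rightPointer, i*numSubElements, i*numSubElements + numSubElements - 1, searchVal);
}
printf("Checksum: %lld\n", checksum); // forces the searches to actually run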