I am testing the Nvidia cuBLAS library on a GTX Titan. I have the following code:
#include "cublas.h"
#include <stdlib.h>
#include <conio.h>
#include <Windows.h>
#include <iostream>
#include <iomanip>

/* Vector size */
#define N (1024 * 1024 * 32)

/* Main */
int main(int argc, char** argv)
{
    LARGE_INTEGER frequency;
    LARGE_INTEGER t1, t2;
    float* h_A;
    float* h_B;
    float* d_A = 0;
    float* d_B = 0;

    /* Initialize CUBLAS */
    cublasInit();

    /* Allocate host memory for the vectors */
    h_A = (float*)malloc(N * sizeof(h_A[0]));
    h_B = (float*)malloc(N * sizeof(h_B[0]));

    /* Fill the vectors with test data */
    for (int i = 0; i < N; i++)
    {
        h_A[i] = rand() / (float)RAND_MAX;
        h_B[i] = rand() / (float)RAND_MAX;
    }

    QueryPerformanceFrequency(&frequency);
    QueryPerformanceCounter(&t1);

    /* Allocate device memory for the vectors */
    cublasAlloc(N, sizeof(d_A[0]), (void**)&d_A);
    cublasAlloc(N, sizeof(d_B[0]), (void**)&d_B);

    /* Initialize the device vectors with the host vectors */
    cublasSetVector(N, sizeof(h_A[0]), h_A, 1, d_A, 1);
    cublasSetVector(N, sizeof(h_B[0]), h_B, 1, d_B, 1);

    /* Perform the operation using cuBLAS */
    float res = cublasSdot(N, d_A, 1, d_B, 1);

    /* Memory clean up */
    cublasFree(d_A);
    cublasFree(d_B);

    QueryPerformanceCounter(&t2);
    double elapsedTime = (t2.QuadPart - t1.QuadPart) * 1000.0 / frequency.QuadPart;
    std::cout << "GPU time = " << std::setprecision(16) << elapsedTime << std::endl;
    std::cout << "GPU result = " << res << std::endl;

    QueryPerformanceCounter(&t1);
    float sum = 0.0f;
    for (int i = 0; i < N; i++) {
        sum += h_A[i] * h_B[i];
    }
    QueryPerformanceCounter(&t2);
    elapsedTime = (t2.QuadPart - t1.QuadPart) * 1000.0 / frequency.QuadPart;
    std::cout << "CPU time = " << std::setprecision(16) << elapsedTime << std::endl;
    std::cout << "CPU result = " << sum << std::endl;

    free(h_A);
    free(h_B);

    /* Shutdown */
    cublasShutdown();
    _getch();
    return EXIT_SUCCESS;
}
When I run the code, I get the following results:
GPU time = 164.7487009845991
GPU result = 8388851
CPU time = 45.22368030957917
CPU result = 7780599.5
Why is the dot product computed with the cuBLAS library on a GTX Titan 3 times slower than on a single 2.4 GHz Ivy Bridge Xeon core? Whether I increase or decrease the vector size, I get the same outcome: the GPU is slower than the CPU. Switching to double precision doesn't change it.
Answer 0 (score: 9)
Because a dot product uses each vector element only once, it performs almost no arithmetic per byte of data. That means the time spent sending the data to the video card is much greater than the time it takes to compute everything on the CPU, since PCI Express is much slower than RAM.
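To put rough numbers on it (a back-of-the-envelope estimate, assuming a PCIe 2.0 x16 link with about 6 GB/s effective bandwidth): the two vectors hold 2 × 32M × 4 bytes = 256 MiB, so the host-to-device copy alone costs on the order of 256 MiB / 6 GB/s ≈ 45 ms before the GPU has done a single multiply. A CPU core streaming the same 256 MiB from RAM at roughly 10 GB/s finishes the entire dot product in the same ballpark, which lines up with the 45 ms measured above.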
Answer 1 (score: 6)
I think you should read this:
http://blog.theincredibleholk.org/blog/2012/12/10/optimizing-dot-product/
It makes three main points, which I will comment on briefly:
GPUs are good at hiding latency behind large amounts of computation (when you can balance computation against data transfer). Here there is a great deal of memory access and not enough computation to hide the latency: this is a bandwidth-limited problem, and that essentially kills your performance.
Furthermore, the data is read only once, so caching is not exploited at all, whereas CPUs are very good at predicting which data will be accessed next.
On top of that, you are timing the allocation and the transfers as well, so your measurement includes PCI-E bus time, and the PCI-E bus is very slow compared with main-memory access.
All of the above illustrates a case in which a CPU outperforms a massively parallel architecture like a GPU.
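As a concrete way to check the third point, here is a minimal sketch (my own, not from the linked post) that uses CUDA events to time only the cublasSdot call once the data is already resident on the device; it assumes the same legacy cuBLAS API as the question, and error checking is omitted for brevity:

#include "cublas.h"
#include <cuda_runtime.h>

/* Time only the reduction itself, assuming d_A and d_B already hold the data. */
float timed_dot(int n, const float* d_A, const float* d_B, float* ms)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, 0);
    float res = cublasSdot(n, d_A, 1, d_B, 1); /* blocks until the result is back */
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);

    cudaEventElapsedTime(ms, start, stop);     /* elapsed time in milliseconds */
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return res;
}

On a Titan, this compute-only figure should come out far below the 165 ms measured in the question, confirming that allocation and PCI-E transfer dominate the total.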
An optimization for this kind of problem could be: keep the data resident on the device and run many operations on it before copying anything back, so that the one-time PCI-E transfer cost is amortized (a sketch of this follows the link below); or simply leave single-pass, bandwidth-bound operations like this dot product on the CPU.
See also: http://www.nvidia.com/object/nvidia_research_pub_001.html
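For illustration, a minimal sketch of the amortization idea under the question's legacy cuBLAS API (the repetition count is an arbitrary, hypothetical choice; error checking again omitted):

#include "cublas.h"

/* Sketch: pay the PCI-E transfer once, then amortize it over many
   device-side dot products on the resident data. */
float amortized_dots(int n, const float* h_A, const float* h_B, int reps)
{
    float* d_A = 0;
    float* d_B = 0;
    cublasAlloc(n, sizeof(float), (void**)&d_A);
    cublasAlloc(n, sizeof(float), (void**)&d_B);

    /* One-time host-to-device copies. */
    cublasSetVector(n, sizeof(float), h_A, 1, d_A, 1);
    cublasSetVector(n, sizeof(float), h_B, 1, d_B, 1);

    float acc = 0.0f;
    for (int i = 0; i < reps; i++)     /* no transfers inside the loop */
        acc += cublasSdot(n, d_A, 1, d_B, 1);

    cublasFree(d_A);
    cublasFree(d_B);
    return acc;
}

Once the data is on the device, each additional cublasSdot only has to read the two vectors from device memory at several hundred GB/s, which is where a Titan should actually beat the CPU.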