I have two different programs.
The first does matrix-matrix multiplication with OpenCL. On my GPU it performs far better than on the host CPU (e.g. 0.2 seconds vs. 18 seconds).
The second does matrix-vector multiplication with OpenCL, and it runs slightly slower on the GPU than on the host CPU.
What is the reason?
Here is the kernel:
__kernel void matrixVectorMul(__global float* resultVector,
                              __global float* matrixA,
                              __global float* vectorB,
                              int width_A)
{
    int tx = get_global_id(0);

    float value = 0;
    for (unsigned int k = 0; k < width_A; ++k) {
        value += matrixA[tx * width_A + k] * vectorB[k];
    }
    resultVector[tx] = value;
}
Host code:
#include <stdlib.h>
#define __CL_ENABLE_EXCEPTIONS
#include "cl.hpp"
#include <algorithm> // for std::fill_n
#include <fstream>
#include <iostream>
#include <time.h>
#include <cmath>
#include <vector>    // for std::vector

#define LOCAL_SIZE 512
#define WIDTH_A  (4096*2)
#define HEIGHT_A (4096*2)
float *matrix_A;
float *vector_B;
float *result_vector;
float *result_vector_host;
void randomInit(float *data, int size) {
    for (int i = 0; i < size; ++i)
        data[i] = rand() / (float)RAND_MAX;
}
void GenerateTestData() {
    srand((unsigned int)time(NULL));
    unsigned int size_A = WIDTH_A * HEIGHT_A;

    matrix_A = new float[size_A];
    vector_B = new float[WIDTH_A];
    randomInit(matrix_A, size_A);
    randomInit(vector_B, WIDTH_A);

    result_vector = new float[WIDTH_A];
    result_vector_host = new float[WIDTH_A];
}
void PerformCalculationOnDevice(cl::Device device) {
    clock_t start_t, end_t;
    start_t = clock();

    // Context, queue and device buffers for one device.
    std::vector<cl::Device> contextDevices;
    contextDevices.push_back(device);
    cl::Context context(contextDevices);
    cl::CommandQueue queue(context, device);

    std::fill_n(result_vector, WIDTH_A, 0.0f);
    cl::Buffer cl_matrix_A = cl::Buffer(context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, WIDTH_A * HEIGHT_A * sizeof(float), matrix_A);
    cl::Buffer cl_vector_B = cl::Buffer(context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, WIDTH_A * sizeof(float), vector_B);
    cl::Buffer cl_result_vector = cl::Buffer(context, CL_MEM_WRITE_ONLY | CL_MEM_COPY_HOST_PTR, WIDTH_A * sizeof(float), result_vector);
    end_t = clock();
    std::cout << "Context, queue, buffers " << (float)(end_t - start_t) / CLOCKS_PER_SEC << std::endl;

    // Build the kernel from source.
    std::ifstream sourceFile("MatrixVectorMultiplicationKernel.cl");
    std::string sourceCode(std::istreambuf_iterator<char>(sourceFile), (std::istreambuf_iterator<char>()));
    cl::Program::Sources source(1, std::make_pair(sourceCode.c_str(), sourceCode.length() + 1));
    cl::Program program = cl::Program(context, source);
    program.build(contextDevices);
    cl::Kernel kernel(program, "matrixVectorMul");

    int iArg = 0;
    kernel.setArg(iArg++, cl_result_vector);
    kernel.setArg(iArg++, cl_matrix_A);
    kernel.setArg(iArg++, cl_vector_B);
    kernel.setArg(iArg++, WIDTH_A);

    // Time the kernel execution plus the result read-back.
    start_t = clock();
    queue.enqueueNDRangeKernel(kernel, cl::NullRange, cl::NDRange(HEIGHT_A), cl::NDRange(LOCAL_SIZE));
    queue.finish();
    queue.enqueueReadBuffer(cl_result_vector, CL_TRUE, 0, WIDTH_A * sizeof(float), result_vector);
    end_t = clock();
    std::cout << "enqueueNDRangeKernel and enqueueReadBuffer " << (float)(end_t - start_t) / CLOCKS_PER_SEC << std::endl;
}
void PerformCalculationOnHost() {
    float tmp;
    for (int row_A = 0; row_A < HEIGHT_A; row_A++) {
        tmp = 0;
        for (int col_A = 0; col_A < WIDTH_A; col_A++) {
            tmp += matrix_A[row_A * WIDTH_A + col_A] * vector_B[col_A];
        }
        result_vector_host[row_A] = tmp;
    }
}
int main(int argc, char** argv) {
    GenerateTestData();

    std::vector<cl::Platform> platforms;
    cl::Platform::get(&platforms);
    std::vector<cl::Device> devices;

    clock_t start_t = clock();
    for (unsigned int iPlatform = 0; iPlatform < platforms.size(); iPlatform++) {
        platforms[iPlatform].getDevices(CL_DEVICE_TYPE_ALL, &devices);
        for (unsigned int iDevice = 0; iDevice < devices.size(); iDevice++) {
            try {
                PerformCalculationOnDevice(devices[iDevice]);
            } catch (cl::Error &error) {
                std::cout << error.what() << "(" << error.err() << ")" << std::endl;
            }
        }
    }
    clock_t end_t = clock();
    std::cout << "Device: " << (float)(end_t - start_t) / CLOCKS_PER_SEC << " seconds" << std::endl;

    start_t = clock();
    PerformCalculationOnHost();
    end_t = clock();
    std::cout << "Host: " << (float)(end_t - start_t) / CLOCKS_PER_SEC << " seconds" << std::endl;

    // Compare the device result against the host reference.
    int errors = 0;
    float mean_deviation = 0;
    FILE *f, *f_host;
    f = fopen("device_result", "w");
    f_host = fopen("host_result", "w");
    for (int i = 0; i < WIDTH_A; i++) {
        if (fabs(result_vector[i] - result_vector_host[i]) > 1E-3) {
            errors++;
        }
        fprintf(f, "%.2f\n", result_vector[i]);
        fprintf(f_host, "%.2f\n", result_vector_host[i]);
        mean_deviation += fabs(result_vector[i] - result_vector_host[i]);
    }
    fclose(f);
    fclose(f_host);
    mean_deviation /= WIDTH_A;
    std::cout << "Errors = " << errors << std::endl;
    std::cout << "Mean deviation = " << mean_deviation << std::endl;

    delete[] matrix_A;
    delete[] vector_B;
    delete[] result_vector;
    delete[] result_vector_host;
    return 0;
}
When I run it, I get the following results:
Context, queue, buffers 0.45
enqueueNDRangeKernel and enqueueReadBuffer 1.31
Device: 1.79 seconds
Host: 1.42 seconds
Errors = 0
Mean deviation = 8.78572e-05
Answer 0 (score: 7)
To make the GPU efficient in this case, more work items are needed (one per output value is not enough), and the ratio of computation to memory accesses should be higher (i.e., each loaded value should be reused several times where possible).
If you are interested, I wrote a few pages on this problem a while ago: GPU matrix-vector product.
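As a rough illustration of the "more work items per output value" idea (a sketch of a common pattern, not the code from the article linked above; GROUP_SIZE is a made-up tuning parameter), one work-group can compute a single output row, with each work-item accumulating a strided partial sum that is then reduced in local memory:

// Sketch: one work-group per output row. Launch with a global size of
// HEIGHT_A * GROUP_SIZE and a local size of GROUP_SIZE.
#define GROUP_SIZE 128   // assumed work-group size, must be a power of two

__kernel void matrixVectorMulReduce(__global float* resultVector,
                                    __global const float* matrixA,
                                    __global const float* vectorB,
                                    int width_A)
{
    __local float partial[GROUP_SIZE];
    int row = get_group_id(0);   // one output row per work-group
    int lid = get_local_id(0);

    // Each work-item sums a strided subset of the row's columns.
    float sum = 0.0f;
    for (int k = lid; k < width_A; k += GROUP_SIZE)
        sum += matrixA[row * width_A + k] * vectorB[k];
    partial[lid] = sum;
    barrier(CLK_LOCAL_MEM_FENCE);

    // Tree reduction of the partial sums in local memory.
    for (int s = GROUP_SIZE / 2; s > 0; s >>= 1) {
        if (lid < s)
            partial[lid] += partial[lid + s];
        barrier(CLK_LOCAL_MEM_FENCE);
    }
    if (lid == 0)
        resultVector[row] = partial[0];
}

This turns HEIGHT_A work-items into HEIGHT_A work-groups, giving the GPU far more parallel work to schedule; it also makes consecutive work-items read consecutive matrix elements, so the global loads are coalesced (in the original kernel, neighboring work-items read addresses width_A apart).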
Answer 1 (score: 1)
Have you tried using local memory for vectorB? Every work item reads each of its elements, so it makes sense to read it from local memory. In the kernel below I hard-coded the local memory size to 8192 floats, but you can play with that number. (8192 floats is 32 KB, the guaranteed minimum local memory size in OpenCL 1.1/1.2, so it is the most you can portably use.)
Also, try using a work group size that is a multiple of 16 if you can (64 or 128 should work fine); see the launch sketch after the kernel below.
__kernel void matrixVectorMul(__global float* resultVector,
                              __global float* matrixA,
                              __global float* vectorB,
                              int width_A)
{
    int tx = get_global_id(0);

    // Copy vectorB into local memory once per work-group.
    __local float vectB[4096*2];
    event_t copy_event = async_work_group_copy(vectB, vectorB, 4096*2, 0);
    wait_group_events(1, &copy_event); // takes a pointer to the event

    float value = 0;
    for (unsigned int k = 0; k < width_A; ++k) {
        value += matrixA[tx * width_A + k] * vectB[k];
    }
    resultVector[tx] = value;
}
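A minimal sketch of the corresponding host-side change, assuming the question's host code otherwise stays the same (only the work-group size constant differs, per the multiple-of-16 suggestion):

#define LOCAL_SIZE 128  // multiple of 16; divides HEIGHT_A (8192) evenly

// Same enqueue call as in the question's host code.
queue.enqueueNDRangeKernel(kernel, cl::NullRange,
                           cl::NDRange(HEIGHT_A), cl::NDRange(LOCAL_SIZE));
queue.finish();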