Should I declare the array with the number of GPU blocks as its inner or its outer dimension?
For example, should I do
int payload[LEN][BLOCKS];
or
int payload[BLOCKS][LEN];
where LEN is a very large number.
I plan to have each block walk over the array, keeping its block index fixed and iterating along the LEN dimension.
Answer (score: 1)
Assuming you are going to access the data in a block-centric manner, you want the latter, payload[BLOCKS][LEN]. Presumably this is because when a block loads the first element along the LEN dimension, it has already paid the cache-miss cost for the next seven or so elements, which sit in the same cache line. With the first option there may be some cache-line sharing between GPU blocks, but that sharing is relatively limited and not at the lowest cache level.
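To make the arithmetic behind that concrete, here is a small host-only sketch (an illustration added for this write-up; the 32-byte sector size in the comment is an assumption about typical NVIDIA L2 transaction granularity, not something measured in the answer):

#include <cstdio>
#include <cstddef>

#define BLOCKS 80

int main() {
    // payload[LEN][BLOCKS]: for a fixed block b, element (i+1, b) sits one full
    // row of BLOCKS ints past element (i, b).
    size_t stride_option1 = BLOCKS * sizeof(int);   // 320 bytes per i-step
    // payload[BLOCKS][LEN]: for a fixed block b, element (b, i+1) is simply the next int.
    size_t stride_option2 = sizeof(int);            // 4 bytes per i-step
    printf("option 1: %zu bytes between consecutive loads of one block\n", stride_option1);
    printf("option 2: %zu bytes between consecutive loads of one block\n", stride_option2);
    // Assuming 32-byte sectors, 8 consecutive ints share one sector.
    printf("ints per 32-byte sector: %zu\n", (size_t)32 / sizeof(int));
    return 0;
}

With 4-byte ints, option 2 means eight consecutive loads by one block fall into the same 32-byte sector, so one miss covers the next seven loads, whereas option 1's 320-byte stride makes every load touch a different sector.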
In practice, the code below reports that the second option takes 0.481 seconds to execute while the first takes 0.979 seconds; aligning the data with the blocks along the outer dimension is roughly twice as fast.
#include <cuda_runtime_api.h>
#include <cuda.h>
#include <chrono>
#include <cstdio>
#include <cstdlib>
#include <iostream>

#define BLOCKS 80
#define LEN (1 << 20)

void CheckCudaErrorAux(const char *file, unsigned line, const char *statement, cudaError_t err) {
    if (err == cudaSuccess)
        return;
    std::cerr << statement << " returned " << cudaGetErrorString(err)
              << " (" << err << ") at " << file << ":" << line << std::endl;
    exit(1);
}
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__, __LINE__, #value, value)

// Option 1: the block index is the innermost (contiguous) dimension.
struct Data1 {
    int payload[LEN][BLOCKS];
};

// Option 2: the LEN index is the innermost (contiguous) dimension.
struct Data2 {
    int payload[BLOCKS][LEN];
};

// Each block reads one "column": consecutive i-steps are BLOCKS * sizeof(int) bytes apart.
__global__ void f1(Data1 *data1) {
    int sum = 0;
    for (int i = 0; i < LEN; ++i) {
        sum += data1->payload[i][blockIdx.x];
    }
    printf("block %i has f1 sum %i\n", blockIdx.x, sum);
}

// Each block reads one "row": consecutive i-steps are sizeof(int) bytes apart.
__global__ void f2(Data2 *data2) {
    int sum = 0;
    for (int i = 0; i < LEN; ++i) {
        sum += data2->payload[blockIdx.x][i];
    }
    printf("block %i has f2 sum %i\n", blockIdx.x, sum);
}

int main() {
    Data1 *data1 = (Data1 *) malloc(sizeof(Data1));
    Data2 *data2 = (Data2 *) malloc(sizeof(Data2));
    for (int i = 0; i < LEN; ++i) {
        for (int b = 0; b < BLOCKS; ++b) {
            data1->payload[i][b] = i * b;
            data2->payload[b][i] = i * b;
        }
    }

    Data1 *data1_on_gpu;
    CUDA_CHECK_RETURN(cudaMalloc(&data1_on_gpu, sizeof(Data1)));
    Data2 *data2_on_gpu;
    CUDA_CHECK_RETURN(cudaMalloc(&data2_on_gpu, sizeof(Data2)));
    CUDA_CHECK_RETURN(cudaMemcpy(data1_on_gpu, data1, sizeof(Data1), cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(data2_on_gpu, data2, sizeof(Data2), cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaDeviceSynchronize());

    std::chrono::time_point<std::chrono::system_clock> t1 = std::chrono::system_clock::now();
    f1<<<BLOCKS, 1>>>(data1_on_gpu);
    CUDA_CHECK_RETURN(cudaDeviceSynchronize());
    std::chrono::time_point<std::chrono::system_clock> t2 = std::chrono::system_clock::now();
    f2<<<BLOCKS, 1>>>(data2_on_gpu);
    CUDA_CHECK_RETURN(cudaDeviceSynchronize());
    std::chrono::time_point<std::chrono::system_clock> t3 = std::chrono::system_clock::now();

    std::chrono::duration<double> duration_1_to_2 = t2 - t1;
    std::chrono::duration<double> duration_2_to_3 = t3 - t2;
    printf("timer for 1st took %.3lf\n", duration_1_to_2.count());
    printf("timer for 2nd took %.3lf\n", duration_2_to_3.count());

    cudaFree(data1_on_gpu);
    cudaFree(data2_on_gpu);
    free(data1);
    free(data2);
    return 0;
}
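The listing builds with plain nvcc, e.g. nvcc -O2 layout_test.cu -o layout_test (the file name is just a placeholder). If you would rather measure on the device than with std::chrono, the same timing can be done with CUDA events; below is a minimal, self-contained sketch of that pattern, with a placeholder kernel standing in for f1 and f2 from the listing above.

#include <cuda_runtime_api.h>
#include <cstdio>

#define CUDA_CHECK(call) do { cudaError_t err_ = (call); \
    if (err_ != cudaSuccess) { printf("%s failed: %s\n", #call, cudaGetErrorString(err_)); return 1; } } while (0)

__global__ void busy_loop() {
    // Placeholder for f1/f2 from the listing above: just burn some cycles.
    volatile int sink = 0;
    for (int i = 0; i < (1 << 20); ++i) sink += i;
}

int main() {
    cudaEvent_t start, stop;
    CUDA_CHECK(cudaEventCreate(&start));
    CUDA_CHECK(cudaEventCreate(&stop));

    CUDA_CHECK(cudaEventRecord(start));
    busy_loop<<<80, 1>>>();   // in the benchmark this would be f1<<<BLOCKS, 1>>>(data1_on_gpu), etc.
    CUDA_CHECK(cudaEventRecord(stop));
    CUDA_CHECK(cudaEventSynchronize(stop));

    float ms = 0.0f;
    CUDA_CHECK(cudaEventElapsedTime(&ms, start, stop));
    printf("kernel took %.3f ms\n", ms);

    CUDA_CHECK(cudaEventDestroy(start));
    CUDA_CHECK(cudaEventDestroy(stop));
    return 0;
}

For kernels that run as long as these, both timing approaches should report very similar numbers.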