This is the same setup as in the post (Cuda - Multiple sums in each vector element). How do I perform 2D block striding in both the x and y directions when the summation limits vary? The 2D algorithm can be seen in the CPU version and in the monolithic kernel. I added OpenMP to the CPU version so that the speedup comparison is fairer. If there is also a way to speed up the CPU function, I'd be glad to find out.
This version of the code takes the 2D arrays and flattens them into 1D arrays. I still use 2D dim3 thread indexing so that I can index the double sum more intuitively.
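As a minimal illustration of that flattening convention (my summary; the accessor below is hypothetical and not part of the post), element (i, j) of a flattened array is addressed by scaling the row index by the leading dimension:
// hypothetical helper showing the row-major flattening used throughout;
// df is the double typedef from the code below, ld is the leading dimension
__host__ __device__ inline double at(const double *a, int ld, int i, int j) {
return a[i * ld + j];
}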
(All credit for the 1D striding code goes to user Robert Crovella.) The code so far is:
#include <stdio.h>
#include <iostream>
#include <cuda.h>
#include <sys/time.h>
typedef double df;
#define USECPSEC 1000000ULL
#define BSX 1<<5
#define BSY 1<<5
#define N 100
#define M 100
const bool sync = true;
const bool nosync = false;
unsigned long long dtime_usec(unsigned long long start, bool use_sync = nosync){
if (use_sync == sync) cudaDeviceSynchronize();
timeval tv;
gettimeofday(&tv, 0);
return ((tv.tv_sec*USECPSEC)+tv.tv_usec)-start;
}
int divUp(int a, int b) {return (a + b - 1) / b;}
float cpu_sum(int n, int m, df *a, df *b, df *c) {
df q, r;
#pragma omp parallel for collapse(2)
for (int x = 0; x < n; x++) {
for (int y = 0; y < m; y++) {
q = 0.0f;
for (int i = 0; i <= x; i++) {
r = 0.0f;
for (int j = 0; j <= y; j++) {
r += a[i * n + j] * b[(x - i) * n + y - j];
}
for (int j = 1; j < m - y; j++) {
r += a[i * n + j] * b[(x - i) * n + y + j]
+ a[i * n + y + j] * b[(x - i) * n + j];
}
q += r;
}
for (int i = 1; i < n-x; i++) {
r = 0.0f;
for (int j = 0; j <= y; j++) {
r += a[i * n + j] * b[(x + i) * n + y - j]
+ a[(x + i) * n + j] * b[ i * n + y - j];
}
for (int j = 1; j < m - y; j++) {
r += a[i * n + j] * b[(x + i) * n + y + j]
+ a[(x + i) * n + y + j] * b[(x + i) * n + j]
+a[(x + i) * n + j] * b[i * n + y + j]
+ a[(x + i) * n + y + j] * b[i * n + j];
}
q += r;
}
c[x * N + y] = 0.25f*q;
}
}
return 0;
}
const int P2 = 5;
const int TPB = 1<<P2;
const unsigned row_mask = ~((0xFFFFFFFFU>>P2)<<P2);
__global__ void chebyprod_imp(int n, int m, df *a, df *b, df *c){
__shared__ df sdata[TPB*TPB];
int x = blockIdx.x;
int y = blockIdx.y;
int row_width_x = (((x)>(n-x))?(x):(n-x))+1;
int row_width_y = (((y)>(m-y))?(y):(m-y))+1;
int strides_x = (row_width_x>>P2) + ((row_width_x&row_mask)?1:0);
int strides_y = (row_width_y>>P2) + ((row_width_y&row_mask)?1:0);
int i = threadIdx.x;
df tmp_a;
df sum = 0.0f;
for (int s=0; s < strides_x; s++) { // block-stride x loop
int j = threadIdx.y;
for (int u=0; u < strides_y; u++) { // block-stride y loop
if (i < n && j < m) {tmp_a = a[i * n + j];}
if (i <= x) {
if (j <= y) {sum += tmp_a * b[(x - i) * n + y - j];}
if ((j > 0) && (j < (m-y))) {sum += tmp_a * b[(x - i) * n + y + j]
+ a[i * n + y + j] * b[(x - i) * n + j];}
}
if ((i > 0) && (i < (n-x))) {
if (j <= y) {sum += tmp_a * b[(x + i) * n + y - j]
+ a[(x + i) * n + j] * b[ i * n + y - j];}
if ((j > 0) && (j < (m-y))) {sum += tmp_a * b[(x + i) * n + y + j]
+ a[(x + i) * n + y + j] * b[(x + i) * n + j]
+ a[(x + i) * n + j] * b[i * n + y + j]
+ a[(x + i) * n + y + j] * b[i * n + j];}
}
j += TPB;
}
i += TPB;
}
sdata[threadIdx.x * TPB + threadIdx.y] = sum;
for (int s = TPB>>1; s > 0; s>>=1) { // sweep reduction in x
for (int u = TPB>>1; u > 0; u>>=1) { // sweep reduction in y
__syncthreads();
if (threadIdx.x < s && threadIdx.y < u) {
sdata[threadIdx.x * TPB + threadIdx.y] += sdata[(threadIdx.x + s) * TPB + threadIdx.y + u];
}
}
}
if (!threadIdx.x && !threadIdx.y) c[x * n + y] = 0.25f*sdata[0];
}
__global__ void chebyprod(int n, int m, df *a, df *b, df *c){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
df q, r;
if (x < n && y < m) {
q = 0.0f;
for (int i = 0; i <= x; i++) {
r = 0.0f;
for (int j = 0; j <= y; j++) {
r += a[i * n + j] * b[(x - i) * n + y - j];
}
for (int j = 1; j < m - y; j++) {
r += a[i * n + j] * b[(x - i) * n + y + j]
+ a[i * n + y + j] * b[(x - i) * n + j];
}
q += r;
}
for (int i = 1; i < n-x; i++) {
r = 0.0f;
for (int j = 0; j <= y; j++) {
r += a[i * n + j] * b[(x + i) * n + y - j]
+ a[(x + i) * n + j] * b[ i * n + y - j];
}
for (int j = 1; j < m - y; j++) {
r += a[i * n + j] * b[(x + i) * n + y + j]
+ a[(x + i) * n + y + j] * b[(x + i) * n + j]
+a[(x + i) * n + j] * b[i * n + y + j]
+ a[(x + i) * n + y + j] * b[i * n + j];
}
q += r;
}
c[x * N + y] = 0.25f*q;
}
}
int main(void){
int size = N*M*sizeof(df);
df *a, *b, *c, *cc, *ci, *d_a, *d_b, *d_c, *d_ci;
a = (df*)malloc(size);
b = (df*)malloc(size);
c = (df*)malloc(size);
cc = (df*)malloc(size);
ci = (df*)malloc(size);
cudaMalloc(&d_a, size);
cudaMalloc(&d_b, size);
cudaMalloc(&d_c, size);
cudaMalloc(&d_ci, size);
#pragma omp parallel for collapse (2)
for (int i = 0; i < N; i++) {
for (int j = 0; j < M; j++) {
a[i * M + j] = 0.1f;
b[i * M + j] = 0.2f;
}
}
unsigned long long dt = dtime_usec(0);
// Perform chebyprod on N elements
cpu_sum(N, M, a, b, cc);
dt = dtime_usec(dt,sync);
printf("Time taken 2D CPU: %fs\n", dt/(float)USECPSEC);
df dtc = dt/(float)USECPSEC;
std::cout << "Vector cc: [ ";
for (int k = 0; k < 10; ++k)
std::cout << cc[k] << " ";
std::cout <<"]\n";
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
dim3 dimBlock(BSX, BSY);
dim3 dimGrid(divUp(N, BSX), divUp(M, BSY));
//std::cout << "dimBlock: " << dimBlock << "\n dimGrid: " << dimGrid << "\n";
dt = dtime_usec(0);
// Perform chebyprod on N elements
chebyprod<<< dimBlock, dimGrid >>>(N, M, d_a, d_b, d_c);
dt = dtime_usec(dt,sync);
printf("Time taken 2D monolithic kernel: %fs\n", dt/(float)USECPSEC);
printf("Speedup: %fs\n", dtc/(dt/(float)USECPSEC));
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
std::cout << "Vector c: [ ";
for (int k = 0; k < 10; ++k)
std::cout << c[k] << " ";
std::cout <<"]\n";
dt = dtime_usec(0);
// Perform chebyprod on N elements
chebyprod_imp<<< dimBlock, dimGrid >>>(N, M, d_a, d_b, d_ci);
dt = dtime_usec(dt,sync);
printf("Time taken 2D stride kernel: %fs\n", dt/(float)USECPSEC);
cudaMemcpy(ci, d_ci, size, cudaMemcpyDeviceToHost);
std::cout << "Vector ci: [ ";
for (int k = 0; k < 10; ++k)
std::cout << ci[k] << " ";
std::cout <<"]\n";
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cudaFree(d_ci);
free(a);
free(b);
free(c);
free(cc);
free(ci);
}
Answer 0: (score: 1)
For me, anyway, the CPU code results don't match between the build with OpenMP support and the build without it if I omit -O3. If I also specify -O3, the OpenMP compilation seems to produce correct results. I'm not sure why that matters for correctness, although it obviously has an effect on CPU code performance.
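A plausible explanation, though this is my assumption rather than something established here, is that q and r are declared outside the parallel loop in cpu_sum, so OpenMP treats them as shared across threads, which is a data race on the accumulators. A minimal sketch of a thread-private variant:
df q, r;
#pragma omp parallel for collapse(2) private(q, r) // give each thread its own q and r
for (int x = 0; x < n; x++) {
for (int y = 0; y < m; y++) {
q = 0.0f;
// ... remainder of the cpu_sum body unchanged ...
}
}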
You seem to have gotten your grid and block sizing backwards:
chebyprod<<< dimBlock, dimGrid >>>(....
The first kernel configuration argument is the grid dimension, not the block dimension. I'm not sure how this came about, since you had done it correctly in your previous question.
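In the corrected code below, the launch reads, grid first, then block:
dim3 dimBlock(BSX, BSY);
dim3 dimGrid(divUp(N, BSX), divUp(M, BSY));
chebyprod<<< dimGrid, dimBlock >>>(N, M, d_a, d_b, d_c);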
As in the previous question, we need to pick a thread strategy and implement it correctly. You seemed to be confused about striding, so hopefully the code below will clarify things. The thread strategy I will use here is one warp per output point. A warp is a group of threads that is 32 (threads) wide in the x dimension and 1 thread in the y dimension. Therefore the loop striding advances by 32 in the x direction, but only by 1 in the y direction, to cover the entire space. The choice of thread strategy also affects grid sizing.
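Stripped to a skeleton (the full kernel appears below), the warp-per-output-point striding looks like this:
int i = threadIdx.x; // lane index within the warp
for (int s = 0; s < strides_x; s++) { // warp-stride loop in x
for (int j = 0; j < strides_y; j++) { // ordinary y loop, stride 1
// accumulate the (i, j) contribution into a per-thread sum
}
i += 32; // advance by the warp width
}
// finally, reduce the 32 per-thread partial sums across the warp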
You also seem to have jumbled the relationships that I think should exist between the two dimensions. The x direction, N, and n should all be connected. Likewise, the y direction, M, and m should all be connected (for example, M is the dimension in the y direction).
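In the code below, this wiring shows up directly in the striding kernel's launch: the grid's x extent comes from N, its y extent from M, and the kernel receives n = N and m = M:
dim3 dimGrid2(N, (M+dimBlock.y-1)/dimBlock.y); // one block per output point in x
chebyprod_imp<<< dimGrid2, dimBlock >>>(N, M, d_a, d_b, d_ci);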
When 2D thread blocks are involved, we want to arrange the indexing on the GPU for coalescing, so that the index that includes threadIdx.x is not multiplied by anything. (A simplified statement of coalescing is that we want adjacent threads in the warp to access adjacent elements in memory. Since threadIdx.x increases by 1 from thread to thread across the warp, we want to use this characteristic to generate adjacent memory indices; multiplying threadIdx.x by anything other than 1 breaks the pattern.) You have this reversed: the index containing threadIdx.x is typically multiplied by the row dimension (N or n). This really cannot be correct, and it does not make for good coalesced access. To fix it, we want to transpose the indexing, and also transpose the data storage for a and b (and therefore c). In the code below, I have transposed the indexing for the data setup of a and b, and the relevant indexing has also been transposed in the striding kernel (only). In your non-striding kernel, and also in your CPU version, I have not transposed the indexing; I leave that as an exercise for you, if needed. Numerically it makes no difference to the results, because your entire a matrix has the same value at every location, and a similar statement can be made about your b matrix. So for this example code the transpose (or not) is irrelevant to the results, but it matters for performance (of the striding kernel, at least). Also note that I believe performing the indexing "transpose" on the "monolithic" kernel should improve its performance as well. I don't know whether it would affect the performance of the CPU version.
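Concretely, for a warp loading from a with lane index i = threadIdx.x, the difference is:
tmp_a = a[j * n + i]; // transposed: adjacent lanes read adjacent addresses (coalesced)
tmp_a = a[i * n + j]; // original: adjacent lanes read addresses n elements apart (uncoalesced)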
I've also added the const ... __restrict__ usage that I included in my previous answer. According to my testing, this provides a noticeable performance benefit on "smaller" GPUs. It is not strictly necessary for correctness, however. Here is a worked example with the above changes that gives numerically matching results for all 3 test cases:
$ cat t1498.cu
#include <stdio.h>
#include <iostream>
#include <cuda.h>
#include <time.h>
#include <sys/time.h>
typedef double df;
#define USECPSEC 1000000ULL
#define BSX 1<<5
#define BSY 1<<5
#define N 100
#define M 100
const bool sync = true;
const bool nosync = false;
unsigned long long dtime_usec(unsigned long long start, bool use_sync = nosync){
if (use_sync == sync) cudaDeviceSynchronize();
timeval tv;
gettimeofday(&tv, 0);
return ((tv.tv_sec*USECPSEC)+tv.tv_usec)-start;
}
int divUp(int a, int b) {return (a + b - 1) / b;}
void cpu_sum(int n, int m, df *a, df *b, df *c) {
df q, r;
#pragma omp parallel for collapse(2)
for (int x = 0; x < n; x++) {
for (int y = 0; y < m; y++) {
q = 0.0f;
for (int i = 0; i <= x; i++) {
r = 0.0f;
for (int j = 0; j <= y; j++) {
r += a[i * n + j] * b[(x - i) * n + y - j];
}
for (int j = 1; j < m - y; j++) {
r += a[i * n + j] * b[(x - i) * n + y + j]
+ a[i * n + y + j] * b[(x - i) * n + j];
}
q += r;
}
for (int i = 1; i < n-x; i++) {
r = 0.0f;
for (int j = 0; j <= y; j++) {
r += a[i * n + j] * b[(x + i) * n + y - j]
+ a[(x + i) * n + j] * b[ i * n + y - j];
}
for (int j = 1; j < m - y; j++) {
r += a[i * n + j] * b[(x + i) * n + y + j]
+ a[(x + i) * n + y + j] * b[(x + i) * n + j]
+a[(x + i) * n + j] * b[i * n + y + j]
+ a[(x + i) * n + y + j] * b[i * n + j];
}
q += r;
}
c[x * N + y] = 0.25f*q;
}
}
}
// choose one warp per output point
const int P2 = 5; // assumes warp size is 32
const unsigned row_mask = ~((0xFFFFFFFFU>>P2)<<P2);
__global__ void chebyprod_imp(int n, int m, const df * __restrict__ a, const df * __restrict__ b, df * __restrict__ c){
int x = blockIdx.x;
int y = threadIdx.y+blockDim.y*blockIdx.y;
int width_x = (((x)>(n-x))?(x):(n-x))+1;
int height_y = (((y)>(m-y))?(y):(m-y))+1;
int strides_x = (width_x>>P2) + ((width_x&row_mask)?1:0);
int strides_y = height_y;
int i = threadIdx.x;
df tmp_a;
df sum = 0.0f;
if ((x < n) && (y < m)){
for (int s=0; s < strides_x; s++) { // warp-stride x loop
for (int j=0; j < strides_y; j++) { // y loop
if (i < n && j < m) {tmp_a = a[j * n + i];}
if (i <= x) {
if (j <= y) {sum += tmp_a * b[(y - j) * n + x - i];}
if ((j > 0) && (j < (m-y))) {sum += tmp_a * b[(y+j) * n + x - i] + a[(y+j)* n + i] * b[j*n+(x - i)];}
}
if ((i > 0) && (i < (n-x))) {
if (j <= y) {sum += tmp_a * b[(y-j) * n + x+i] + a[j*n + (x + i)] * b[(y - j)*n + i];}
if ((j > 0) && (j < (m-y)))
{sum += tmp_a * b[(y+j) * n + x+i]
+ a[(y+j) * n + x + i] * b[j*n+(x + i)]
+ a[j*n + (x + i)] * b[(y+j)*n + i]
+ a[(y+j)*n + x + i] * b[j*n+i];}
}
}
i += 32;
}
// warp-shuffle reduction
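// each step pulls sum from the lane "offset" positions higher, then halves
// the offset; after log2(32) = 5 steps, lane 0 holds the warp-wide total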
for (int offset = warpSize>>1; offset > 0; offset >>= 1)
sum += __shfl_down_sync(0xFFFFFFFFU, sum, offset);
if (!threadIdx.x) c[y*m+x] = 0.25f*sum;}
}
__global__ void chebyprod(int n, int m, df *a, df *b, df *c){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
df q, r;
if (x < n && y < m) {
q = 0.0f;
for (int i = 0; i <= x; i++) {
r = 0.0f;
for (int j = 0; j <= y; j++) {
r += a[i * n + j] * b[(x - i) * n + y - j];
}
for (int j = 1; j < m - y; j++) {
r += a[i * n + j] * b[(x - i) * n + y + j]
+ a[i * n + y + j] * b[(x - i) * n + j];
}
q += r;
}
for (int i = 1; i < n-x; i++) {
r = 0.0f;
for (int j = 0; j <= y; j++) {
r += a[i * n + j] * b[(x + i) * n + y - j]
+ a[(x + i) * n + j] * b[ i * n + y - j];
}
for (int j = 1; j < m - y; j++) {
r += a[i * n + j] * b[(x + i) * n + y + j]
+ a[(x + i) * n + y + j] * b[(x + i) * n + j]
+a[(x + i) * n + j] * b[i * n + y + j]
+ a[(x + i) * n + y + j] * b[i * n + j];
}
q += r;
}
c[x * N + y] = 0.25f*q;
}
}
int main(void){
int size = N*M*sizeof(df);
df *a, *b, *c, *cc, *ci, *d_a, *d_b, *d_c, *d_ci;
a = (df*)malloc(size);
b = (df*)malloc(size);
c = (df*)malloc(size);
cc = (df*)malloc(size);
ci = (df*)malloc(size);
cudaMalloc(&d_a, size);
cudaMalloc(&d_b, size);
cudaMalloc(&d_c, size);
cudaMalloc(&d_ci, size);
#pragma omp parallel for collapse (2)
for (int j = 0; j < M; j++) {
for (int i = 0; i < N; i++) {
a[j * N + i] = 0.1f;
b[j * N + i] = 0.2f;
}
}
unsigned long long dt = dtime_usec(0);
// Perform chebyprod on N elements
cpu_sum(N, M, a, b, cc);
dt = dtime_usec(dt,sync);
printf("Time taken 2D CPU: %fs\n", dt/(float)USECPSEC);
df dtc = dt/(float)USECPSEC;
std::cout << "Vector cc: [ ";
for (int k = 0; k < 10; ++k)
std::cout << cc[k] << " ";
std::cout <<"]\n";
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
dim3 dimBlock(BSX, BSY);
dim3 dimGrid(divUp(N, BSX), divUp(M, BSY));
//std::cout << "dimBlock: " << dimBlock << "\n dimGrid: " << dimGrid << "\n";
dt = dtime_usec(0);
// Perform chebyprod on N elements
chebyprod<<< dimGrid, dimBlock >>>(N, M, d_a, d_b, d_c);
dt = dtime_usec(dt,sync);
printf("Time taken 2D monolithic kernel: %fs\n", dt/(float)USECPSEC);
printf("Speedup: %fs\n", dtc/(dt/(float)USECPSEC));
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
std::cout << "Vector c: [ ";
for (int k = 0; k < 10; ++k)
std::cout << c[k] << " ";
std::cout <<"]\n";
dt = dtime_usec(0);
// Perform chebyprod on N elements
dim3 dimGrid2(N, (M+dimBlock.y-1)/dimBlock.y);
chebyprod_imp<<< dimGrid2, dimBlock >>>(N, M, d_a, d_b, d_ci);
dt = dtime_usec(dt,sync);
printf("Time taken 2D stride kernel: %fs\n", dt/(float)USECPSEC);
printf("Speedup: %fs\n", dtc/(dt/(float)USECPSEC));
cudaMemcpy(ci, d_ci, size, cudaMemcpyDeviceToHost);
std::cout << "Vector ci: [ ";
for (int k = 0; k < 10; ++k)
std::cout << ci[k] << " ";
std::cout <<"]\n";
df max_error = 0;
for (int k = 0; k < N*M; k++)
max_error = fmax(max_error, fabs(c[k] - ci[k]));
std::cout << "Max diff = " << max_error << std::endl;
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cudaFree(d_ci);
free(a);
free(b);
free(c);
free(cc);
free(ci);
}
$ nvcc -O3 -Xcompiler -fopenmp -arch=sm_52 -o t1498 t1498.cu
$ ./t1498
Time taken 2D CPU: 0.034830s
Vector cc: [ 198.005 197.01 196.015 195.02 194.025 193.03 192.035 191.04 190.045 189.05 ]
Time taken 2D monolithic kernel: 0.033687s
Speedup: 1.033930s
Vector c: [ 198.005 197.01 196.015 195.02 194.025 193.03 192.035 191.04 190.045 189.05 ]
Time taken 2D stride kernel: 0.013526s
Speedup: 2.575041s
Vector ci: [ 198.005 197.01 196.015 195.02 194.025 193.03 192.035 191.04 190.045 189.05 ]
Max diff = 8.52651e-13
$
The run above is with CUDA 10.1.105, Fedora 29, GTX 960.
Note that when we run this same test on a Tesla V100, which can take the greatest advantage of the "extra" threads available in the striding-kernel case, the benefit is even more marked.
If you perform the indexing "transpose" on your monolithic kernel, similar to what I did in the striding kernel, I think you will end up in a performance situation roughly similar to where you ended up on your previous question: little or no performance benefit for the striding kernel over the monolithic kernel on a "small" GPU, and roughly a 5x performance benefit on a "large" GPU.
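For reference, a minimal sketch of what that "transpose" might look like in the first inner loop of the monolithic kernel; this is my adaptation of the striding kernel's indexing, not code from the answer, and it assumes a, b, and c are stored transposed as in the setup loop above:
for (int j = 0; j <= y; j++) {
r += a[j * n + i] * b[(y - j) * n + x - i]; // x, which carries threadIdx.x, is no longer scaled by n
}
// analogous swaps apply in the remaining loops, and the store becomes:
// c[y * m + x] = 0.25f*q;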