I tried increasing the alignment of the arrays in a synthetic test from 16 to 32, and performance dropped from ~4100 ms to ~4600 ms. How can higher alignment hurt performance?
Below is the code I used for testing (I'm trying to get AVX instructions here). Built with g++ test.cpp -O2 -ftree-vectorize -mavx2
(my CPU doesn't support AVX-512).
#include <chrono>
#include <iostream>
#include <memory>
#include <cassert>
#include <cstring>
#include <cstdlib>
using Time = std::chrono::time_point<std::chrono::system_clock>;
using Clock = std::chrono::system_clock;
template <typename Duration>
auto as_ms(Duration const& duration) {
    return std::chrono::duration_cast<std::chrono::milliseconds>(duration);
}
static const int repeats = 10000;
struct I {
    static const int size = 524288;
    int* pos;
    I() : pos(new int[size]) { for (int i = 0; i != size; ++i) { pos[i] = i; } }
    ~I() { delete[] pos; }
};
static const int align = 16; // change this: 16 (~4100 ms) / 32 (~4600 ms)
struct S {
    static const int size = I::size;
    alignas(align) float data[size];
    S() { for (int i = 0; i != size; ++i) { data[i] = (i * 7 + 11) % 2; } }
};
void foo(const I& p, S& a, S& b) {
    const int chunk = 32;
    alignas(align) float aprev[chunk];
    alignas(align) float anext[chunk];
    alignas(align) float bprev[chunk];
    alignas(align) float bnext[chunk];
    const int N = S::size / chunk;
    for (int j = 0; j != repeats; ++j) {
        for (int i = 1; i != N - 1; i++) {
            int ind = p.pos[i] * chunk;
            std::memcpy(aprev, &a.data[ind - 1], sizeof(float) * chunk);
            std::memcpy(anext, &a.data[ind + 1], sizeof(float) * chunk);
            std::memcpy(bprev, &b.data[ind - 1], sizeof(float) * chunk);
            std::memcpy(bnext, &b.data[ind + 1], sizeof(float) * chunk);
            for (int k = 0; k < chunk; ++k) {
                int ind0 = ind + k;
                a.data[ind0] = (b.data[ind0] - 1.0f) * aprev[k] * a.data[ind0] * bnext[k]
                             + a.data[ind0] * anext[k] * (bprev[k] - 1.0f);
            }
        }
    }
}
int main() {
    S a, b;
    I p;
    Time start = Clock::now();
    foo(p, a, b);
    Time end = Clock::now();
    std::cout << as_ms(end - start).count() << std::endl;
    float sum = 0;
    for (int i = 0; i != S::size; ++i) {
        sum += a.data[i];
    }
    return sum;
}
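As a sanity check (not part of the test above), it can be worth verifying what alignment the arrays actually receive at runtime; a and b are automatic objects, so the compiler should honor alignas here, but it is cheap to confirm. A minimal self-contained sketch; S2 and effective_alignment are names made up for illustration:
#include <cstddef>
#include <cstdint>
#include <cstdio>

struct S2 {                        // reduced mirror of S above
    alignas(32) float data[8];
};

// Effective alignment of a pointer: the lowest set bit of its address.
static std::size_t effective_alignment(const void* p) {
    auto addr = reinterpret_cast<std::uintptr_t>(p);
    return static_cast<std::size_t>(addr & (~addr + 1u));
}

int main() {
    S2 a, b;
    std::printf("a.data: %zu-byte aligned\n", effective_alignment(a.data));
    std::printf("b.data: %zu-byte aligned\n", effective_alignment(b.data));
}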
Checking whether the cache is the problem:
valgrind --tool=cachegrind ./a.out
alignment = 16:
==4352== I refs: 3,905,614,100
==4352== I1 misses: 1,626
==4352== LLi misses: 1,579
==4352== I1 miss rate: 0.00%
==4352== LLi miss rate: 0.00%
==4352==
==4352== D refs: 2,049,454,623 (1,393,712,296 rd + 655,742,327 wr)
==4352== D1 misses: 66,707,929 ( 66,606,998 rd + 100,931 wr)
==4352== LLd misses: 66,681,897 ( 66,581,942 rd + 99,955 wr)
==4352== D1 miss rate: 3.3% ( 4.8% + 0.0% )
==4352== LLd miss rate: 3.3% ( 4.8% + 0.0% )
==4352==
==4352== LL refs: 66,709,555 ( 66,608,624 rd + 100,931 wr)
==4352== LL misses: 66,683,476 ( 66,583,521 rd + 99,955 wr)
==4352== LL miss rate: 1.1% ( 1.3% + 0.0% )
alignment = 32:
==4426== I refs: 2,857,165,049
==4426== I1 misses: 1,604
==4426== LLi misses: 1,560
==4426== I1 miss rate: 0.00%
==4426== LLi miss rate: 0.00%
==4426==
==4426== D refs: 1,558,058,149 (967,779,295 rd + 590,278,854 wr)
==4426== D1 misses: 66,706,930 ( 66,605,998 rd + 100,932 wr)
==4426== LLd misses: 66,680,898 ( 66,580,942 rd + 99,956 wr)
==4426== D1 miss rate: 4.3% ( 6.9% + 0.0% )
==4426== LLd miss rate: 4.3% ( 6.9% + 0.0% )
==4426==
==4426== LL refs: 66,708,534 ( 66,607,602 rd + 100,932 wr)
==4426== LL misses: 66,682,458 ( 66,582,502 rd + 99,956 wr)
==4426== LL miss rate: 1.5% ( 1.7% + 0.0% )
It seems the problem is not in the cache.
Checking whether Turbo Boost is responsible:
alignment: 16 -> 32
with Turbo Boost enabled: ~4100 ms -> ~4600 ms
with Turbo Boost disabled: ~5000 ms -> ~5400 ms
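(How turbo was toggled isn't stated above; for reference, on Linux with the intel_pstate driver it can be done via sysfs, which is an assumption about this machine's setup:
echo 1 | sudo tee /sys/devices/system/cpu/intel_pstate/no_turbo    # disable turbo
echo 0 | sudo tee /sys/devices/system/cpu/intel_pstate/no_turbo    # re-enable
)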
Answer (score: 1):
Not an answer, but I did some measurements with GNU g++ 6.4.0 and Intel icpc 18.0.1 on a Haswell E5-2680v3. All timings were very consistent, deviating by only a few milliseconds:
g++ -O2 -mavx2 -ftree-vectorize, align=16: 6.99 [s]
g++ -O2 -mavx2 -ftree-vectorize, align=32: 6.67 [s]
g++ -O3 -mavx2 -ftree-vectorize, align=16: 6.72 [s]
g++ -O3 -mavx2 -ftree-vectorize, align=32: 6.60 [s]
g++ -O2 -march=haswell -ftree-vectorize, align=16: 6.45 [s]
g++ -O2 -march=haswell -ftree-vectorize, align=32: 6.45 [s]
g++ -O3 -march=haswell -ftree-vectorize, align=16: 6.44 [s]
g++ -O3 -march=haswell -ftree-vectorize, align=32: 6.44 [s]
icpc -O2 -xCORE-AVX2, align=16: 3.67 [s]
icpc -O2 -xCORE-AVX2, align=32: 3.51 [s]
icpc -O3 -xCORE-AVX2, align=16: 3.65 [s]
icpc -O3 -xCORE-AVX2, align=32: 3.59 [s]
Conclusions:
-march=haswell is faster than -mavx2, and with -march=haswell the timings are almost insensitive to both the optimization level and the alignment. In my experience, ICC is usually much better than GCC, at least at vectorization. I have not studied the generated machine code, but one could use e.g. Godbolt to understand why ICC is so much better in this case.
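To make that comparison concrete, here is a sketch of how the inner k-loop of foo() could be written by hand with AVX intrinsics, roughly what one would hope the compilers emit. kernel_avx is an invented name, not from the question, and unaligned loads are used throughout so it works with either align value (the products are also grouped slightly differently than the scalar source, which can change rounding):
#include <immintrin.h>

// Hand-vectorized sketch of the inner k-loop of foo() above.
// Processes chunk == 32 floats as four 8-wide AVX vectors.
static void kernel_avx(float* adata, const float* bdata,
                       const float* aprev, const float* anext,
                       const float* bprev, const float* bnext, int ind) {
    const __m256 one = _mm256_set1_ps(1.0f);
    for (int k = 0; k < 32; k += 8) {
        __m256 av = _mm256_loadu_ps(adata + ind + k);
        __m256 bv = _mm256_loadu_ps(bdata + ind + k);
        __m256 ap = _mm256_loadu_ps(aprev + k);
        __m256 an = _mm256_loadu_ps(anext + k);
        __m256 bp = _mm256_loadu_ps(bprev + k);
        __m256 bn = _mm256_loadu_ps(bnext + k);
        // (b - 1) * aprev * a * bnext + a * anext * (bprev - 1), as in the scalar loop
        __m256 t1 = _mm256_mul_ps(_mm256_mul_ps(_mm256_sub_ps(bv, one), ap),
                                  _mm256_mul_ps(av, bn));
        __m256 t2 = _mm256_mul_ps(_mm256_mul_ps(av, an),
                                  _mm256_sub_ps(bp, one));
        _mm256_storeu_ps(adata + ind + k, _mm256_add_ps(t1, t2));
    }
}
Compiling this with -mavx2 and diffing it against the compiler-generated inner loop on Godbolt would show whether GCC falls back to scalar code or extra shuffles where ICC does not.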
,对优化级别和对齐几乎不敏感。根据我的经验,ICC通常比GCC好得多,至少对于矢量化而言。我没有研究过生成的机器代码,但是人们可以使用例如Godbolt来理解为什么ICC在这种情况下如此优越。