继我在 Code Review 上提出的问题之后,我想知道:为什么用 std::plus&lt;int&gt;
对两个向量做简单变换时,PPL 实现比顺序的 std::transform
以及使用 OpenMP 的 for 循环慢得多(顺序(带向量化):25ms;顺序(无向量化):28ms;C++ AMP:131ms;PPL:51ms;OpenMP:24ms)?
我使用以下代码进行性能分析,并在Visual Studio 2013中使用完全优化进行编译:
#include <amp.h>
#include <ppl.h>

#include <assert.h>

#include <algorithm>
#include <chrono>
#include <functional>
#include <iostream>
#include <numeric>
#include <random>
#include <vector>
using namespace concurrency;
const std::size_t size = 30737418;
//----------------------------------------------------------------------------
// Program entry point.
//----------------------------------------------------------------------------
int main( )
{
    // ---- C++ AMP device selection --------------------------------------
    // Report which accelerator AMP will dispatch to; direct3d_ref is a
    // software emulator whose timings would be meaningless.
    accelerator default_device;
    std::wcout << "Using device : " << default_device.get_description( ) << std::endl;
    if( default_device == accelerator( accelerator::direct3d_ref ) )
        std::cout << "WARNING!! Running on very slow emulator! Only use this accelerator for debugging." << std::endl;

    // ---- Input data ----------------------------------------------------
    // Two vectors of pseudo-random ints and one output vector.
    std::mt19937 engine;
    std::uniform_int_distribution<int> dist( 0, 10000 );
    std::vector<int> vecTest( size );
    std::vector<int> vecTest2( size );
    std::vector<int> vecResult( size );
    // 'size' fits comfortably in an int; the explicit cast on the bound
    // silences the signed/unsigned comparison warning (MSVC C4018) while
    // keeping the index signed (required below by OpenMP 2.0).
    for( int i = 0; i < static_cast<int>( size ); ++i )
    {
        vecTest[i] = dist( engine );
        vecTest2[i] = dist( engine );
    }

    // ---- Baseline: sequential std::transform (auto-vectorized) ---------
    // Its output is kept as the reference answer for all parallel variants.
    std::vector<int> vecCorrectResult( size );
    std::chrono::high_resolution_clock clock;
    auto beginTime = clock.now();
    std::transform( std::begin( vecTest ), std::end( vecTest ), std::begin( vecTest2 ), std::begin( vecCorrectResult ), std::plus<int>() );
    auto endTime = clock.now();
    auto timeTaken = endTime - beginTime;
    std::cout << "The time taken for the sequential function to execute was: " << std::chrono::duration_cast<std::chrono::milliseconds>(timeTaken).count() << "ms" << std::endl;

    // ---- Sequential loop, auto-vectorization disabled ------------------
    // The MSVC-specific pragma must immediately precede the loop it affects.
    beginTime = clock.now();
#pragma loop(no_vector)
    for( int i = 0; i < static_cast<int>( size ); ++i )
    {
        vecResult[i] = vecTest[i] + vecTest2[i];
    }
    endTime = clock.now();
    timeTaken = endTime - beginTime;
    std::cout << "The time taken for the sequential function (with auto-vectorization disabled) to execute was: " << std::chrono::duration_cast<std::chrono::milliseconds>(timeTaken).count() << "ms" << std::endl;

    // ---- C++ AMP -------------------------------------------------------
    // NOTE: the timed region includes array_view creation, host->device
    // copy, kernel launch/JIT and the copy-back in synchronize(), so this
    // figure is end-to-end rather than kernel-only. discard_data() avoids
    // uploading the (stale) contents of the result buffer to the device.
    beginTime = clock.now();
    concurrency::array_view<const int, 1> av1( vecTest );
    concurrency::array_view<const int, 1> av2( vecTest2 );
    concurrency::array_view<int, 1> avResult( vecResult );
    avResult.discard_data();
    concurrency::parallel_for_each( avResult.extent, [=]( concurrency::index<1> index ) restrict(amp) {
        avResult[index] = av1[index] + av2[index];
    } );
    avResult.synchronize();
    endTime = clock.now();
    timeTaken = endTime - beginTime;
    std::cout << "The time taken for the AMP function to execute was: " << std::chrono::duration_cast<std::chrono::milliseconds>(timeTaken).count() << "ms" << std::endl;
    // boolalpha is set once here and persists for the later bool prints.
    std::cout << std::boolalpha << "The AMP function generated the correct answer: " << (vecResult == vecCorrectResult) << std::endl;

    // ---- PPL -----------------------------------------------------------
    beginTime = clock.now();
    concurrency::parallel_transform( std::begin( vecTest ), std::end( vecTest ), std::begin( vecTest2 ), std::begin( vecResult ), std::plus<int>() );
    endTime = clock.now();
    timeTaken = endTime - beginTime;
    std::cout << "The time taken for the PPL function to execute was: " << std::chrono::duration_cast<std::chrono::milliseconds>(timeTaken).count() << "ms" << std::endl;
    std::cout << "The PPL function generated the correct answer: " << (vecResult == vecCorrectResult) << std::endl;

    // ---- OpenMP --------------------------------------------------------
    // MSVC implements OpenMP 2.0, which requires a *signed* loop index,
    // so only the bound is cast, never the index variable.
    beginTime = clock.now();
#pragma omp parallel
#pragma omp for
    for( int i = 0; i < static_cast<int>( size ); ++i )
    {
        vecResult[i] = vecTest[i] + vecTest2[i];
    }
    endTime = clock.now();
    timeTaken = endTime - beginTime;
    std::cout << "The time taken for the OpenMP function to execute was: " << std::chrono::duration_cast<std::chrono::milliseconds>(timeTaken).count() << "ms" << std::endl;
    std::cout << "The OpenMP function generated the correct answer: " << (vecResult == vecCorrectResult) << std::endl;
    return 0;
}
答案 0(得分:4):
根据MSDN,concurrency::parallel_transform
的默认分区程序为concurrency::auto_partitioner
。当涉及到它时:
这种分区方法使用范围窃取来实现负载平衡以及每次迭代取消。
对于像两个数组求和这样简单(且受内存带宽限制)的操作,使用这种分区器纯属大材小用,因为其开销过大。你应该改用 concurrency::static_partitioner
。静态分区正是大多数 OpenMP 实现在 for
构造缺少 schedule
子句时默认采用的策略。
正如 Code Review 上已经指出的,这段代码严重受内存带宽限制。它本质上就是 STREAM benchmark 中的 SUM
内核,而该基准正是专门用来测量系统内存带宽的。a[i] = b[i] + c[i]
这种操作的运算强度(以每字节运算次数,OPS/byte 衡量)非常低,其速度完全由主存总线的带宽决定。这就是为什么 OpenMP 代码和向量化的串行代码性能基本相同,而且并不比未向量化的串行代码快多少。
要获得更高的并行性能,可以在现代多插槽(multi-socket)系统上运行代码,并让每个数组的数据均匀分布在各个插槽的内存上;这样理论上可以获得几乎等于 CPU 插槽数量的加速比。