Eigen and OpenMP: No parallelization due to false sharing and thread overhead

Time: 2017-01-25 07:27:16

Tags: c++ parallel-processing openmp eigen false-sharing

System specification:

  1. Intel Xeon E7-v3 processor (4 sockets, 16 cores/socket, 2 threads/core)
  2. Using the Eigen library and C++
  3. Serial implementation of the code snippet below:

    Eigen::VectorXd get_Row(const int j, const int nColStart, const int nCols) {
    
        Eigen::VectorXd row(nCols);
        for (int k=0; k<nCols; ++k) {
            row(k) = get_Matrix_Entry(j,k+nColStart);
        }
    
        return row;
    } 
    
    double get_Matrix_Entry(int x , int y){
        return exp(-(x-y)*(x-y));
    } 
    

    I need to parallelize the get_Row part, since nCols can be as large as 10^6, so I tried the following techniques:

    1. Naive parallelization:

      Eigen::VectorXd get_Row(const int j, const int nColStart, const int nCols) {  
          Eigen::VectorXd row(nCols);
      
          #pragma omp parallel for schedule(static,8)    
          for (int k=0; k<nCols; ++k) {
              row(k) = get_Matrix_Entry(j,k+nColStart);
          }
      
          return row;
      }
      
    2. Strip mining:

      Eigen::VectorXd get_Row(const int j, const int nColStart, const int nCols) { 
          int vec_len = 8;
          Eigen::VectorXd row(nCols);
          int cols = nCols;
          int rem = cols%vec_len;
          if(rem!=0)
              cols-=rem;
      
          #pragma omp parallel for    
          for(int ii=0;ii<cols; ii+=vec_len){
               for(int i=ii;i<ii+vec_len;i++){   // inner index is private to each thread
                   row(i) = get_Matrix_Entry(j,i+nColStart);
               }
          }
      
          // handle the remainder that does not fill a whole vec_len chunk
          for(int jj=cols; jj<nCols;jj++)
              row(jj) = get_Matrix_Entry(j,jj+nColStart);
      
          return row;
      }
      
    3. Avoiding false sharing (from somewhere on the internet):

      Eigen::VectorXd get_Row(const int j, const int nColStart, const int nCols) {
          int cache_line_size=8;
          Eigen::MatrixXd row_m(nCols,cache_line_size);
      
          #pragma omp parallel for schedule(static,1)
          for (int k=0; k<nCols; ++k) 
              row_m(k,0)  =   get_Matrix_Entry(j,k+nColStart);
      
          Eigen::VectorXd row(nCols); 
          row = row_m.block(0,0,nCols,1);
      
         return row;
      
      }
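      A minimal sketch of how this padding trick is usually meant to work (not code from the original post; the name get_Row_padded is illustrative). Eigen::MatrixXd is column-major by default, so in the version above row_m(k,0) and row_m(k+1,0) are adjacent doubles and can still share a cache line; a row-major scratch matrix spaces consecutive writes cache_line_size doubles (64 bytes) apart:

      #include <Eigen/Core>

      double get_Matrix_Entry(int x, int y);   // defined earlier in the question

      Eigen::VectorXd get_Row_padded(const int j, const int nColStart, const int nCols) {
          const int cache_line_size = 8;   // 8 doubles = 64 bytes
          Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>
              row_m(nCols, cache_line_size);

          #pragma omp parallel for schedule(static,1)
          for (int k = 0; k < nCols; ++k)
              row_m(k, 0) = get_Matrix_Entry(j, k + nColStart);   // each write lands on its own cache line

          return row_m.col(0);   // copy the padded column back into a contiguous vector
      }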
      
    4. Output:

      None of the above techniques helped in reducing the time taken to execute get_Row for large nCols, which means the naive parallelization performed similarly to the other techniques (although serial was better). Are there any suggestions or approaches that could help improve the time?

      As requested by user Avi Ginsburg, here are some further system details:

      • The compiler is g++ (GCC) version 4.4.7
      • Eigen library version is 3.3.2
      • Compiler flags used: "-c -fopenmp -Wall -march=native -O3 -funroll-all-loops -ffast-math -ffinite-math-only -I header", where header is the folder containing Eigen.
      • Output of gcc -march=native -Q --help=target (mentioning only the description of some flags):

        -mavx [enabled]

        -mfancy-math-387 [enabled]

        -mfma [disabled]

        -march=core2

      For the full description of the flags, please see this.

1 answer:

Answer 0 (score: 2):

Try rewriting your function as a single expression and let Eigen vectorize itself,

Eigen::VectorXd get_Row(const int j, const int nColStart, const int nCols) {

    Eigen::VectorXd row(nCols);

    row = (-( Eigen::VectorXd::LinSpaced(nCols, nColStart, nColStart + nCols - 1).array()
                      - double(j)).square()).exp().matrix();

    return row;
}

Make sure to use -mavx and -mfma (or -march=native) when compiling. This gives a x4 speedup on an i7 (I know you're talking about trying to use 64/128 threads, but this is with a single thread).
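One quick way to confirm those flags actually took effect is to print the SIMD instruction sets Eigen was compiled with; a minimal check sketch (Eigen::SimdInstructionSetsInUse() is provided by Eigen/Core, the rest is illustrative):

#include <Eigen/Core>
#include <iostream>

int main() {
    // Prints the instruction sets Eigen's vectorization is using, e.g. "AVX SSE, SSE2, ...".
    // If AVX is missing, the -mavx / -march=native flags did not reach this translation unit.
    std::cout << Eigen::SimdInstructionSetsInUse() << std::endl;
    return 0;
}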

You can enable OpenMP for some additional speedup by dividing the computation into segments:

Eigen::VectorXd get_Row_omp(const int j, const int nColStart, const int nCols) {

    Eigen::VectorXd row(nCols);

#pragma omp parallel
    {
        int num_threads = omp_get_num_threads();
        int tid = omp_get_thread_num();
        int n_per_thread = nCols / num_threads;
        if ((n_per_thread * num_threads < nCols)) n_per_thread++;
        int start = tid * n_per_thread;
        int len = n_per_thread;
        if (tid + 1 == num_threads) len = nCols - start;

        if(start < nCols)
            row.segment(start, len) = (-(Eigen::VectorXd::LinSpaced(len,
                               nColStart + start, nColStart + start + len - 1)
                            .array() - double(j)).square()).exp().matrix();

    }
    return row;

}

For me (4 cores), I get an additional ~x3.3 speedup when computing 10^8 elements, but I expect it to be lower for 10^6 elements and/or 64/128 cores (normalized to the number of cores, of course).
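If you want to see how it scales before committing to 64/128 threads, a small timing sketch (assuming the get_Row_omp above; the thread counts and problem size are illustrative, omp_set_num_threads and omp_get_wtime are standard OpenMP):

#include <Eigen/Core>
#include <iostream>
#include <omp.h>

Eigen::VectorXd get_Row_omp(const int j, const int nColStart, const int nCols);  // as defined above

int main() {
    const int nCols = 1000000;   // 10^6, the size mentioned in the question
    const int counts[] = {1, 2, 4, 8, 16, 32, 64};
    for (int threads : counts) {
        omp_set_num_threads(threads);            // cap the team size for the next parallel region
        double beg = omp_get_wtime();
        Eigen::VectorXd r = get_Row_omp(5, 3, nCols);
        double end = omp_get_wtime();
        std::cout << threads << " threads: " << (end - beg) << " s, mean = " << r.mean() << "\n";
    }
    return 0;
}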

Edit:

I hadn't put in any checks to make sure that the OMP threads didn't go out of bounds, and I had mixed up the second and third arguments in the serial version's Eigen::VectorXd::LinSpaced. That was probably the cause of whatever errors you ran into. Additionally, I've pasted the code I used for testing here. I compiled with g++ -std=c++11 -fopenmp -march=native -O3; modify to suit your needs.

#include <Eigen/Core>
#include <cmath>     // for exp used in get_Matrix_Entry
#include <iostream>
#include <omp.h>


double get_Matrix_Entry(int x, int y) {
        return exp(-(x - y)*(x - y));
}

Eigen::VectorXd get_RowOld(const int j, const int nColStart, const int nCols) {

        Eigen::VectorXd row(nCols);
        for (int k = 0; k<nCols; ++k) {
                row(k) = get_Matrix_Entry(j, k + nColStart);
        }
        return row;
}


Eigen::VectorXd get_Row(const int j, const int nColStart, const int nCols) {

        Eigen::VectorXd row(nCols);

        row = (-( Eigen::VectorXd::LinSpaced(nCols, nColStart, nColStart + nCols - 1).array() - double(j)).square()).exp().matrix();

        return row;
}

Eigen::VectorXd get_Row_omp(const int j, const int nColStart, const int nCols) {

        Eigen::VectorXd row(nCols);

#pragma omp parallel
        {
                int num_threads = omp_get_num_threads();
                int tid = omp_get_thread_num();
                int n_per_thread = nCols / num_threads;
                if ((n_per_thread * num_threads < nCols)) n_per_thread++;
                int start = tid * n_per_thread;
                int len = n_per_thread;
                if (tid + 1 == num_threads) len = nCols - start;


#pragma omp critical
{
        std::cout << tid << "/" << num_threads << "\t" << n_per_thread << "\t" << start <<
                                                         "\t" << len << "\t" << start+len << "\n\n";
}

                if(start < nCols)
                        row.segment(start, len) = (-(Eigen::VectorXd::LinSpaced(len, nColStart + start, nColStart + start + len - 1).array() - double(j)).square()).exp().matrix();

        }
        return row;
}

int main()
{
        std::cout << EIGEN_WORLD_VERSION << '.' << EIGEN_MAJOR_VERSION << '.' << EIGEN_MINOR_VERSION << '\n';
        volatile int b = 3;
        int sz = 6553600;
        sz = 16;
        b = 6553500;
        b = 3;
        {
                auto beg = omp_get_wtime();
                auto r = get_RowOld(5, b, sz);
                auto end = omp_get_wtime();
                auto diff = end - beg;
                std::cout << r.rows() << "\t" << r.cols() << "\n";
//              std::cout << r.transpose() << "\n";
                std::cout << "Old: " << r.mean() << "\n" << diff << "\n\n";

                beg = omp_get_wtime();
                auto r2 = get_Row(5, b, sz);
                end = omp_get_wtime();
                diff = end - beg;
                std::cout << r2.rows() << "\t" << r2.cols() << "\n";
//              std::cout << r2.transpose() << "\n";
                std::cout << "Eigen:         " << (r2-r).cwiseAbs().sum() << "\t" << (r-r2).cwiseAbs().mean() << "\n" << diff << "\n\n";

                auto omp_beg = omp_get_wtime();
                auto r3 = get_Row_omp(5, b, sz);
                auto omp_end = omp_get_wtime();
                auto omp_diff = omp_end - omp_beg;
                std::cout << r3.rows() << "\t" << r3.cols() << "\n";
//              std::cout << r3.transpose() << "\n";
                std::cout << "OMP and Eigen: " << (r3-r).cwiseAbs().sum() << "\t" << (r - r3).cwiseAbs().mean() << "\n" << omp_diff << "\n";
        }

        return 0;

}