OpenMP SIMD reduction with a custom operator

Date: 2014-09-03 20:03:45

Tags: c++ openmp simd avx

I have the following loop that I would like to speed up using #pragma omp simd:

#define N 1024
double* data = new double[N];
// Generate data, not important how.

double mean = 0.0;
for (size_t i = 0; i < N; i++) {
    mean += (data[i] - mean) / (i+1);
}

As I expected, simply putting #pragma omp simd directly before the loop has no effect (I am checking running times). I can handle the multi-threaded case easily with #pragma omp parallel for reduction(...) and a custom reducer, as shown below, but how do I make use of OpenMP SIMD here?

I am using the following class to implement the + and += operators, which add a double to a running mean and combine two running means:

class RunningMean {
    private:
        double mean;
        size_t count;

    public:
        RunningMean(): mean(0), count(0) {}
        RunningMean(double m, size_t c): mean(m), count(c) {}

        RunningMean operator+(RunningMean& rhs) {
            size_t c = this->count + rhs.count;
            double m = (this->mean*this->count + rhs.mean*rhs.count) / c;
            return RunningMean(m, c);
        }

        RunningMean operator+(double rhs) {
            size_t c = this->count + 1;
            double m = this->mean + (rhs - this->mean) / c;
            return RunningMean(m, c);
        }

        RunningMean& operator+=(const RunningMean& rhs) {
            this->mean = this->mean*this->count + rhs.mean*rhs.count;
            this->count += rhs.count;
            this->mean /= this->count;
            return *this;
        }

        RunningMean& operator+=(double rhs) {
            this->count++;
            this->mean += (rhs - this->mean) / this->count;
            return *this;
        }

        double getMean() { return mean; }
        size_t getCount() { return count; }
};
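For reference, the two update rules the operators above implement (adding one new sample to a running mean, and combining two partial means) are:

mean(k) = mean(k-1) + (x(k) - mean(k-1)) / k
mean(n+m) = (n*mean(n) + m*mean(m)) / (n+m)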

The math for this comes from http://prod.sandia.gov/techlib/access-control.cgi/2008/086212.pdf. For the multi-threaded, non-SIMD parallel reduction I do the following:

#pragma omp declare reduction (runningmean : RunningMean : omp_out += omp_in)
RunningMean mean;
#pragma omp parallel for reduction(runningmean:mean)
for (size_t i = 0; i < N; i++)
    mean += data[i];

This gives me a 3.2x speedup on a Core i7 2600K using 8 threads.

If I were implementing the SIMD myself without OpenMP, I would just keep 4 means in one vector and 4 counts in another (assuming AVX instructions) and keep adding 4-element vectors of doubles using a vectorized version of operator+(double rhs). Once that was done, I would combine the resulting 4 pairs of means and counts using the math from operator+=. How can I instruct OpenMP to do this?
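For illustration, here is a minimal scalar sketch of that manual 4-lane idea (plain arrays instead of AVX registers, assuming N is a multiple of 4 and reusing the RunningMean class above):

double mean4[4]  = {0.0, 0.0, 0.0, 0.0};
size_t count4[4] = {0, 0, 0, 0};
for (size_t i = 0; i < N; i += 4) {
    for (int j = 0; j < 4; j++) {      // the 4-wide lane loop a compiler could vectorize
        count4[j]++;
        mean4[j] += (data[i + j] - mean4[j]) / count4[j];
    }
}
// Combine the four partial means/counts using the operator+= math.
RunningMean combined(mean4[0], count4[0]);
for (int j = 1; j < 4; j++)
    combined += RunningMean(mean4[j], count4[j]);
double final_mean = combined.getMean();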

2 Answers:

Answer 0 (score: 2)

The problem is that

mean += (data[i] - mean) / (i+1);

is not well suited to SIMD. However, by looking at the math a bit more carefully, it can be vectorized without too much effort.

The key formula is

mean(n+m) = (n*mean(n) + m*mean(m))/(n+m)

which shows how to add the mean of n numbers and the mean of m numbers. This can be seen in your operator definition RunningMean operator+(RunningMean& rhs), and it explains why your parallel code works. I think this is clearer if we de-convolute it from your C++ code:

double mean = 0.0;
int count = 0;
#pragma omp parallel
{
    double mean_private = 0.0;
    int count_private = 0;
    #pragma omp for nowait
    for(size_t i=0; i<N; i++) {
        count_private ++;
        mean_private += (data[i] - mean_private)/count_private;
    }
    #pragma omp critical
    {
        mean = (count_private*mean_private + count*mean);
        count += count_private;
        mean /= count;
    }
}

We can use the same idea with SIMD (and then combine both). But let's do just the SIMD part first. Using AVX we can handle four parallel means at once. Each parallel mean handles the following data elements:

mean 1 data elements: 0,  4,  8, 12,...
mean 2 data elements: 1,  5,  9, 13,...
mean 3 data elements: 2,  6, 10, 14,...
mean 4 data elements: 3,  7, 11, 15,...

Once we have looped over all the elements, we add the four parallel means together and divide by 4 (since each partial mean runs over N/4 elements).

Here is the code for this:

double mean = 0.0;
__m256d mean4 = _mm256_set1_pd(0.0);
__m256d count4 = _mm256_set1_pd(0.0);
for(size_t i=0; i<N/4; i++) {
    count4 = _mm256_add_pd(count4,_mm256_set1_pd(1.0));
    __m256d t1 = _mm256_loadu_pd(&data[4*i]);
    __m256d t2 = _mm256_div_pd(_mm256_sub_pd(t1, mean4), count4);
    mean4 = _mm256_add_pd(t2, mean4);   
}
// Horizontal reduction: sum the four lane means and divide by 4
// (valid because each lane processed exactly N/4 elements).
__m256d t1 = _mm256_hadd_pd(mean4,mean4);                 // [m0+m1, m0+m1, m2+m3, m2+m3]
__m128d t2 = _mm256_extractf128_pd(t1,1);                 // upper half: [m2+m3, m2+m3]
__m128d t3 = _mm_add_sd(_mm256_castpd256_pd128(t1),t2);   // low element = m0+m1+m2+m3
mean = _mm_cvtsd_f64(t3)/4;
// Handle the remaining N%4 elements with a scalar running mean.
int count = 0;
double mean2 = 0;
for(size_t i=4*(N/4); i<N; i++) {
    count++;
    mean2 += (data[i] - mean2)/count;
}
// Combine the SIMD mean (over 4*(N/4) elements) with the tail mean.
mean = (4*(N/4)*mean + count*mean2)/N;

Finally, we can combine this with OpenMP to get the full benefit of SIMD and MIMD, like this:

double mean = 0.0;
int count = 0;
#pragma omp parallel 
{
    double mean_private = 0.0;
    int count_private = 0;
    __m256d mean4 = _mm256_set1_pd(0.0);
    __m256d count4 = _mm256_set1_pd(0.0);
    #pragma omp for nowait
    for(size_t i=0; i<N/4; i++) {
        count_private++;
        count4 = _mm256_add_pd(count4,_mm256_set1_pd(1.0));
        __m256d t1 = _mm256_loadu_pd(&data[4*i]);
        __m256d t2 = _mm256_div_pd(_mm256_sub_pd(t1, mean4), count4);
        mean4 = _mm256_add_pd(t2, mean4);
    }
    __m256d t1 = _mm256_hadd_pd(mean4,mean4);
    __m128d t2 = _mm256_extractf128_pd(t1,1);
    __m128d t3 = _mm_add_sd(_mm256_castpd256_pd128(t1),t2);
    mean_private = _mm_cvtsd_f64(t3)/4;

    // Note: count_private and count are in units of 4-element blocks here; since
    // both weights in the combination below use the same units, the result is correct.
    #pragma omp critical
    {
        mean = (count_private*mean_private + count*mean);
        count += count_private;
        mean /= count;
    }   
}
int count2 = 0;
double mean2 = 0;
for(size_t i=4*(N/4); i<N; i++) {
    count2++;
    mean2 += (data[i] - mean2)/count2;
}
mean = (4*(N/4)*mean + count2*mean2)/N;

Here is a working example (compile with -O3 -mavx -fopenmp):

#include <stdio.h>
#include <stdlib.h>
#include <x86intrin.h>

double mean_simd(double *data, const int N) {
    double mean = 0.0;
    __m256d mean4 = _mm256_set1_pd(0.0);
    __m256d count4 = _mm256_set1_pd(0.0);
    for(size_t i=0; i<N/4; i++) {
        count4 = _mm256_add_pd(count4,_mm256_set1_pd(1.0));
        __m256d t1 = _mm256_loadu_pd(&data[4*i]);
        __m256d t2 = _mm256_div_pd(_mm256_sub_pd(t1, mean4), count4);
        mean4 = _mm256_add_pd(t2, mean4);           
    }
    __m256d t1 = _mm256_hadd_pd(mean4,mean4);
    __m128d t2 = _mm256_extractf128_pd(t1,1);
    __m128d t3 = _mm_add_sd(_mm256_castpd256_pd128(t1),t2);
    mean = _mm_cvtsd_f64(t3)/4;
    int count = 0;
    double mean2 = 0;
    for(size_t i=4*(N/4); i<N; i++) {
        count++;
        mean2 += (data[i] - mean2)/count;
    }
    mean = (4*(N/4)*mean + count*mean2)/N;
    return mean;
}

double mean_simd_omp(double *data, const int N) {
    double mean = 0.0;
    int count = 0;
    #pragma omp parallel 
    {
        double mean_private = 0.0;
        int count_private = 0;
        __m256d mean4 = _mm256_set1_pd(0.0);
        __m256d count4 = _mm256_set1_pd(0.0);
        #pragma omp for nowait
        for(size_t i=0; i<N/4; i++) {
            count_private++;
            count4 = _mm256_add_pd(count4,_mm256_set1_pd(1.0));
            __m256d t1 = _mm256_loadu_pd(&data[4*i]);
            __m256d t2 = _mm256_div_pd(_mm256_sub_pd(t1, mean4), count4);
            mean4 = _mm256_add_pd(t2, mean4);
        }
        __m256d t1 = _mm256_hadd_pd(mean4,mean4);
        __m128d t2 = _mm256_extractf128_pd(t1,1);
        __m128d t3 = _mm_add_sd(_mm256_castpd256_pd128(t1),t2);
        mean_private = _mm_cvtsd_f64(t3)/4;

        #pragma omp critical
        {
            mean = (count_private*mean_private + count*mean);
            count += count_private;
            mean /= count;
        }   
    }
    int count2 = 0;
    double mean2 = 0;
    for(size_t i=4*(N/4); i<N; i++) {
        count2++;
        mean2 += (data[i] - mean2)/count2;
    }
    mean = (4*(N/4)*mean + count2*mean2)/N;
    return mean;
}


int main() {
    const int N = 1001;
    double data[N];

    for(int i=0; i<N; i++) data[i] = 1.0*rand()/RAND_MAX;
    float sum = 0; for(int i=0; i<N; i++) sum+= data[i]; sum/=N;
    printf("mean %f\n", sum);
    printf("mean_simd %f\n", mean_simd(data, N);
    printf("mean_simd_omp %f\n", mean_simd_omp(data, N));
}

Answer 1 (score: 0)

KISS answer: just calculate the mean outside the loop. Parallelize the following code:

double sum = 0.0;
for(size_t i = 0; i < N; i++) sum += data[i];
double mean = sum/N;

The sum is easy to parallelize, but you will not see any effect from SIMD optimization: it is purely memory bound, and the CPU will just be waiting for data from memory. If N is as small as 1024, there is little point in parallelizing at all; the synchronization overhead would eat up all of the gains.
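For what it's worth, a minimal sketch of that parallelized sum with a standard OpenMP reduction (adding a simd clause is possible, but per the memory-bound argument above it is unlikely to change anything):

double sum = 0.0;
// Each thread accumulates a private partial sum; OpenMP combines them at the end.
#pragma omp parallel for reduction(+:sum)
for (size_t i = 0; i < N; i++)
    sum += data[i];
double mean = sum / N;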