Why does load_ps() work on one PC but not on another?

Time: 2018-05-22 13:48:26

Tags: c x86 intel simd avx

I wrote the following code to scale a set of numbers:

 #include <stdio.h>
 #include <stdlib.h>
 #include <math.h>
 #include "immintrin.h"

 void scale(struct problem_param prob_param, float* features)
 {
    int i,j,k;
    for (j = 0; j < prob_param.Nr_ft; j++)
    {
        __m256      range_vec ,low_up_vec , low_vec,tmp_vec;
        __m256      feat_min_vec, feat_vec;
        unsigned    count           = prob_param.Size;
        unsigned    offset          = j * prob_param.Size;
        float       feature_max     = features[offset];
        float       feature_min     = features[offset];
        /*
         * Look for min and max of each feature.
         */
        for ( i = 1; i < prob_param.Size ; i++)
        {
            if (features[i + offset] > feature_max )        feature_max = features[i + offset];

            if (features[i + offset] < feature_min )    feature_min = features[i + offset];
        }
        printf("feature : %u \t min = %f \t max = %f \n",j,feature_min,feature_max);
        /*
         * Set the range.
         * Set constant vectors for the vector instructions.
         */
        float       range   = feature_max - feature_min;
        feat_min_vec        = _mm256_set1_ps (feature_min);
        range_vec           = _mm256_set1_ps (range);
        low_up_vec          = _mm256_set1_ps (prob_param.upper_limit - prob_param.lower_limit);
        low_vec             = _mm256_set1_ps (prob_param.lower_limit);
        /*
         * Normalising
         * -----------
         * Head
         */
        for ( i = 0; i < prob_param.Size && count >= 7 ; i+=8)
        {
            feat_vec    = _mm256_load_ps(&features[i + offset]);
            tmp_vec     = _mm256_sub_ps(feat_vec,feat_min_vec);
            tmp_vec     = _mm256_mul_ps(tmp_vec,low_up_vec);
            tmp_vec     = _mm256_div_ps(tmp_vec,range_vec);
            feat_vec    = _mm256_add_ps(tmp_vec,low_vec);

            _mm256_store_ps (&features[i + offset], feat_vec);

            count -=8;
        }
        /*
         * Normalising
         * -----------
         * Tail
         */
        for ( k = i; k < prob_param.Size ; k++)
        {
            features[k + offset] = prob_param.lower_limit + (prob_param.upper_limit - prob_param.lower_limit) * (features[k + offset] - feature_min) / range;
        }
    }
 }

This is the function responsible for the scaling, and this is how I call it:

#include <stdio.h>
#include <stdlib.h>

#include "data.h" 
#include "common.h"

#define     training_size       3089
#define     number_features     4
#define     low                 -1.0
#define     up                  1.0
float* feature_array;

int main()
{
  struct problem_param pp;

  pp.Size           =       training_size;
  pp.Nr_ft          =       number_features;
  pp.lower_limit    =       low;
  pp.upper_limit    =       up;

  posix_memalign((void **) &feature_array, 32, (size_t) training_size * number_features * sizeof(float));

  scale(pp, feature_array);

  return EXIT_SUCCESS;
}

I tested this code on my MacBook Pro (Core i5, Haswell) and it works, but when I test it on an Asus with a Core i7 (also Haswell), it crashes with a segmentation fault on the load. Am I missing something?

1 answer:

Answer 0 (score: 3):

The value of offset (and therefore of i + offset) is not always a multiple of 8 (in your example it is 0, 3089, 6178 and 9267), so your load and store intrinsics are frequently misaligned.
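
To make the arithmetic concrete, here is a small stand-alone sketch (using the training_size of 3089 and the four features from the question; this is an illustration, not part of the original post) that prints the byte offset of each feature row and whether it lands on a 32-byte boundary. Consecutive rows are 3089 floats = 12356 bytes apart, and 12356 is not a multiple of 32:

    #include <stdio.h>

    int main(void)
    {
        const unsigned Size  = 3089;   /* training_size from the question   */
        const unsigned Nr_ft = 4;      /* number_features from the question */

        for (unsigned j = 0; j < Nr_ft; j++)
        {
            unsigned offset      = j * Size;                        /* element offset of row j */
            size_t   byte_offset = (size_t)offset * sizeof(float);  /* byte offset of row j    */
            printf("feature %u: element offset %u, byte offset %zu, 32-byte aligned: %s\n",
                   j, offset, byte_offset,
                   (byte_offset % 32 == 0) ? "yes" : "no");
        }
        return 0;
    }

Only the j = 0 row is 32-byte aligned (assuming the base pointer returned by posix_memalign is), which matches a crash on the first aligned load of a later row.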

The simplest solution is to use _mm256_loadu_ps instead of _mm256_load_ps, and _mm256_storeu_ps instead of _mm256_store_ps.
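
For illustration, this is roughly what the vectorised head loop from the question looks like with the unaligned intrinsics substituted in (same variables as declared in scale(); as an aside, the guard is written count >= 8 here so that a full 8-element vector always remains before loading):

        for ( i = 0; i < prob_param.Size && count >= 8 ; i += 8)
        {
            feat_vec = _mm256_loadu_ps(&features[i + offset]);  /* unaligned load  */
            tmp_vec  = _mm256_sub_ps(feat_vec, feat_min_vec);
            tmp_vec  = _mm256_mul_ps(tmp_vec, low_up_vec);
            tmp_vec  = _mm256_div_ps(tmp_vec, range_vec);
            feat_vec = _mm256_add_ps(tmp_vec, low_vec);

            _mm256_storeu_ps(&features[i + offset], feat_vec);  /* unaligned store */

            count -= 8;
        }

On Haswell the penalty for unaligned loads and stores that happen to be aligned is negligible, so this change costs little even for the j = 0 row.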

As for why it appears to work on your MacBook Pro: my guess is that clang generates unaligned load/store instructions behind your back, hiding the problem until you try to run the code on a system with a different compiler.

Update: I have just verified the above hypothesis by compiling the code and disassembling the generated binary (on a Haswell MacBook Pro with macOS 10.13.4 and Xcode 9.3.1):

>>> vmovups (%r14,%r13,4), %ymm0
    vsubps  192(%rsp), %ymm0, %ymm0 ## 32-byte Folded Reload
    vmulps  448(%rsp), %ymm0, %ymm0 ## 32-byte Folded Reload
    vdivps  384(%rsp), %ymm0, %ymm0 ## 32-byte Folded Reload
    vaddps  416(%rsp), %ymm0, %ymm0 ## 32-byte Folded Reload
>>> vmovups %ymm0, (%r14,%r13,4)

Note the use of vmovups rather than vmovaps.
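
As a side note beyond the answer above: if the aligned intrinsics are preferred for their built-in alignment check, one alternative is to pad the per-feature stride up to a multiple of 8 floats so that every row starts on a 32-byte boundary. A minimal sketch, with a hypothetical padded_size() helper (scale() would then have to index rows by this padded stride rather than by prob_param.Size):

    #include <stdlib.h>

    /* Round a row length up to the next multiple of 8 floats (32 bytes). */
    static size_t padded_size(size_t n)
    {
        return (n + 7) & ~(size_t)7;
    }

    int main(void)
    {
        float  *feature_array = NULL;
        size_t  stride        = padded_size(3089);   /* 3096 floats per row */

        /* The base pointer is 32-byte aligned and the stride is a multiple of
         * 32 bytes, so feature_array + j * stride is aligned for every j and
         * _mm256_load_ps/_mm256_store_ps remain valid at each row start. */
        if (posix_memalign((void **)&feature_array, 32, stride * 4 * sizeof(float)) != 0)
            return EXIT_FAILURE;

        free(feature_array);
        return EXIT_SUCCESS;
    }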