Fastest de-interleave operation in C?

Date: 2013-01-28 17:34:14

Tags: c arrays performance extract memcpy

I have a pointer to a byte array mixed that contains the interleaved bytes of two different arrays, array1 and array2. Say mixed looks like this:

a1b2c3d4...

What I need to do is de-interleave the bytes so that I get array1 = abcd... and array2 = 1234.... I know the length of mixed ahead of time, and the lengths of array1 and array2 are equal, each being half the length of mixed.

Here is my current implementation (array1 and array2 are already allocated):

int i, j;
int mixedLength_2 = mixedLength / 2;
for (i = 0, j = 0; i < mixedLength_2; i++, j += 2)
{
    array1[i] = mixed[j];
    array2[i] = mixed[j+1];
}

This avoids any expensive multiplication or division, but it still doesn't run fast enough. I'm hoping there is something like memcpy that takes an indexer and can use low-level block-copy operations to speed this up. Is there a faster implementation than what I currently have?

Edit

The target platform is Objective-C for iOS and Mac. Fast operation matters more on iOS devices, so a solution targeting iOS would be better than nothing.

Update

Thanks everyone for the responses, especially Stephen Canon, Graham Lee, and Mecki. Here is my "master" function, which uses Stephen's NEON intrinsics when available and otherwise Graham's union cursors with the reduced iteration count suggested by Mecki.

void interleave(const uint8_t *srcA, const uint8_t *srcB, uint8_t *dstAB, size_t dstABLength)
{
#if defined __ARM_NEON__
    // attempt to use NEON intrinsics

    // iterate 32-bytes at a time
    div_t dstABLength_32 = div(dstABLength, 32);
    if (dstABLength_32.rem == 0)
    {
        while (dstABLength_32.quot --> 0)
        {
            const uint8x16_t a = vld1q_u8(srcA);
            const uint8x16_t b = vld1q_u8(srcB);
            const uint8x16x2_t ab = { a, b };
            vst2q_u8(dstAB, ab);
            srcA += 16;
            srcB += 16;
            dstAB += 32;
        }
        return;
    }

    // iterate 16-bytes at a time
    div_t dstABLength_16 = div(dstABLength, 16);
    if (dstABLength_16.rem == 0)
    {
        while (dstABLength_16.quot --> 0)
        {
            const uint8x8_t a = vld1_u8(srcA);
            const uint8x8_t b = vld1_u8(srcB);
            const uint8x8x2_t ab = { a, b };
            vst2_u8(dstAB, ab);
            srcA += 8;
            srcB += 8;
            dstAB += 16;
        }
        return;
    }
#endif

    // if the bytes were not aligned properly
    // or NEON is unavailable, fall back to
    // an optimized iteration

    // iterate 8-bytes at a time
    div_t dstABLength_8 = div(dstABLength, 8);
    if (dstABLength_8.rem == 0)
    {
        typedef union
        {
            uint64_t wide;
            struct { uint8_t a1; uint8_t b1; uint8_t a2; uint8_t b2; uint8_t a3; uint8_t b3; uint8_t a4; uint8_t b4; } narrow;
        } ab8x8_t;

        uint64_t *dstAB64 = (uint64_t *)dstAB;
        int j = 0;
        for (int i = 0; i < dstABLength_8.quot; i++)
        {
            ab8x8_t cursor;
            cursor.narrow.a1 = srcA[j  ];
            cursor.narrow.b1 = srcB[j++];
            cursor.narrow.a2 = srcA[j  ];
            cursor.narrow.b2 = srcB[j++];
            cursor.narrow.a3 = srcA[j  ];
            cursor.narrow.b3 = srcB[j++];
            cursor.narrow.a4 = srcA[j  ];
            cursor.narrow.b4 = srcB[j++];
            dstAB64[i] = cursor.wide;
        }
        return;
    }

    // iterate 4-bytes at a time
    div_t dstABLength_4 = div(dstABLength, 4);
    if (dstABLength_4.rem == 0)
    {
        typedef union
        {
            uint32_t wide;
            struct { uint8_t a1; uint8_t b1; uint8_t a2; uint8_t b2; } narrow;
        } ab8x4_t;

        uint32_t *dstAB32 = (uint32_t *)dstAB;
        int j = 0;
        for (int i = 0; i < dstABLength_4.quot; i++)
        {
            ab8x4_t cursor;
            cursor.narrow.a1 = srcA[j  ];
            cursor.narrow.b1 = srcB[j++];
            cursor.narrow.a2 = srcA[j  ];
            cursor.narrow.b2 = srcB[j++];
            dstAB32[i] = cursor.wide;
        }
        return;
    }

    // iterate 2-bytes at a time
    div_t dstABLength_2 = div(dstABLength, 2);
    typedef union
    {
        uint16_t wide;
        struct { uint8_t a; uint8_t b; } narrow;
    } ab8x2_t;

    uint16_t *dstAB16 = (uint16_t *)dstAB;
    for (int i = 0; i < dstABLength_2.quot; i++)
    {
        ab8x2_t cursor;
        cursor.narrow.a = srcA[i];
        cursor.narrow.b = srcB[i];
        dstAB16[i] = cursor.wide;
    }
}

void deinterleave(const uint8_t *srcAB, uint8_t *dstA, uint8_t *dstB, size_t srcABLength)
{
#if defined __ARM_NEON__
    // attempt to use NEON intrinsics

    // iterate 32-bytes at a time
    div_t srcABLength_32 = div(srcABLength, 32);
    if (srcABLength_32.rem == 0)
    {
        while (srcABLength_32.quot --> 0)
        {
            const uint8x16x2_t ab = vld2q_u8(srcAB);
            vst1q_u8(dstA, ab.val[0]);
            vst1q_u8(dstB, ab.val[1]);
            srcAB += 32;
            dstA += 16;
            dstB += 16;
        }
        return;
    }

    // iterate 16-bytes at a time
    div_t srcABLength_16 = div(srcABLength, 16);
    if (srcABLength_16.rem == 0)
    {
        while (srcABLength_16.quot --> 0)
        {
            const uint8x8x2_t ab = vld2_u8(srcAB);
            vst1_u8(dstA, ab.val[0]);
            vst1_u8(dstB, ab.val[1]);
            srcAB += 16;
            dstA += 8;
            dstB += 8;
        }
        return;
    }
#endif

    // if the bytes were not aligned properly
    // or NEON is unavailable, fall back to
    // an optimized iteration

    // iterate 8-bytes at a time
    div_t srcABLength_8 = div(srcABLength, 8);
    if (srcABLength_8.rem == 0)
    {
        typedef union
        {
            uint64_t wide;
            struct { uint8_t a1; uint8_t b1; uint8_t a2; uint8_t b2; uint8_t a3; uint8_t b3; uint8_t a4; uint8_t b4; } narrow;
        } ab8x8_t;

        const uint64_t *srcAB64 = (const uint64_t *)srcAB;
        int j = 0;
        for (int i = 0; i < srcABLength_8.quot; i++)
        {
            ab8x8_t cursor;
            cursor.wide = srcAB64[i];
            dstA[j  ] = cursor.narrow.a1;
            dstB[j++] = cursor.narrow.b1;
            dstA[j  ] = cursor.narrow.a2;
            dstB[j++] = cursor.narrow.b2;
            dstA[j  ] = cursor.narrow.a3;
            dstB[j++] = cursor.narrow.b3;
            dstA[j  ] = cursor.narrow.a4;
            dstB[j++] = cursor.narrow.b4;
        }
        return;
    }

    // iterate 4-bytes at a time
    div_t srcABLength_4 = div(srcABLength, 4);
    if (srcABLength_4.rem == 0)
    {
        typedef union
        {
            uint32_t wide;
            struct { uint8_t a1; uint8_t b1; uint8_t a2; uint8_t b2; } narrow;
        } ab8x4_t;

        const uint32_t *srcAB32 = (const uint32_t *)srcAB;
        int j = 0;
        for (int i = 0; i < srcABLength_4.quot; i++)
        {
            ab8x4_t cursor;
            cursor.wide = srcAB32[i];
            dstA[j  ] = cursor.narrow.a1;
            dstB[j++] = cursor.narrow.b1;
            dstA[j  ] = cursor.narrow.a2;
            dstB[j++] = cursor.narrow.b2;
        }
        return;
    }

    // iterate 2-bytes at a time
    div_t srcABLength_2 = div(srcABLength, 2);
    typedef union
    {
        uint16_t wide;
        struct { uint8_t a; uint8_t b; } narrow;
    } ab8x2_t;

    const uint16_t *srcAB16 = (const uint16_t *)srcAB;
    for (int i = 0; i < srcABLength_2.quot; i++)
    {
        ab8x2_t cursor;
        cursor.wide = srcAB16[i];
        dstA[i] = cursor.narrow.a;
        dstB[i] = cursor.narrow.b;
    }
}

6 answers:

Answer 0 (score: 8)

Off the top of my head, I don't know of a library function for de-interleaving 2-channel byte data. However, it's worth filing a bug report with Apple to request such a function.

In the meantime, it's pretty easy to vectorize such a function using NEON or SSE intrinsics. Specifically, on ARM you would use vld1q_u8 to load vectors from the source array, vuzpq_u8 to de-interleave them, and vst1q_u8 to store the resulting vectors. Here's a rough sketch that I haven't tested or even tried to build, but it should illustrate the general idea. More sophisticated implementations are definitely possible (in particular, NEON can load/store two 16B registers with a single instruction, which the compiler may not do here, and some amount of pipelining and/or unrolling may be beneficial depending on how long your buffers are):

#if defined __ARM_NEON__
#   include <arm_neon.h>
#endif
#include <stdint.h>
#include <stddef.h>

void deinterleave(uint8_t *mixed, uint8_t *array1, uint8_t *array2, size_t mixedLength) {
#if defined __ARM_NEON__
    size_t vectors = mixedLength / 32;
    mixedLength %= 32;
    while (vectors --> 0) {
        const uint8x16_t src0 = vld1q_u8(mixed);
        const uint8x16_t src1 = vld1q_u8(mixed + 16);
        const uint8x16x2_t dst = vuzpq_u8(src0, src1);
        vst1q_u8(array1, dst.val[0]);
        vst1q_u8(array2, dst.val[1]);
        mixed += 32;
        array1 += 16;
        array2 += 16;
    }
#endif
    for (size_t i=0; i<mixedLength/2; ++i) {
        array1[i] = mixed[2*i];
        array2[i] = mixed[2*i + 1];
    }
}

Answer 1 (score: 3)

I've only tested this lightly, but it seems to be at least twice as fast as your version:

typedef union {
    uint16_t wide;
    struct { uint8_t top; uint8_t bottom; } narrow;
} my_union;

uint16_t *source = (uint16_t *)mixed;
for (int i = 0; i < mixedLength/2; i++)
{
    my_union cursor;
    cursor.wide = source[i];
    array1[i] = cursor.narrow.top;
    array2[i] = cursor.narrow.bottom;
}

Note that I haven't been careful about struct packing, but in this case, on this architecture, that isn't a problem. Note also that someone might complain about my choice of the names top and bottom; I assume you know which half of each integer you need.

Answer 2 (score: 2)

OK, here is your original approach:

static void simpleDeint (
    uint8_t * array1, uint8_t * array2, uint8_t * mixed, int mixedLength
) {
    int i, j;
    int mixedLength_2 = mixedLength / 2;
    for (i = 0, j = 0; i < mixedLength_2; i++, j += 2)
    {
        array1[i] = mixed[j];
        array2[i] = mixed[j+1];
    }
}

With 10 million entries and -O3 (telling the compiler to optimize for maximum speed), I can run this 154 times per second on my Mac.

Here is my first suggestion:

static void structDeint (
    uint8_t * array1, uint8_t * array2, uint8_t * mixed, int mixedLength
) {
    int i;
    int len;
    uint8_t * array1Ptr = (uint8_t *)array1;
    uint8_t * array2Ptr = (uint8_t *)array2;
    struct {
        uint8_t byte1;
        uint8_t byte2;
    } * tb = (void *)mixed;

    len = mixedLength / 2;
    for (i = 0; i < len; i++) {
      *(array1Ptr++) = tb->byte1;
      *(array2Ptr++) = tb->byte2;
      tb++;
    }
}

With the same entry count and optimization as before, I get 193 runs per second.

Now Graham Lee's suggestion:

static void unionDeint (
    uint8_t * array1, uint8_t * array2, uint8_t * mixed, int mixedLength
) {
    union my_union {
        uint16_t wide;
        struct { uint8_t top; uint8_t bottom; } narrow;
    };

    uint16_t * source = (uint16_t *)mixed;
    for (int i = 0; i < mixedLength/2; i++) {
        union my_union cursor;
        cursor.wide = source[i];
        array1[i] = cursor.narrow.top;
        array2[i] = cursor.narrow.bottom;
    }
}

Same setup as before: 198 runs per second. (Note: this method is not endian-safe; the result depends on the CPU's endianness. In your case, array1 and array2 are probably swapped, since ARM is little-endian, so you would have to swap them in the code.)

And here is my best one so far:

static void uint32Deint (
    uint8_t * array1, uint8_t * array2, uint8_t * mixed, int mixedLength
) {
    int i;
    int count;
    uint32_t * fourBytes = (void *)mixed;
    uint8_t * array1Ptr = (uint8_t *)array1;
    uint8_t * array2Ptr = (uint8_t *)array2;


    count = mixedLength / 4;
    for (i = 0; i < count; i++) {
        uint32_t temp = *(fourBytes++);

#if __LITTLE_ENDIAN__
        *(array1Ptr++) = (uint8_t)(temp & 0xFF);
        temp >>= 8;
        *(array2Ptr++) = (uint8_t)(temp & 0xFF);
        temp >>= 8;
        *(array1Ptr++) = (uint8_t)(temp & 0xFF);
        temp >>= 8;
        *(array2Ptr++) = (uint8_t)(temp & 0xFF);

#else
        *(array1Ptr++) = (uint8_t)(temp >> 24);
        *(array2Ptr++) = (uint8_t)((temp >> 16) & 0xFF);
        *(array1Ptr++) = (uint8_t)((temp >>  8) & 0xFF);
        *(array2Ptr++) = (uint8_t)(temp & 0xFF);
#endif
    }
    // Either it is a multiple of 4 or a multiple of 2.
    // If it is a multiple of 2, 2 bytes are left over.
    if (count * 4 != mixedLength) {
        *(array1Ptr) = mixed[mixedLength - 2];
        *(array2Ptr) = mixed[mixedLength - 1];
    }
}

Same setup as above: 219 runs per second, and unless I've made a mistake, it should work with either endianness.
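
Mecki doesn't show the timing harness behind these numbers; purely for illustration, here is a minimal sketch of how such runs-per-second figures could be measured. It assumes one of the functions above (e.g. uint32Deint) is visible in the same file and treats "10 million entries" as a 10,000,000-byte mixed buffer; neither assumption comes from the answer itself.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define ENTRIES 10000000  /* assumed: 10 million interleaved bytes */

int main(void)
{
    uint8_t *mixed  = malloc(ENTRIES);
    uint8_t *array1 = malloc(ENTRIES / 2);
    uint8_t *array2 = malloc(ENTRIES / 2);
    for (int i = 0; i < ENTRIES; i++) mixed[i] = (uint8_t)i;

    /* count how many full de-interleave passes complete in roughly one second */
    clock_t start = clock();
    int runs = 0;
    while (clock() - start < CLOCKS_PER_SEC) {
        uint32Deint(array1, array2, mixed, ENTRIES);
        runs++;
    }
    printf("%d runs per second\n", runs);

    free(mixed);
    free(array1);
    free(array2);
    return 0;
}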

Answer 3 (score: 1)

I recommend Graham's solution, but if speed is really critical and you're willing to go down to assembly, you can get faster still.

The idea is this (a rough C sketch appears at the end of this answer):

  1. Read an entire 32-bit integer from mixed. You get 'a1b2'.

  2. Rotate the lower 16 bits by 8 bits to get '1ab2' (we're working little-endian, since that's the default on ARM and hence on Apple's A-series chips, so the first two bytes are the lower ones).

  3. Rotate the entire 32-bit register right (I think it's right...) by 8 bits to get '21ab'.

  4. Rotate the lower 16 bits by 8 bits to get '12ab'.

  5. Write the lower 16 bits to array2.

  6. Rotate the entire 32-bit register by 16 bits.

  7. Write the lower 16 bits to array1.

  8. Advance array1 by 16 bits, array2 by 16 bits, and mixed by 32 bits.

  9. Repeat.

This trades two memory reads (assuming we use Graham's version or an equivalent) and four memory writes for one memory read, two memory writes, and four register operations. Even though the number of operations goes up from 6 to 7, register operations are faster than memory operations, so it's more efficient. Also, since we read from mixed 32 bits at a time instead of 16, the iteration management is cut in half.

P.S. In theory this could also be done on a 64-bit architecture, but doing all those rotations on 'a1b2c3d4' will drive you insane.
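
For reference, here is a rough C rendering of the same word-at-a-time idea rather than the hand-written assembly the steps describe: one 32-bit read and two 16-bit writes per iteration, with the compiler left to pick the actual rotate/shift instructions. The function name is made up, and it assumes a little-endian machine, a length that is a multiple of 4, and buffers that may safely be accessed 2 and 4 bytes at a time.

#include <stdint.h>
#include <stddef.h>

static void rotateDeint(const uint8_t *mixed, uint8_t *array1, uint8_t *array2, size_t mixedLength)
{
    const uint32_t *src = (const uint32_t *)mixed;
    uint16_t *dst1 = (uint16_t *)array1;
    uint16_t *dst2 = (uint16_t *)array2;

    for (size_t i = 0; i < mixedLength / 4; i++) {
        uint32_t w = src[i];  /* bytes, low to high: a 1 b 2 */
        dst1[i] = (uint16_t)((w & 0xFF) | ((w >> 8) & 0xFF00));         /* a, b */
        dst2[i] = (uint16_t)(((w >> 8) & 0xFF) | ((w >> 16) & 0xFF00)); /* 1, 2 */
    }
}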

Answer 4 (score: 1)

For x86 SSE, you want the pack and punpck instructions. The examples use AVX for the convenience of non-destructive 3-operand instructions. (The 256b-wide AVX2 instructions aren't used, because the 256b pack/unpck instructions do two 128b unpacks in the low and high 128b lanes, so you'd need a shuffle afterwards to get things into the correct final order.)

Intrinsics versions of the following would work the same; the asm instructions are just quicker to type when writing up a quick answer.

Interleave: abcd1234 -> a1b2c3d4

# loop body:
vmovdqu    (%rax), %xmm0  # load the sources
vmovdqu    (%rbx), %xmm1
vpunpcklbw %xmm0, %xmm1, %xmm2  # low  halves -> 128b reg
vpunpckhbw %xmm0, %xmm1, %xmm3  # high halves -> 128b reg
vmovdqu    %xmm2, (%rdi)   # store the results
vmovdqu    %xmm3, 16(%rdi)
# blah blah some loop structure.

`punpcklbw` interleaves the bytes in the low 64 bits of the two source `xmm` registers. There are `..wd` (word->dword) and dword->qword versions, which would be useful for 16-bit or 32-bit elements.
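
As a point of comparison, a minimal intrinsics sketch of the interleave loop body above might look like this (SSE2 rather than AVX, untested; the function name and parameters are invented, and loop/tail handling is omitted):

#include <emmintrin.h>  /* SSE2 */
#include <stdint.h>

/* Interleave one 16-byte chunk of a[] and one of b[] into 32 bytes of ab[]. */
static void interleave_chunk_sse2(const uint8_t *a, const uint8_t *b, uint8_t *ab)
{
    __m128i va = _mm_loadu_si128((const __m128i *)a);
    __m128i vb = _mm_loadu_si128((const __m128i *)b);
    __m128i lo = _mm_unpacklo_epi8(va, vb);   /* a0,b0, ... ,a7,b7   */
    __m128i hi = _mm_unpackhi_epi8(va, vb);   /* a8,b8, ... ,a15,b15 */
    _mm_storeu_si128((__m128i *)ab, lo);
    _mm_storeu_si128((__m128i *)(ab + 16), hi);
}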

De-interleave: a1b2c3d4 -> abcd1234

#outside the loop
vpcmpeqb    %xmm5, %xmm5   # set to all-1s
vpsrlw     $8, %xmm5, %xmm5   # every 16b word has low 8b = 0xFF, high 8b = 0.

# loop body
vmovdqu    (%rsi), %xmm2     # load two src chunks
vmovdqu    16(%rsi), %xmm3
vpand      %xmm2, %xmm5, %xmm0  # mask to leave only the odd bytes
vpand      %xmm3, %xmm5, %xmm1
vpackuswb  %xmm0, %xmm1, %xmm4
vmovdqu    %xmm4, (%rax)    # store 16B of a[]
vpsrlw     $8, %xmm2, %xmm6     # even bytes -> odd bytes
vpsrlw     $8, %xmm3, %xmm7
vpackuswb  %xmm6, %xmm7, %xmm4
vmovdqu    %xmm4, (%rbx)

This could of course use fewer registers. I avoided reusing registers for readability, not performance. Hardware register renaming makes reuse a non-issue, as long as you start with something that doesn't depend on the previous value (e.g. movd, not movss or pinsrd).

De-interleaving is much more work because the pack instructions do signed or unsigned saturation, so the high 8 bits of each 16-bit element have to be zeroed first.
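
A corresponding intrinsics sketch of the de-interleave body (again SSE2, untested, names invented, loop/tail handling omitted): mask off the high byte of every 16-bit word and pack for one output stream, then shift right by 8 and pack for the other.

#include <emmintrin.h>  /* SSE2 */
#include <stdint.h>

/* De-interleave 32 bytes of ab[] into 16 bytes of a[] and 16 bytes of b[]. */
static void deinterleave_chunk_sse2(const uint8_t *ab, uint8_t *a, uint8_t *b)
{
    const __m128i lo8 = _mm_set1_epi16(0x00FF);   /* keep the low byte of each word */
    __m128i v0 = _mm_loadu_si128((const __m128i *)ab);
    __m128i v1 = _mm_loadu_si128((const __m128i *)(ab + 16));
    __m128i va = _mm_packus_epi16(_mm_and_si128(v0, lo8),
                                  _mm_and_si128(v1, lo8));   /* even-position bytes */
    __m128i vb = _mm_packus_epi16(_mm_srli_epi16(v0, 8),
                                  _mm_srli_epi16(v1, 8));    /* odd-position bytes  */
    _mm_storeu_si128((__m128i *)a, va);
    _mm_storeu_si128((__m128i *)b, vb);
}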

An alternative approach would be pshufb to pack the odd or even bytes of a single source register into the low 64 bits of a register. However, outside of the AMD XOP instruction set's VPPERM, there is no shuffle that can select bytes from two registers at once (like Altivec's much-loved vperm). So with just SSE/AVX you'd need two shuffles per 128b of interleaved data. And since store-port usage could be the bottleneck, a punpck to combine two 64-bit chunks of a into a single register to set up a 128b store.

With AMD XOP, the de-interleave would be 2x128b loads, 2 VPPERM, and 2x128b stores.

Answer 5 (score: -1)

  1. Premature optimization is bad.

  2. Your compiler probably optimizes better than you do.

  3. That said, there are things you can do to help the compiler, because you have semantic knowledge of the data that the compiler cannot have:

    1. Read and write as many bytes as you can at a time, up to the native word size - memory operations are expensive, so do the manipulation in registers where possible.

    2. Unroll loops - look into "Duff's Device" (see the sketch at the end of this answer).

    3. FWIW, I produced two versions of your copy loop, one much the same as yours and a second using what most people would consider "optimal" (though still simple) C code:

      void test1(byte *p, byte *p1, byte *p2, int n)
      {
          int i, j;
          for (i = 0, j = 0; i < n / 2; i++, j += 2) {
              p1[i] = p[j];
              p2[i] = p[j + 1];
          }
      }
      
      void test2(byte *p, byte *p1, byte *p2, int n)
      {
          while (n) {
              *p1++ = *p++;
              *p2++ = *p++;
              n--; n--;
          }
      }
      

      When compiled with gcc -O3 -S on Intel x86, both produce almost identical assembly code. Here are the inner loops:

      LBB1_2:
          movb    -1(%rdi), %al
          movb    %al, (%rsi)
          movb    (%rdi), %al
          movb    %al, (%rdx)
          incq    %rsi
          addq    $2, %rdi
          incq    %rdx
          decq    %rcx
          jne LBB1_2
      

      LBB2_2:
          movb    -1(%rdi), %al
          movb    %al, (%rsi)
          movb    (%rdi), %al
          movb    %al, (%rdx)
          incq    %rsi
          addq    $2, %rdi
          incq    %rdx
          addl    $-2, %ecx
          jne LBB2_2
      

      Both have the same number of instructions; the difference is only that the first version counts up to n / 2 while the second counts down to zero.

      EDIT: Here is a better version:

      /* non-portable - assumes little endian */
      void test3(byte *p, byte *p1, byte *p2, int n)
      {
          ushort *ps = (ushort *)p;
      
          n /= 2;
          while (n--) {
              ushort w = *ps++;   /* one byte pair per 16-bit read */
              *p1++ = w;
              *p2++ = w >> 8;
          }
      }
      

      which results in:

      LBB3_2:
          movzwl  (%rdi), %ecx
          movb    %cl, (%rsi)
          movb    %ch, (%rdx)  # NOREX
          addq    $2, %rdi
          incq    %rsi
          incq    %rdx
          decq    %rax
          jne LBB3_2
      

      That's one instruction fewer, because it takes advantage of direct access to %cl and %ch.
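
      As promised above, here is a sketch of the "Duff's Device" suggestion applied to the original byte-pair loop. This is my own illustration rather than part of the answer; it assumes mixedLength is even, and whether it actually beats the compiler's own unrolling would have to be measured.

      #include <stdint.h>

      static void duffDeint(uint8_t *array1, uint8_t *array2, const uint8_t *mixed, int mixedLength)
      {
          int pairs = mixedLength / 2;      /* one byte for each output array per pair */
          if (pairs <= 0)
              return;
          int groups = (pairs + 7) / 8;     /* number of passes through the unrolled body */
          switch (pairs % 8) {              /* jump into the body to handle the remainder */
          case 0: do { *array1++ = *mixed++; *array2++ = *mixed++;
          case 7:      *array1++ = *mixed++; *array2++ = *mixed++;
          case 6:      *array1++ = *mixed++; *array2++ = *mixed++;
          case 5:      *array1++ = *mixed++; *array2++ = *mixed++;
          case 4:      *array1++ = *mixed++; *array2++ = *mixed++;
          case 3:      *array1++ = *mixed++; *array2++ = *mixed++;
          case 2:      *array1++ = *mixed++; *array2++ = *mixed++;
          case 1:      *array1++ = *mixed++; *array2++ = *mixed++;
                  } while (--groups > 0);
          }
      }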