I'm attempting to perform an image colour conversion from YCbCr to BGRA (don't ask about the A bit, it's such a headache).

Anyway, this needs to run as fast as possible, so I've written it using compiler intrinsics to take advantage of SSE2. This is my first venture into SIMD land and I'm basically a beginner, so I'm sure plenty of what I'm doing is inefficient.

The arithmetic code that does the actual colour conversion turns out to be particularly slow, and Intel's VTune shows it as a significant bottleneck.

So, is there any way I can speed up the code below? It works in 32 bits, 4 pixels at a time. I originally tried doing it in 8 bits, 16 pixels at a time (as in the upper loop), but the calculations overflow the integers and corrupt the conversion. The whole process, including the Intel JPEG decode, takes around 14 ms for a single full-HD field. It would be great to get that down to at least 12 ms, ideally 10 ms.

Any help or tips gratefully appreciated. Thanks!
const __m128i s128_8 = _mm_set1_epi8((char)128);
const int nNumPixels = roi.width * roi.height;
for (int i=0; i<nNumPixels; i+=32)
{
    // Go ahead and prefetch our packed UV data.
    // As long as the load stays directly after this, it saves us time.
    _mm_prefetch((const char*)&pSrc8u[2][i], _MM_HINT_T0);
    // We need to fetch and blit out our K before we write over it with UV data.
    __m128i sK1 = _mm_load_si128((__m128i*)&pSrc8u[2][i]);
    __m128i sK2 = _mm_load_si128((__m128i*)&pSrc8u[2][i+16]);
    // Using the destination buffer temporarily here so we don't need to waste time on a memory allocation.
    _mm_store_si128((__m128i*)&m_pKBuffer[i], sK1);
    _mm_store_si128((__m128i*)&m_pKBuffer[i+16], sK2);
    // In theory this prefetch should be issued some cycles ahead of the first read. It isn't, yet it still appears to save time. Worth investigating.
    _mm_prefetch((const char*)&pSrc8u[1][i], _MM_HINT_T0);
    __m128i sUVI1 = _mm_load_si128((__m128i*)&pSrc8u[1][i]);
    __m128i sUVI2 = _mm_load_si128((__m128i*)&pSrc8u[1][i+16]);
    // Subtract the 128 here, ahead of the YCbCr -> BGRA conversion, so we can go 16 pixels at a time rather than 4.
    sUVI1 = _mm_sub_epi8(sUVI1, s128_8);
    sUVI2 = _mm_sub_epi8(sUVI2, s128_8);
    // Swizzle and double up the interleaved UV data from 8x1-byte blocks into planar form.
    __m128i sU1 = _mm_unpacklo_epi8(sUVI1, sUVI1);
    __m128i sV1 = _mm_unpackhi_epi8(sUVI1, sUVI1);
    __m128i sU2 = _mm_unpacklo_epi8(sUVI2, sUVI2);
    __m128i sV2 = _mm_unpackhi_epi8(sUVI2, sUVI2);
    _mm_store_si128((__m128i*)&pSrc8u[1][i], sU1);
    _mm_store_si128((__m128i*)&pSrc8u[1][i+16], sU2);
    _mm_store_si128((__m128i*)&pSrc8u[2][i], sV1);
    _mm_store_si128((__m128i*)&pSrc8u[2][i+16], sV2);
}
const __m128i s16 = _mm_set1_epi32(16);
const __m128i s299 = _mm_set1_epi32(299);
const __m128i s410 = _mm_set1_epi32(410);
const __m128i s518 = _mm_set1_epi32(518);
const __m128i s101 = _mm_set1_epi32(101);
const __m128i s209 = _mm_set1_epi32(209);
Ipp8u* pDstP = pDst8u;
for (int i=0; i<nNumPixels; i+=4, pDstP+=16)
{
    __m128i sK = _mm_set_epi32(m_pKBuffer[i], m_pKBuffer[i+1], m_pKBuffer[i+2], m_pKBuffer[i+3]);
    __m128i sY = _mm_set_epi32(pSrc8u[0][i], pSrc8u[0][i+1], pSrc8u[0][i+2], pSrc8u[0][i+3]);
    __m128i sU = _mm_set_epi32((char)pSrc8u[1][i], (char)pSrc8u[1][i+1], (char)pSrc8u[1][i+2], (char)pSrc8u[1][i+3]);
    __m128i sV = _mm_set_epi32((char)pSrc8u[2][i], (char)pSrc8u[2][i+1], (char)pSrc8u[2][i+2], (char)pSrc8u[2][i+3]);
    // N.B. - Attempted to do the sub-16 in 8 bits, similar to the sub-128 for U and V - however doing it here is quicker,
    // as the time saved on the arithmetic is less than the time taken by the additional loads/stores needed in the swizzle loop.
    sY = _mm_mullo_epi32(_mm_sub_epi32(sY, s16), s299);
    __m128i sR = _mm_srli_epi32(_mm_add_epi32(sY, _mm_mullo_epi32(s410, sV)), 8);
    __m128i sG = _mm_srli_epi32(_mm_sub_epi32(_mm_sub_epi32(sY, _mm_mullo_epi32(s101, sU)), _mm_mullo_epi32(s209, sV)), 8);
    __m128i sB = _mm_srli_epi32(_mm_add_epi32(sY, _mm_mullo_epi32(s518, sU)), 8);
    // Microsoft's YUV conversion, for reference:
    //__m128i sC = _mm_sub_epi32(sY, s16);
    //__m128i sD = _mm_sub_epi32(sU, s128);
    //__m128i sE = _mm_sub_epi32(sV, s128);
    //
    //__m128i sR = _mm_srli_epi32(_mm_add_epi32(_mm_add_epi32(_mm_mullo_epi32(s298, sC), _mm_mullo_epi32(s409, sE)), s128), 8);
    //__m128i sG = _mm_srli_epi32(_mm_add_epi32(_mm_sub_epi32(_mm_mullo_epi32(s298, sC), _mm_sub_epi32(_mm_mullo_epi32(s100, sD), _mm_mullo_epi32(s208, sE))), s128), 8);
    //__m128i sB = _mm_srli_epi32(_mm_add_epi32(_mm_add_epi32(_mm_mullo_epi32(s298, sC), _mm_mullo_epi32(s516, sD)), s128), 8);
    // Interleave K with G and R with B so each 32-bit lane lines up for the final pack.
    __m128i sKGl = _mm_unpacklo_epi32(sK, sG);
    __m128i sKGh = _mm_unpackhi_epi32(sK, sG);
    __m128i sRBl = _mm_unpacklo_epi32(sR, sB);
    __m128i sRBh = _mm_unpackhi_epi32(sR, sB);
    __m128i sKRGB1 = _mm_unpackhi_epi32(sKGh, sRBh);
    __m128i sKRGB2 = _mm_unpacklo_epi32(sKGh, sRBh);
    __m128i sKRGB3 = _mm_unpackhi_epi32(sKGl, sRBl);
    __m128i sKRGB4 = _mm_unpacklo_epi32(sKGl, sRBl);
    // Pack the 32-bit channel values down to bytes, giving 4 KRGB pixels per register.
    __m128i p1 = _mm_packus_epi16(sKRGB1, sKRGB2);
    __m128i p2 = _mm_packus_epi16(sKRGB3, sKRGB4);
    __m128i po = _mm_packus_epi16(p1, p2);
    _mm_store_si128((__m128i*)pDstP, po);
}
Answer 0 (score: 5):
You may well be bandwidth-limited here, as there is very little computation relative to the number of loads and stores.

One suggestion: get rid of the _mm_prefetch intrinsics - they are almost certainly not helping, and may even hinder operation on more recent CPUs, which already do a pretty good job of automatic prefetching.
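For instance, the start of the swizzle loop above with the two prefetches simply removed - a minimal sketch, everything else unchanged:

for (int i = 0; i < nNumPixels; i += 32)
{
    // Let the hardware prefetcher track these sequential streams on its own.
    __m128i sK1 = _mm_load_si128((__m128i*)&pSrc8u[2][i]);
    __m128i sK2 = _mm_load_si128((__m128i*)&pSrc8u[2][i+16]);
    _mm_store_si128((__m128i*)&m_pKBuffer[i], sK1);
    _mm_store_si128((__m128i*)&m_pKBuffer[i+16], sK2);
    __m128i sUVI1 = _mm_load_si128((__m128i*)&pSrc8u[1][i]);
    __m128i sUVI2 = _mm_load_si128((__m128i*)&pSrc8u[1][i+16]);
    // ... rest of the loop body as before ...
}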
Another area to look at:
__m128i sK = _mm_set_epi32(m_pKBuffer[i], m_pKBuffer[i+1], m_pKBuffer[i+2], m_pKBuffer[i+3]);
__m128i sY = _mm_set_epi32(pSrc8u[0][i], pSrc8u[0][i+1], pSrc8u[0][i+2], pSrc8u[0][i+3]);
__m128i sU = _mm_set_epi32((char)pSrc8u[1][i], (char)pSrc8u[1][i+1], (char)pSrc8u[1][i+2], (char)pSrc8u[1][i+3]);
__m128i sV = _mm_set_epi32((char)pSrc8u[2][i], (char)pSrc8u[2][i+1], (char)pSrc8u[2][i+2], (char)pSrc8u[2][i+3]);
This generates lots of unnecessary instructions - you should be using _mm_load_xxx and _mm_unpackxx_xxx here. It will look like more code, but it will be a lot more efficient. And you should probably be processing 16 pixels per iteration of the loop rather than 4 - that way you load one vector of 8-bit values and unpack it, grouping each set of 4 values into a vector of 32-bit ints as needed.
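To illustrate the idea, here is a rough, untested sketch of the load-and-unpack approach for the Y plane, plus a sign-extending variant for U/V (which hold signed values after the earlier -128). One caveat: _mm_set_epi32 fills lanes from high to low, so a straight load puts pixel i in the lowest lane rather than the highest, and the unpack/pack sequence at the end of the conversion loop would need its operand order adjusted to match:

const __m128i zero = _mm_setzero_si128();

// One 16-byte aligned load replaces sixteen scalar fetches.
__m128i y8 = _mm_load_si128((__m128i*)&pSrc8u[0][i]);        // 16 x 8-bit Y

// Zero-extend: 16 x u8 -> 8 x u16 -> 4 x u32, pixels in ascending order.
__m128i y16l = _mm_unpacklo_epi8(y8, zero);                  // pixels 0-7
__m128i y16h = _mm_unpackhi_epi8(y8, zero);                  // pixels 8-15
__m128i sY0 = _mm_unpacklo_epi16(y16l, zero);                // pixels 0-3
__m128i sY1 = _mm_unpackhi_epi16(y16l, zero);                // pixels 4-7
__m128i sY2 = _mm_unpacklo_epi16(y16h, zero);                // pixels 8-11
__m128i sY3 = _mm_unpackhi_epi16(y16h, zero);                // pixels 12-15

// U/V are signed, so sign-extend by unpacking against a sign mask instead.
__m128i u8 = _mm_load_si128((__m128i*)&pSrc8u[1][i]);
__m128i uSign8 = _mm_cmpgt_epi8(zero, u8);                   // 0xFF where negative
__m128i u16l = _mm_unpacklo_epi8(u8, uSign8);                // pixels 0-7, 16-bit
__m128i uSign16 = _mm_cmpgt_epi16(zero, u16l);
__m128i sU0 = _mm_unpacklo_epi16(u16l, uSign16);             // pixels 0-3, 32-bit
// ... likewise for the remaining U lanes and for the V and K planes ...

The conversion arithmetic then runs unchanged on sY0/sU0/etc., still four pixels per vector, but with 16 pixels loaded and unpacked per loop iteration instead of the per-pixel _mm_set_epi32 gathers.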