I converted a BRISK function (used to downsample an image) from SSE intrinsics to ARM NEON intrinsics so that it can run on the ARM architecture. BRISK uses the SSE version when SSE is supported and falls back to the OpenCV function otherwise; the SSE version is of course faster. I converted the SSE function to ARM NEON step by step, but when I measure the execution time and compare it with the OpenCV resize function, my function turns out to be slower (0.2 ms vs 0.4 ms). Here is the code:
SSE:
inline void BriskLayer::halfsample(const cv::Mat& srcimg, cv::Mat& dstimg){
    const unsigned short leftoverCols = ((srcimg.cols%16)/2);// take care with border...
    const bool noleftover = (srcimg.cols%16)==0; // note: leftoverCols can be zero but this still false...
    // make sure the destination image is of the right size:
    assert(srcimg.cols/2==dstimg.cols);
    assert(srcimg.rows/2==dstimg.rows);
    // mask needed later:
    register __m128i mask = _mm_set_epi32 (0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF);
    // to be added in order to make successive averaging correct:
    register __m128i ones = _mm_set_epi32 (0x11111111, 0x11111111, 0x11111111, 0x11111111);
    // data pointers:
    __m128i* p1=(__m128i*)srcimg.data;
    __m128i* p2=(__m128i*)(srcimg.data+srcimg.cols);
    __m128i* p_dest=(__m128i*)dstimg.data;
    unsigned char* p_dest_char;//=(unsigned char*)p_dest;
    // size:
    const unsigned int size = (srcimg.cols*srcimg.rows)/16;
    const unsigned int hsize = srcimg.cols/16;
    __m128i* p_end=p1+size;
    unsigned int row=0;
    const unsigned int end=hsize/2;
    bool half_end;
    if(hsize%2==0)
        half_end=false;
    else
        half_end=true;
    while(p2<p_end){
        for(unsigned int i=0; i<end;i++){
            // load the two blocks of memory:
            __m128i upper;
            __m128i lower;
            if(noleftover){
                upper=_mm_load_si128(p1);
                lower=_mm_load_si128(p2);
            }
            else{
                upper=_mm_loadu_si128(p1);
                lower=_mm_loadu_si128(p2);
            }
            __m128i result1=_mm_adds_epu8 (upper, ones);
            result1=_mm_avg_epu8 (upper, lower);
            // increment the pointers:
            p1++;
            p2++;
            // load the two blocks of memory:
            upper=_mm_loadu_si128(p1);
            lower=_mm_loadu_si128(p2);
            __m128i result2=_mm_adds_epu8 (upper, ones);
            result2=_mm_avg_epu8 (upper, lower);
            // calculate the shifted versions:
            __m128i result1_shifted = _mm_srli_si128 (result1, 1);
            __m128i result2_shifted = _mm_srli_si128 (result2, 1);
            // pack:
            __m128i result=_mm_packus_epi16 (_mm_and_si128 (result1, mask),
                                             _mm_and_si128 (result2, mask));
            __m128i result_shifted = _mm_packus_epi16 (_mm_and_si128 (result1_shifted, mask),
                                                       _mm_and_si128 (result2_shifted, mask));
            // average for the second time:
            result=_mm_avg_epu8(result,result_shifted);
            // store to memory
            _mm_storeu_si128 (p_dest, result);
            // increment the pointers:
            p1++;
            p2++;
            p_dest++;
            //p_dest_char=(unsigned char*)p_dest;
        }
        // if we are not at the end of the row, do the rest:
        if(half_end){
            // load the two blocks of memory:
            __m128i upper;
            __m128i lower;
            if(noleftover){
                upper=_mm_load_si128(p1);
                lower=_mm_load_si128(p2);
            }
            else{
                upper=_mm_loadu_si128(p1);
                lower=_mm_loadu_si128(p2);
            }
            __m128i result1=_mm_adds_epu8 (upper, ones);
            result1=_mm_avg_epu8 (upper, lower);
            // increment the pointers:
            p1++;
            p2++;
            // compute horizontal pairwise average and store
            p_dest_char=(unsigned char*)p_dest;
            const UCHAR_ALIAS* result=(UCHAR_ALIAS*)&result1;
            for(unsigned int j=0; j<8; j++){
                *(p_dest_char++)=(*(result+2*j)+*(result+2*j+1))/2;
            }
            //p_dest_char=(unsigned char*)p_dest;
        }
        else{
            p_dest_char=(unsigned char*)p_dest;
        }
        if(noleftover){
            row++;
            p_dest=(__m128i*)(dstimg.data+row*dstimg.cols);
            p1=(__m128i*)(srcimg.data+2*row*srcimg.cols);
            //p2=(__m128i*)(srcimg.data+(2*row+1)*srcimg.cols);
            //p1+=hsize;
            p2=p1+hsize;
        }
        else{
            const unsigned char* p1_src_char=(unsigned char*)(p1);
            const unsigned char* p2_src_char=(unsigned char*)(p2);
            for(unsigned int k=0; k<leftoverCols; k++){
                unsigned short tmp = p1_src_char[k]+p1_src_char[k+1]+
                                     p2_src_char[k]+p2_src_char[k+1];
                *(p_dest_char++)=(unsigned char)(tmp/4);
            }
            // done with the two rows:
            row++;
            p_dest=(__m128i*)(dstimg.data+row*dstimg.cols);
            p1=(__m128i*)(srcimg.data+2*row*srcimg.cols);
            p2=(__m128i*)(srcimg.data+(2*row+1)*srcimg.cols);
        }
    }
}
ARM NEON:
void halfsample(const cv::Mat& srcimg, cv::Mat& dstimg){
    const unsigned short leftoverCols = ((srcimg.cols%16)/2);// take care with border...
    const bool noleftover = (srcimg.cols%16)==0; // note: leftoverCols can be zero but this still false...
    // make sure the destination image is of the right size:
    //assert(srcimg.cols/2==dstimg.cols);
    //assert(srcimg.rows/2==dstimg.rows);
    //int32x4_t zero = vdupq_n_s8(0);
    // mask needed later:
    //register __m128i mask = _mm_set_epi32 (0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF);
    int32x4_t mask = vdupq_n_s32(0x00FF00FF);
    // to be added in order to make successive averaging correct:
    int32x4_t ones = vdupq_n_s32(0x11111111);
    print128_numhex(mask);
    // data pointers:
    int32_t* p1=(int32_t*)srcimg.data;
    int32_t* p2=(int32_t*)(srcimg.data+srcimg.cols);
    int32_t* p_dest=(int32_t*)dstimg.data;
    unsigned char* p_dest_char;//=(unsigned char*)p_dest;
    int k=0;
    // size:
    const unsigned int size = (srcimg.cols*srcimg.rows)/16;
    const unsigned int hsize = srcimg.cols/16;
    int32_t* p_end=p1+size*4;
    unsigned int row=0;
    const unsigned int end=hsize/2;
    bool half_end;
    if(hsize%2==0)
        half_end=false;
    else
        half_end=true;
    while(p2<p_end){
        k++;
        for(unsigned int i=0; i<end;i++){
            // load the two blocks of memory:
            int32x4_t upper;
            int32x4_t lower;
            if(noleftover){
                upper=vld1q_s32(p1);
                lower=vld1q_s32(p2);
            }
            else{
                upper=vld1q_s32(p1);
                lower=vld1q_s32(p2);
            }
            int32x4_t result1=vaddq_s32(upper, ones);
            result1=vrhaddq_u8(upper, lower);
            // increment the pointers:
            p1=p1+4;
            p2=p2+4;
            // load the two blocks of memory:
            upper=vld1q_s32(p1);
            lower=vld1q_s32(p2);
            int32x4_t result2=vaddq_s32(upper, ones);
            result2=vrhaddq_u8(upper, lower);
            // calculate the shifted versions:
            int32x4_t result1_shifted = vextq_u8(result1,vmovq_n_u8(0),1);
            int32x4_t result2_shifted = vextq_u8(result2,vmovq_n_u8(0),1);
            // pack:
            int32x4_t result= vcombine_u8(vqmovn_u16(vandq_u32(result1, mask)),
                                          vqmovn_u16(vandq_u32 (result2, mask)));
            int32x4_t result_shifted = vcombine_u8(vqmovn_u16(vandq_u32 (result1_shifted, mask)),
                                                   vqmovn_u16(vandq_u32(result2_shifted, mask)));
            // average for the second time:
            result=vrhaddq_u8(result,result_shifted);
            // store to memory
            vst1q_s32(p_dest, result);
            // increment the pointers:
            p1=p1+4;
            p2=p2+4;
            p_dest=p_dest+4;
            //p_dest_char=(unsigned char*)p_dest;
        }
        // if we are not at the end of the row, do the rest:
        if(half_end){
            std::cout<<"entra in half_end" << std::endl;
            // load the two blocks of memory:
            int32x4_t upper;
            int32x4_t lower;
            if(noleftover){
                upper=vld1q_s32(p1);
                lower=vld1q_s32(p2);
            }
            else{
                upper=vld1q_s32(p1);
                lower=vld1q_s32(p2);
            }
            int32x4_t result1=vqaddq_s32(upper, ones);
            result1=vrhaddq_u8(upper, lower);
            // increment the pointers:
            p1=p1+4;
            p2=p2+4;
            // compute horizontal pairwise average and store
            p_dest_char=(unsigned char*)p_dest;
            const UCHAR_ALIAS* result=(UCHAR_ALIAS*)&result1;
            for(unsigned int j=0; j<8; j++){
                *(p_dest_char++)=(*(result+2*j)+*(result+2*j+1))/2;
            }
            //p_dest_char=(unsigned char*)p_dest;
        }
        else{
            p_dest_char=(unsigned char*)p_dest;
        }
        if(noleftover){
            row++;
            p_dest=(int32_t*)(dstimg.data+row*dstimg.cols);
            p1=(int32_t*)(srcimg.data+2*row*srcimg.cols);
            //p2=(__m128i*)(srcimg.data+(2*row+1)*srcimg.cols);
            //p1+=hsize;
            p2=p1+hsize*4;
        }
        else{
            const unsigned char* p1_src_char=(unsigned char*)(p1);
            const unsigned char* p2_src_char=(unsigned char*)(p2);
            for(unsigned int k=0; k<leftoverCols; k++){
                unsigned short tmp = p1_src_char[k]+p1_src_char[k+1]+
                                     p2_src_char[k]+p2_src_char[k+1];
                *(p_dest_char++)=(unsigned char)(tmp/4);
            }
            // done with the two rows:
            row++;
            p_dest=(int32_t*)(dstimg.data+row*dstimg.cols);
            p1=(int32_t*)(srcimg.data+2*row*srcimg.cols);
            p2=(int32_t*)(srcimg.data+(2*row+1)*srcimg.cols);
        }
    }
}
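For reference, the intrinsic-for-intrinsic replacements used in the port are roughly the following (this is only a summary of the two listings above, not additional code):

// SSE intrinsic (original)            NEON replacement used in the port
// _mm_load_si128 / _mm_loadu_si128 -> vld1q_s32
// _mm_adds_epu8                    -> vaddq_s32 / vqaddq_s32
// _mm_avg_epu8                     -> vrhaddq_u8
// _mm_srli_si128 (byte shift)      -> vextq_u8 with a zero vector (vmovq_n_u8(0))
// _mm_and_si128                    -> vandq_u32
// _mm_packus_epi16                 -> vqmovn_u16 + vcombine_u8
// _mm_storeu_si128                 -> vst1q_s32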
The output of the ARM and SSE functions is exactly the same; the problem is the execution time.
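A minimal sketch of how such a comparison can be measured (the harness function name, the use of cv::getTickCount, and the INTER_AREA flag are assumptions of mine, not the exact measurement code; a single-channel CV_8UC1 input is assumed):

#include <opencv2/opencv.hpp>
#include <cstdint>
#include <iostream>

// Hypothetical timing harness: compares the halfsample() above with cv::resize.
void time_halfsample(const cv::Mat& src) {
    cv::Mat dst1(src.rows / 2, src.cols / 2, CV_8UC1);
    cv::Mat dst2(src.rows / 2, src.cols / 2, CV_8UC1);

    int64_t t0 = cv::getTickCount();
    halfsample(src, dst1);                                  // the NEON (or SSE) version above
    int64_t t1 = cv::getTickCount();
    cv::resize(src, dst2, dst2.size(), 0, 0, cv::INTER_AREA);
    int64_t t2 = cv::getTickCount();

    const double ms = 1000.0 / cv::getTickFrequency();      // ticks -> milliseconds
    std::cout << "halfsample: " << (t1 - t0) * ms << " ms, "
              << "cv::resize: " << (t2 - t1) * ms << " ms" << std::endl;
}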
Answer (score: 0):
You should be aware that neither intrinsics nor inline assembly can be as "perfect" as hand-written code in native assembly.
Worse, compilers - open-source compilers such as GCC in particular - sometimes emit unnecessary instructions that cause pipeline stalls, and those stalls can easily cost well over ten cycles. When that happens inside the innermost loop, it is fatal for performance.
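As a concrete illustration of avoidable work in your innermost loop: the result of vaddq_s32(upper, ones) is overwritten on the very next line, and the mask/shift/pack sequence can be collapsed into a single de-interleave. A minimal, type-consistent sketch of one inner step, assuming 8-bit grayscale rows whose width is a multiple of 32 (the function and pointer names are mine; this is only a sketch of the direction, not a drop-in replacement - the leftover-column handling from your code would still be needed):

#include <arm_neon.h>
#include <stdint.h>

// 2x2 box-average of a 32-pixel-wide strip of two source rows -> 16 output pixels.
static inline void halfsample_step_neon(const uint8_t* row0, const uint8_t* row1,
                                        uint8_t* dst)
{
    uint8x16_t u0 = vld1q_u8(row0);        // row 0, columns  0..15
    uint8x16_t u1 = vld1q_u8(row0 + 16);   // row 0, columns 16..31
    uint8x16_t l0 = vld1q_u8(row1);        // row 1, columns  0..15
    uint8x16_t l1 = vld1q_u8(row1 + 16);   // row 1, columns 16..31

    // vertical rounding average of the two rows:
    uint8x16_t v0 = vrhaddq_u8(u0, l0);
    uint8x16_t v1 = vrhaddq_u8(u1, l1);

    // de-interleave even/odd columns, then average them horizontally:
    uint8x16x2_t z = vuzpq_u8(v0, v1);     // z.val[0] = even columns, z.val[1] = odd columns
    uint8x16_t out = vrhaddq_u8(z.val[0], z.val[1]);

    vst1q_u8(dst, out);                    // 16 downsampled pixels
}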
Why not post the disassembly of the code? Anyone having problems with intrinsics should look at that first. (And stop using intrinsics as soon as you can.)
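For example (the exact toolchain is an assumption on my part), compiling the translation unit with something like g++ -O3 -mfpu=neon -S dumps the generated assembly to a .s file, or objdump -d can be run on the compiled object file; either output shows whether the compiler is inserting extra moves or spilling NEON registers inside the inner loop.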