Currently I am using OpenCV to convert images from YCrCb to RGB:

cv2.cvtColor(arr, cv2.COLOR_YCR_CB2RGB)

Is there any functionality in Pillow / PIL that performs the same colour conversion? At the very least, I would like to perform the colour conversion without needing OpenCV.
I tried the following:
import numpy as np

def _rgb( xxx ):
    y, cb, cr = xxx
    r = y + 1.402 * ( cr - 128 )
    g = y - .34414 * ( cb - 128 ) - .71414 * ( cr - 128 )
    b = y + 1.772 * ( cb - 128 )
    return r, g, b

np.apply_along_axis( _rgb, 2, arr.astype( np.float32 ) ).astype( np.uint8 )
It is very slow and does not work correctly.
Answer 0 (score: 1)
The YCrCb-colorspace to RGB-colorspace conversion states:
R = Y + 1.402 * ( Cr - 128 )
G = Y - 0.34414 * ( Cb - 128 ) - 0.71414 * ( Cr - 128 )
B = Y + 1.772 * ( Cb - 128 )
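
Applying these formulas to a single sample pixel makes the mapping concrete (the values below are picked arbitrarily; results falling outside 0..255 would need clipping):

y, cr, cb = 120., 160., 90.                              # one sample pixel, YCrCb channel order
r = y + 1.402   * ( cr - 128 )                           # -> 164.864
g = y - 0.34414 * ( cb - 128 ) - 0.71414 * ( cr - 128 )  # -> ~110.225
b = y + 1.772   * ( cb - 128 )                           # ->  52.664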
Nota Bene 1:
The openCV sources document the conversion process as performed, per ITU-R Recommendation BT-601, with coefficients that differ slightly from the BT-709 ones listed at http://en.wikipedia.org/wiki/HSL_and_HSV:
R = Y + 1.403 * ( Cr - delta )
G = Y - 0.344 * ( Cb - delta ) - 0.714 * ( Cr - delta )
B = Y + 1.773 * ( Cb - delta )
where
delta = 128 # for 8-bit images CV_8U,
# 32768 # for 16-bit images CV_16U,
# 0.5 # for floating-point images CV_32F.
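
Where openCV is available for cross-checking, the effect of the slightly different coefficients can be measured directly. A quick sketch, assuming a random uint8 test image:

import numpy as np
import cv2

ycrcb = np.random.randint( 0, 256, ( 64, 64, 3 ) ).astype( np.uint8 )

Y, Cr, Cb = [ ycrcb[:,:,i].astype( np.float32 ) for i in range( 3 ) ]
manual = np.dstack( ( Y + 1.402   * ( Cr - 128 ),
                      Y - 0.34414 * ( Cb - 128 ) - 0.71414 * ( Cr - 128 ),
                      Y + 1.772   * ( Cb - 128 )
                      ) )
manual = np.clip( manual, 0, 255 ).astype( np.uint8 )

reference = cv2.cvtColor( ycrcb, cv2.COLOR_YCR_CB2RGB )
print( np.abs( manual.astype( int ) - reference.astype( int ) ).max() )  # expect only a small rounding / coefficient gap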
Nota Bene 2: [ref. below]
Vectorised numpy processing helps here, and numba JIT compilation can add a further speedup:
import numpy as np
import numba

@numba.jit
def translateYCrCb2RGB( a3DMatrixOfUINT8_YCrCb ):             # naive type-checking & no exception handling
    aYCrCb_f32 = a3DMatrixOfUINT8_YCrCb.astype( np.float32 )  # promote first: uint8 maths would wrap around on ( x - 128 )
    aRGB_f32   = np.empty( aYCrCb_f32.shape,
                           dtype = np.float32
                           )
    aRGB_f32[:,:,0] = aYCrCb_f32[:,:,0] \
                    + 1.402   * ( aYCrCb_f32[:,:,1] - 128 )
    aRGB_f32[:,:,1] = aYCrCb_f32[:,:,0] \
                    - 0.34414 * ( aYCrCb_f32[:,:,2] - 128 ) \
                    - 0.71414 * ( aYCrCb_f32[:,:,1] - 128 )
    aRGB_f32[:,:,2] = aYCrCb_f32[:,:,0] \
                    + 1.772   * ( aYCrCb_f32[:,:,2] - 128 )
    return( np.minimum( 255., np.maximum( 0., aRGB_f32 ) ).astype( np.uint8 ) )  # saturate, then narrow back to uint8
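
A quick way to see the JIT benefit is to compare the first (compiling) call against a second, already-compiled one. A sketch, assuming a synthetic Full-HD frame:

import time

aFrame = np.random.randint( 0, 256, ( 1080, 1920, 3 ) ).astype( np.uint8 )

t0 = time.perf_counter(); translateYCrCb2RGB( aFrame ); t1 = time.perf_counter()  # incl. JIT compilation
t2 = time.perf_counter(); translateYCrCb2RGB( aFrame ); t3 = time.perf_counter()  # compiled, steady-state
print( "1st call %.3f s, 2nd call %.3f s" % ( t1 - t0, t3 - t2 ) )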
Further acceleration tricks may help, at the cost of a larger memory footprint or of destructively processing the mutable original YCrCb matrix:
@numba.jit
def translateYCrCb2RGB( Y__slice,   # YCrCb_ORIGINAL[:,:,0], # ... asView
                        Cr_slice,   # YCrCb_ORIGINAL[:,:,1], # ... asView
                        Cb_slice    # YCrCb_ORIGINAL[:,:,2]  # ... asView
                        ):          # naive type-checking & no exception handling
    # NB: the slices ought to be promoted to float beforehand,
    #     otherwise uint8 arithmetic wraps around on ( slice - 128 )
    return( np.dstack( ( Y__slice + 1.402   * ( Cr_slice - 128 ),
                         Y__slice - 0.34414 * ( Cb_slice - 128 ) - 0.71414 * ( Cr_slice - 128 ),
                         Y__slice + 1.772   * ( Cb_slice - 128 )
                         )          # .dstack consumes aTUPLE
                       )
            )
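
A call might then look like this. A sketch, assuming ycrcb holds the original H x W x 3 uint8 data; the up-front float promotion is exactly the larger-memory-footprint trade-off mentioned above:

aF32 = ycrcb.astype( np.float32 )                 # promote once, up front
rgb  = translateYCrCb2RGB( aF32[:,:,0],           # Y  ... asView
                           aF32[:,:,1],           # Cr ... asView
                           aF32[:,:,2]            # Cb ... asView
                           )
rgb  = np.clip( rgb, 0, 255 ).astype( np.uint8 )  # saturate back to 8-bit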
def getCvFromPIL( PILpic ):
    return np.array( PILpic.getdata(),            # .getdata()
                     dtype = np.uint8             # .uint8 type-enforced
                     ).reshape( ( PILpic.size[1], # .reshape rows
                                  PILpic.size[0], #          cols
                                  3               #          z-depth
                                  )               # aTUPLE
                                )[:,:,::-1]       # RGB channel-reverse -> to BGR as cv2 standard representation
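
Usage is straightforward. A sketch, assuming some RGB picture file "test.png" (a hypothetical name):

from PIL import Image

PILpic = Image.open( "test.png" ).convert( "RGB" )  # hypothetical input file
aBGR   = getCvFromPIL( PILpic )                     # H x W x 3 uint8, BGR channel order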
From the openCV sources one may read about the implemented precision of the coefficients:
template<typename _Tp> struct YCrCb2RGB_f
{
    typedef _Tp channel_type;

    YCrCb2RGB_f(int _dstcn, int _blueIdx, const float* _coeffs)
        : dstcn(_dstcn), blueIdx(_blueIdx)
    {
        static const float coeffs0[] = {1.403f, -0.714f, -0.344f, 1.773f};
        memcpy(coeffs, _coeffs ? _coeffs : coeffs0, 4*sizeof(coeffs[0]));
    }
    void operator()(const _Tp* src, _Tp* dst, int n) const
    {
        int dcn = dstcn, bidx = blueIdx;
        const _Tp delta = ColorChannel<_Tp>::half(), alpha = ColorChannel<_Tp>::max();
        float C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], C3 = coeffs[3];
        n *= 3;
        for(int i = 0; i < n; i += 3, dst += dcn)
        {
            _Tp Y  = src[i];
            _Tp Cr = src[i+1];
            _Tp Cb = src[i+2];

            _Tp b = saturate_cast<_Tp>(Y + (Cb - delta)*C3);
            _Tp g = saturate_cast<_Tp>(Y + (Cb - delta)*C2 + (Cr - delta)*C1);
            _Tp r = saturate_cast<_Tp>(Y + (Cr - delta)*C0);

            dst[bidx] = b; dst[1] = g; dst[bidx^2] = r;
            if( dcn == 4 )
                dst[3] = alpha;
        }
    }
    int dstcn, blueIdx;
    float coeffs[4];
};
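
As for the Pillow part of the question: Pillow can perform a BT-601-style YCbCr -> RGB conversion natively via Image.convert(). A minimal sketch, assuming arr is an H x W x 3 uint8 array in YCrCb channel order; note that Pillow's mode is "YCbCr", so the two chroma channels have to be swapped first:

import numpy as np
from PIL import Image

# arr: H x W x 3 uint8, channels ordered Y, Cr, Cb (as in the question)
ycbcr = arr[:, :, (0, 2, 1)]                                  # reorder to Y, Cb, Cr for Pillow
rgb   = np.asarray( Image.fromarray( ycbcr, mode = "YCbCr" )
                         .convert( "RGB" ) )                  # JPEG-style full-range BT-601 transform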