I'm trying to add VP8 encoding and decoding to my streaming project. I read color frames from OpenNI in the openni::PIXEL_FORMAT_RGB888 format into a color frame buffer, then try to compress each RGB888 frame with VP8 and stream it over UDP to my viewer. The viewer decompresses each frame back to RGB and displays it. I have the end-to-end path working with JPEG compression, but when I swap in the VP8 codec I get a mostly green frame with small white blocks at the top.
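For context, the color frames come from an OpenNI 2 color stream configured for PIXEL_FORMAT_RGB888. A simplified sketch of that part (status checks and shutdown omitted; the helper names here are just for illustration, not my exact capture code):

#include <cstdint>
#include <OpenNI.h>

// Simplified OpenNI 2 capture sketch; my real code also checks every openni::Status.
void open_color_stream(openni::Device& device, openni::VideoStream& color)
{
    openni::OpenNI::initialize();
    device.open(openni::ANY_DEVICE);
    color.create(device, openni::SENSOR_COLOR);

    openni::VideoMode mode = color.getVideoMode();
    mode.setPixelFormat(openni::PIXEL_FORMAT_RGB888); // packed R,G,B, 8 bits per channel
    color.setVideoMode(mode);
    color.start();
}

const uint8_t* grab_rgb888_frame(openni::VideoStream& color, openni::VideoFrameRef& frame)
{
    color.readFrame(&frame);
    // frame.getData() is the packed RGB888 buffer that gets copied into my color frame
    // buffer and handed to the encoder; the row stride is frame.getStrideInBytes().
    return static_cast<const uint8_t*>(frame.getData());
}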
I'm using libyuv to convert from RGB to YV12 and back. My VP8 encoding code is:
#define VPX_CODEC_DISABLE_COMPAT 1
#include "vpx/vpx_encoder.h"
#include "vpx/vp8cx.h"
#define VPX_INTERFACE (vpx_codec_vp8_cx())
// Code from http://www.webmproject.org/docs/vp8-sdk/example__simple__encoder.html
void vp8::init(const QUALITY quality)
{
    m_quality = vpx_quality_convert(quality);
    // INFO("VPX interface: %s", vpx_codec_iface_name(VPX_INTERFACE));
    vpx_codec_iface_t* iface = VPX_INTERFACE;
    ASSERT(iface);
    vpx_codec_enc_cfg_t config;
    vpx_codec_err_t res = vpx_codec_enc_config_default(iface, &config, 0);
    ASSERT_THROW(res == VPX_CODEC_OK, "failed to get config: %s", vpx_codec_err_to_string(res));
    // Adjust the default target bit-rate to account for the actual frame size.
    config.rc_target_bitrate = width() * height() *
                               config.rc_target_bitrate / config.g_w / config.g_h;
    config.g_w = width();
    config.g_h = height();
    m_vpx_codec = new vpx_codec_ctx_t;
    res = vpx_codec_enc_init(m_vpx_codec, iface, &config, 0);
    if (res)
    {
        delete m_vpx_codec;
        m_vpx_codec = nullptr;
        THROW("failed to initialize codec: %s", vpx_codec_err_to_string(res));
    }
}
vp8::~vp8()
{
    if (m_vpx_codec)
    {
        vpx_codec_err_t res = vpx_codec_destroy(m_vpx_codec);
        VPX_CODEC_THROW(m_vpx_codec, res, "error destroying vpx codec");
    }
    delete m_vpx_codec;
}
size_t convert_to_yv12(uint8_t* output, const uint8_t* input)
{
    // YV12 is the same as I420 but with the U and V planes switched:
    // full-size Y plane first, then the quarter-size V plane, then the quarter-size U plane.
    uint8_t* y_plane = output;
    uint8_t* v_plane = output + num_frame_pixels() * 1;
    uint8_t* u_plane = output + num_frame_pixels() * 5 / 4;
    switch (input_format())
    {
        case STREAM_FORMAT::COLOR_RGB888:
            // libyuv's RAW format is R,G,B byte order, which matches OpenNI's RGB888 layout.
            libyuv::RAWToI420(input, width() * sizeof(RGB888Pixel),
                              y_plane, width(),
                              u_plane, width() / 2,
                              v_plane, width() / 2,
                              width(), height());
            break;
        default:
            THROW("can't convert format %1%", input_format());
            break;
    }
    return num_frame_pixels() * 3 / 2;
}
size_t vp8::encode(uint8_t* output, const uint8_t* input, const size_t output_buffer_size, const size_t input_buffer_size)
{
    convert_to_yv12(m_work_buf, input);
    // Encode the frame
    vpx_image_t img_wrapper;
    vpx_img_wrap(&img_wrapper, VPX_IMG_FMT_YV12, width(), height(), 1, m_work_buf);
    vpx_codec_err_t res = vpx_codec_encode(m_vpx_codec, &img_wrapper, m_encoder_count, 1,
                                           0, VPX_DL_REALTIME); // quality());
    VPX_CODEC_THROW(m_vpx_codec, res, "write_frames: failed to encode frame");
    vpx_codec_iter_t iter = nullptr;
    const vpx_codec_cx_pkt_t* pkt;
    size_t bytes_written = 0;
    while ((pkt = vpx_codec_get_cx_data(m_vpx_codec, &iter)))
    {
        switch (pkt->kind)
        {
            case VPX_CODEC_CX_FRAME_PKT:
                ASSERT(bytes_written + pkt->data.frame.sz <= output_buffer_size);
                memcpy(output, pkt->data.frame.buf, pkt->data.frame.sz);
                output += pkt->data.frame.sz;
                bytes_written += pkt->data.frame.sz;
                break;
            default: // ignore everything else
                break;
        }
    }
    ASSERT(vpx_codec_get_cx_data(m_vpx_codec, &iter) == nullptr);
    m_encoder_count++;
    return bytes_written;
}
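For completeness, the per-frame caller looks roughly like this; the encode_and_send wrapper, the buffer sizing, and send_datagram are illustrative stand-ins for my actual streaming code:

#include <cstddef>
#include <cstdint>
#include <vector>

// Rough caller sketch. vp8 is the encoder class above; send_datagram() and the
// worst-case buffer size are placeholders for my real UDP transport.
void encode_and_send(vp8& encoder, const uint8_t* rgb888_frame,
                     const size_t frame_width, const size_t frame_height)
{
    std::vector<uint8_t> compressed(frame_width * frame_height * 3); // generous upper bound
    const size_t rgb_size = frame_width * frame_height * 3;          // packed RGB888 input
    const size_t n = encoder.encode(compressed.data(), rgb888_frame,
                                    compressed.size(), rgb_size);
    send_datagram(compressed.data(), n); // one encoded VP8 frame per UDP message
}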
My VP8 decoding code is:
#define VPX_CODEC_DISABLE_COMPAT 1
#include "vpx/vpx_decoder.h"
#include "vpx/vp8dx.h"
#define VPX_INTERFACE (vpx_codec_vp8_dx())
// Code from http://www.webmproject.org/docs/vp8-sdk/example__simple__decoder.html
vp8::vp8()
{
    // INFO("VPX interface: %s", vpx_codec_iface_name(VPX_INTERFACE));
    m_vpx_codec = new vpx_codec_ctx_t;
    vpx_codec_err_t res = vpx_codec_dec_init(m_vpx_codec, VPX_INTERFACE, nullptr, 0);
    VPX_CODEC_THROW(m_vpx_codec, res, "failed to initialize the codec");
}
vp8::~vp8()
{
    if (m_vpx_codec)
    {
        vpx_codec_err_t res = vpx_codec_destroy(m_vpx_codec);
        VPX_CODEC_THROW(m_vpx_codec, res, "error destroying vpx codec");
    }
    delete m_vpx_codec;
}
void vp8::decode(RGB888Pixel* output,
                 const uint8_t* input,
                 const size_t /* output_buffer_size_in_bytes */,
                 const size_t input_buffer_size_in_bytes)
{
    vpx_codec_err_t res = vpx_codec_decode(m_vpx_codec, input, input_buffer_size_in_bytes, nullptr, 0);
    VPX_CODEC_THROW(m_vpx_codec, res, "error decoding frame");
    // Get the decoded image from the decoder (I420 planes); expect exactly one frame per call.
    vpx_codec_iter_t iter = nullptr;
    vpx_image_t* img = vpx_codec_get_frame(m_vpx_codec, &iter);
    ASSERT(vpx_codec_get_frame(m_vpx_codec, &iter) == nullptr);
    ASSERT_EQUAL(img->d_w, width());
    ASSERT_EQUAL(img->d_h, height());
    // Convert the planar output back to packed RGB using the decoder's per-plane strides.
    const uint8_t* y_plane = img->planes[VPX_PLANE_Y];
    const uint8_t* u_plane = img->planes[VPX_PLANE_U];
    const uint8_t* v_plane = img->planes[VPX_PLANE_V];
    int ret = libyuv::I420ToRAW(y_plane, img->stride[VPX_PLANE_Y],
                                u_plane, img->stride[VPX_PLANE_U],
                                v_plane, img->stride[VPX_PLANE_V],
                                (uint8*)output, stride_in_bytes(STREAM_FORMAT::COLOR_RGB888, output_stride_in_pixels()),
                                width(), height());
    ASSERT(ret == 0);
}
Basically, I copied all of this from the VP8 simple encoder and simple decoder examples, but I can't figure out what I'm doing wrong. (Also, I'm struggling because it seems odd that there are no examples of using libyuv together with libvpx, given that both are developed by Google.)
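To at least sanity-check the libyuv half on its own, a minimal self-contained round trip (packed RGB → I420 → packed RGB, no libvpx involved) would look something like the following; the 64x64 gradient pattern is arbitrary, and a few units of per-channel error are expected from 4:2:0 chroma subsampling and fixed-point rounding:

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <vector>
#include "libyuv.h"

// Round-trips a small synthetic RGB image through I420 and back, libvpx not involved.
int main()
{
    const int w = 64, h = 64;
    std::vector<uint8_t> rgb(w * h * 3), back(w * h * 3);
    std::vector<uint8_t> yuv(w * h * 3 / 2); // I420: full-size Y + quarter-size U and V

    for (int y = 0; y < h; ++y)
        for (int x = 0; x < w; ++x)
        {
            uint8_t* p = &rgb[(y * w + x) * 3];
            p[0] = static_cast<uint8_t>(x * 4);       // R
            p[1] = static_cast<uint8_t>(y * 4);       // G
            p[2] = static_cast<uint8_t>((x + y) * 2); // B
        }

    uint8_t* y_plane = yuv.data();
    uint8_t* u_plane = y_plane + w * h;
    uint8_t* v_plane = u_plane + w * h / 4;

    // libyuv's "RAW" is R,G,B byte order (its "RGB24" is B,G,R), matching OpenNI RGB888.
    libyuv::RAWToI420(rgb.data(), w * 3,
                      y_plane, w, u_plane, w / 2, v_plane, w / 2,
                      w, h);
    libyuv::I420ToRAW(y_plane, w, u_plane, w / 2, v_plane, w / 2,
                      back.data(), w * 3,
                      w, h);

    int worst = 0;
    for (size_t i = 0; i < rgb.size(); ++i)
        worst = std::max(worst, std::abs(static_cast<int>(rgb[i]) - static_cast<int>(back[i])));
    std::printf("max per-channel round-trip error: %d\n", worst);
    return 0;
}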
Thanks! @bensch128