I have an ImageReader whose surface is connected to a MediaCodec decoder for rendering:
AMediaCodec* videoDecoder = nullptr;
ANativeWindow* surface = nullptr;
AImageReader* imageReader = nullptr;
AImageReader_ImageListener* imageListener = nullptr;

if ((videoDecoder = AMediaCodec_createDecoderByType(mime)))
{
    // The ImageReader owns the Surface the decoder renders into.
    if (AImageReader_new(mWidth, mHeight, AIMAGE_FORMAT_YUV_420_888, 2, &imageReader) == AMEDIA_OK)
    {
        if (AImageReader_getWindow(imageReader, &surface) == AMEDIA_OK)
        {
            if (AMediaCodec_configure(videoDecoder, mediaFormat, surface, nullptr, 0) == AMEDIA_OK)
            {
                int32_t outputFormat{};
                AMediaFormat_getInt32(AMediaCodec_getOutputFormat(videoDecoder), AMEDIAFORMAT_KEY_COLOR_FORMAT, &outputFormat);

                // Get notified whenever a decoded frame lands in the reader.
                imageListener = new AImageReader_ImageListener();
                imageListener->onImageAvailable = &onImageAvailableCallback;
                AImageReader_setImageListener(imageReader, imageListener);

                if (AMediaCodec_start(videoDecoder) == AMEDIA_OK)
                {
                    configCompleted = true;
                }
                else
                {
                    TRACE("ImporterMP4Android", 0, "Failed to Start Video Decoder");
                }
            }
            else
            {
                TRACE("ImporterMP4Android", 0, "Failed to Configure Video Decoder");
            }
        }
        else
        {
            TRACE("ImporterMP4Android", 0, "Failed to Fetch Surface owned by the ImageReader");
        }
    }
    else
    {
        TRACE("ImporterMP4Android", 0, "Failed to Create ImageReader");
    }
}
else
{
    TRACE("ImporterMP4Android", 0, "Failed to Create Decoder");
}
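Frames only reach the ImageReader once the decoder's output buffers are released with render set to true. For context, a rough (untested) sketch of the decode loop that drives this, assuming the compressed samples come from an AMediaExtractor called extractor and that kTimeoutUs is just an illustrative constant:

constexpr int64_t kTimeoutUs = 10000;
bool inputDone = false, outputDone = false;
while (!outputDone)
{
    if (!inputDone)
    {
        ssize_t inIdx = AMediaCodec_dequeueInputBuffer(videoDecoder, kTimeoutUs);
        if (inIdx >= 0)
        {
            size_t capacity = 0;
            uint8_t* inBuf = AMediaCodec_getInputBuffer(videoDecoder, inIdx, &capacity);
            ssize_t sampleSize = AMediaExtractor_readSampleData(extractor, inBuf, capacity);
            if (sampleSize < 0)
            {
                // No more samples; signal end of stream to the decoder.
                AMediaCodec_queueInputBuffer(videoDecoder, inIdx, 0, 0, 0,
                                             AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM);
                inputDone = true;
            }
            else
            {
                AMediaCodec_queueInputBuffer(videoDecoder, inIdx, 0, sampleSize,
                                             AMediaExtractor_getSampleTime(extractor), 0);
                AMediaExtractor_advance(extractor);
            }
        }
    }

    AMediaCodecBufferInfo info;
    ssize_t outIdx = AMediaCodec_dequeueOutputBuffer(videoDecoder, &info, kTimeoutUs);
    if (outIdx >= 0)
    {
        // render == true pushes the frame into the ImageReader's Surface,
        // which is what triggers onImageAvailableCallback.
        AMediaCodec_releaseOutputBuffer(videoDecoder, outIdx, info.size != 0);
        if (info.flags & AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM)
            outputDone = true;
    }
}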
The onImageAvailableCallback currently looks like this:
void onImageAvailableCallback(void* context, AImageReader* reader)
{
    int32_t format;
    media_status_t status = AImageReader_getFormat(reader, &format);

    AImage* image = nullptr;
    status = AImageReader_acquireLatestImage(reader, &image);
    if (status != AMEDIA_OK || image == nullptr)
        return;

    status = AImage_getFormat(image, &format);
    // TODO: copy *raw data somewhere for downstream processing
    AImage_delete(image);
}
As the TODO comment suggests, I want to copy the raw data of the Image obtained from the ImageReader so that it can be processed further. The interface provided by the Image class lets me query the number of planes and fetch the data of each individual plane, but I'm interested in grabbing the whole frame at once. Any suggestions on how to achieve this?
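The per-plane access referred to above looks roughly like this (untested sketch; processPlane is just a placeholder for whatever consumes the data downstream):

int32_t planeCount = 0;
AImage_getNumberOfPlanes(image, &planeCount);
for (int32_t i = 0; i < planeCount; i++)
{
    uint8_t* data = nullptr;
    int dataLength = 0;
    int32_t pixelStride = 0, rowStride = 0;
    AImage_getPlaneData(image, i, &data, &dataLength);
    AImage_getPlanePixelStride(image, i, &pixelStride);
    AImage_getPlaneRowStride(image, i, &rowStride);
    processPlane(i, data, dataLength, pixelStride, rowStride); // hypothetical consumer
}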
In short, I'm rendering with a MediaCodec video decoder into a Surface owned by an ImageReader, and ultimately want to grab whole decoded video frames from the ImageReader in NV21 (YUV420) format for further processing.
Answer 0 (score: 2):
If you want all three YUV planes, you need to copy them, one by one, into the destination buffer where you want them. You can't expect them to be contiguous; all three planes may be placed arbitrarily far apart in memory. Something like this (untested) is pretty much what you need:
// Y plane followed by interleaved VU (NV21), with chroma dimensions rounded up.
uint8_t *buf = new uint8_t[width*height + 2*((width+1)/2)*((height+1)/2)];
int32_t yPixelStride, yRowStride;
uint8_t *yPtr;
int yLength;
AImage_getPlanePixelStride(image, 0, &yPixelStride);
AImage_getPlaneRowStride(image, 0, &yRowStride);
AImage_getPlaneData(image, 0, &yPtr, &yLength);
if (yPixelStride == 1) {
// All pixels in a row are contiguous; copy one line at a time.
for (int y = 0; y < height; y++)
memcpy(buf + y*width, yPtr + y*yRowStride, width);
} else {
// Highly improbable, but not disallowed by the API. In this case
// individual pixels aren't stored consecutively but sparsely with
// other data in between each pixel.
for (int y = 0; y < height; y++)
for (int x = 0; x < width; x++)
buf[y*width+x] = yPtr[y*yRowStride + x*yPixelStride];
}
int32_t cbPixelStride, crPixelStride, cbRowStride, crRowStride;
uint8_t *cbPtr, *crPtr;
int cbLength, crLength;
AImage_getPlanePixelStride(image, 1, &cbPixelStride);
AImage_getPlaneRowStride(image, 1, &cbRowStride);
AImage_getPlaneData(image, 1, &cbPtr, &cbLength);
AImage_getPlanePixelStride(image, 2, &crPixelStride);
AImage_getPlaneRowStride(image, 2, &crRowStride);
AImage_getPlaneData(image, 2, &crPtr, &crLength);
uint8_t *chromaBuf = &buf[width*height];
int chromaBufStride = 2*((width + 1)/2);
if (cbPixelStride == 2 && crPixelStride == 2 &&
    cbRowStride == crRowStride && cbPtr == crPtr + 1) {
    // The actual cb/cr planes happened to be laid out in
    // exact NV21 form (interleaved, cr first) in memory; copy them as is
    for (int y = 0; y < (height + 1)/2; y++)
        memcpy(chromaBuf + y*chromaBufStride, crPtr + y*crRowStride, chromaBufStride);
} else if (cbPixelStride == 2 && crPixelStride == 2 &&
           cbRowStride == crRowStride && crPtr == cbPtr + 1) {
// The cb/cr planes happened to be laid out in exact NV12 form
// in memory; if the destination API can use NV12 in addition to
// NV21 do something similar as above, but using cbPtr instead of crPtr.
// If not, remove this clause and use the generic code below.
} else {
if (cbPixelStride == 1 && crPixelStride == 1) {
// Continuous cb/cr planes; the input data was I420/YV12 or similar;
// copy it into NV21 form
for (int y = 0; y < (height + 1)/2; y++) {
for (int x = 0; x < (width + 1)/2; x++) {
chromaBuf[y*chromaBufStride + 2*x + 0] = crPtr[y*crRowStride + x];
chromaBuf[y*chromaBufStride + 2*x + 1] = cbPtr[y*cbRowStride + x];
}
}
} else {
// Generic data copying into NV21
for (int y = 0; y < (height + 1)/2; y++) {
for (int x = 0; x < (width + 1)/2; x++) {
chromaBuf[y*chromaBufStride + 2*x + 0] = crPtr[y*crRowStride + x*crPixelStride];
chromaBuf[y*chromaBufStride + 2*x + 1] = cbPtr[y*cbRowStride + x*cbPixelStride];
}
}
}
}
However, many APIs can take the data in the form of three pointers, one to the start of each plane, plus a row stride for each plane; that lets you reduce (or avoid) the copying. For the chroma planes, you may still want to determine whether the layout is I420/NV12/NV21 and pass it to the destination API as is. If you can't match it to a specific pixel format layout that the destination API supports, you'll need to repack it into a local buffer with a known, supported layout, as the code above does.
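As a rough illustration of that pointer-plus-stride approach (untested sketch; consumeYuvFrame is a made-up stand-in for whatever destination API you have, and it assumes that API can cope with the chroma pixel stride it is given):

int32_t yRowStride, cbRowStride, crRowStride;
uint8_t *yPtr, *cbPtr, *crPtr;
int yLen, cbLen, crLen;
AImage_getPlaneRowStride(image, 0, &yRowStride);
AImage_getPlaneRowStride(image, 1, &cbRowStride);
AImage_getPlaneRowStride(image, 2, &crRowStride);
AImage_getPlaneData(image, 0, &yPtr, &yLen);
AImage_getPlaneData(image, 1, &cbPtr, &cbLen);
AImage_getPlaneData(image, 2, &crPtr, &crLen);
// The pointers are only valid while the AImage is alive, i.e. before AImage_delete().
consumeYuvFrame(yPtr, yRowStride,
                cbPtr, cbRowStride,
                crPtr, crRowStride,
                width, height);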