I am trying to implement https://android.googlesource.com/platform/cts/+/jb-mr2-release/tests/tests/media/src/android/media/cts/DecodeEditEncodeTest.java, but I modified the source to take an MP4 video file as input. The MIME type is video/avc, the bitrate is 288 kbps, the I-frame interval is 100, the width is 176, and the height is 144. The file size is 6 MB. When I decode the video and put the frames on the output surface, I can save a frame to a bitmap and the frame looks fine. But at the end, after encoding (with the same parameters as the original video), I get a 700 KB file and I can't play the video (probably a corrupt file).
extractor = new MediaExtractor();
extractor.SetDataSource(filePath);
for (int i = 0; i < extractor.TrackCount; i++)
{
    inputFormat = extractor.GetTrackFormat(i);
    string mime = inputFormat.GetString(MediaFormat.KeyMime);
    if (mime.StartsWith("video/"))
    {
        extractor.SelectTrack(i);
        mimeType = mime;
        break;
    }
}
mWidth = inputFormat.GetInteger(MediaFormat.KeyWidth);
mHeight = inputFormat.GetInteger(MediaFormat.KeyHeight);

// Create an encoder format that matches the input format. (Might be able to just
// re-use the format used to generate the video, since we want it to be the same.)
MediaFormat outputFormat = MediaFormat.CreateVideoFormat(mimeType, mWidth, mHeight);
outputFormat.SetInteger(MediaFormat.KeyColorFormat,
        (int)MediaCodecCapabilities.Formatsurface);
outputFormat.SetInteger(MediaFormat.KeyBitRate, 288000);
outputFormat.SetInteger(MediaFormat.KeyFrameRate,
        inputFormat.GetInteger(MediaFormat.KeyFrameRate));
outputFormat.SetInteger(MediaFormat.KeyIFrameInterval, 100);
outputData.setMediaFormat(outputFormat);

encoder = MediaCodec.CreateEncoderByType(mimeType);
encoder.Configure(outputFormat, null, null, MediaCodecConfigFlags.Encode);
inputSurface = new InputSurface(encoder.CreateInputSurface());
inputSurface.makeCurrent();
encoder.Start();

// OutputSurface uses the EGL context created by InputSurface.
decoder = MediaCodec.CreateDecoderByType(mimeType);
outputSurface = new OutputSurface();
outputSurface.changeFragmentShader(FRAGMENT_SHADER);
decoder.Configure(inputFormat, outputSurface.getSurface(), null, 0);
decoder.Start();

editVideoData2(extractor, decoder, outputSurface, inputSurface, encoder, outputData);
And here is the decode/encode part:
while (!outputDone)
{
    if (VERBOSE) Log.Debug(TAG, "edit loop");

    // Feed more data to the decoder.
    if (!inputDone)
    {
        int inputBufIndex = decoder.DequeueInputBuffer(TIMEOUT_USEC);
        if (inputBufIndex >= 0)
        {
            ByteBuffer buffer = decoderInputBuffers[inputBufIndex];
            int sampleSize = extractor.ReadSampleData(buffer, 0);
            if (sampleSize < 0)
            {
                inputChunk++;
                // End of stream -- send an empty frame with the EOS flag set.
                decoder.QueueInputBuffer(inputBufIndex, 0, 0, 0L,
                        MediaCodecBufferFlags.EndOfStream);
                inputDone = true;
                if (VERBOSE) Log.Debug(TAG, "sent input EOS (with zero-length frame)");
            }
            else
            {
                // Copy a chunk of input from the extractor to the decoder.
                buffer.Clear();
                decoder.QueueInputBuffer(inputBufIndex, 0, sampleSize, extractor.SampleTime, 0);
                extractor.Advance();
                inputChunk++;
            }
        }
        else
        {
            if (VERBOSE) Log.Debug(TAG, "input buffer not available");
        }
    }

    // Assume output is available. Loop until both assumptions are false.
    bool decoderOutputAvailable = !decoderDone;
    bool encoderOutputAvailable = true;
    while (decoderOutputAvailable || encoderOutputAvailable)
    {
        // Start by draining any pending output from the encoder. It's important to
        // do this before we try to stuff any more data in.
        int encoderStatus = encoder.DequeueOutputBuffer(info, TIMEOUT_USEC);
        if (encoderStatus == (int)MediaCodecInfoState.TryAgainLater)
        {
            // No output available yet.
            if (VERBOSE) Log.Debug(TAG, "no output from encoder available");
            encoderOutputAvailable = false;
        }
        else if (encoderStatus == (int)MediaCodecInfoState.OutputBuffersChanged)
        {
            encoderOutputBuffers = encoder.GetOutputBuffers();
            if (VERBOSE) Log.Debug(TAG, "encoder output buffers changed");
        }
        else if (encoderStatus == (int)MediaCodecInfoState.OutputFormatChanged)
        {
            MediaFormat newFormat = encoder.OutputFormat;
            if (VERBOSE) Log.Debug(TAG, "encoder output format changed: " + newFormat);
        }
        else if (encoderStatus < 0)
        {
            Log.Error(TAG, "unexpected result from encoder.DequeueOutputBuffer: " + encoderStatus);
        }
        else // encoderStatus >= 0
        {
            ByteBuffer encodedData = encoderOutputBuffers[encoderStatus];
            if (encodedData == null)
            {
                Log.Error(TAG, "encoderOutputBuffer " + encoderStatus + " was null");
            }
            // Write the data to the output "file".
            if (info.Size != 0)
            {
                encodedData.Position(info.Offset);
                encodedData.Limit(info.Offset + info.Size);
                byte[] data = new byte[encodedData.Remaining()];
                encodedData.Get(data);
                fStream.Write(data, 0, data.Length);
                // outputData.addChunk(encodedData, (int)info.Flags, info.PresentationTimeUs);
                outputCount++;
                if (VERBOSE) Log.Debug(TAG, "encoder output " + info.Size + " bytes");
            }
            outputDone = (info.Flags & MediaCodecBufferFlags.EndOfStream) != 0;
            encoder.ReleaseOutputBuffer(encoderStatus, false);
        }
        if (encoderStatus != (int)MediaCodecInfoState.TryAgainLater)
        {
            // Continue attempts to drain output.
            continue;
        }

        // Encoder is drained; check to see if we've got a new frame of output from
        // the decoder. (The output is going to a Surface, rather than a ByteBuffer,
        // but we still get information through BufferInfo.)
        if (!decoderDone)
        {
            int decoderStatus = decoder.DequeueOutputBuffer(info, TIMEOUT_USEC);
            if (decoderStatus == (int)MediaCodecInfoState.TryAgainLater)
            {
                // No output available yet.
                if (VERBOSE) Log.Debug(TAG, "no output from decoder available");
                decoderOutputAvailable = false;
            }
            else if (decoderStatus == (int)MediaCodecInfoState.OutputBuffersChanged)
            {
                //decoderOutputBuffers = decoder.GetOutputBuffers();
                if (VERBOSE) Log.Debug(TAG, "decoder output buffers changed (we don't care)");
            }
            else if (decoderStatus == (int)MediaCodecInfoState.OutputFormatChanged)
            {
                // Expected before the first buffer of data.
                MediaFormat newFormat = decoder.OutputFormat;
                if (VERBOSE) Log.Debug(TAG, "decoder output format changed: " + newFormat);
            }
            else if (decoderStatus < 0)
            {
                Log.Error(TAG, "unexpected result from decoder.DequeueOutputBuffer: " + decoderStatus);
            }
            else // decoderStatus >= 0
            {
                if (VERBOSE) Log.Debug(TAG, "surface decoder given buffer "
                        + decoderStatus + " (size=" + info.Size + ")");
                // The ByteBuffers are null references, but we still get a nonzero
                // size for the decoded data.
                bool doRender = (info.Size != 0);
                // As soon as we call ReleaseOutputBuffer, the buffer will be forwarded
                // to SurfaceTexture to convert to a texture. The API doesn't
                // guarantee that the texture will be available before the call
                // returns, so we need to wait for the onFrameAvailable callback to
                // fire. If we don't wait, we risk rendering from the previous frame.
                decoder.ReleaseOutputBuffer(decoderStatus, doRender);
                if (doRender)
                {
                    // This waits for the image and renders it after it arrives.
                    if (VERBOSE) Log.Debug(TAG, "awaiting frame");
                    outputSurface.awaitNewImage();
                    outputSurface.drawImage();
                    outputSurface.saveFrame(Android.OS.Environment.ExternalStorageDirectory + "/test.jpg", mWidth, mHeight);
                    // Send it to the encoder.
                    inputSurface.setPresentationTime(info.PresentationTimeUs * 1000);
                    if (VERBOSE) Log.Debug(TAG, "swapBuffers");
                    inputSurface.swapBuffers();
                }
                if ((info.Flags & MediaCodecBufferFlags.EndOfStream) != 0)
                {
                    // Forward the decoder EOS to the encoder.
                    if (VERBOSE) Log.Debug(TAG, "signaling input EOS");
                    if (WORK_AROUND_BUGS)
                    {
                        // Bail early, possibly dropping a frame.
                        return;
                    }
                    else
                    {
                        encoder.SignalEndOfInputStream();
                    }
                }
            }
        }
    }
}
if (inputChunk != outputCount)
{
    throw new RuntimeException("frame lost: " + inputChunk + " in, " +
            outputCount + " out");
}
fStream.Close();
Since I can save a frame to an image and it looks fine, I assume the frames reaching the OutputSurface are good, and I don't see anything strange in the encoder configuration. Can you help me, or at least suggest what I should check? Thanks.
Answer 0 (score: 2)
I forgot to add a MediaMuxer, as fadden said. If you replace the fStream part with MediaMuxer.WriteSampleData, and add the Start(), Stop(), and AddTrack() calls, it works fine. Without the muxer, the output file is just a raw encoded stream with no MP4 container, which is why it wouldn't play. Anyone can use this code as a decode-edit-encode example; a minimal sketch of the muxer change is below.
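Here is a minimal sketch of that change. The MuxerSink class and its field names (mMuxer, mTrackIndex, mMuxerStarted) are illustrative, not from my actual code; only the MediaMuxer calls themselves come from the Android API.

using Android.Media;
using Java.Nio;

// Illustrative helper that wraps the MediaMuxer calls the fix needs.
class MuxerSink
{
    readonly MediaMuxer mMuxer;
    int mTrackIndex = -1;
    bool mMuxerStarted;

    public MuxerSink(string outputPath)
    {
        // MuxerOutputType.Mpeg4 supplies the .mp4 container the raw stream was missing.
        mMuxer = new MediaMuxer(outputPath, MuxerOutputType.Mpeg4);
    }

    // Call this from the encoder's OutputFormatChanged branch with encoder.OutputFormat;
    // that format carries the codec config (csd-0/csd-1) the muxer needs.
    public void Start(MediaFormat encoderFormat)
    {
        mTrackIndex = mMuxer.AddTrack(encoderFormat);
        mMuxer.Start();
        mMuxerStarted = true;
    }

    // Replaces the fStream.Write(...) block in the encoderStatus >= 0 branch.
    public void WriteSample(ByteBuffer encodedData, MediaCodec.BufferInfo info)
    {
        if ((info.Flags & MediaCodecBufferFlags.CodecConfig) != 0)
        {
            // Codec config already reached the muxer via AddTrack; don't write it again.
            return;
        }
        // OutputFormatChanged arrives before the first data buffer, so by the time
        // real samples show up the muxer should already be started.
        if (info.Size == 0 || !mMuxerStarted)
        {
            return;
        }
        encodedData.Position(info.Offset);
        encodedData.Limit(info.Offset + info.Size);
        mMuxer.WriteSampleData(mTrackIndex, encodedData, info);
    }

    // Replaces fStream.Close() once outputDone is true.
    public void Finish()
    {
        if (mMuxerStarted)
        {
            mMuxer.Stop();
        }
        mMuxer.Release();
    }
}

The important details: AddTrack() has to be called with the format the encoder reports in its OutputFormatChanged branch, the muxer has to be started before the first WriteSampleData(), and buffers flagged CodecConfig must be skipped because the muxer already received that data through AddTrack().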
Thanks.