Android: extract, decode, encode and mux audio

Asked: 2015-11-26 03:47:32

Tags: android audio mediacodec multiplexing mediamuxer

I'm trying to adapt the code in ExtractDecodeEditEncodeMuxTest.java to extract the audio and video from an mp4 recorded via Cordova's device.capture.captureVideo, decode the audio, edit the decoded audio samples, encode the audio, and mux the audio back together with the video, saving the result as an mp4 again.

My first attempt is simply to extract, decode, encode and mux the audio without editing any of the samples; if I can get that working, I'm fairly confident I can edit the decoded samples as needed. I don't need to edit the video, so I assume I can simply use MediaExtractor to extract and mux the video track as-is.
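
For context, the edit step would eventually go in the pending-buffer loop below, right before the decoder output is copied into the encoder input buffer. A purely hypothetical sketch, assuming the decoder outputs 16-bit PCM, that just halves the volume:

// Hypothetical edit step, applied to decoderOutputBuffer before
// encoderInputBuffer.put(decoderOutputBuffer). The AAC decoder emits
// 16-bit PCM in native byte order, so view the bytes as shorts and scale them.
// (ShortBuffer and ByteOrder are from java.nio.)
ShortBuffer samples = decoderOutputBuffer.order(ByteOrder.nativeOrder()).asShortBuffer();
for (int i = 0; i < samples.limit(); i++) {
    samples.put(i, (short) (samples.get(i) / 2)); // halve the volume
}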

However, the problem I'm running into is that I can't seem to get the audio decode/encode step right. What keeps happening is that the muxer creates an mp4 from the extracted video track and the extracted -> decoded -> encoded audio track, but on playback the audio starts with a short burst of noise, followed by what sounds like the last few seconds of the audio playing normally (but at the start of the video), and then silence for the rest of the video.

Some relevant fields:

private MediaFormat audioFormat;
private MediaFormat videoFormat;
private int videoTrackIndex = -1;
private int audioTrackIndex = -1;
private static final int MAX_BUFFER_SIZE = 256 * 1024;

// parameters for the audio encoder
private static final String OUTPUT_AUDIO_MIME_TYPE = "audio/mp4a-latm"; // Advanced Audio Coding
private static final int OUTPUT_AUDIO_CHANNEL_COUNT = 2; // Must match the input stream; not used here, taken from the input format instead
private static final int OUTPUT_AUDIO_BIT_RATE = 128 * 1024;
private static final int OUTPUT_AUDIO_AAC_PROFILE = MediaCodecInfo.CodecProfileLevel.AACObjectHE; // Not used here; taken from the input format instead
private static final int OUTPUT_AUDIO_SAMPLE_RATE_HZ = 44100; // Must match the input stream
private static final String TAG = "vvsLog";
private static final boolean DEBUG = false;
private static final boolean INFO = true;
/** How long to wait for the next buffer to become available. */
private static final int TIMEOUT_USEC = 10000;
private String videoPath;

The code that configures the decoder, encoder and muxer:

MediaCodecInfo audioCodecInfo = selectCodec(OUTPUT_AUDIO_MIME_TYPE);
    if (audioCodecInfo == null) {
        // Don't fail CTS if they don't have an AAC codec (not here, anyway).
        Log.e(TAG, "Unable to find an appropriate codec for " + OUTPUT_AUDIO_MIME_TYPE);
        return;
    }

    MediaExtractor videoExtractor = null;
    MediaExtractor audioExtractor = null;
    MediaCodec audioDecoder = null;
    MediaCodec audioEncoder = null;
    MediaMuxer muxer = null;

    try {

        /**
         * Video
         * just need to configure the extractor, no codec processing required
         */
        videoExtractor = createExtractor(originalAssetPath);
        String vidMimeStartsWith = "video/";
        int videoInputTrack = getAndSelectTrackIndex(videoExtractor, vidMimeStartsWith);
        videoFormat = videoExtractor.getTrackFormat(videoInputTrack);

        /**
         * Audio
         * needs an extractor plus an audio decoder and encoder
         */
        audioExtractor = createExtractor(originalAssetPath);
        String audMimeStartsWith = "audio/";
        int audioInputTrack = getAndSelectTrackIndex(audioExtractor, audMimeStartsWith);
        audioFormat = audioExtractor.getTrackFormat(audioInputTrack);
        audioFormat.setInteger(MediaFormat.KEY_SAMPLE_RATE,OUTPUT_AUDIO_SAMPLE_RATE_HZ);

        MediaFormat outputAudioFormat = MediaFormat.createAudioFormat(OUTPUT_AUDIO_MIME_TYPE,
                audioFormat.getInteger(MediaFormat.KEY_SAMPLE_RATE),
                audioFormat.getInteger(MediaFormat.KEY_CHANNEL_COUNT));
        outputAudioFormat.setInteger(MediaFormat.KEY_AAC_PROFILE, audioFormat.getInteger(MediaFormat.KEY_AAC_PROFILE));
        outputAudioFormat.setInteger(MediaFormat.KEY_BIT_RATE, OUTPUT_AUDIO_BIT_RATE);

        // Create a MediaCodec for the decoder, based on the extractor's format, configure and start it.
        audioDecoder = createAudioDecoder(audioFormat);
        // Create a MediaCodec for the desired codec, then configure it as an encoder and start it.
        audioEncoder = createAudioEncoder(audioCodecInfo, outputAudioFormat);

        //create muxer to overwrite original asset path
        muxer = createMuxer(originalAssetPath);

        //add the video and audio tracks
        /**
         * need to wait to add the audio track until after the first encoder output buffer is created
         * since the encoder changes the MediaFormat at that time
         * and the muxer needs the correct format, including the correct Codec Specific Data (CSD) ByteBuffer
         */

        doExtractDecodeEditEncodeMux(
                videoExtractor,
                audioExtractor,
                audioDecoder,
                audioEncoder,
                muxer);

    }
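
The helper methods (createExtractor, getAndSelectTrackIndex, createAudioDecoder, createAudioEncoder, createMuxer) aren't shown above; they follow the CTS test closely. A minimal sketch of what they look like, assuming the standard android.media MediaExtractor/MediaCodec/MediaMuxer calls, with error handling omitted:

private MediaExtractor createExtractor(String path) throws IOException {
    MediaExtractor extractor = new MediaExtractor();
    extractor.setDataSource(path);
    return extractor;
}

private int getAndSelectTrackIndex(MediaExtractor extractor, String mimePrefix) {
    // Select the first track whose MIME type starts with the given prefix.
    for (int i = 0; i < extractor.getTrackCount(); i++) {
        MediaFormat format = extractor.getTrackFormat(i);
        if (format.getString(MediaFormat.KEY_MIME).startsWith(mimePrefix)) {
            extractor.selectTrack(i);
            return i;
        }
    }
    return -1;
}

private MediaCodec createAudioDecoder(MediaFormat inputFormat) throws IOException {
    // The decoder is configured directly from the extractor's track format.
    MediaCodec decoder = MediaCodec.createDecoderByType(
            inputFormat.getString(MediaFormat.KEY_MIME));
    decoder.configure(inputFormat, null, null, 0);
    decoder.start();
    return decoder;
}

private MediaCodec createAudioEncoder(MediaCodecInfo codecInfo, MediaFormat format)
        throws IOException {
    MediaCodec encoder = MediaCodec.createByCodecName(codecInfo.getName());
    encoder.configure(format, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
    encoder.start();
    return encoder;
}

private MediaMuxer createMuxer(String outputPath) throws IOException {
    return new MediaMuxer(outputPath, MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4);
}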

The monster doExtractDecodeEditEncodeMux method:

private void doExtractDecodeEditEncodeMux(
        MediaExtractor videoExtractor,
        MediaExtractor audioExtractor,
        MediaCodec audioDecoder,
        MediaCodec audioEncoder,
        MediaMuxer muxer) {

    ByteBuffer videoInputBuffer = ByteBuffer.allocate(MAX_BUFFER_SIZE);
    MediaCodec.BufferInfo videoBufferInfo = new MediaCodec.BufferInfo();

    ByteBuffer[] audioDecoderInputBuffers = null;
    ByteBuffer[] audioDecoderOutputBuffers = null;
    ByteBuffer[] audioEncoderInputBuffers = null;
    ByteBuffer[] audioEncoderOutputBuffers = null;
    MediaCodec.BufferInfo audioDecoderOutputBufferInfo = null;
    MediaCodec.BufferInfo audioEncoderOutputBufferInfo = null;

    audioDecoderInputBuffers = audioDecoder.getInputBuffers();
    audioDecoderOutputBuffers =  audioDecoder.getOutputBuffers();
    audioEncoderInputBuffers = audioEncoder.getInputBuffers();
    audioEncoderOutputBuffers = audioEncoder.getOutputBuffers();
    audioDecoderOutputBufferInfo = new MediaCodec.BufferInfo();
    audioEncoderOutputBufferInfo = new MediaCodec.BufferInfo();

    /**
     * sanity checks
     */
    //frames
    int videoExtractedFrameCount = 0;
    int audioExtractedFrameCount = 0;
    int audioDecodedFrameCount = 0;
    int audioEncodedFrameCount = 0;
    //times
    long lastPresentationTimeVideoExtractor = 0;
    long lastPresentationTimeAudioExtractor = 0;
    long lastPresentationTimeAudioDecoder = 0;
    long lastPresentationTimeAudioEncoder = 0;

    // We will get these from the decoders when notified of a format change.
    MediaFormat decoderOutputAudioFormat = null;
    // We will get these from the encoders when notified of a format change.
    MediaFormat encoderOutputAudioFormat = null;
    // We will determine these once we have the output format.
    int outputAudioTrack = -1;
    // Whether things are done on the video side.
    boolean videoExtractorDone = false;
    // Whether things are done on the audio side.
    boolean audioExtractorDone = false;
    boolean audioDecoderDone = false;
    boolean audioEncoderDone = false;
    // The audio decoder output buffer to process, -1 if none.
    int pendingAudioDecoderOutputBufferIndex = -1;

    boolean muxing = false;

    /**
     * need to wait to add the audio track until after the first encoder output buffer is created
     * since the encoder changes the MediaFormat at that time
     * and the muxer needs the correct format, including the correct Codec Specific Data (CSD) ByteBuffer
     * muxer.start();
     * muxing = true;
     */

    MediaMetadataRetriever retrieverTest = new MediaMetadataRetriever();
    retrieverTest.setDataSource(videoPath);
    String degreesStr = retrieverTest.extractMetadata(MediaMetadataRetriever.METADATA_KEY_VIDEO_ROTATION);
    if (degreesStr != null) {
        Integer degrees = Integer.parseInt(degreesStr);
        if (degrees >= 0) {
            muxer.setOrientationHint(degrees);
        }
    }

    while (!videoExtractorDone || !audioEncoderDone) {
        if (INFO) {
            Log.d(TAG, String.format("ex:%d at %d | de:%d at %d | en:%d at %d ",
                    audioExtractedFrameCount, lastPresentationTimeAudioExtractor,
                    audioDecodedFrameCount, lastPresentationTimeAudioDecoder,
                    audioEncodedFrameCount, lastPresentationTimeAudioEncoder
                    ));
        }
        /**
         * Extract and mux video
         */
        while (!videoExtractorDone && muxing) {

            try {
                videoBufferInfo.size = videoExtractor.readSampleData(videoInputBuffer, 0);
            } catch (Exception e) {
                e.printStackTrace();
            }

            if (videoBufferInfo.size < 0) {
                videoBufferInfo.size = 0;
                videoExtractorDone = true;
            } else {
                videoBufferInfo.presentationTimeUs = videoExtractor.getSampleTime();
                lastPresentationTimeVideoExtractor = videoBufferInfo.presentationTimeUs;
                videoBufferInfo.flags = videoExtractor.getSampleFlags();
                muxer.writeSampleData(videoTrackIndex, videoInputBuffer, videoBufferInfo);
                videoExtractor.advance();
                videoExtractedFrameCount++;
            }
        }

        /**
         * Extract, decode, watermark, encode and mux audio
         */

        /** Extract audio from file and feed to decoder. **/
        while (!audioExtractorDone  && (encoderOutputAudioFormat == null || muxing)) {
            int decoderInputBufferIndex = audioDecoder.dequeueInputBuffer(TIMEOUT_USEC);
            if (decoderInputBufferIndex == MediaCodec.INFO_TRY_AGAIN_LATER) {
                break;
            }
            if (DEBUG) {
                Log.d(TAG, "audio decoder: returned input buffer: " + decoderInputBufferIndex);
            }
            ByteBuffer decoderInputBuffer = audioDecoderInputBuffers[decoderInputBufferIndex];
            int size = audioExtractor.readSampleData(decoderInputBuffer, 0);
            long presentationTime = audioExtractor.getSampleTime();
            lastPresentationTimeAudioExtractor = presentationTime;
            if (DEBUG) {
                Log.d(TAG, "audio extractor: returned buffer of size " + size);
                Log.d(TAG, "audio extractor: returned buffer for time " + presentationTime);
            }
            if (size >= 0) {
                audioDecoder.queueInputBuffer(
                        decoderInputBufferIndex,
                        0,
                        size,
                        presentationTime,
                        audioExtractor.getSampleFlags());
            }
            audioExtractorDone = !audioExtractor.advance();
            if (audioExtractorDone) {
                if (DEBUG) Log.d(TAG, "audio extractor: EOS");
                audioDecoder.queueInputBuffer(
                        decoderInputBufferIndex,
                        0,
                        0,
                        0,
                        MediaCodec.BUFFER_FLAG_END_OF_STREAM);
            }
            audioExtractedFrameCount++;
            // We extracted a frame, let's try something else next.
            break;
        }

        /**
         * Poll output frames from the audio decoder.
         * Do not poll if we already have a pending buffer to feed to the encoder.
         */
        while (!audioDecoderDone && pendingAudioDecoderOutputBufferIndex == -1 && (encoderOutputAudioFormat == null || muxing)) {
            int decoderOutputBufferIndex =
                    audioDecoder.dequeueOutputBuffer(
                            audioDecoderOutputBufferInfo, TIMEOUT_USEC);
            if (decoderOutputBufferIndex == MediaCodec.INFO_TRY_AGAIN_LATER) {
                if (DEBUG) Log.d(TAG, "no audio decoder output buffer");
                break;
            }
            if (decoderOutputBufferIndex == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED) {
                if (DEBUG) Log.d(TAG, "audio decoder: output buffers changed");
                audioDecoderOutputBuffers = audioDecoder.getOutputBuffers();
                break;
            }
            if (decoderOutputBufferIndex == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) {
                decoderOutputAudioFormat = audioDecoder.getOutputFormat();
                if (DEBUG) {
                    Log.d(TAG, "audio decoder: output format changed: "
                            + decoderOutputAudioFormat);
                }
                break;
            }
            if (DEBUG) {
                Log.d(TAG, "audio decoder: returned output buffer: "
                        + decoderOutputBufferIndex);
            }
            if (DEBUG) {
                Log.d(TAG, "audio decoder: returned buffer of size "
                        + audioDecoderOutputBufferInfo.size);
            }
            ByteBuffer decoderOutputBuffer =
                    audioDecoderOutputBuffers[decoderOutputBufferIndex];
            if ((audioDecoderOutputBufferInfo.flags & MediaCodec.BUFFER_FLAG_CODEC_CONFIG)
                    != 0) {
                if (DEBUG) Log.d(TAG, "audio decoder: codec config buffer");
                audioDecoder.releaseOutputBuffer(decoderOutputBufferIndex, false);
                break;
            }
            if (DEBUG) {
                Log.d(TAG, "audio decoder: returned buffer for time "
                        + audioDecoderOutputBufferInfo.presentationTimeUs);
            }
            if (DEBUG) {
                Log.d(TAG, "audio decoder: output buffer is now pending: "
                        + pendingAudioDecoderOutputBufferIndex);
            }
            pendingAudioDecoderOutputBufferIndex = decoderOutputBufferIndex;
            audioDecodedFrameCount++;
            // We extracted a pending frame, let's try something else next.
            break;
        }

        // Feed the pending decoded audio buffer to the audio encoder.
        while (pendingAudioDecoderOutputBufferIndex != -1) {
            if (DEBUG) {
                Log.d(TAG, "audio decoder: attempting to process pending buffer: "
                        + pendingAudioDecoderOutputBufferIndex);
            }
            int encoderInputBufferIndex = audioEncoder.dequeueInputBuffer(TIMEOUT_USEC);
            if (encoderInputBufferIndex == MediaCodec.INFO_TRY_AGAIN_LATER) {
                if (DEBUG) Log.d(TAG, "no audio encoder input buffer");
                break;
            }
            if (DEBUG) {
                Log.d(TAG, "audio encoder: returned input buffer: " + encoderInputBufferIndex);
            }
            ByteBuffer encoderInputBuffer = audioEncoderInputBuffers[encoderInputBufferIndex];
            int size = audioDecoderOutputBufferInfo.size;
            long presentationTime = audioDecoderOutputBufferInfo.presentationTimeUs;
            lastPresentationTimeAudioDecoder = presentationTime;
            if (DEBUG) {
                Log.d(TAG, "audio decoder: processing pending buffer: "
                        + pendingAudioDecoderOutputBufferIndex);
            }
            if (DEBUG) {
                Log.d(TAG, "audio decoder: pending buffer of size " + size);
                Log.d(TAG, "audio decoder: pending buffer for time " + presentationTime);
            }
            if (size >= 0) {
                ByteBuffer decoderOutputBuffer =
                        audioDecoderOutputBuffers[pendingAudioDecoderOutputBufferIndex]
                                .duplicate();
                decoderOutputBuffer.position(audioDecoderOutputBufferInfo.offset);
                decoderOutputBuffer.limit(audioDecoderOutputBufferInfo.offset + size);
                encoderInputBuffer.position(0);
                encoderInputBuffer.put(decoderOutputBuffer);
                audioEncoder.queueInputBuffer(
                        encoderInputBufferIndex,
                        0,
                        size,
                        presentationTime,
                        audioDecoderOutputBufferInfo.flags);
            }
            audioDecoder.releaseOutputBuffer(pendingAudioDecoderOutputBufferIndex, false);
            pendingAudioDecoderOutputBufferIndex = -1;
            if ((audioDecoderOutputBufferInfo.flags
                    & MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0) {
                if (DEBUG) Log.d(TAG, "audio decoder: EOS");
                audioDecoderDone = true;
            }
            // We enqueued a pending frame, let's try something else next.
            break;
        }

        // Poll frames from the audio encoder and send them to the muxer.
        while (!audioEncoderDone && (encoderOutputAudioFormat == null || muxing)) {
            int encoderOutputBufferIndex = audioEncoder.dequeueOutputBuffer(
                    audioEncoderOutputBufferInfo, TIMEOUT_USEC);
            if (encoderOutputBufferIndex == MediaCodec.INFO_TRY_AGAIN_LATER) {
                if (DEBUG) Log.d(TAG, "no audio encoder output buffer");
                break;
            }
            if (encoderOutputBufferIndex == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED) {
                if (DEBUG) Log.d(TAG, "audio encoder: output buffers changed");
                audioEncoderOutputBuffers = audioEncoder.getOutputBuffers();
                break;
            }
            if (encoderOutputBufferIndex == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) {
                encoderOutputAudioFormat = audioEncoder.getOutputFormat();
                if (DEBUG) {
                    Log.d(TAG, "audio encoder: output format changed");
                }
                if (outputAudioTrack >= 0) {
                    Log.e(TAG,"audio encoder changed its output format again?");
                }
                break;
            }
            if (DEBUG) {
                Log.d(TAG, "audio encoder: returned output buffer: "
                        + encoderOutputBufferIndex);
                Log.d(TAG, "audio encoder: returned buffer of size "
                        + audioEncoderOutputBufferInfo.size);
            }
            ByteBuffer encoderOutputBuffer =
                    audioEncoderOutputBuffers[encoderOutputBufferIndex];
            if ((audioEncoderOutputBufferInfo.flags & MediaCodec.BUFFER_FLAG_CODEC_CONFIG)
                    != 0) {
                if (DEBUG) Log.d(TAG, "audio encoder: codec config buffer");
                // Simply ignore codec config buffers.
                audioEncoder.releaseOutputBuffer(encoderOutputBufferIndex, false);
                break;
            }
            if (DEBUG) {
                Log.d(TAG, "audio encoder: returned buffer for time "
                        + audioEncoderOutputBufferInfo.presentationTimeUs);
            }
            if (audioEncoderOutputBufferInfo.size != 0) {
                lastPresentationTimeAudioEncoder = audioEncoderOutputBufferInfo.presentationTimeUs;
                muxer.writeSampleData(
                        audioTrackIndex, encoderOutputBuffer, audioEncoderOutputBufferInfo);
            }
            if ((audioEncoderOutputBufferInfo.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM)
                    != 0) {
                if (DEBUG) Log.d(TAG, "audio encoder: EOS");
                audioEncoderDone = true;
            }
            audioEncoder.releaseOutputBuffer(encoderOutputBufferIndex, false);
            audioEncodedFrameCount++;
            // We enqueued an encoded frame, let's try something else next.
            break;
        }

        if (!muxing && (encoderOutputAudioFormat != null)) {

            Log.d(TAG, "muxer: adding video track.");
            videoTrackIndex = muxer.addTrack(videoFormat);

            Log.d(TAG, "muxer: adding audio track.");
            audioTrackIndex = muxer.addTrack(encoderOutputAudioFormat);

            Log.d(TAG, "muxer: starting");
            muxer.start();
            muxing = true;
        }
    }
    /**
     * Done processing audio and video
     */
    Log.d(TAG,"encoded and decoded audio frame counts should match. decoded:"+audioDecodedFrameCount+" encoded:"+audioEncodedFrameCount);

    Log.d(TAG,"decoded frame count should be less than extracted frame coun. decoded:"+audioDecodedFrameCount+" extracted:"+audioExtractedFrameCount);
    Log.d(TAG,"no audio frame should be pending "+pendingAudioDecoderOutputBufferIndex);

    PluginResult result = new PluginResult(PluginResult.Status.OK, videoPath);
    result.setKeepCallback(false);
    callbackContext.sendPluginResult(result);

}

I'm seeing this ACodec error during the first few hundred extracted audio frames:

11-25 20:49:58.497   9807-13101/com.vvs.VVS430011 E/ACodec﹕ OMXCodec::onEvent, OMX_ErrorStreamCorrupt
11-25 20:49:58.497   9807-13101/com.vvs.VVS430011 W/AHierarchicalStateMachine﹕ Warning message AMessage(what = 'omx ', target = 8) = {
    int32_t type = 0
    int32_t node = 7115
    int32_t event = 1
    int32_t data1 = -2147479541
    int32_t data2 = 0
    } unhandled in root state.

Here is a pastebin of the full logcat, which includes sanity-check logs in this format:
D/vvsLog﹕ ex:{extracted frame #} at {presentationTime} | de:{decoded frame #} at {presentationTime} | en:{encoded frame #} at {presentationTime}

While those OMX_ErrorStreamCorrupt messages are appearing, the presentationTime of the decoded and encoded frames seems to increase far too quickly. Once they stop, the decoded and encoded presentationTime appears to return to "normal", and also seems to match the actual "good" audio I hear at the beginning of the video; that "good" audio comes from the end of the original audio track.

I'm hoping someone with more experience with these low-level Android multimedia APIs can help me understand why this is happening. Please keep in mind that I'm well aware this code isn't optimized, isn't running on its own thread, and so on; I'll refactor and clean it up once I have a working example of the basic extract -> decode -> edit -> encode -> mux process.

Thanks!

1 Answer:

Answer 0 (score: 2):

It turns out the code above works just fine, as long as you don't try to mux into the same file you're extracting from at the same time. :-)

I had a previous version that extracted and then muxed the tracks back into the same file, and I forgot to change that in this version.

This little method saved the day, lol.

private String getMuxedAssetPath() {
    String muxedAssetPath = Environment.getExternalStoragePublicDirectory(Environment.DIRECTORY_DCIM) + "/" + CAMERA_DIRECTORY + "/muxedAudioVideo.mp4";

    File file = new File(muxedAssetPath);
    if (!file.exists()) {
        try {
            file.createNewFile();
        } catch (IOException e) {
            e.printStackTrace();
            muxedAssetPath = null;
        }
    }

    return muxedAssetPath;
}
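
With that helper in place, the fix is simply to mux to a separate file instead of overwriting the asset being extracted; something like this sketch, reusing the createMuxer helper from the question:

// Write to a new output file rather than overwriting originalAssetPath:
muxer = createMuxer(getMuxedAssetPath());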