Recreating the muxer for a new video file while the encoder keeps running

Date: 2016-01-19 11:34:42

Tags: android video mediacodec muxer

I am using BigFlake's CameraToMpegTest, modified so that the muxer can be started and stopped multiple times while the encoder keeps running. While no video is being recorded, I still use the encoder to grab frames from the texture for other processing.

The first start/stop works fine, but on the second start/stop I get an exception and the app closes: "Called stop() in invalid state 3", "... Caused by: java.lang.IllegalStateException: Failed to stop the muxer ...". The second video file also fails to open, although it is not empty.

Is it possible to restart the muxer while the encoder keeps running? What exactly am I doing wrong?

UPDATE: I tracked the problem down to key frames. When I set format.setInteger(MediaFormat.KEY_I_FRAME_INTERVAL, 0), everything works. So how can I request a key frame when the muxer starts recording again?
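
One possible direction (an untested sketch, not from the original post; it assumes API 19 or later, with mEncoder being the encoder from the code below): instead of forcing KEY_I_FRAME_INTERVAL to 0, request an immediate sync frame from the running encoder whenever the muxer restarts, e.g. from startRecord():

// MediaCodec.setParameters() and PARAMETER_KEY_REQUEST_SYNC_FRAME exist since API 19.
// Requires android.os.Bundle.
Bundle params = new Bundle();
params.putInt(MediaCodec.PARAMETER_KEY_REQUEST_SYNC_FRAME, 0);
mEncoder.setParameters(params);   // the next encoded frame should be a sync (IDR) frame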

public class MainFrameProcessor {
public static final String TAG = "MainFrameProcessor";
public static final boolean VERBOSE = false;           // lots of logging

// parameters for the encoder
private static final String MIME_TYPE = "video/avc";    // H.264 Advanced Video Coding
private static final int FRAME_RATE = 30;               // 30fps
private static final int IFRAME_INTERVAL = 5;           // 5 seconds between I-frames

public static final int encWidth = 1920;
public static final int encHeight = 1080;
private static final int encBitRate = 10000000;//6000000;      // 10 Mbps, in bits per second   //http://static.googleusercontent.com/media/source.android.com/en//compatibility/android-cdd.pdf 

public ByteBuffer mPixelBuf;                       // used by saveFrame()
private volatile boolean isWrite=false;  // true while the muxer is recording
private volatile boolean isWork=true;    // cleared from another thread to stop the loop

// encoder / muxer state
private MediaCodec mEncoder;
private CodecInputSurface mInputSurface;
private Writer writer;
private String path;

// camera state
private Camera mCamera;
private SurfaceTextureManager mStManager;

private Handler messageHandler;

public MainFrameProcessor(String path, Handler messageHandler){
    mPixelBuf = ByteBuffer.allocateDirect(encHeight * encWidth * 4);
    mPixelBuf.order(ByteOrder.LITTLE_ENDIAN);
    this.path=path;
    this.messageHandler=messageHandler;
}

/** test entry point */
public void startProcessor() throws Throwable {
    CameraToMpegWrapper.runTest(this);
}

public void stopProcessor() {
    isWork=false;
}

synchronized public void release(){
    stopProcessor();
    releaseCamera();
    releaseEncoderAndWriter();
    releaseSurfaceTexture();
}

/**
 * Wraps processCameraFrames().  This is necessary because SurfaceTexture will try to use
 * the looper in the current thread if one exists, and the CTS tests create one on the
 * test thread.
 *
 * The wrapper propagates exceptions thrown by the worker thread back to the caller.
 */
private static class CameraToMpegWrapper implements Runnable {
    private Throwable mThrowable;
    private MainFrameProcessor mTest;

    private CameraToMpegWrapper(MainFrameProcessor test) {
        mTest = test;
    }

    @Override
    public void run() {
        try {
            mTest.processCameraFrames();
        } catch (Throwable th) {
            mThrowable = th;
        }
    }

    /** Entry point. */
    public static void runTest(MainFrameProcessor obj) throws Throwable {
        CameraToMpegWrapper wrapper = new CameraToMpegWrapper(obj);
        Thread th = new Thread(wrapper, "codec test");
        th.start();
        //th.join();  //http://stackoverflow.hex1.ru/questions/22457623/surfacetextures-onframeavailable-method-always-called-too-late
        // NOTE: with join() commented out, the worker thread is usually still
        // running here, so this check can only catch very early failures.
        if (wrapper.mThrowable != null) {
            throw wrapper.mThrowable;
        }
    }
}

// Start/stop requests are posted from the UI thread and consumed on the
// codec thread, hence volatile.
private volatile boolean to_start=false;
public void startRecord(){
    to_start=true;
}

private volatile boolean to_stop=false;
public void stopRecord(){
    to_stop=true;
}

/**
 * Tests encoding of AVC video from Camera input.  The output is saved as an MP4 file.
 */
private void processCameraFrames() {
    // arbitrary but popular values
    Log.d(TAG, MIME_TYPE + " output " + encWidth + "x" + encHeight + " @" + encBitRate);

    try {
        prepareCamera(encWidth, encHeight);
        prepareEncoderAndWriter(encWidth, encHeight, encBitRate);
        mInputSurface.makeCurrent();
        prepareSurfaceTexture();

        mCamera.startPreview();

        SurfaceTexture st = mStManager.getSurfaceTexture();

        while (isWork) {
            if (to_start){
                writer.startWriter();
                isWrite=true;
                to_start=false;
            }
            if (to_stop){
                isWrite=false;
                writer.stopWriter();
                to_stop=false;
            }
            // Acquire a new frame of input, and render it to the Surface.  If we had a
            // GLSurfaceView we could switch EGL contexts and call drawImage() a second
            // time to render it on screen.  The texture can be shared between contexts by
            // passing the GLSurfaceView's EGLContext as eglCreateContext()'s share_context
            // argument.
            mStManager.awaitNewImage();
            mStManager.drawImage();

            synchronized (mPixelBuf) {
                mPixelBuf.rewind();
                GLES20.glReadPixels(0, 0, encWidth, encHeight, GLES20.GL_RGBA, GLES20.GL_UNSIGNED_BYTE,
                       mPixelBuf);
            }

            if (isWrite) {
                if (writer.checkDurationEnd()) {
                    stopRecord();
                    Message msg = messageHandler.obtainMessage(MainActivity.MESSAGE_STOP_REC);
                    messageHandler.sendMessage(msg);
                } else
                    writer.write(st, mInputSurface);
            }
        }
    } finally {
        // release everything we grabbed
        release();
    }
}

/**
 * Configures Camera for video capture.  Sets mCamera.
 * <p>
 * Opens a Camera and sets parameters.  Does not start preview.
 */
private void prepareCamera(int encWidth, int encHeight) {
    if (mCamera != null) {
        throw new RuntimeException("camera already initialized");
    }

    mCamera = Camera.open();    // opens first back-facing camera
    if (mCamera == null) {
        throw new RuntimeException("Unable to open camera");
    }

    Camera.Parameters parms = mCamera.getParameters();

    choosePreviewSize(parms, encWidth, encHeight);
    // leave the frame rate set to default
    mCamera.setParameters(parms);

    Camera.Size size = parms.getPreviewSize();
    Log.d(TAG, "Camera preview size is " + size.width + "x" + size.height);
}

/**
 * Attempts to find a preview size that matches the provided width and height (which
 * specify the dimensions of the encoded video).  If it fails to find a match it just
 * uses the default preview size.
 * <p>
 * TODO: should do a best-fit match (a sketch appears after this method).
 */
private static void choosePreviewSize(Camera.Parameters parms, int width, int height) {
    // We should make sure that the requested MPEG size is less than the preferred
    // size, and has the same aspect ratio.
    Camera.Size ppsfv = parms.getPreferredPreviewSizeForVideo();
    if (VERBOSE && ppsfv != null) {
        Log.d(TAG, "Camera preferred preview size for video is " +
                ppsfv.width + "x" + ppsfv.height);
    }

    for (Camera.Size size : parms.getSupportedPreviewSizes()) {
        if (size.width == width && size.height == height) {
            parms.setPreviewSize(width, height);
            return;
        }
    }

    Log.w(TAG, "Unable to set preview size to " + width + "x" + height);
    if (ppsfv != null) {
        parms.setPreviewSize(ppsfv.width, ppsfv.height);
    }
}
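
// Untested sketch for the TODO above (not part of the original test): a best-fit
// match that picks the supported preview size whose pixel area is closest to the
// requested one.  Aspect ratio is not enforced here.
private static Camera.Size chooseBestFitPreviewSize(Camera.Parameters parms,
        int width, int height) {
    Camera.Size best = null;
    long target = (long) width * height;
    long bestDiff = Long.MAX_VALUE;
    for (Camera.Size size : parms.getSupportedPreviewSizes()) {
        long diff = Math.abs((long) size.width * size.height - target);
        if (diff < bestDiff) {
            bestDiff = diff;
            best = size;
        }
    }
    return best;
}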

/**
 * Stops camera preview, and releases the camera to the system.
 */
private void releaseCamera() {
    if (VERBOSE) Log.d(TAG, "releasing camera");
    if (mCamera != null) {
        mCamera.stopPreview();
        mCamera.release();
        mCamera = null;
    }
}

/**
 * Configures SurfaceTexture for camera preview.  Initializes mStManager, and sets the
 * associated SurfaceTexture as the Camera's "preview texture".
 * <p>
 * Configure the EGL surface that will be used for output before calling here.
 */
private void prepareSurfaceTexture() {
    mStManager = new SurfaceTextureManager(mPixelBuf);
    SurfaceTexture st = mStManager.getSurfaceTexture();
    try {
        mCamera.setPreviewTexture(st);
    } catch (IOException ioe) {
        throw new RuntimeException("setPreviewTexture failed", ioe);
    }
}

/**
 * Releases the SurfaceTexture.
 */
private void releaseSurfaceTexture() {
    if (mStManager != null) {
        mStManager.release();
        mStManager = null;
    }
}

/**
 * Configures encoder state and prepares the input Surface.  Initializes
 * mEncoder, mInputSurface, and writer; the muxer itself lives in Writer.
 */
private void prepareEncoderAndWriter(int width, int height, int bitRate) {
    MediaFormat format = MediaFormat.createVideoFormat(MIME_TYPE, width, height);

    // Set some properties.  Failing to specify some of these can cause the MediaCodec
    // configure() call to throw an unhelpful exception.
    format.setInteger(MediaFormat.KEY_COLOR_FORMAT,
            MediaCodecInfo.CodecCapabilities.COLOR_FormatSurface);
    format.setInteger(MediaFormat.KEY_BIT_RATE, bitRate);
    format.setInteger(MediaFormat.KEY_FRAME_RATE, FRAME_RATE);
    format.setInteger(MediaFormat.KEY_I_FRAME_INTERVAL, IFRAME_INTERVAL);
    if (VERBOSE) Log.d(TAG, "format: " + format);

    // Create a MediaCodec encoder, and configure it with our format.  Get a Surface
    // we can use for input and wrap it with a class that handles the EGL work.
    //
    // If you want to have two EGL contexts -- one for display, one for recording --
    // you will likely want to defer instantiation of CodecInputSurface until after the
    // "display" EGL context is created, then modify the eglCreateContext call to
    // take eglGetCurrentContext() as the share_context argument.
    try {
        mEncoder = MediaCodec.createEncoderByType(MIME_TYPE);
    } catch (IOException e) {
        // Don't continue with a null encoder; fail fast instead.
        throw new RuntimeException("failed to create encoder for " + MIME_TYPE, e);
    }
    mEncoder.configure(format, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
    mInputSurface = new CodecInputSurface(mEncoder.createInputSurface());
    mEncoder.start();

    writer=new Writer(path, mEncoder);
}

/**
 * Releases encoder resources.
 */
private void releaseEncoderAndWriter() {
    if (VERBOSE) Log.d(TAG, "releasing encoder objects");
    if (writer!=null)
        writer.releaseWriter();
    if (mEncoder != null) {
        mEncoder.stop();
        mEncoder.release();
        mEncoder = null;
    }
    if (mInputSurface != null) {
        mInputSurface.release();
        mInputSurface = null;
    }
}
}
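
For reference, the call sequence that triggers the problem (hypothetical driver code, not from the original post; the path and handler are placeholders):

MainFrameProcessor processor = new MainFrameProcessor("/sdcard/DCIM/test", messageHandler);
try {
    processor.startProcessor();   // spawns the codec thread internally
} catch (Throwable t) {
    throw new RuntimeException(t);
}
processor.startRecord();          // first file: records fine
processor.stopRecord();           // first stop: works
processor.startRecord();          // second file
processor.stopRecord();           // second stop: IllegalStateException here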

The Writer class, which wraps the muxer:

public class Writer {
private static final long DURATION_SEC = 1000;          // maximum recording duration, in seconds

private MediaCodec mEncoder;

private MediaMuxer mMuxer;
private String path;

private int mTrackIndex;
private long startWhen;
private long desiredEnd;
private boolean mMuxerStarted;

// allocate one of these up front so we don't need to do it every time
private MediaCodec.BufferInfo mBufferInfo;
private boolean new_track=false;   // true until the new muxer's video track is added and started
public Writer(String path, MediaCodec mEncoder){
    this.path=path;
    this.mEncoder=mEncoder;
}

public void startWriter(){
    // Output filename.  Ideally this would use Context.getFilesDir() rather than a
    // hard-coded output directory.
    String outputPath= CameraUtils.createPathFile(path, "mp4");
    Log.i(MainFrameProcessor.TAG, "Output file is " + outputPath);


    // Create a MediaMuxer.  We can't add the video track and start() the muxer here,
    // because our MediaFormat doesn't have the Magic Goodies.  These can only be
    // obtained from the encoder after it has started processing data.
    //
    // We're not actually interested in multiplexing audio.  We just want to convert
    // the raw H.264 elementary stream we get from MediaCodec into a .mp4 file.
    try {
        mMuxer = new MediaMuxer(outputPath, MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4);
    } catch (IOException ioe) {
        throw new RuntimeException("MediaMuxer creation failed", ioe);
    }

    mBufferInfo = new MediaCodec.BufferInfo();
    mTrackIndex = -1;
    mMuxerStarted = false;

    new_track=true;

    startWhen = System.nanoTime();
    desiredEnd = startWhen + DURATION_SEC * 1000000000L;
}

synchronized public void stopWriter(){
    // send end-of-stream to encoder, and drain remaining output
    if (mMuxer != null) {
        Log.w(MainFrameProcessor.TAG,"stop");
        //if (mEncoder!=null) drainEncoder(true);

        mMuxer.stop();
        mMuxer.release();
        mMuxer = null;
    }
}

public void releaseWriter() {
    stopWriter();
}

public void write(SurfaceTexture st, CodecInputSurface mInputSurface) {
    // Set the presentation time stamp from the SurfaceTexture's time stamp.  This
    // will be used by MediaMuxer to set the PTS in the video.
    if (MainFrameProcessor.VERBOSE) {
        Log.d(MainFrameProcessor.TAG, "present: " +
                ((st.getTimestamp() - startWhen) / 1000000.0) + "ms");
    }
    mInputSurface.setPresentationTime(st.getTimestamp());

    // Submit it to the encoder.  The eglSwapBuffers call will block if the input
    // is full, which would be bad if it stayed full until we dequeued an output
    // buffer (which we can't do, since we're stuck here).  So long as we fully drain
    // the encoder before supplying additional input, the system guarantees that we
    // can supply another frame without blocking.
    if (MainFrameProcessor.VERBOSE) Log.d(MainFrameProcessor.TAG, "sending frame to encoder");
    mInputSurface.swapBuffers();

    drainEncoder(false);
}

/**
 * Extracts all pending data from the encoder and forwards it to the muxer.
 * <p>
 * If endOfStream is not set, this returns when there is no more data to drain.  If it
 * is set, we send EOS to the encoder, and then iterate until we see EOS on the output.
 * Calling this with endOfStream set should be done once, right before stopping the muxer.
 * <p>
 * We're just using the muxer to get a .mp4 file (instead of a raw H.264 stream).  We're
 * not recording audio.
 */
private void drainEncoder(boolean endOfStream) {
    final int TIMEOUT_USEC = 10000;
    if (MainFrameProcessor.VERBOSE) Log.d(MainFrameProcessor.TAG, "drainEncoder(" + endOfStream + ")");

    if (endOfStream) {
        if (MainFrameProcessor.VERBOSE) Log.d(MainFrameProcessor.TAG, "sending EOS to encoder");
        mEncoder.signalEndOfInputStream();
    }

    ByteBuffer[] encoderOutputBuffers = mEncoder.getOutputBuffers();
    while (true) {
        int encoderStatus = mEncoder.dequeueOutputBuffer(mBufferInfo, TIMEOUT_USEC);
        Log.w("ddd", ""+encoderStatus+"; "+new_track);
        if (encoderStatus == MediaCodec.INFO_TRY_AGAIN_LATER) {
            // no output available yet
            if (!endOfStream) {
                break;      // out of while
            } else {
                if (MainFrameProcessor.VERBOSE) Log.d(MainFrameProcessor.TAG, "no output available, spinning to await EOS");
            }
        } else if (encoderStatus == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED) {
            // not expected for an encoder
            encoderOutputBuffers = mEncoder.getOutputBuffers();
        } else if (encoderStatus == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) {
            // should happen before receiving buffers, and should only happen once
            /*if (mMuxerStarted) {
                throw new RuntimeException("format changed twice");
            }
            MediaFormat newFormat = mEncoder.getOutputFormat();
            Log.d(MainFrameProcessor.TAG, "encoder output format changed: " + newFormat);

            // now that we have the Magic Goodies, start the muxer
            mTrackIndex = mMuxer.addTrack(newFormat);
            mMuxer.start();
            mMuxerStarted = true;*/
        } else if (encoderStatus < 0) {
            Log.w(MainFrameProcessor.TAG, "unexpected result from encoder.dequeueOutputBuffer: " +
                    encoderStatus);
            // let's ignore it
        } else if (new_track){
            // First buffer dequeued after a muxer restart: grab the current output
            // format, add the track to the new muxer and start it.  Note that this
            // branch drops the dequeued buffer without releasing it back to the encoder.
            MediaFormat newFormat = mEncoder.getOutputFormat();
            Log.d(MainFrameProcessor.TAG, "encoder output format changed: " + newFormat);

            // now that we have the Magic Goodies, start the muxer
            mTrackIndex = mMuxer.addTrack(newFormat);
            mMuxer.start();
            mMuxerStarted = true;
            new_track=false;
        } else {
            ByteBuffer encodedData = encoderOutputBuffers[encoderStatus];
            if (encodedData == null) {
                throw new RuntimeException("encoderOutputBuffer " + encoderStatus +
                        " was null");
            }

            if ((mBufferInfo.flags & MediaCodec.BUFFER_FLAG_CODEC_CONFIG) != 0) {
                // The codec config data was pulled out and fed to the muxer when we got
                // the INFO_OUTPUT_FORMAT_CHANGED status.  Ignore it.
                if (MainFrameProcessor.VERBOSE) Log.d(MainFrameProcessor.TAG, "ignoring BUFFER_FLAG_CODEC_CONFIG");
                mBufferInfo.size = 0;
            }

            if (mBufferInfo.size != 0) {
                if (!mMuxerStarted) {
                    throw new RuntimeException("muxer hasn't started");
                }

                // adjust the ByteBuffer values to match BufferInfo (not needed?)
                encodedData.position(mBufferInfo.offset);
                encodedData.limit(mBufferInfo.offset + mBufferInfo.size);

                mMuxer.writeSampleData(mTrackIndex, encodedData, mBufferInfo);
                if (MainFrameProcessor.VERBOSE) Log.d(MainFrameProcessor.TAG, "sent " + mBufferInfo.size + " bytes to muxer");
            }

            mEncoder.releaseOutputBuffer(encoderStatus, false);

            if ((mBufferInfo.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0) {
                if (!endOfStream) {
                    Log.w(MainFrameProcessor.TAG, "reached end of stream unexpectedly");
                } else {
                    if (MainFrameProcessor.VERBOSE) Log.d(MainFrameProcessor.TAG, "end of stream reached");
                }
                break;      // out of while
            }
        }
    }
}

public boolean checkDurationEnd() {
    return System.nanoTime() >= desiredEnd;
}
}
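
A side note on the crash itself (an editorial guess, untested, not from the original post): MediaMuxer.stop() throws IllegalStateException both when stop() is called on a muxer that was never started and when a started muxer received no usable samples (for example, no sync frame), which fits the key-frame observation in the update. A defensive variant of stopWriter() avoids at least the first case:

synchronized public void stopWriter(){
    if (mMuxer != null) {
        Log.w(MainFrameProcessor.TAG, "stop");
        if (mMuxerStarted) {            // stop() is only legal after start()
            mMuxer.stop();
        }
        mMuxer.release();
        mMuxer = null;
        mMuxerStarted = false;
    }
}

This alone does not produce a playable second file; the encoder still has to deliver a sync frame after each restart (see the setParameters() sketch near the top).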

0 Answers