Using Mp4Parser, the audio stream goes out of sync when I append more videos

Date: 2013-06-18 06:55:29

Tags: android video append concatenation mp4parser

As I said, the more videos I record, the more the audio drifts out of sync. How can I solve this? I have the following code to append the videos:

public class ConcatenateVideos extends ExecutorAsyncTask<String, Void, Boolean> {

private ArrayList<String> video_urls = null;

private final String TAG = ConcatenateVideos.class.getSimpleName();

public void setUris(ArrayList<String> videos) {
    LogService.log(TAG, "set uris");
    if (videos != null) {
        video_urls = videos;
        this.execute();
    }
}

@Override
protected Boolean doInBackground(String... params) {
    boolean success = false;

    FileInputStream[] videos = new FileInputStream[video_urls.size()];

    try {
        for (int i = 0; i < video_urls.size(); i++) {
            videos[i] = new FileInputStream(video_urls.get(i));
        }

        success = concatenateVideos(videos);

    } catch (Exception e) {
        success = false;
        LogService.err(TAG, e.getMessage(), e);
    }
    return success;
}

private boolean concatenateVideos(InputStream[] video_streams) {
    boolean success = false;
    Movie[] inMovies = new Movie[video_streams.length];

    FileChannel fc = null;
    Movie result = new Movie();
    IsoFile out = null;

    try {
        for (int i = 0; i < inMovies.length; i++) {
            if (video_streams[i] != null) {
                inMovies[i] = MovieCreator.build(Channels.newChannel(video_streams[i]));
            }
        }
        List<Track> videoTracks = new LinkedList<Track>();
        List<Track> audioTracks = new LinkedList<Track>();

        for (Movie m : inMovies) {
            for (Track t : m.getTracks()) {
                if (t.getHandler().equals("soun")) {
                    audioTracks.add(t);
                }
                if (t.getHandler().equals("vide")) {
                    videoTracks.add(t);
                }
            }
        }

        if (audioTracks.size() > 0) {

            result.addTrack(new AppendTrack(audioTracks.toArray(new Track[audioTracks.size()])));

        }
        if (videoTracks.size() > 0) {

            result.addTrack(new AppendTrack(videoTracks.toArray(new Track[videoTracks.size()])));

        }
        out = new DefaultMp4Builder().build(result);
        fc = new RandomAccessFile(video_urls.get(0), "rw").getChannel();
        for (int i = 1; i < video_urls.size(); i++) {
            File f = new File(video_urls.get(i));
            LogService.log(TAG, "delete file : "  + f.delete());
        }
        success = true;
    } catch (Exception e) {
        LogService.err(TAG, e.getMessage(), e);
        success = false;
    } finally {
        try {
            LogService.log(TAG, "==========finally");
            if (fc != null) {
                fc.position(0);
                out.getBox(fc);
                fc.close();
            }
        } catch (Exception e) {
            LogService.err(TAG, e.getMessage(), e);
        }
    }
    return success;
}

}

这是我用来调用这个ConcatenateVideos函数的服务:

private final String TAG = ConcatenateVideosService.class.getSimpleName();
final Messenger myMessenger = new Messenger(new IncomingHandler());

class IncomingHandler extends Handler {
    private Messenger client = null;

    @Override
    public void handleMessage(Message msg) {

        // init messenger
        if (client == null) {
            client = msg.replyTo;
        }

        // get the command byte from the message
        Bundle data = msg.getData();
        byte command = data.getByte("message");

        switch (command) {
        case Constants.INIT_CMD_SERVICE:
            LogService.log(TAG, "INIT_CMD_SERVICE:");

            break;

        case Constants.CONCATE_CMD_SERVICE:
            LogService.log(TAG, "CONCATE_CMD_SERVICE:");

            ArrayList<String> videos = data.getStringArrayList(Constants.SERVICE_VIDEO_URLS);

            ConcatenateVideos concatenateVideos = new ConcatenateVideos() {
                @Override
                protected void onPostExecute(Boolean result) {
                    LogService.log(TAG, "onPostExecute() ,  result : " + result);
                    super.onPostExecute(result);

                    // setup the answer
                    Message answer = Message.obtain();
                    Bundle bundle = new Bundle();

                    bundle.putBoolean("result", result);
                    answer.setData(bundle);

                    // send the answer
                    try {
                        client.send(answer);
                    } catch (RemoteException e) {
                        LogService.err(TAG, e.getMessage(), e);
                    }
                }
            };
            concatenateVideos.setUris(videos);
            break;
        }
    }
}

@Override
public boolean onUnbind(Intent intent) {
    stopSelf();
    return super.onUnbind(intent);
}

@Override
public IBinder onBind(Intent intent) {
    return myMessenger.getBinder();
}

@Override
public void onDestroy() {
    super.onDestroy();
}

My videos are recorded with the following settings: video bitrate 800000, audio bitrate 64000, audio sampling rate 44100, H.264 video and AAC audio in an MPEG-4 container at 30 fps. Now I ran a test: if I record 4 videos, each one has a video timescale of 90000 and an audio timescale of 44100. But after appending the videos, the audio timescale is still 44100, while the video timescale is 900. Why does the video timescale change but not the audio one?
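(For reference, these timescales can be read back with mp4parser itself. The sketch below is illustrative only: the TimescaleDump helper is a hypothetical name, it assumes the classic com.coremedia.iso / com.googlecode.mp4parser API, and MovieCreator.build(String) exists only in newer releases; older ones take a ReadableByteChannel as in the code above.)

import com.coremedia.iso.IsoFile;
import com.googlecode.mp4parser.authoring.Movie;
import com.googlecode.mp4parser.authoring.Track;
import com.googlecode.mp4parser.authoring.container.mp4.MovieCreator;

public class TimescaleDump {

    public static void dump(String path) throws Exception {
        // Movie-level timescale, stored in the mvhd box
        IsoFile isoFile = new IsoFile(path);
        long movieTimescale = isoFile.getMovieBox().getMovieHeaderBox().getTimescale();
        System.out.println("movie timescale: " + movieTimescale);
        isoFile.close();

        // Per-track timescales, stored in each track's mdhd box
        Movie movie = MovieCreator.build(path);
        for (Track t : movie.getTracks()) {
            System.out.println(t.getHandler() + " track timescale: "
                    + t.getTrackMetaData().getTimescale());
        }
    }
}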

2 Answers:

Answer 0 (score: 2)

In many cases the audio and video recordings are not exactly the same length. Say the audio is always 10.0 seconds long and the video is always 10.1 seconds. If you play a single movie, the audio may end before the video does; the remainder is simply silent.

If you append two of these videos, the audio of the second clip starts at 10.0 seconds, but its video starts at 10.1 seconds, and there is your sync problem.

You need to compensate for the different running lengths, for example by appending silence or even dropping some frames!
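One way to implement that compensation is sketched below. This is a rough sketch, not part of the original answer: the hypothetical SyncHelper assumes Track#getSampleDurations() and CroppedTrack from mp4parser's authoring API (mp4parser 1.1.x), and simply drops whole samples from the end of whichever track of a clip runs longer, before the clips are appended.

import com.googlecode.mp4parser.authoring.Track;
import com.googlecode.mp4parser.authoring.tracks.CroppedTrack;

public class SyncHelper {

    // Track duration in seconds: sum of sample durations over the track timescale
    static double durationSeconds(Track t) {
        double timescale = t.getTrackMetaData().getTimescale();
        double seconds = 0;
        for (long sampleDuration : t.getSampleDurations()) {
            seconds += sampleDuration / timescale;
        }
        return seconds;
    }

    // Crop whole samples off the end of whichever track runs longer,
    // so the pair can be appended without accumulating drift.
    static Track[] align(Track audio, Track video) {
        double diff = durationSeconds(audio) - durationSeconds(video);
        if (diff > 0) {
            audio = cropEnd(audio, diff);   // audio runs longer
        } else if (diff < 0) {
            video = cropEnd(video, -diff);  // video runs longer
        }
        return new Track[] { audio, video };
    }

    static Track cropEnd(Track track, double surplusSeconds) {
        double timescale = track.getTrackMetaData().getTimescale();
        long[] durations = track.getSampleDurations();
        long keep = durations.length;
        // Walk backwards, dropping trailing samples while they fit into the surplus
        for (int i = durations.length - 1; i >= 0; i--) {
            double sampleSeconds = durations[i] / timescale;
            if (sampleSeconds > surplusSeconds) {
                break;
            }
            surplusSeconds -= sampleSeconds;
            keep--;
        }
        return keep < durations.length ? new CroppedTrack(track, 0, keep) : track;
    }
}

Dropping a few trailing video frames is usually invisible, while cutting audio can be audible, so trimming whichever side runs longer, as the answer below also does per clip, tends to work better than always trimming the audio.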

Answer 1 (score: 0)

I know this question is old, but I ran into the same problem without finding a clear solution, and by taking code from here and there I put together a couple of functions that solve it.

@Throws(Exception::class)
fun appendVideos(videoPathList: List<String>, targetFilePath: String) {

    val movies = videoPathList.map { file -> MovieCreator.build(file) }

    val finalMovie = Movie()

    val videoTracksTotal = mutableListOf<Track>()
    val audioTracksTotal = mutableListOf<Track>()

    var audioDuration = 0.0
    var videoDuration = 0.0

    movies.forEach { movie ->

        val videoTracks = mutableListOf<Track>()
        val audioTracks = mutableListOf<Track>()

        movie.tracks.forEach { track ->

            val trackDuration = track.sampleDurations.toList()
                .map { t -> t.toDouble() / track.trackMetaData.timescale }.sum()

            if (track.handler == "vide") {
                videoDuration += trackDuration
                videoTracks.add(track)
            } else if (track.handler == "soun") {
                audioDuration += trackDuration
                audioTracks.add(track)
            }
        }

        // Adjusting Durations
        adjustDurations(videoTracks, audioTracks, videoDuration, audioDuration).let {
            audioDuration = it.audioDuration
            videoDuration = it.videoDuration
        }

        videoTracksTotal.addAll(videoTracks)
        audioTracksTotal.addAll(audioTracks)
    }

    if (videoTracksTotal.isNotEmpty() && audioTracksTotal.isNotEmpty()) {
        finalMovie.addTrack(AppendTrack(*videoTracksTotal.toTypedArray()))
        finalMovie.addTrack(AppendTrack(*audioTracksTotal.toTypedArray()))
    }

    val container = DefaultMp4Builder().build(finalMovie)

    // Write the result to the target file, closing the stream even on failure
    FileOutputStream(targetFilePath).use { fos ->
        container.writeContainer(fos.channel)
    }
}

class Durations(val audioDuration: Double, val videoDuration: Double)

private fun adjustDurations(
    videoTracks: MutableList<Track>,
    audioTracks: MutableList<Track>,
    videoDuration: Double,
    audioDuration: Double
): Durations {

    var diff = audioDuration - videoDuration
    val tracks: MutableList<Track>
    var durationOperator: Double
    val isAudioProblem: Boolean

    when {
        // audio and video match, no operations to perform
        diff == 0.0 -> {
            return Durations(audioDuration, videoDuration)
        }
        // audio tracks are longer than video
        diff > 0 -> {
            tracks = audioTracks
            durationOperator = audioDuration
            isAudioProblem = true
        }
        // video tracks are longer than audio
        else -> {
            tracks = videoTracks
            durationOperator = videoDuration
            diff *= -1.0
            isAudioProblem = false
        }
    }

    // Getting the last track in order to operate with it
    var track: Track = tracks.last()
    var counter: Long = 0

    // Walk the sample durations from the end of the track, counting how many
    // trailing samples can be dropped without overshooting the difference
    for (sampleDuration in track.sampleDurations.toList().asReversed()) {

        // Stop once dropping the next sample would overshoot the remaining difference
        if (sampleDuration.toDouble() / track.trackMetaData.timescale > diff) {
            break
        }
        diff -= sampleDuration.toDouble() / track.trackMetaData.timescale
        durationOperator -= sampleDuration.toDouble() / track.trackMetaData.timescale
        counter++
    }

    if (counter != 0L) {
        // Cropping track
        track = CroppedTrack(track, 0, track.samples.size - counter)

        //update the original reference
        tracks.removeAt(tracks.lastIndex)
        tracks.add(track)
    }

    // Returning durations
    return if (isAudioProblem) {
        Durations(durationOperator, videoDuration)
    } else {
        Durations(audioDuration, durationOperator)
    }
}
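Note that, unlike the code in the question, this version writes the result to a separate targetFilePath with writeContainer() rather than overwriting the first input file in place, so the source recordings stay intact if anything fails mid-write.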