Creating a video from images

Time: 2011-11-17 16:16:57

Tags: android image video codec

Is there a way to create a video from a series of images on Android? Perhaps a way to extend MediaRecorder so that it can accept images as input.

I'm trying to actually create the video and store it (for example, as an mpeg4 file).

Thanks for any suggestions.

4 answers:

Answer 0 (score: 7)

I'm trying to do the same thing. I've been advised to use Libav (http://libav.org/), but I need to build it with the NDK and I'm currently having some problems with that.

I'm looking for some documentation about it. I'll keep you posted.

I've created a post about it: Libav build for Android

Answer 1 (score: 1)

You can use an AnimationDrawable in an ImageView.

Add frames with the AnimationDrawable.addFrame(Drawable frame, int duration) method, then start the animation with AnimationDrawable.start().

Not sure if this is ideal, but it will work.
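
For example, a minimal sketch inside an Activity (the drawable resources and the view id are hypothetical, and start() should be called once the ImageView is attached, e.g. from onWindowFocusChanged):

AnimationDrawable animation = new AnimationDrawable();
// show each image for 100 ms
animation.addFrame(getResources().getDrawable(R.drawable.frame1), 100);
animation.addFrame(getResources().getDrawable(R.drawable.frame2), 100);
animation.addFrame(getResources().getDrawable(R.drawable.frame3), 100);
animation.setOneShot(true); // play the sequence once

ImageView imageView = (ImageView) findViewById(R.id.image_view);
imageView.setImageDrawable(animation);
animation.start();

Note that this only plays the images on screen; it does not produce a video file.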

Answer 2 (score: 1)

I use Android + NDK:

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/pixdesc.h>
#include <libswscale/swscale.h>
}
#include <android/log.h>
#include <cstdio>
#include <cstring>
#include <cstdlib>
#include <iostream>
#include <vector>

// W_VIDEO, H_VIDEO and NUMNUMBER are project-specific constants (output frame size
// and filename number buffer length) assumed to be defined elsewhere.

AVFrame* OpenImage(const char* imageFileName)
{
    AVFormatContext *pFormatCtx =  avformat_alloc_context();
    std::cout<<"1"<<imageFileName<<std::endl;
    if( avformat_open_input(&pFormatCtx, imageFileName, NULL, NULL) < 0)
    {
        printf("Can't open image file '%s'\n", imageFileName);
        return NULL;
    }
    std::cout<<"2"<<std::endl;
    av_dump_format(pFormatCtx, 0, imageFileName, false);

    AVCodecContext *pCodecCtx;
    std::cout<<"3"<<std::endl;
    pCodecCtx = pFormatCtx->streams[0]->codec;
    pCodecCtx->width = W_VIDEO;
    pCodecCtx->height = H_VIDEO;
    //pCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;

    // Find the decoder for the video stream
    AVCodec *pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (!pCodec)
    {
        printf("Codec not found\n");
        return NULL;
    }

    // Open codec
    //if(avcodec_open2(pCodecCtx, pCodec)<0)
    if(avcodec_open2(pCodecCtx, pCodec,NULL)<0)//check this NULL, it should be of AVDictionary **options
    {
        printf("Could not open codec\n");
        return NULL;
    }
    std::cout<<"4"<<std::endl;
    //
    AVFrame *pFrame;

    pFrame = av_frame_alloc();

    if (!pFrame)
    {
        printf("Can't allocate memory for AVFrame\n");
        return NULL;
    }
    printf("here");
    int frameFinished;
    int numBytes;

    // Determine required buffer size and allocate buffer
    numBytes = avpicture_get_size( pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);
    uint8_t *buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));

    avpicture_fill((AVPicture *) pFrame, buffer, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);

    // Read frame

    AVPacket packet;

    int framesNumber = 0;
    while (av_read_frame(pFormatCtx, &packet) >= 0)
    {
        if(packet.stream_index != 0)
            continue;

        int ret = avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
        if (ret > 0)
        {
            printf("Frame is decoded, size %d\n", ret);
            pFrame->quality = 4;
            return pFrame;
        }
        else
            printf("Error [%d] while decoding frame: %s\n", ret, strerror(AVERROR(ret)));
    }
    return NULL; // no frame was decoded
}
int combine_images_to_video(const char * infile_dir, const char * infile_prefix, const char* infile_surname, int total_frames,const char *outfile)
{


    if (total_frames <= 0){
        std::cout << "Usage: cv2ff <dir_name> <prefix> <image surname> <total frames> <outfile>" << std::endl;
        std::cout << "Please check that the 4th argument is integer value of total frames"<<std::endl;
        return 1;
    }
    printf("max %d frames\n",total_frames);

    char *imageFileName;
    char numberChar[NUMNUMBER];
    // initialize FFmpeg library
    av_register_all();
    //  av_log_set_level(AV_LOG_DEBUG);
    int ret;

    const int dst_width = W_VIDEO;
    const int dst_height = H_VIDEO;
    const AVRational dst_fps = {30, 1};//{fps,1}


    // open output format context
    AVFormatContext* outctx = nullptr;
    ret = avformat_alloc_output_context2(&outctx, nullptr, nullptr, outfile);

    //outctx->video_codec->
    if (ret < 0) {
        std::cerr << "fail to avformat_alloc_output_context2(" << outfile << "): ret=" << ret;
        return 2;
    }

    // open output IO context
    ret = avio_open2(&outctx->pb, outfile, AVIO_FLAG_WRITE, nullptr, nullptr);
    if (ret < 0) {
        std::cerr << "fail to avio_open2: ret=" << ret;
        return 2;
    }
// create new video stream
    AVCodec* vcodec = avcodec_find_encoder(outctx->oformat->video_codec);
    AVStream* vstrm = avformat_new_stream(outctx, vcodec);
    if (!vstrm) {
        std::cerr << "fail to avformat_new_stream";
        return 2;
    }
    avcodec_get_context_defaults3(vstrm->codec, vcodec);
    vstrm->codec->width = dst_width;
    vstrm->codec->height = dst_height;
    vstrm->codec->pix_fmt = vcodec->pix_fmts[0];
    vstrm->codec->time_base = vstrm->time_base = av_inv_q(dst_fps);
    vstrm->r_frame_rate = vstrm->avg_frame_rate = dst_fps;
    if (outctx->oformat->flags & AVFMT_GLOBALHEADER)
        vstrm->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

    // open video encoder
    ret = avcodec_open2(vstrm->codec, vcodec, nullptr);
    if (ret < 0) {
        std::cerr << "fail to avcodec_open2: ret=" << ret;
        return 2;
    }

    std::cout
            << "outfile: " << outfile << "\n"
            << "format:  " << outctx->oformat->name << "\n"
            << "vcodec:  " << vcodec->name << "\n"
            << "size:    " << dst_width << 'x' << dst_height << "\n"
            << "fps:     " << av_q2d(dst_fps) << "\n"
            << "pixfmt:  " << av_get_pix_fmt_name(vstrm->codec->pix_fmt) << "\n"
            << std::flush;

    // initialize sample scaler
    SwsContext* swsctx = sws_getCachedContext(
            nullptr, dst_width, dst_height, AV_PIX_FMT_BGR24,
            dst_width, dst_height, vstrm->codec->pix_fmt, SWS_BICUBIC, nullptr, nullptr, nullptr);
    if (!swsctx) {
        std::cerr << "fail to sws_getCachedContext";
        return 2;
    }

    // allocate frame buffer for encoding
    AVFrame* frame = av_frame_alloc();
    std::vector<uint8_t> framebuf(avpicture_get_size(vstrm->codec->pix_fmt, dst_width, dst_height));
    avpicture_fill(reinterpret_cast<AVPicture*>(frame), framebuf.data(), vstrm->codec->pix_fmt, dst_width, dst_height);
    frame->width = dst_width;
    frame->height = dst_height;
    frame->format = static_cast<int>(vstrm->codec->pix_fmt);

    // encoding loop
    avformat_write_header(outctx, nullptr);
    int64_t frame_pts = 0;
    unsigned nb_frames = 0;
    bool end_of_stream = false;
    int got_pkt = 0;
    int i =0;
    imageFileName = (char *)malloc(strlen(infile_dir)+strlen(infile_prefix)+NUMNUMBER+strlen(infile_surname)+1);
    do{
        if(!end_of_stream){

            strcpy(imageFileName,infile_dir);
            //strcat(imageFileName,"/");
            strcat(imageFileName,infile_prefix);
            sprintf(numberChar,"%03d",i+1);
            strcat(imageFileName,numberChar);
            //strcat(imageFileName,".");
            strcat(imageFileName,infile_surname);
            __android_log_print(1, "RecordingImage", "%s", imageFileName);
            std::cout<<imageFileName<<std::endl;


            AVFrame* frame_from_file =  OpenImage(imageFileName);
            if(!frame_from_file){
                std::cout<<"error OpenImage"<<std::endl;
                return 5;
            }
            // use the decoded frame's own line sizes as the source stride
            sws_scale(swsctx, frame_from_file->data, frame_from_file->linesize, 0, frame_from_file->height, frame->data, frame->linesize);
            frame->pts = frame_pts++;
            av_frame_free(&frame_from_file);
        }
        // encode video frame
        AVPacket pkt;
        pkt.data = nullptr;
        pkt.size = 0;
        av_init_packet(&pkt);
        ret = avcodec_encode_video2(vstrm->codec, &pkt, end_of_stream ? nullptr : frame, &got_pkt);
        if (ret < 0) {
            std::cerr << "fail to avcodec_encode_video2: ret=" << ret << "\n";
            return 2;
        }

        // rescale packet timestamp
        pkt.duration = 1;
        av_packet_rescale_ts(&pkt, vstrm->codec->time_base, vstrm->time_base);
        // write packet
        av_write_frame(outctx, &pkt);
        std::cout << nb_frames << '\r' << std::flush;  // dump progress
        ++nb_frames;

        av_free_packet(&pkt);
        i++;
        if(i==total_frames-1)
            end_of_stream = true;
    } while (i<total_frames);

    av_write_trailer(outctx);
    std::cout << nb_frames << " frames encoded" << std::endl;

    av_frame_free(&frame);
    avcodec_close(vstrm->codec);
    avio_close(outctx->pb);
    avformat_free_context(outctx);
    free(imageFileName);


    return 0;
}

Answer 3 (score: 0)

We can use ffmpeg to create a video from images.

Check out my post on using ffmpeg in Android.

Use the below command to create a video from images placed in the same folder:

String command[] = {"-y", "-r", "1/5", "-i", src.getAbsolutePath(),
        "-c:v", "libx264", "-vf", "fps=25", "-pix_fmt", "yuv420p", dest.getAbsolutePath()};

Here,

src.getAbsolutePath() is the absolute path of all the input images.

For example, if all the images are stored in an Images folder inside the Pictures directory, with names like extract_picture001.jpg, extract_picture002.jpg, extract_picture003.jpg, ..., then:

String filePrefix = "extract_picture";
String fileExtn = ".jpg";
File picDir = Environment.getExternalStoragePublicDirectory(
        Environment.DIRECTORY_PICTURES);
File dir = new File(picDir, "Images");
File src = new File(dir, filePrefix + "%03d" + fileExtn);

To create a video from images placed in different folders, you have to create a text file listing the image paths, and then specify the path of that text file as the input option. For example,

Text file

file '/storage/emulated/0/DCIM/Camera/P_20170807_143916.jpg'
duration 2
file '/storage/emulated/0/DCIM/Pic/P_20170305_142948.jpg'
duration 5
file '/storage/emulated/0/DCIM/Camera/P_20170305_142939.jpg'
duration 6
file '/storage/emulated/0/DCIM/Pic/P_20170305_142818.jpg'
duration 2

Command

String command[] = {"-y", "-f", "concat", "-safe", "0", "-i", textFile.getAbsolutePath(), "-vsync", "vfr", "-pix_fmt", "yuv420p", dest.getAbsolutePath()};

where textFile.getAbsolutePath() is the absolute path of the text file.
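
As a side note, here is a minimal sketch of how such a list file could be generated programmatically (the method name, image paths, durations and output location are hypothetical):

import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintWriter;

// Write an ffmpeg concat-demuxer list file from image paths and per-image durations.
public static File writeConcatList(File dir, String[] imagePaths, int[] durations) throws IOException {
    File textFile = new File(dir, "input_list.txt");
    try (PrintWriter writer = new PrintWriter(new FileWriter(textFile))) {
        for (int i = 0; i < imagePaths.length; i++) {
            writer.println("file '" + imagePaths[i] + "'"); // image to show
            writer.println("duration " + durations[i]);     // seconds to show it
        }
    }
    return textFile;
}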

Check this ffmpeg doc for more information.