I'm interested in a library, written in Delphi/Pascal or C++ (for Windows), that lets me record the desktop screen to a video format.
If by any chance I haven't explained something properly, please feel free to ask so I can rephrase or provide more details.
Answer 0 (score: 11)
FFMPEG supports screen capture and is cross-platform.
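As a minimal sketch (assuming an FFmpeg build that includes the gdigrab device on Windows, or x11grab on Linux; the output file name is just an example), capturing from the command line looks roughly like this:
ffmpeg -f gdigrab -framerate 30 -i desktop output.mp4
ffmpeg -f x11grab -framerate 30 -video_size 1920x1080 -i :0.0 output.mp4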
Answer 1 (score: 3)
You could try Windows Media Encoder (freeware, wmv/asf only) or VLC (GPL, Win/OSX/Linux). Note that "hardware accelerated" content (e.g. Direct3D and OpenGL rendering) will not be captured, and there will be some quality loss due to video compression. How much loss depends on your settings (codec, bitrate, resolution, etc.).
Example: How to Stream your Desktop using VLC
vlc screen:// :screen-fps=30 :screen-caching=100 --sout '#transcode{vcodec=mp4v,vb=4096,acodec=mpga,ab=256,scale=1,width=1280,height=800}:rtp{dst=192.168.1.2,port=1234,access=udp,mux=ts}'
You can find more options in the VLC documentation, for example how to save your stream to a file.
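As a rough sketch of the save-to-file variant (the sout chain follows the VLC documentation; the codec, bitrate, and the capture.mp4 file name are illustrative choices, not fixed values):
vlc screen:// :screen-fps=30 :screen-caching=100 --sout "#transcode{vcodec=mp4v,vb=4096,scale=1}:std{access=file,mux=mp4,dst=capture.mp4}"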
Answer 2 (score: 3)
Here's the one I use in Delphi; it's called the "Professional Screen Camera Component". Admittedly, I had to make a few changes to support Unicode versions of Delphi (replacing PChar with PAnsiChar and Char with AnsiChar).
It happily records at whatever frame rate I set, encodes the video with whatever codec I specify (if I want it to), and lets you specify the region to record.
It comes with a demo project, too!
Oh, and it's free/open source!
Answer 3 (score: 2)
FFmpeg can be used to capture the screen.
Video demo of screen recording with FFMPEG: https://www.youtube.com/watch?v=a31bBY3HuxE
Container format: MP4
Codec: MPEG4
Follow these steps to record the screen to a video file using FFmpeg and its supporting libraries:
1. Register the required components (formats, codecs, devices).
2. Use "x11grab" with av_find_input_format (on Linux).
3. Specify where on the screen to capture (e.g. ":0.0+10,250" passed to avformat_open_input).
4. Do the usual video parameter initialization and memory allocation.
5. Start capturing frames and store them in the file.
6. Finally, release the allocated resources when done.
The code below is written in C++ for the Linux (Ubuntu) platform, and the output video is in MP4 format.
// sample code to record the computer screen !
#ifndef SCREENRECORDER_H
#define SCREENRECORDER_H
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <cstring>
#include <math.h>
#include <string.h>
#define __STDC_CONSTANT_MACROS
//FFMPEG LIBRARIES
extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavcodec/avfft.h"
#include "libavdevice/avdevice.h"
#include "libavfilter/avfilter.h"
#include "libavfilter/avfiltergraph.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"
#include "libavformat/avformat.h"
#include "libavformat/avio.h"
// libav resample
#include "libavutil/opt.h"
#include "libavutil/common.h"
#include "libavutil/channel_layout.h"
#include "libavutil/imgutils.h"
#include "libavutil/mathematics.h"
#include "libavutil/samplefmt.h"
#include "libavutil/time.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/file.h"
// lib swresample
#include "libswscale/swscale.h"
}
class ScreenRecorder
{
private:
AVInputFormat *pAVInputFormat;
AVOutputFormat *output_format;
AVCodecContext *pAVCodecContext;
AVFormatContext *pAVFormatContext;
AVFrame *pAVFrame;
AVFrame *outFrame;
AVCodec *pAVCodec;
AVCodec *outAVCodec;
AVPacket *pAVPacket;
AVDictionary *options;
AVOutputFormat *outAVOutputFormat;
AVFormatContext *outAVFormatContext;
AVCodecContext *outAVCodecContext;
AVStream *video_st;
AVFrame *outAVFrame;
const char *dev_name;
const char *output_file;
double video_pts;
int out_size;
int codec_id;
int value;
int VideoStreamIndx;
public:
ScreenRecorder();
~ScreenRecorder();
int openCamera();
int init_outputfile();
int collectFrames();
};
#endif
using namespace std;
ScreenRecorder::ScreenRecorder()
{
cout<<"\n\n Registering required functions...";
av_register_all();
avcodec_register_all();
avdevice_register_all();
cout<<"\n\n Registered successfully...";
}
ScreenRecorder::~ScreenRecorder()
{
avformat_close_input(&pAVFormatContext);
if( !pAVFormatContext )
{
cout<<"\n\n1.Success : avformat_close_input()";
}
else
{
cout<<"\n\nError : avformat_close_input()";
}
avformat_free_context(pAVFormatContext);
if( !pAVFormatContext )
{
cout<<"\n\n2.Success : avformat_free_context()";
}
else
{
cout<<"\n\nError : avformat_free_context()";
}
cout<<"\n\n---------------Successfully released all resources------------------\n\n\n";
cout<<endl;
cout<<endl;
cout<<endl;
}
int ScreenRecorder::collectFrames()
{
int flag;
int frameFinished;
// When you decode a single packet, you may not yet have enough data for a full frame
// (depending on the codec, sometimes you do). Once a group of packets representing a
// frame has been decoded, you have a picture; frameFinished tells you when that happens.
int frame_index = 0;
value = 0;
pAVPacket = (AVPacket *)av_malloc(sizeof(AVPacket));
av_init_packet(pAVPacket);
pAVFrame = av_frame_alloc();
if( !pAVFrame )
{
cout<<"\n\nError : av_frame_alloc()";
return -1;
}
outFrame = av_frame_alloc();//Allocate an AVFrame and set its fields to default values.
if( !outFrame )
{
cout<<"\n\nError : av_frame_alloc()";
return -1;
}
int video_outbuf_size;
int nbytes = av_image_get_buffer_size(outAVCodecContext->pix_fmt,outAVCodecContext->width,outAVCodecContext->height,32);
uint8_t *video_outbuf = (uint8_t*)av_malloc(nbytes);
if( video_outbuf == NULL )
{
cout<<"\n\nError : av_malloc()";
}
// Setup the data pointers and linesizes based on the specified image parameters and the provided array.
value = av_image_fill_arrays( outFrame->data, outFrame->linesize, video_outbuf , AV_PIX_FMT_YUV420P, outAVCodecContext->width,outAVCodecContext->height,1 ); // returns : the size in bytes required for src
if(value < 0)
{
cout<<"\n\nError : av_image_fill_arrays()";
}
SwsContext* swsCtx_ ;
// Allocate and return swsContext.
// a pointer to an allocated context, or NULL in case of error
// Deprecated : Use sws_getCachedContext() instead.
swsCtx_ = sws_getContext(pAVCodecContext->width,
pAVCodecContext->height,
pAVCodecContext->pix_fmt,
outAVCodecContext->width,
outAVCodecContext->height,
outAVCodecContext->pix_fmt,
SWS_BICUBIC, NULL, NULL, NULL);
int ii = 0;
int no_frames = 100;
cout<<"\n\nEnter No. of Frames to capture : ";
cin>>no_frames;
AVPacket outPacket;
int j = 0;
int got_picture;
while( av_read_frame( pAVFormatContext , pAVPacket ) >= 0 )
{
if( ii++ == no_frames )break;
if(pAVPacket->stream_index == VideoStreamIndx)
{
value = avcodec_decode_video2( pAVCodecContext , pAVFrame , &frameFinished , pAVPacket );
if( value < 0)
{
cout<<"Error : avcodec_decode_video2()";
}
if(frameFinished)// Frame successfully decoded :)
{
sws_scale(swsCtx_, pAVFrame->data, pAVFrame->linesize,0, pAVCodecContext->height, outFrame->data,outFrame->linesize);
av_init_packet(&outPacket);
outPacket.data = NULL; // packet data will be allocated by the encoder
outPacket.size = 0;
avcodec_encode_video2(outAVCodecContext , &outPacket ,outFrame , &got_picture);
if(got_picture)
{
if(outPacket.pts != AV_NOPTS_VALUE)
outPacket.pts = av_rescale_q(outPacket.pts, video_st->codec->time_base, video_st->time_base);
if(outPacket.dts != AV_NOPTS_VALUE)
outPacket.dts = av_rescale_q(outPacket.dts, video_st->codec->time_base, video_st->time_base);
printf("Write frame %3d (size= %2d)\n", j++, outPacket.size/1000);
if(av_write_frame(outAVFormatContext , &outPacket) != 0)
{
cout<<"\n\nError : av_write_frame()";
}
av_packet_unref(&outPacket);
} // got_picture
av_packet_unref(&outPacket);
} // frameFinished
}
}// End of while-loop
value = av_write_trailer(outAVFormatContext);
if( value < 0)
{
cout<<"\n\nError : av_write_trailer()";
}
// Release per-call resources.
av_free(video_outbuf);
sws_freeContext(swsCtx_);
return 0;
}
int ScreenRecorder::openCamera()
{
value = 0;
options = NULL;
pAVFormatContext = NULL;
pAVFormatContext = avformat_alloc_context();//Allocate an AVFormatContext.
pAVInputFormat = av_find_input_format("x11grab");
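// Note: the question asks about Windows; there the equivalent capture device is
// "gdigrab", i.e. av_find_input_format("gdigrab") with "desktop" as the input URL
// passed to avformat_open_input() below.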
value = avformat_open_input(&pAVFormatContext, ":0.0+10,250", pAVInputFormat, NULL);
if(value != 0)
{
cout<<"\n\nError : avformat_open_input\n\nstopped...";
return -1;
}
value = av_dict_set( &options,"framerate","30",0 );
if(value < 0)
{
cout<<"\n\nError : av_dict_set(framerate , 30 , 0)";
return -1;
}
value = av_dict_set( &options, "preset", "medium", 0 );
if(value < 0)
{
cout<<"\n\nError : av_dict_set(preset , medium)";
return -1;
}
value = avformat_find_stream_info(pAVFormatContext,NULL); // Read packets to gather stream information.
if(value < 0)
{
cout<<"\n\nError : avformat_find_stream_info\nstopped...";
return -1;
}
VideoStreamIndx = -1;
for(int i = 0; i < pAVFormatContext->nb_streams; i++ ) // find video stream posistion/index.
{
if( pAVFormatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO )
{
VideoStreamIndx = i;
break;
}
} // End for-loop
if( VideoStreamIndx == -1)
{
cout<<"\n\nError : VideoStreamIndx = -1";
return -1;
}
// assign pAVFormatContext to VideoStreamIndx
pAVCodecContext = pAVFormatContext->streams[VideoStreamIndx]->codec;
pAVCodec = avcodec_find_decoder(pAVCodecContext->codec_id);
if( pAVCodec == NULL )
{
cout<<"\n\nError : avcodec_find_decoder()";
return -1;
}
value = avcodec_open2(pAVCodecContext , pAVCodec , NULL);//Initialize the AVCodecContext to use the given AVCodec.
if( value < 0 )
{
cout<<"\n\nError : avcodec_open2()";
return -1;
}
return 0;
}
int ScreenRecorder::init_outputfile()
{
outAVFormatContext = NULL;
value = 0;
output_file = "output.mp4";
avformat_alloc_output_context2(&outAVFormatContext, NULL, NULL, output_file);
if (!outAVFormatContext)
{
cout<<"\n\nError : avformat_alloc_output_context2()";
return -1;
}
/*Returns the output format in the list of registered output formats which best matches the provided parameters, or returns NULL if there is no match.
*/
output_format = av_guess_format(NULL, output_file ,NULL);
if( !output_format )
{
cout<<"\n\nError : av_guess_format()";
return -1;
}
video_st = avformat_new_stream(outAVFormatContext ,NULL);
if( !video_st )
{
cout<<"\n\nError : avformat_new_stream()";
return -1;
}
// Find the encoder first, then configure the stream's codec context.
// (This example keeps the deprecated per-stream codec context, video_st->codec,
// to stay consistent with the old decode/encode API used elsewhere in the file;
// calling avcodec_alloc_context3() here and then overwriting the pointer would leak.)
outAVCodec = avcodec_find_encoder(AV_CODEC_ID_MPEG4);
if( !outAVCodec )
{
cout<<"\n\nError : avcodec_find_encoder()";
return -1;
}
outAVCodecContext = video_st->codec;
outAVCodecContext->codec_id = AV_CODEC_ID_MPEG4; // or AV_CODEC_ID_H264, AV_CODEC_ID_MPEG1VIDEO
outAVCodecContext->codec_type = AVMEDIA_TYPE_VIDEO;
outAVCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;
outAVCodecContext->bit_rate = 400000; // e.g. 2500000 for higher quality
outAVCodecContext->width = 1920;
outAVCodecContext->height = 1080;
outAVCodecContext->gop_size = 3;
outAVCodecContext->max_b_frames = 2;
outAVCodecContext->time_base.num = 1;
outAVCodecContext->time_base.den = 30; // 30 fps
if (outAVCodecContext->codec_id == AV_CODEC_ID_H264)
{
av_opt_set(outAVCodecContext->priv_data, "preset", "slow", 0);
}
// Some container formats (like MP4) require global headers to be present
// Mark the encoder so that it behaves accordingly.
if ( outAVFormatContext->oformat->flags & AVFMT_GLOBALHEADER)
{
outAVCodecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
value = avcodec_open2(outAVCodecContext, outAVCodec, NULL);
if( value < 0)
{
cout<<"\n\nError : avcodec_open2()";
return -1;
}
if ( !(outAVFormatContext->flags & AVFMT_NOFILE) )
{
if( avio_open2(&outAVFormatContext->pb , output_file , AVIO_FLAG_WRITE ,NULL, NULL) < 0 )
{
cout<<"\n\nError : avio_open2()";
}
}
if(!outAVFormatContext->nb_streams)
{
cout<<"\n\nError : Output file dose not contain any stream";
return -1;
}
value = avformat_write_header(outAVFormatContext , &options);
if(value < 0)
{
cout<<"\n\nError : avformat_write_header()";
return -1;
}
cout<<"\n\nOutput file information :\n\n";
av_dump_format(outAVFormatContext , 0 ,output_file ,1);
return 0;
}
int main()
{
ScreenRecorder s_record;
s_record.openCamera();
s_record.init_outputfile();
s_record.collectFrames();
cout<<"\n\n---------EXIT_SUCCESS------------\n\n";
return 0;
}
/* to compile the code : g++ -Wno-format-zero-length -Wno-write-strings -L/home/abdullah/ffmpeg_build/lib/ -L/usr/lib/x86_64-linux-gnu/ -I/home/abdullah/ffmpeg_build/include/ -o ScreenRecorder ScreenRecorder.cpp -lavdevice -lavfilter -lswscale -lavformat -lavcodec -lavutil -lswresample -lm -lva -lpthread -lvorbis -lvpx -lopus -lz -lpostproc -ldl -lfdk-aac -lmp3lame -lvorbisenc -lvorbisfile -lx264 -ltheora -lx265 -ltheoraenc -ltheoradec -ldl -lrt -lbz2 -lasound -lSDL -lSDLmain -lSDL_ttf -lfreetype -lass -llzma -lftgl -lperl -lcrypto -lxcb -lxcb-shm -lxcb-xfixes -lao -lxcb-shape -lfftw3 */
Answer 4 (score: 1)
I haven't done this before, but when I googled it (as I'm sure you have), I came across this:
http://www.codeproject.com/KB/GDI/barry_s_screen_capture.aspx
It looks like it should do what you're asking for fairly easily (on Windows), and there is no license attached to it (as confirmed at the bottom of the page). I don't believe it is set up as a library, but I'm sure you could easily wrap the sample WinCap functions behind an interface to make one.
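The core of that GDI-based approach is simply blitting the desktop device context into a memory bitmap once per frame. Below is a minimal single-frame sketch using only the Win32 GDI API; it is not the article's WinCap code, and the CaptureDesktopFrame helper name is just for illustration. A recorder would call this repeatedly and hand the pixels to an encoder.
#include <windows.h>

// Capture one frame of the primary desktop into a device-dependent bitmap.
HBITMAP CaptureDesktopFrame()
{
    HDC screenDC = GetDC(NULL);                  // device context of the whole screen
    HDC memDC    = CreateCompatibleDC(screenDC); // memory DC to copy into
    int width    = GetSystemMetrics(SM_CXSCREEN);
    int height   = GetSystemMetrics(SM_CYSCREEN);

    HBITMAP bitmap = CreateCompatibleBitmap(screenDC, width, height);
    HGDIOBJ old    = SelectObject(memDC, bitmap);

    // Copy the current screen contents into the memory bitmap.
    BitBlt(memDC, 0, 0, width, height, screenDC, 0, 0, SRCCOPY);

    SelectObject(memDC, old);
    DeleteDC(memDC);
    ReleaseDC(NULL, screenDC);
    return bitmap; // caller is responsible for DeleteObject()
}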
Answer 5 (score: 1)
It may be overkill for your needs, but DataStead's video grabber component (TVideoGrabber) can also record screen activity and save the output as a video file. See http://www.datastead.com/products/tvideograbber/overview.html. I'm not affiliated with DataStead, but I've been a customer for a few years and it works well.
Answer 6 (score: 0)
Use screen_capture_lite: https://github.com/smasherprog/screen_capture_lite
It is a cross-platform C++ library.