I am streaming cv::Mat frames to an RTMP server with FFmpeg. I know that avio_open2() ends up calling librtmp functions such as RTMP_Init() and RTMP_SetupURL(), but when I only use FFmpeg to stream the cv::Mat frames I have no control over those RTMP details, and I want to resume the RTMP connection once the network recovers after a disconnection. Can anyone tell me whether there is a tool or example code that converts a cv::Mat into an RTMPPacket and streams it with librtmp? Thanks.
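For reference, this is roughly what I picture the librtmp side would have to look like. It is only a sketch based on my reading of the librtmp headers; connect_rtmp / send_flv_data / close_rtmp are names I made up, and producing the FLV-tagged buffer that RTMP_Write() expects from the encoded frame is exactly the part I do not know how to do:

#include <librtmp/rtmp.h>

// sketch only: a connection I could close and re-open myself after a network
// drop. 'url' must be writable and stay alive, librtmp parses it in place.
bool connect_rtmp(RTMP *&rtmp, char *url)
{
    rtmp = RTMP_Alloc();
    RTMP_Init(rtmp);
    if (!RTMP_SetupURL(rtmp, url))
        return false;
    RTMP_EnableWrite(rtmp);                       // publish instead of play
    return RTMP_Connect(rtmp, nullptr) && RTMP_ConnectStream(rtmp, 0);
}

// the missing piece: 'buf' would have to be FLV-tagged data built from the
// encoded cv::Mat / AVPacket, which is what I cannot produce by hand
int send_flv_data(RTMP *rtmp, const char *buf, int size)
{
    return RTMP_Write(rtmp, buf, size);
}

void close_rtmp(RTMP *rtmp)
{
    RTMP_Close(rtmp);
    RTMP_Free(rtmp);
}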
Update (relevant code):
// encode a frame and write the resulting packet to the rtmp server
static int encode_and_write_frame(AVCodecContext *codec_ctx, AVFormatContext *fmt_ctx, AVFrame *frame)
{
    AVPacket pkt = {0};
    av_init_packet(&pkt);

    int ret = avcodec_send_frame(codec_ctx, frame);
    if (ret < 0)
    {
        fprintf(stderr, "Error sending frame to codec context!\n");
        return ret;
    }

    ret = avcodec_receive_packet(codec_ctx, &pkt);
    if (ret < 0)
    {
        fprintf(stderr, "Error receiving packet from codec context!\n");
        return ret;
    }

    int ret1 = av_interleaved_write_frame(fmt_ctx, &pkt);
    av_packet_unref(&pkt);
    return ret1;
}
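(As a side note, I understand that avcodec_receive_packet() can return AVERROR(EAGAIN) while the encoder is still buffering input, so the receive part should probably be a loop instead of treating every negative value as an error. The following sketch is what I mean; drain_and_write_packets is just a name I made up:)

// sketch: drain every packet the encoder has ready and only treat real
// errors (not EAGAIN / EOF) as failures
static int drain_and_write_packets(AVCodecContext *codec_ctx, AVFormatContext *fmt_ctx)
{
    AVPacket pkt = {0};
    av_init_packet(&pkt);
    int ret;
    while ((ret = avcodec_receive_packet(codec_ctx, &pkt)) >= 0) {
        ret = av_interleaved_write_frame(fmt_ctx, &pkt);
        av_packet_unref(&pkt);
        if (ret < 0)
            return ret;                           // write failed (broken connection?)
    }
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        return 0;                                 // encoder just needs more input / is flushed
    return ret;                                   // genuine encoding error
}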
// scale the cv::Mat, copy its pixels into the AVFrame, then encode_and_write_frame
void Streamer::stream_frame(const cv::Mat &image, int64_t frame_duration)
{
    if (can_stream()) {
        const int stride[] = {static_cast<int>(image.step[0])};
        sws_scale(scaler.ctx, &image.data, stride, 0, image.rows,
                  picture.frame->data, picture.frame->linesize);
        picture.frame->pts += frame_duration; // frame timestamp, intended to be in milliseconds
        int ret = encode_and_write_frame(out_codec_ctx, format_ctx, picture.frame);
        std::cout << "stream_ret" << ret << std::endl;
    }
}
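(I am also not sure my "milliseconds" comment above is correct: as far as I understand, pts must be expressed in the encoder's time_base, so if frame_duration really is in milliseconds it would need a conversion first, something like this sketch:)

// sketch: rescale a millisecond duration into the encoder's time_base
// before advancing pts (assumes out_codec_ctx->time_base has been set)
int64_t duration_tb = av_rescale_q(frame_duration, AVRational{1, 1000}, out_codec_ctx->time_base);
picture.frame->pts += duration_tb;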
// Initialization
int Streamer::init(const StreamerConfig &streamer_config)
{
    init_ok = false;
    cleanup();
    config = streamer_config;

    if (!network_init_ok) {
        return 1;
    }

    // initialize the output format context with flv and no filename
    avformat_alloc_output_context2(&format_ctx, nullptr, "flv", nullptr);
    if (!format_ctx) {
        return 2;
    }

    // AVIOContext for accessing the resource indicated by the url
    if (!(format_ctx->oformat->flags & AVFMT_NOFILE)) {
        // open the connection and handshake with the rtmp server
        int avopen_ret = avio_open2(&format_ctx->pb, config.server.c_str(),
                                    AVIO_FLAG_WRITE, nullptr, nullptr);
        if (avopen_ret < 0) {
            fprintf(stderr, "failed to open stream output context, stream will not work\n");
            return 3;
        }
        rtmp_server_conn = true;
    }

    // use the selected codec
    AVCodecID codec_id = AV_CODEC_ID_H264;
    out_codec = avcodec_find_encoder(codec_id);
    if (!out_codec) {
        fprintf(stderr, "Could not find encoder for '%s'\n", avcodec_get_name(codec_id));
        return 4;
    }

    out_stream = avformat_new_stream(format_ctx, out_codec);
    if (!out_stream) {
        fprintf(stderr, "Could not allocate stream\n");
        return 5;
    }

    out_codec_ctx = avcodec_alloc_context3(out_codec);
    if (set_options_and_open_encoder(format_ctx, out_stream, out_codec_ctx, out_codec, config.profile,
                                     config.dst_width, config.dst_height, config.fps, config.bitrate, codec_id)) {
        return 6;
    }

    out_stream->codecpar->extradata_size = out_codec_ctx->extradata_size;
    out_stream->codecpar->extradata = static_cast<uint8_t*>(av_mallocz(out_codec_ctx->extradata_size));
    memcpy(out_stream->codecpar->extradata, out_codec_ctx->extradata, out_codec_ctx->extradata_size);

    av_dump_format(format_ctx, 0, config.server.c_str(), 1);

    picture.init(out_codec_ctx->pix_fmt, config.dst_width, config.dst_height);
    scaler.init(out_codec_ctx, config.src_width, config.src_height, config.dst_width, config.dst_height, SWS_BILINEAR);

    if (avformat_write_header(format_ctx, nullptr) < 0) {
        fprintf(stderr, "Could not write header!\n");
        return 7;
    }

    printf("stream time base = %d / %d \n", out_stream->time_base.num, out_stream->time_base.den);
    inv_stream_timebase = (double)out_stream->time_base.den / (double)out_stream->time_base.num;
    init_ok = true;
    return 0;
}
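What I would like to achieve, if it is possible with plain FFmpeg at all, is something along the lines of the sketch below. stream_frame_or_reconnect and last_write_failed are placeholders I made up, and I do not know whether tearing everything down and calling init() again is the right way to "resume" the RTMP connection, which is why I am asking about librtmp:

// sketch of the reconnect behaviour I am after (needs <thread> and <chrono>;
// 'last_write_failed' would have to be set from the return value of
// av_interleaved_write_frame inside stream_frame)
void Streamer::stream_frame_or_reconnect(const cv::Mat &image, int64_t frame_duration)
{
    stream_frame(image, frame_duration);          // as posted above, errors are only printed

    if (last_write_failed) {
        cleanup();                                // free codec/format contexts, close avio
        while (init(config) != 0) {               // redo the whole setup incl. the rtmp handshake
            std::this_thread::sleep_for(std::chrono::seconds(1));
        }
    }
}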