我正在使用 ffmpeg 读取 MPEG-4 视频流，读取其中的一些属性并据此执行处理。我希望能用 VLC 之类的播放器播放正在打开的视频，并在处理完成后播放输出视频，以检查是否存在延迟。能否把视频推送到某个端口？这样我就可以让 VLC 播放器从该端口读取视频作为输入。
这是我到目前为止的代码。我使用 MV_generation 方法从视频流中提取特征并进行比较。
static int MV_generation(const AVPacket *pkt)
{
    // Decode one packet, extract the motion-vector side data of every frame
    // it yields, build a per-frame signature string, and compare its hash
    // against the reference hashes loaded from disk by File_read().
    // Returns 0 on success, or a negative libavcodec error code.
    std::vector<unsigned long long> vl = File_read();
    std::hash<string> hash1;
    // NOTE(review): the original code contained a stray fragment
    // "std::ios_base::app);" here (a broken remnant of an ofstream open
    // call) that did not compile. It has been removed; `outData` is assumed
    // to be opened elsewhere before this function runs -- confirm.

    int ret = avcodec_send_packet(video_dec_ctx, pkt);
    if (ret < 0) {
        //fprintf(stderr, "Error while sending a packet to the decoder: %s\n", av_err2str(ret));
        return ret;
    }
    video_frame_count++;

    // Drain every frame the decoder can produce from this packet.
    while (ret >= 0) {
        ret = avcodec_receive_frame(video_dec_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            break;
        }
        else if (ret < 0) {
            return ret;
        }

        AVFrameSideData *sd =
            av_frame_get_side_data(frame, AV_FRAME_DATA_MOTION_VECTORS);
        if (sd) {
            const AVMotionVector *mvs = (const AVMotionVector *)sd->data;
            const size_t mv_count = sd->size / sizeof(*mvs);

            // Build the frame signature from "interesting" vectors: blocks
            // that actually moved, or blocks outside the 100x100 region.
            string str;
            for (size_t i = 0; i < mv_count; i++) {
                const AVMotionVector *mv = &mvs[i];
                int x_src = mv->src_x;
                int y_src = mv->src_y;
                int x_dst = mv->dst_x;
                int y_dst = mv->dst_y;
                if (x_src != x_dst || y_src != y_dst ||
                    x_src > 100 || y_src > 100 || x_dst > 100 || y_dst > 100) {
                    // NOTE(review): coordinates are concatenated with no
                    // separator, so e.g. (1,23) and (12,3) produce the same
                    // text. Kept as-is because the reference hashes from
                    // File_read() were presumably produced the same way.
                    str.append(to_string(x_src)).append(to_string(y_src))
                       .append(to_string(x_dst)).append(to_string(y_dst));
                }
            }

            // Hash once per frame; the original re-hashed the same string
            // on every iteration of the comparison loop.
            const unsigned long long frame_hash = hash1(str);
            for (unsigned long long y : vl)
            {
                if (frame_hash == y)
                {
                    cout << "matched frame_no : " << video_frame_count << endl;
                }
            }
        }
        av_frame_unref(frame);
    }
    outData.close();
    return 0;
}
答案 0（得分：0）
我已经像这样使用opencv显示了帧。
Mat YUV420ToBGRA(uchar *_buffer[3], int _rowBytes, int _width, int _height){
    // Convert one planar YUV 4:2:0 frame to a BGRA Mat, display it in the
    // "video" window, and return it.
    //   _buffer   - three plane pointers: Y (full res), Cb and Cr (half res)
    //   _rowBytes - stride in bytes of the Y plane; the chroma planes are
    //               assumed to use _rowBytes / 2 -- confirm against decoder
    //   _width/_height - luma dimensions in pixels
    // Returns the BGRA image (uninitialized contents if any plane is NULL).
    Mat result(_height, _width, CV_8UC4);
    if (_buffer == NULL || _buffer[0] == NULL || _buffer[1] == NULL || _buffer[2] == NULL)
        return result;

    // Write straight into the Mat's own storage. The original code allocated
    // a second buffer with `new` and pointed result.data at it, leaking both
    // the Mat's original allocation and the new buffer (Mat never owned it).
    for (int i = 0; i < _height; i++){
        uchar *row = result.ptr<uchar>(i);
        for (int j = 0; j < _width; j++){
            // 4:2:0 subsampling: one chroma sample per 2x2 luma block.
            uchar y  = _buffer[0][(i * _rowBytes) + j];
            uchar cb = _buffer[1][((i / 2) * (_rowBytes / 2)) + (j / 2)];
            uchar cr = _buffer[2][((i / 2) * (_rowBytes / 2)) + (j / 2)];
            // YCbCr -> RGB using the same coefficients as the original code.
            row[4 * j + 0] = saturate_cast<uchar>(y + 1.772 * (cb - 128));                      // B
            row[4 * j + 1] = saturate_cast<uchar>(y - 0.344 * (cb - 128) - 0.714 * (cr - 128)); // G
            row[4 * j + 2] = saturate_cast<uchar>(y + 1.402 * (cr - 128));                      // R
            row[4 * j + 3] = 255;                                                               // A (opaque)
        }
    }
    imshow("video", result);
    waitKey(10);
    return result;
}
在main方法中我像这样调用这个方法。
// NOTE(review): as written, `frame` is NULL when dereferenced below. In the
// real program it must point to the AVFrame filled by avcodec_receive_frame
// in the decode loop before this call -- this snippet is only illustrative.
static AVFrame *frame = NULL;
YUV420ToBGRA(frame->data,      // three plane pointers: Y, Cb, Cr
frame->linesize[0],            // Y-plane stride in bytes
frame->width,
frame->height);
请注意,您必须在循环内调用此方法,该循环逐帧输入方法。
答案 1（得分：0）
一旦帧转换为mat
,也可以使用此方法int ShowVideo(Mat mRGB, string textOnVideo, string windowName){
// Overlay `textOnVideo` on the current decoded frame and show it in
// `windowName`. Always returns 0.
// NOTE(review): relies on globals set by the decoding loop -- `frame`,
// `dec_ctx`, `buff` (raw YV12 pixel buffer) and `Pict_type`; the incoming
// `mRGB` contents are discarded and replaced -- confirm callers expect that.
namedWindow(windowName, 1);
Pict_type = frame->pict_type;
//cout << av_get_picture_type_char(Pict_type); //I P or B frame
if (AV_PICTURE_TYPE_NONE != Pict_type)
{
// Redundant: the window was already created above with the same flags.
namedWindow(windowName, 1);
// YV12 layout: height rows of Y followed by height/2 rows of packed chroma,
// hence the (h + h/2) x w single-channel wrapper around `buff`.
mRGB = Mat(dec_ctx->height, dec_ctx->width, CV_8UC3);
Mat mYUV(dec_ctx->height + dec_ctx->height / 2, dec_ctx->width, CV_8UC1, (void*)buff);
cvtColor(mYUV, mRGB, CV_YUV2RGB_YV12, 3);
putText(mRGB, textOnVideo,
Point(100, 100), FONT_HERSHEY_PLAIN, 1, Scalar(0, 0, 255, 255));
imshow(windowName, mRGB);
waitKey(1);
}
return 0;
}