当我从视频中提取帧时,我注意到ffmpeg
不会完成某些图像的渲染。问题最终是两个jpeg
图像之间的字节“填充”。如果我的缓冲区大小为4096
,并且前一幅图像的结尾和下一幅图像的开头恰好落在同一个缓冲区内(两者之间没有任何字节分隔),那么下一幅图像就无法正确渲染。为什么会这样?
-i path -f image2pipe -c:v mjpeg -q:v 2 -vf fps=25 pipe:1
渲染帧:
代码示例:
// Extracts video frames by piping ffmpeg's MJPEG output and splitting the
// byte stream on the JPEG SOI (FF D8) / EOI (FF D9) markers.
public void ExtractFrames()
{
    string FFmpegPath = "Path...";
    string Arguments = $"-i { VideoPath } -f image2pipe -c:v mjpeg -q:v 2 -vf fps=25/1 pipe:1";
    using (Process cmd = GetProcess(FFmpegPath, Arguments))
    {
        cmd.Start();
        // BUG FIX: StandardOutput.BaseStream is not guaranteed to be a
        // FileStream; the old "as FileStream" cast could silently produce null.
        Stream output = cmd.StandardOutput.BaseStream;

        // BUG FIX: the old fixed 4096-byte window scanned the whole buffer even
        // when Read() returned fewer bytes than requested, so stale bytes from
        // the previous fill corrupted marker detection whenever two JPEGs met
        // inside one buffer. Scanning a sliding (prev, cur) byte pair has no
        // window boundary, so adjacent images can never be split incorrectly.
        bool add = false;          // true while inside a JPEG (after SOI, before EOI)
        int n = 0;                 // saved-frame counter
        MemoryStream frame = new MemoryStream();
        int prev = output.ReadByte();
        int cur;
        while (prev != -1 && (cur = output.ReadByte()) != -1)
        {
            if (prev == 0xFF && cur == 0xD8)       // SOI: a new JPEG begins
            {
                add = true;
            }
            if (prev == 0xFF && cur == 0xD9)       // EOI: current JPEG complete
            {
                n++;
                add = false;                       // 0xFF below is written explicitly, not via 'add'
                frame.Write(new byte[] { 255, 217 }, 0, 2);
                File.WriteAllBytes($@"C:\Path...\{n}.jpg", frame.ToArray());
                frame.SetLength(0);                // recycle instead of re-allocating
                frame.Position = 0;
            }
            if (add)
                frame.WriteByte((byte)prev);
            prev = cur;
        }
        cmd.WaitForExit();
        cmd.Close();
    }
}
// Builds a Process descriptor for the given executable with stdout
// redirected (required for reading the piped ffmpeg output).
private Process GetProcess(string FileName, string Arguments)
{
    var startInfo = new ProcessStartInfo
    {
        FileName = FileName,
        Arguments = Arguments,
        UseShellExecute = false,        // must be false to allow redirection
        RedirectStandardOutput = true,  // frames are read from stdout
        CreateNoWindow = false,
    };

    return new Process { StartInfo = startInfo };
}
长度为60秒或以上的视频样本(> 480p)应用于测试。
答案 0 :(得分:3)
这是一个普遍存在的问题,以下引用自 Adobe 网站:
答案就在那里-默认渲染输出未压缩, 产生如此高的数据速率,即使功能强大的计算机也永远无法 能够流畅地播放。
这里的事情很简单:即使使用低质量的图像,您也要呈现高数据速率。在这种情况下,最大缓冲区大小确实为4096
。如果该缓冲区中同时包含前一幅图像和下一幅图像的字节,而两者之间没有任何分隔字节,则 FFmpeg 无法确定帧的边界,因此会跳过该帧——宁可丢弃也不随意猜测应当刷新哪一帧。
如果在两幅图像的字节之间加以分隔,就能帮助 FFmpeg 划定上一幅与下一幅图像的字节边界,从而更容易区分要渲染的帧,因此不会跳帧。
答案 1 :(得分:3)
如果文件已存储,那么告诉FFmpeg将该视频文件转换为Jpegs可能会更容易。
(1)读取视频文件和输出帧Jpeg(不涉及管道或内存/文件流):
// Convert a stored video file straight to a numbered JPEG sequence
// (no pipes or memory/file streams involved).
string ffmpegExe = "C:/FFmpeg/bin/ffmpeg.exe";
string inputVideo = "C:/someFolder/test_vid.mp4";
string outputFolder = "C:/someOutputFolder/";

// Arguments that make ffmpeg emit one image per frame directly;
// the n_%03d pattern replaces the manual "n++" counter.
string ffmpegArgs = "-i " + inputVideo + " -vf fps=25/1 " + outputFolder + "n_%03d.jpg";

var startInfo = new System.Diagnostics.ProcessStartInfo(ffmpegExe, ffmpegArgs)
{
    RedirectStandardError = false,  // nothing is read back from the process
    RedirectStandardOutput = false,
    UseShellExecute = true,
    CreateNoWindow = true,          // don't need the black window
};

// Create a process, assign its ProcessStartInfo and start it.
var ffmpeg = new System.Diagnostics.Process { StartInfo = startInfo };
ffmpeg.Start();
// Process started; check the output folder for images.
(2)管道方法:
使用管道时,FFmpeg将像广播一样流回输出。如果到达最后一个视频帧,则相同的最后一个帧“图像”将无限重复。您必须手动告诉 FFmpeg,何时停止发送到您的应用程序(这种情况下没有“退出”代码)。
代码中的这一行将指定停止之前如何提取任何帧:
int frames_expected_Total = 0; //is... (frame_rate x Duration) = total expected frames
您可以将限制计算为:input-Duration / output-FPS
或output-FPS * input-Duration
。
例如:视频时长为4.88秒,因此25 * 4.88 =
的122帧是此视频的限制。
”如果我的缓冲区大小为 4096 ...,则下一张图像无法正确呈现。 为什么会这样?”
由于缓冲区太小而无法容纳完整的图像,因此您的图像“出现毛刺” ...
缓冲区大小公式为:
int BufferSize = ( video_Width * video_Height );
由于最终压缩的jpeg小于此大小,因此可以保证BufferSize
可以容纳任何完整的帧而不会出错。出于兴趣,您从哪里得到 4096 号码?标准输出通常提供的最大数据包大小为32kb( 32768 字节)。
解决方案(已测试):
这是一个完整的有效示例,用于解决“小故障”图像问题,检查代码注释...
using System;
using System.IO;
using System.Net;
using System.Drawing;
using System.Diagnostics;
using System.Collections.Generic;
namespace FFmpeg_Vid_to_JPEG //replace with your own project "namespace"
{
    class Program
    {
        public static void Main(string[] args)
        {
            //# testing the Extract function...
            ExtractFrames();
        }

        /// <summary>
        /// Reads ffmpeg's piped MJPEG stream and saves each frame as a numbered
        /// JPEG by scanning for the SOI (FF D8) / EOI (FF D9) markers.
        /// </summary>
        public static void ExtractFrames()
        {
            //# define paths for PROCESS
            string FFmpegPath = "C:/FFmpeg/bin/ffmpeg.exe";
            string VideoPath = "C:/someFolder/test_vid.mp4";

            //# FFmpeg arguments: stream MJPEG frames to stdout (pipe:1)
            string str_myCommandArgs = "-i " + VideoPath + " -f image2pipe -c:v mjpeg -q:v 2 -vf fps=25/1 pipe:1";

            //# define paths for SAVE folder & filename
            string save_folder = "C:/someOutputFolder/";
            string save_filename = ""; //updated per frame during the SAVE step

            MemoryStream mStream = new MemoryStream(); //create once, recycle same for each frame

            bool got_current_JPG_End = false; //true between an EOI and the next SOI
            int pos_in_Buffer = 0;    //write/scan position within the buffer
            int this_jpeg_len = 0;    //bytes accumulated since the last saved frame
            int pos_jpeg_start = 0; int pos_jpeg_end = 0; //SOI / EOI positions within the buffer
            int jpeg_count = 0;       //count of exported Jpeg files
            int frames_expected_Total = 0; //frames to grab before stopping (frame_rate x duration)

            //# buffer sized as input video's width x height (921600 = 1280 W x 720 H);
            //# the compressed jpeg is always smaller, so one whole frame always fits.
            int BufferSize = 921600;
            byte[] buffer = new byte[BufferSize + 1];

            // Create a process, assign its ProcessStartInfo and start it
            ProcessStartInfo cmd_StartInfo = new ProcessStartInfo(FFmpegPath, str_myCommandArgs);
            cmd_StartInfo.RedirectStandardError = true;
            cmd_StartInfo.RedirectStandardOutput = true; //frames are read from Process.StandardOutput
            cmd_StartInfo.UseShellExecute = false;
            cmd_StartInfo.CreateNoWindow = true; //do not create the black window

            Process cmd = new System.Diagnostics.Process();
            cmd.StartInfo = cmd_StartInfo;

            //# BUG FIX: the original called cmd.Start() unconditionally and then
            //# AGAIN inside the if-condition, launching ffmpeg twice. Start once.
            if (cmd.Start())
            {
                //# holds FFmpeg output bytes stream...
                var ffmpeg_Output = cmd.StandardOutput.BaseStream;
                cmd.BeginErrorReadLine(); //# drain stderr so ffmpeg cannot block on a full pipe

                //# read the first two bytes so the pair-wise marker checks below
                //# always have a [pos-1] byte to inspect
                ffmpeg_Output.Read(buffer, 0, 1);
                ffmpeg_Output.Read(buffer, 1, 1);
                pos_in_Buffer = this_jpeg_len = 2; //update reading pos

                //# the first jpeg's SOI is always at buffer pos [0] and [1]
                pos_jpeg_start = 0; got_current_JPG_End = false;

                //# e.g. duration 4.88 sec at 25 fps --> (25 x 4.88) = 122 frames
                frames_expected_Total = 122; //number of Jpegs to get before stopping

                while (true)
                {
                    //# piped video never "ends": stop manually at the frame limit
                    if (jpeg_count == (frames_expected_Total + 1))
                    {
                        cmd.Close(); cmd.Dispose(); //exit the process
                        break; //exit if got required number of frame Jpegs
                    }

                    //# BUG FIX: honor Read's return value; the original ignored it
                    //# and would spin forever on stale bytes after end-of-stream.
                    if (ffmpeg_Output.Read(buffer, pos_in_Buffer, 1) == 0)
                        break;
                    this_jpeg_len += 1; //one more byte belongs to the current jpeg

                    //# find JPEG start (SOI is bytes 0xFF 0xD8)
                    if ((buffer[pos_in_Buffer] == 0xD8) && (buffer[pos_in_Buffer - 1] == 0xFF))
                    {
                        if (got_current_JPG_End == true)
                        {
                            pos_jpeg_start = (pos_in_Buffer - 1);
                            got_current_JPG_End = false;
                        }
                    }

                    //# find JPEG ending (EOI is bytes 0xFF 0xD9) then SAVE FILE
                    if ((buffer[pos_in_Buffer] == 0xD9) && (buffer[pos_in_Buffer - 1] == 0xFF))
                    {
                        if (got_current_JPG_End == false)
                        {
                            pos_jpeg_end = pos_in_Buffer; got_current_JPG_End = true;

                            //# update saved filename
                            save_filename = save_folder + "n_" + (jpeg_count).ToString() + ".jpg";
                            try
                            {
                                //# If the Jpeg save folder doesn't exist, create it.
                                if (!Directory.Exists(save_folder)) { Directory.CreateDirectory(save_folder); }
                            }
                            catch (Exception)
                            {
                                //# best-effort: File.WriteAllBytes below surfaces real errors
                            }

                            //# BUG FIX: write exactly SOI..EOI. The original wrote
                            //# this_jpeg_len bytes FROM pos_jpeg_start, overshooting
                            //# past the EOI whenever padding preceded the SOI.
                            mStream.Write(buffer, pos_jpeg_start, pos_jpeg_end - pos_jpeg_start + 1);
                            //# save to disk...
                            File.WriteAllBytes(@save_filename, mStream.ToArray());

                            //recycle MemoryStream, avoids creating multiple = new MemoryStream();
                            mStream.SetLength(0); mStream.Position = 0;

                            //# reset for next pic
                            jpeg_count += 1; this_jpeg_len = 0;
                            pos_in_Buffer = -1; //becomes 0 at the incrementation below
                        }
                    }
                    pos_in_Buffer += 1; //increment to store next byte in stdOut stream
                } //# end While
            }
            else
            {
                // Handler code here for "Process is not running" situation
            }
        } //end ExtractFrame function
    } //end class
} //end program
注意:在修改上述代码时,请确保在函数 ExtractFrames() 内部直接创建 Process;如果改用某个返回 Process 的外部函数来创建,代码将无法正常工作。也就是说,不要写成:using (Process cmd = GetProcess(FFmpegPath, Arguments))。
祝你好运。让我知道怎么回事。
(PS:对不起“代码过多”的代码注释,这是对将来的读者的好处,他们可能会或可能不理解此代码在缓冲区问题上的正确工作)。