I am trying to split a contiguous buffer into 3 byte channels (RGB). The following is my actual workflow for filling the image from the buffer:
Besides that, here is the code:
HRESULT hr = S_OK;
IMFAttributes *attributes = NULL;
SafeRelease(&_sourcereader);
hr = MFCreateAttributes(&attributes, 1);
if (FAILED(hr)) {
// TODO: set error
return false;
}
hr = attributes->SetUINT32(MF_SOURCE_READER_ENABLE_VIDEO_PROCESSING, TRUE);
if (FAILED(hr)) {
// TODO: set error
return false;
}
// conversion from QString to const WCHAR* (keep the std::wstring alive so c_str() stays valid)
const std::wstring wfilenamestr = filename.toStdWString();
const WCHAR* wfilename = wfilenamestr.c_str();
// create source reader from file with attributes
hr = MFCreateSourceReaderFromURL(wfilename, attributes, &_sourcereader);
if (FAILED(hr)) {
// TODO: set error
return false;
}
// configure sourcereader for progressive RGB32 frames
IMFMediaType *mediatype = NULL;
hr = MFCreateMediaType(&mediatype);
if (SUCCEEDED(hr))
{
hr = mediatype->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
}
if (SUCCEEDED(hr))
{
hr = mediatype->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_RGB32);
}
if (SUCCEEDED(hr))
{
hr = _sourcereader->SetCurrentMediaType(
(DWORD)MF_SOURCE_READER_FIRST_VIDEO_STREAM,
NULL, mediatype);
}
// Ensure the stream is selected.
if (SUCCEEDED(hr))
{
hr = _sourcereader->SetStreamSelection(
(DWORD)MF_SOURCE_READER_FIRST_VIDEO_STREAM, TRUE);
}
if (FAILED(hr)) {
// TODO: Error log for failed configuration
std::cout << "(ConfigureSourceReader) Configuration failed" << std::endl;
return false;
}
//------------------------------------------------------------------
//---------------------- Get Video Format Infos --------------------
//------------------------------------------------------------------
GUID subtype = { 0 };
// Release the media type created above before reusing the pointer, then
// get the media type the source reader actually selected for the stream.
SafeRelease(&mediatype);
hr = _sourcereader->GetCurrentMediaType(
(DWORD)MF_SOURCE_READER_FIRST_VIDEO_STREAM, &mediatype );
if (FAILED(hr)) {
// TODO: Error log message
return false;
}
// Make sure it is the RGB32 format that was requested.
hr = mediatype->GetGUID(MF_MT_SUBTYPE, &subtype);
if (subtype != MFVideoFormat_RGB32)
{
hr = E_UNEXPECTED;
// TODO: Error log message
SafeRelease(&mediatype);
return false;
}
//------------------------------------------------------------------
// Get the width and height
UINT32 width = 0, height = 0;
hr = MFGetAttributeSize(mediatype, MF_MT_FRAME_SIZE, &width, &height);
if (FAILED(hr))
{
// TODO: Error log message
SafeRelease(&mediatype);
return false;
}
//assign dimensions to VideoInfo
_videoinfo.imageHeight = height; _videoinfo.imageWidth = width;
//std::cout << "(GetVideoFormat) width: " << width << ", height: " << height << std::endl;
//------------------------------------------------------------------
//get framerate
UINT32 framerate_num = 0, framerate_denom = 0;
hr = MFGetAttributeRatio(mediatype, MF_MT_FRAME_RATE, &framerate_num, &framerate_denom);
if (FAILED(hr))
{
// TODO: Error log message
SafeRelease(&mediatype);
return false;
}
//set frame rate in struct (integer division; guard against a zero denominator)
_videoinfo.fps = (framerate_denom != 0) ? (framerate_num / framerate_denom) : 0; // TODO: check for valid fps 24,25,30 ...
//------------------------------------------------------------------
// Get length
LONGLONG length = 0;
PROPVARIANT var;
PropVariantInit(&var);
hr = _sourcereader->GetPresentationAttribute((DWORD)MF_SOURCE_READER_MEDIASOURCE,
MF_PD_DURATION,
&var
);
if (SUCCEEDED(hr)) {
assert(var.vt == VT_UI8);
length = var.uhVal.QuadPart;
PropVariantClear(&var);
} else {
// TODO: error log msg
SafeRelease(&mediatype);
return false;
}
//Get total framenumber and length: save to info struct
_videoinfo.noofFrames = length / 10000000 * this->getFrameRate(); // MF_PD_DURATION is in 100-nanosecond units, so /10,000,000 converts to seconds
_videoinfo.duration = length;
//------------------------------------------------------------------
// Get the stride to find out if the bitmap is top-down or bottom-up.
LONG lStride = 0;
lStride = (LONG)MFGetAttributeUINT32(mediatype, MF_MT_DEFAULT_STRIDE, 1);
_videoinfo.stride = lStride;
_videoinfo.bTopDown = (lStride > 0);
//------------------------------------------------------------------
SafeRelease(&mediatype);
// return true and flag if initialization went well
_bInitialized = true;
return true;
After that I call a function to read a frame (at this point the first one):
HRESULT hr = S_OK;
IMFSample *pSample = NULL;
IMFMediaBuffer *buffer = NULL;
DWORD streamIndex, flags;
LONGLONG llTimeStamp;
// Read Sample (RGB32)
hr = _sourcereader->ReadSample (
(DWORD) MF_SOURCE_READER_FIRST_VIDEO_STREAM,
0,
&streamIndex,
&flags,
&llTimeStamp,
&pSample);
if (FAILED (hr)) {
// TODO handle fail case
}
//convert sample data to buffer
hr = pSample->ConvertToContiguousBuffer(&buffer);
if (FAILED (hr)) {
// TODO handle fail case
}
I know that by calling buffer->Lock(&pixels, NULL, &nPixels) I get the BYTE stream stored in pixels. In my case I create a custom image with the given height and width (from the SourceReader; [first function]). From the empty image I get an empty colour matrix that has to be filled with the function Color(byte red, byte green, byte blue).
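Roughly, that locking step would look like this (a minimal sketch based on the buffer from the snippet above; pixels, maxLength and currentLength are just illustrative local names):
BYTE *pixels = NULL;
DWORD maxLength = 0, currentLength = 0;
// Lock gives access to the contiguous RGB32 byte stream.
hr = buffer->Lock(&pixels, &maxLength, &currentLength);
if (SUCCEEDED(hr)) {
// ... use the currentLength bytes starting at pixels ...
buffer->Unlock(); // always unlock before releasing the buffer
}
SafeRelease(&buffer);
SafeRelease(&pSample);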
What I don't know is how to split the RGB32 BYTE array into its single channels in order to fill my image. Maybe it is a stupid question, but I am relatively new to this field...
Answer 0 (score: 1)
For RGB32 the pixel data is interleaved, 4 bytes per pixel. Note that MFVideoFormat_RGB32 stores the bytes in memory in the order:
B = blue, G = green, R = red, X = unused padding (often labelled alpha)
... BGRX BGRX BGRX
A very simple pseudocode example for extracting the channels is shown below.
for (int row = 0; row < height; row++) { // assumes a top-down image (positive stride)
for (int col = 0; col < width * 4; col += 4) { // 4 bytes per pixel; stride may be wider than width * 4
blueBuf[bIndex++] = sample[row * stride + col];
greenBuf[gIndex++] = sample[row * stride + col + 1];
redBuf[rIndex++] = sample[row * stride + col + 2];
transparencyBuf[tIndex++] = sample[row * stride + col + 3]; // unused padding byte in RGB32
}
}
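Building on that, here is a slightly more concrete sketch in the context of the question's code. It assumes MFVideoFormat_RGB32 data, i.e. 4 bytes per pixel stored as B, G, R, X, and that a negative stride (MF_MT_DEFAULT_STRIDE) indicates a bottom-up image; the function SplitChannels and its parameter names are only illustrative (Windows types as in the code above):
#include <vector>
#include <cstdlib> // std::abs

// Split a locked RGB32 (BGRX) frame into separate channel planes.
// pixels: pointer returned by IMFMediaBuffer::Lock
// stride: default stride in bytes (negative => bottom-up image)
void SplitChannels(const BYTE *pixels, LONG stride, UINT32 width, UINT32 height,
                   std::vector<BYTE> &red, std::vector<BYTE> &green, std::vector<BYTE> &blue)
{
    red.resize(width * height);
    green.resize(width * height);
    blue.resize(width * height);
    const LONG absStride = std::abs(stride);
    for (UINT32 row = 0; row < height; ++row) {
        // For a bottom-up image the first row in memory is the bottom scanline.
        const UINT32 memRow = (stride < 0) ? (height - 1 - row) : row;
        const BYTE *line = pixels + (size_t)memRow * absStride;
        for (UINT32 col = 0; col < width; ++col) {
            const size_t dst = (size_t)row * width + col;
            blue[dst]  = line[col * 4 + 0];
            green[dst] = line[col * 4 + 1];
            red[dst]   = line[col * 4 + 2];
            // line[col * 4 + 3] is the unused padding byte
        }
    }
}
Called with the values collected earlier, e.g. SplitChannels(pixels, _videoinfo.stride, _videoinfo.imageWidth, _videoinfo.imageHeight, r, g, b), the three vectors can then be used to fill the image pixel by pixel via Color(r[i], g[i], b[i]).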
}