I am trying to do some camera previewing with Media Foundation, following code I found in a working example on YT. I have enumerated my devices and I can preview from the camera, but the data I get is in NV12 format and I need it in RGB32. This is the class that holds the information about the device:
class Media : public IMFSourceReaderCallback
{
    CRITICAL_SECTION criticalSection;
    long referenceCount;
    WCHAR *wSymbolicLink;
    UINT32 cchSymbolicLink;
    IMFSourceReader* sourceReader;

public:
    LONG stride;
    int bytesPerPixel;
    GUID videoFormat;
    UINT height;
    UINT width;
    WCHAR deviceNameString[2048];
    BYTE* rawData;

    HRESULT CreateCaptureDevice();
    HRESULT SetSourceReader(IMFActivate *device);
    HRESULT IsMediaTypeSupported(IMFMediaType* type);
    HRESULT GetDefaultStride(IMFMediaType *pType, LONG *plStride);
    HRESULT Close();

    Media();
    ~Media();

    // the class must implement the methods from IUnknown
    STDMETHODIMP QueryInterface(REFIID iid, void** ppv);
    STDMETHODIMP_(ULONG) AddRef();
    STDMETHODIMP_(ULONG) Release();

    // the class must implement the methods from IMFSourceReaderCallback
    STDMETHODIMP OnReadSample(HRESULT status, DWORD streamIndex, DWORD streamFlags, LONGLONG timeStamp, IMFSample *sample);
    STDMETHODIMP OnEvent(DWORD, IMFMediaEvent *);
    STDMETHODIMP OnFlush(DWORD);
};
This is the method that creates the capture device:
HRESULT Media::CreateCaptureDevice()
{
    HRESULT hr = S_OK;

    //this is important!!
    hr = CoInitializeEx(NULL, COINIT_APARTMENTTHREADED | COINIT_DISABLE_OLE1DDE);

    UINT32 count = 0;
    IMFAttributes *attributes = NULL;
    IMFActivate **devices = NULL;

    if (FAILED(hr)) { CLEAN_ATTRIBUTES() }
    // Create an attribute store to specify the enumeration parameters.
    hr = MFCreateAttributes(&attributes, 1);
    if (FAILED(hr)) { CLEAN_ATTRIBUTES() }

    //The attribute to be requested is devices that can capture video
    hr = attributes->SetGUID(
        MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE,
        MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE_VIDCAP_GUID
    );
    if (FAILED(hr)) { CLEAN_ATTRIBUTES() }

    //Enumerate the video capture devices
    hr = MFEnumDeviceSources(attributes, &devices, &count);
    if (FAILED(hr)) { CLEAN_ATTRIBUTES() }

    //if there are any available devices
    if (count > 0)
    {
        /*If you actually need to select one of the available devices
          this is the place to do it. For this example the first device
          is selected
        */
        //Get a source reader from the first available device
        SetSourceReader(devices[0]);

        WCHAR *nameString = NULL;
        // Get the human-friendly name of the device
        UINT32 cchName;
        hr = devices[0]->GetAllocatedString(
            MF_DEVSOURCE_ATTRIBUTE_FRIENDLY_NAME,
            &nameString, &cchName);

        if (SUCCEEDED(hr))
        {
            //allocate a byte buffer for the raw pixel data
            bytesPerPixel = abs(stride) / width;
            rawData = new BYTE[width * height * bytesPerPixel];
            wcscpy(deviceNameString, nameString);
        }
        CoTaskMemFree(nameString);
    }
    //clean
    CLEAN_ATTRIBUTES()
}
This is the method that sets up the source reader:
HRESULT Media::SetSourceReader(IMFActivate *device)
{
    HRESULT hr = S_OK;

    IMFMediaSource *source = NULL;
    IMFAttributes *attributes = NULL;
    IMFMediaType *mediaType = NULL;

    EnterCriticalSection(&criticalSection);

    hr = device->ActivateObject(__uuidof(IMFMediaSource), (void**)&source);
    //get the symbolic link for the device
    if (SUCCEEDED(hr))
        hr = device->GetAllocatedString(MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE_VIDCAP_SYMBOLIC_LINK, &wSymbolicLink, &cchSymbolicLink);
    //Allocate attributes
    if (SUCCEEDED(hr))
        hr = MFCreateAttributes(&attributes, 2);
    //Disable the source reader's built-in format converters
    if (SUCCEEDED(hr))
        hr = attributes->SetUINT32(MF_READWRITE_DISABLE_CONVERTERS, TRUE);
    // Set the callback pointer.
    if (SUCCEEDED(hr))
        hr = attributes->SetUnknown(MF_SOURCE_READER_ASYNC_CALLBACK, this);
    //Create the source reader
    if (SUCCEEDED(hr))
        hr = MFCreateSourceReaderFromMediaSource(source, attributes, &sourceReader);

    // Try to find a suitable output type.
    if (SUCCEEDED(hr))
    {
        for (DWORD i = 0; ; i++)
        {
            hr = sourceReader->GetNativeMediaType((DWORD)MF_SOURCE_READER_FIRST_VIDEO_STREAM, i, &mediaType);
            if (FAILED(hr)) { break; }

            hr = IsMediaTypeSupported(mediaType);
            if (FAILED(hr)) { break; }

            //Get width and height
            MFGetAttributeSize(mediaType, MF_MT_FRAME_SIZE, &width, &height);

            if (mediaType)
            { mediaType->Release(); mediaType = NULL; }

            if (SUCCEEDED(hr)) // Found an output type.
                break;
        }
    }
    if (SUCCEEDED(hr))
    {
        // Ask for the first sample.
        hr = sourceReader->ReadSample((DWORD)MF_SOURCE_READER_FIRST_VIDEO_STREAM, 0, NULL, NULL, NULL, NULL);
    }

    if (FAILED(hr))
    {
        if (source)
        {
            source->Shutdown();
        }
        Close();
    }
    if (source) { source->Release(); source = NULL; }
    if (attributes) { attributes->Release(); attributes = NULL; }
    if (mediaType) { mediaType->Release(); mediaType = NULL; }

    LeaveCriticalSection(&criticalSection);
    return hr;
}
Answer 0 (score: 0)
Usually, for YUV (NV12) to RGB (RGB32):

Byte* p_rgb_Data = (((height / 2) + height) * width) * 3; // no alpha
EDIT

My answer was not correct; you want to get an RGB format from the camera, right?

You have to enumerate the media types from the SourceReader:
HRESULT EnumerateTypesForStream(IMFSourceReader *pReader, DWORD dwStreamIndex)
{
    HRESULT hr = S_OK;
    DWORD dwMediaTypeIndex = 0;

    while (SUCCEEDED(hr))
    {
        IMFMediaType *pType = NULL;
        hr = pReader->GetNativeMediaType(dwStreamIndex, dwMediaTypeIndex, &pType);
        if (hr == MF_E_NO_MORE_TYPES)
        {
            hr = S_OK;
            break;
        }
        else if (SUCCEEDED(hr))
        {
            // Examine the media type. (Not shown.)
            pType->Release();
        }
        ++dwMediaTypeIndex;
    }
    return hr;
}
When one of the media types is RGB, set it as the current media type:

hr = pReader->SetCurrentMediaType(dwStreamIndex, NULL, pMediaType);
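For the "Examine the media type" step that is not shown above, here is a minimal sketch of what it could look like, assuming you only want to accept RGB32. The helper name SelectIfRgb32 is made up for illustration; the subtype check and SetCurrentMediaType call are the regular Source Reader API:

// Hypothetical helper: accept a native type only if its subtype is RGB32,
// then make it the stream's current type so the reader delivers RGB32 samples.
HRESULT SelectIfRgb32(IMFSourceReader *pReader, DWORD dwStreamIndex, IMFMediaType *pType)
{
    GUID subtype = GUID_NULL;
    HRESULT hr = pType->GetGUID(MF_MT_SUBTYPE, &subtype);
    if (FAILED(hr)) return hr;

    if (!IsEqualGUID(subtype, MFVideoFormat_RGB32))
        return MF_E_INVALIDMEDIATYPE; // not RGB32, keep enumerating

    // Found an RGB32 native type: make it the stream's current type.
    return pReader->SetCurrentMediaType(dwStreamIndex, NULL, pType);
}

Keep in mind that many webcams do not expose RGB32 as a native type at all; in that case you either convert from NV12 yourself (see below) or let a converter such as the Video Processor MFT do it.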
EDIT...

Yes, it comes from:

Byte* p_rgb_Data = (((height / 2) + height) * width) * 3; // no alpha

-> it should have been: Byte* p_rgb_Size = (((height / 2) + height) * width) * 3; // no alpha

I also missed the conversion itself. Something like:
BYTE GetR(const int bY, const int bV)   // bV is the chroma value already centered, i.e. V - 128
{
    int iR = bY + (int)(1.402f * bV);
    iR = iR > 255 ? 255 : iR < 0 ? 0 : iR;
    return (BYTE)iR;
}

BYTE GetG(const int bY, const int bU, const int bV)   // bU = U - 128, bV = V - 128
{
    int iG = bY - (int)(0.344f * bU + 0.714f * bV);
    iG = iG > 255 ? 255 : iG < 0 ? 0 : iG;
    return (BYTE)iG;
}

BYTE GetB(const int bY, const int bU)   // bU = U - 128
{
    int iB = bY + (int)(1.772f * bU);
    iB = iB > 255 ? 255 : iB < 0 ? 0 : iB;
    return (BYTE)iB;
}
If you want the complete conversion between an NV12 buffer and an RGB buffer, just ask.
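For illustration, a minimal, unoptimized sketch of such a conversion built on the helpers above. The layout assumptions are mine (a tightly packed NV12 input whose pitch equals the width, and B-G-R-A byte order for the RGB32 output), so adapt the strides and channel order to your actual buffers:

// Sketch: convert a tightly packed NV12 frame to RGB32 (B, G, R, A byte order).
// pNV12 points to width*height luma bytes followed by the interleaved U/V plane.
void Nv12ToRgb32(const BYTE *pNV12, BYTE *pRGB32, const UINT width, const UINT height)
{
    const BYTE *pY  = pNV12;
    const BYTE *pUV = pNV12 + (size_t)width * height; // half-height plane of U,V pairs

    for (UINT y = 0; y < height; y++)
    {
        for (UINT x = 0; x < width; x++)
        {
            const int iY = pY[(size_t)y * width + x];
            // One U/V pair is shared by a 2x2 block of luma samples.
            const size_t uvIndex = (size_t)(y / 2) * width + (x & ~1u);
            const int iU = pUV[uvIndex]     - 128;
            const int iV = pUV[uvIndex + 1] - 128;

            BYTE *pOut = pRGB32 + ((size_t)y * width + x) * 4;
            pOut[0] = GetB(iY, iU);     // B
            pOut[1] = GetG(iY, iU, iV); // G
            pOut[2] = GetR(iY, iV);     // R
            pOut[3] = 0xFF;             // A (unused)
        }
    }
}

This would be called once per frame, for example from OnReadSample after copying out the sample's buffer.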
Answer 1 (score: 0)
The Video Processor MFT can handle the color conversion. It is a synchronous transform, so it is fairly simple to set up and use. See this link for more information and examples.
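As a rough sketch of what that setup could look like (error handling abbreviated, frame size assumed known; CLSID_VideoProcessorMFT, IMFTransform and the media-type calls are the documented Media Foundation API, the helper function itself is illustrative):

#include <mfapi.h>
#include <mfidl.h>
#include <mftransform.h>

// Sketch: create the Video Processor MFT and negotiate NV12 in / RGB32 out.
// Each captured IMFSample then goes through ProcessInput/ProcessOutput.
HRESULT CreateNv12ToRgb32Mft(UINT32 width, UINT32 height, IMFTransform **ppMFT)
{
    IMFTransform *pMFT = NULL;
    IMFMediaType *pIn  = NULL;
    IMFMediaType *pOut = NULL;

    HRESULT hr = CoCreateInstance(CLSID_VideoProcessorMFT, NULL, CLSCTX_INPROC_SERVER,
                                  IID_PPV_ARGS(&pMFT));

    // Input type: NV12 at the camera's resolution.
    if (SUCCEEDED(hr)) hr = MFCreateMediaType(&pIn);
    if (SUCCEEDED(hr)) hr = pIn->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
    if (SUCCEEDED(hr)) hr = pIn->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_NV12);
    if (SUCCEEDED(hr)) hr = MFSetAttributeSize(pIn, MF_MT_FRAME_SIZE, width, height);
    if (SUCCEEDED(hr)) hr = pIn->SetUINT32(MF_MT_INTERLACE_MODE, MFVideoInterlace_Progressive);
    if (SUCCEEDED(hr)) hr = pMFT->SetInputType(0, pIn, 0);

    // Output type: RGB32 at the same resolution.
    if (SUCCEEDED(hr)) hr = MFCreateMediaType(&pOut);
    if (SUCCEEDED(hr)) hr = pOut->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
    if (SUCCEEDED(hr)) hr = pOut->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_RGB32);
    if (SUCCEEDED(hr)) hr = MFSetAttributeSize(pOut, MF_MT_FRAME_SIZE, width, height);
    if (SUCCEEDED(hr)) hr = pOut->SetUINT32(MF_MT_INTERLACE_MODE, MFVideoInterlace_Progressive);
    if (SUCCEEDED(hr)) hr = pMFT->SetOutputType(0, pOut, 0);

    if (pIn)  pIn->Release();
    if (pOut) pOut->Release();

    if (SUCCEEDED(hr))
        *ppMFT = pMFT;          // caller owns the reference
    else if (pMFT)
        pMFT->Release();

    return hr;
}

The NV12 samples received in OnReadSample can then be fed to the MFT with ProcessInput, and the converted RGB32 frames pulled out with ProcessOutput.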