我正在尝试将音频功能添加到捕获源过滤器,以便制作带有音频的虚拟摄像头。从TMH和rdp的代码开始,我将其扩展为另一个名为“音频”的引脚:
CUnknown * WINAPI CVCam::CreateInstance(LPUNKNOWN lpunk, HRESULT *phr)
{
ASSERT(phr);
CUnknown *punk = new CVCam(lpunk, phr);
return punk;
}
CVCam::CVCam(LPUNKNOWN lpunk, HRESULT *phr) : CSource(LPCSTR(FILTER_NAME), lpunk, CLSID_VirtualCam)
{
ASSERT(phr);
CAutoLock cAutoLock(&m_cStateLock);
m_paStreams = (CSourceStream **) new CVCamStream*[2];
m_paStreams[0] = new CVCamStream(phr, this, L"Video");
m_paStreams[1] = new CVAudioStream(phr, this, L"Audio");
}
HRESULT CVCam::QueryInterface(REFIID riid, void **ppv)
{
if (riid == _uuidof(IAMStreamConfig) || riid == _uuidof(IKsPropertySet))
{
HRESULT hr;
hr = m_paStreams[0]->QueryInterface(riid, ppv);
if (hr != S_OK) return hr;
hr = m_paStreams[1]->QueryInterface(riid, ppv);
if (hr != S_OK) return hr;
}
else return CSource::QueryInterface(riid, ppv);
return S_OK;
}
CVAudioStream::CVAudioStream(HRESULT *phr, CVCam *pParent, LPCWSTR pPinName) : CSourceStream(LPCSTR(pPinName), phr, pParent, pPinName), m_pParent(pParent)
{
GetMediaType(0, &m_mt);
}
CVAudioStream::~CVAudioStream()
{
}
HRESULT CVAudioStream::QueryInterface(REFIID riid, void **ppv)
{
if (riid == _uuidof(IAMStreamConfig)) *ppv = (IAMStreamConfig*)this;
else if (riid == _uuidof(IKsPropertySet)) *ppv = (IKsPropertySet*)this;
else if (riid == _uuidof(IAMBufferNegotiation)) *ppv = (IAMBufferNegotiation*)this;
else return CSourceStream::QueryInterface(riid, ppv);
AddRef();
return S_OK;
}
HRESULT CVAudioStream::FillBuffer(IMediaSample *pms)
{
// fill buffer with Windows audio samples
return NOERROR;
}
STDMETHODIMP CVAudioStream::Notify(IBaseFilter * pSender, Quality q)
{
return E_NOTIMPL;
}
HRESULT CVAudioStream::SetMediaType(const CMediaType *pmt)
{
HRESULT hr = CSourceStream::SetMediaType(pmt);
return hr;
}
HRESULT setupPwfex(WAVEFORMATEX *pwfex, AM_MEDIA_TYPE *pmt) {
pwfex->wFormatTag = WAVE_FORMAT_PCM;
pwfex->cbSize = 0;
pwfex->nChannels = 2;
HRESULT hr;
pwfex->nSamplesPerSec = 11025;
pwfex->wBitsPerSample = 16;
pwfex->nBlockAlign = (WORD)((pwfex->wBitsPerSample * pwfex->nChannels) / 8);
pwfex->nAvgBytesPerSec = pwfex->nSamplesPerSec * pwfex->nBlockAlign;
hr = ::CreateAudioMediaType(pwfex, pmt, FALSE);
return hr;
}
/*HRESULT CVAudioStream::setAsNormal(CMediaType *pmt)
{
WAVEFORMATEX *pwfex;
pwfex = (WAVEFORMATEX *)pmt->AllocFormatBuffer(sizeof(WAVEFORMATEX));
ZeroMemory(pwfex, sizeof(WAVEFORMATEX));
if (NULL == pwfex) return E_OUTOFMEMORY;
return setupPwfex(pwfex, pmt);
}*/
HRESULT CVAudioStream::GetMediaType(int iPosition, CMediaType *pmt)
{
if (iPosition < 0) return E_INVALIDARG;
if (iPosition > 0) return VFW_S_NO_MORE_ITEMS;
if (iPosition == 0)
{
*pmt = m_mt;
return S_OK;
}
WAVEFORMATEX *pwfex = (WAVEFORMATEX *)pmt->AllocFormatBuffer(sizeof(WAVEFORMATEX));
setupPwfex(pwfex, pmt);
return S_OK;
}
HRESULT CVAudioStream::CheckMediaType(const CMediaType *pMediaType)
{
int cbFormat = pMediaType->cbFormat;
if (*pMediaType != m_mt) return E_INVALIDARG;
return S_OK;
}
const int WaveBufferChunkSize = 16 * 1024;
HRESULT CVAudioStream::DecideBufferSize(IMemAllocator *pAlloc, ALLOCATOR_PROPERTIES *pProperties)
{
CheckPointer(pAlloc, E_POINTER);
CheckPointer(pProperties, E_POINTER);
WAVEFORMATEX *pwfexCurrent = (WAVEFORMATEX*)m_mt.Format();
pProperties->cBuffers = 1;
pProperties->cbBuffer = expectedMaxBufferSize;
ALLOCATOR_PROPERTIES Actual;
HRESULT hr = pAlloc->SetProperties(pProperties, &Actual);
if (FAILED(hr)) return hr;
if (Actual.cbBuffer < pProperties->cbBuffer) return E_FAIL;
return NOERROR;
}
HRESULT CVAudioStream::OnThreadCreate()
{
//GetMediaType(0, &m_mt);
//HRESULT hr = LoopbackCaptureSetup();
//if (FAILED(hr)) return hr;
return NOERROR;
}
HRESULT STDMETHODCALLTYPE CVAudioStream::SetFormat(AM_MEDIA_TYPE *pmt)
{
if (!pmt) return S_OK;
if (CheckMediaType((CMediaType *)pmt) != S_OK) return E_FAIL;
m_mt = *pmt;
IPin* pin;
ConnectedTo(&pin);
if (pin)
{
IFilterGraph *pGraph = m_pParent->GetGraph();
pGraph->Reconnect(this);
}
return S_OK;
}
HRESULT STDMETHODCALLTYPE CVAudioStream::GetFormat(AM_MEDIA_TYPE **ppmt)
{
*ppmt = CreateMediaType(&m_mt);
return S_OK;
}
HRESULT STDMETHODCALLTYPE CVAudioStream::GetNumberOfCapabilities(int *piCount, int *piSize)
{
*piCount = 1;
*piSize = sizeof(AUDIO_STREAM_CONFIG_CAPS);
return S_OK;
}
HRESULT STDMETHODCALLTYPE CVAudioStream::GetStreamCaps(int iIndex, AM_MEDIA_TYPE **pmt, BYTE *pSCC)
{
if (iIndex < 0) return E_INVALIDARG;
if (iIndex > 0) return S_FALSE;
if (pSCC == NULL) return E_POINTER;
*pmt = CreateMediaType(&m_mt);
if (*pmt == NULL) return E_OUTOFMEMORY;
DECLARE_PTR(WAVEFORMATEX, pAudioFormat, (*pmt)->pbFormat);
AM_MEDIA_TYPE * pm = *pmt;
setupPwfex(pAudioFormat, pm);
AUDIO_STREAM_CONFIG_CAPS* pASCC = (AUDIO_STREAM_CONFIG_CAPS*)pSCC;
ZeroMemory(pSCC, sizeof(AUDIO_STREAM_CONFIG_CAPS));
pASCC->guid = MEDIATYPE_Audio;
pASCC->MaximumChannels = pAudioFormat->nChannels;
pASCC->MinimumChannels = pAudioFormat->nChannels;
pASCC->ChannelsGranularity = 1; // doesn't matter
pASCC->MaximumSampleFrequency = pAudioFormat->nSamplesPerSec;
pASCC->MinimumSampleFrequency = pAudioFormat->nSamplesPerSec;
pASCC->SampleFrequencyGranularity = 11025; // doesn't matter
pASCC->MaximumBitsPerSample = pAudioFormat->wBitsPerSample;
pASCC->MinimumBitsPerSample = pAudioFormat->wBitsPerSample;
pASCC->BitsPerSampleGranularity = 16; // doesn't matter
return S_OK;
}
HRESULT CVAudioStream::Set(REFGUID guidPropSet, DWORD dwID, void *pInstanceData, DWORD cbInstanceData, void *pPropData, DWORD cbPropData)
{
return E_NOTIMPL;
}
HRESULT CVAudioStream::Get(
REFGUID guidPropSet,
DWORD dwPropID,
void *pInstanceData,
DWORD cbInstanceData,
void *pPropData,
DWORD cbPropData,
DWORD *pcbReturned
)
{
if (guidPropSet != AMPROPSETID_Pin) return E_PROP_SET_UNSUPPORTED;
if (dwPropID != AMPROPERTY_PIN_CATEGORY) return E_PROP_ID_UNSUPPORTED;
if (pPropData == NULL && pcbReturned == NULL) return E_POINTER;
if (pcbReturned) *pcbReturned = sizeof(GUID);
if (pPropData == NULL) return S_OK;
if (cbPropData < sizeof(GUID)) return E_UNEXPECTED;
*(GUID *)pPropData = PIN_CATEGORY_CAPTURE;
return S_OK;
}
HRESULT CVAudioStream::QuerySupported(REFGUID guidPropSet, DWORD dwPropID, DWORD *pTypeSupport)
{
if (guidPropSet != AMPROPSETID_Pin) return E_PROP_SET_UNSUPPORTED;
if (dwPropID != AMPROPERTY_PIN_CATEGORY) return E_PROP_ID_UNSUPPORTED;
if (pTypeSupport) *pTypeSupport = KSPROPERTY_SUPPORT_GET;
return S_OK;
}
我的第一个问题是当我在GraphStudioNext中插入过滤器并打开其属性页面时。音频引脚显示以下(不正确)信息:
majorType = GUID_NULL
subType = GUID_NULL
formattype = GUID_NULL
当然我无法连接到该引脚,因为它无效。
我期待像MEDIATYPE_Audio
这样的东西,因为我设置了它:
DEFINE_GUID(CLSID_VirtualCam, 0x8e14549a, 0xdb61, 0x4309, 0xaf, 0xa1, 0x35, 0x78, 0xe9, 0x27, 0xe9, 0x33);
const AMOVIESETUP_MEDIATYPE AMSMediaTypesVideo =
{
&MEDIATYPE_Video,
&MEDIASUBTYPE_NULL
};
const AMOVIESETUP_MEDIATYPE AMSMediaTypesAudio =
{
&MEDIATYPE_Audio,
&MEDIASUBTYPE_NULL
};
const AMOVIESETUP_PIN AMSPinVCam[] =
{
{
L"Video", // Pin string name
FALSE, // Is it rendered
TRUE, // Is it an output
FALSE, // Can we have none
FALSE, // Can we have many
&CLSID_NULL, // Connects to filter
NULL, // Connects to pin
1, // Number of types
&AMSMediaTypesVideo // Pin Media types
},
{
L"Audio", // Pin string name
FALSE, // Is it rendered
TRUE, // Is it an output
FALSE, // Can we have none
FALSE, // Can we have many
&CLSID_NULL, // Connects to filter
NULL, // Connects to pin
1, // Number of types
&AMSMediaTypesAudio // Pin Media types
}
};
const AMOVIESETUP_FILTER AMSFilterVCam =
{
&CLSID_VirtualCam, // Filter CLSID
FILTER_NAME, // String name
MERIT_DO_NOT_USE, // Filter merit
2, // Number pins
AMSPinVCam // Pin details
};
CFactoryTemplate g_Templates[] =
{
{
FILTER_NAME,
&CLSID_VirtualCam,
CVCam::CreateInstance,
NULL,
&AMSFilterVCam
},
};
int g_cTemplates = sizeof(g_Templates) / sizeof(g_Templates[0]);
STDAPI RegisterFilters( BOOL bRegister )
{
HRESULT hr = NOERROR;
WCHAR achFileName[MAX_PATH];
char achTemp[MAX_PATH];
ASSERT(g_hInst != 0);
if( 0 == GetModuleFileNameA(g_hInst, achTemp, sizeof(achTemp))) return AmHresultFromWin32(GetLastError());
MultiByteToWideChar(CP_ACP, 0L, achTemp, lstrlenA(achTemp) + 1, achFileName, NUMELMS(achFileName));
hr = CoInitialize(0);
if(bRegister)
{
hr = AMovieSetupRegisterServer(CLSID_VirtualCam, FILTER_NAME, achFileName, L"Both", L"InprocServer32");
}
if( SUCCEEDED(hr) )
{
IFilterMapper2 *fm = 0;
hr = CreateComObject( CLSID_FilterMapper2, IID_IFilterMapper2, fm );
if( SUCCEEDED(hr) )
{
if(bRegister)
{
IMoniker *pMoniker = 0;
REGFILTER2 rf2;
rf2.dwVersion = 1;
rf2.dwMerit = MERIT_DO_NOT_USE;
rf2.cPins = 2;
rf2.rgPins = AMSPinVCam;
hr = fm->RegisterFilter(CLSID_VirtualCam, FILTER_NAME, &pMoniker, &CLSID_VideoInputDeviceCategory, NULL, &rf2);
}
else
{
hr = fm->UnregisterFilter(&CLSID_VideoInputDeviceCategory, 0, CLSID_VirtualCam);
}
}
if(fm) fm->Release();
}
if( SUCCEEDED(hr) && !bRegister ) hr = AMovieSetupUnregisterServer( CLSID_VirtualCam );
CoFreeUnusedLibraries();
CoUninitialize();
return hr;
}
第二个问题:还有一个“Latency”选项卡,但是当我点击它时,GraphStudioNext会永久挂起 并且VS调试器(附加到该进程)什么也没说。什么代码控制这个标签?
解决了第一个问题:
HRESULT CVAudioStream::GetMediaType(int iPosition, CMediaType *pmt)
{
if (iPosition < 0) return E_INVALIDARG;
if (iPosition > 0) return VFW_S_NO_MORE_ITEMS;
WAVEFORMATEX *pwfex = (WAVEFORMATEX *)pmt->AllocFormatBuffer(sizeof(WAVEFORMATEX));
setupPwfex(pwfex, pmt);
pmt->SetType(&MEDIATYPE_Audio);
pmt->SetFormatType(&FORMAT_WaveFormatEx);
pmt->SetTemporalCompression(FALSE);
pmt->SetSubtype(&MEDIASUBTYPE_PCM);
pmt->SetSampleSize(pwfex->nBlockAlign);
return S_OK;
}
答案 0 :(得分:2)
简短版本:Microsoft并不真正提供API来提供虚拟音频设备,以便应用程序很好地接受它,就像它是真正的音频捕获设备一样。
如果虚拟视频捕获过滤器通常由于历史原因而起作用,则音频不是这种情况。实现音频设备的内核级驱动程序是添加应用程序可识别的音频设备的方法。
延迟标签显示,因为您假装正在实施IAMBufferNegotiation
界面:
if (riid == _uuidof(IAMBufferNegotiation)) *ppv = (IAMBufferNegotiation*)this;
实现可能不正确,这会导致某些意外行为(冻结,崩溃等)。
在同一个过滤器上添加音频引脚是可能的,但如果您希望将流选为人工源,则可能不是最佳选择。它一般都有意义,但真正的设备几乎从不暴露这样的音频流。
长话短说,唯一可以利用这样的音频流的应用是你自己开发的应用:没有众所周知的应用尝试在视频源滤波器上定位音频引脚。出于这个原因执行
<{1}},特别是IAMStreamConfig
这样的针脚是没用的。
您将无法在“音频捕获源”类别下注册过滤器,因为您注册了一个过滤器,此过滤器首先公开视频输出引脚,然后才会显示一些辅助音频。如果您通过DirectShow定位一个消耗音频的应用程序(由于超出此问题范围的原因,这已经非常罕见),您应该开发一个单独的源过滤器。您当然可以让两个过滤器在幕后互相交流以协同提供某些馈送,但就DirectShow而言,过滤器通常显示为独立的。
...真正的网络摄像头暴露了两种不同的过滤器,这就是为什么在像Skype这样的应用中我们必须在视频和音频设备下选择它们。
是否应该更好地创建两个完全不同的项目和过滤器:一个用于视频,一个用于音频?
真实和典型的相机:
因为&#34;真实&#34;物理相机通常提供内核级驱动程序,它们在DirectShow中的存在通过WDM Video Capture Filter进行,this question充当代理并枚举&#34; DirectShow包装器&#34;相同类别的摄像头驱动程序视频捕获源,您可以在其中注册虚拟摄像头。
也就是说,这种设计使您能够在可用设备列表中混合使用真实和虚拟摄像机,这是基于DirectShow的应用程序在视频捕获时使用的。这种方法有其局限性,我在前面已经描述过,例如在Applicability of Virtual DirectShow Sources中,引用了帖子WASAPI。
由于DirectShow的继任者媒体基金会一般没有良好的接待,此外Media Foundation既没有良好的向后兼容性也没有提供视频捕获可扩展性,包括微软自己在内的众多应用程序仍在使用视频通过DirectShow捕获。相反,那些研究Windows视频捕获API的人也经常对DirectShow感兴趣,而不是&#34;当前&#34; API,因为样本和相关信息的可用性,API可扩展性,应用程序集成选项。
然而,音频并非如此。在DirectShow开发停止时,DirectShow音频捕获并不是一流的。 Windows Vista为音频Directshow.net don't detect all mics in Windows 7引入了新的API,并且DirectShow没有收到与新API的相应连接,既不用于音频捕获也不用于回放。音频本身更简单,WASAPI功能强大且开发人员友好,因此开发人员开始切换到用于音频相关任务的新API。使用DirectShow进行音频捕获的应用程序要少得多,并且实现虚拟音频源很可能是一个错过:您的设备将保持&#34;隐形&#34;适用于通过WASAPI进行音频捕获的应用程序。即使应用程序有一个用于Windows XP的后备代码补丁来通过DirectShow进行音频捕获,在新的操作系统中也不会让你感到宽慰。
跟进读取StackOverflow上的音频:
此外,您不必为视频和音频过滤器设置单独的项目。您可以将它们混合在同一个项目中,它们可以是单独注册的独立过滤器。