我正在尝试使用UWP的AudioGraph API来重现合成语音和短通知声音(“earcons”)的混合。
UWP有一个语音合成API,它给我一个包含WAV文件的流,但我不想对参数(比特率,样本深度等)做太多假设,所以我的想法是只要有一些语音要重现,就AudioSubmixNode
并添加AudioFrameInputNode
s。排队单独的话语有一些复杂性,以便它们不会重叠。
图表初始化为
private async Task InitAudioGraph()
{
var graphCreated = await AudioGraph.CreateAsync(new AudioGraphSettings(Windows.Media.Render.AudioRenderCategory.Speech)
{
QuantumSizeSelectionMode = QuantumSizeSelectionMode.LowestLatency
});
if (graphCreated.Status != AudioGraphCreationStatus.Success) return;
_Graph = graphCreated.Graph;
var outputCreated = await _Graph.CreateDeviceOutputNodeAsync();
if (outputCreated.Status != AudioDeviceNodeCreationStatus.Success) return;
_Mixer = _Graph.CreateSubmixNode();
_Mixer.AddOutgoingConnection(outputCreated.DeviceOutputNode);
_Graph.Start();
}
然后用
播放当前的话语class SpeechStreamPlayer : IDisposable
{
internal static void Play(AudioGraph graph, AudioSubmixNode mixer, SpeechSynthesisStream speechStream)
{
if (!speechStream.ContentType.Equals("audio/wav", StringComparison.OrdinalIgnoreCase)) throw new NotSupportedException("Content type: " + speechStream.ContentType);
var stream = speechStream.AsStreamForRead();
// Read the RIFF header
uint chunkId = stream.ReadUint(); // "RIFF" - but in little-endian
if (chunkId != 0x46464952) throw new NotSupportedException("Magic: " + chunkId);
uint chunkSize = stream.ReadUint(); // Length of rest of stream
uint format = stream.ReadUint(); // "WAVE"
if (format != 0x45564157) throw new NotSupportedException("Stream format: " + format);
// "fmt " sub-chunk
uint subchunkId = stream.ReadUint();
if (subchunkId != 0x20746d66) throw new NotSupportedException("Expected fmt sub-chunk, found " + subchunkId);
uint subchunkSize = stream.ReadUint();
uint subchunk2Off = (uint)stream.Position + subchunkSize;
uint audioFormat = (uint)stream.ReadShort();
uint chans = (uint)stream.ReadShort();
uint sampleRate = stream.ReadUint();
uint byteRate = stream.ReadUint();
uint blockSize = (uint)stream.ReadShort();
uint bitsPerSample = (uint)stream.ReadShort();
// Possibly extra stuff added, so...
stream.Seek(subchunk2Off, SeekOrigin.Begin);
subchunkId = stream.ReadUint(); // "data"
if (subchunkId != 0x61746164) throw new NotSupportedException("Expected data sub-chunk, found " + subchunkId);
subchunkSize = stream.ReadUint();
// Ok, the stream is in the correct place to start extracting data and we have the parameters.
var props = AudioEncodingProperties.CreatePcm(sampleRate, chans, bitsPerSample);
var frameInputNode = graph.CreateFrameInputNode(props);
frameInputNode.AddOutgoingConnection(mixer);
new SpeechStreamPlayer(frameInputNode, mixer, stream, blockSize);
}
internal event EventHandler StreamFinished;
private SpeechStreamPlayer(AudioFrameInputNode frameInputNode, AudioSubmixNode mixer, Stream stream, uint sampleSize)
{
_FrameInputNode = frameInputNode;
_Mixer = mixer;
_Stream = stream;
_SampleSize = sampleSize;
_FrameInputNode.QuantumStarted += Source_QuantumStarted;
_FrameInputNode.Start();
}
private AudioFrameInputNode _FrameInputNode;
private AudioSubmixNode _Mixer;
private Stream _Stream;
private readonly uint _SampleSize;
private unsafe void Source_QuantumStarted(AudioFrameInputNode sender, FrameInputNodeQuantumStartedEventArgs args)
{
if (args.RequiredSamples <= 0) return;
System.Diagnostics.Debug.WriteLine("Requested {0} samples", args.RequiredSamples);
var frame = new AudioFrame((uint)args.RequiredSamples * _SampleSize);
using (var buffer = frame.LockBuffer(AudioBufferAccessMode.Write))
{
using (var reference = buffer.CreateReference())
{
byte* pBuffer;
uint capacityBytes;
var directBuffer = reference as IMemoryBufferByteAccess;
((IMemoryBufferByteAccess)reference).GetBuffer(out pBuffer, out capacityBytes);
uint bytesRemaining = (uint)_Stream.Length - (uint)_Stream.Position;
uint bytesToCopy = Math.Min(capacityBytes, bytesRemaining);
for (uint i = 0; i < bytesToCopy; i++) pBuffer[i] = (byte)_Stream.ReadByte();
for (uint i = bytesToCopy; i < capacityBytes; i++) pBuffer[i] = 0;
if (bytesRemaining <= capacityBytes)
{
Dispose();
StreamFinished?.Invoke(this, EventArgs.Empty);
}
}
}
sender.AddFrame(frame);
}
public void Dispose()
{
if (_FrameInputNode != null)
{
_FrameInputNode.QuantumStarted -= Source_QuantumStarted;
_FrameInputNode.Dispose();
_FrameInputNode = null;
}
if (_Stream != null)
{
_Stream.Dispose();
_Stream = null;
}
}
}
这个工作一次。当第一个话语结束时,StreamFinished?.Invoke(this, EventArgs.Empty);
通知队列管理系统应该播放下一个话语,并且该行
var frameInputNode = graph.CreateFrameInputNode(props);
抛出Exception
消息Exception from HRESULT: 0x88960001
。有点挖掘表明it corresponds to XAUDIO2_E_INVALID_CALL,但这不是很具描述性。
在这两种情况下,传递给AudioEncodingProperties.CreatePcm
的参数都是(22050, 1, 16)
。
我怎样才能找到有关出错的更多细节?在最糟糕的情况下,我想我可以抛弃整个图形并每次构建一个新图形,但这似乎效率很低。
答案 0 :(得分:0)
问题似乎在
当第一个话语结束时,
StreamFinished?.Invoke(this, EventArgs.Empty);
通知队列管理系统应该播放下一个话语
虽然AudioFrameInputNode.QuantumStarted的文档没有说明有关禁止的操作,但AudioGraph.QuantumStarted的文档说
QuantumStarted事件是同步的,这意味着您无法更新此事件处理程序中AudioGraph或各个音频节点的属性或状态。尝试执行诸如停止音频图形或添加,删除或启动单个音频节点等操作将导致抛出异常。
这似乎也适用于节点的QuantumStarted
事件。
简单的解决方案是使用
将图形操作移动到另一个线程 Task.Run(() => StreamFinished?.Invoke(this, EventArgs.Empty));