I have been stuck on this in my UWP project for a long time. I copied the sample from Microsoft, but it does not behave correctly:
if (isListening == false) {
    // The recognizer can only start listening in a continuous fashion if the
    // recognizer is currently idle. This prevents an exception from occurring.
    if (speechRecognizer.State == SpeechRecognizerState.Idle) {
        isListening = true;
        await speechRecognizer.ContinuousRecognitionSession.StartAsync();
        isListening = false;
    }
} else {
    isListening = false;
    if (speechRecognizer.State != SpeechRecognizerState.Idle) {
        // Cancelling recognition prevents any currently recognized speech from
        // generating a ResultGenerated event. StopAsync() will allow the final
        // session to complete.
        await speechRecognizer.ContinuousRecognitionSession.StopAsync();
    }
}
break;
}
private async void SpeechRecognizer_HypothesisGenerated(
    SpeechRecognizer sender,
    SpeechRecognitionHypothesisGeneratedEventArgs args) {
    string hypothesis = args.Hypothesis.Text;
    string textboxContent = dictateBuilder.ToString() + " " + hypothesis + " ...";
    await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => {
        richEbitBox.Document.SetText(TextSetOptions.None, textboxContent);
    });
}
private async void ContinuousRecognitionSession_ResultGenerated(
    SpeechContinuousRecognitionSession sender,
    SpeechContinuousRecognitionResultGeneratedEventArgs args) {
    if (args.Result.Confidence == SpeechRecognitionConfidence.Medium ||
        args.Result.Confidence == SpeechRecognitionConfidence.High) {
        dictateBuilder.Append(args.Result.Text + " ");
        await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => {
            richEbitBox.Document.SetText(TextSetOptions.None, dictateBuilder.ToString());
        });
    }
}
private void ContinuousRecognitionSession_Completed(
    SpeechContinuousRecognitionSession sender,
    SpeechContinuousRecognitionCompletedEventArgs args) {
}
I am trying to turn dictation on and then disable it again when I press the button, but everything breaks, and the error message I get has no text associated with the given error.
Answer 0 (score: 0)
Regarding the button that toggles the continuous dictation: this looks like a threading issue. I moved all of the SpeechRecognizer initialization steps into the OnNavigatedTo method, and the error no longer occurs.
protected async override void OnNavigatedTo(NavigationEventArgs e)
{
    base.OnNavigatedTo(e);
    bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission();
    if (permissionGained)
    {
        dispatcher = CoreWindow.GetForCurrentThread().Dispatcher;
        var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");
        speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;
        speechRecognizer.Constraints.Add(dictationConstraint);
        SpeechRecognitionCompilationResult result = await speechRecognizer.CompileConstraintsAsync();
        if (result.Status != SpeechRecognitionResultStatus.Success)
        {
            // Constraint compilation failed; leave the recognizer unconfigured.
            return;
        }
        speechRecognizer.ContinuousRecognitionSession.Completed += ContinuousRecognitionSession_Completed;
        speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
        speechRecognizer.HypothesisGenerated += SpeechRecognizer_HypothesisGenerated;
    }
    else
    {
        // Microphone permission was denied; nothing to initialize.
    }
}
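When navigating away from the page, it is worth pairing this with the cleanup the Microsoft sample does; a minimal sketch, assuming speechRecognizer is the field configured above and that the same event handlers were attached:
protected async override void OnNavigatedFrom(NavigationEventArgs e)
{
    base.OnNavigatedFrom(e);
    if (speechRecognizer != null)
    {
        // Stop any active continuous session before tearing the recognizer down.
        if (speechRecognizer.State != SpeechRecognizerState.Idle)
        {
            await speechRecognizer.ContinuousRecognitionSession.CancelAsync();
        }
        speechRecognizer.ContinuousRecognitionSession.Completed -= ContinuousRecognitionSession_Completed;
        speechRecognizer.ContinuousRecognitionSession.ResultGenerated -= ContinuousRecognitionSession_ResultGenerated;
        speechRecognizer.HypothesisGenerated -= SpeechRecognizer_HypothesisGenerated;
        speechRecognizer.StateChanged -= SpeechRecognizer_StateChanged;
        speechRecognizer.Dispose();
        speechRecognizer = null;
    }
}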
I have uploaded a demo of this case here, please check it.