我尝试使用此代码使用SRGS语法进行连续识别..但我在UWP APP中有错误:“访问已拒绝”例外。
我需要制作一个没有听写语法的方法,因为我想在互联网上使用我的应用程序..
在WP8上,您可以使用listGrammar中的“Choices”..但这里最复杂.. SOmebody可以提供帮助吗?
bool permissionGained = await CheckMicrophonePermission();
if (permissionGained)
{
var languagenum = new Windows.Globalization.Language("es-ES");
string langTag = languagenum.LanguageTag;
SpeechRecognizer recognizernum = new SpeechRecognizer(languagenum);
texto = " ..... ,, Indique digito a digito el número telefonico";
SpeechSynthesisStream streamnum = await sin.SynthesizeTextToStreamAsync(texto);
await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, new DispatchedHandler(
() => { mediaElementrec.AutoPlay = true; mediaElementrec.SetSource(streamnum, streamnum.ContentType); mediaElementrec.Play(); ; ; }));
this.speechRecognizer = new SpeechRecognizer(languagenum);
string fileName = String.Format("SRGS\\\\Grammar.xml", languagenum);
StorageFile grammarContentFile = await Package.Current.InstalledLocation.GetFileAsync(fileName);
try
{
speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;
SpeechRecognitionCompilationResult result = await speechRecognizer.CompileConstraintsAsync();
if (result.Status != SpeechRecognitionResultStatus.Success)
{
sin.Voice = (SpeechSynthesizer.AllVoices.First(x => x.Gender == VoiceGender.Male));
texto = "No se puede compilar el Archivo Grammar";
SpeechSynthesisStream streamrec = await sin.SynthesizeTextToStreamAsync(texto);
await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, new DispatchedHandler(
() => { mediaElementrec.AutoPlay = true; mediaElementrec.SetSource(streamrec, streamrec.ContentType); mediaElementrec.Play(); ; ; }));
}
else
{
SpeechRecognitionGrammarFileConstraint grammarConstraint = new SpeechRecognitionGrammarFileConstraint(grammarContentFile);
speechRecognizer.Constraints.Add(grammarConstraint);
SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();
await speechRecognizer.ContinuousRecognitionSession.StartAsync();
speechRecognizer.Timeouts.EndSilenceTimeout = TimeSpan.FromSeconds(1.2);
speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
{
var messageDialog = new Windows.UI.Popups.MessageDialog("Cant Compile Grammar");
await messageDialog.ShowAsync();
}
else
{
await speechRecognizer.ContinuousRecognitionSession.StartAsync();
speechRecognizer.Timeouts.EndSilenceTimeout = TimeSpan.FromSeconds(1.2);
speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
}
}
}
catch (Exception ex)
{
if ((uint)ex.HResult == HResultRecognizerNotFound)
{
var messageDialog = new Windows.UI.Popups.MessageDialog(ex.Message, "Exception");
await messageDialog.ShowAsync();
}
else
{
var messageDialog = new Windows.UI.Popups.MessageDialog(ex.Message, "Exception");
await messageDialog.ShowAsync();
}
if ((uint)ex.HResult == HResultRecognizerNotFound)
{
var messageDialog = new Windows.UI.Popups.MessageDialog("Language not installed ");
await messageDialog.ShowAsync();
}
else
{
var messageDialog = new Windows.UI.Popups.MessageDialog(ex.Message, "Exception");
await messageDialog.ShowAsync();
}
}
}
else
{
var messageDialog = new Windows.UI.Popups.MessageDialog("Dont have Permission Mic");
await messageDialog.ShowAsync();
}
//**/
break;