是否可以使用C#创建一个Windows服务,让它在后台运行、识别用户的语音,并像Alexa那样根据语音命令执行某些操作?我已经创建了一个可以识别用户语音的应用程序,但无法通过Windows服务得到相同的结果。当我运行该服务时,事件查看器显示服务已成功启动,但当我说出任何命令时,服务什么也不做——也就是说,程序从未进入recEngine_SpeechRecognized()函数。我检查过语法已成功加载,而且该服务也能够将任意文本合成为语音,但它无法识别任何语音;不确定我在这里做错了什么。
请在下面找到我尝试创建的Windows服务的代码。
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Diagnostics;
using System.IO;
using System.Linq;
using System.ServiceProcess;
using System.Text;
using System.Threading.Tasks;
using System.Timers;
using System.Speech.Recognition;
using System.Speech.Synthesis;
namespace WindowsService1
{
    /// <summary>
    /// Windows service that listens for a small fixed set of voice commands via
    /// System.Speech and responds with synthesized speech. All activity is
    /// appended to a per-day log file under a "Logs" folder next to the
    /// service executable.
    /// NOTE(review): Windows services run in session 0 and have no access to
    /// the interactive user's audio input devices, which is the likely reason
    /// SpeechRecognized never fires when this code runs as a service even
    /// though the same code works in a desktop app — confirm against the
    /// deployment environment.
    /// </summary>
    public partial class Service1 : ServiceBase
    {
        private readonly Timer timer = new Timer();   // System.Timers.Timer — periodic heartbeat log
        private readonly SpeechRecognitionEngine recEngine = new SpeechRecognitionEngine();
        private readonly SpeechSynthesizer synthesizer = new SpeechSynthesizer();
        // Serializes log-file writes: the timer and the speech events fire on
        // thread-pool threads and may call WriteToFile concurrently.
        private readonly object logLock = new object();

        public Service1()
        {
            InitializeComponent();
        }

        /// <summary>
        /// Service entry point: starts continuous speech recognition and a
        /// 5-second heartbeat timer that proves the service is alive.
        /// </summary>
        protected override void OnStart(string[] args)
        {
            WriteToFile("Service is started at " + DateTime.Now);
            SpeechRecog();
            timer.Elapsed += OnElapsedTime;
            timer.Interval = 5000; // milliseconds
            timer.Enabled = true;
        }

        /// <summary>Stops the heartbeat timer and the recognizer, then logs shutdown.</summary>
        protected override void OnStop()
        {
            // BUG FIX: the timer was never disabled, so heartbeat entries kept
            // being written after the service was stopped.
            timer.Enabled = false;
            recEngine.RecognizeAsyncStop();
            WriteToFile("Service is stopped at " + DateTime.Now);
        }

        // Heartbeat handler — writes a liveness entry to the log.
        private void OnElapsedTime(object source, ElapsedEventArgs e)
        {
            WriteToFile("Service is recall at " + DateTime.Now);
        }

        /// <summary>
        /// Builds the command grammar ("say hello", "print my name"), wires up
        /// the recognizer events, and starts continuous asynchronous
        /// recognition on the default audio device.
        /// </summary>
        public void SpeechRecog()
        {
            WriteToFile("Inside Speech recog " + DateTime.Now);

            Choices commands = new Choices();
            commands.Add(new string[] { "say hello", "print my name" });
            GrammarBuilder gBuilder = new GrammarBuilder();
            gBuilder.Append(commands);
            Grammar grammar = new Grammar(gBuilder);

            // BUG FIX: subscribe to the events BEFORE starting the async grammar
            // load — otherwise LoadGrammarCompleted can fire before the handler
            // is attached and the "Grammar loaded" confirmation is silently lost.
            recEngine.LoadGrammarCompleted += LoadGrammarCompleted;
            recEngine.SpeechRecognized += recEngine_SpeechRecognized;

            recEngine.LoadGrammarAsync(grammar);
            recEngine.SetInputToDefaultAudioDevice();
            recEngine.RecognizeAsync(RecognizeMode.Multiple);
            //recEngine.EmulateRecognize("say hello");
        }

        // Logged confirmation that the async grammar load finished.
        private void LoadGrammarCompleted(object sender, LoadGrammarCompletedEventArgs e)
        {
            WriteToFile("Grammar loaded " + DateTime.Now);
        }

        // Dispatches a recognized phrase to its spoken response.
        private void recEngine_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            WriteToFile("Speech recognized " + e.Result.Text + " " + DateTime.Now);
            switch (e.Result.Text)
            {
                case "say hello":
                    synthesizer.SpeakAsync("Hello Sayam");
                    WriteToFile("Hello sayam");
                    break;
                // BUG FIX: the grammar contains "print my name", but this case
                // tested "say my name" — a phrase the recognizer can never
                // produce, so the branch was unreachable.
                case "print my name":
                    synthesizer.SpeakAsync("Sayam");
                    break;
            }
        }

        /// <summary>
        /// Appends <paramref name="Message"/> to today's log file, creating the
        /// Logs directory and the file on first use. Thread-safe.
        /// </summary>
        public void WriteToFile(string Message)
        {
            string path = Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "Logs");
            string filepath = Path.Combine(
                path,
                "ServiceLog_" + DateTime.Now.Date.ToShortDateString().Replace('/', '_') + ".txt");

            // Lock so concurrent timer/speech callbacks don't interleave writes
            // or race on directory/file creation.
            lock (logLock)
            {
                if (!Directory.Exists(path))
                {
                    Directory.CreateDirectory(path);
                }
                // File.AppendText creates the file when it does not exist, so the
                // original exists/create-vs-append branch was redundant.
                using (StreamWriter sw = File.AppendText(filepath))
                {
                    sw.WriteLine(Message);
                }
            }
        }
    }
}