我正在尝试使用新的[IBM Watson SDK for Unity]来获取用户的语音输入,将其解析为文本,然后将该文本传递回watson的文本到语音,以便它可以重复用户所说的内容。
我已经使用了SDK的Speech To Text widget（从麦克风获取音频剪辑并解析为文本），并结合它的Speech Display widget（接收文本输入并在场景的画布上显示）在屏幕上显示用户的语音。然后，我创建了一个UI输入字段，将其同时连接到SpeechDisplayWidget的文本输出以及TextToSpeechWidget的输入——接着创建了一个UI按钮，当单击该按钮时，将文本发送到文本到语音服务。它应该播放合成出来的语音音频，但是当我单击按钮时，没有任何反应。status标签一直显示"READY"并且永远不会改变，整个过程也不会抛出任何错误。我本想粘贴代码，但我还没有写任何代码，只是连接了所有必要的序列化字段就直接运行了。我该怎么办？
对于模糊不清而感到抱歉,但由于这是我的第一个问题,我无法发布更多图片或链接。 :/
以下是我正在尝试使用的“texttoSpeechWidget”的代码。
using UnityEngine;
using UnityEngine.UI;
using IBM.Watson.DeveloperCloud.Services.TextToSpeech.v1;
using IBM.Watson.DeveloperCloud.Logging;
using IBM.Watson.DeveloperCloud.DataTypes;
using System.Collections.Generic;
using IBM.Watson.DeveloperCloud.Utilities;
#pragma warning disable 414
namespace IBM.Watson.DeveloperCloud.Widgets
{
    /// <summary>
    /// TextToSpeech widget class wraps the TextToSpeech service.
    /// Text arriving on the "Text" input (or typed into m_Input and submitted via
    /// the optional UI button) is queued, synthesized into an AudioClip by the
    /// TextToSpeech service, and played back through the required AudioSource.
    /// </summary>
    [RequireComponent(typeof(AudioSource))]
    public class TextToSpeechWidget : Widget
    {
        #region Inputs
        // Receives TextToSpeechData from an upstream widget; routed to OnTextInput.
        [SerializeField]
        private Input m_TextInput = new Input("Text", typeof(TextToSpeechData),
        "OnTextInput");
        // Receives VoiceData to switch the synthesis voice; routed to OnVoiceSelect.
        [SerializeField]
        private Input m_VoiceInput = new Input("Voice", typeof(VoiceData),
        "OnVoiceSelect");
        #endregion

        #region Outputs
        // Fires SpeakingStateData(true/false) when clip playback starts/ends.
        [SerializeField]
        private Output m_Speaking = new Output(typeof(SpeakingStateData), true);
        // Lets a microphone widget mute itself while this widget is speaking.
        [SerializeField]
        private Output m_DisableMic = new Output(typeof(DisableMicData));
        // Streams peak audio-level samples during playback (see OnLevelOut).
        [SerializeField]
        private Output m_LevelOut = new Output(typeof(LevelData));
        #endregion

        #region Private Data
        TextToSpeech m_TextToSpeech = new TextToSpeech();   // service client used for all requests
        [SerializeField, Tooltip("How often to send level out data in seconds.")]
        private float m_LevelOutInterval = 0.05f;
        [SerializeField]
        private float m_LevelOutputModifier = 1.0f;         // scale factor applied to the reported level
        [SerializeField]
        private Button m_TextToSpeechButton = null;         // optional UI trigger; wired to OnTextToSpeech
        [SerializeField]
        private InputField m_Input = null;                  // optional UI text source for OnTextToSpeech
        [SerializeField]
        private Text m_StatusText = null;                   // optional UI status label ("READY"/"THINKING")
        [SerializeField]
        private VoiceType m_Voice = VoiceType.en_US_Michael;
        [SerializeField]
        private bool m_UsePost = false;                     // forwarded to TextToSpeech.ToSpeech()
        private AudioSource m_Source = null;                // cached in Start(); guaranteed by RequireComponent
        private int m_LastPlayPos = 0;                      // last sample position reported by OnLevelOut

        // One queued synthesis request. Ready flips to true when the service
        // callback delivers a result (Clip may still be null if synthesis failed).
        private class Speech
        {
            ~Speech()
            {
                // Hand the clip to the main-thread destroy queue when this
                // request is garbage-collected (Unity objects must not be
                // destroyed from the finalizer thread).
                if (Clip != null)
                    UnityObjectUtil.DestroyUnityObject(Clip);
            }
            public bool Ready { get; set; }
            public AudioClip Clip { get; set; }
            public Speech(TextToSpeech textToSpeech, string text, bool usePost)
            {
                // The request is fired immediately on construction; OnAudioClip
                // runs when the service responds. NOTE(review): if the service
                // never calls back (bad credentials, network failure), Ready
                // stays false and this entry blocks the head of the queue forever.
                textToSpeech.ToSpeech(text, OnAudioClip, usePost);
            }
            private void OnAudioClip(AudioClip clip)
            {
                Clip = clip;
                Ready = true;
            }
        };

        private Queue<Speech> m_SpeechQueue = new Queue<Speech>();  // pending requests, played FIFO by Update()
        private Speech m_ActiveSpeech = null;                       // request currently being played
        #endregion

        #region Public Memebers
        /// <summary>
        /// Gets or sets the voice. Default voice is English, US - Michael.
        /// Takes effect on the next synthesis request.
        /// </summary>
        /// <value>The voice.</value>
        public VoiceType Voice
        {
            get
            {
                return m_Voice;
            }
            set
            {
                m_Voice = value;
            }
        }
        #endregion

        #region Event Handlers
        /// <summary>
        /// Button event handler: queues the current contents of m_Input for
        /// synthesis. Sets the status label to "THINKING" and disables the
        /// button — but see the note in Update(), which resets both on the
        /// very next frame.
        /// </summary>
        public void OnTextToSpeech()
        {
            if (m_TextToSpeech.Voice != m_Voice)
                m_TextToSpeech.Voice = m_Voice;
            if (m_Input != null)
                m_SpeechQueue.Enqueue(new Speech(m_TextToSpeech, m_Input.text, m_UsePost));
            if (m_StatusText != null)
                m_StatusText.text = "THINKING";
            if (m_TextToSpeechButton != null)
                m_TextToSpeechButton.interactable = false;
        }
        #endregion

        #region Private Functions
        // Handler for the "Text" input: queue non-empty text for synthesis.
        private void OnTextInput(Data data)
        {
            TextToSpeechData text = data as TextToSpeechData;
            if (text == null)
                throw new WatsonException("Wrong data type received.");
            if (!string.IsNullOrEmpty(text.Text))
            {
                if (m_TextToSpeech.Voice != m_Voice)
                    m_TextToSpeech.Voice = m_Voice;
                m_SpeechQueue.Enqueue(new Speech(m_TextToSpeech, text.Text, m_UsePost));
            }
        }

        // Handler for the "Voice" input: switch the voice for later requests.
        private void OnVoiceSelect(Data data)
        {
            VoiceData voice = data as VoiceData;
            if (voice == null)
                throw new WatsonException("Unexpected data type");
            m_Voice = voice.Voice;
        }

        private void OnEnable()
        {
            // Ensure the main-thread destroy queue (used by Speech's finalizer)
            // is running before any clips can be created.
            UnityObjectUtil.StartDestroyQueue();
            if (m_StatusText != null)
                m_StatusText.text = "READY";
        }

        /// <exclude />
        protected override void Start()
        {
            base.Start();
            // Non-null by [RequireComponent(typeof(AudioSource))].
            m_Source = GetComponent<AudioSource>();
        }

        // Per-frame pump: when the AudioSource is idle and the head of the
        // queue has its clip, start playback and schedule OnEndSpeech /
        // OnLevelOut via string-based Invoke — do NOT rename those methods.
        private void Update()
        {
            if (m_Source != null && !m_Source.isPlaying
                && m_SpeechQueue.Count > 0
                && m_SpeechQueue.Peek().Ready)
            {
                CancelInvoke("OnEndSpeech");
                m_ActiveSpeech = m_SpeechQueue.Dequeue();
                if (m_ActiveSpeech.Clip != null)
                {
                    if (m_Speaking.IsConnected)
                        m_Speaking.SendData(new SpeakingStateData(true));
                    if (m_DisableMic.IsConnected)
                        m_DisableMic.SendData(new DisableMicData(true));
                    m_Source.spatialBlend = 0.0f; // 2D sound
                    m_Source.loop = false; // do not loop
                    m_Source.clip = m_ActiveSpeech.Clip; // clip
                    m_Source.Play();
                    // Schedule the end-of-speech callback for the clip's
                    // duration (samples / frequency) plus a small margin.
                    Invoke("OnEndSpeech", ((float)m_ActiveSpeech.Clip.samples / (float)m_ActiveSpeech.Clip.frequency) + 0.1f);
                    if (m_LevelOut.IsConnected)
                    {
                        m_LastPlayPos = 0;
                        InvokeRepeating("OnLevelOut", m_LevelOutInterval, m_LevelOutInterval);
                    }
                }
                else
                {
                    // Synthesis produced no clip (service failure); drop it.
                    Log.Warning("TextToSpeechWidget", "Skipping null AudioClip");
                }
            }
            // NOTE(review): this runs unconditionally every frame, so the
            // "THINKING"/disabled state set by OnTextToSpeech() is visible for
            // at most one frame; the label effectively always reads "READY".
            if (m_TextToSpeechButton != null)
                m_TextToSpeechButton.interactable = true;
            if (m_StatusText != null)
                m_StatusText.text = "READY";
        }

        // InvokeRepeating callback: publish the peak sample value since the
        // previous tick on the LevelOut output; cancels itself when playback
        // has stopped.
        private void OnLevelOut()
        {
            if (m_Source != null && m_Source.isPlaying)
            {
                int currentPos = m_Source.timeSamples;
                if (currentPos > m_LastPlayPos)
                {
                    float[] samples = new float[currentPos - m_LastPlayPos];
                    m_Source.clip.GetData(samples, m_LastPlayPos);
                    m_LevelOut.SendData(new LevelData(Mathf.Max(samples) * m_LevelOutputModifier, m_LevelOutputModifier));
                    m_LastPlayPos = currentPos;
                }
            }
            else
                CancelInvoke("OnLevelOut");
        }

        // Invoked shortly after the active clip should have finished: notify
        // listeners, stop any residual playback, release the active request.
        private void OnEndSpeech()
        {
            if (m_Speaking.IsConnected)
                m_Speaking.SendData(new SpeakingStateData(false));
            if (m_DisableMic.IsConnected)
                m_DisableMic.SendData(new DisableMicData(false));
            if (m_Source.isPlaying)
                m_Source.Stop();
            m_ActiveSpeech = null;
        }

        /// <exclude />
        protected override string GetName()
        {
            return "TextToSpeech";
        }
        #endregion
    }
}
答案 0（得分：3）
首先,您需要编写一些代码来实现它。
将输入设置为SpeechToText,将输出设置为TextToSpeech
[SerializeField]
private Input m_SpeechInput = new Input("Text", typeof(SpeechToTextData), "OnSpeech");
[SerializeField]
private Output m_SpeechOutput = new Output(typeof(TextToSpeechData), true);
然后你应该在你的小部件中添加 OnSpeech 函数，用来处理从 SpeechToTextData 到 TextToSpeechData 的转换：创建一个新的 TextToSpeechData 对象，并使用输入的 SpeechToTextData 中的文本（当它是最终识别结果 final 时）。
在OnSpeech中使用您的输出;
if (m_SpeechOutput.IsConnected)
    m_SpeechOutput.SendData( /* object of type TextToSpeechData */ );
确保您在场景中存在SpeechToTextWidget和TextToSpeechWidget,并且它们都连接到此新窗口小部件。