// This is a sample I adapted from the web. As described in the title, after calling
// startListening() none of the RecognitionListener callbacks are triggered. What I
// need is to play an audio file when onEndOfSpeech() fires, for debugging purposes.
// Please help me debug this and complete the task.
package com.example.speech;
import android.app.Activity;
import android.content.Intent;
import android.os.Bundle;
import android.speech.*;
import android.util.Log;
import android.view.*;
import android.widget.*;

import java.util.ArrayList;
/**
 * Demo activity that drives Android's {@link SpeechRecognizer} from a single
 * toggle button and logs every {@link RecognitionListener} callback.
 *
 * NOTE(review): if no listener callbacks ever fire after startListening(), the
 * usual cause is a missing
 * {@code <uses-permission android:name="android.permission.RECORD_AUDIO"/>}
 * entry in AndroidManifest.xml (plus, on API 23+, the matching runtime
 * permission request). That cannot be verified from this file — confirm it
 * before debugging further.
 */
public class MainActivity extends Activity {

    /** Tag used for all log output from this activity. */
    private static final String TAG = "MyStt3Activity";

    /** Recognizer instance; created in onCreate, released in onDestroy. */
    private SpeechRecognizer sr;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);
        sr = SpeechRecognizer.createSpeechRecognizer(this);
        sr.setRecognitionListener(new listener());
    }

    @Override
    protected void onDestroy() {
        // BUG FIX: the original never released the recognizer, leaking the
        // connection to the speech-recognition service across activity restarts.
        if (sr != null) {
            sr.destroy();
            sr = null;
        }
        super.onDestroy();
    }

    @Override
    public boolean onCreateOptionsMenu(Menu menu) {
        // Inflate the menu; this adds items to the action bar if it is present.
        getMenuInflater().inflate(R.menu.main, menu);
        return true;
    }

    /**
     * Resets the UI to the stopped state.
     * (Appears unused by the visible code; kept because it may be wired to a
     * layout android:onClick attribute — do not remove without checking XML.)
     */
    public void Click(View view) {
        TextView txt = (TextView) findViewById(R.id.txtState);
        Button btn = (Button) findViewById(R.id.btn);
        txt.setText("Stopped Listening");
        btn.setText("Start");
    }

    /**
     * Toggle handler for the start/stop button: starts a recognition session
     * when the button reads "Start" and stops it when it reads "Stop".
     */
    public void btn_Click(View view) {
        TextView txt = (TextView) findViewById(R.id.txtState);
        Button btn = (Button) findViewById(R.id.btn);
        // BUG FIX: the original used `btn.getText() == "Stop"`, which compares
        // object references, not text. getText() returns a CharSequence that is
        // never the same object as the literal, so the comparison was always
        // false and stopListening() was unreachable. Compare content instead.
        if ("Stop".contentEquals(btn.getText())) {
            txt.setText("Stopped Listening");
            btn.setText("Start");
            sr.stopListening();
        } else {
            txt.setText("Listening Now");
            btn.setText("Stop");
            Intent intent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
            intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
                    RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
            intent.putExtra(RecognizerIntent.EXTRA_CALLING_PACKAGE,
                    "voice.recognition.test");
            intent.putExtra(RecognizerIntent.EXTRA_MAX_RESULTS, 5);
            sr.startListening(intent);
        }
    }

    /**
     * Logs every recognition callback for debugging.
     * (Name kept lowercase for source compatibility; by convention the class
     * should be called Listener.)
     */
    class listener implements RecognitionListener {

        public void onReadyForSpeech(Bundle params) {
            Log.d(TAG, "onReadyForSpeech");
        }

        public void onBeginningOfSpeech() {
            Log.d(TAG, "onBeginningOfSpeech");
        }

        public void onRmsChanged(float rmsdB) {
            Log.d(TAG, "onRmsChanged");
        }

        public void onBufferReceived(byte[] buffer) {
            Log.d(TAG, "onBufferReceived");
        }

        public void onEndOfSpeech() {
            // BUG FIX: log tag said "onEndofSpeech" (lowercase 'o'), breaking
            // the naming pattern used by every other callback and making the
            // event easy to miss when filtering logcat.
            Log.d(TAG, "onEndOfSpeech");
            // TODO: play the debug audio clip here, e.g.
            //   MediaPlayer.create(MainActivity.this, R.raw.beep).start();
            // (requires an android.media.MediaPlayer import and a raw resource).
        }

        public void onError(int error) {
            // `error` is one of the SpeechRecognizer.ERROR_* codes; the most
            // common one for a missing RECORD_AUDIO permission is
            // ERROR_INSUFFICIENT_PERMISSIONS (9).
            Log.d(TAG, "error " + error);
        }

        public void onResults(Bundle results) {
            Log.d(TAG, "onResults " + results);
            // Restored from the original commented-out code, with generics and
            // a null guard: log each recognition hypothesis individually.
            ArrayList<String> data =
                    results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
            if (data != null) {
                for (String hypothesis : data) {
                    Log.d(TAG, "result " + hypothesis);
                }
            }
        }

        public void onPartialResults(Bundle partialResults) {
            Log.d(TAG, "onPartialResults");
        }

        public void onEvent(int eventType, Bundle params) {
            Log.d(TAG, "onEvent " + eventType);
        }
    }
}