我有一个项目,使用RecognizerIntent.ACTION_RECOGNIZE_SPEECH
UI是1个textview和1个命令按钮,分别显示语音识别结果和开始语音识别
我还使用此处显示的方法在AMR文件中录制语音识别器的输入:
record/save audio from voice recognition intent
这是可能的,因为语音识别器的输入音频在语音识别器活动返回的InputStream
中以Intent data
形式提供
但是,我注意到当语音识别器活动无法识别语音(与错误识别不同)时,仍会调用onActivityResult(...)
函数,但它不会返回Intent data
,从而导致NullPointerException在尝试读取AMR文件时。当语音识别器无法识别语音时,如何获得语音识别器的输入?
代码:
import android.content.ContentResolver;
import android.content.Intent;
import android.net.Uri;
import android.os.Environment;
import android.speech.RecognizerIntent;
import android.support.v7.app.AppCompatActivity;
import android.os.Bundle;
import android.util.Log;
import android.view.View;
import android.widget.Button;
import android.widget.TextView;
import android.widget.Toast;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Locale;
public class MainActivity extends AppCompatActivity {
Button btnSpeak;
TextView txtViewResult;
private static final int VOICE_RECOGNITION = 1;
String saveFileLoc = Environment.getExternalStorageDirectory().getPath();
File fileAmrFile = new File(saveFileLoc+"/recordedSpeech.amr");
OutputStream outputStream = null;
InputStream filestream = null;
Uri audioUri = null;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
btnSpeak = (Button)findViewById(R.id.button1);
txtViewResult = (TextView)findViewById(R.id.textView1);
if(fileAmrFile.exists() ){
fileAmrFile.delete();
}
runprog();
} // protected void onCreate(Bundle savedInstanceState) { CLOSED
private void runprog(){
btnSpeak.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
// Toast.makeText(getApplicationContext(), "Button pressed", Toast.LENGTH_LONG).show();
// Fire an intent to start the speech recognition activity.
Intent intent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
// Specify free form input
intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL, RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
intent.putExtra(RecognizerIntent.EXTRA_PROMPT,"Please start speaking");
intent.putExtra(RecognizerIntent.EXTRA_MAX_RESULTS, 1);
intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE, Locale.ENGLISH);
// secret parameters that when added provide audio url in the result
intent.putExtra("android.speech.extra.GET_AUDIO_FORMAT", "audio/AMR");
intent.putExtra("android.speech.extra.GET_AUDIO", true);
startActivityForResult(intent, VOICE_RECOGNITION);
}
});
} // private void runprog(){ CLOSED
// handle result of speech recognition
@Override
public void onActivityResult(int requestCode, int resultCode, Intent data) {
// if (requestCode == VOICE_RECOGNITION && resultCode == RESULT_OK) {
if (requestCode == VOICE_RECOGNITION ) {
ArrayList<String> results;
results = data.getStringArrayListExtra(RecognizerIntent.EXTRA_RESULTS);
txtViewResult.setText(results.get(0));
Log.v("MYLOG", "Speech 2 text");
// the required audio will be returned by getExtras:
Bundle bundle = data.getExtras();
ArrayList<String> matches = bundle.getStringArrayList(RecognizerIntent.EXTRA_RESULTS);
// Toast.makeText(getApplicationContext(), "HEREHERE", Toast.LENGTH_LONG).show();
Log.v("MYLOG", "B4 Uri creation");
// /*
// the recording url is in getData:
audioUri = data.getData();
ContentResolver contentResolver = getContentResolver();
try {
filestream = contentResolver.openInputStream(audioUri);
} catch (FileNotFoundException e) {
e.printStackTrace();
}
// TODO: read audio file from inputstream
try {
outputStream = new FileOutputStream(new File(saveFileLoc + "/recordedSpeech.wav"));
} catch (FileNotFoundException e) {
e.printStackTrace();
}
try {
int read = 0;
byte[] bytes = new byte[1024];
while ((read = filestream.read(bytes)) != -1) {
outputStream.write(bytes, 0, read);
}
// System.out.println("Done!");
Toast.makeText(getApplicationContext(), "Done", Toast.LENGTH_LONG).show();
} catch (IOException e) {
e.printStackTrace();
} finally {
if (filestream != null) {
try {
filestream.close();
} catch (IOException e) {
e.printStackTrace();
}
}
if (outputStream != null) {
try {
// outputStream.flush();
outputStream.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
// */
}
} // public void onActivityResult(int requestCode, int resultCode, Intent data) { CLOSED
} // protected void onCreate(Bundle savedInstanceState) { CLOSED
layout.xml:
<?xml version="1.0" encoding="utf-8"?>
<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
android:layout_width="fill_parent"
android:layout_height="fill_parent"
android:orientation="vertical" >
<TextView
android:id="@+id/textView1"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:text="@string/spoken" />
<Button
android:id="@+id/button1"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:layout_alignLeft="@+id/textView1"
android:layout_below="@+id/textView1"
android:text="@string/button_text"
/>
</LinearLayout>
作为参考,这就是我说the speech recogniser fails to recognise speech
时的意思(请注意,这与语音识别器错误检测时的情况不同;如果检测错误,仍然会识别语音)
电话详情:
型号:三星Galaxy S4
型号:SHV-E330L
Android版本:5.01