I'm building an app with a WebView that handles voice calls over WebRTC. The microphone works fine, since I've granted the WebView the permission it asks for:
webView.setWebChromeClient(new WebChromeClient() {
    @Override
    public void onPermissionRequest(final PermissionRequest request) {
        // Grant whatever the page requests (audio capture for the WebRTC call).
        request.grant(request.getResources());
    }
});
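For reference, I know the grant can also be narrowed instead of handing over everything the page asks for; a minimal sketch of that variant (the filtering is my own assumption, on the premise that the page only ever requests audio capture for the call):

webView.setWebChromeClient(new WebChromeClient() {
    @Override
    public void onPermissionRequest(final PermissionRequest request) {
        // Grant only the audio-capture resource; deny anything else.
        for (String resource : request.getResources()) {
            if (PermissionRequest.RESOURCE_AUDIO_CAPTURE.equals(resource)) {
                request.grant(new String[]{PermissionRequest.RESOURCE_AUDIO_CAPTURE});
                return;
            }
        }
        request.deny();
    }
});

Either way, the in-call microphone works.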
Later I decided to add a SpeechRecognizer so that I could recognize what I'm saying during the WebRTC call. I first tried to run the recognition in the same activity, and then moved it into a separate service, but unfortunately the two won't work at the same time: the microphone is held by the WebView, and the SpeechRecognizer gets no sound at all (the RMS stays at -2.12). Conversely, if I start the service before placing the call through the WebView, the person I call can't hear me at all (the SpeechRecognizer holds the microphone and the WebView gets nothing). I'd be glad to find any solution, if one exists. I'm not an iOS developer, but I've heard this works on the iPhone, so it was a surprise that it seems impossible on Android.

My speech recognition service code:

import android.app.Service;
import android.content.Intent;
import android.os.Bundle;
import android.os.IBinder;
import android.speech.RecognitionListener;
import android.speech.RecognizerIntent;
import android.speech.SpeechRecognizer;
import android.util.Log;
import android.widget.Toast;
import java.util.ArrayList;

public class RecognitionService extends Service implements RecognitionListener {
    private String LOG_TAG = "RecognitionService";
    private SpeechRecognizer speech = null;
    private Intent recognizerIntent;

    public RecognitionService() {
    }

    @Override
    public IBinder onBind(Intent intent) {
        // TODO: Return the communication channel to the service.
        startRecognition();
        return null;
    }

    @Override
    public void onCreate() {
        Log.i("Test", "RecognitionService: onCreate");
        startRecognition();
    }

    // Creates a fresh recognizer and starts listening with a Russian
    // web-search language model.
    private void startRecognition() {
        speech = SpeechRecognizer.createSpeechRecognizer(this);
        speech.setRecognitionListener(this);
        recognizerIntent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
        recognizerIntent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_PREFERENCE, "ru-RU");
        recognizerIntent.putExtra(RecognizerIntent.EXTRA_CALLING_PACKAGE, getPackageName());
        recognizerIntent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
                RecognizerIntent.LANGUAGE_MODEL_WEB_SEARCH);
        recognizerIntent.putExtra(RecognizerIntent.EXTRA_MAX_RESULTS, 3);
        speech.startListening(recognizerIntent);
    }

    @Override
    public void onBeginningOfSpeech() {
        Log.i(LOG_TAG, "onBeginningOfSpeech");
    }

    @Override
    public void onBufferReceived(byte[] buffer) {
        Log.i(LOG_TAG, "onBufferReceived: " + buffer);
    }

    @Override
    public void onEndOfSpeech() {
        Log.i(LOG_TAG, "onEndOfSpeech");
    }

    @Override
    public void onError(int errorCode) {
        String errorMessage = getErrorText(errorCode);
        Log.d(LOG_TAG, "FAILED " + errorMessage);
        // Tear the recognizer down and restart so listening is continuous.
        speech.destroy();
        startRecognition();
    }

    @Override
    public void onEvent(int arg0, Bundle arg1) {
        Log.i(LOG_TAG, "onEvent");
    }

    @Override
    public void onPartialResults(Bundle arg0) {
        Log.i(LOG_TAG, "onPartialResults");
    }

    @Override
    public void onReadyForSpeech(Bundle arg0) {
        Log.i(LOG_TAG, "onReadyForSpeech");
    }

    @Override
    public void onResults(Bundle results) {
        Log.i(LOG_TAG, "onResults");
        ArrayList<String> matches = results
                .getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
        String text = "";
        for (String result : matches)
            text += result + "\n";
        Toast.makeText(getApplicationContext(), text, Toast.LENGTH_SHORT).show();
        // Restart to keep recognizing after each final result.
        speech.destroy();
        startRecognition();
    }

    public static String getErrorText(int errorCode) {
        String message;
        switch (errorCode) {
            case SpeechRecognizer.ERROR_AUDIO:
                message = "Audio recording error";
                break;
            case SpeechRecognizer.ERROR_CLIENT:
                message = "Client side error";
                break;
            case SpeechRecognizer.ERROR_INSUFFICIENT_PERMISSIONS:
                message = "Insufficient permissions";
                break;
            case SpeechRecognizer.ERROR_NETWORK:
                message = "Network error";
                break;
            case SpeechRecognizer.ERROR_NETWORK_TIMEOUT:
                message = "Network timeout";
                break;
            case SpeechRecognizer.ERROR_NO_MATCH:
                message = "No match";
                break;
            case SpeechRecognizer.ERROR_RECOGNIZER_BUSY:
                message = "RecognitionService busy";
                break;
            case SpeechRecognizer.ERROR_SERVER:
                message = "Error from server";
                break;
            case SpeechRecognizer.ERROR_SPEECH_TIMEOUT:
                message = "No speech input";
                break;
            default:
                message = "Didn't understand, please try again.";
                break;
        }
        return message;
    }

    @Override
    public void onRmsChanged(float rmsdB) {
        Log.i(LOG_TAG, "onRmsChanged: " + rmsdB);
    }
}
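For completeness, this is roughly how the service gets started from the activity hosting the WebView (a minimal sketch; where exactly I call it is just how my test is wired up, and RECORD_AUDIO has already been granted at that point):

// Hypothetical call site inside the activity that hosts the WebView.
startService(new Intent(this, RecognitionService.class));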