如何将麦克风音频直接流式传输到Watson Speech to Text服务

时间:2018-09-12 03:12:18

标签: ibm-watson speech-to-text

我们想直接通过麦克风将语音发送到Watson Speech to Text(语音转文本)服务,但是看来我们必须先经过.wav文件吗?请查看以下代码,特别是我正尝试将麦克风直接流式传输到SpeechToText服务。我相信直接使用麦克风才是最常见的方法,而不是先将其通过管道写入.wav,然后再将.wav文件流式传输到stt:

// Stream microphone audio (captured with ALSA's `arecord`) directly into the
// Watson Speech to Text service -- no intermediate .wav file needed.
var mic;
var SpeechToTextV1 = require('watson-developer-cloud/speech-to-text/v1');
var fs = require('fs');
var watson = require('watson-developer-cloud');
var cp = require('child_process');

// arecord emits signed 16-bit little-endian PCM, 44100 Hz, mono.
// The recognize stream's content_type below must describe exactly this.
mic = cp.spawn('arecord', ['--device=plughw:1,0', '--format=S16_LE', '--rate=44100', '--channels=1']); //, '--duration=10'
mic.stderr.pipe(process.stderr);
stt();

// Opens a recognize stream against the service and pipes the live microphone
// audio into it; transcription text is appended to transcription.txt and
// every stream event is logged to the console.
function stt() {
    console.log("openCMDS");
    var speech_to_text = new SpeechToTextV1({
        username: '',
        password: ''
    });

    var params = {
        // FIX: was 'audio/wav'. When streamed from arecord's stdout the bytes
        // are best described as raw L16 PCM; the type must match the
        // --format/--rate/--channels flags above or recognition fails.
        content_type: 'audio/l16; rate=44100; channels=1',
        model: 'zh-CN_BroadbandModel',
        continuous: true,
        inactivity_timeout: -1
    };

    // FIX: declared with `var` -- the original assigned an implicit global.
    var recognizeStream = speech_to_text.createRecognizeStream(params);

    // Decode the service's results as UTF-8 text before wiring up consumers.
    recognizeStream.setEncoding('utf8');

    // FIX: the original piped BOTH mic.stdout AND a test.wav read stream into
    // the same recognize stream, interleaving two unrelated audio sources.
    // Exactly one audio source may feed a recognize stream.
    mic.stdout.pipe(recognizeStream);
    recognizeStream.pipe(fs.createWriteStream('transcription.txt'));

    console.log("start record");

    recognizeStream.on('data', function(event) { onEvent('Data:', event); });
    recognizeStream.on('error', function(event) { onEvent('Error:', event); });
    recognizeStream.on('close', function(event) { onEvent('Close:', event); });

    // Display events on the console.
    function onEvent(name, event) {
        console.log(name, JSON.stringify(event, null, 2));
    }
}

1 个答案:

答案 0(得分:1):

语音转文本服务需要知道您要发送的音频格式。我看到的问题中有99%是因为该服务期望的音频格式与用户所使用的不同。

'--format=S16_LE', '--rate=44100', '--channels=1'

这看起来像是44.1kHz的PCM格式。

在您的代码中,您指定:

content_type: 'audio/wav'

看看supported audio formats

也许尝试使用 `audio/l16; rate=44100;`。您也可以改用其他受支持的格式来录制音频。

最后,看看javascript-speech-sdk。我们有一些示例,说明了如何从浏览器流式传输麦克风。

更新

// Pipe live microphone audio straight into Watson Speech to Text and print
// interim transcription results as they arrive.
const Mic = require('mic');
const SpeechToTextV1 = require('watson-developer-cloud/speech-to-text/v1');

// Service client -- supply your own credentials and endpoint.
const speechToText = new SpeechToTextV1({
  username: 'YOUR USERNAME',
  password: 'YOUR PASSWORD',
  url: 'YOUR SERVICE URL',
  version: 'v1'
});

// 1. Microphone capture: 44.1 kHz stereo PCM; stop after 6 s of silence.
const recorder = Mic({
  rate: 44100,
  channels: 2,
  debug: false,
  exitOnSilence: 6
});

// 2. Recognize stream: content_type must describe the raw PCM the mic emits.
const sttStream = speechToText.createRecognizeStream({
  content_type: 'audio/l16; rate=44100; channels=2',
  model: 'zh-CN_BroadbandModel',
  interim_results: true,
})

// 3. Begin recording.
const audioIn = recorder.getAudioStream();
recorder.start();

console.log('Watson is listening, you may speak now.');

// 4. Feed the audio into the service; results come back as UTF-8 text.
const transcript = audioIn.pipe(sttStream).setEncoding('utf8');

transcript.on('data', (text) => console.log('Watson hears:', text));
transcript.on('error', (e) => console.log(`error: ${e}`));
transcript.on('close', (e) => console.log(`close: ${e}`));