Send a microphone recording from the browser to Google Speech-to-Text. There is no need for streaming and sockets, nor for an HTTP request from Node.js to the Google server, nor for an HTTP request on the client (browser) side.
The problem I am facing:
The client-side implementation is finished, and so is the server-side implementation; both work independently of each other. I am getting audio data from the microphone, I am able to play it back, and I am able to test the server-side implementation with the audio.raw sample that Google provides.
However, when I try to send the microphone data from the browser to my Node server and on to the Google server, I run into an encoding problem: I get an empty response from the Google server.
My question is how to change the encoding of the audio and then send it to the Google Speech-to-Text server using JavaScript.
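For reference, the decoding parameters live in Google's RecognitionConfig object; a minimal sketch with illustrative values (LINEAR16 here is an assumption about the recording, not something stated above):

// Sketch of a RecognitionConfig for raw 16-bit PCM audio.
// For WAV uploads, encoding and sampleRateHertz can usually be omitted,
// since they are read from the WAV header.
const config = {
  encoding: 'LINEAR16',    // how the audio bytes are encoded
  sampleRateHertz: 16000,  // must match the actual recording rate
  languageCode: 'en-US'
};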
Answer 0 (score: 3)
I've been playing around with this and can get text from the Google API with browser audio recording. I wonder whether the config object might be the cause of the problem you're seeing.
The components I used are a Node.js server (server.js) and a simple client (index.html and client-app.js), all in the same folder.
I'm using the Google Speech-to-Text client library for this, so you'll need to add a Google API key file (APIKey.json) to provide the credentials.
If you run the Node server, then point your browser to http://localhost:3000/, which will let you test the code.
I also pulled in a lot of the client code from Matt Diamond's Recorder.js (https://pets.netlify.com/).
server.js
const express = require('express');
const multer = require('multer');
const fs = require('fs');

const upload = multer();
const app = express();
const port = 3000;

// Serve index.html and client-app.js from the current folder.
app.use(express.static('./'));

async function testGoogleTextToSpeech(audioBuffer) {
  const speech = require('@google-cloud/speech');
  const client = new speech.SpeechClient({ keyFilename: "APIKey.json" });

  // The upload is a WAV file, so its header already declares the encoding
  // and sample rate; the config only needs the language code.
  const audio = {
    content: audioBuffer.toString('base64'),
  };
  const config = {
    languageCode: 'en-US',
  };
  const request = {
    audio: audio,
    config: config,
  };

  const [response] = await client.recognize(request);
  const transcription = response.results
    .map(result => result.alternatives[0].transcript)
    .join('\n');
  return transcription;
}

// Receive the multipart upload from the browser and return the transcription.
app.post('/upload_sound', upload.any(), async (req, res) => {
  console.log("Getting text transcription..");
  let transcription = await testGoogleTextToSpeech(req.files[0].buffer);
  console.log("Text transcription: " + transcription);
  res.status(200).send(transcription);
});

app.listen(port, () => {
  console.log(`Express server listening on port: ${port}...`);
});
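To run the server above, the three packages it requires need to be installed first; a one-line sketch, assuming npm:

npm install express multer @google-cloud/speech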
index.html
<!DOCTYPE html>
<html>
<head>
  <meta charset="UTF-8">
  <title>Speech to text test</title>
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <link rel="stylesheet" type="text/css" href="https://bootswatch.com/4/cerulean/bootstrap.min.css">
</head>
<body style="padding:50px;">
  <h1>Speech to text test</h1>
  <div id="controls">
    <button id="recordButton">Record</button>
    <button id="transcribeButton" disabled>Get transcription</button>
  </div>
  <div id="output"></div>
  <script src="https://cdn.rawgit.com/mattdiamond/Recorderjs/08e7abd9/dist/recorder.js"></script>
  <script src="client-app.js"></script>
</body>
</html>
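One caveat from later on (not part of the original answer): the cdn.rawgit.com service has since been shut down, so if that script tag stops loading, a locally served copy of dist/recorder.js works instead:

<script src="recorder.js"></script>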
client-app.js
let rec = null;
let audioStream = null;

const recordButton = document.getElementById("recordButton");
const transcribeButton = document.getElementById("transcribeButton");

recordButton.addEventListener("click", startRecording);
transcribeButton.addEventListener("click", transcribeText);

// Ask for the microphone and start recording with Recorder.js (mono).
function startRecording() {
  let constraints = { audio: true, video: false };

  recordButton.disabled = true;
  transcribeButton.disabled = false;

  navigator.mediaDevices.getUserMedia(constraints).then(function(stream) {
    const audioContext = new window.AudioContext();
    audioStream = stream;
    const input = audioContext.createMediaStreamSource(stream);
    rec = new Recorder(input, { numChannels: 1 });
    rec.record();
  }).catch(function(err) {
    recordButton.disabled = false;
    transcribeButton.disabled = true;
  });
}

// Stop recording, release the microphone, and export the audio as a WAV blob.
function transcribeText() {
  transcribeButton.disabled = true;
  recordButton.disabled = false;
  rec.stop();
  audioStream.getAudioTracks()[0].stop();
  rec.exportWAV(uploadSoundData);
}

// POST the WAV blob to the server and show the transcription it returns.
function uploadSoundData(blob) {
  let filename = new Date().toISOString();
  let xhr = new XMLHttpRequest();
  xhr.onload = function(e) {
    if (this.readyState === 4) {
      document.getElementById("output").innerHTML =
        `<br><br><strong>Result: </strong>${e.target.responseText}`;
    }
  };
  let formData = new FormData();
  formData.append("audio_data", blob, filename);
  xhr.open("POST", "/upload_sound", true);
  xhr.send(formData);
}
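A side note, not from the original answer: the same upload can also be written with the fetch API instead of XMLHttpRequest; a minimal sketch, assuming the same /upload_sound route:

// Equivalent upload using fetch (sketch).
async function uploadSoundDataFetch(blob) {
  const formData = new FormData();
  formData.append("audio_data", blob, new Date().toISOString());
  const response = await fetch("/upload_sound", { method: "POST", body: formData });
  document.getElementById("output").innerHTML =
    `<br><br><strong>Result: </strong>${await response.text()}`;
}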
Answer 1 (score: 1)
@terry-lennox Thank you very much for the clear answer.
However, I am using React as the front end, so I went with an npm package called recorder-js.
The code is here for reference by anyone who comes across this post later.
import React, { Component } from 'react'; // was missing from the original snippet
import Recorder from 'recorder-js';

import micGrey from './mic-grey.svg';
import micWhite from './mic-white.svg';

import './App.css';

var recorder = null;
var audioStream = null;

class App extends Component {
  constructor(props) {
    super(props);
    this.mic = React.createRef();

    this.accessMic = this.accessMic.bind(this);
    this.handleClick = this.handleClick.bind(this);
    this.handleSuccess = this.handleSuccess.bind(this);
    this.stopAccessingMic = this.stopAccessingMic.bind(this);
    this.getTextFromGoogle = this.getTextFromGoogle.bind(this);

    this.state = {
      isMicActive: false
    };
  }
  // Create the recorder and ask for microphone access.
  accessMic() {
    const audioContext = new (window.AudioContext ||
      window.webkitAudioContext)();

    recorder = new Recorder(audioContext);

    navigator.mediaDevices
      .getUserMedia({ audio: true })
      .then(this.handleSuccess)
      .catch(err => console.log('Uh oh... unable to get stream...', err));
  }

  handleSuccess(stream) {
    audioStream = stream;
    recorder.init(stream);
    recorder.start();
  }

  // Upload the recorded blob to the Node server for transcription.
  getTextFromGoogle(blob) {
    let filename = new Date().toISOString();
    let xhr = new XMLHttpRequest();
    xhr.onload = function(e) {
      if (this.readyState === 4) {
        console.log(e.target.responseText);
      }
    };
    let formData = new FormData();
    formData.append('audio_data', blob, filename);
    xhr.open('POST', 'http://localhost:3000/', true);
    xhr.send(formData);
  }

  // Toggle recording on and off when the mic button is clicked.
  handleClick() {
    const isMicActive = this.state.isMicActive;

    this.setState({
      isMicActive: !isMicActive
    });

    if (!isMicActive) {
      this.checkPermissions();
      this.accessMic();
    } else {
      this.stopAccessingMic();
    }
  }

  // Release the microphone, then send the finished recording off.
  stopAccessingMic() {
    audioStream && audioStream.getTracks()[0].stop();
    recorder.stop().then(({ blob, buffer }) => {
      this.getTextFromGoogle(blob);
    });
  }

  checkPermissions() {
    navigator.permissions
      .query({ name: 'microphone' })
      .then(permissionObj => {
        console.log('Permission status - ', permissionObj.state);
      })
      .catch(error => {
        console.log('Permission status - Got error :', error);
      });
  }

  render() {
    return (
      <div className='App'>
        <div
          id='mic'
          ref={this.mic}
          onClick={this.handleClick}
          className={
            this.state.isMicActive ? 'mic-btn mic-btn-active' : 'mic-btn'
          }
        >
          <img src={this.state.isMicActive ? micWhite : micGrey} alt='mic' />
        </div>
      </div>
    );
  }
}

export default App;
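For completeness, a minimal entry point to mount the component; the index.js file name and the root element id are assumptions on my part, not from the original post:

// index.js (assumed entry point): mounts App into <div id="root">.
import React from 'react';
import ReactDOM from 'react-dom';
import App from './App';

ReactDOM.render(<App />, document.getElementById('root'));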
The back-end code is below for reference. I hit one small difference: an error saying that only single-channel (mono) audio must be used. Following Link, Link, the fix was to add audioChannelCount: 2 to the config.
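In other words, the only change from the first answer's config is a single extra field; a minimal sketch of just that delta (the full file follows):

// The recording arrives as stereo WAV, so the channel count must be declared;
// without it, the API errors that only single-channel (mono) audio is accepted.
const config = {
  languageCode: 'en-US',
  audioChannelCount: 2
};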
var express = require('express'); // required for express.Router(); missing in the original snippet
var router = express.Router();
const multer = require('multer');
const fs = require('fs');
const upload = multer();

// Point the Google client library at the service account key file.
process.env.GOOGLE_APPLICATION_CREDENTIALS =
  'C:/Users/user/Desktop/Speech-to-Text-e851cb3889e5.json';

/* POST the recorded audio and return its transcription. */
router.post('/', upload.any(), async (req, res, next) => {
  console.log('Getting text transcription..');
  try {
    let transcription = await testGoogleTextToSpeech(req.files[0].buffer);
    console.log('Text transcription: ' + transcription);
    res.status(200).send(transcription);
  } catch (error) {
    console.log(error);
    res.status(400).send(error);
  }
});

async function testGoogleTextToSpeech(audioBuffer) {
  const speech = require('@google-cloud/speech');
  const client = new speech.SpeechClient();

  const audio = {
    content: audioBuffer.toString('base64')
  };
  // The browser recording is stereo, so declare two channels to the API.
  const config = {
    languageCode: 'en-US',
    audioChannelCount: 2
  };
  const request = {
    audio: audio,
    config: config
  };

  try {
    const [response] = await client.recognize(request);
    const transcription = response.results
      .map(result => result.alternatives[0].transcript)
      .join('\n');
    return transcription;
  } catch (error) {
    return error;
  }
}

module.exports = router;