We have built a web application. The core of the application is scheduling meetings/sessions on the web. User A (the meeting coordinator) schedules a meeting/session, and all the other participants (B, C, D and so on) join that meeting/session. I have implemented this using Twilio group video calls.
I have the following use case: we want to pitch-shift the voice of user A (the meeting coordinator), so that all the other participants receive the pitch-shifted voice in the group video call. We have looked into AWS Polly with Twilio, but it does not match our use case.
So please advise whether there is any service in Twilio that can achieve this scenario,
(or)
Is it possible to intercept the Twilio group call and pass the pitch-shifted voice on to the other participants?
Sample code used:
initAudio();

function initAudio() {
    // audioContext, the analysers and the gain nodes are globals shared by these functions
    analyser1 = audioContext.createAnalyser();
    analyser1.fftSize = 1024;
    analyser2 = audioContext.createAnalyser();
    analyser2.fftSize = 1024;

    // Fall back to the prefixed getUserMedia implementations where needed
    if (!navigator.getUserMedia)
        navigator.getUserMedia = navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
    if (!navigator.getUserMedia)
        return alert("Error: getUserMedia not supported!");

    // Ask for the microphone stream and hand it to gotStream()
    navigator.getUserMedia({ audio: true }, function (stream) {
        gotStream(stream);
    }, function () {
        console.log('Error getting Microphone stream');
    });

    // Enumerate audio input sources where the (deprecated) API is available
    if ((typeof MediaStreamTrack === 'undefined') || (!MediaStreamTrack.getSources)) {
        console.log("This browser does not support MediaStreamTrack, so doesn't support selecting sources.\n\nTry Chrome Canary.");
    } else {
        MediaStreamTrack.getSources(gotSources);
    }
}
function gotStream(stream) {
    // Microphone source node
    audioInput = audioContext.createMediaStreamSource(stream);

    // Gain nodes for mixing the dry (unprocessed) and wet (effected) paths
    outputMix = audioContext.createGain();
    dryGain = audioContext.createGain();
    wetGain = audioContext.createGain();
    effectInput = audioContext.createGain();

    audioInput.connect(dryGain);
    audioInput.connect(effectInput);
    dryGain.connect(outputMix);
    wetGain.connect(outputMix);

    // Destination node whose .stream carries the processed audio
    audioOutput = audioContext.createMediaStreamDestination();
    outputMix.connect(audioOutput);
    outputMix.connect(analyser2);

    crossfade(1.0);
    changeEffect();
}
function crossfade(value) {
    // Equal-power crossfade between the dry and wet paths
    var gain1 = Math.cos(value * 0.5 * Math.PI);
    var gain2 = Math.cos((1.0 - value) * 0.5 * Math.PI);
    dryGain.gain.value = gain1;
    wetGain.gain.value = gain2;
}
function createPitchShifter() {
    // Jungle is the pitch-shifting effect from the Web Audio API samples
    effect = new Jungle(audioContext);
    effect.output.connect(wetGain);
    effect.setPitchOffset(1);
    return effect.input;
}
function changeEffect() {
    if (currentEffectNode)
        currentEffectNode.disconnect();
    if (effectInput)
        effectInput.disconnect();

    var effect = 'pitch';
    switch (effect) {
        case 'pitch':
            currentEffectNode = createPitchShifter();
            break;
    }
    audioInput.connect(currentEffectNode);
}
Error when adding the LocalAudioTrack to the room:
var mediaStream = new Twilio.Video.LocalAudioTrack(audioOutput.stream);

room.localParticipant.publishTrack(mediaStream, {
    name: 'adminaudio'
});
Error: Uncaught (in promise) TypeError: Failed to execute 'addTrack' on 'MediaStream': parameter 1 is not of type 'MediaStreamTrack'.
Answer 0 (score: 0)
Twilio developer evangelist here.
Twilio does not have anything itself that will pitch-shift the voice.
If you are building this in the browser, then you could use the Web Audio API to take the input from the user's microphone and pitch-shift it, then provide the resulting audio stream to the Video API instead of the original microphone stream.
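A minimal sketch of that approach, assuming a connected room and with the actual pitch-shifting node left as a placeholder (createPitchShifterNode below is a hypothetical helper, e.g. wrapping the Jungle effect from the question's sample code), might look like this:

// Sketch only: route the microphone through a pitch shifter and publish the processed track.
const audioContext = new AudioContext();
const micStream = await navigator.mediaDevices.getUserMedia({ audio: true });

const source = audioContext.createMediaStreamSource(micStream);
const destination = audioContext.createMediaStreamDestination();

// Placeholder: any Web Audio pitch-shifting node/graph goes here (hypothetical helper).
const pitchShifter = createPitchShifterNode(audioContext);
source.connect(pitchShifter);
pitchShifter.connect(destination);

// Publish the processed MediaStreamTrack (not the MediaStream) to the room.
const processedTrack = new Twilio.Video.LocalAudioTrack(
    destination.stream.getAudioTracks()[0],
    { name: 'adminaudio' }
);
await room.localParticipant.publishTrack(processedTrack);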
Answer 1 (score: 0)
The comments on the answer above were very helpful! I had been researching this for weeks, posted to twilio-video.js to no avail, and finally the right wording of the question brought this up on S.O.!
But to summarize, and to add what I found to work, since it is hard to follow all of the 27 questions/comments/code excerpts:
When connecting to Twilio:
const room = await Video.connect(twilioToken, {
    name: roomName,
    tracks: localTracks,
    audio: false, // if you don't want to hear the normal voice at all, you can hide this and add the shifted track upon participant connections
    video: true,
    logLevel: "debug",
});
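(localTracks above is not shown in the answer; one way it might be created, assuming only a camera track is wanted up front because the shifted audio is attached separately, is with Video.createLocalTracks:)

// Sketch: create only a local video track; audio is handled by the shifted track later.
const localTracks = await Video.createLocalTracks({
    audio: false,
    video: true,
});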
On a new (remote) participant connection:
const stream = new MediaStream([audioTrack.mediaStreamTrack]);

const audioContext = new AudioContext();
const audioInput = audioContext.createMediaStreamSource(stream);
// Destination node whose .stream will carry the pitch-shifted audio
const audioOutput = audioContext.createMediaStreamDestination();

console.log("using PitchShift.js");
var pitchShift = PitchShift(audioContext);

// pitchVal, freqVal and gainVal are user-supplied settings
if (isFinite(pitchVal)) {
    pitchShift.transpose = pitchVal;
    console.log("pitch is " + pitchVal);
}
pitchShift.wet.value = 1;
pitchShift.dry.value = 0.5;

try {
    audioOutput.stream.getAudioTracks()[0]?.applyConstraints({
        echoCancellation: true,
        noiseSuppression: true,
    });
} catch (e) {
    console.log("tried to constrain audio track " + e);
}

var biquadFilter = audioContext.createBiquadFilter();

// Create a compressor node
var compressor = audioContext.createDynamicsCompressor();
compressor.threshold.setValueAtTime(-50, audioContext.currentTime);
compressor.knee.setValueAtTime(40, audioContext.currentTime);
compressor.ratio.setValueAtTime(12, audioContext.currentTime);
compressor.attack.setValueAtTime(0, audioContext.currentTime);
compressor.release.setValueAtTime(0.25, audioContext.currentTime);

// biquadFilter.type = "lowpass";
if (isFinite(freqVal)) {
    biquadFilter.frequency.value = freqVal;
    console.log("frequency is " + freqVal);
}
if (isFinite(gainVal)) {
    biquadFilter.gain.value = gainVal;
    console.log("gain is " + gainVal);
}

// Wire the graph: remote audio -> compressor -> filter -> pitch shift -> destination
audioInput.connect(compressor);
compressor.connect(biquadFilter);
biquadFilter.connect(pitchShift);
pitchShift.connect(audioOutput);

// Wrap the processed track and play it locally instead of the original
const localAudioWarpedTracks = new Video.LocalAudioTrack(audioOutput.stream.getAudioTracks()[0]);

const audioElement2 = document.createElement("audio");
document.getElementById("audio_div").appendChild(audioElement2);
localAudioWarpedTracks.attach(audioElement2);
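The block above assumes you already have the remote participant's audioTrack; one place to run it, assuming twilio-video.js 2.x, is a trackSubscribed handler on the room, for example:

// Sketch: apply the pitch-shifting chain whenever a remote audio track is subscribed.
// applyPitchShift is a hypothetical wrapper around the processing code shown above.
room.on("trackSubscribed", (track, publication, participant) => {
    if (track.kind === "audio") {
        applyPitchShift(track);
    }
});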