I want to record the user's webcam and audio and save them to a file on the server. These files could then be served to other users.
I have no problem with playback, but I am having trouble recording the content.
My understanding is that the getUserMedia .record() function has not been written yet; only a proposal has been put forward so far.
I would like to create a peer connection on my server using the PeerConnection API. I know this is a bit hacky, but I think it should be possible to create a peer on the server and record what the client peer sends.
If that works, I should be able to save the data as FLV or any other video format.
My preference would actually be to record the webcam + audio client-side, which would let the client re-record the video before uploading if they don't like their first attempt, and would also tolerate interruptions in the network connection. I have seen some code that can record individual "images" from the webcam by sending the data to a canvas; that's cool, but I also need the audio. A rough sketch of that canvas approach is shown below.
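For reference, the canvas-based frame capture mentioned above typically looks something like this (a minimal, hypothetical sketch; the names are made up, and it captures still frames only, with no audio):
<script>
// Hypothetical sketch: grab still frames from a playing <video> element via a canvas.
// This captures images only; audio has to be handled separately.
var frameCanvas = document.createElement('canvas');
var frameCtx = frameCanvas.getContext('2d');

function captureFrame(videoElement) {
  frameCanvas.width = videoElement.videoWidth;
  frameCanvas.height = videoElement.videoHeight;
  frameCtx.drawImage(videoElement, 0, 0, frameCanvas.width, frameCanvas.height);
  return frameCanvas.toDataURL('image/png'); // one frame as a data URL
}
</script>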
Here is my client-side code so far:
<video autoplay></video>
<script language="javascript" type="text/javascript">
function onVideoFail(e) {
console.log('webcam fail!', e);
};
function hasGetUserMedia() {
// Note: Opera is unprefixed.
return !!(navigator.getUserMedia || navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia || navigator.msGetUserMedia);
}
if (hasGetUserMedia()) {
// Good to go!
} else {
alert('getUserMedia() is not supported in your browser');
}
window.URL = window.URL || window.webkitURL;
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia || navigator.msGetUserMedia;
var video = document.querySelector('video');
var streamRecorder;
var webcamstream;
if (navigator.getUserMedia) {
navigator.getUserMedia({audio: true, video: true}, function(stream) {
video.src = window.URL.createObjectURL(stream);
webcamstream = stream;
// streamrecorder = webcamstream.record();
}, onVideoFail);
} else {
alert ('failed');
}
function startRecording() {
streamRecorder = webcamstream.record();
setTimeout(stopRecording, 10000);
}
function stopRecording() {
streamRecorder.getRecordedData(postVideoToServer);
}
function postVideoToServer(videoblob) {
/* var x = new XMLHttpRequest();
x.open('POST', 'uploadMessage');
x.send(videoblob);
*/
var data = {};
data.video = videoblob;
data.metadata = 'test metadata';
data.action = "upload_video";
jQuery.post("http://www.foundthru.co.uk/uploadvideo.php", data, onUploadSuccess);
}
function onUploadSuccess() {
alert ('video uploaded');
}
</script>
<div id="webcamcontrols">
<a class="recordbutton" href="javascript:startRecording();">RECORD</a>
</div>
Answer 0 (Score: 44)
You should definitely have a look at Kurento. It provides a WebRTC media server infrastructure that lets you record from a WebRTC feed, among many other things. You can also find some examples for the application you are planning here. Adding recording capabilities to that demo is quite easy, and the media file is stored in a URI (on local disk or wherever).
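For illustration, a server-side recording pipeline built with the kurento-client Node.js module could look roughly like this (a minimal sketch; the WebSocket URI, the recording path and the omitted SDP negotiation are assumptions, not a working configuration):
// Sketch: record whatever a browser's WebRTC endpoint sends, using kurento-client.
var kurentoClient = require('kurento-client');

kurentoClient('ws://localhost:8888/kurento', function(error, client) {
  if (error) return console.error(error);

  client.create('MediaPipeline', function(error, pipeline) {
    if (error) return console.error(error);

    pipeline.create('WebRtcEndpoint', function(error, webRtcEndpoint) {
      if (error) return console.error(error);

      pipeline.create('RecorderEndpoint', { uri: 'file:///tmp/recording.webm' },
        function(error, recorder) {
          if (error) return console.error(error);

          // Feed the incoming WebRTC media into the recorder and start recording.
          webRtcEndpoint.connect(recorder, function(error) {
            if (error) return console.error(error);
            recorder.record();
          });

          // The SDP offer/answer exchange with the browser is omitted here, e.g.:
          // webRtcEndpoint.processOffer(sdpOffer, function(error, sdpAnswer) { ... });
        });
    });
  });
});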
The project is released under the Apache 2.0 license (previously LGPL).
EDIT 1
Since this post was written, we have added a new tutorial showing how to add the recorder in several scenarios.
Disclaimer: I'm part of the team developing Kurento.
Answer 1 (Score: 15)
Answer 2 (Score: 9)
I think using Kurento or any other MCU just to record videos would be a bit of overkill, especially considering that Chrome has had MediaRecorder API support since v47 and Firefox since v25. So at this point you may not even need an external JS library to do the job; try this demo that records video/audio using MediaRecorder:
Demo: works in Chrome and Firefox (pushing the blob to the server is intentionally left out; a hedged sketch of that upload step is shown after the snippet)
If you are running Firefox, you can test it here (Chrome requires HTTPS):
'use strict'
let log = console.log.bind(console),
id = val => document.getElementById(val),
ul = id('ul'),
gUMbtn = id('gUMbtn'),
start = id('start'),
stop = id('stop'),
stream,
recorder,
counter = 1,
chunks,
media;
gUMbtn.onclick = e => {
let mv = id('mediaVideo'),
mediaOptions = {
video: {
tag: 'video',
type: 'video/webm',
ext: '.webm', // extension should match the video/webm container
gUM: {
video: true,
audio: true
}
},
audio: {
tag: 'audio',
type: 'audio/ogg',
ext: '.ogg',
gUM: {
audio: true
}
}
};
media = mv.checked ? mediaOptions.video : mediaOptions.audio;
navigator.mediaDevices.getUserMedia(media.gUM).then(_stream => {
stream = _stream;
id('gUMArea').style.display = 'none';
id('btns').style.display = 'inherit';
start.removeAttribute('disabled');
recorder = new MediaRecorder(stream);
recorder.ondataavailable = e => {
chunks.push(e.data);
if (recorder.state == 'inactive') makeLink();
};
log('got media successfully');
}).catch(log);
}
start.onclick = e => {
start.disabled = true;
stop.removeAttribute('disabled');
chunks = [];
recorder.start();
}
stop.onclick = e => {
stop.disabled = true;
recorder.stop();
start.removeAttribute('disabled');
}
function makeLink() {
let blob = new Blob(chunks, {
type: media.type
}),
url = URL.createObjectURL(blob),
li = document.createElement('li'),
mt = document.createElement(media.tag),
hf = document.createElement('a');
mt.controls = true;
mt.src = url;
hf.href = url;
hf.download = `${counter++}${media.ext}`;
hf.innerHTML = `download ${hf.download}`;
li.appendChild(mt);
li.appendChild(hf);
ul.appendChild(li);
}
button {
margin: 10px 5px;
}
li {
margin: 10px;
}
body {
width: 90%;
max-width: 960px;
margin: 0px auto;
}
#btns {
display: none;
}
h1 {
margin-bottom: 100px;
}
<link type="text/css" rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/css/bootstrap.min.css">
<h1> MediaRecorder API example</h1>
<p>For now it is supported only in Firefox(v25+) and Chrome(v47+)</p>
<div id='gUMArea'>
<div>
Record:
<input type="radio" name="media" value="video" checked id='mediaVideo'>Video
<input type="radio" name="media" value="audio">audio
</div>
<button class="btn btn-default" id='gUMbtn'>Request Stream</button>
</div>
<div id='btns'>
<button class="btn btn-default" id='start'>Start</button>
<button class="btn btn-default" id='stop'>Stop</button>
</div>
<div>
<ul class="list-unstyled" id='ul'></ul>
</div>
<script src="https://code.jquery.com/jquery-2.2.0.min.js"></script>
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/js/bootstrap.min.js"></script>
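The upload step that the demo intentionally leaves out could look roughly like this (a sketch; 'upload.php' and the 'video' field name are assumptions, and the server side is up to you):
// Hypothetical sketch: POST the recorded Blob to the server once recording has finished.
function uploadBlob(blob) {
  var form = new FormData();
  // Field name and file name are arbitrary; the server decides what it expects.
  form.append('video', blob, 'recording' + media.ext);

  fetch('upload.php', { method: 'POST', body: form })
    .then(function(response) {
      if (!response.ok) throw new Error('upload failed: ' + response.status);
      log('upload finished');
    })
    .catch(log);
}

// For example, call uploadBlob(blob) at the end of makeLink() once the Blob has been assembled.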
Answer 3 (Score: 7)
Yes, as you have understood, MediaStreamRecorder is currently unimplemented.
MediaStreamRecorder is a WebRTC API for recording getUserMedia() streams. It allows web apps to create a file from a live audio/video session.
Alternatively, you may like this: http://ericbidelman.tumblr.com/post/31486670538/creating-webm-video-from-getusermedia, but audio is the missing part.
Answer 4 (Score: 4)
You can use RecordRTC-together, which is based on RecordRTC.
It supports recording video and audio together, in separate files. You will need a tool like ffmpeg to merge the two files on the server; a hedged sketch of that step is shown below.
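A server-side merge with ffmpeg could look roughly like this (a Node.js sketch; the file names and codec choice are assumptions, and ffmpeg must be installed on the server):
// Hypothetical sketch: mux a separate video file and audio file into one output with ffmpeg.
var execFile = require('child_process').execFile;

function mergeRecordings(videoPath, audioPath, outputPath, callback) {
  // Copy the video track as-is and encode the audio track as Vorbis for a WebM output.
  var args = ['-i', videoPath, '-i', audioPath, '-c:v', 'copy', '-c:a', 'libvorbis', outputPath];
  execFile('ffmpeg', args, function(error, stdout, stderr) {
    callback(error, outputPath);
  });
}

// mergeRecordings('video.webm', 'audio.wav', 'merged.webm', function(err, file) { /* ... */ });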
Answer 5 (Score: 1)
Web Call Server 4 can record WebRTC audio and video to a WebM container. Recording is done with the Vorbis codec for audio and the VP8 codec for video. The initial WebRTC codecs are Opus or G.711 and VP8, so server-side recording requires Opus/G.711-to-Vorbis transcoding on the server, or VP8-to-H.264 transcoding if another container (e.g. AVI) is needed.
Answer 6 (Score: 1)
Check out Janus. Here is a recording demo:
https://janus.conf.meetecho.com/recordplaytest.html
Unlike Kurento, whose development has slowed down considerably since the Twilio acquisition, Janus continues to be actively developed and supported.
Answer 7 (Score: 0)
For the record, I don't have enough knowledge about this myself, but I found the following on GitHub:
<!DOCTYPE html>
<html>
<head>
<title>XSockets.WebRTC Client example</title>
<meta charset="utf-8" />
<style>
body {
}
.localvideo {
position: absolute;
right: 10px;
top: 10px;
}
.localvideo video {
max-width: 240px;
width:100%;
margin-right:auto;
margin-left:auto;
border: 2px solid #333;
}
.remotevideos {
height:120px;
background:#dadada;
padding:10px;
}
.remotevideos video{
max-height:120px;
float:left;
}
</style>
</head>
<body>
<h1>XSockets.WebRTC Client example </h1>
<div class="localvideo">
<video autoplay></video>
</div>
<h2>Remote videos</h2>
<div class="remotevideos">
</div>
<h2>Recordings ( Click on your camera stream to start record)</h2>
<ul></ul>
<h2>Trace</h2>
<div id="immediate"></div>
<script src="XSockets.latest.js"></script>
<script src="adapter.js"></script>
<script src="bobBinder.js"></script>
<script src="xsocketWebRTC.js"></script>
<script>
var $ = function (selector, el) {
if (!el) el = document;
return el.querySelector(selector);
}
var trace = function (what, obj) {
var pre = document.createElement("pre");
pre.textContent = JSON.stringify(what) + " - " + JSON.stringify(obj || "");
$("#immediate").appendChild(pre);
};
var main = (function () {
var broker;
var rtc;
trace("Ready");
trace("Try connect the connectionBroker");
var ws = new XSockets.WebSocket("wss://rtcplaygrouund.azurewebsites.net:443", ["connectionbroker"], {
ctx: '23fbc61c-541a-4c0d-b46e-1a1f6473720a'
});
var onError = function (err) {
trace("error", arguments);
};
var recordMediaStream = function (stream) {
if ("MediaRecorder" in window === false) {
trace("Recorder not started MediaRecorder not available in this browser. ");
return;
}
var recorder = new XSockets.MediaRecorder(stream);
recorder.start();
trace("Recorder started.. ");
recorder.oncompleted = function (blob, blobUrl) {
trace("Recorder completed.. ");
var li = document.createElement("li");
var download = document.createElement("a");
download.textContent = new Date();
download.setAttribute("download", XSockets.Utils.randomString(8) + ".webm");
download.setAttribute("href", blobUrl);
li.appendChild(download);
$("ul").appendChild(li);
};
};
var addRemoteVideo = function (peerId, mediaStream) {
var remoteVideo = document.createElement("video");
remoteVideo.setAttribute("autoplay", "autoplay");
remoteVideo.setAttribute("rel", peerId);
attachMediaStream(remoteVideo, mediaStream);
$(".remotevideos").appendChild(remoteVideo);
};
var onConnectionLost = function (remotePeer) {
trace("onconnectionlost", arguments);
var peerId = remotePeer.PeerId;
var videoToRemove = $("video[rel='" + peerId + "']");
$(".remotevideos").removeChild(videoToRemove);
};
var oncConnectionCreated = function () {
console.log(arguments, rtc);
trace("oncconnectioncreated", arguments);
};
var onGetUerMedia = function (stream) {
trace("Successfully got some userMedia , hopefully a goat will appear..");
rtc.connectToContext(); // connect to the current context?
};
var onRemoteStream = function (remotePeer) {
addRemoteVideo(remotePeer.PeerId, remotePeer.stream);
trace("Opps, we got a remote stream. lets see if its a goat..");
};
var onLocalStream = function (mediaStream) {
trace("Got a localStream", mediaStream.id);
attachMediaStream($(".localvideo video "), mediaStream);
// if user click, video , call the recorder
$(".localvideo video ").addEventListener("click", function () {
recordMediaStream(rtc.getLocalStreams()[0]);
});
};
var onContextCreated = function (ctx) {
trace("RTC object created, and a context is created - ", ctx);
rtc.getUserMedia(rtc.userMediaConstraints.hd(false), onGetUerMedia, onError);
};
var onOpen = function () {
trace("Connected to the brokerController - 'connectionBroker'");
rtc = new XSockets.WebRTC(this);
rtc.onlocalstream = onLocalStream;
rtc.oncontextcreated = onContextCreated;
rtc.onconnectioncreated = oncConnectionCreated;
rtc.onconnectionlost = onConnectionLost;
rtc.onremotestream = onRemoteStream;
rtc.onanswer = function (event) {
};
rtc.onoffer = function (event) {
};
};
var onConnected = function () {
trace("connection to the 'broker' server is established");
trace("Try get the broker controller form server..");
broker = ws.controller("connectionbroker");
broker.onopen = onOpen;
};
ws.onconnected = onConnected;
});
document.addEventListener("DOMContentLoaded", main);
</script>
In the code above (around line 89 in my case), the recorder.oncompleted handler actually attaches a download link for the recorded file; if you click that link the download starts, and you can then save that path to your server as a file.
The recording-completion code looks like this:
recorder.oncompleted = function (blob, blobUrl) {
trace("Recorder completed.. ");
var li = document.createElement("li");
var download = document.createElement("a");
download.textContent = new Date();
download.setAttribute("download", XSockets.Utils.randomString(8) + ".webm");
download.setAttribute("href", blobUrl);
li.appendChild(download);
$("ul").appendChild(li);
};
blobUrl holds the path. This solved the problem for me; I hope someone finds it useful.
Answer 8 (Score: -4)
Technically, you could use FFMPEG on the backend to mix the video and audio.