我正在开发一个MVC ASP.Net 4 HTML5的项目(默认浏览器是google-chrome v29.0.1547.57)我可以与这些工具互动并拍摄照片,但只能使用前置摄像头,我如何可以启用后置摄像头吗? 平板电脑的特点:三星Galaxy Tab 2 我希望你能帮助我
答案 0 :(得分:47)
查看https://simpl.info/getusermedia/sources/,了解如何使用
选择来源MediaStreamTrack.getSources(gotSources);
然后,您可以选择源并将其作为可选项传入getUserMedia
// Build the (legacy) "optional sourceId" constraint for a single track.
// This is the pre-spec Chrome syntax that pairs with
// MediaStreamTrack.getSources(); modern code should use
// MediaDevices.enumerateDevices() + deviceId instead.
function sourceConstraint(id) {
  return { optional: [{ sourceId: id }] };
}

// Select the audio and video sources, then hand them to getUserMedia.
var constraints = {
  audio: sourceConstraint(audioSource),
  video: sourceConstraint(videoSource)
};
navigator.getUserMedia(constraints, successCallback, errorCallback);
现在完全可以在稳定的Chrome和移动版中使用(截至第30版)
答案 1 :(得分:24)
可以在https://webrtc.github.io/samples/src/content/devices/input-output/找到演示。这样可以访问前置和后置摄像头。
您会发现许多演示依赖于已弃用的功能:
MediaStreamTrack.getSources()
从Chrome 45和FireFox 39开始,您需要使用以下功能:
MediaDevices.enumerateDevices()
示例:
if (!navigator.mediaDevices || !navigator.mediaDevices.enumerateDevices) {
console.log("enumerateDevices() not supported.");
return;
}
// List cameras and microphones.
navigator.mediaDevices.enumerateDevices()
.then(function(devices) {
devices.forEach(function(device) {
console.log(device.kind + ": " + device.label +
" id = " + device.deviceId);
});
})
.catch(function(e) {
console.log(e.name + ": " + e.message);
});

可在此处找到更多文档:https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/enumerateDevices
答案 2 :(得分:7)
在我的 Samsung S8 上的 Chrome 中,我可以通过把 facingMode 设为 "environment" 来使用后置摄像头拍摄视频。默认值似乎是 "user"(即前置摄像头)。
TypeScript 示例代码:
const video = document.getElementById("video");
// "environment" requests the rear-facing camera; the default facing
// mode is "user" (the front-facing camera).
const constraints = {
  advanced: [{
    facingMode: "environment"
  }]
};
navigator.mediaDevices
  .getUserMedia({
    video: constraints
  })
  .then((stream) => {
    // BUG FIX: URL.createObjectURL(MediaStream) was deprecated and then
    // removed from modern browsers (Chrome 71+); attach the stream via
    // the standard srcObject property instead.
    video.srcObject = stream;
    video.play();
  })
  // The original chain had no rejection handler; surface permission /
  // constraint errors instead of swallowing them.
  .catch((err) => console.error(err));
答案 3 :(得分:0)
//----------------------------------------------------------------------
// List all media devices in order to choose between the front and the
// back camera.
//   videoDevices[0] : front camera
//   videoDevices[1] : back camera
// The deviceIds are collected with devices.forEach(), then the video
// resolution is set and getUserMedia is called with the back camera.
// NOTE(review): the 0/1 ordering of enumerateDevices() results is not
// guaranteed by the spec — confirm on the target device.
//----------------------------------------------------------------------
navigator.mediaDevices.enumerateDevices()
  .then(devices => {
    var videoDevices = [0, 0];
    var videoDeviceIndex = 0;
    devices.forEach(function(device) {
      console.log(device.kind + ": " + device.label +
        " id = " + device.deviceId);
      if (device.kind == "videoinput") {
        videoDevices[videoDeviceIndex++] = device.deviceId;
      }
    });
    // NOTE(review): height min 776 looks like a typo for 768/720 — confirm.
    var constraints = {width: { min: 1024, ideal: 1280, max: 1920 },
      height: { min: 776, ideal: 720, max: 1080 },
      deviceId: { exact: videoDevices[1] }
    };
    return navigator.mediaDevices.getUserMedia({ video: constraints });
  })
  .then(stream => {
    // BUG FIX: the original tested window.webkitURL FIRST, so modern
    // Chrome took the URL.createObjectURL(MediaStream) path — an API
    // that has since been removed. Prefer the standard srcObject
    // property and keep the legacy paths only as fallbacks.
    if (video.srcObject !== undefined) {
      video.srcObject = stream;
    } else if (video.mozSrcObject !== undefined) {
      video.mozSrcObject = stream;
    } else if (window.webkitURL) {
      video.src = window.webkitURL.createObjectURL(stream);
    } else {
      video.src = stream;
    }
    // Set consistently (the original only set it in the webkitURL branch).
    localMediaStream = stream;
  })
  .catch(e => console.error(e));
答案 4 :(得分:0)
上次我改进了这段代码,下面是我现在使用的版本:你可以在自己的代码中直接调用同一个函数,并通过参数指定使用哪个摄像头:"user"(前置)、"environment"(后置),或者在电脑上运行时传入 "computer"。
//----------------------------------------------------------------------
// whichCamera(type)
// For a smartphone or tablet:
//   starts the type = {"user", "environment"} camera.
// For a computer it is simpler:
//   type = "computer" (uses the default camera).
//----------------------------------------------------------------------
var streamSrc, cameraType;
function whichCamera(type) {
  var cameraFacing;
  cameraType = type;
  // Map the requested facing mode to an index into videoDevices below.
  if (type == "user")
    cameraFacing = 0;
  else if (type == "environment")
    cameraFacing = 1;
  else if (type == "computer") {
    cameraFacing = 2;
  }
  console.log(type + " index : " + cameraFacing);
  // List all media devices in order to choose between the front and the
  // rear camera, then set the video resolution.
  //   videoDevices[0] : user (front) camera
  //   videoDevices[1] : environment (rear) camera
  navigator.mediaDevices.enumerateDevices()
    .then(devices => {
      var videoDevices, videoDeviceIndex, constraints;
      // Array that will hold the deviceIds of all video inputs; most
      // devices expose two (front & rear camera).
      videoDevices = [0, 0];
      videoDeviceIndex = 0;
      // Walk every media resource (audio and video) of the device
      // the application runs on, keeping only video inputs.
      devices.forEach(function(device) {
        console.log(device.kind + ": " + device.label +
          " id = " + device.deviceId);
        if (device.kind == "videoinput") {
          videoDevices[videoDeviceIndex++] = device.deviceId;
          console.log(device.deviceId + " = " + videoDevices[videoDeviceIndex - 1]);
        }
      });
      console.log("Camera facing =" + cameraFacing + " ID = " + videoDevices[videoDeviceIndex - 1]);
      // BUG FIX: the original tested `cameraFacing != "computer"`, which
      // compares a NUMBER against a string and is therefore always true,
      // making the "computer" branch unreachable. Test the requested
      // type instead.
      if (cameraType != "computer") {
        // BUG FIX: width/height were passed as top-level siblings of
        // `video` in the getUserMedia argument, where they are silently
        // ignored; they must live inside the video constraint object.
        // Also clamp ideal height to max (the original had ideal 1200
        // with max 1080, which is internally inconsistent).
        constraints = {
          deviceId: { exact: videoDevices[cameraFacing] },
          width: { min: 1280, ideal: 1600, max: 1920 },
          height: { min: 720, ideal: 1080, max: 1080 }
        };
        return navigator.mediaDevices.getUserMedia({ video: constraints });
      } else {
        // On a computer simply take the default camera.
        return navigator.mediaDevices.getUserMedia({ video: true });
      }
    })
    // Then retrieve the link to the video stream. Prefer the standard
    // srcObject property; URL.createObjectURL(MediaStream) was removed
    // from modern browsers, so the legacy paths are fallbacks only.
    .then(stream => {
      if (video.srcObject !== undefined) {
        video.srcObject = stream;
        console.log(video.srcObject + " = " + stream);
      } else if (video.mozSrcObject !== undefined) {
        video.mozSrcObject = stream;
        console.log(video.mozSrcObject + " = " + stream);
      } else if (window.webkitURL) {
        video.src = window.webkitURL.createObjectURL(stream);
        localMediaStream = stream;
        console.log(localMediaStream + " = " + stream);
      } else {
        video.src = stream;
        console.log(video.src + " = " + stream);
      }
      streamSrc = stream;
    })
    .catch(e => console.error(e));
}