我目前正在尝试在观看者授予网络摄像头许可后3秒钟显示警报。警报也应该与它们有音频,因为它们应该同时被触发。
网络摄像头具有面部跟踪功能,因此有很多额外的代码。
以前我问过这个问题,并且已经得到了解答:"why are these alerts not appearing?"。
虽然我不得不在其文件夹中移动一些文件,但是要收集特定的音频文件。这对我来说是愚蠢的,因为现在这并没有产生与我之前的问题所回答的结果相同的结果。
由于之前的回答是将delayedAlert()附加到window.onload,现在正在使网络摄像头视频不显示。
有什么解决这个问题的建议吗?我不太懂 JavaScript,所以直观的代码示例会比纯文字说明对我更有帮助。
这是我正在使用的当前代码:
<!doctype html>
<html>
<head>
<meta charset="utf-8">
<title>eve_</title>
<link rel="stylesheet" href="assets/demo.css">
<link rel="icon" href="../images/evecircle.png" />
<script src="../build/tracking-min.js"></script>
<script src="../build/data/face-min.js"></script>
<script src="../node_modules/dat.gui/build/dat.gui.min.js"></script>
<script src="https://code.responsivevoice.org/responsivevoice.js"></script>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>
<style>
video, canvas {
margin-left: 70px;
margin-top: 50px;
position: absolute;
}
</style>
<script>
var timeoutID;
function delayedAlert() {
timeoutID = window.setTimeout(slowAlert, 3000);
}
function slowAlert() {
var audio= document.getElementsByTagName('audio')[0];
const audio2 = document.getElementsByTagName('audio')[1];
var audio3 = document.getElementsByTagName('audio')[2];
var audio4 = document.getElementsByTagName('audio')[3];
audio.play();
var myvar1;alert('Oh, there is more of you.');
audio2.play();
var myvar1;alert('Why are there multiple yous?');
audio3.play();
const name = prompt('What is your name?')
const sentence = 'Hello,' + name +'.I am Eve. ....'+name+name+name+'.'+name+'is human...Ive never seen a. human....What does' +name +' look like?';
responsiveVoice.speak(sentence, "US English Female", {
rate: 0.7}
</script>
<script>
window.onload = function() {
var video = document.getElementById('video');
var canvas = document.getElementById('canvas');
var context = canvas.getContext('2d');
var tracker = new tracking.ObjectTracker('face');
tracker.setInitialScale(4);
tracker.setStepSize(2);
tracker.setEdgesDensity(0.1);
tracking.track('#video', tracker, { camera: true });
tracker.on('track', function(event) {
context.clearRect(0, 0, canvas.width, canvas.height);
event.data.forEach(function(rect) {
context.strokeStyle = '#a64ceb';
context.strokeRect(rect.x, rect.y, rect.width, rect.height);
context.font = '11px Helvetica';
context.fillStyle = "#fff";
context.fillText('x: ' + rect.x + 'px', rect.x + rect.width + 5, rect.y + 11);
context.fillText('y: ' + rect.y + 'px', rect.x + rect.width + 5, rect.y + 22);
});
});
var gui = new dat.GUI();
gui.add(tracker, 'edgesDensity', 0.1, 0.5).step(0.01);
gui.add(tracker, 'initialScale', 1.0, 10.0).step(0.1);
gui.add(tracker, 'stepSize', 1, 5).step(0.1);
};
</script>
</head>
<body>
<div class="demo-container">
<video id="video" width="920" height="540" preload autoplay loop muted></video>
<canvas id="canvas" width="920" height="540"></canvas>
</div>
<audio preload="auto">
<source src="../audio/multiple_yous.wav" type="audio/wav">
</audio>
<audio preload="auto">
<source src="../audio/is_something_here.wav" type="audio/wav">
</audio>
<audio preload="auto">
<source src="../audio/oh_something_is_here.wav" type="audio/wav">
</audio>
</body>
</html>