I have tried to run the JavaScript sample code taken from jsfiddle on my own computer, and I find that I never get a callback from the detector when it tries to connect to the camera.
I see the first log message and click the Start button, but I never get the second message saying that webcam access was allowed or denied. The code is almost identical to the jsfiddle that does access my webcam. When I run it on localhost, the detector never seems to invoke the onWebcamConnectSuccess / Failure callbacks.
I don't see any errors in the console, and I am running on localhost with a web server (Tomcat).
Here is my HTML file, which loads the Affdex SDK and my script code js/testaffectiva.js:
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>Title</title>
    <script src="https://code.jquery.com/jquery.js" type="text/javascript"></script>
    <link href="css/bootstrap.min.css" rel="stylesheet" type="text/css">
    <script src="js/bootstrap.min.js" type="text/javascript"></script>
    <script src="https://download.affectiva.com/js/3.1/affdex.js" type="text/javascript"></script>
    <script src="js/testaffectiva.js" type="text/javascript"></script>
</head>
<body>
<div class="container-fluid">
    <div class="row">
        <div class="col-md-8" id="affdex_elements" style="width:680px;height:480px;"></div>
        <div class="col-md-4">
            <div style="height:25em;">
                <strong>EMOTION TRACKING RESULTS</strong>
                <div id="results" style="word-wrap:break-word;"></div>
            </div>
            <div>
                <strong>DETECTOR LOG MSGS</strong>
            </div>
            <div id="logs"></div>
        </div>
    </div>
    <div>
        <button id="start" onclick="onStart()">Start</button>
        <button id="stop" onclick="onStop()">Stop</button>
        <button id="reset" onclick="onReset()">Reset</button>
        <h3>Affectiva JS SDK CameraDetector to track different emotions.</h3>
        <p>
            <strong>Instructions</strong>
            <br/>
            Press the start button to start the detector.
            <br/> When a face is detected, the probabilities of the different emotions are written to the DOM.
            <br/> Press the stop button to end the detector.
        </p>
    </div>
</div>
</body>
</html>
js/testaffectiva.js:
/**
 * Created by david on 10/6/2016.
 */
// The SDK needs to create video and canvas nodes in the DOM in order to function.
// Here we are adding those nodes to a predefined div.
var divRoot = $("#affdex_elements")[0];
var width = 640;
var height = 480;
var faceMode = affdex.FaceDetectorMode.LARGE_FACES;
// Construct a CameraDetector and specify the image width/height and face detector mode.
var detector = new affdex.CameraDetector(divRoot, width, height, faceMode);
// Enable detection of all Expressions, Emotions and Emojis classifiers.
detector.detectAllEmotions();
detector.detectAllExpressions();
detector.detectAllEmojis();
detector.detectAllAppearance();
// Add a callback to notify when the detector is initialized and ready for running.
detector.addEventListener("onInitializeSuccess", function() {
    log('#logs', "The detector reports initialized");
    // Display the canvas instead of the video feed because we want to draw the feature points on it.
    $("#face_video_canvas").css("display", "block");
    $("#face_video").css("display", "none");
});

function log(node_name, msg) {
    $(node_name).append("<span>" + msg + "</span><br />");
}

// Executes when the Start button is pushed.
function onStart() {
    if (detector && !detector.isRunning) {
        $("#logs").html("");
        detector.start();
    }
    log('#logs', "Clicked the start button");
}

// Executes when the Stop button is pushed.
function onStop() {
    log('#logs', "Clicked the stop button");
    if (detector && detector.isRunning) {
        detector.removeEventListener();
        detector.stop();
    }
}

// Executes when the Reset button is pushed.
function onReset() {
    log('#logs', "Clicked the reset button");
    if (detector && detector.isRunning) {
        detector.reset();
        $('#results').html("");
    }
}

// Add a callback to notify when camera access is allowed.
detector.addEventListener("onWebcamConnectSuccess", function() {
    log('#logs', "Webcam access allowed");
});

// Add a callback to notify when camera access is denied.
detector.addEventListener("onWebcamConnectFailure", function() {
    log('#logs', "webcam denied");
    console.log("Webcam access denied");
});

// Add a callback to notify when the detector is stopped.
detector.addEventListener("onStopSuccess", function() {
    log('#logs', "The detector reports stopped");
    $("#results").html("");
});

// Add a callback to receive the results from processing an image.
// The faces object contains the list of faces detected in the image, with
// probabilities for all the different expressions, emotions and appearance metrics.
detector.addEventListener("onImageResultsSuccess", function(faces, image, timestamp) {
    $('#results').html("");
    log('#results', "Timestamp: " + timestamp.toFixed(2));
    log('#results', "Number of faces found: " + faces.length);
    if (faces.length > 0) {
        log('#results', "Appearance: " + JSON.stringify(faces[0].appearance));
        log('#results', "Emotions: " + JSON.stringify(faces[0].emotions, function(key, val) {
            return val.toFixed ? Number(val.toFixed(0)) : val;
        }));
        log('#results', "Expressions: " + JSON.stringify(faces[0].expressions, function(key, val) {
            return val.toFixed ? Number(val.toFixed(0)) : val;
        }));
        log('#results', "Emoji: " + faces[0].emojis.dominantEmoji);
        drawFeaturePoints(image, faces[0].featurePoints);
    }
});

// Draw the detected facial feature points on the image.
function drawFeaturePoints(img, featurePoints) {
    var contxt = $('#face_video_canvas')[0].getContext('2d');
    var hRatio = contxt.canvas.width / img.width;
    var vRatio = contxt.canvas.height / img.height;
    var ratio = Math.min(hRatio, vRatio);
    contxt.strokeStyle = "#FFFFFF";
    for (var id in featurePoints) {
        contxt.beginPath();
        contxt.arc(featurePoints[id].x, featurePoints[id].y, 2, 0, 2 * Math.PI);
        contxt.stroke();
    }
}
Answer 0 (score: 2)
The problem is that $("#affdex_elements")[0] returns undefined, so the CameraDetector cannot add the elements it needs in the DOM in order to work (the canvas and video nodes) and breaks silently.
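A quick way to see this (a hypothetical check added for illustration, not part of the original answer) is to log the selector result from js/testaffectiva.js while it is still being loaded in the <head>:

// Hypothetical check: at this point #affdex_elements has not been parsed yet,
// so the jQuery selector matches nothing and indexing the result yields undefined.
console.log($("#affdex_elements").length); // 0
console.log($("#affdex_elements")[0]);     // undefined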
So this line

var divRoot = $("#affdex_elements")[0];

needs to execute after the DOM content has loaded:
var detector = null;

$(document).ready(function() {
    // The SDK needs to create video and canvas nodes in the DOM in order to function.
    // Here we are adding those nodes to a predefined div.
    var divRoot = $("#affdex_elements")[0];
    var width = 640;
    var height = 480;
    var faceMode = affdex.FaceDetectorMode.LARGE_FACES;
    // Construct a CameraDetector and specify the image width/height and face detector mode.
    detector = new affdex.CameraDetector(divRoot, width, height, faceMode);
    // Enable detection of all Expressions, Emotions and Emojis classifiers.
    detector.detectAllEmotions();
    detector.detectAllExpressions();
    detector.detectAllEmojis();
    detector.detectAllAppearance();
    // Add a callback to notify when the detector is initialized and ready for running.
    detector.addEventListener("onInitializeSuccess", function() {
        log('#logs', "The detector reports initialized");
        // Display the canvas instead of the video feed because we want to draw the feature points on it.
        $("#face_video_canvas").css("display", "block");
        $("#face_video").css("display", "none");
    });
    // Add a callback to notify when camera access is allowed.
    detector.addEventListener("onWebcamConnectSuccess", function() {
        log('#logs', "Webcam access allowed");
    });
    // Add a callback to notify when camera access is denied.
    detector.addEventListener("onWebcamConnectFailure", function() {
        log('#logs', "webcam denied");
        console.log("Webcam access denied");
    });
    // Add a callback to notify when the detector is stopped.
    detector.addEventListener("onStopSuccess", function() {
        log('#logs', "The detector reports stopped");
        $("#results").html("");
    });
    // Add a callback to receive the results from processing an image.
    // The faces object contains the list of faces detected in the image, with
    // probabilities for all the different expressions, emotions and appearance metrics.
    detector.addEventListener("onImageResultsSuccess", function(faces, image, timestamp) {
        $('#results').html("");
        log('#results', "Timestamp: " + timestamp.toFixed(2));
        log('#results', "Number of faces found: " + faces.length);
        if (faces.length > 0) {
            log('#results', "Appearance: " + JSON.stringify(faces[0].appearance));
            log('#results', "Emotions: " + JSON.stringify(faces[0].emotions, function(key, val) {
                return val.toFixed ? Number(val.toFixed(0)) : val;
            }));
            log('#results', "Expressions: " + JSON.stringify(faces[0].expressions, function(key, val) {
                return val.toFixed ? Number(val.toFixed(0)) : val;
            }));
            log('#results', "Emoji: " + faces[0].emojis.dominantEmoji);
            drawFeaturePoints(image, faces[0].featurePoints);
        }
    });
    // Draw the detected facial feature points on the image.
    function drawFeaturePoints(img, featurePoints) {
        var contxt = $('#face_video_canvas')[0].getContext('2d');
        var hRatio = contxt.canvas.width / img.width;
        var vRatio = contxt.canvas.height / img.height;
        var ratio = Math.min(hRatio, vRatio);
        contxt.strokeStyle = "#FFFFFF";
        for (var id in featurePoints) {
            contxt.beginPath();
            contxt.arc(featurePoints[id].x, featurePoints[id].y, 2, 0, 2 * Math.PI);
            contxt.stroke();
        }
    }
});

function log(node_name, msg) {
    $(node_name).append("<span>" + msg + "</span><br />");
}

// Executes when the Start button is pushed.
function onStart() {
    if (detector && !detector.isRunning) {
        $("#logs").html("");
        detector.start();
    }
    log('#logs', "Clicked the start button");
}

// Executes when the Stop button is pushed.
function onStop() {
    log('#logs', "Clicked the stop button");
    if (detector && detector.isRunning) {
        detector.removeEventListener();
        detector.stop();
    }
}

// Executes when the Reset button is pushed.
function onReset() {
    log('#logs', "Clicked the reset button");
    if (detector && detector.isRunning) {
        detector.reset();
        $('#results').html("");
    }
}
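A minimal alternative sketch (my assumption, not something the original answer proposes): keep the script unchanged and instead delay its execution until the document has been parsed, either by adding the defer attribute to its <script> tag or by moving the tag to the end of <body>:

<!-- Hypothetical alternative: defer runs this script only after the DOM is fully parsed.
     jQuery and affdex.js load synchronously above it, so they are still available first. -->
<script src="js/testaffectiva.js" type="text/javascript" defer></script>

Either way, #affdex_elements is guaranteed to exist before the CameraDetector is constructed.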
Answer 1 (score: 0)
I ran this on a Windows 7 machine with the Chrome browser, and I don't see any errors in the console or the Network tab.