所以,我的模型为我提供了测试图像相当准确的结果
import cv2
from IPython.display import display, Javascript
from google.colab.output import eval_js
from base64 import b64decode
import matplotlib.pyplot as plt
# Haar cascade used to locate faces in the still image below.
face_haar_cascade = cv2.CascadeClassifier('/content/gdrive/My Drive/New FEC Facial Expression/haarcascade_frontalface_default.xml')
from IPython.display import Image
try:
    filename = '/content/gdrive/My Drive/photo-1533227268428-f9ed0900fb3b.jpg'
    img = cv2.imread(filename)
    # BUG FIX: cv2.imread returns None (no exception) when the path is wrong;
    # fail loudly instead of crashing later inside cvtColor.
    if img is None:
        raise FileNotFoundError('Could not read image: ' + filename)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # scaleFactor=1.3, minNeighbors=6 — fairly strict detection settings.
    faces = face_haar_cascade.detectMultiScale(gray, 1.3, 6)
    print('faces', faces)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = img[y:y + h, x:x + w]
    plt.grid(None)
    plt.xticks([])
    plt.yticks([])
    # BUG FIX: cv2.imread yields BGR but matplotlib expects RGB; convert
    # before displaying so the colors are not swapped.
    imgplot = plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    # Show the image which was just taken.
    # display(Image(filename))
except Exception as err:
    # Errors will be thrown if the user does not have a webcam or if they do not
    # grant the page permission to access it.
    print(str(err))
import cv2
import sys
# Detect faces, save the cropped face, and classify its expression with the
# pre-trained `model` (defined earlier in the notebook).
imagePath = '/content/gdrive/My Drive/photo-1533227268428-f9ed0900fb3b.jpg'
image = cv2.imread(imagePath)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
faceCascade = cv2.CascadeClassifier('/content/gdrive/My Drive/New FEC Facial Expression/haarcascade_frontalface_default.xml')
faces = faceCascade.detectMultiScale(
    gray,
    scaleFactor=1.3,
    minNeighbors=3,
    minSize=(30, 30)
)
print("[INFO] Found {0} Faces.".format(len(faces)))
for (x, y, w, h) in faces:
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
    roi_color = image[y:y + h, x:x + w]
    print("[INFO] Object found. Saving locally.")
    cv2.imwrite('/content/gdrive/My Drive/converted Images/faces.jpg', roi_color)
status = cv2.imwrite('faces_detected.jpg', image)
print("[INFO] Image faces_detected.jpg written to filesystem: ", status)
# BUG FIX: import under an alias so the keras module does not shadow the
# `image` ndarray defined above.
from keras.preprocessing import image as keras_image
# BUG FIX: run the preprocessing/prediction only when a face was actually
# detected; previously 'faces.jpg' was loaded unconditionally, so a stale
# crop from an earlier run could be classified even with zero detections.
if len(faces):
    img = keras_image.load_img('/content/gdrive/My Drive/converted Images/faces.jpg', color_mode="grayscale", target_size=(48, 48))
    x = keras_image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x /= 255  # normalize pixels to [0, 1], matching the training preprocessing
    # `grayscale=` is deprecated; color_mode="rgb" is the equivalent of grayscale=False.
    show_img = keras_image.load_img('/content/gdrive/My Drive/converted Images/faces.jpg', color_mode="rgb", target_size=(200, 200))
    plt.gray()
    plt.imshow(show_img)
    plt.show()
    custom = model.predict(x)
    index = np.argmax(custom[0])
    emotion1 = custom[0][index] * 100  # confidence of the top class, in percent
    print(custom)
    print(emotion_label_to_text[index], ' => ', emotion1)
else:
    print('No Face Detected')
这给出了很好的结果,并且对于相同结果的输出是正确的,我插入的图像是快乐图像,opencv用于检测人脸并对其进行裁剪,然后使用该裁剪后的图像放入模型中并给了我好的结果,
但是在tf.js部分中,我使用tfjs转换器将keras模型转换为.json,并编写了以下代码
const classifier = new cv.CascadeClassifier(cv.HAAR_FRONTALFACE_ALT2);
try {
const canvImg = await canvas.loadImage(
path.join(__dirname, `images/${req.file.filename}`)
);
const image = await cv.imread(path.join(__dirname, `/images/${req.file.filename}`));
const classifier = new cv.CascadeClassifier(cv.HAAR_FRONTALFACE_ALT2);
const { objects, numDetections } = classifier.detectMultiScale(image.bgrToGray());
if (!objects.length) {
return next({
msg: 'No face detected'
})
} else {
const model = await tf.loadLayersModel(
"http://localhost:8000/models/model.json"
);
const obj = objects[0]
const cnvs = canvas.createCanvas(48, 48);
const ctx = cnvs.getContext("2d");
ctx.drawImage(canvImg, obj.x, obj.y, obj.width, obj.height, 0, 0, cnvs.width, cnvs.height);
var tensor = tf.browser
.fromPixels(cnvs)
.mean(2)
.toFloat()
.expandDims(-1)
.expandDims(0, 'None')
const prediction = await model.predict(tensor).data();
console.log(prediction);
var emotions = [
"angry",
"disgust",
"fear",
"happy",
"sad",
"surprise",
];
var index = Object.values(prediction).findIndex(
(p) => p === Math.max(...Object.values(prediction))
);
res.status(200).json(emotions[index])
fs.unlink(
path.join(process.cwd(), "./faceDetection/images/" + req.file.filename),
function(err, removed) {
if (err) console.log("file removing err");
else console.log("file removed");
}
);
}
} catch (e) {
return next(e)
}
我使用 opencv4nodejs 检测人脸,用 canvas 裁剪图像(canvas 裁剪面部的效果很好),再用 tf.js 做预测。但输出对象中各个键的值每次都完全相同——其中一个键恒为 1(本例中是 fear),即使输入的是我在 Keras 中测试过的同一张图片,结果也不变。
我在操纵张量时做错了吗?
答案 0(得分:0)
一个可能的原因:在 Python 中,您通过 `x /= 255`
将图像输入归一化为 [0, 1],但在 JavaScript 中没有做这一步。
答案 1 :(得分:0)
js中的预处理不同于python中的预处理。
在python中,图像通过除以255进行归一化
在Js中,通过计算第三轴上的均值(mean(2))将图像转换为灰度。张量应该是这样的:
tensor = tf.browser.fromPixels(cnvs)
          .mean(2)
          .toFloat()
          .div(255.0)
          .expandDims(-1)
          .expandDims(0)