我正在使用以下代码检测面部并在面部上绘制矩形,就像这样。
Inference.py 在这个文件中,我们试图围绕面部绘制raw_bounding_box:
import cv2
import matplotlib.pyplot as plt
import numpy as np
from keras.preprocessing import image
def load_image(image_path, grayscale=False, target_size=None):
    """Load an image file from disk and return it as a numpy array.

    Args:
        image_path: path to the image file on disk.
        grayscale: when True, load as a single-channel grayscale image.
        target_size: optional (height, width) to resize the image to.

    Returns:
        The image contents as a numpy array.
    """
    # NOTE(review): the original called image.load_face_coordinates /
    # image.face_coordinates_to_array, which do not exist in
    # keras.preprocessing.image — the real helpers are load_img / img_to_array.
    pil_image = image.load_img(image_path, grayscale, target_size)
    return image.img_to_array(pil_image)
def load_detection_model(model_path):
    """Load a Haar-cascade classifier (e.g. a face detector) from an XML file."""
    return cv2.CascadeClassifier(model_path)
def detect_faces(detection_model, gray_image_array):
    """Run the cascade detector on a grayscale image.

    Returns an array of (x, y, w, h) bounding boxes, one per detected face.
    """
    # Same tuning values as before, passed by keyword for readability.
    return detection_model.detectMultiScale(
        gray_image_array, scaleFactor=1.3, minNeighbors=5)
def draw_bounding_box(face_coordinates, image_array, color, r=8, d=None):
    """Draw a corner-bracket frame (four rounded corners) around a face.

    Args:
        face_coordinates: (x, y, w, h) box as produced by
            detectMultiScale — the same convention the commented-out
            cv2.rectangle call used.
        image_array: image to draw on (modified in place).
        color: color tuple/list understood by cv2 drawing functions.
        r: radius of the rounded-corner arcs (an aesthetic choice).
            Defaulted so existing 3-argument callers keep working.
        d: length of each straight corner segment. When None it is derived
            so the open gap is roughly a third of the box's smaller side.
    """
    # BUG FIX: the original unpacked (x1, y1, x2, y2) directly, but the
    # cascade detector yields (x, y, width, height) — convert to corners.
    x, y, w, h = face_coordinates
    x1, y1, x2, y2 = x, y, x + w, y + h
    if d is None:
        d = max(min(w, h) // 3 - r, 1)
    # Top left
    cv2.line(image_array, (x1 + r, y1), (x1 + r + d, y1), color, 2)
    cv2.line(image_array, (x1, y1 + r), (x1, y1 + r + d), color, 2)
    cv2.ellipse(image_array, (x1 + r, y1 + r), (r, r), 180, 0, 90, color, 2)
    # Top right
    cv2.line(image_array, (x2 - r, y1), (x2 - r - d, y1), color, 2)
    cv2.line(image_array, (x2, y1 + r), (x2, y1 + r + d), color, 2)
    cv2.ellipse(image_array, (x2 - r, y1 + r), (r, r), 270, 0, 90, color, 2)
    # Bottom left
    cv2.line(image_array, (x1 + r, y2), (x1 + r + d, y2), color, 2)
    cv2.line(image_array, (x1, y2 - r), (x1, y2 - r - d), color, 2)
    cv2.ellipse(image_array, (x1 + r, y2 - r), (r, r), 90, 0, 90, color, 2)
    # Bottom right
    cv2.line(image_array, (x2 - r, y2), (x2 - r - d, y2), color, 2)
    cv2.line(image_array, (x2, y2 - r), (x2, y2 - r - d), color, 2)
    cv2.ellipse(image_array, (x2 - r, y2 - r), (r, r), 0, 0, 90, color, 2)
image_array = np.zeros((256,256,3), dtype=np.uint8)
detectface.py 在这个文件中,我们检测面部并从Inference.py中调用函数来绘制面部周围的框。
# starting video streaming: grab webcam frames, classify each detected
# face's emotion, and draw an emotion-colored corner frame around it.
# NOTE(review): face_detection, apply_offsets, emotion_offsets,
# emotion_target_size, preprocess_input, emotion_classifier,
# emotion_labels, emotion_window, frame_window and mode are defined
# elsewhere in the project — not in this snippet.
cv2.namedWindow('window_frame')
video_capture = cv2.VideoCapture(0)
while True:
    bgr_image = video_capture.read()[1]
    gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
    rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
    faces = detect_faces(face_detection, gray_image)
    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]
        try:
            gray_face = cv2.resize(gray_face, (emotion_target_size))
        except cv2.error:
            # Offsets can push the crop outside the frame, yielding an
            # empty array that cv2.resize rejects — skip that face.
            continue
        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_prediction = emotion_classifier.predict(gray_face)
        emotion_probability = np.max(emotion_prediction)
        emotion_label_arg = np.argmax(emotion_prediction)
        emotion_text = emotion_labels[emotion_label_arg]
        # Keep a sliding window of recent labels to stabilise the output.
        emotion_window.append(emotion_text)
        if len(emotion_window) > frame_window:
            emotion_window.pop(0)
        try:
            emotion_mode = mode(emotion_window)
        except Exception:
            # statistics.mode raises when there is no unique mode yet;
            # wait for more frames. (Narrowed from a bare except.)
            continue
        # Scale the per-emotion base color by the prediction confidence.
        if emotion_text == 'angry':
            color = emotion_probability * np.asarray((255, 0, 0))
        elif emotion_text == 'sad':
            color = emotion_probability * np.asarray((0, 0, 255))
        elif emotion_text == 'happy':
            color = emotion_probability * np.asarray((0, 128, 255))
        elif emotion_text == 'surprise':
            color = emotion_probability * np.asarray((0, 255, 255))
        else:
            color = emotion_probability * np.asarray((0, 255, 0))
        color = color.astype(int)
        color = color.tolist()
        # BUG FIX: the original call omitted the required 'r' and 'd'
        # arguments. r is an aesthetic corner radius; d is chosen so the
        # open gap is roughly a third of the box's smaller dimension.
        x, y, w, h = face_coordinates
        r = 8
        d = max(min(w, h) // 3 - r, 1)
        draw_bounding_box(face_coordinates, rgb_image, color, r, d)
此文件(detectface.py)中的最后一行代码似乎不对:调用 draw_bounding_box 时缺少两个必需的位置参数 'r' 和 'd',而我不知道应该如何为它们取值。如果您有任何实现这一目标的想法,请分享。
答案 0(得分:1):
draw_bounding_box()
所做的是绘制样本图像中的绿色框架,包括支持圆角。
恕我直言,这是一张图片胜过千言万语的情况,所以让我们看看左上角(其他3个片段遵循相同的模式,只是旋转)。
由
生成cv2.line(image_array, (x1 + r, y1), (x1 + r + d, y1), color, 2)
cv2.line(image_array, (x1, y1 + r), (x1, y1 + r + d), color, 2)
cv2.ellipse(image_array, (x1 + r, y1 + r), (r, r), 180, 0, 90, color, 2)
其中:

(x1, y1) 指定我们想要绘制框架的矩形区域的左上角;

r 是圆弧(圆角)的半径;

d 是两条直线段(水平和垂直各一条)的长度;

color 是所用的颜色;

2 是线条和弧线的粗细。

关于如何设置这些参数……
r 参数看起来更像是一种美学选择——我觉得 8 左右的值可能看起来不错,尽管示例图像似乎没有圆角,这意味着 r == 0。我不确定(意思是我现在懒得试试 ;))cv2.ellipse 在绘制半径为 0 的椭圆时表现如何,但一个简单的 if 语句就可以解决这个问题(即仅当 r > 0 时才调用 cv2.ellipse)。
d 参数似乎应设置为使开口间隙大约占 ROI 的 33%。我会选择 ROI 的较小维度(即 min(width, height)),除以 3,减去 r,并使用该结果。