The goal of this script is to use Keras' existing image preprocessing module for video data augmentation. In this prototype, a sample video is split into an array of frames and processed; the final step performs random rotation, shift, shear, and zoom:
from keras import backend as K
from keras.preprocessing.image import random_rotation, random_shift, random_shear, random_zoom
K.set_image_dim_ordering("th")
import cv2
import numpy as np
video_file_path = "./training-data/yes/1.mov"
samples_generated_per_sample = 10
self_rows = 100
self_columns = 150
self_frames_per_sequence = 45
# haar cascades for localizing oral region
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
mouth_cascade = cv2.CascadeClassifier('haarcascade_mcs_mouth.xml')
video = cv2.VideoCapture(video_file_path)
success, frame = video.read()
frames = []
success = True
# convert to grayscale, localize oral region, equalize dimensions,
# normalize pixels, equalize lengths, and accumulate valid frames
while success:
    success, frame = video.read()
    if success:
        # convert to grayscale
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # localize single facial region
        faces_coords = face_cascade.detectMultiScale(frame, 1.3, 5)
        if len(faces_coords) == 1:
            face_x, face_y, face_w, face_h = faces_coords[0]
            frame = frame[face_y:face_y + face_h, face_x:face_x + face_w]
            # localize oral region
            mouth_coords = mouth_cascade.detectMultiScale(frame, 1.3, 5)
            threshold = 0
            for (mouth_x, mouth_y, mouth_w, mouth_h) in mouth_coords:
                if (mouth_y > threshold):
                    threshold = mouth_y
                    valid_mouth_coords = (mouth_x, mouth_y, mouth_w, mouth_h)
                else:
                    pass
            mouth_x, mouth_y, mouth_w, mouth_h = valid_mouth_coords
            frame = frame[mouth_y:mouth_y + mouth_h, mouth_x:mouth_x + mouth_w]
            frames.append(frame)
        # ignore multiple facial region detections
        else:
            pass

# pre-pad short sequences and equalize sequence lengths
if len(frames) < self_frames_per_sequence:
    frames = [frames[0]]*(self_frames_per_sequence - len(frames)) + frames
frames = frames[0:self_frames_per_sequence]
frames = np.asarray(frames)
rotated_frames = random_rotation(frames, rg=45)
shifted_frames = random_shift(rotated_frames, wrg=0.25, hrg=0.25)
sheared_frames = random_shear(shifted_frames, intensity=0.79)
zoomed_frames = random_zoom(sheared_frames, zoom_range=(1.25, 1.25))
Answer 0 (score: 2)
See the parameters: https://github.com/fchollet/keras/blob/master/keras/preprocessing/image.py#L25.
You are passing frames as a flat array, but the function expects an array with at least three axes, so by default it probably needs row_axis=1, col_axis=2. Either specify those arguments correctly, or supply an array of the proper shape.
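As a minimal sketch of what this means, assuming the Keras 2 keyword names from the linked source file and a dummy frame stack in place of the real video data (already equalized in size and stacked into one 3D float array):

import numpy as np
from keras.preprocessing.image import random_rotation

# hypothetical stand-in for the real data: 45 grayscale frames of 100 x 150 pixels
frames = np.random.rand(45, 100, 150).astype('float32')

# axis 0 holds the frames (treated here as the "channel" axis), while axes 1 and 2
# are the spatial rows/columns, so every frame receives the same random rotation
rotated_frames = random_rotation(frames, rg=45,
                                 row_axis=1, col_axis=2, channel_axis=0)
print(rotated_frames.shape)  # (45, 100, 150)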
Answer 1 (score: 0)
The problem was caused by unequal frame dimensions. The solution is to equalize the frame dimensions before applying the transformations:
# pre-pad short sequences, equalize frame dimensions, and equalize sequence lengths
if len(frames) < self_frames_per_sequence:
    frames = [frames[0]]*(self_frames_per_sequence - len(frames)) + frames
frames = frames[0:self_frames_per_sequence]
frames = [cv2.resize(frame, (self_columns, self_rows)).astype('float32') for frame in frames]
frames = np.asarray(frames)
rotated_frames = random_rotation(frames, rg=45)
shifted_frames = random_shift(rotated_frames, wrg=0.25, hrg=0.25)
sheared_frames = random_shear(shifted_frames, intensity=0.79)
zoomed_frames = random_zoom(sheared_frames, zoom_range=(1.25, 1.25))
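Note that samples_generated_per_sample is defined at the top of the script but never used in the snippet above. A rough sketch (not the author's code) of how the transform chain might be looped to produce that many augmented copies of the resized frame stack:

augmented_sequences = []
for _ in range(samples_generated_per_sample):
    # re-run the random transform chain to get a different augmentation each pass
    augmented = random_rotation(frames, rg=45)
    augmented = random_shift(augmented, wrg=0.25, hrg=0.25)
    augmented = random_shear(augmented, intensity=0.79)
    augmented = random_zoom(augmented, zoom_range=(1.25, 1.25))
    augmented_sequences.append(augmented)
augmented_sequences = np.asarray(augmented_sequences)  # (10, 45, rows, cols)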