主程序
# In[10]:
# Main program: pull frames from a background-decoded video stream, run the
# TF object-detection graph on each one, and display the annotated frames.
cap = FileVideoStream('london_piccadilly_circus.mp4').start()
fps = FPS().start()
with detection_graph.as_default():
    with tf.Session(graph=detection_graph) as sess:
        # Fetch the tensor handles ONCE, outside the frame loop.
        # get_tensor_by_name performs a graph lookup on every call, so doing
        # it per-frame (as before) added avoidable overhead to each iteration.
        image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
        # Each box represents a part of the image where an object was detected.
        boxes_t = detection_graph.get_tensor_by_name('detection_boxes:0')
        # Confidence score per detection; shown on the result image together
        # with the class label.
        scores_t = detection_graph.get_tensor_by_name('detection_scores:0')
        classes_t = detection_graph.get_tensor_by_name('detection_classes:0')
        num_detections_t = detection_graph.get_tensor_by_name('num_detections:0')
        while True:
            ### for webcam ###
            image_np = cap.read()
            # Expand dims: the model expects shape [1, None, None, 3].
            image_np_expanded = np.expand_dims(image_np, axis=0)
            # Actual detection. (Per-frame progress prints removed: stdout
            # I/O on every frame measurably slows the pipeline.)
            (boxes, scores, classes, num_detections) = sess.run(
                [boxes_t, scores_t, classes_t, num_detections_t],
                feed_dict={image_tensor: image_np_expanded})
            # Visualization of the results of a detection.
            vis_util.visualize_boxes_and_labels_on_image_array(
                image_np,
                np.squeeze(boxes),
                np.squeeze(classes).astype(np.int32),
                np.squeeze(scores),
                category_index,
                use_normalized_coordinates=True,
                line_thickness=8)
            ### for webcam ###
            cv2.imshow('object detection', cv2.resize(image_np, (300, 300)))
            if cv2.waitKey(1) & 0xFF == ord('q'):
                cv2.destroyAllWindows()
                cap.stop()
                break
            fps.update()
fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
用于多线程的FileVideoStream程序
class FileVideoStream:
    """Decode video frames on a background thread and buffer them in a
    bounded queue, so the consumer (the detection loop) never waits on
    disk/decoder I/O to get its next frame."""

    def __init__(self, path, queueSize=128):
        # OpenCV capture handle, plus the flag the worker thread polls
        # to know when it should exit.
        self.stream = cv2.VideoCapture(path)
        self.stopped = False
        # Bounded queue of decoded frames; the bound caps memory use.
        self.Q = Queue(maxsize=queueSize)
        # Keep a handle on the worker thread (the original discarded it),
        # so callers can join/inspect it if they ever need to.
        self.thread = None
        print("FileVideoStream Initialized.")

    def start(self):
        """Launch the background decode thread and return self (fluent)."""
        # Daemon thread: it must not keep the process alive at exit.
        self.thread = Thread(target=self.update, args=())
        self.thread.daemon = True
        self.thread.start()
        return self

    def update(self):
        """Worker loop: read frames from the file into the queue until the
        file ends or stop() is called."""
        # Local import: the file's header imports are outside this snippet.
        import time
        while not self.stopped:
            if self.Q.full():
                # Yield briefly instead of busy-spinning a full core
                # (the original spun here, stealing CPU from the
                # detection thread and printing on every frame).
                time.sleep(0.01)
                continue
            (grabbed, frame) = self.stream.read()
            if not grabbed:
                # `grabbed` False means end of the video file.
                self.stopped = True
                break
            self.Q.put(frame)
        # Only the worker touches the capture, so releasing it here is
        # race-free; the original leaked the decoder handle.
        self.stream.release()

    def read(self):
        """Return the next frame; blocks until one is available."""
        return self.Q.get()

    def more(self):
        """Return True if frames are currently buffered in the queue."""
        return self.Q.qsize() > 0

    def stop(self):
        """Signal the worker thread to exit."""
        self.stopped = True
我正在尝试提高用于对象检测的FPS。我使用Tensorflow API进行检测，并且遵循了本指南(https://www.pyimagesearch.com/2017/02/06/faster-video-file-fps-with-cv2-videocapture-and-opencv/)，它使用线程来尝试提高FPS。主程序和FileVideoStream代码如上所示。我得到的FPS小于1。从我观察到的情况来看，与提供帧的FileVideoStream线程相比，可视化和实际检测要慢得多，这导致了滞后。如何提高FPS，以便可以更平滑地（实时）显示帧？
非常感谢
答案 0（得分：0）
您可以尝试一些方法:
您可以在 https://www.tensorflow.org/performance/performance_guide 找到更多有关改善性能的一般建议。