我试图像下面这样把这两个库结合起来。我在 macOS 上使用 OpenCV、Python 3.6 和 pylibfreenect2。
# Run the object-detection model on each Kinect color frame and display the
# annotated result until 'q' is pressed.
with detection_graph.as_default():
    with tf.Session(graph=detection_graph) as sess:
        # Fetch the graph endpoints once, outside the frame loop — they do
        # not change between frames.
        image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
        # Each box represents a part of the image where a particular object
        # was detected.
        boxes_t = detection_graph.get_tensor_by_name('detection_boxes:0')
        # Each score represents the level of confidence for each object.
        # The score is shown on the result image together with the class label.
        scores_t = detection_graph.get_tensor_by_name('detection_scores:0')
        classes_t = detection_graph.get_tensor_by_name('detection_classes:0')
        num_detections_t = detection_graph.get_tensor_by_name('num_detections:0')

        while True:
            frames = listener.waitForNewFrame()
            color = frames["color"].asarray(np.uint8)
            # Kinect delivers BGRA; the detection model expects RGB.
            # (One conversion instead of the original BGRA->BGR->RGB pair.)
            color = cv2.cvtColor(color, cv2.COLOR_BGRA2RGB)
            color = cv2.resize(color, (1280, 720))
            # Model input is a batch of one image: (1, 720, 1280, 3).
            image_np_expanded = np.expand_dims(color, axis=0)

            # Actual detection.
            (boxes, scores, classes, num_detections) = sess.run(
                [boxes_t, scores_t, classes_t, num_detections_t],
                feed_dict={image_tensor: image_np_expanded})

            # BUG FIX: draw on the 3-D image `color`, not on the 4-D batch
            # array `image_np_expanded` — the original drew on the batch
            # array and then displayed the untouched `color`, so no boxes
            # ever appeared in the window.
            vis_util.visualize_boxes_and_labels_on_image_array(
                color,
                np.squeeze(boxes),
                np.squeeze(classes).astype(np.int32),
                np.squeeze(scores),
                category_index,
                use_normalized_coordinates=True,
                line_thickness=8)
            listener.release(frames)

            # NOTE: cv2.imshow without an OpenGL backend can be slow; comment
            # it out if visualization is the bottleneck.
            # BUG FIX: imshow expects BGR — convert back from RGB so the red
            # and blue channels are not swapped on screen.
            cv2.imshow('object detection', cv2.cvtColor(color, cv2.COLOR_RGB2BGR))

            # BUG FIX: waitKey(0) blocks forever waiting for a keypress on
            # every frame, freezing the loop; 1 ms keeps it live.
            key = cv2.waitKey(1) & 0xFF
            if key == ord('q'):
                break

device.stop()
device.close()
sys.exit(0)
这段代码不起作用。我猜可能是会话(tf.Session)的用法有问题,于是尝试调整了它,但没有帮助。有什么建议吗?