How do I export TensorFlow object detection labels and bounding boxes to a CSV file?

Asked: 2019-07-03 13:26:03

Tags: python csv tensorflow object-detection

I am working on an object detection model to assist with inspections. One feature is that, if an error is found in the input video, it sends a notification and exports the data to CSV. I would like it to export the label and bounding box coordinates of the error, along with the timestamp and frame number if possible.
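
For reference, a minimal sketch of the CSV layout I have in mind (the write_detection_row helper, the column order, and the 'defect' label are only illustrative; the box is assumed to be a normalized [ymin, xmin, ymax, xmax] array as returned by the Object Detection API, scaled here to a 960x540 frame):

import csv

# Hypothetical helper: append one detection to an open csv.writer.
def write_detection_row(writer, frame_idx, timestamp_s, label, box):
    ymin, xmin, ymax, xmax = box
    writer.writerow([frame_idx, timestamp_s, label,
                     ymin * 540, xmin * 960, ymax * 540, xmax * 960])

# Usage sketch with made-up values:
with open('example_output.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['frame', 'time_s', 'label', 'ymin', 'xmin', 'ymax', 'xmax'])
    write_detection_row(writer, 42, 2.1, 'defect', [0.1, 0.2, 0.3, 0.4])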

I have seen one answer about exporting bounding boxes to CSV, but I have not been able to get it working, and it gives me this error:

File "C:\Users\Charles.averill\AppData\Local\Programs\Python\Python36\lib\site-packages\numpy\lib\npyio.py", line 1377, in savetxt "Expected 1D or 2D array, got %dD array instead" % X.ndim) ValueError: Expected 1D or 2D array, got 3D array instead

I have it set up so that it annotates the video and, when that finishes, prompts the user whether to export to CSV, so I have a separate export method. Here is my code:

def annotate(self):
    if("annotated" in self.video_path):
        messagebox.showinfo("Error", "You can't annotate an annotated video!")
    elif(self.mode == "V" and not self.video_path is None):
        fourcc = cv2.VideoWriter_fourcc(*'MP4V')
        time = datetime.datetime.now().strftime('%Y-%m-%d %H_%M_%S')
        path = 'output/videos/annotated_' + time + '_output.mp4'
        out = cv2.VideoWriter(path, fourcc, 20.0, (960, 540))
        self.rewind()
        NUM_CLASSES = 2
        detection_graph = tf.Graph()
        with detection_graph.as_default():
            od_graph_def = tf.compat.v1.GraphDef()
            with tf.io.gfile.GFile(self.model_graph, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
        lmap = label_map_util.load_labelmap(self.label_map)
        categories = label_map_util.convert_label_map_to_categories(lmap, max_num_classes=NUM_CLASSES, use_display_name=True)
        category_index = label_map_util.create_category_index(categories)

        with detection_graph.as_default():
            with tf.compat.v1.Session(graph=detection_graph) as sess:
                while not self.currentFrame is None:
                    image_np = self.get_just_frame()
                    if(image_np is None):
                        break
                    image_np_expanded = np.expand_dims(image_np, axis=0)

                    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')

                    self.boxes = detection_graph.get_tensor_by_name('detection_boxes:0')

                    self.scores = detection_graph.get_tensor_by_name('detection_scores:0')

                    classes = detection_graph.get_tensor_by_name('detection_classes:0')

                    num_detections = detection_graph.get_tensor_by_name(
                        'num_detections:0')

                    (self.boxes, self.scores, classes, num_detections) = sess.run(
                        [self.boxes, self.scores, classes, num_detections],
                        feed_dict={image_tensor: image_np_expanded})

                    vis_util.visualize_boxes_and_labels_on_image_array(
                        image_np,
                        np.squeeze(self.boxes),
                        np.squeeze(classes).astype(np.int32),
                        np.squeeze(self.scores),
                        category_index,
                        use_normalized_coordinates=True,
                        line_thickness=2)

                    # Display output
                    out.write(image_np)
        self.video.release()
        out.release()
        self.video = None
        self.set_video_path(path)
        self.video = cv2.VideoCapture(self.video_path)
        if(not self.video.isOpened()):
            raise ValueError("Unable to open video source", self.video_path)
        ret, frame = self.get_frame()
        if(ret and not frame is None):
            self.photo = PIL.ImageTk.PhotoImage(image = PIL.Image.fromarray(frame))  
            self.canvas.create_image(0, 0, image = self.photo, anchor = NW)
        MsgBox = tk.messagebox.askquestion ('Export to CSV','Do you want to export the video to CSV?',icon = 'warning')
        if MsgBox == 'yes':
           self.export_CSV()
    if(self.video_path is None):
        messagebox.showinfo("Error", "No video selected")

def export_CSV(self):
    if(not self.boxes is None):
        print(self.boxes)
        for i, box in enumerate(np.squeeze(self.boxes)):
            if(np.squeeze(self.scores)[i] > 0.5):
                print("ymin={}, xmin={}, ymax={}, xmax{}".format(box[0]*540,box[1]*960,box[2]*540,box[3]*960))
        time = datetime.datetime.now().strftime('%Y-%m-%d %H_%M_%S')
        path = 'output/csv/' + time + '_output.csv'
        np.savetxt(path, self.boxes, delimiter=',')
    else:
        messagebox.showinfo("Error", "No boxes, you must\nannotate the video first")

How can I export the labels together with the bounding boxes?

Thanks!

1 Answer:

Answer 0 (score: 0):

You need to pass the predicted classes and the category index into your export method, and you can use the csv library to write out the predictions.

Add this at the top of the file:

import csv

I made a few changes to your code, but feel free to correct it as needed.

def annotate(self):
    if("annotated" in self.video_path):
        messagebox.showinfo("Error", "You can't annotate an annotated video!")
    elif(self.mode == "V" and not self.video_path is None):
        fourcc = cv2.VideoWriter_fourcc(*'MP4V')
        time = datetime.datetime.now().strftime('%Y-%m-%d %H_%M_%S')
        path = 'output/videos/annotated_' + time + '_output.mp4'
        out = cv2.VideoWriter(path, fourcc, 20.0, (960, 540))
        self.rewind()
        NUM_CLASSES = 2
        detection_graph = tf.Graph()
        with detection_graph.as_default():
            od_graph_def = tf.compat.v1.GraphDef()
            with tf.io.gfile.GFile(self.model_graph, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
        lmap = label_map_util.load_labelmap(self.label_map)
        categories = label_map_util.convert_label_map_to_categories(lmap, max_num_classes=NUM_CLASSES, use_display_name=True)
        category_index = label_map_util.create_category_index(categories)

        with detection_graph.as_default():
            with tf.compat.v1.Session(graph=detection_graph) as sess:
                while not self.currentFrame is None:
                    image_np = self.get_just_frame()
                    if(image_np is None):
                        break
                    image_np_expanded = np.expand_dims(image_np, axis=0)

                    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')

                    self.boxes = detection_graph.get_tensor_by_name('detection_boxes:0')

                    self.scores = detection_graph.get_tensor_by_name('detection_scores:0')

                    classes = detection_graph.get_tensor_by_name('detection_classes:0')

                    num_detections = detection_graph.get_tensor_by_name(
                        'num_detections:0')

                    (self.boxes, self.scores, classes, num_detections) = sess.run(
                        [self.boxes, self.scores, classes, num_detections],
                        feed_dict={image_tensor: image_np_expanded})

                    vis_util.visualize_boxes_and_labels_on_image_array(
                        image_np,
                        np.squeeze(self.boxes),
                        np.squeeze(classes).astype(np.int32),
                        np.squeeze(self.scores),
                        category_index,
                        use_normalized_coordinates=True,
                        line_thickness=2)

                    # Display output
                    out.write(image_np)
        self.video.release()
        out.release()
        self.video = None
        self.set_video_path(path)
        self.video = cv2.VideoCapture(self.video_path)
        if(not self.video.isOpened()):
            raise ValueError("Unable to open video source", self.video_path)
        ret, frame = self.get_frame()
        if(ret and not frame is None):
            self.photo = PIL.ImageTk.PhotoImage(image = PIL.Image.fromarray(frame))  
            self.canvas.create_image(0, 0, image = self.photo, anchor = NW)
        MsgBox = tk.messagebox.askquestion ('Export to CSV','Do you want to export the video to CSV?',icon = 'warning')
        if MsgBox == 'yes':
           self.export_CSV(self.boxes, self.scores, classes, category_index)
    if(self.video_path is None):
        messagebox.showinfo("Error", "No video selected")

def export_CSV(self, boxes, scores, classes, category_index):
    if (boxes is None):
        messagebox.showinfo("Error", "No boxes, you must\nannotate the video first")
        return

    time = datetime.datetime.now().strftime('%Y-%m-%d %H_%M_%S')
    path = 'output/csv/' + time + '_output.csv'
    print(boxes)

    # Open in text mode with newline='' so csv.writer works under Python 3.
    with open(path, 'w', newline='') as write_file:
        writer = csv.writer(write_file)

        # Squeeze away the batch dimension and cast the class ids to int so
        # they can be used as keys into category_index.
        for box, score, predicted_class in zip(np.squeeze(boxes), np.squeeze(scores), np.squeeze(classes).astype(np.int32)):
            if(score > 0.5):
                print("ymin={}, xmin={}, ymax={}, xmax={}".format(box[0]*540,box[1]*960,box[2]*540,box[3]*960))
                writer.writerow([box[0], box[1], box[2], box[3], category_index[predicted_class]['name']])

I'm not sure whether category_index has a field called 'name', but as I said, feel free to change it to suit your needs:

writer.writerow([box[0], box[1], box[2], box[3], category_index[predicted_class]['name']])
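
For reference, category_index as returned by label_map_util.create_category_index should be a plain dict keyed by integer class id, with 'id' and 'name' entries for each class, so the lookup above works once predicted_class has been cast to int. A small sketch of what it typically looks like for NUM_CLASSES = 2 (the class names here are made up):

# Illustrative shape of category_index for a two-class label map
# (the names 'defect' and 'ok' are invented for this example):
category_index = {
    1: {'id': 1, 'name': 'defect'},
    2: {'id': 2, 'name': 'ok'},
}

predicted_class = 1
print(category_index[int(predicted_class)]['name'])  # -> defect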