Data from the first subfolder gets appended to the second subfolder

Time: 2019-04-16 00:00:13

Tags: python

I have a script for object recognition. When the output for the first subfolder is written to disk, it also gets appended to the output of the second subfolder.

The object recognition itself works fine, and the data for the first subfolder is written perfectly, but when the output for the second subfolder is written, the first subfolder's output is added to it as well.

import os
import sys
import csv
import json
from glob import glob

import cv2
import numpy as np
import tensorflow as tf

# assumed imports: label_map_util and vis_util as shipped with the TensorFlow
# Object Detection API (vis_util.return_coordinates is a common custom patch);
# the script uses the TensorFlow 1.x API (tf.GraphDef, tf.gfile, tf.Session)
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util


def recognize_object(model_name,ckpt_path,label_path,test_img_path,img_output):

    count=0
    sys.path.append("..")

    MODEL_NAME = model_name

    PATH_TO_CKPT = ckpt_path

    PATH_TO_LABELS = label_path

    # exist_ok=True already handles an existing directory, so there is no
    # need to check os.path.exists first
    os.makedirs(img_output,exist_ok=True)

    folders = glob(test_img_path)
    print(folders)
    img_list=[]

    for folder in folders:
        folder_name=os.path.basename(folder)
        print(folder_name)
        out=img_output+"\\"+folder_name
        os.makedirs(out,exist_ok=True)
        print(out)

        for f in glob(folder+"/*.jpg"):
            img_list.append(f)

        for x in range(len(img_list)):
            PATH_TO_IMAGE = img_list[x]
            v1=os.path.basename(img_list[x])
            img_name = os.path.splitext(v1)[0]

            NUM_CLASSES = 3

            # note: the label map, categories and frozen graph below are
            # reloaded from disk for every single image; loading them once
            # before the loops would be much faster
            label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
            categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
            category_index = label_map_util.create_category_index(categories)

            detection_graph = tf.Graph()

            with detection_graph.as_default():
                od_graph_def = tf.GraphDef()
                with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
                    serialized_graph = fid.read()
                    od_graph_def.ParseFromString(serialized_graph)
                    tf.import_graph_def(od_graph_def, name='')

                sess = tf.Session(graph=detection_graph)

            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')

            detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
            detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
            detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
            num_detections = detection_graph.get_tensor_by_name('num_detections:0')


            image = cv2.imread(PATH_TO_IMAGE)
            image_expanded = np.expand_dims(image, axis=0)

            (boxes, scores, classes, num) = sess.run([detection_boxes, detection_scores, detection_classes, num_detections],feed_dict={image_tensor: image_expanded})


            vis_util.visualize_boxes_and_labels_on_image_array(
            image,
            np.squeeze(boxes),
            np.squeeze(classes).astype(np.int32),
            np.squeeze(scores),
            category_index,
            use_normalized_coordinates=True,
            line_thickness=4,
            min_score_thresh=0.80,
            skip_scores=True)


            coordinates=vis_util.return_coordinates(
            image,
            np.squeeze(boxes),
            np.squeeze(classes).astype(np.int32),
            np.squeeze(scores),
            category_index,
            use_normalized_coordinates=True,
            line_thickness=4,
            min_score_thresh=0.80)

            threshold=0.80

            cv2.imwrite(out+"\\{}.jpg".format(img_name),image)
            # no window is ever created with cv2.imshow, so these two calls
            # do nothing useful here
            cv2.waitKey(0)
            cv2.destroyAllWindows()




            objects = []
            with open(out+'/metadata.csv','a') as csv_file:
                writer = csv.writer(csv_file)
                for index, value in enumerate(classes[0]):
                    object_dict = {}
                    if scores[0, index] > threshold:
                        object_dict[(category_index.get(value)).get('name').encode('utf8')] = scores[0, index]
                        objects.append(object_dict)
                writer.writerow(objects)
                print (objects)



            filename_string='coordinates_data'

            # use context managers so the file handles are closed again
            with open("json/"+filename_string+".json", "a") as textfile:
                textfile.write(json.dumps(coordinates))
                textfile.write("\n")

            with open("json/"+"img_names"+".json", "a") as textfile:
                textfile.write(json.dumps(PATH_TO_IMAGE))
                textfile.write("\n")
    # img_list is emptied only here, after the folder loop has finished,
    # so later folders also reprocess every earlier folder's images
    img_list=[]


model_name = 'inference_graph'
ckpt_path = "C:\\new_multi_cat\\models\\research\\object_detection\\inference_graph\\frozen_inference_graph.pb"
label_path = "C:\\new_multi_cat\\models\\research\\object_detection\\training\\labelmap.pbtxt"
test_img_path = "C:\\Python35\\target_non_target\\Target_images_new\\*"
img_output = "C:\\new_multi_cat\\models\\research\\object_detection\\my_imgs"

recognize_object(model_name, ckpt_path, label_path, test_img_path, img_output)

Suppose there is a folder Y with subfolders C and D. I want each subfolder's data to be written to its own output folder. At the moment the data for subfolder C is written perfectly, but when the data for subfolder D is written, folder C's data is appended to D as well. Is this an indentation problem, or something else?
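The behaviour can be reproduced without any of the detection code. A minimal sketch of the accumulation, with hypothetical folder and file names:

img_list = []
for folder in ["C", "D"]:
    for f in [folder + "/1.jpg", folder + "/2.jpg"]:
        img_list.append(f)
    print(folder, "->", img_list)
# C -> ['C/1.jpg', 'C/2.jpg']
# D -> ['C/1.jpg', 'C/2.jpg', 'D/1.jpg', 'D/2.jpg']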

1 answer:

Answer 0 (score: 1)

Indent the second img_list=[] one more level; it currently sits outside the folder loop, so the list is only cleared after all folders have been processed and every later folder reprocesses the earlier folders' images.
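A minimal sketch of the corrected loop structure, with the per-image detection work elided; resetting the list at the top of each iteration is equivalent to indenting the trailing reset:

for folder in folders:
    folder_name = os.path.basename(folder)
    out = img_output + "\\" + folder_name
    os.makedirs(out, exist_ok=True)

    img_list = []                      # fresh list for every folder
    for f in glob(folder + "/*.jpg"):
        img_list.append(f)

    for PATH_TO_IMAGE in img_list:     # now only this folder's images
        ...                            # detection and writing as before

With this change, each folder's metadata.csv and output images contain only that folder's results.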