为什么我在珊瑚USB棒上的TFLite模型没有反应?

时间:2019-12-05 05:38:17

标签: python tensorflow tensorflow-lite realsense google-coral

我正在尝试在树莓派4上使用带有谷歌珊瑚USB加速器的真实感相机(而非深度相机),我已经解决了所有错误,现在处于显示的位置该图像在我的程序末尾,但是就注释而言,什么也没有发生。我认为注释部分正确,但是我不知道我的模型实际上正在处理图像。它与我已经在usb网络摄像头上运行的程序非常相似,仅适用于realsense。我已将参数放入代码中,供有兴趣自己尝试的人使用。任何指导将不胜感激!

# launch file with  *** python realsense_example4.py --model mobilenet_ssd_v2/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite --labels mobilenet_ssd_v2/coco_labels.txt --confidence 50 ***

import sys
import cv2
import time 
import argparse
import numpy as np
import pyrealsense2 as rs
from PIL import Image
from edgetpu.detection.engine import DetectionEngine

ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model", required=True, help="path to Tensorflow Lite object detection model")
ap.add_argument("-l", "--labels", required=True, help="path to labels file")
ap.add_argument("-c", "--confidence", type=float, default=0.3)

args = vars(ap.parse_args())
labels={}

for row in open(args["labels"]):
    (classID, label) = row.strip().split(maxsplit=1)
    labels[int(classID)] = label.strip()

print("Tensorflow Lite model loading...")
model = DetectionEngine(args["model"])

print("Initializing Realsense...")
pipeline = rs.pipeline()

config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

profile = pipeline.start(config)

depth_sensor = profile.get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()
print("Depth scale is:", depth_scale)

align_with = rs.stream.color
align = rs.align(align_with)


while True:
        frames = pipeline.wait_for_frames()
        depth_frame = frames.get_depth_frame()
        color_frame = frames.get_color_frame()

        depth_image = np.asanyarray(depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())

        #color_image = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)
        color_image = Image.fromarray(color_image)
        orig = np.asanyarray(color_frame.get_data())

        if not depth_frame or not color_frame:
            continue

        start = time.time()
        results = model.DetectWithImage(color_image, threshold=args["confidence"],
            keep_aspect_ratio=True, relative_coord=False)
        end = time.time()

        for r in results:
            bounding_box = r.bounding_box.flatten().astype("int")
            (startX, startY, endX, endY) = bounding_box 
            label = labels[r.label_id]
            print(labels[r.label_id])
            cv2.rectangle(orig, (startX, startY), (endX, endY),
                (0, 255, 0), 2)
            y = startY - 15 if startY - 15 > 15 else startY + 15
            text = "{}: {:.2f}%".format(label, r.score*100)
            cv2.putText(orig, text, (startX,y),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0),2) 

        cv2.namedWindow('Real Sense Object Detection', cv2.WINDOW_AUTOSIZE)
        cv2.imshow('Real Sense Object Detection', orig)
        key = cv2.waitKey(1) & 0xFF

        if key == ord("q") or key == 27:
            break

pipeline.stop()

0 个答案:

没有答案