如何在烧瓶服务器上显示网络摄像头?

时间:2019-05-16 10:04:55

标签: python flask webcam yolo

我有两个工作文件。 Main.py和camera.py main.py文件包含从camera.py文件调用的函数,camera.py文件包含我的yolo_webcam代码。我收到一个NameError:名称'self'未定义。这是两个文件。

main.py

from flask import Flask, render_template, Response
from camera import VideoCamera

app = Flask(__name__)

@app.route('/')
def index():
    return render_template('index.html')

def gen(camera):
    while True:
        frame = camera.get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')

@app.route('/video_feed')
def video_feed():
    return Response(gen(VideoCamera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')

if __name__ == '__main__':
    app.run(host='0.0.0.0', debug=True)

camera.py

import cv2
import time
import numpy as np
from keras import backend as K
from keras.models import load_model
from yad2k.models.keras_yolo import yolo_head, yolo_eval
from yad2k.yolo_utils import read_classes, read_anchors, preprocess_webcam_image, draw_boxes, generate_colors

class VideoCamera(object):
    def __init__(self):
        # Using OpenCV to capture from device 0. If you have trouble capturing
        # from a webcam, comment the line below out and use a video file
        # instead.
        self.video = cv2.VideoCapture(0)
        class_names = read_classes("model_data/coco_classes.txt")
        anchors = read_anchors("model_data/yolo_anchors.txt")
        image_shape = (480., 640.)
        yolo_model = load_model("model_data/yolo.h5")
        print(yolo_model.summary())
        yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
        scores, boxes, classes = yolo_eval(yolo_outputs, image_shape)

        # If you decide to use video.mp4, you must have this file in the folder
        # as the main.py.
        # self.video = cv2.VideoCapture('video.mp4')

    def predict(sess, frame):
        # Preprocess your image
        image, image_data = preprocess_webcam_image(frame, model_image_size=(608, 608))

        # Run the session with the correct tensors and choose the correct placeholders in the feed_dict.
        # You'll need to use feed_dict={yolo_model.input: ... , K.learning_phase(): 0})
        out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes], feed_dict={yolo_model.input: image_data,
        K.learning_phase(): 0})
        # Print predictions info
        print('Found {} boxes'.format(len(out_boxes)))
        # Generate colors for drawing bounding boxes.
        colors = generate_colors(class_names)
        # Draw bounding boxes on the image file
        draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)

        return np.array(image)

    sess = K.get_session()
    while True:
        # Capture frame-by-frame
        grabbed, frame = self.video.read()
        if not grabbed:
            break

        #Run detection
        start = time.time()
        output_image = predict(sess, frame)
        end = time.time()
        print("Inference time: {:.2f}s".format(end - start))

    def __del__(self):
        self.video.release()
        cv2.destroyAllWindows()

    def get_frame(self):
        success, image = self.video.read()
        # We are using Motion JPEG, but OpenCV defaults to capture raw images,
        # so we must encode it into JPEG in order to correctly display the
        # video stream.
        ret, jpeg = cv2.imencode('.jpg', image)
        # Display the resulting frame
        cv2.imshow('', output_image)
        return jpeg.tobytes()

我要去哪里错了? 请帮助我,我是新手。

1 个答案:

答案 0 :(得分:0)

predict中,您将第一个方法参数命名为sess,而不是self。 因此,您不能

grabbed, frame = self.video.read()

您可能想尝试

grabbed, frame = sess.video.read()

或者,或者将第一个参数重命名为self,并相应地更改sesspredict的用法。