I am developing an application in which, once the Flask server is running, my webcam should start immediately (it does!) and the YOLO detections (which work fine when I run the model on its own) should be displayed on the web page itself, but they are not. I'm not sure how to fix this; perhaps by looking at the code you will be able to help.
main.py
from flask import Flask, render_template, Response
from camera import VideoCamera
import tablib
import os

app = Flask(__name__)

dataset = tablib.Dataset()
with open(os.path.join(os.path.dirname(__file__), 'object.csv')) as f:
    dataset.csv = f.read()


@app.route('/')
def index():
    data = dataset.html
    return render_template('index.html', data=data)


def gen(camera):
    while True:
        count = 0
        frame = camera.get_frame()
        yield (b'--frame+str("%d"%count)\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')


@app.route('/video_feed')
def video_feed():
    return Response(gen(VideoCamera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')


@app.route('/table')
def display_table():
    # do something to create a pandas datatable
    df = pd.DataFrame(data=[[person], [Timestamps]])
    df_html = df.to_html()  # using pandas to autogenerate html
    return render_template('index.html', 'table_html=df_html')


if __name__ == '__main__':
    app.run(host='0.0.0.0', debug=True)
camera.py
import datetime
import cv2
import time
import numpy as np
from keras import backend as K
from keras.models import load_model
from yad2k.models.keras_yolo import yolo_head, yolo_eval
from yad2k.yolo_utils import read_classes, read_anchors, preprocess_webcam_image, draw_boxes, generate_colors
import pandas as pd


class VideoCamera(object):
    def __init__(self):
        # Using OpenCV to capture from device 0. If you have trouble capturing
        # from a webcam, comment the line below out and use a video file
        # instead.
        self.video = cv2.VideoCapture(0)
        self.class_names = read_classes("model_data/coco_classes.txt")
        anchors = read_anchors("model_data/yolo_anchors.txt")
        image_shape = (480., 640.)
        self.yolo_model = load_model("model_data/yolo.h5")
        #print(self.yolo_model.summary(), "fndkjcndkn")
        yolo_outputs = yolo_head(self.yolo_model.output, anchors, len(self.class_names))
        self.scores, self.boxes, self.classes = yolo_eval(yolo_outputs, image_shape)

    def predict(self, sess, frame):
        # Preprocess your image
        image, image_data = preprocess_webcam_image(frame, model_image_size=(608, 608))
        out_scores, out_boxes, out_classes = sess.run([self.scores, self.boxes, self.classes],
                                                      feed_dict={self.yolo_model.input: image_data,
                                                                 K.learning_phase(): 0})
        # Print predictions info
        #print('Found {} boxes'.format(len(out_boxes))) #here it prints object names!
        # Generate colors for drawing bounding boxes.
        colors = generate_colors(self.class_names)
        # Draw bounding boxes on the image file
        draw_boxes(image, out_scores, out_boxes, out_classes, self.class_names, colors)
        return np.array(image), out_boxes

    def __del__(self):
        self.video.release()
        cv2.destroyAllWindows()

    def get_frame(self):
        sess = K.get_session()
        while True:
            # Capture frame-by-frame
            grabbed, frame = self.video.read()
            if not grabbed:
                break
            # Run detection
            start = time.time()
            output_image, image = self.predict(sess, frame)
            #df = pd.DataFrame({'Object': [image], 'Timestamp': [datetime.datetime.now().strftime("%A, %d. %B %Y %I:%M%p")]}, index=[0])
            #df.to_csv('objects3.csv', sep=';', header = 2, index=False, encoding='utf-8', mode = 'a', columns=['Object', 'Timestamp'])
            #df.to_csv('object2.csv', header= 2, mode = 'a')
            end = time.time()
            cv2.imshow('', output_image)
            if (cv2.waitKey(1) & 0xFF == ord('q')):
                return 0
            #dt = datetime.datetime.now().strftime("%A, %d. %B %Y %I:%M%p")
            #print (dt)
            #print("Inference time: {:.2f}s".format(end - start))
        success, image = self.video.read()
        # We are using Motion JPEG, but OpenCV defaults to capture raw images,
        # so we must encode it into JPEG in order to correctly display the
        # video stream.
        ret, jpeg = cv2.imencode('.jpg', image)
        # Display the resulting frame
        cv2.imshow('', output_image)
        #
        # When everything done, release the capture
        stream.release()
        cv2.destroyAllWindows()
index.html
<html>
<head>
    <title>Video Streaming Demonstration</title>
    <link rel="stylesheet" type="text/css" href="{{ url_for('static', filename='css/style.css') }}"/>
</head>
<body>
    <h1>Keep Looking...</h1>
    <img id="bg" src="{{ url_for('video_feed') }}">
    {{ table_html | safe }}
    <table border="1" class="dataframe">
        <thead>
            <tr style="text-align: left;">
                <th>Objects</th>
                <th>Timestamps</th>
            </tr>
        </thead>
        <tbody>
            <tr>
                <td>{{ output_image }}</td>
                <td>{{ dt }}</td>
            </tr>
        </tbody>
    </table>
    <!-- new code -->
    <div class="table">
        {% block body %}
        {{ data|safe }}
        {% endblock %}
    </div>
</body>
</html>
Answer 0 (score: 0)
This example may work for you. It streams not from the camera but from a file. You could write that file from YOLO in one script and then read it from Flask; you only need to remove the OpenCV camera code. A sketch of how the asker's detection code could feed this pattern is shown after the example below.
test.py
from flask import Flask, render_template, Response
import cv2
import socket
import io

app = Flask(__name__)
vc = cv2.VideoCapture(0)


@app.route('/')
def index():
    """Video streaming ."""
    return render_template('index.html')


def gen():
    """Video streaming generator function."""
    while True:
        rval, frame = vc.read()
        cv2.imwrite('pic.jpg', frame)
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + open('pic.jpg', 'rb').read() + b'\r\n')


@app.route('/video_feed')
def video_feed():
    """Video streaming route. Put this in the src attribute of an img tag."""
    return Response(gen(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')


if __name__ == '__main__':
    app.run(debug=True, threaded=True)
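To adapt this to the asker's setup, here is a minimal sketch of how the YOLO loop could run in a separate script that keeps overwriting pic.jpg, reusing the VideoCamera class from camera.py above. This is only an assumption of how it could be wired, not part of the original post: the script name detect_to_file.py and the detect_to_file helper are illustrative, while pic.jpg matches the file served by gen() above.

detect_to_file.py
# Illustrative sketch, not from the original post: run YOLO detection in its
# own loop and keep overwriting pic.jpg, which the Flask gen() above streams.
import cv2
from keras import backend as K
from camera import VideoCamera  # the asker's class from camera.py


def detect_to_file(output_path='pic.jpg'):
    camera = VideoCamera()
    sess = K.get_session()
    while True:
        grabbed, frame = camera.video.read()
        if not grabbed:
            break
        # predict() returns the annotated frame (as a NumPy array) and the boxes
        annotated, boxes = camera.predict(sess, frame)
        # Depending on what preprocess_webcam_image returns, an RGB->BGR
        # conversion may be needed before writing with OpenCV.
        cv2.imwrite(output_path, annotated)


if __name__ == '__main__':
    detect_to_file()

As the answer notes, the camera lines in the Flask script (vc = cv2.VideoCapture(0) and vc.read()) would then be removed so that only the detection script owns the webcam, and gen() simply keeps re-reading pic.jpg; no cv2.imshow window is needed, since the browser reads the multipart stream from /video_feed.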