Streaming to a website instead of a window with OpenCV

Date: 2019-04-19 18:41:51

Tags: python opencv web video-streaming

Person-detection program

The PeopleTracker class:

import cv2
import imutils
import numpy as np
from imutils.object_detection import non_max_suppression
from sklearn.cluster import DBSCAN

class PeopleTracker:
    hog = cv2.HOGDescriptor()
    caps = cv2.VideoCapture(r'C:/Users/Emyr/Documents/Jupyter/pedestrian-detection/video/Ped4.MOV')
    count = int(caps.get(cv2.CAP_PROP_FRAME_COUNT))
    center = []
    recCount = 0
    pick = 0
    #          Red       Yellow      Blue      Green     Purple
    colors = [(255,0,0),(255,255,0),(0,0,255),(0,128,0),(128,0,128)]

    def BBoxes(self, frame):
        #frame = imutils.resize(frame, width = min(frame.shape[0], frame.shape[1]))
        frame = imutils.resize(frame, width=1000, height=1000)

        # detect people in the image
        (rects, weights) = self.hog.detectMultiScale(frame, winStride=(1, 1), padding=(3, 3), scale=0.5)

        # apply non-maxima suppression to the bounding boxes using a
        # fairly large overlap threshold to try to maintain overlapping
        # boxes that are still people
        rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
        self.pick = non_max_suppression(rects, probs=None, overlapThresh=0.7)

        # draw the final bounding boxes and count the detections
        self.recCount = 0

        for (xA, yA, xB, yB) in self.pick:

            #cv2.rectangle(frame, (xA, yA), (xB, yB), (0, 255, 0), 2)

            CentxPos = int((xA + xB) / 2)
            CentyPos = int((yA + yB) / 2)

            cv2.circle(frame, (CentxPos, CentyPos), 5, (0, 255, 0), -1)
            self.recCount += 1

            if len(rects) > 1:
                self.center.append([CentxPos, CentyPos])

        return frame


    def Clustering(self, frame):

        db = DBSCAN(eps=70, min_samples=2).fit(self.center)
        labels = db.labels_

        # Number of clusters in labels, ignoring noise if present.
        n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
        n_noise_ = list(labels).count(-1)
        #print("Labels: ", labels)

        # Black removed and is used for noise instead.
        unique_labels = set(labels)
        #print("Unique Labels: ", unique_labels)

        #colors = plt.cm.rainbow(np.linspace(0, 255, len(unique_labels)))
        #colors = [(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)) for k in range(len(unique_labels))]
        #print(self.colors)

        i = 0
        for (xA, yA, xB, yB) in self.pick:
            if labels[i] == -1:
                # noise points get a black box
                cv2.rectangle(frame, (xA, yA), (xB, yB), (0, 0, 0), 2)
            else:
                # clustered points get the colour assigned to their cluster label
                cv2.rectangle(frame, (xA, yA), (xB, yB),
                              (self.colors[labels[i]][0], self.colors[labels[i]][1], self.colors[labels[i]][2]), 2)
            i += 1

        #print("Colours: ", colors)
        center = np.asarray(self.center)

        #fig, ax = plt.subplots()
        #ax.set_xlim(0, frame.shape[1])
        #ax.set_ylim(frame.shape[0], 0)

        #for k, col in zip(unique_labels, colors):
        #    if k == -1:
        #        # Black used for noise.
        #        col = [0, 0, 0, 1]
        #    class_member_mask = (labels == k)
        #    xy = center[class_member_mask]
        #    plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col), markeredgecolor='k', markersize=8)

def main():

    PT = PeopleTracker()
    PT.hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

    while PT.count > 1:

        PT.center = []

        ret, frame = PT.caps.read()

        frame = PT.BBoxes(frame)

        if PT.recCount >= 2:

            PT.Clustering(frame)

            #plt.title('Estimated number of clusters: %d' % n_clusters_)
            #plt.show()
            cv2.imshow("Tracker", frame)
            cv2.waitKey(1)
            #cv2.destroyAllWindows()
            PT.count = PT.count - 1

        else:

            cv2.imshow("Tracker", frame)
            cv2.waitKey(1)
            #cv2.destroyAllWindows()
            PT.count = PT.count - 1


if __name__ == '__main__':
    main()

[screenshot]

The code I'm currently using displays the person-detection video stream in a window (as in the linked screenshot). If possible, I'd like to know whether there is some way to send the video feed to the website I'm developing instead of using a window?

Thanks in advance :)
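
One way to frame the problem: the display loop in main() has to become a generator that yields each processed frame instead of calling cv2.imshow, because a web server pulls frames one at a time. A minimal sketch of that refactor, assuming the PeopleTracker class above (the name processed_frames is illustrative, not part of the original code):

def processed_frames():
    # Sketch: yield annotated frames instead of showing them in a window.
    PT = PeopleTracker()
    PT.hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

    while PT.count > 1:
        PT.center = []
        ret, frame = PT.caps.read()
        if not ret:
            break
        frame = PT.BBoxes(frame)
        if PT.recCount >= 2:
            PT.Clustering(frame)
        PT.count -= 1
        yield frame   # hand the processed frame to whatever serves it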

1 Answer:

Answer 0 (score: 0)

I couldn't get that to work directly, so in the end I used Flask. The problem is that the video I'm displaying is the original footage, not the frames processed by OpenCV. I'd like to know whether anyone has ideas about where I could plug in the earlier code and use the "frame" variable for the video feed (see the sketch after the Flask code below).

from flask import Flask, render_template, Response
import cv2
import sys
import numpy

app = Flask(__name__)

@app.route('/')
def index():
    return render_template('index.html')

def gen():
    # Simple test generator that streams plain-text parts; bytes and str
    # cannot be concatenated directly, so the counter is encoded first.
    i = 1
    while i < 10:
        yield (b'--frame\r\n'
               b'Content-Type: text/plain\r\n\r\n' + str(i).encode() + b'\r\n')
        i += 1

def get_frame():
    camera = cv2.VideoCapture('IMG_2649.MOV')

    while True:
        retval, im = camera.read()
        if not retval:          # stop when the video ends
            break
        imgencode = cv2.imencode('.jpg', im)[1]
        stringData = imgencode.tobytes()
        # Each part must be served as image/jpeg so the browser renders it
        # as an MJPEG stream rather than plain text.
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + stringData + b'\r\n')

    camera.release()

@app.route('/calc')
def calc():
     return Response(get_frame(),mimetype='multipart/x-mixed-replace; boundary=frame')


if __name__ == '__main__':
    app.run(host='localhost', debug=True, threaded=True)
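
To stream the processed frames rather than the raw footage, get_frame() could run the detection pipeline on each frame before encoding it. A rough sketch under that assumption; it reuses the PeopleTracker class and methods from the question (which would need to be defined or imported in this module), and the structure is illustrative rather than the original code:

def get_frame():
    # Sketch: run detection/clustering, then stream each annotated frame
    # as JPEG over a multipart/x-mixed-replace response (MJPEG).
    # Assumes the PeopleTracker class from the question is available here.
    PT = PeopleTracker()
    PT.hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

    while True:
        ret, frame = PT.caps.read()
        if not ret:
            break
        PT.center = []
        frame = PT.BBoxes(frame)          # draw detection centres
        if PT.recCount >= 2:
            PT.Clustering(frame)          # draw cluster-coloured boxes
        ok, jpeg = cv2.imencode('.jpg', frame)
        if not ok:
            continue
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + jpeg.tobytes() + b'\r\n')

With that in place, the /calc route and the HTML below stay the same; the <img> tag simply renders whatever frames the generator yields.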

HTML code

   <html>
      <head>
        <title>Video Streaming Demonstration</title>
      </head>
      <body>
        <h1>Video Streaming Demonstration</h1>
        <img src="{{ url_for('calc') }}">
        <!-- <h1>{{ url_for('calc') }}</h1> -->
      </body>
    </html>