I am trying to play a video from a specific start time to a specific end time. It sort of works, but it gives this error:
[NULL @ 00000194b8334300] non-existing PPS 0 referenced
Something is obviously wrong, but I am not sure what.
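For reference, this is the kind of seek I am going for. Below is a minimal sketch of just the seek-and-play part (the file name and times here are placeholders, not my actual clip); my full script follows after it.

# Minimal seek-and-play sketch; 'sample.mp4' and the times are placeholders.
import cv2

def get_sec(time_str):
    # "MM:SS" -> seconds
    m, s = time_str.split(':')
    return int(m) * 60 + int(s)

cap = cv2.VideoCapture('sample.mp4')
fps = cap.get(cv2.CAP_PROP_FPS)
start_frame = int(fps * get_sec('00:10'))
end_frame = int(fps * get_sec('00:20'))

# jump to the start frame, then read until the end frame
cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
while cap.get(cv2.CAP_PROP_POS_FRAMES) < end_frame:
    ret, frame = cap.read()
    if not ret:
        break
    cv2.imshow('Frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()

My full script: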
from imutils.video import VideoStream
from imutils.video import FPS
import argparse
import imutils
import time
import cv2
import numpy as np


def get_sec(time_str):
    m, s = time_str.split(':')
    return int(m) * 60 + int(s)


if __name__ == '__main__':
    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-v", "--video", type=str, help="path to input video file")
    ap.add_argument("-t", "--tracker", type=str, default="kcf",
                    help="OpenCV object tracker type")
    ap.add_argument('--s', type=str, default='00:00', help='start time to crop')
    ap.add_argument('--e', type=str, default='00:00', help='end time to crop')
    #args = vars(ap.parse_args())
    args = ap.parse_args()

    cap = cv2.VideoCapture('C4-13.mp4')
    fps = cap.get(cv2.CAP_PROP_FPS)
    length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    stime = get_sec(args.s)
    starttime = args.s
    etime = get_sec(args.e)
    sframe = int(float(fps) * float(stime))
    #sframe=int((float(stime)*float(fps))/(float(length)*float(fps)))
    eframe = int(float(fps) * float(etime))
    cap.set(1, sframe)  # 1 == cv2.CAP_PROP_POS_FRAMES: seek to the start frame
    # extract the OpenCV version info
    (major, minor) = cv2.__version__.split(".")[:2]

    # if we are using OpenCV 3.2 OR BEFORE, we can use a special factory
    # function to create our object tracker
    if int(major) == 3 and int(minor) < 3:
        tracker = cv2.Tracker_create(args["tracker"].upper())
    # otherwise, for OpenCV 3.3 OR NEWER, we need to explicitly call the
    # appropriate object tracker constructor:
    else:
        # initialize a dictionary that maps strings to their corresponding
        # OpenCV object tracker implementations
        OPENCV_OBJECT_TRACKERS = {
            "csrt": cv2.TrackerCSRT_create,
            "kcf": cv2.TrackerKCF_create,
            "boosting": cv2.TrackerBoosting_create,
            "mil": cv2.TrackerMIL_create,
            "tld": cv2.TrackerTLD_create,
            "medianflow": cv2.TrackerMedianFlow_create,
            "mosse": cv2.TrackerMOSSE_create
        }

        # grab the appropriate object tracker using our dictionary of
        # OpenCV object tracker objects
        tracker = OPENCV_OBJECT_TRACKERS["csrt"]()
    # initialize the bounding box coordinates of the object we are going
    # to track
    initBB = None

    # if a video path was not supplied, grab the reference to the web cam
    '''if not args.get("video", False):
        print("[INFO] starting video stream...")
        vs = VideoStream(src=0).start()
        time.sleep(1.0)'''
    # otherwise, grab a reference to the video file
    #else:
        #vs = cv2.VideoCapture(args["video"])'''

    # initialize the FPS throughput estimator
    #fps = None'

    # loop over frames from the video stream
    #for seq in range(sframe,eframe):
    while True:
        # grab the current frame, then handle if we are using a
        # VideoStream or VideoCapture object
        ret, frame = cap.read()
        #frame = frame[1] if args.get("video", False) else frame

        # check to see if we have reached the end of the stream
        if frame is None:
            break

        # resize the frame (so we can process it faster) and grab the
        # frame dimensions
        frame = imutils.resize(frame, width=500)
        (H, W) = frame.shape[:2]

        # check to see if we are currently tracking an object
        if initBB is not None:
            # grab the new bounding box coordinates of the object
            (success, box) = tracker.update(frame)

            # check to see if the tracking was a success
            if success:
                (x, y, w, h) = [int(v) for v in box]
                cv2.rectangle(frame, (x, y), (x + w, y + h),
                              (0, 255, 0), 2)

            # update the FPS counter
            fps.update()
            fps.stop()

            # initialize the set of information we'll be displaying on
            # the frame
            info = [
                ("Tracker", args["tracker"]),
                ("Success", "Yes" if success else "No"),
                ("FPS", "{:.2f}".format(fps.fps())),
            ]

            # loop over the info tuples and draw them on our frame
            for (i, (k, v)) in enumerate(info):
                text = "{}: {}".format(k, v)
                cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

        # show the output frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the 's' key is selected, we are going to "select" a bounding
        # box to track
        if key == ord("s"):
            # select the bounding box of the object we want to track (make
            # sure you press ENTER or SPACE after selecting the ROI)
            initBB = cv2.selectROI("Frame", frame, fromCenter=False,
                                   showCrosshair=True)

            # start OpenCV object tracker using the supplied bounding box
            # coordinates, then start the FPS throughput estimator as well
            tracker.init(frame, initBB)
            fps = FPS().start()

        # if the `q` key was pressed, break from the loop
        elif key == ord("q"):
            break

    # if we are using a webcam, release the pointer
    if not args.get("video", False):
        vs.stop()
    # otherwise, release the file pointer
    else:
        vs.release()

    # close all windows
    cv2.destroyAllWindows()
This is the output while the video is playing:
[NULL @ 00000194b8334300] non-existing PPS 0 referenced
[NULL @ 00000194b8334300] non-existing PPS 0 referenced
[NULL @ 00000194b8334300] non-existing PPS 0 referenced
[NULL @ 00000194b8334300] non-existing PPS 0 referenced
[NULL @ 00000194b8334300] non-existing PPS 0 referenced
[NULL @ 00000194b8334300] non-existing PPS 0 referenced
[NULL @ 00000194b8334300] non-existing PPS 0 referenced
[NULL @ 00000194b8334300] non-existing PPS 0 referenced
[NULL @ 00000194b8334300] non-existing PPS 0 referenced
Traceback (most recent call last):
File "Tracking3.py", line 154, in <module>
if not args.get("video", False):