KLT tracker in OpenCV with Python is not working properly

Date: 2018-04-13 06:59:52

Tags: python opencv computer-vision video-tracking feature-tracking

I am using the KLT (Kanade-Lucas-Tomasi) tracking algorithm to track traffic flow in India. Traffic on one side of the road is tracked correctly, but the traffic moving through the frame on the other side is not detected at all.

The algorithm uses cv2.goodFeaturesToTrack and cv2.calcOpticalFlowPyrLK to produce the result.

[Image: video frame of the traffic with the tracked feature points overlaid]

In the image, you can see that the red and silver cars have no tracking features on them. The yellow auto on the left is not tracked either. Is there a reason for this? The corners are still there.

Feature parameters for cv2.goodFeaturesToTrack:

feature_params = dict( maxCorners = 500,   # max. number of points to locate
                       qualityLevel = 0.1,  # between 0 and 1; min. quality below which corners are rejected
                       minDistance = 7,   # min. Euclidean distance between detected corners
                       blockSize = 3 ) # size of the block used to compute the derivative covariance matrix over each pixel neighborhood

Function parameters for cv2.calcOpticalFlowPyrLK:

lk_params = dict( winSize  = (15,15),  # size of the search window at each pyramid level
                  maxLevel = 2,   # if set to 0, pyramids are not used (single level); if set to 1, two levels are used, and so on
                  criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
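
For reference, here is a minimal sketch of how the two calls fit together on a single frame pair (not the full script; it assumes the feature_params and lk_params dictionaries above, and the video file name is only a placeholder):

import cv2

cap = cv2.VideoCapture('traffic.mp4')   # placeholder file name

# detect corners in the first frame
ret, prev_frame = cap.read()
prev_gray = cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY)
p0 = cv2.goodFeaturesToTrack(prev_gray, mask=None, **feature_params)

# track those corners into the next frame
ret, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# p1: new point positions, st: 1 where tracking succeeded, err: tracking error
p1, st, err = cv2.calcOpticalFlowPyrLK(prev_gray, gray, p0, None, **lk_params)
good_new = p1[st == 1]
good_old = p0[st == 1]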

The video I have to process is 60 minutes long, and KLT stops tracking after about 5 minutes. Any suggestions or help would be great. Thanks.

2 Answers:

Answer 0 (score: 2):

Basically, you are doing the right thing; you just need to re-initialize the good features to track:

p0 = cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params)

say, every 5 frames or so. Hope that helps! Below is my code:

import cv2
import numpy as np

cap = cv2.VideoCapture('side.avi')
# params for ShiTomasi corner detection
feature_params = dict( maxCorners = 100,
                       qualityLevel = 0.3,
                       minDistance = 7,
                       blockSize = 7 )
# Parameters for lucas kanade optical flow
lk_params = dict( winSize  = (15,15),
                  maxLevel = 2,
                  criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
# Create some random colors
color = np.random.randint(0,255,(100,3))
# Take first frame and find corners in it
ret, old_frame = cap.read()
for i in range(60):   # skip ahead 60 frames before starting
    ret, old_frame = cap.read()
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
p0 = cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params)
print(p0)
# Create a mask image for drawing purposes
mask = np.zeros_like(old_frame)
while(1):
    ret,frame = cap.read()
    if not ret:  # stop when the video ends
        break
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    frame_no = cap.get(cv2.CAP_PROP_POS_FRAMES)
    if int(frame_no)%5 == 0:
        p0 = cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params)
    # calculate optical flow
    p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
    # Select good points
    good_new = p1[st==1]
    good_old = p0[st==1]
    # draw the tracks
    for i,(new,old) in enumerate(zip(good_new,good_old)):
        a,b = new.ravel()
        c,d = old.ravel()
        # drawing functions need integer pixel coordinates
        mask = cv2.line(mask, (int(a),int(b)),(int(c),int(d)), color[i].tolist(), 2)
        frame = cv2.circle(frame,(int(a),int(b)),5,color[i].tolist(),-1)
    img = cv2.add(frame,mask)
    cv2.imshow('frame',img)
    k = cv2.waitKey(2000) & 0xff
    if k == 27:
        break
    # Now update the previous frame and previous points
    old_gray = frame_gray.copy()
    p0 = good_new.reshape(-1,1,2)
cv2.destroyAllWindows()
cap.release()

Answer 1 (score: 1):

import numpy as np
import cv2

video_path = ''
output_file = ""     
cap = cv2.VideoCapture(video_path)

fourcc = cv2.VideoWriter_fourcc(*'DIVX')

# params for ShiTomasi corner detection
feature_params = dict( maxCorners = 500,   # max. number of points to locate
                       qualityLevel = 0.1,  # between 0 and 1; min. quality below which corners are rejected
                       minDistance = 7,   # min. Euclidean distance between detected corners
                       blockSize = 3 ) # size of the block used to compute the derivative covariance matrix over each pixel neighborhood

# Parameters for lucas kanade optical flow
lk_params = dict( winSize  = (15,15),  # size of the search window at each pyramid level
                  maxLevel = 2,   # if set to 0, pyramids are not used (single level); if set to 1, two levels are used, and so on
                  criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

''' criteria: termination criteria for the iterative search algorithm:
    stop after maxCount (TERM_CRITERIA_COUNT) iterations,
    or when the search window moves by less than epsilon (TERM_CRITERIA_EPS). '''


# Take first frame and find corners in it
ret, old_frame = cap.read()
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)  #use goodFeaturesToTrack to find the location of the good corner.

# Create a mask image for drawing purposes filed with zeros
mask = np.zeros_like(old_frame)

y = 0
is_begin = True # To save the output video
count = 1  # for the frame count
n = 50  # Frames refresh rate for feature generation

while True:
    ret,frame = cap.read()
    if frame is None:
        break
    processed = frame

    #Saving the Video
    if is_begin:
        h, w, _ = processed.shape
        out = cv2.VideoWriter(output_file, fourcc, 30, (w, h), True)
        is_begin = False

    # Convert to Grey Frame
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    if count%n == 0:  # Refresh the tracking features after every 50 frames
        cv2.imwrite('img/r{0:05d}.jpg'.format(y), img)  # requires an existing 'img' directory
        y += 1
        # re-detect corners on the current frame (reading another frame here would skip one)
        old_gray = frame_gray.copy()
        p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
        mask = np.zeros_like(old_frame)

    # calculate optical flow
    p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)

    # Select good points
    good_new = p1[st==1]
    good_old = p0[st==1]

    # draw the tracks
    for i,(new,old) in enumerate(zip(good_new,good_old)):
        a,b = new.ravel() #tmp new value
        c,d = old.ravel() #tmp old value
        #draws a line connecting the old point with the new point
        #(drawing functions need integer pixel coordinates)
        mask = cv2.line(mask, (int(a),int(b)),(int(c),int(d)), (0,255,0), 1)
        #draws the new point
        frame = cv2.circle(frame,(int(a),int(b)),2,(0,0,255), -1)
    img = cv2.add(frame,mask)

    out.write(img)
    cv2.imshow('frame',img)
    k = cv2.waitKey(30) & 0xff

    #Show the Output
    if k == 27:
        cv2.imshow('', img)
        break

    # Now update the previous frame and previous points
    old_gray = frame_gray.copy()
    p0 = good_new.reshape(-1,1,2)

    count += 1

# release and destroy all windows
cv2.destroyAllWindows()
cap.release()

I have added a refresh rate for goodFeaturesToTrack, which works, but we are still not able to get the complete tracks. That is what I am working on now.
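
One possible way to keep trajectories across a refresh (a sketch only, not tested on this video) is to append freshly detected corners to the points that are still being tracked instead of replacing them. The helper below is hypothetical and not part of either answer:

import numpy as np
import cv2

def refresh_points(gray, tracked_pts, feature_params, min_dist=7):
    # Detect new corners and keep only those that are not close to a point
    # already being tracked, then merge the two sets.
    new_pts = cv2.goodFeaturesToTrack(gray, mask=None, **feature_params)
    if new_pts is None:
        return tracked_pts
    if tracked_pts is None or len(tracked_pts) == 0:
        return new_pts
    existing = tracked_pts.reshape(-1, 2)
    keep = []
    for p in new_pts.reshape(-1, 2):
        # only add corners farther than min_dist from every tracked point
        if np.min(np.linalg.norm(existing - p, axis=1)) > min_dist:
            keep.append(p)
    if not keep:
        return tracked_pts
    merged = np.vstack([existing, np.array(keep)])
    return merged.reshape(-1, 1, 2).astype(np.float32)

Inside the if count%n == 0: branch, p0 = refresh_points(frame_gray, p0, feature_params) would then replace the direct call to cv2.goodFeaturesToTrack, and the mask would not need to be reset, so existing tracks can keep growing across refreshes.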