Python OpenCV: finding correspondences between 2 images using Harris Corner Detection

Date: 2018-11-24 22:56:56

Tags: python opencv computer-vision corner-detection correspondence-analysis

My teacher gave us the following exercise:

Exercise

So far, the only step I have implemented is detecting the Harris corners of both images with cv2.cornerHarris() and placing the images next to each other.
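
Roughly, that step looks like the following minimal sketch (the file names and the Harris parameters are only placeholders, taken from the usual OpenCV tutorial values, not my real code):

import cv2
import numpy as np
from matplotlib import pyplot as plt

# load both images (placeholder file names) and convert to grayscale for cornerHarris
img1 = cv2.imread('left.png')
img2 = cv2.imread('right.png')
gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

# Harris corner response maps (blockSize=2, ksize=3, k=0.04 as in the OpenCV tutorial)
dst1 = cv2.cornerHarris(np.float32(gray1), 2, 3, 0.04)
dst2 = cv2.cornerHarris(np.float32(gray2), 2, 3, 0.04)

# mark strong responses in red and show the two images next to each other
img1[dst1 > 0.01 * dst1.max()] = [0, 0, 255]
img2[dst2 > 0.01 * dst2.max()] = [0, 0, 255]
side_by_side = np.hstack((img1, img2))          # assumes both images have the same height
plt.imshow(cv2.cvtColor(side_by_side, cv2.COLOR_BGR2RGB))
plt.show()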

Now I do not know how to get the corners themselves and the region around each corner, so that I can build a template that can be used for template matching.
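
From what I understand, the region around a corner would just be a fixed-size slice of the image array centered on the corner coordinate, something like the sketch below (again with placeholder file names and parameters), but I am not sure whether this is the right way to build the template:

import cv2
import numpy as np

gray1 = cv2.cvtColor(cv2.imread('left.png'), cv2.COLOR_BGR2GRAY)   # placeholder file name

# coordinates of strong Harris responses (x = column, y = row)
dst1 = cv2.cornerHarris(np.float32(gray1), 2, 3, 0.04)
ys, xs = np.where(dst1 > 0.01 * dst1.max())
x, y = xs[0], ys[0]        # take one corner as an example

w = 7  # half-size of the patch
# skip corners that are too close to the border, then slice the (2*w+1) x (2*w+1) neighbourhood
if w <= x < gray1.shape[1] - w and w <= y < gray1.shape[0] - w:
    template = gray1[y - w:y + w + 1, x - w:x + w + 1]
    print(template.shape)  # (15, 15) patch usable as a template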

I hope that once I get the hang of this step, I can solve the rest of the exercise on my own. Maybe someone can help me? A short explanation of how it works would be great, so that I can learn something from it :)

Here is my current code:


1 Answer:

Answer 0: (score: 0)

You can try my code below:

import numpy as np
import cv2
from matplotlib import pyplot as plt

"""
Difference between goodFeaturesToTrack and the Harris detector:
the main difference is that with goodFeaturesToTrack you specify the
minimum distance between points, the quality level and the maximum
number of corners to detect.

"""
# You can use this block to detect the Harris corners instead of goodFeaturesToTrack:

#dst1 = cv2.cornerHarris(gray1, 5, 7, 0.04)
#ret1, dst1 = cv2.threshold(dst1, 0.1 * dst1.max(), 255, 0)
#dst1 = np.uint8(dst1)
#ret1, labels1, stats1, centroids1 = cv2.connectedComponentsWithStats(dst1)
#criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)
#corners1 = cv2.cornerSubPix(gray1, np.float32(centroids1), (5, 5), (-1, -1), criteria)
#corners1 = np.intp(corners1)


def correlation_coefficient(window1, window2):
    # normalized cross-correlation (NCC) of two equally sized patches
    product = np.mean((window1 - window1.mean()) * (window2 - window2.mean()))
    stds = window1.std() * window2.std()
    if stds == 0:
        return 0
    else:
        product /= stds
        return product


# half-size of the matching window (the full window is 14 x 14 pixels)
window_size_width = 7
window_size_height = 7
lineThickness = 2

img1 = cv2.imread('church_left.png')
img2 = cv2.imread('church_right.png')
height, width, ch = img1.shape  # shape is (rows, cols, channels)
img2_copy = img2.copy()
gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

# detect up to 30 strong corners in each image
corners1 = cv2.goodFeaturesToTrack(gray1, 30, 0.01, 5)
corners1 = np.intp(corners1)

corners2 = cv2.goodFeaturesToTrack(gray2, 30, 0.01, 5)
corners2 = np.intp(corners2)

corners_windows1 = []

# mark the detected corners so they are visible in the plots
for i in corners1:
    x, y = i.ravel()
    cv2.circle(img1, (x, y), 3, 255, -1)

corners_windows2 = []
for i in corners2:
    x, y = i.ravel()
    cv2.circle(img2, (x, y), 3, 255, -1)

plt.imshow(img1), plt.show()

# for each corner in img1, find the most similar corner window in img2,
# once with SSD (lower is better) and once with NCC (higher is better)
methods = ['SSD', 'NCC']
for method in methods:
    matches = []
    for id1, i in enumerate(corners1):
        x1, y1 = i.ravel()
        if y1 - window_size_height < 0 or y1 + window_size_height > height or x1 - window_size_width < 0 or x1 + window_size_width > width:
            continue
        pt1 = (x1, y1)
        print("pt1: ", pt1)
        template = img1[y1 - window_size_height:y1 + window_size_height, x1 - window_size_width:x1 + window_size_width]
        best_score = None
        pt2 = None
        for id2, j in enumerate(corners2):
            x2, y2 = j.ravel()

            if y2 - window_size_height < 0 or y2 + window_size_height > height or x2 - window_size_width < 0 or x2 + window_size_width > width:
                continue
            window2 = img2[y2 - window_size_height:y2 + window_size_height,
                      x2 - window_size_width:x2 + window_size_width]
            if method == 'SSD':
                # sum of squared differences: cast to float to avoid uint8 overflow, lower is better
                score = np.sum((template.astype(np.float32) - window2.astype(np.float32)) ** 2)
                better = best_score is None or score < best_score
            elif method == 'NCC':
                # normalized cross-correlation: higher is better
                score = correlation_coefficient(template, window2)
                better = best_score is None or score > best_score
            if better:
                best_score = score
                # shift x2 by the width of the left image so the point lands on img2 in the stacked view
                pt2 = (x2 + width, y2)
        if pt2 is not None:
            matches.append((pt1, pt2))
    stacked_img = np.hstack((img1, img2))
    # show the first 15 matches
    for match in matches[:15]:
        cv2.line(stacked_img, match[0], match[1], (0, 255, 0), lineThickness)
    plt.imshow(stacked_img), plt.show()
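
As a side note, OpenCV can also do the sliding-window comparison for you: cv2.matchTemplate with cv2.TM_SQDIFF corresponds to the SSD measure used above, and cv2.TM_CCOEFF_NORMED to the zero-mean normalized correlation computed by correlation_coefficient. A rough sketch (the corner choice and window size are only examples):

import cv2
import numpy as np

img1 = cv2.imread('church_left.png')
img2 = cv2.imread('church_right.png')
gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

# cut a 15x15 template around one detected corner (border check omitted for brevity)
corners1 = np.intp(cv2.goodFeaturesToTrack(gray1, 30, 0.01, 5))
x1, y1 = corners1[0].ravel()
template = gray1[y1 - 7:y1 + 8, x1 - 7:x1 + 8]

# scan the whole right image for the best match of that template
res_ssd = cv2.matchTemplate(gray2, template, cv2.TM_SQDIFF)         # lower is better
res_ncc = cv2.matchTemplate(gray2, template, cv2.TM_CCOEFF_NORMED)  # higher is better
_, _, ssd_loc, _ = cv2.minMaxLoc(res_ssd)   # top-left corner of the best SSD window
_, _, _, ncc_loc = cv2.minMaxLoc(res_ncc)   # top-left corner of the best NCC window
print(ssd_loc, ncc_loc)

From there you could draw the line between the corner in the left image and the matched location on the stacked image exactly as above.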