I have an original page in digital form and several scanned versions of the same page. My goal is to deskew the scanned pages so that they match the original page as closely as possible. I know I could fix the rotation using the probabilistic Hough transform described here, but the scanned pages also differ in paper size, because some people scaled the page to a different paper format. I think the findHomography() function in OpenCV, combined with SIFT/SURF keypoints, is exactly what I need to solve this problem. However, I can't get my deskew() function to work.
Most of my code comes from the following two sources: http://www.learnopencv.com/homography-examples-using-opencv-python-c/ and http://docs.opencv.org/3.1.0/d1/de0/tutorial_py_feature_homography.html.
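For reference, the rotation-only approach I mentioned would look roughly like this; a minimal sketch assuming cv2.Canny and cv2.HoughLinesP, which fixes rotation but not scale (which is why I want the homography approach):

import cv2
import numpy as np

def estimate_rotation(gray):
    # Detect long, roughly horizontal text lines and take their median angle.
    edges = cv2.Canny(gray, 50, 150)
    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, threshold=100,
                            minLineLength=gray.shape[1] // 4, maxLineGap=20)
    if lines is None:
        return 0.0
    angles = [np.degrees(np.arctan2(y2 - y1, x2 - x1))
              for x1, y1, x2, y2 in lines[:, 0]]
    return np.median(angles)

def rotate(gray, angle):
    # Rotating by the detected angle brings the text lines back to horizontal.
    h, w = gray.shape
    R = cv2.getRotationMatrix2D((w / 2, h / 2), angle, 1.0)
    return cv2.warpAffine(gray, R, (w, h), flags=cv2.INTER_CUBIC, borderValue=255)

# usage: fixed = rotate(scan_gray, estimate_rotation(scan_gray))

My actual attempt with findHomography() is below: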
import numpy as np
import cv2
from matplotlib import pyplot as plt


# FIXME: doesn't work
def deskew():
    im_out = cv2.warpPerspective(img1, M, (img2.shape[1], img2.shape[0]))
    plt.imshow(im_out, 'gray')
    plt.show()

# resizing images to improve speed
factor = 0.4
img1 = cv2.resize(cv2.imread("image.png", 0), None, fx=factor, fy=factor, interpolation=cv2.INTER_CUBIC)
img2 = cv2.resize(cv2.imread("imageSkewed.png", 0), None, fx=factor, fy=factor, interpolation=cv2.INTER_CUBIC)

surf = cv2.xfeatures2d.SURF_create()
kp1, des1 = surf.detectAndCompute(img1, None)
kp2, des2 = surf.detectAndCompute(img2, None)

FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)

# store all the good matches as per Lowe's ratio test.
good = []
for m, n in matches:
    if m.distance < 0.7 * n.distance:
        good.append(m)

MIN_MATCH_COUNT = 10
if len(good) > MIN_MATCH_COUNT:
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    matchesMask = mask.ravel().tolist()

    h, w = img1.shape
    pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
    dst = cv2.perspectiveTransform(pts, M)

    deskew()

    img2 = cv2.polylines(img2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)
else:
    print("Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT))
    matchesMask = None

# show matching keypoints
draw_params = dict(matchColor=(0, 255, 0),  # draw matches in green color
                   singlePointColor=None,
                   matchesMask=matchesMask,  # draw only inliers
                   flags=2)

img3 = cv2.drawMatches(img1, kp1, img2, kp2, good, None, **draw_params)
plt.imshow(img3, 'gray')
plt.show()
Answer 0 (score: 7)
It turns out I was very close to solving my own problem. Here is the working version of my code:
import numpy as np
import cv2
from matplotlib import pyplot as plt
import math


def deskew():
    im_out = cv2.warpPerspective(skewed_image, np.linalg.inv(M), (orig_image.shape[1], orig_image.shape[0]))
    plt.imshow(im_out, 'gray')
    plt.show()

orig_image = cv2.imread(r'image.png', 0)
skewed_image = cv2.imread(r'imageSkewed.png', 0)

surf = cv2.xfeatures2d.SURF_create(400)
kp1, des1 = surf.detectAndCompute(orig_image, None)
kp2, des2 = surf.detectAndCompute(skewed_image, None)

FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)

# store all the good matches as per Lowe's ratio test.
good = []
for m, n in matches:
    if m.distance < 0.7 * n.distance:
        good.append(m)

MIN_MATCH_COUNT = 10
if len(good) > MIN_MATCH_COUNT:
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

    # see https://ch.mathworks.com/help/images/examples/find-image-rotation-and-scale-using-automated-feature-matching.html for details
    ss = M[0, 1]
    sc = M[0, 0]
    scaleRecovered = math.sqrt(ss * ss + sc * sc)
    thetaRecovered = math.atan2(ss, sc) * 180 / math.pi
    print("Calculated scale difference: %.2f\nCalculated rotation difference: %.2f" % (scaleRecovered, thetaRecovered))

    deskew()
else:
    print("Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT))
    matchesMask = None
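The key change compared with the question's version is the direction of the warp: findHomography(src_pts, dst_pts, ...) estimates the homography mapping the original page onto the skewed scan, so the scan is deskewed by warping it with np.linalg.inv(M). The printed scale and rotation are recovered by treating the top-left 2x2 block of M as an approximate similarity transform, as in the linked MathWorks example. An equivalent variant (sketched below with illustrative names such as M_back) avoids the explicit inverse by estimating the homography in the opposite direction and warping with it directly:

# Hypothetical variant: estimate the skewed -> original mapping directly,
# so no matrix inversion is needed before warping.
M_back, mask_back = cv2.findHomography(dst_pts, src_pts, cv2.RANSAC, 5.0)
im_out = cv2.warpPerspective(skewed_image, M_back,
                             (orig_image.shape[1], orig_image.shape[0]))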
Answer 1 (score: 0)
Here is an implementation that works with OpenCV 2.4.x; the answer above uses OpenCV 3.x:
import numpy as np
import cv2
import os
import errno
from os import path

SRC_FOLDER = "images/source/{YOUR_SOURCE_IMAGE_DIR}"
OUT_FOLDER = "images/output"

DETECTOR = cv2.SURF()

FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
MATCHER = cv2.FlannBasedMatcher(index_params, search_params)

MIN_MATCH_COUNT = 10


def deskew(base_image_shape, skewed_image, homography):
    return cv2.warpPerspective(skewed_image, np.linalg.inv(homography), (base_image_shape[1], base_image_shape[0]))


def compute_points_and_descriptors(image):
    """
    :param image: numpy.ndarray
    :return: keypoints, descriptors
    """
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    eq_hist_gray_image = cv2.equalizeHist(gray_image)
    return DETECTOR.detectAndCompute(eq_hist_gray_image, None)


def find_homography(base_keypoints, base_descriptors, skewed_image):
    skewed_keypoints, skewed_descriptors = compute_points_and_descriptors(skewed_image)
    matches = MATCHER.knnMatch(base_descriptors, skewed_descriptors, k=2)

    good = []
    for m, n in matches:
        if m.distance < 0.7 * n.distance:
            good.append(m)

    # print(len(good))
    if len(good) < MIN_MATCH_COUNT:
        return None

    base_pts = np.float32([base_keypoints[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    skewed_pts = np.float32([skewed_keypoints[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

    homography, _ = cv2.findHomography(base_pts, skewed_pts, cv2.RANSAC, 5.0)
    return homography


if __name__ == "__main__":
    src_contents = os.walk(SRC_FOLDER)
    dirpath, _, fnames = src_contents.next()

    image_dir = os.path.split(dirpath)[-1]
    output_dir = os.path.join(OUT_FOLDER, image_dir)

    try:
        os.makedirs(output_dir)
    except OSError as exception:
        if exception.errno != errno.EEXIST:
            raise

    print "Processing '" + image_dir + "' folder..."

    image_files = sorted([os.path.join(dirpath, name) for name in fnames])
    img_stack = [cv2.imread(name) for name in image_files]

    base_image = img_stack[0]
    base_image_shape = base_image.shape
    base_keypoints, base_descriptors = compute_points_and_descriptors(base_image)
    cv2.imwrite(path.join(output_dir, "output0.png"), base_image)

    for ix, image in enumerate(img_stack[1:]):
        homography = find_homography(base_keypoints, base_descriptors, image)
        deskewed_image = deskew(base_image_shape, image, homography)
        cv2.imwrite(path.join(output_dir, "output{}.png".format(ix + 1)), deskewed_image)

    print("Done")