I have an image in which I need to detect an X symbol in a row.
Image:
As shown in the image above, there is an X symbol in one of the rows. I would like to know the X and Y coordinates of that symbol. Is there a way to find this symbol in this picture, or is it too small?
import cv2
import numpy as np

def calculateCenterSpot(results):
    startX, endX = results[0][0], results[0][2]
    startY, endY = results[0][1], results[0][3]
    centerSpotX = (endX - startX) / 2 + startX
    centerSpotY = (endY - startY) / 2 + startY
    return [centerSpotX, centerSpotY]

img = cv2.imread('crop_1.png')
res2 = img.copy()

cords = [[1278, 704, 1760, 1090]]
center = calculateCenterSpot(cords)
cv2.circle(img, (int(center[0]), int(center[1])), 1, (0,0,255), 30)
cv2.line(img, (int(center[0]), 0), (int(center[0]), img.shape[0]), (0,255,0), 10)
cv2.line(img, (0, int(center[1])), (img.shape[1], int(center[1])), (255,0,0), 10)

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# You can either use threshold or Canny edge for HoughLines().
_, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
#edges = cv2.Canny(gray, 50, 150, apertureSize=3)

# Perform HoughLines transform.
lines = cv2.HoughLines(thresh, 0.5, np.pi / 180, 1000)
for line in lines:
    for rho, theta in line:
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a * rho
        y0 = b * rho
        x1 = int(x0 + 5000 * (-b))
        y1 = int(y0 + 5000 * (a))
        x2 = int(x0 - 5000 * (-b))
        y2 = int(y0 - 5000 * (a))
        if x2 == int(center[0]):
            cv2.circle(img, (x2, y1), 100, (0,0,255), 30)
        if y2 == int(center[1]):
            print('hell2o')
        # cv2.line(res2, (x1,y1), (x2,y2), (0,0,255), 2)

# Display the result.
cv2.imwrite('h_res1.png', img)
cv2.imwrite('h_res3.png', res2)
cv2.imwrite('image.png', img)
I have already tried to do this with HoughLines, but it wasn't successful.
Answer 0 (score: 2)
An alternative method is to use template matching instead of cv2.HoughLines(). The idea is to search for and find the location of a template image within a larger image. To perform this method, the template slides over the input image (similar to 2D convolution), and a comparison method is applied at each position to measure pixel similarity. That is the basic idea of template matching. Unfortunately, this basic method is flawed: it only works if the template image has the same size as the desired item in the input image. So if the template image is smaller than the region to be found in the input image, this method will not work.
To get around this limitation, we can use np.linspace() to dynamically rescale the image for better template matching. On each iteration we resize the input image and keep track of the ratio. We keep resizing until the template image is larger than the resized image, while recording the highest correlation value. A higher correlation value means a better match. Once we have iterated through the various scales, we take the ratio with the best match and compute the coordinates of the bounding box to determine the ROI.
Using this screenshot as the template image:
Here's the result:
import cv2
import numpy as np

# Resizes an image and maintains aspect ratio
def maintain_aspect_ratio_resize(image, width=None, height=None, inter=cv2.INTER_AREA):
    # Grab the image size and initialize dimensions
    dim = None
    (h, w) = image.shape[:2]

    # Return original image if no need to resize
    if width is None and height is None:
        return image

    # We are resizing height if width is none
    if width is None:
        # Calculate the ratio of the height and construct the dimensions
        r = height / float(h)
        dim = (int(w * r), height)
    # We are resizing width if height is none
    else:
        # Calculate the ratio of the width and construct the dimensions
        r = width / float(w)
        dim = (width, int(h * r))

    # Return the resized image
    return cv2.resize(image, dim, interpolation=inter)

# Load template, convert to grayscale, perform canny edge detection
template = cv2.imread('template.png')
template = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
template = cv2.Canny(template, 50, 200)
(tH, tW) = template.shape[:2]
cv2.imshow("template", template)

# Load original image, convert to grayscale
original_image = cv2.imread('1.png')
gray = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)
found = None

# Dynamically rescale image for better template matching
for scale in np.linspace(0.1, 3.0, 20)[::-1]:

    # Resize image to scale and keep track of ratio
    resized = maintain_aspect_ratio_resize(gray, width=int(gray.shape[1] * scale))
    r = gray.shape[1] / float(resized.shape[1])

    # Stop if template image size is larger than resized image
    if resized.shape[0] < tH or resized.shape[1] < tW:
        break

    # Detect edges in resized image and apply template matching
    canny = cv2.Canny(resized, 50, 200)
    detected = cv2.matchTemplate(canny, template, cv2.TM_CCOEFF)
    (_, max_val, _, max_loc) = cv2.minMaxLoc(detected)

    # Uncomment this section for visualization
    '''
    clone = np.dstack([canny, canny, canny])
    cv2.rectangle(clone, (max_loc[0], max_loc[1]), (max_loc[0] + tW, max_loc[1] + tH), (0,255,0), 2)
    cv2.imshow('visualize', clone)
    cv2.waitKey(0)
    '''

    # Keep track of correlation value
    # Higher correlation means better match
    if found is None or max_val > found[0]:
        found = (max_val, max_loc, r)

# Compute coordinates of bounding box
(_, max_loc, r) = found
(start_x, start_y) = (int(max_loc[0] * r), int(max_loc[1] * r))
(end_x, end_y) = (int((max_loc[0] + tW) * r), int((max_loc[1] + tH) * r))

# Draw bounding box on ROI
cv2.rectangle(original_image, (start_x, start_y), (end_x, end_y), (0,255,0), 2)
cv2.imshow('detected', original_image)
cv2.imwrite('detected.png', original_image)
cv2.waitKey(0)
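To obtain the X and Y coordinates asked for in the question, one possible small follow-up (a sketch reusing the start_x/start_y/end_x/end_y values computed above) is to take the centre of the bounding box:

# Sketch: report the symbol's approximate position from the bounding box above
center_x = (start_x + end_x) // 2
center_y = (start_y + end_y) // 2
print('X symbol found at approximately ({}, {})'.format(center_x, center_y))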
Answer 1 (score: 1)
For multiple template images, you can use a for loop over however many different template images you have, and then use a threshold to find multiple template matches.
# Assumes img_gray (grayscale input), img_rgb (colour input) and
# templateAmount (number of template files) are defined beforehand.
for i in range(templateAmount):
    # Load each template as grayscale
    template = cv2.imread('template{}.png'.format(i), 0)
    w, h = template.shape[::-1]
    # Normalized cross-correlation against the input image
    res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
    threshold = 0.8
    # Keep every location whose score exceeds the threshold
    loc = np.where(res >= threshold)
    for pt in zip(*loc[::-1]):
        cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2)
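The snippet above assumes that img_gray, img_rgb and templateAmount already exist; a minimal setup sketch (with hypothetical file names) could look like this:

import cv2
import numpy as np

# Hypothetical setup for the loop above; file names are placeholders.
img_rgb = cv2.imread('rows.png')                      # colour image to draw matches on
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)  # grayscale copy used for matching
templateAmount = 3                                    # number of template{}.png files available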
Answer 2 (score: 0)
If you have multiple images in which you need to detect this X symbol, and if the X symbol is always the same and always has the same dimensions, you can run a two-dimensional convolution over each image, where the kernel you convolve with is the isolated X symbol you are trying to detect. You can then check the output of the 2D convolution for the pixel of maximum intensity; its normalized coordinates (x/w, y/h) have a high probability of being the normalized coordinates of the X symbol in the input image. The mathematical expression of the 2D convolution is:

(I * K)(x, y) = Σ_i Σ_j I(x − i, y − j) · K(i, j)

The kernel can be defined in OpenCV (make sure you keep only the cross on the background and nothing else) and then applied to the image.
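This answer gives no code; a minimal sketch of the idea, assuming hypothetical file names image.png and kernel.png (the latter containing only the isolated X on its background) and using cv2.filter2D for the correlation, could look like this:

import cv2
import numpy as np

# Hypothetical inputs: the X symbol is assumed dark on a light background.
image = cv2.imread('image.png', cv2.IMREAD_GRAYSCALE)
kernel_img = cv2.imread('kernel.png', cv2.IMREAD_GRAYSCALE)

# Invert so the X strokes become bright, then zero-mean the kernel so that
# uniform areas of the image do not produce a large response.
image_f = (255 - image).astype(np.float32)
kernel = (255 - kernel_img).astype(np.float32)
kernel -= kernel.mean()

# cv2.filter2D correlates the image with the kernel (a convolution with the
# flipped kernel); the response peaks where the image looks most like the X.
response = cv2.filter2D(image_f, cv2.CV_32F, kernel)

# The brightest response pixel is the most likely centre of the symbol.
_, _, _, max_loc = cv2.minMaxLoc(response)
h, w = image.shape
print('X symbol near (x, y) =', max_loc, '; normalized:', (max_loc[0] / w, max_loc[1] / h))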