How to detect a game board in an image

Date: 2019-07-24 14:22:43

Tags: python image opencv image-processing computer-vision

I need to find a Go board in a photo and detect the stones on it, using OpenCV (cv2) in Python. Right now I'm stuck on board detection: the detected contour contains strange stray dots and I don't understand how to get rid of them. This is what I have so far:

from skimage import exposure
import numpy as np
import argparse
import imutils
import cv2

ap = argparse.ArgumentParser()
ap.add_argument("-r", type=int, default=800,
    help="target image height (used to compute the resize ratio)")
args = vars(ap.parse_args())

img = cv2.imread('3.jpg') # load image (resized below to the requested height)
ratio = img.shape[0] / args["r"]
orig = img.copy()
img = imutils.resize(img, height = args["r"])

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) 
gray = cv2.bilateralFilter(gray, 11, 17, 17)
edged = cv2.Canny(gray, 30, 200)

cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE) # find contours; sorted by area below
cnts = imutils.grab_contours(cnts)
cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:10]
screenCnt = None

for cnt in cnts:

    rect = cv2.minAreaRect(cnt) # fit a minimum-area rectangle to the contour
    box = cv2.boxPoints(rect)
    box = np.int0(box)

    area = int(rect[1][0]*rect[1][1]) # area of the fitted rectangle (approximates contour area)

    if area > 300000:
        print(area)
        cv2.drawContours(img, cnt, -1, (255, 0, 0), 4) # draws isolated dots (see the note below)
        hull = cv2.convexHull(cnt) # calculating convex hull
        cv2.drawContours(img, [hull], -1, (0, 0, 255), 3)

cv2.imshow("death", img)
cv2.waitKey(0)
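As an aside on the stray dots: cv2.drawContours expects a list of contours, so passing cnt directly makes each point of the contour be treated as its own one-point contour, which is exactly what draws the isolated dots. Wrapping the contour in a list draws one connected curve instead:

# pass the contour inside a list so it is drawn as a connected curve
cv2.drawContours(img, [cnt], -1, (255, 0, 0), 4)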

Source: (source image)

Result: (result image)

2 Answers:

Answer 0 (score: 5)

Here's an approach to detect the board:

  • Convert the image to grayscale and blur with a bilateral filter
  • Threshold to obtain a binary image
  • Find contours
  • Filter using contour area and contour shape approximation
  • Perform a perspective transform to extract the board ROI

Thresholded image: (image)

Find contours, then filter using cv2.contourArea() with a minimum threshold area. In addition, use contour approximation with cv2.approxPolyDP() as a second filter: essentially, if a contour approximates to four vertices, it must be a square or rectangle (the board).
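That filter in isolation looks something like this (a minimal sketch; the helper name looks_like_board is mine, and the numbers match the full listing below):

import cv2

def looks_like_board(c, min_area=150000):
    # a board candidate = large area + a four-vertex polygon approximation
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.015 * peri, True)
    return cv2.contourArea(c) > min_area and len(approx) == 4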

We can also extract the board region onto a mask:

Finally, to get a top-down view of the board, we can perform a perspective transform:

import cv2
import numpy as np

def perspective_transform(image, corners):
    def order_corner_points(corners):
        # Separate corners into individual points
        # Index 0 - top-right
        #       1 - top-left
        #       2 - bottom-left
        #       3 - bottom-right
        corners = [(corner[0][0], corner[0][1]) for corner in corners]
        top_r, top_l, bottom_l, bottom_r = corners[0], corners[1], corners[2], corners[3]
        return (top_l, top_r, bottom_r, bottom_l)

    # Order points in clockwise order
    ordered_corners = order_corner_points(corners)
    top_l, top_r, bottom_r, bottom_l = ordered_corners

    # Determine width of new image which is the max distance between 
    # (bottom right and bottom left) or (top right and top left) x-coordinates
    width_A = np.sqrt(((bottom_r[0] - bottom_l[0]) ** 2) + ((bottom_r[1] - bottom_l[1]) ** 2))
    width_B = np.sqrt(((top_r[0] - top_l[0]) ** 2) + ((top_r[1] - top_l[1]) ** 2))
    width = max(int(width_A), int(width_B))

    # Determine height of new image which is the max distance between 
    # (top right and bottom right) or (top left and bottom left) y-coordinates
    height_A = np.sqrt(((top_r[0] - bottom_r[0]) ** 2) + ((top_r[1] - bottom_r[1]) ** 2))
    height_B = np.sqrt(((top_l[0] - bottom_l[0]) ** 2) + ((top_l[1] - bottom_l[1]) ** 2))
    height = max(int(height_A), int(height_B))

    # Construct new points to obtain top-down view of image in 
    # top_r, top_l, bottom_l, bottom_r order
    dimensions = np.array([[0, 0], [width - 1, 0], [width - 1, height - 1], 
                    [0, height - 1]], dtype = "float32")

    # Convert to Numpy format
    ordered_corners = np.array(ordered_corners, dtype="float32")

    # Find perspective transform matrix
    matrix = cv2.getPerspectiveTransform(ordered_corners, dimensions)

    # Return the transformed image
    return cv2.warpPerspective(image, matrix, (width, height))

image = cv2.imread('1.jpg')
original = image.copy()
blur = cv2.bilateralFilter(image, 9, 75, 75)
gray = cv2.cvtColor(blur, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 40, 255, cv2.THRESH_BINARY_INV)[1]

cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]

mask = np.zeros(image.shape, dtype=np.uint8)
transformed = None  # set only if a board-like contour is found
for c in cnts:
    area = cv2.contourArea(c)
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.015 * peri, True)

    if area > 150000 and len(approx) == 4:
        cv2.drawContours(image,[c], 0, (36,255,12), 3)
        cv2.drawContours(mask,[c], 0, (255,255,255), -1)
        transformed = perspective_transform(original, approx)

mask = cv2.bitwise_and(mask, original)

cv2.imshow('thresh', thresh)
cv2.imshow('image', image)
cv2.imshow('mask', mask)
if transformed is not None:
    cv2.imshow('transformed', transformed)
cv2.waitKey()
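A note on the fixed threshold: cv2.threshold(gray, 40, ...) assumes the board region is much darker than the rest of the photo, which holds here but may not generalize to other lighting. If it doesn't, Otsu's method (a substitution of mine, not part of the original answer) picks the threshold automatically:

# let Otsu's method pick the threshold instead of hard-coding 40
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]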

Answer 1 (score: 2)

I also worked on a similar board-detection task. I used two different approaches. The first is similar to nathancy's answer, so I don't think I need to post that one; the second is a template-based approach (I used SIFT). Here is an example:

Template image: (image)

Example query image: (image)

Result: (image)

Code:

import numpy as np
import cv2
from matplotlib import pyplot as plt
import os


MIN_MATCH_COUNT = 5


template_image = cv2.imread('go_board_template.png')
template_image_gray = cv2.cvtColor(template_image, cv2.COLOR_BGR2GRAY)


# Initiate SIFT detector
# (note: in OpenCV >= 4.4, SIFT lives in the main module as cv2.SIFT_create();
# cv2.xfeatures2d.SIFT_create() is the contrib location, current as of 2019)
sift = cv2.xfeatures2d.SIFT_create()


# find the keypoints and descriptors with SIFT in template image
kp_template, des_template = sift.detectAndCompute(template_image_gray, None)


FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks = 50)

flann = cv2.FlannBasedMatcher(index_params, search_params)


img = cv2.imread("1.jpg")  #  use second parameter 0 for auto gray conversion?


#  convert image to gray
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

#  find the keypoints and descriptors with SIFT in the query image
#  (use the grayscale version computed above)
kp_img, des_img = sift.detectAndCompute(img_gray, None)

#  get image dimension info
img_height, img_width = img_gray.shape
print("Image height:{}, image width:{}".format(img_height, img_width))


matches = flann.knnMatch(des_template,des_img,k=2)


# store all the good matches as per Lowe's ratio test.
good = []
for m,n in matches:
    if m.distance < 0.7*n.distance:
        good.append(m)


if len(good)>MIN_MATCH_COUNT:
    src_pts = np.float32([ kp_template[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
    dst_pts = np.float32([ kp_img[m.trainIdx].pt for m in good ]).reshape(-1,1,2)

    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)
    matchesMask = mask.ravel().tolist()

    h,w = template_image_gray.shape
    pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
    dst = cv2.perspectiveTransform(pts,M)

    img_board = img.copy()
    cv2.polylines(img_board,[np.int32(dst)],True,255,10, cv2.LINE_AA)
    """
    draw_params = dict(matchColor = (0,255,0), # draw matches in green color
                   singlePointColor = None,
                   matchesMask = matchesMask, # draw only inliers
                   flags = 2)

    img3 = cv2.drawMatches(template_image,kp_template,img,kp_img,good,None,**draw_params)
    """
    plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    plt.show()

    #  get axis aligned bounding box for chessboard in input image
    x,y,w,h = cv2.boundingRect(dst)
    img_crop = img.copy()
    cv2.rectangle(img_crop,(x,y),(x+w,y+h),(0,0,255),5)


    #  draw OBB and AABB
    fig = plt.figure()
    ax1 = fig.add_subplot(121)
    ax2 = fig.add_subplot(122)
    ax1.axis("off")
    ax2.axis("off")
    ax1.set_title('OBB')
    ax2.set_title('AABB')
    ax1.imshow(cv2.cvtColor(img_board, cv2.COLOR_BGR2RGB))
    ax2.imshow(cv2.cvtColor(img_crop, cv2.COLOR_BGR2RGB))
    plt.show()


    #  crop board
    cropped_img = img[y:y+h, x:x+w].copy()
    plt.imshow(cv2.cvtColor(cropped_img, cv2.COLOR_BGR2RGB))
    plt.show()

    #  convert cropped area to gray
    cropped_img_gray = cv2.cvtColor(cropped_img, cv2.COLOR_BGR2GRAY)
    plt.imshow(cropped_img_gray, cmap="gray")
    plt.show()

else:
    print("Not enough matches")