python opencv cv2 matchTemplate with transparency

Time: 2016-02-26 02:48:45

Tags: python opencv opencv3.0 matchtemplate

OpenCV 3.0.0 added the ability to specify a mask when performing templateMatch. When I specify a mask, I get an error. Here is my code:

# read the template emoji with the alpha channel
template = cv2.imread(imagePath, cv2.IMREAD_UNCHANGED)
channels = cv2.split(template)
zero_channel = np.zeros_like(channels[0])
mask = np.array(channels[3])
# all elements in alpha_channel that have value 0 are set to 1 in the mask matrix
mask[channels[3] == 0] = 1
# all elements in alpha_channel that have value 100 are set to 0 in the mask matrix
mask[channels[3] == 100] = 0
transparent_mask = cv2.merge([zero_channel, zero_channel, zero_channel, mask])

print image.shape, image.dtype  # (72, 232, 3) uint8
print template.shape, template.dtype  # (40, 40, 4) uint8
print transparent_mask.shape, transparent_mask.dtype  # (40, 40, 4) uint8

# find the matches
res = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED, mask=transparent_mask)

Template image (PNG with transparency): [image]

Source image: [image]

Is there a problem with the image types? I cannot find any example (in Python) that uses the new mask parameter of the matchTemplate method. Does anyone know how to create the mask?

4 Answers:

Answer 0 (score: 4)

I was able to get this working with Python 2.7.13 and opencv-python==3.1.0.4.

Here is the code.

import cv2
import numpy as np
import sys

if len(sys.argv) < 3:
    print 'Usage: python match.py <template.png> <image.png>'
    sys.exit()

template_path = sys.argv[1]
template = cv2.imread(template_path, cv2.IMREAD_UNCHANGED)
channels = cv2.split(template)
zero_channel = np.zeros_like(channels[0])
mask = np.array(channels[3])

image_path = sys.argv[2]
image = cv2.imread(image_path, cv2.IMREAD_UNCHANGED)

mask[channels[3] == 0] = 1
mask[channels[3] == 100] = 0

# transparent_mask = None
# According to http://www.devsplanet.com/question/35658323, we can only use
# cv2.TM_SQDIFF or cv2.TM_CCORR_NORMED
# All methods can be seen here:
# http://docs.opencv.org/2.4/doc/tutorials/imgproc/histograms/template_matching/template_matching.html#which-are-the-matching-methods-available-in-opencv
method = cv2.TM_SQDIFF  # R(x,y) = \sum _{x',y'} (T(x',y')-I(x+x',y+y'))^2 (essentially, sum of squared differences)

transparent_mask = cv2.merge([zero_channel, zero_channel, zero_channel, mask])
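# note: the BGR planes of this mask are all zero, so only the alpha plane carries non-zero weights; its shape matches the 4-channel template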
result = cv2.matchTemplate(image, template, method, mask=transparent_mask)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
print 'Lowest squared difference WITH mask', min_val

# Now we'll try it without the mask (should give a much larger error)
transparent_mask = None
result = cv2.matchTemplate(image, template, method, mask=transparent_mask)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
print 'Lowest squared difference WITHOUT mask', min_val

Here is a gist.

Basically, you need to make sure you use the right matching method.
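
To go from the score map to a location, keep in mind that TM_SQDIFF is a distance measure, so the best match is at min_loc (for the correlation-based methods it would be max_loc instead). Below is a minimal sketch that reuses the image, template and result variables from the script above; the output file name is just an example.

# locate and mark the best TM_SQDIFF match; assumes image, template and result from the script above
h, w = template.shape[:2]
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
top_left = min_loc  # with TM_SQDIFF, a smaller value means a better match
bottom_right = (top_left[0] + w, top_left[1] + h)

marked = image.copy()
cv2.rectangle(marked, top_left, bottom_right, (0, 0, 255), 2)
cv2.imwrite('match_debug.png', marked)  # 'match_debug.png' is an arbitrary example name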

Answer 1 (score: 1)

My environment uses opencv 3.1.0 and python 2.7.11.

Below is the code for finding an image inside another image, where the template uses transparency (an alpha channel). I hope this helps you.

# Imports needed by the functions below (Python 2 environment, per the answer above)
import logging
import time

import cv2
import numpy as np
from ConfigParser import SafeConfigParser

def getMultiFullInfo(all_matches,w,h):
    #This function will rearrange the data and calculate the tuple
    #   for the square and the center and the tolerance for each point
    result = []
    for match in all_matches:
        tlx = match[0]
        tly = match[1]
        top_left = (tlx,tly)
        brx = match[0] + w
        bry = match[1] + h 
        bottom_right = (brx,bry)     
        centerx = match[0] + w/2
        centery = match[1] + h/2
        center = [centerx,centery]
        result.append({'top_left':top_left,'bottom_right':bottom_right,'center':center,'tolerance':match[2]})
    return result

def getMulti(res, tolerance,w,h):
    #We get an opencv image in the form of a numpy array and we need to
    #   find all the occurrences in there, knowing that 2 squares cannot intersect
    #This will give us exactly the matches that are unique

    #First we need to get all the points where value is >= tolerance
    #This will sometimes pick up squares that differ only by a few pixels and that overlap
    all_matches_full = np.where (res >= tolerance)
    logging.debug('*************Start of getMulti function')
    logging.debug('All >= tolerance')
    logging.debug(all_matches_full)
    #Now we need to arrange it in x,y coordinates
    all_matches_coords = []
    for pt in zip(*all_matches_full[::-1]):
        all_matches_coords.append([pt[0],pt[1],res[pt[1]][pt[0]]])
    logging.debug('In coords form')
    logging.debug(all_matches_coords)
    #Let's sort the new array
    all_matches_coords = sorted(all_matches_coords)
    logging.debug('Sorted')
    logging.debug(all_matches_coords)

    #This function will be called only when there is at least one match, i.e. when matchTemplate returned something
    #This means we have found at least one record, so we can prepare the analysis and loop through each record
    all_matches = [[all_matches_coords[0][0],all_matches_coords[0][1],all_matches_coords[0][2]]]
    i=1
    for pt in all_matches_coords:
        found_in_existing = False
        logging.debug('%s)',i)
        for match in all_matches:
            logging.debug(match)
            #This is the test to make sure that the square we analyse doesn't overlap with one of the squares already found
            if pt[0] >= (match[0]-w) and pt[0] <= (match[0]+w) and pt[1] >= (match[1]-h) and pt[1] <= (match[1]+h):
                found_in_existing = True
                if pt[2] > match[2]:
                    match[0] = pt[0]
                    match[1] = pt[1]
                    match[2] = res[pt[1]][pt[0]]
        if not found_in_existing:
            all_matches.append([pt[0],pt[1],res[pt[1]][pt[0]]])
        i += 1
    logging.debug('Final')
    logging.debug(all_matches)
    logging.debug('Final with all info')
    #Before returning the result, we will arrange it with data easily accessible
    all_matches = getMultiFullInfo(all_matches,w,h)
    logging.debug(all_matches)   
    logging.debug('*************End of getMulti function')
    return all_matches

def checkPicture(screenshot,templateFile, tolerance, multiple = False):
    #This is an intermediary function so that the actual function doesn't take too many specific arguments
    #We open the config file
    configFile = 'test.cfg'
    config = SafeConfigParser()
    config.read(configFile)
    basepics_dir = config.get('general', 'basepics_dir')
    debug_dir = config.get('general', 'debug_dir')

    font = cv2.FONT_HERSHEY_PLAIN

    #The value -1 means we keep the file as is, meaning with color and alpha channel if any
    #   btw, 0 means grayscale and 1 is color
    template = cv2.imread(basepics_dir+templateFile,-1)

    #Now we search in the picture
    result = findPicture(screenshot,template, tolerance, multiple)
    #If it didn't get any result, we log the best value
    if not result['res']:
        logging.debug('Best value found for %s is: %f',templateFile,result['best_val'])
    elif logging.getLogger().getEffectiveLevel() == 10:
        screenshot_with_rectangle = screenshot.copy()
        for pt in result['points']:
            cv2.rectangle(screenshot_with_rectangle, pt['top_left'], pt['bottom_right'], 255, 2)
            fileName_top_left = (pt['top_left'][0],pt['top_left'][1]-10)
            cv2.putText(screenshot_with_rectangle,str(pt['tolerance'])[:4],fileName_top_left, font, 1,(255,255,255),2)
            #Now we save to the file if needed
            filename = time.strftime("%Y%m%d-%H%M%S") + '_' + templateFile[:-4] + '.jpg'
            cv2.imwrite(debug_dir + filename, screenshot_with_rectangle)

    result['name']=templateFile
    return result

def extractAlpha(img, hardedge = True):
    if img.shape[2]>3:
        logging.debug('Mask detected')
        channels = cv2.split(img)

        mask = np.array(channels[3])
        if hardedge:
            #Threshold the whole alpha channel to a hard 0/255 edge
            mask[mask <= 128] = 0
            mask[mask > 128] = 255

        mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
        img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)

        return {'res':True,'image':img,'mask':mask}
    else:
        return {'res':False,'image':img}


def findPicture(screenshot,template, tolerance, multiple = False):
    #This function works with color images, 3 channels minimum
    #The template can have an alpha channel, which we extract to get the mask

    logging.debug('Looking for %s' , template)
    logging.debug('Tolerance to check is %f' , tolerance)
    logging.debug('*************Start of checkPicture')

    h = template.shape[0]
    w = template.shape[1]

    #We will now extract the alpha channel
    tmpl = extractAlpha(template)
    logging.debug('Image width: %d - Image height: %d',w,h)

    #The method used for comparison; the available methods are cv2.TM_CCOEFF, cv2.TM_CCOEFF_NORMED,
    #   cv2.TM_CCORR, cv2.TM_CCORR_NORMED, cv2.TM_SQDIFF and cv2.TM_SQDIFF_NORMED
    method = cv2.TM_CCORR_NORMED

    # Apply template Matching
    if tmpl['res']:
        res = cv2.matchTemplate(screenshot,tmpl['image'],method, mask = tmpl['mask'])
    else:
        res = cv2.matchTemplate(screenshot,tmpl['image'],method)

    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

    # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take the minimum
    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
        best_val = 1 - min_val
    else:
        top_left = max_loc
        best_val = max_val

    #We need to ensure we found at least one match, otherwise we return False
    if best_val >= tolerance:
        if multiple:
            #We need to find every place the image was found
            all_matches = getMulti(res, float(tolerance),int(w),int(h))
        else:
            bottom_right = (top_left[0] + w, top_left[1] + h)
            center = (top_left[0] + (w/2), top_left[1] + (h/2))
            all_matches = [{'top_left':top_left,'bottom_right':bottom_right,'center':center,'tolerance':best_val}]

        #points will be in the form: [{'tolerance': 0.9889718890190125, 'center': (470, 193), 'bottom_right': (597, 215), 'top_left': (343, 172)}]
        logging.debug('The points found will be:')
        logging.debug(all_matches)
        logging.debug('*************End of checkPicture')
        return {'res': True,'points':all_matches}
    else:
        logging.debug('Could not find a value above tolerance')
        logging.debug('*************End of checkPicture')
        return {'res': False,'best_val':best_val}
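
As a rough usage sketch (not part of the original answer): it assumes a test.cfg defining basepics_dir and debug_dir, and the file names 'screenshot.png' and 'button.png' are hypothetical examples. The functions above could then be driven like this:

# usage sketch for the functions above; file names and tolerance are example values
logging.basicConfig(level=logging.DEBUG)

screenshot = cv2.imread('screenshot.png', cv2.IMREAD_COLOR)  # 3-channel search image
result = checkPicture(screenshot, 'button.png', 0.9, multiple=True)

if result['res']:
    for match in result['points']:
        print 'Found at center', match['center'], 'with score', match['tolerance']
else:
    print 'Best score below tolerance:', result['best_val']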

Answer 2 (score: 1)

I have now got the whole thing running in Python 3.9, and I have created a new question here that explicitly refers to Python 3.x with opencv-python: Template Matching with Python 3.9.1, opencv.python 4.5.1.48 and mask (transparency). I took the images from this question, since I also use them privately for testing.

Answer 3 (score: 0)

With OpenCV 4.2.0, the first two suggested code samples led to the following error for me:

cv2.error: OpenCV(4.2.0) C:\projects\opencv-python\opencv\modules\imgproc\src\templmatch.cpp:766: error: (-215:Assertion failed) (depth == CV_8U || depth == CV_32F) && type == _templ.type() && _img.dims() <= 2 in function 'cv::matchTemplateMask'

In the meantime, things seem to have become much easier. Here is my attempt at a minimal Python example. The file "crowncap_85x85_mask.png" is a black-and-white image; all black pixels in the mask are ignored during matching.

When using a mask, only the matching methods TM_SQDIFF and TM_CCORR_NORMED are supported.

import cv2 as cv

img = cv.imread("fridge_zoomed.png", cv.IMREAD_COLOR)
templ = cv.imread("crowncap_85x85.png", cv.IMREAD_COLOR)
mask = cv.imread( "crowncap_85x85_mask.png", cv.IMREAD_COLOR )

result = cv.matchTemplate(img, templ, cv.TM_CCORR_NORMED, None, mask)
cv.imshow("Matching with mask", result)
min_val, max_val, min_loc, max_loc = cv.minMaxLoc(result)
print('Highest correlation WITH mask', max_val)

result = cv.matchTemplate(img, templ, cv.TM_CCORR_NORMED)
cv.imshow("Matching without mask", result)
min_val, max_val, min_loc, max_loc = cv.minMaxLoc(result)
print('Highest correlation without mask', max_val)

while True:
    if cv.waitKey(10) == 27:
        break

cv.destroyAllWindows()

If you want to generate the mask from the alpha channel of the template, you can do the following:

import cv2 as cv
import numpy as np

img = cv.imread("fridge_zoomed.png", cv.IMREAD_COLOR)
templ = cv.imread("crowncap_85x85_transp.png", cv.IMREAD_COLOR)
templ_incl_alpha_ch = cv.imread("crowncap_85x85_transp.png", cv.IMREAD_UNCHANGED)

channels = cv.split(templ_incl_alpha_ch)
#extract "transparency" channel from image
alpha_channel = np.array(channels[3]) 
#generate mask image, all black dots will be ignored during matching
mask = cv.merge([alpha_channel,alpha_channel,alpha_channel])
cv.imshow("Mask", mask)

result = cv.matchTemplate(img, templ, cv.TM_CCORR_NORMED, None, mask)
cv.imshow("Matching with mask", result)
min_val, max_val, min_loc, max_loc = cv.minMaxLoc(result)
print('Highest correlation WITH mask', max_val)

result = cv.matchTemplate(img, templ, cv.TM_CCORR_NORMED)
cv.imshow("Matching without mask", result)
min_val, max_val, min_loc, max_loc = cv.minMaxLoc(result)
print('Highest correlation without mask', max_val)

while True:
    if cv.waitKey(10) == 27:
        break

cv.destroyAllWindows()
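
For reference, here is a minimal sketch of the same matching with cv.TM_SQDIFF, the other method that supports masks: lower values mean better matches, so the best location comes from min_loc. It assumes the img, templ and mask variables from the snippets above.

# sketch: masked matching with cv.TM_SQDIFF; assumes img, templ and mask from above
result = cv.matchTemplate(img, templ, cv.TM_SQDIFF, None, mask)
min_val, max_val, min_loc, max_loc = cv.minMaxLoc(result)
print('Lowest squared difference WITH mask', min_val)

# with TM_SQDIFF the best match is the minimum, so use min_loc for the position
h, w = templ.shape[:2]
top_left = min_loc
bottom_right = (top_left[0] + w, top_left[1] + h)

marked = img.copy()
cv.rectangle(marked, top_left, bottom_right, (0, 0, 255), 2)
cv.imshow("Best match (TM_SQDIFF with mask)", marked)
cv.waitKey(0)
cv.destroyAllWindows()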