OpenCV error: Expected Ptr&lt;cv::UMat&gt; for argument '%s'

Date: 2019-10-19 17:51:10

Tags: python opencv

I am new to OpenCV and am trying to stitch five images together into a single image using OpenCV for Python 3.7. The source code I started from could only stitch two images together, so I had to modify it to stitch five webcam images. However, I get this error:


Traceback (most recent call last):
  File "C:\Users\colby\OneDrive\Desktop\New folder\photomosaic.py", line 80, in <module>
    img3 = cv2.drawMatches(kpsA,secondImg,kpsB,thirdImg, kpsC, forthImg, kpsD, fifthImg, kpsE, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
TypeError: Expected Ptr<cv::UMat> for argument '%s'

My code:

import cv2
import numpy as np
import matplotlib.pyplot as plt
import imageio
import imutils
import cameracapture

cv2.ocl.setUseOpenCL(False)

feature_extractor = 'orb' # one of 'sift', 'surf', 'brisk', 'orb'
feature_matching = 'bf'

firstImg = imageio.imread('cap1.jpg')
firstImg_gray = cv2.cvtColor(firstImg, cv2.COLOR_RGB2GRAY)
secondImg = imageio.imread('cap2.jpg')
secondImg_gray = cv2.cvtColor(secondImg, cv2.COLOR_RGB2GRAY)
thirdImg = imageio.imread('cap3.jpg')
thirdImg_gray = cv2.cvtColor(thirdImg, cv2.COLOR_RGB2GRAY)
forthImg = imageio.imread('cap4.jpg')
forthImg_gray = cv2.cvtColor(forthImg, cv2.COLOR_RGB2GRAY)
fifthImg = imageio.imread('cap5.jpg')
fifthImg_gray = cv2.cvtColor(fifthImg, cv2.COLOR_RGB2GRAY)

plt.show()
def detectAndDescribe(image, method=None):
    assert method is not None, "You need to define a feature detection method. Values are: 'sift', 'surf'"

    if method == 'sift':
        descriptor = cv2.xfeatures2d.SIFT_create()
    elif method == 'surf':
        descriptor = cv2.xfeatures2d.SURF_create()
    elif method == 'brisk':
        descriptor = cv2.BRISK_create()
    elif method == 'orb':
        descriptor = cv2.ORB_create()

    # get keypoints and descriptors
    (kps, features) = descriptor.detectAndCompute(image, None)
    return (kps, features)
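
# detect keypoints and extract feature descriptors for each of the five captures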
kpsA, featuresA = detectAndDescribe(firstImg_gray, method=feature_extractor)
kpsB, featuresB = detectAndDescribe(secondImg_gray, method=feature_extractor)
kpsC, featuresC = detectAndDescribe(thirdImg_gray, method=feature_extractor)
kpsD, featuresD = detectAndDescribe(forthImg_gray, method=feature_extractor)
kpsE, featuresE = detectAndDescribe(fifthImg_gray, method=feature_extractor) 
def createMatcher(method, crossCheck):
    if method == 'sift' or method == 'surf':
        bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=crossCheck)
    elif method == 'orb' or method == 'brisk':
        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=crossCheck)
    return bf
def matchKeyPointsBF(featuresA, featuresB, featuresC, featuresD, featuresE, method):
    bf = createMatcher(method, crossCheck=True)

    # Match descriptors.
    best_matches = bf.match(featuresA, featuresB)
    best_matchs = bf.match(featuresA, featuresB + featuresC + featuresD + featuresE)

    # Sort the features in order of distance.
    # The points with small distance (more similarity) are ordered first in the vector
    rawMatches = sorted(best_matches, key=lambda x: x.distance)
    print("Raw matches (Brute force):", len(rawMatches))
    return rawMatches
def matchKeyPointsKNN(featuresA, featuresB, featuresC, featuresD, featuresE, ratio, method):
    bf = createMatcher(method, crossCheck=False)

    # compute the raw matches and initialize the list of actual matches
    rawMatches = bf.knnMatch(featuresA, featuresB, featuresC, featuresD, featuresE, 2)
    print("Raw matches (knn):", len(rawMatches))
    matches = []

    # loop over the raw matches
    for m, n in rawMatches:
        # ensure the distance is within a certain ratio of each
        # other (i.e. Lowe's ratio test)
        if m.distance < n.distance * ratio:
            matches.append(m)
    return matches
print("Using: {} feature matcher".format(feature_matching))

fig = plt.figure(figsize=(20,8))

if feature_matching == 'bf':
    matches = matchKeyPointsBF(featuresA,featuresB, featuresC, featuresD, 
featuresE, method=feature_extractor)
    img3 = cv2.drawMatches(kpsA,secondImg,kpsB,thirdImg, kpsC, forthImg, 
kpsD, fifthImg, kpsE, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
elif feature_matching == 'knn':
    matches = matchKeyPointsKNN(featuresA,featuresB, featuresC, featuresD, 
featuresE, ratio=0.75, method=feature_extractor)
    img3 = cv2.drawMatches(firstImg,kpsA,secondImg,kpsB,thirdImg, kpsC, 
forthImg, kpsD, fifthImg, kpsE, np.random.choice(matches,100),

None,flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)


plt.imshow(img3)
plt.show()
def getHomography(kpsA, kpsB, kpsC, kpsD, kpsE, featuresA, featuresB, featuresC, featuresD, featuresE, matches, reprojThresh):
    # convert the keypoints to numpy arrays
    kpsA = np.float32([kp.pt for kp in kpsA])
    kpsB = np.float32([kp.pt for kp in kpsB])
    kpsC = np.float32([kp.pt for kp in kpsC])
    kpsD = np.float32([kp.pt for kp in kpsD])
    kpsE = np.float32([kp.pt for kp in kpsE])

    if len(matches) > 4:
        # construct the two sets of points
        ptsA = np.float32([kpsA[m.firstIdx] for m in matches])
        ptsB = np.float32([kpsB[m.secondIdx] for m in matches])
        ptsC = np.float32([kpsC[m.thirdIdx] for m in matches])
        ptsD = np.float32([kpsD[m.forthIdx] for m in matches])
        ptsE = np.float32([kpsE[m.fifthIdx] for m in matches])

        # estimate the homography between the sets of points
        (H, status) = cv2.findHomography(ptsA, ptsB, ptsC, ptsD, ptsE, cv2.RANSAC, reprojThresh)

        return (matches, H, status)
    else:
        return None
M = getHomography(kpsA, kpsB, kpsC, kpsD, kpsE, featuresA, featuresB, featuresC, featuresD, featuresE, matches, reprojThresh=4)
if M is None:
    print("Error!")
(matches, H, status) = M
print(H)
# Apply panorama correction
width = trainImg.shape[1] + queryImg.shape[1]
height = trainImg.shape[0] + queryImg.shape[0]

result = cv2.warpPerspective(trainImg, H, (width, height))
result[0:queryImg.shape[0], 0:queryImg.shape[1]] = queryImg

plt.figure(figsize=(20,10))
plt.imshow(result)

plt.axis('off')
plt.show()
# transform the panorama image to grayscale and threshold it 
gray = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY)[1]

# Finds contours from the binary image
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)

# get the maximum contour area
c = max(cnts, key=cv2.contourArea)

# get a bbox from the contour area
(x, y, w, h) = cv2.boundingRect(c)

# crop the image to the bbox coordinates
result = result[y:y + h, x:x + w]

# show the cropped image
plt.figure(figsize=(20,10))
plt.imshow(result)

Expected output:


I expect the program to take five pictures with my webcam (via the cameracapture import) and stitch them together into one image. Any suggestions or fixes for this error would be greatly appreciated.

1 Answer:

Answer 0 (score: 0)

firstImg = imageio.imread('cap1.jpg')
print(type(firstImg))

If the output is

<class 'PIL.JpegImagePlugin.JpegImageFile'>

then convert it to an array with:

firstImg = np.asarray(firstImg)

Once firstImg is loaded this way, convert it to a grayscale image.
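
A minimal sketch of that suggestion, assuming the capture files cap1.jpg through cap5.jpg from the question exist on disk:

import cv2
import numpy as np
import imageio

# read the capture; depending on the imageio version/plugin, this can
# come back as a PIL image object instead of a plain numpy array
firstImg = imageio.imread('cap1.jpg')

# force it into a numpy array so OpenCV can treat it as a cv::Mat
firstImg = np.asarray(firstImg)
print(type(firstImg))  # should now print <class 'numpy.ndarray'>

# with a real ndarray, cvtColor accepts the image without complaint
firstImg_gray = cv2.cvtColor(firstImg, cv2.COLOR_RGB2GRAY)

The same conversion applies to the other four captures; every image handed to cv2.cvtColor or cv2.drawMatches must be a numpy array, otherwise OpenCV raises the Expected Ptr<cv::UMat> TypeError shown in the question.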