我目前正在研究一个确定轮廓区域的 opencv 项目。
我试图制定一个适当的阈值并对其应用 cv2.findContours
。
但是,我发现当轮廓与图像的边界相交时,使用 cv2.contourArea
会大大低估该区域。
如何解决这个问题?
(请检查附加的图像,它在示例图像上显示了错误的结果)
import cv2
import numpy as np
def midpoint(ptA, ptB):
    """Return the midpoint of two 2-D points as a (x, y) tuple of floats."""
    return tuple((a + b) / 2.0 for a, b in zip(ptA, ptB))
Input = cv2.imread("Thresh.png")
Thresh = cv2.cvtColor(Input, cv2.COLOR_BGR2GRAY)

# Fix for the boundary problem: a contour that touches the image edge is an
# open shape, so cv2.contourArea badly underestimates it.  Padding the binary
# image with a 1-px black border closes such contours before findContours.
pad = 1
padded = cv2.copyMakeBorder(Thresh, pad, pad, pad, pad,
                            cv2.BORDER_CONSTANT, value=0)

contours, hierarchy = cv2.findContours(padded, cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_SIMPLE)

minArea = 0
for cnt in contours:
    # Shift the contour back into original-image coordinates.
    cnt = cnt - pad
    area = cv2.contourArea(cnt)
    print(area)
    if area > minArea:
        rect = cv2.minAreaRect(cnt)
        box = cv2.boxPoints(rect)
        # np.int0 was removed in NumPy 2.0; np.intp is the supported alias.
        box = np.intp(box)
        Input = cv2.drawContours(Input, [box], 0, (255, 0, 0), 2)
        # NOTE(review): boxPoints does not guarantee tl/tr/br/bl ordering;
        # the label is anchored at the midpoint of the first box edge.
        (tl, tr, br, bl) = box
        (tltrX, tltrY) = midpoint(tl, tr)
        # "{:.1f}" = one decimal place; the original "{:1f}" meant minimum
        # field width 1 and printed six decimals.
        cv2.putText(Input, "{:.1f}".format(area),
                    (int(tltrX), int(tltrY)), cv2.FONT_HERSHEY_SIMPLEX,
                    0.65, (0, 255, 0), 2)

cv2.imshow('Input', Input)
cv2.waitKey(0)
cv2.destroyAllWindows()
(Image 1: the input threshold image. Image 2: the determined contours (bounding rectangles) with the contour areas written on them.)