I have been working on a project in which I need to find the x and y coordinates of an object using a global-shutter camera in trigger mode. Everything worked fine and I was getting the desired results, but when I process images captured continuously in trigger mode, the images contain extra noise readings.
Source image:
Can anyone suggest a good approach to solve this problem?
from scipy.spatial import distance as dist
from imutils import perspective
from imutils import contours
import numpy as np
import argparse
import imutils
import cv2
def midpoint(ptA, ptB):
    return ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5)

image = cv2.imread('022.bmp')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
gray = cv2.GaussianBlur(gray, (7, 7), 0)
edged = cv2.Canny(gray, 50, 100)
edged = cv2.dilate(edged, None, iterations=1)
edged = cv2.erode(edged, None, iterations=1)

cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
(cnts, _) = contours.sort_contours(cnts)
colors = ((0, 0, 255), (240, 0, 159), (0, 165, 255), (255, 255, 0), (255, 0, 255))
refObj = None

for c in cnts:
    if cv2.contourArea(c) < 250:
        #print "in if loop"
        continue
    #print cv2.contourArea(c)
    box = cv2.minAreaRect(c)
    box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
    box = np.array(box, dtype="int")
    box = perspective.order_points(box)
    cX = np.average(box[:, 0])
    cY = np.average(box[:, 1])

    if refObj is None:
        (tl, tr, br, bl) = box
        (tlblX, tlblY) = midpoint(tl, bl)
        (trbrX, trbrY) = midpoint(tr, br)
        D = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))
        refObj = (box, (cX, cY), D)
        print refObj[0]
        print 'cx-1', cX
        print 'cy-1', cY
        continue

    orig = image.copy()
    cv2.drawContours(orig, [box.astype("int")], -1, (0, 255, 0), 2)
    cv2.drawContours(orig, [refObj[0].astype("int")], -1, (0, 255, 0), 2)
    refCoords = np.vstack([refObj[0], refObj[1]])
    objCoords = np.vstack([box, (cX, cY)])
    print box
    print 'cx', cX
    print 'cy', cY

    for ((xA, yA), (xB, yB), color) in zip(refCoords, objCoords, colors):
        cv2.circle(orig, (int(xA), int(yA)), 5, color, -1)
        cv2.circle(orig, (int(xB), int(yB)), 5, color, -1)
        cv2.line(orig, (int(xA), int(yA)), (int(xB), int(yB)), color, 2)

    cv2.imshow('img', orig)
    cv2.waitKey(0)
Can you offer some suggestions on this? Thank you!
Answer 0 (score: 2)
I have backed up your source image in case you replace or delete it.
I analyzed the image in different color spaces and found that the B channel (of BGR) is well suited to your task.
Then perform the following steps, which match the numbered comments in the code: (1) read the image and extract the blue channel, (2) threshold the blue channel, (3) find the contours, (4) filter the contours by area.
Code and result:
#!/usr/bin/python3
# 2017.12.09 00:25:47 CST
# 2018.01.10 21:10:07 CST
import cv2
import numpy as np
## (1) read and extract the blue channel
img = cv2.imread("img01.png")
blue = img[...,0]
## (2) threshold the blue
th, threshed = cv2.threshold(blue, 200, 255, cv2.THRESH_BINARY_INV|cv2.THRESH_OTSU)
## (3) find contours
cnts = cv2.findContours(threshed,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)[-2]
## (4) filter by AREA
canvas = img.copy()
H,W = img.shape[:2]
AREA = H*W
for cnt in cnts:
    area = cv2.contourArea(cnt)
    if not AREA/100 < area < AREA/20:
        continue
    box = cv2.minAreaRect(cnt)
    box = cv2.boxPoints(box)
    box = np.array(box, dtype="int")
    cv2.drawContours(canvas, [box], -1, (255, 0, 255), 2, cv2.LINE_AA)
## save
cv2.imwrite("result.png", canvas)
Answer 1 (score: 1)
I would suggest better initial preprocessing to remove the noise before applying edge detection. One possible approach is bilateral filtering, which smooths the image while preserving edges.
Here is a simple example:
import numpy as np
import cv2
img = cv2.imread('img01.png')
canvas = img.copy()
filtered = cv2.bilateralFilter(img, 30, 80, 80)
gray = cv2.cvtColor(filtered, cv2.COLOR_BGR2HSV)
edged = cv2.Canny(gray,50,100)
_, cnts, _ = cv2.findContours(edged,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
H,W = img.shape[:2]
AREA = H*W
for cnt in cnts:
    area = cv2.contourArea(cnt)
    if not AREA/100 < area < AREA/20:
        continue
    box = cv2.minAreaRect(cnt)
    box = cv2.boxPoints(box)
    box = np.array(box, dtype="int")
    cv2.drawContours(canvas, [box], -1, (255, 0, 255), 2, cv2.LINE_AA)
result = np.hstack((img, filtered, cv2.cvtColor(edged, cv2.COLOR_GRAY2BGR), canvas))
cv2.imwrite("result.png", result)
cv2.imshow('Results', result)
cv2.waitKey(0)
Additionally, a more drastic measure is to apply mean-shift filtering, which reduces the number of distinct colors in the image. Simply replace the bilateral filter line with:
filtered = cv2.pyrMeanShiftFiltering(img, 20, 50)
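In cv2.pyrMeanShiftFiltering(img, 20, 50), 20 is the spatial window radius (sp) and 50 is the color window radius (sr). For reference, a minimal standalone sketch of that variant, reusing the img01.png file name from the examples above:
import cv2
import numpy as np

img = cv2.imread('img01.png')

# Mean-shift filtering flattens regions of similar color, reducing the
# number of distinct colors before any further edge detection.
filtered = cv2.pyrMeanShiftFiltering(img, 20, 50)

# Show the original and the filtered image side by side.
cv2.imshow('original | mean-shift filtered', np.hstack((img, filtered)))
cv2.waitKey(0)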