I'm working on a small Python program to estimate the direction of a pointing gesture in 2D images from a monocular camera, and I'm using OpenCV 2.3. I know it's a bit tricky, but I'm motivated! :) My approach is to use face detection to find a region that I'm sure contains a lot of skin:
import cv2

img = cv2.imread("/home/max/recordings/cameras/imageTEST.jpg", 1)
img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
hc1 = cv2.CascadeClassifier("/home/max/haarcascade_frontalface_alt.xml")
faces1 = hc1.detectMultiScale(img)
for (x, y, w, h) in faces1:
    cv2.rectangle(img, (x, y), (x+w, y+h), 255)
    # NumPy slicing is [rows, cols], i.e. [y:y+h, x:x+w]; crop from the HSV
    # image, since the hue-saturation histogram below is computed on HSV channels
    crop_img = img_hsv[y+2:y+h, x+2:x+w]
I really want to use this approach because I'd like my detection to be robust to lighting changes. I then compute the hue-saturation histogram of the detected face and back-project it onto the image:
roihist = cv2.calcHist([crop_img], [0, 1], None, [180, 256], [0, 180, 0, 256])
# back-project against the HSV image, not the BGR one, so the channels match the histogram
dst = cv2.calcBackProject([img_hsv], [0, 1], roihist, [0, 180, 0, 256], 1)
Finally, I can binarize the picture with a threshold and track the head and hand blobs to estimate the direction of the pointing gesture. My code runs without errors, but no skin gets detected... What am I doing wrong? Thanks for your help!
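For completeness, here is a rough sketch of what I have in mind for that last step, working from the dst back-projection above; the threshold of 50 and the 5x5 kernel are placeholders I would tune:

import numpy as np

# binarize the back-projection; 50 is a placeholder threshold
ret, skin = cv2.threshold(dst, 50, 255, cv2.THRESH_BINARY)
# clean up speckle noise before looking for blobs
skin = cv2.morphologyEx(skin, cv2.MORPH_OPEN, np.ones((5, 5), np.uint8))
# two-value return in OpenCV 2.x (OpenCV 3 returns three values here)
contours, hierarchy = cv2.findContours(skin, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# keep the two largest contours as head and hand candidates
blobs = sorted(contours, key=cv2.contourArea, reverse=True)[:2]
centroids = []
for c in blobs:
    m = cv2.moments(c)
    if m["m00"] > 0:
        centroids.append((int(m["m10"] / m["m00"]), int(m["m01"] / m["m00"])))
# with both centroids, the head-to-hand vector gives a rough pointing direction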
Max
Answer 0 (score: 0)
Have you tried the Cr channel of the YCbCr format? I had some luck with Cr when I previously did skin-color hand detection. Also, there is this paper, which uses a nice method for hand detection. But keep in mind that as long as you rely on skin color, the detection won't work for every hand, though it can be tuned for a given user or a group of users.
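For illustration, a minimal Cr-channel skin mask might look like this; the 133-173 range is a commonly quoted rule of thumb, not a universal constant, and the file name is just a placeholder:

import cv2

img = cv2.imread("hand.jpg")  # placeholder path
ycrcb = cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)
cr = ycrcb[:, :, 1]  # channel order is Y, Cr, Cb
# 133-173 is a frequently cited skin range for Cr; tune per user and lighting
skin_mask = cv2.inRange(cr, 133, 173)
cv2.imshow("skin mask", skin_mask)
cv2.waitKey(0)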
Answer 1 (score: 0)
I've recently been working through the OpenCV examples available online (just basic stuff, for fun). I moved on from face recognition (interesting, but too much of a black box for my liking) to manually selecting an ROI in HSV space and then tracking it with 'camshift'. I was still getting variable results I didn't understand, so I also displayed all the intermediate processing windows, such as the HSV image and the back-projected image, and plotted the histogram across the windows. Suddenly everything was clear: you can see exactly what the computer is trying to work with.
Here is my working code for Python 3.4 and OpenCV 3. You can manually select the skin region. Most of the credit goes to other examples I found online.
The cv2.calcBackProject function thresholds out the skin features nicely.
import numpy as np
import cv2
roiPts = []
track_mode = False
termination = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
roiBox = None
kernel = np.ones((5, 5), np.uint8)
frame_width_in_px = 640
number_of_histogram_elements = 16
def selectROI(event, x, y, flags, param):
    global track_mode, roiPts
    if (event == cv2.EVENT_LBUTTONDOWN) and (len(roiPts) == 4):  # reselecting ROI points, so leave tracking mode and clear the current points
        roiPts = []
        track_mode = False
    if (event == cv2.EVENT_LBUTTONDOWN) and (len(roiPts) < 4):  # ROI point selection
        roiPts.append([x, y])

cap = cv2.VideoCapture(0)
cv2.namedWindow("frame")
cv2.setMouseCallback("frame", selectROI)
while True:
    ret, frame = cap.read()
    if not ret:  # no frame from the camera
        break

    if len(roiPts) <= 4 and len(roiPts) > 0:
        for x, y in roiPts:
            cv2.circle(frame, (x, y), 4, (0, 255, 0), 1)  # draw a small circle for each ROI click

    if len(roiPts) == 4 and track_mode == False:  # initialize the camshift
        # convert the selected points to a box shape
        roiBox = np.array(roiPts, dtype=np.int32)
        s = roiBox.sum(axis=1)
        tl = roiBox[np.argmin(s)]  # top-left has the smallest x+y sum
        br = roiBox[np.argmax(s)]  # bottom-right has the largest x+y sum

        # extract the ROI from the image and calculate its hue histogram
        roi = frame[tl[1]:br[1], tl[0]:br[0]]
        roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
        roiHist = cv2.calcHist([roi], [0], None, [number_of_histogram_elements], [0, 180])
        roiHist = cv2.normalize(roiHist, roiHist, 0, 255, cv2.NORM_MINMAX)

        roiBox = (tl[0], tl[1], br[0] - tl[0], br[1] - tl[1])  # CamShift expects an (x, y, w, h) window
        track_mode = True  # ready for camshift

    if track_mode == True:  # tracking mode
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        backProj = cv2.calcBackProject([hsv], [0], roiHist, [0, 180], 1)

        # perform some noise reduction and smoothing
        erosion = cv2.erode(backProj, kernel, iterations=2)
        dilate = cv2.dilate(erosion, kernel, iterations=2)

        (r, roiBox) = cv2.CamShift(dilate, roiBox, termination)  # this takes the previous roiBox and calculates the new roiBox
        pts = np.int0(cv2.boxPoints(r))
        cv2.polylines(frame, [pts], True, (0, 255, 0), 2)  # tracking box
        cv2.polylines(backProj, [pts], True, (0, 255, 0), 2)  # tracking box
        cv2.polylines(dilate, [pts], True, (0, 255, 0), 2)  # tracking box
        cv2.polylines(hsv, [pts], True, (0, 255, 0), 2)  # tracking box

        # plot the histogram as a polyline across the windows
        x = np.linspace(0, frame_width_in_px, number_of_histogram_elements, dtype=np.int32)
        y = roiHist.flatten().astype(np.int32, copy=False) - 255  # note: the frame height needs to be greater than 255, the max histogram value
        y = np.absolute(y)
        pts2 = np.stack((x, y), axis=1)
        cv2.polylines(frame, [pts2], False, (0, 255, 0), 2)
        cv2.polylines(hsv, [pts2], False, (0, 255, 0), 2)

        cv2.imshow("backproject", backProj)
        cv2.imshow("dilate", dilate)
        cv2.imshow("hsv", hsv)

    cv2.imshow("frame", frame)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# When everything is done, release the capture
cap.release()
cv2.destroyAllWindows()