I need some help with a project. It involves detecting red objects inside a defined green zone. I have to dodge the objects and reach a goal (in this case a blue zone), then come back and collect the objects with a servo motor and a gripper, all of it using a camera with OpenCV and an iRobot.
The code I have so far recognizes red objects, moves toward them and stops when they are close. It can also rotate left and right, which I do to try to center the object in the camera by splitting the screen into 3 parts, and then I pick the object up with the servo. But I have run out of ideas: when I run the Python code the whole process is very slow, the left and right detection is so sensitive that I cannot get the object centered. Besides, I don't know how to obtain the distance between the object and the robot; in theory I can dodge the objects, but I don't know how to reach the goal, or how to come back to them.
This is the Python code:
import cv2
import numpy as np
import serial
from time import sleep

def serialCom(cmd):
    # Open the serial link to the Arduino and send one command character
    ser = serial.Serial(port="/dev/ttyACM0", baudrate=9600)
    x = ser.readline()
    ser.write(cmd)
    x = ser.readline()
    print 'Sending: ', x

# Counts how much red there is in the window given by the parameters
def areaRed(img, xI, xF, yI, yF):
    # red pixel counter
    c = 0
    red = 255
    for i in range(xI, xF):
        for j in range(yI, yF):
            if img[i][j] == red:
                c += 1
    return c

def reconoce(lower_re, upper_re, lower_gree, upper_gree, lower_ble, upper_ble):
    cap = cv2.VideoCapture(1)
    while(1):
        _, frame = cap.read()
        # Converting BGR to HSV
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        hsv2 = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        hsv3 = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        # Threshold the HSV image to get only red, blue and green colors
        mask = cv2.inRange(hsv, lower_re, upper_re)
        maskBlue = cv2.inRange(hsv2, lower_ble, upper_ble)
        maskGreen = cv2.inRange(hsv3, lower_gree, upper_gree)
        moments = cv2.moments(mask)
        area = moments['m00']
        blueMoment = cv2.moments(maskBlue)
        blueArea = blueMoment['m00']
        greenMoment = cv2.moments(maskGreen)
        greenArea = greenMoment['m00']
        # Determine the limits of the mask
        x = mask.shape[0] - 1
        y = mask.shape[1] - 1
        # Determine the center point of the image
        xC = x / 2
        yC = y / 2
        x3 = (x / 3) / 2
        y3 = y / 2
        # Define the variables for the central window of the camera
        xI, xF, yI, yF = xC - (xC / 4), xC + (xC / 4), yC - (yC / 4), yC + (yC / 4)
        # Define the ranges of the mask for the left and right side
        right = areaRed(mask, xI + (x / 4), xF + (x / 4), yI + (x / 4), yF + (x / 4))
        left = areaRed(mask, xI - (x / 4), xF - (x / 4), yI - (x / 4), yF - (x / 4))
        centro = areaRed(mask, xI, xF, yI, yF)
        if right > 700:
            serialCom("4")
            print "turning right"
            return mask
        if left > 700:
            serialCom("5")
            print "turning left"
            return mask
        # if there is a red object
        if centro > 5000:
            print 'i detect red'
            # and it is on the green area
            if greenArea > 10000000:
                # we stop the iRobot
                serialCom("1")
                print 'I found red and it is in the area'
                cv2.destroyAllWindows()
                return mask
            else:
                # then we continue moving
                serialCom("3")
                print ''
        else:
            print "there is no red object :v"
        cv2.imshow('frame', frame)
        cv2.imshow('red', mask)
        k = cv2.waitKey(5) & 0xFF
        if k == 27:
            break
    cv2.destroyAllWindows()

# The ranges of colors are defined in HSV
lower_red = np.array([170, 150, 50])
upper_red = np.array([179, 255, 255])
lower_green = np.array([85, 80, 150])
upper_green = np.array([95, 255, 255])
lower_blue = np.array([100, 100, 100])
upper_blue = np.array([120, 255, 255])

while True:
    img = reconoce(lower_red, upper_red, lower_green, upper_green, lower_blue, upper_blue)
    cv2.imshow('Freeze image', img)
    cv2.waitKey(10000)
    cv2.destroyAllWindows()
And this is the Arduino code that connects the iRobot with the OpenCV side:
#include <Roomba.h>
Roomba roomba(&Serial1);
int p = 0;

// Brief left/right twitch used as a start-up check
// (the name init() is taken by the Arduino core)
void initMove()
{
  roomba.driveDirect(-100, 100);
  roomba.driveDirect(-100, 100);
  delay(100);
  roomba.driveDirect(100, -100);
  delay(100);
}

void move()
{
  roomba.driveDirect(50, 50);
  roomba.driveDirect(50, 50);
}

void reverse()
{
  roomba.driveDirect(-50, -50);
}

void left_rotation()
{
  roomba.driveDirect(-30, 30);
  delay(1000);
}

void right_rotation()
{
  roomba.driveDirect(30, -30);
  delay(1000);
}

void stop()
{
  roomba.driveDirect(0, 0);
  delay(500);
  Serial.println("9");
}

void setup()
{
  Serial.begin(9600);
  roomba.start();
  roomba.safeMode();
}

void loop()
{
  if (Serial.available() > 0)
  {
    p = Serial.read();
    if (p == 51)  // ASCII '3': drive forward
    {
      Serial.println("1");
      move();
    }
    if (p == 50)  // ASCII '2': reverse
    {
      Serial.println("2");
      reverse();
      delay(1000);
    }
    if (p == 52)  // ASCII '4': rotate left
    {
      Serial.println("4");
      left_rotation();
      delay(1000);
      stop();
    }
    if (p == 53)  // ASCII '5': rotate right
    {
      Serial.println("5");
      right_rotation();
      delay(1000);
      stop();
    }
    if (p == 49)  // ASCII '1': stop
    {
      Serial.println("3");
      stop();
      delay(1000);
    }
  }
  delay(100);
}
Answer 0 (score: 0)
Yes, your code is very slow because you iterate over the image matrix by hand with your own simple loops. I suggest you read more about object tracking and the theory behind it, and then look at existing practical solutions, rather than trying to program it at a low level with OpenCV yourself. For the concrete problem you describe, have a look at cvBlob
http://code.google.com/p/cvblob/
which is also about tracking red objects (it even comes with a cool demo video).
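cvBlob itself is a C/C++ library, but the same idea (find the connected red blobs and steer toward the centroid of the largest one) can be sketched in Python with OpenCV's built-in contour functions. A rough sketch, where 'frame.png' stands in for one camera frame:
import cv2
import numpy as np

frame = cv2.imread('frame.png')  # stand-in for one camera frame
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, np.array([170, 150, 50]), np.array([179, 255, 255]))

# OpenCV 2.x returns (contours, hierarchy); OpenCV 3.x returns three values
contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if contours:
    blob = max(contours, key=cv2.contourArea)  # the largest red blob
    m = cv2.moments(blob)
    if m['m00'] > 0:
        cx = int(m['m10'] / m['m00'])  # centroid x: steer until this nears the image center
        cy = int(m['m01'] / m['m00'])  # centroid y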
If the object is not just a flat color but textured, I would suggest this part of the OpenCV docs
http://docs.opencv.org/doc/tutorials/features2d/feature_flann_matcher/feature_flann_matcher.html
which explains tracking through feature extraction, descriptor generation and FLANN matching.
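For reference, a minimal SIFT + FLANN sketch along the lines of that tutorial ('object.png' and 'scene.png' are hypothetical file names):
import cv2

img1 = cv2.imread('object.png', 0)  # hypothetical picture of the object alone
img2 = cv2.imread('scene.png', 0)   # hypothetical camera view to search in

sift = cv2.SIFT_create()  # cv2.SIFT() on the old 2.4 API
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)

# FLANN with a KD-tree index, as in the linked tutorial
index_params = dict(algorithm=1, trees=5)  # 1 = FLANN_INDEX_KDTREE
flann = cv2.FlannBasedMatcher(index_params, dict(checks=50))
matches = flann.knnMatch(des1, des2, k=2)

# Lowe's ratio test keeps only the distinctive matches
good = [m for m, n in matches if m.distance < 0.7 * n.distance]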
Answer 1 (score: 0)
I guess "so sensitive" means that your robot keeps jittering: your detection is too slow to keep up with the object's actual position.
First of all, you need to make sure you can detect the target object at a fixed frame rate, say 25 FPS. That means your detection plus the motion command must respond in under 40 ms. Test whether your algorithm can finish one move within 40 ms; if not, here are some suggestions.
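Something as simple as this (a sketch; put one full grab-detect-send cycle where the comment is) tells you whether you are anywhere near 25 FPS:
import time

t0 = time.time()
# ... one full cycle here: grab a frame, run the color detection,
#     send the serial command to the robot ...
elapsed_ms = (time.time() - t0) * 1000.0
print 'one cycle took %.1f ms' % elapsed_ms  # must stay under 40 ms for 25 FPS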
The following code is inefficient: it traverses the image about 12 times, which costs far too much time. You can cut the traversals down to two, or even to a single pass.
# Converting BGR to HSV
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
hsv2 = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
hsv3 = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# Threshold the HSV image to get only red, blue and green colors
mask = cv2.inRange(hsv, lower_re, upper_re)
maskBlue = cv2.inRange(hsv2, lower_ble, upper_ble)
maskGreen = cv2.inRange(hsv3, lower_gree, upper_gree)
moments = cv2.moments(mask)
area = moments['m00']
blueMoment = cv2.moments(maskBlue)
blueArea = blueMoment['m00']
greenMoment = cv2.moments(maskGreen)
greenArea = greenMoment['m00']
...
# define the ranges of the mask for the left and right side
right = areaRed(mask, xI + (x/4), xF + (x/4), yI + (x/4), yF + (x/4))
left = areaRed(mask, xI - (x/4), xF - (x/4), yI - (x/4), yF - (x/4))
centro = areaRed(mask, xI, xF, yI, yF)
Sorry, for now I can only give C++ code for this part.
Mat hsv, RedMask, BlueMask, GreenMask;
RedMask = Mat::zeros(src.rows, src.cols, CV_8UC1);
RedMask.copyTo(BlueMask);
RedMask.copyTo(GreenMask);
// build every mask in a single pass over the image
cvtColor(src, hsv, CV_BGR2HSV);
const uchar* pix;
uchar* pixoutr;
uchar* pixoutg;
uchar* pixoutb;
long int redarea = 0;
long int greenarea = 0;
long int bluearea = 0;
for (int i = 0; i < hsv.rows; i++)
{
    pix = hsv.ptr<uchar>(i);
    pixoutr = RedMask.ptr<uchar>(i);
    pixoutg = GreenMask.ptr<uchar>(i);
    pixoutb = BlueMask.ptr<uchar>(i);
    for (int j = 0; j < hsv.cols * hsv.channels(); j += hsv.channels())
    {
        // pix[j] is the hue of this pixel; compare it against each color's hue range
        uchar hue = pix[j];
        if (hue >= lower_blue && hue <= upper_blue)
        {
            pixoutb[j / hsv.channels()] = 255;
            bluearea += 1;
        }
        if (hue >= lower_gree && hue <= upper_gree)
        {
            pixoutg[j / hsv.channels()] = 255;
            greenarea += 1;
        }
        if (hue >= lower_re && hue <= upper_re)
        {
            pixoutr[j / hsv.channels()] = 255;
            redarea += 1;
        }
    }
}
This way you get all the channel masks and their areas in a single traversal. It can get even faster if you apply OpenMP or Python multithreading.
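In Python you do not even need an explicit loop for this: one cvtColor plus cv2.inRange and cv2.countNonZero run in C and give you the same masks and areas. A minimal sketch reusing the bounds from the question:
import cv2
import numpy as np

frame = cv2.imread('frame.png')  # stand-in for one camera frame
lower_re, upper_re = np.array([170, 150, 50]), np.array([179, 255, 255])
lower_gree, upper_gree = np.array([85, 80, 150]), np.array([95, 255, 255])
lower_ble, upper_ble = np.array([100, 100, 100]), np.array([120, 255, 255])

hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)  # convert once, reuse for all masks
mask = cv2.inRange(hsv, lower_re, upper_re)
maskBlue = cv2.inRange(hsv, lower_ble, upper_ble)
maskGreen = cv2.inRange(hsv, lower_gree, upper_gree)

# countNonZero is a single C-level pass, replacing the per-pixel Python loops
area = cv2.countNonZero(mask)
blueArea = cv2.countNonZero(maskBlue)
greenArea = cv2.countNonZero(maskGreen)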
Besides that, your tracking algorithm has a lot of problems; look into image correlation, blob tracking and the like. Your use of moments is the right direction, since it is fast enough; feature detection is not very practical for you right now. Finally, note that you should arrange your OpenCV detection algorithm to run in parallel with your Arduino motion code. Simple threading skills are enough, and that way you can use the computation time while the servo motors are running.
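A minimal sketch of that idea in Python: the camera is read in a background thread so the detection loop (and the serial traffic to the Arduino) never blocks waiting for a frame. serialCom is the helper from the question; the rest of the names are made up for the example:
import threading
import cv2

class FrameGrabber(threading.Thread):
    # Keeps grabbing frames in the background so detection never waits on the camera
    def __init__(self, device=1):
        threading.Thread.__init__(self)
        self.cap = cv2.VideoCapture(device)
        self.frame = None
        self.lock = threading.Lock()
        self.daemon = True

    def run(self):
        while True:
            ok, frame = self.cap.read()
            if ok:
                with self.lock:
                    self.frame = frame

    def latest(self):
        with self.lock:
            return None if self.frame is None else self.frame.copy()

grabber = FrameGrabber()
grabber.start()
while True:
    frame = grabber.latest()
    if frame is None:
        continue
    # ... run the color detection on frame and call serialCom(...) here ...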
Addition:
For OpenMP, see http://openmp.org/wp/resources/ ; http://opencv.jp/?s=OpenMP shows an example of OpenCV with OpenMP; "opencv python Multi Threading Video Capture" is an example of Python threading with OpenCV; and the other answer already gives a blob-tracking example. For the image-correlation template-matching method, check http://docs.opencv.org/doc/tutorials/imgproc/histograms/template_matching/template_matching.html.
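And a minimal matchTemplate sketch for that last link ('template.png' would be a hypothetical cropped picture of your object):
import cv2

frame = cv2.imread('frame.png')        # current camera view (stand-in)
template = cv2.imread('template.png')  # hypothetical cropped picture of the object
h, w = template.shape[:2]

# Normalized cross-correlation; the best match is the maximum response
res = cv2.matchTemplate(frame, template, cv2.TM_CCOEFF_NORMED)
_, max_val, _, max_loc = cv2.minMaxLoc(res)
if max_val > 0.7:  # confidence threshold, tune for your setup
    top_left = max_loc
    bottom_right = (top_left[0] + w, top_left[1] + h)
    cv2.rectangle(frame, top_left, bottom_right, (0, 255, 0), 2)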