I am doing hand gesture recognition with ROS, OpenCV and Kinect depth images. I have read the following paper, which stores, in a biggest_contour.size() x 2 matrix, the distance from the hand's center to each point of the largest contour in one column, and the angle (ranging from 0 to 359) between that contour point, the center point and a fixed point in the other column. I then plotted this matrix with the angle from 0 to 360 degrees on the x-axis and the distance from the palm center on the y-axis, as shown below:
First palm image and its corresponding plot:
Second palm image and its corresponding plot:
I want to compare these gestures against a database in real time. Since I could not understand the Finger Earth Mover's Distance (FEMD) method described in the paper, I tried OpenCV template matching, which gives good results unless the two gestures are close to each other (like the \m/ and the two-finger poses shown). Can anyone suggest a good algorithm to compare two gestures using the distance matrix created above? Is it something like histogram matching? I also tried OpenCV's EMD method, but it did not give good results either.
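To make the "histogram matching" idea concrete, here is a minimal sketch of one possible comparison; it is not the paper's method, and the helper names toProfile and compareProfiles are made up for illustration. It resamples the Nx2 signature produced by getCon below (column 0 = normalized distance, column 1 = angle scaled to [0,1]) into a fixed 360-bin distance profile and compares two profiles with cv::compareHist, assuming the same OpenCV 2.x-era API as the rest of the code:
// Sketch only: resample the Nx2 signature into one bin per degree, keeping
// the largest distance per bin, then compare two profiles by correlation.
#include <opencv/cv.h>
#include <opencv2/core/core.hpp>
#include <algorithm>
using namespace cv;
Mat toProfile(const Mat &sig)
{
    Mat profile = Mat::zeros(1, 360, CV_32F);
    for (int i = 0; i < sig.rows; i++)
    {
        int bin = std::min(359, (int)(sig.at<float>(i, 1) * 360.0f));
        profile.at<float>(0, bin) =
            std::max(profile.at<float>(0, bin), sig.at<float>(i, 0));
    }
    return profile;
}
// Correlation in [-1, 1]; values close to 1 mean similar distance profiles.
double compareProfiles(const Mat &sigA, const Mat &sigB)
{
    Mat pa = toProfile(sigA), pb = toProfile(sigB);
    return compareHist(pa, pb, CV_COMP_CORREL);
}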
Here is my plain-OpenCV code for detecting the gestures in static images. I am actually doing gesture recognition in real time; I only use this OpenCV-only version to first check whether my algorithm works on static images.
#include <opencv/cv.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <math.h>
using namespace cv;
using namespace std;
class histogram1D {
public:
    // Builds the (distance, angle) signature of the largest contour in a
    // binary hand image: column 0 = distance from the palm center (normalized
    // by the bounding-box half-diagonal), column 1 = angle normalized to [0,1].
    Mat getCon(Mat m)
    {
        vector<vector<Point> > cont;
        double area, max = 0;
        int x = 0;
        findContours(m, cont, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);

        // Keep only the largest contour (assumed to be the hand).
        for(int i = 0; i < cont.size(); i++)
        {
            area = contourArea(cont[i]);
            if(area > max)
            {
                max = area;
                x = i;
            }
        }

        Mat c(m.size(), m.type(), Scalar(0));
        drawContours(c, cont, x, Scalar(255), 1);

        // Approximate palm center and a normalizing radius from the bounding box.
        Rect rect = boundingRect(cont[x]);
        Point cen(rect.x + rect.width/2, rect.y + (0.75*rect.height));
        float rad = sqrt(pow(rect.width/2, 2) + pow(rect.height/2, 2));

        // One row per contour point: (normalized distance, normalized angle).
        Mat sig(cont[x].size(), 2, CV_32F, Scalar(0));
        for(int i = 0; i < cont[x].size(); i++)
        {
            float dis = norm(cont[x][i] - cen)/(rad);
            //if(dis <= 1.0)
            //    dis = 0;
            sig.at<float>(i,0) = dis;

            // Angle of the contour point around the palm center, measured
            // relative to the bottom-left corner of the bounding box.
            double a1 = atan2(cont[x][i].y - cen.y, cont[x][i].x - cen.x);
            double a2 = atan2(rect.y + rect.height - cen.y, rect.x - cen.x);
            double a = a1 - a2;
            if(a < 0) a = (2*CV_PI) + a;
            if(a > 2*CV_PI) a = a - 2*CV_PI;
            sig.at<float>(i,1) = a/(2*CV_PI);
        }

        // Sort the rows by increasing angle (simple bubble sort).
        for(int i = 0; i < sig.rows; i++)
        {
            for(int j = 0; j < sig.rows - 1; j++)
            {
                if(sig.at<float>(j,1) > sig.at<float>(j+1,1))
                {
                    float temp = sig.at<float>(j,0);
                    sig.at<float>(j,0) = sig.at<float>(j+1,0);
                    sig.at<float>(j+1,0) = temp;
                    float tem = sig.at<float>(j,1);
                    sig.at<float>(j,1) = sig.at<float>(j+1,1);
                    sig.at<float>(j+1,1) = tem;
                }
            }
        }
        return sig;
    }
    // Draws the signature as a plot: x-axis = angle (0..360), y-axis = distance.
    Mat getRec(Mat hi)
    {
        Mat rec(hi.rows, 360, CV_8U, Scalar(0));
        for(int i = 0; i < hi.rows; i++)
        {
            line(rec, Point(hi.at<float>(i,1)*360, 0),
                 Point(hi.at<float>(i,1)*360, hi.at<float>(i,0)*hi.rows*0.5),
                 Scalar::all(255));
        }
        flip(rec, rec, 0);
        return rec;
    }
    // Compares two signatures: slides the shorter one over the longer one with
    // normalized cross-correlation and returns the best match score.
    double getDist(Mat sig1, Mat sig2)
    {
        double d = 0;
        int size1, size2, f = 0;
        if(sig1.rows < sig2.rows)
        { size1 = sig1.rows; size2 = sig2.rows; f = 0; }
        else
        { size1 = sig2.rows; size2 = sig1.rows; f = 1; }

        /* Earlier attempt: sum of absolute differences of the distance columns.
        for(int i = 0; i < size1; i++)
        {
            d += fabs((sig1.at<float>(i,0)) - (sig2.at<float>(i,0)));
        }
        for(int i = size1; i < size2; i++)
        {
            if(f==0)
                d += (sig2.at<float>(i,0));
            if(f==1)
                d += (sig1.at<float>(i,0));
        }
        return d; */

        Mat res;
        if(f == 0)
        {
            int r = sig2.rows - sig1.rows + 1;
            int c = 1;
            res.create(r, c, CV_32FC1);
            matchTemplate(sig2, sig1, res, CV_TM_CCORR_NORMED);
        }
        else
        {
            int r = sig1.rows - sig2.rows + 1;
            int c = 1;
            res.create(r, c, CV_32FC1);
            matchTemplate(sig1, sig2, res, CV_TM_CCORR_NORMED);
        }
        minMaxLoc(res, NULL, &d, NULL, NULL, Mat());
        return d;
    }
};
int main()
{
    Mat im1 = imread("131.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    Mat im2 = imread("145.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    Mat im3 = imread("122.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    Mat im5 = imread("82.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    if (im1.empty() || im2.empty() || im3.empty() || im5.empty())
    {
        cout << "Cannot load image!" << endl;
        waitKey();
        return -1;
    }

    histogram1D h;

    // Bring all images to the same size and smooth them before contour extraction.
    resize( im1, im1, Size(80,120));
    resize( im2, im2, Size(80,120));
    resize( im3, im3, Size(80,120));
    resize( im5, im5, Size(80,120));
    blur( im1, im1, Size(3,3) );
    blur( im2, im2, Size(3,3) );
    blur( im3, im3, Size(3,3) );
    blur( im5, im5, Size(3,3) );

    // Signatures and their plots.
    Mat hi1 = h.getCon(im1);
    Mat rec1 = h.getRec(hi1);
    //FileStorage fs("test.yml", FileStorage::WRITE);
    //fs << "hi" << hi;
    Mat hi2 = h.getCon(im2);
    Mat rec2 = h.getRec(hi2);
    Mat hi3 = h.getCon(im3);
    Mat rec3 = h.getRec(hi3);
    Mat hi5 = h.getCon(im5);
    Mat rec5 = h.getRec(hi5);

    /*float ch = EMD(hi1, hi1, CV_DIST_L1);
    float ch1 = EMD(hi1, hi2, CV_DIST_L1);
    float ch2 = EMD(hi1, hi3, CV_DIST_L1);
    float ch3 = EMD(hi1, hi5, CV_DIST_L1);*/

    // Compare the first signature against itself and the three others.
    float ch = h.getDist(hi1, hi1);
    float ch1 = h.getDist(hi1, hi2);
    float ch2 = h.getDist(hi1, hi3);
    float ch3 = h.getDist(hi1, hi5);

    /*double ch = compareHist(hi1, hi1, CV_COMP_CHISQR);
    double ch1 = compareHist(hi1, hi2, CV_COMP_CHISQR);
    double ch2 = compareHist(hi1, hi3, CV_COMP_CHISQR);
    double ch3 = compareHist(hi1, hi5, CV_COMP_CHISQR);*/

    // imshow("rec1.jpg", rec1); imshow("rec2.jpg", rec2); imshow("rec3.jpg", rec3); imshow("rec5.jpg", rec5);
    cout << ch << " " << ch1 << " " << ch2 << " " << ch3 << endl;
    waitKey();
    return 0;
}
Answer 0 (score: 0)
As far as I know, the issue is that EMD allows partial matches, which is why you cannot discriminate between two similar gestures: your second gesture gets matched as a subset of the first one (the index and little fingers have a distance of 0 between the two images). As for how to implement FEMD, I was trying to do that as well.
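As a side note on the EMD attempt commented out in the question: cv::EMD expects each signature row to be (weight, coordinate1, ..., coordinateN), so passing the Nx2 matrix from getCon directly means the distance column is read as the weight. The snippet below is only a sketch of an alternative layout, with a uniform weight per contour point and (distance, angle) as coordinates; this weighting choice is an assumption for illustration, not something from this answer or the paper, and the 0/360-degree wrap-around of the angle is ignored.
// Sketch only: build a signature in the (weight, coords...) layout cv::EMD expects.
#include <opencv/cv.h>
#include <opencv2/core/core.hpp>
using namespace cv;
Mat toEmdSignature(const Mat &sig)                      // sig: Nx2 from getCon()
{
    Mat emdSig(sig.rows, 3, CV_32F);
    for (int i = 0; i < sig.rows; i++)
    {
        emdSig.at<float>(i, 0) = 1.0f;                  // weight: uniform per point
        emdSig.at<float>(i, 1) = sig.at<float>(i, 0);   // coord 1: normalized distance
        emdSig.at<float>(i, 2) = sig.at<float>(i, 1);   // coord 2: normalized angle
    }
    return emdSig;
}
// Usage: float cost = EMD(toEmdSignature(hi1), toEmdSignature(hi2), CV_DIST_L2);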
Answer 1 (score: 0)
I used an approach similar to the one explained in the paper:
Create the contour from the first image and save it as a template, then compute the difference between each point of your contour and the points of the template. You accumulate the error per template and then pick the template with the smallest error (or none, if every error is too large).
It is an application of the $1 gesture recognizer; besides that, if you search YouTube you will find videos of it working.
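Here is a minimal sketch of the point-by-point comparison described above, under the assumption that the candidate contour and every template have already been resampled to the same number of points and roughly aligned; contourError, bestTemplate and the maxError threshold are illustrative names, not part of the $1 recognizer itself.
// Sketch only: sum point-to-point distances between a candidate contour and
// each stored template, then pick the template with the smallest total error
// (or none if every error exceeds a chosen threshold).
#include <opencv2/core/core.hpp>
#include <algorithm>
#include <cmath>
#include <limits>
#include <vector>
using namespace cv;
using namespace std;
double contourError(const vector<Point2f> &candidate, const vector<Point2f> &templ)
{
    double err = 0.0;
    size_t n = min(candidate.size(), templ.size());
    for (size_t i = 0; i < n; i++)
    {
        Point2f d = candidate[i] - templ[i];
        err += sqrt(d.x * d.x + d.y * d.y);     // Euclidean point-to-point error
    }
    return err;
}
int bestTemplate(const vector<Point2f> &candidate,
                 const vector<vector<Point2f> > &templates, double maxError)
{
    int best = -1;
    double bestErr = numeric_limits<double>::max();
    for (size_t t = 0; t < templates.size(); t++)
    {
        double e = contourError(candidate, templates[t]);
        if (e < bestErr) { bestErr = e; best = (int)t; }
    }
    return (bestErr <= maxError) ? best : -1;   // -1 means "no template accepted"
}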