I'm implementing camera motion compensation for an application using OpenCV. I know I need to compute the optical flow and then find the fundamental matrix between two frames to transform the image.
Here is what I have so far:
void VideoStabilization::stabilize(Image *image) {
    if (image->getWidth() != width || image->getHeight() != height)
        reset(image->getWidth(), image->getHeight());

    IplImage *currImage = toCVImage(image);
    IplImage *currImageGray = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 1);
    cvCvtColor(currImage, currImageGray, CV_BGRA2GRAY);

    if (baseImage) {
        // Track the base frame's features into the current frame,
        // then try to warp the current frame to compensate.
        CvPoint2D32f currFeatures[MAX_CORNERS];
        char featuresFound[MAX_CORNERS];
        opticalFlow(currImageGray, currFeatures, featuresFound);
        IplImage *result = transformImage(currImage, currFeatures, featuresFound);
        if (result) {
            updateImage(image, result);
            cvReleaseImage(&result);
        }
    }

    cvReleaseImage(&currImage);
    if (baseImage) cvReleaseImage(&baseImage);
    baseImage = currImageGray;  // the current gray frame becomes the new base
    updateGoodFeatures();
}
void VideoStabilization::updateGoodFeatures() {
    const double QUALITY_LEVEL = 0.05;
    const double MIN_DISTANCE = 5.0;

    baseFeaturesCount = MAX_CORNERS;
    cvGoodFeaturesToTrack(baseImage, eigImage, tempImage,
                          baseFeatures, &baseFeaturesCount,
                          QUALITY_LEVEL, MIN_DISTANCE);
    cvFindCornerSubPix(baseImage, baseFeatures, baseFeaturesCount,
                       cvSize(10, 10), cvSize(-1, -1), TERM_CRITERIA);
}
void VideoStabilization::opticalFlow(IplImage *currImage, CvPoint2D32f *currFeatures, char *featuresFound) {
    const unsigned int WIN_SIZE = 15;
    const unsigned int PYR_LEVEL = 5;

    cvCalcOpticalFlowPyrLK(baseImage, currImage,
                           NULL, NULL,
                           baseFeatures, currFeatures, baseFeaturesCount,
                           cvSize(WIN_SIZE, WIN_SIZE), PYR_LEVEL,
                           featuresFound, NULL,
                           TERM_CRITERIA, 0);
}
IplImage *VideoStabilization::transformImage(IplImage *image, CvPoint2D32f *features, char *featuresFound) const {
    unsigned int featuresFoundCount = 0;
    for (unsigned int i = 0; i < MAX_CORNERS; ++i) {
        if (featuresFound[i]) ++featuresFoundCount;
    }
    if (featuresFoundCount < 8) {
        std::cout << "Not enough features found." << std::endl;
        return NULL;
    }

    CvMat *points1 = cvCreateMat(2, featuresFoundCount, CV_32F);
    CvMat *points2 = cvCreateMat(2, featuresFoundCount, CV_32F);
    CvMat *fundamentalMatrix = cvCreateMat(3, 3, CV_32F);

    // Pack the successfully tracked point pairs into 2xN matrices.
    unsigned int pos = 0;
    for (unsigned int i = 0; i < featuresFoundCount; ++i) {
        while (!featuresFound[pos]) ++pos;
        cvSetReal2D(points1, 0, i, baseFeatures[pos].x);
        cvSetReal2D(points1, 1, i, baseFeatures[pos].y);
        cvSetReal2D(points2, 0, i, features[pos].x);
        cvSetReal2D(points2, 1, i, features[pos].y);
        ++pos;
    }

    int fmCount = cvFindFundamentalMat(points1, points2, fundamentalMatrix, CV_FM_RANSAC, 1.0, 0.99);
    if (fmCount < 1) {
        std::cout << "Fundamental matrix not found." << std::endl;
        cvReleaseMat(&points1);  // release on the early-exit path too
        cvReleaseMat(&points2);
        cvReleaseMat(&fundamentalMatrix);
        return NULL;
    }

    std::cout << fundamentalMatrix->data.fl[0] << " " << fundamentalMatrix->data.fl[1] << " " << fundamentalMatrix->data.fl[2] << "\n";
    std::cout << fundamentalMatrix->data.fl[3] << " " << fundamentalMatrix->data.fl[4] << " " << fundamentalMatrix->data.fl[5] << "\n";
    std::cout << fundamentalMatrix->data.fl[6] << " " << fundamentalMatrix->data.fl[7] << " " << fundamentalMatrix->data.fl[8] << "\n";

    cvReleaseMat(&points1);
    cvReleaseMat(&points2);
    IplImage *result = transformImage(image, *fundamentalMatrix);
    cvReleaseMat(&fundamentalMatrix);
    return result;
}
MAX_CORNERS is 100, and usually 70-90 features are found.
With this code I get a strange fundamental matrix, for example:
-0.000190809 -0.00114947 1.2487
0.00127824 6.57727e-05 0.326055
-1.22443 -0.338243 1
Since I'm just holding the camera in my hand and trying not to shake it (and there are no moving objects), I expected the matrix to be close to the identity. What am I doing wrong?
Also, I'm not sure what to use to transform the image. cvWarpAffine needs a 2x3 matrix; should I discard the last row, or use a different function?
Answer (score: 7)
What you are looking for is not the fundamental matrix, but an affine or perspective transformation.
The fundamental matrix describes the relation between two cameras with distinctly different viewpoints. It is computed so that if you have two points x (in one image) and x' (in the other) that are projections of the same point in space, then x'ᵀ F x (the product) is zero. If x and x' are nearly identical for every pair, the estimation degenerates and the resulting F is practically useless. That is why you got what you got.
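To make the degeneracy concrete, here is a short derivation sketch in standard epipolar notation (x and x' are homogeneous image points; this reasoning is mine, not part of the original answer):

\[ x'^\top F\, x = 0 \qquad \text{(epipolar constraint)} \]
\[ x' \approx x \;\Rightarrow\; x^\top F\, x = 0 \ \text{for every tracked point,} \]

and the latter is satisfied by any skew-symmetric F, so the estimate is essentially arbitrary. Notice that the matrix printed in the question is in fact nearly skew-symmetric in its off-diagonal entries: the (1,3)/(3,1) and (2,3)/(3,2) pairs have almost opposite values.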
The matrix that should indeed be close to the identity is the transformation A that maps a point x to x' = A x (the old image onto the new one). Depending on which kinds of motion you want to cover (affine or perspective), you could (in theory) use the functions cvGetAffineTransform or cvGetPerspectiveTransform to compute that transformation; they need 3 or 4 point pairs, respectively (see the sketch below).
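For illustration, a minimal sketch of the affine route, using the names from the question's code. Taking the first three matches, as done here, is only for brevity; in practice you would pick three well-spread, successfully tracked corners:

    // Three matched point pairs (hypothetical selection, for brevity).
    CvPoint2D32f basePts[3] = { baseFeatures[0], baseFeatures[1], baseFeatures[2] };
    CvPoint2D32f currPts[3] = { currFeatures[0], currFeatures[1], currFeatures[2] };

    // 2x3 affine matrix mapping the current frame back onto the base frame.
    CvMat *affine = cvCreateMat(2, 3, CV_32F);
    cvGetAffineTransform(currPts, basePts, affine);

    IplImage *stabilized = cvCreateImage(cvGetSize(currImage), currImage->depth, currImage->nChannels);
    cvWarpAffine(currImage, stabilized, affine,
                 CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll(0));
    // ... use stabilized, then release it and the matrix.
    cvReleaseImage(&stabilized);
    cvReleaseMat(&affine);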
However, the best option (I think) is cvFindHomography. It estimates a perspective transformation based on all available point pairs, using an outlier-filtering algorithm (e.g. RANSAC), and gives you a 3x3 matrix.
You can then transform the image using cvWarpPerspective (see the sketch below).
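As a sketch, the end of transformImage could swap the fundamental-matrix step for a homography, reusing the points1/points2 matrices already built in the question (this assumes an OpenCV version whose C API exposes the RANSAC parameters of cvFindHomography):

    // 3x3 homography mapping current-frame points (points2) back onto
    // base-frame points (points1), filtering outliers with RANSAC
    // (1.0 is the reprojection threshold in pixels).
    CvMat *homography = cvCreateMat(3, 3, CV_32F);
    if (cvFindHomography(points2, points1, homography, CV_RANSAC, 1.0, NULL)) {
        IplImage *result = cvCreateImage(cvGetSize(image), image->depth, image->nChannels);
        cvWarpPerspective(image, result, homography,
                          CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll(0));
        // ... return result as the stabilized frame.
    }
    cvReleaseMat(&homography);

Note the direction of the mapping: for stabilization you want the transform from the current frame's coordinates to the base frame's, so the current points go in as the source and the base points as the destination.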