I'm looking for an algorithm to prune short line segments from the output of an edge detector. As you can see in the image below (and in the link), several of the detected edges are not "long" lines. Ideally I'd like the processing to leave only the four sides of the quadrilateral, but a few stray lines wouldn't be a big deal... Any suggestions?
Answer 0 (score: 5)
Before finding edges, preprocess the image with an opening or closing operation (or both), i.e. erode then dilate, or dilate then erode. This should remove the smaller objects while leaving the larger ones roughly unchanged.
I looked for examples online; the best I could find is page 41 of this PDF.
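For illustration, a minimal OpenCV sketch of that pre-processing might look like the following; the 5x5 kernel size, the file names and the Canny thresholds are placeholders of mine, not part of the answer, and would need tuning.

#include <opencv2/opencv.hpp>

int main()
{
    cv::Mat src = cv::imread("input.png", 0);   // hypothetical input, loaded as grayscale
    cv::Mat kernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(5, 5));

    cv::Mat cleaned;
    cv::morphologyEx(src, cleaned, cv::MORPH_OPEN, kernel);      // erode then dilate: drops small specks
    cv::morphologyEx(cleaned, cleaned, cv::MORPH_CLOSE, kernel); // dilate then erode: fills small gaps

    cv::Mat edges;
    cv::Canny(cleaned, edges, 50, 150);          // run the edge detector on the cleaned image
    cv::imwrite("edges.png", edges);
    return 0;
}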
Answer 1 (score: 4)
I doubt this can be done with simple local operations. Look at the rectangle you want to keep: it has several gaps, so a local operation that removes short line segments would probably degrade the quality of the desired output considerably.
I would therefore try to detect the rectangle as the salient content, by closing the gaps, fitting a polygon, or something along those lines, and then discard the remaining unimportant content in a second step. The Hough transform might help here.
Update
I just ran this sample application, which uses the kernel Hough transform, on your sample image and got four nice lines fitting your rectangle.
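The kernel Hough transform used by that sample is not part of OpenCV, but a rough approximation of the same idea is OpenCV's probabilistic Hough transform, which keeps only segments longer than a minimum length; the vote threshold, minimum length and maximum gap below are guesses that would need tuning.

#include <opencv2/opencv.hpp>
#include <vector>

int main()
{
    cv::Mat edges = cv::imread("edges.png", 0);   // hypothetical binary edge map from Canny
    std::vector<cv::Vec4i> lines;
    cv::HoughLinesP(edges, lines, 1, CV_PI / 180,
                    50 /*votes*/, 80 /*minLineLength*/, 10 /*maxLineGap*/);

    // draw only the surviving (long) segments
    cv::Mat result = cv::Mat::zeros(edges.size(), CV_8UC3);
    for (size_t i = 0; i < lines.size(); i++)
        cv::line(result, cv::Point(lines[i][0], lines[i][1]),
                 cv::Point(lines[i][2], lines[i][3]), cv::Scalar(0, 255, 0), 2);
    cv::imwrite("long_lines.png", result);
    return 0;
}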
Answer 2 (score: 4)
In case anyone stumbles upon this thread: OpenCV 2.x ships with an example called squares.cpp that basically solves this task.
I made a slight modification to the application to improve the detection of the quadrangle.
Code:
#include "highgui.h"
#include "cv.h"
#include <iostream>
#include <math.h>
#include <string.h>
using namespace cv;
using namespace std;
void help()
{
cout <<
"\nA program using pyramid scaling, Canny, contours, contour simpification and\n"
"memory storage (it's got it all folks) to find\n"
"squares in a list of images pic1-6.png\n"
"Returns sequence of squares detected on the image.\n"
"the sequence is stored in the specified memory storage\n"
"Call:\n"
"./squares\n"
"Using OpenCV version %s\n" << CV_VERSION << "\n" << endl;
}
int thresh = 70, N = 2;
const char* wndname = "Square Detection Demonized";
// helper function:
// finds a cosine of angle between vectors
// from pt0->pt1 and from pt0->pt2
double angle( Point pt1, Point pt2, Point pt0 )
{
double dx1 = pt1.x - pt0.x;
double dy1 = pt1.y - pt0.y;
double dx2 = pt2.x - pt0.x;
double dy2 = pt2.y - pt0.y;
return (dx1*dx2 + dy1*dy2)/sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}
// returns sequence of squares detected on the image.
// the sequence is stored in the specified memory storage
void findSquares( const Mat& image, vector<vector<Point> >& squares )
{
squares.clear();
Mat pyr, timg, gray0(image.size(), CV_8U), gray;
// karlphillip: dilate the image so this technique can detect the white square,
Mat out(image);
dilate(out, out, Mat(), Point(-1,-1));
// then blur it so that the ocean/sea become one big segment to avoid detecting them as 2 big squares.
medianBlur(out, out, 3);
// down-scale and upscale the image to filter out the noise
pyrDown(out, pyr, Size(out.cols/2, out.rows/2));
pyrUp(pyr, timg, out.size());
vector<vector<Point> > contours;
// find squares only in the first color plane
for( int c = 0; c < 1; c++ ) // was: c < 3
{
int ch[] = {c, 0};
mixChannels(&timg, 1, &gray0, 1, ch, 1);
// try several threshold levels
for( int l = 0; l < N; l++ )
{
// hack: use Canny instead of zero threshold level.
// Canny helps to catch squares with gradient shading
if( l == 0 )
{
// apply Canny. Take the upper threshold from slider
// and set the lower to 0 (which forces edges merging)
Canny(gray0, gray, 0, thresh, 5);
// dilate canny output to remove potential
// holes between edge segments
dilate(gray, gray, Mat(), Point(-1,-1));
}
else
{
// apply threshold if l!=0:
// tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
gray = gray0 >= (l+1)*255/N;
}
// find contours and store them all as a list
findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
vector<Point> approx;
// test each contour
for( size_t i = 0; i < contours.size(); i++ )
{
// approximate contour with accuracy proportional
// to the contour perimeter
approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);
// square contours should have 4 vertices after approximation
// relatively large area (to filter out noisy contours)
// and be convex.
// Note: absolute value of an area is used because
// area may be positive or negative - in accordance with the
// contour orientation
if( approx.size() == 4 &&
fabs(contourArea(Mat(approx))) > 1000 &&
isContourConvex(Mat(approx)) )
{
double maxCosine = 0;
for( int j = 2; j < 5; j++ )
{
// find the maximum cosine of the angle between joint edges
double cosine = fabs(angle(approx[j%4], approx[j-2], approx[j-1]));
maxCosine = MAX(maxCosine, cosine);
}
// if cosines of all angles are small
// (all angles are ~90 degree) then write quandrange
// vertices to resultant sequence
if( maxCosine < 0.3 )
squares.push_back(approx);
}
}
}
}
}
// the function draws all the squares in the image
void drawSquares( Mat& image, const vector<vector<Point> >& squares )
{
for( size_t i = 1; i < squares.size(); i++ )
{
const Point* p = &squares[i][0];
int n = (int)squares[i].size();
polylines(image, &p, &n, 1, true, Scalar(0,255,0), 3, CV_AA);
}
imshow(wndname, image);
}
int main(int argc, char** argv)
{
if (argc < 2)
{
cout << "Usage: ./program <file>" << endl;
return -1;
}
static const char* names[] = { argv[1], 0 };
help();
namedWindow( wndname, 1 );
vector<vector<Point> > squares;
for( int i = 0; names[i] != 0; i++ )
{
Mat image = imread(names[i], 1);
if( image.empty() )
{
cout << "Couldn't load " << names[i] << endl;
continue;
}
findSquares(image, squares);
drawSquares(image, squares);
imwrite("out.jpg", image);
int c = waitKey();
if( (char)c == 27 )
break;
}
return 0;
}
Answer 3 (score: 2)
The Hough transform can be a very expensive operation.
An alternative that may work well in your case is the following (a code sketch is given below):
Run two mathematical morphology operations called image closing (http://homepages.inf.ed.ac.uk/rbf/HIPR2/close.htm), one with a horizontal and one with a vertical line structuring element (of a length determined by testing). The point of this is to close all the gaps in the large rectangle.
Then run a connected component analysis. If the morphology was effective, the large rectangle will come out as a single connected component. It then only remains to iterate over all connected components and pick the most likely candidate for the large rectangle.
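A possible OpenCV translation of that recipe; the structuring-element length (25 px) and the use of findContours as the connected-component step are assumptions of mine, not part of the answer.

#include <opencv2/opencv.hpp>
#include <vector>

int main()
{
    cv::Mat bin = cv::imread("edges.png", 0);   // hypothetical binary edge image

    // close gaps along horizontal and vertical directions with line-shaped kernels
    cv::Mat horiz = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(25, 1));
    cv::Mat vert  = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(1, 25));
    cv::morphologyEx(bin, bin, cv::MORPH_CLOSE, horiz);
    cv::morphologyEx(bin, bin, cv::MORPH_CLOSE, vert);

    // connected components via contours; keep the one with the largest area,
    // which should be the big rectangle if the closing worked
    std::vector<std::vector<cv::Point> > contours;
    cv::findContours(bin.clone(), contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);

    int best = -1;
    double bestArea = 0;
    for (size_t i = 0; i < contours.size(); i++)
    {
        double a = cv::contourArea(contours[i]);
        if (a > bestArea) { bestArea = a; best = (int)i; }
    }
    // 'best' now indexes the most likely candidate for the large rectangle
    return best >= 0 ? 0 : 1;
}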
Answer 4 (score: 2)
Maybe find the connected components, remove those smaller than X pixels (determined empirically), then dilate along horizontal/vertical lines to reconnect the gaps within the rectangle (a rough sketch follows).
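A hedged sketch of that suggestion in OpenCV; because edge strokes are thin, the contour perimeter (arcLength) is used here as the "size in pixels" measure, and the 60 px cutoff and 15 px dilation kernels are placeholders to be determined empirically.

#include <opencv2/opencv.hpp>
#include <vector>

int main()
{
    cv::Mat bin = cv::imread("edges.png", 0);   // hypothetical binary edge image

    std::vector<std::vector<cv::Point> > contours;
    cv::findContours(bin.clone(), contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);

    // erase components whose traced outline is shorter than the cutoff
    for (size_t i = 0; i < contours.size(); i++)
        if (cv::arcLength(contours[i], true) < 60)
            cv::drawContours(bin, contours, (int)i, cv::Scalar(0), CV_FILLED);

    // dilate along horizontal/vertical directions to reconnect the rectangle
    cv::dilate(bin, bin, cv::getStructuringElement(cv::MORPH_RECT, cv::Size(15, 1)));
    cv::dilate(bin, bin, cv::getStructuringElement(cv::MORPH_RECT, cv::Size(1, 15)));
    cv::imwrite("pruned.png", bin);
    return 0;
}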
Answer 5 (score: 1)
Two main techniques can be followed:
Vector based operation: map the pixel islands into clusters (blobs, Voronoi regions, whatever). Then apply some heuristics to rectify the segments, such as the Teh-Chin chain approximation algorithm, and do the pruning on the vector elements (start point, end point, length, orientation and so on).
Set based operation: cluster the data (as above). For each cluster, compute the principal components and detect lines (as opposed to circles or other shapes) by looking for clusters that show only one significant eigenvalue (or a pair of comparable ones if you are looking for "fat" segments, which would resemble ellipses). Check the eigenvectors associated with the eigenvalues to get information about the blobs' orientation, and make your choice. A small sketch of this is given below.
Both ways can be explored easily with OpenCV (the former indeed falls under the "contour analysis" category of algorithms).
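A small illustration of the set-based idea: run PCA on the pixel coordinates of one blob and look at the eigenvalue ratio. A ratio near zero means the blob is essentially a line, and the first eigenvector gives its orientation. The blob extraction (reading a single-component mask from a file) and the 0.05 ratio threshold are assumptions of mine.

#include <opencv2/opencv.hpp>
#include <cmath>
#include <iostream>
#include <vector>

int main()
{
    // hypothetical mask containing a single cluster/blob
    cv::Mat blob = cv::imread("blob_mask.png", 0);

    // collect the blob's pixel coordinates
    std::vector<cv::Point2f> pts;
    for (int y = 0; y < blob.rows; y++)
        for (int x = 0; x < blob.cols; x++)
            if (blob.at<uchar>(y, x) > 0)
                pts.push_back(cv::Point2f((float)x, (float)y));
    if (pts.size() < 2)
        return 1;

    // pack the coordinates into an Nx2 matrix and run PCA on it
    cv::Mat data((int)pts.size(), 2, CV_32F);
    for (size_t i = 0; i < pts.size(); i++)
    {
        data.at<float>((int)i, 0) = pts[i].x;
        data.at<float>((int)i, 1) = pts[i].y;
    }
    cv::PCA pca(data, cv::Mat(), CV_PCA_DATA_AS_ROW);

    float e0 = pca.eigenvalues.at<float>(0);
    float e1 = pca.eigenvalues.at<float>(1);
    double orientation = atan2(pca.eigenvectors.at<float>(0, 1),
                               pca.eigenvectors.at<float>(0, 0));

    // only one significant eigenvalue -> the blob is essentially a line segment
    bool lineLike = (e1 / e0) < 0.05f;
    std::cout << "line-like: " << lineLike
              << ", orientation (rad): " << orientation << std::endl;
    return 0;
}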
Answer 6 (score: 0)
Here is a simple morphological filtering solution along the lines of @Tom10:
Solution in MATLAB:
se1 = strel('line',5,180); % linear horizontal structuring element
se2 = strel('line',5,90); % linear vertical structuring element
I = rgb2gray(imread('test.jpg'))>80; % threshold (since i had a grayscale version of the image)
Idil = imdilate(imdilate(I,se1),se2); % dilate contours so that they connect
Idil_area = bwareaopen(Idil,1200); % area filter them to remove the small components
The idea is basically to connect the contours so that they form one large component, and then filter with an area opening to remove the small components and keep the rectangle.
Results: