我需要在测试表中找到三个黑色方块的坐标。我从网站 emgu.com 上获取了示例代码并略微更改了它,但它找不到我需要的内容。图像的大小为 A4,测试表格的大小为 A5。希望得到你们的帮助 :) 我差点忘了说,正方形的边长是 30 像素。
/// <summary>
/// Finds small square contours (~30 px black squares) in a grayscale page image,
/// outlines them in red on a BGR copy, shows it in imageBox1 and saves it to disk.
/// </summary>
/// <param name="img">Grayscale input image; modified in place by the blur/threshold steps.</param>
private void DetectRectangles(Image<Gray, byte> img)
{
    // Light blur to suppress scan/JPEG noise before thresholding.
    CvInvoke.GaussianBlur(img, img, new Size(3, 3), 0);

    // Adaptive threshold copes with uneven page illumination better than a global one.
    CvInvoke.AdaptiveThreshold(img, img, 255, AdaptiveThresholdType.MeanC, ThresholdType.Binary, 75, 100);

    var boxList = new List<RotatedRect>();

    // UMat and the contour vectors wrap native memory (IDisposable):
    // the original leaked cannyEdges — keep everything in using blocks.
    using (UMat cannyEdges = new UMat())
    using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
    {
        CvInvoke.Canny(img, cannyEdges, 180, 120);
        CvInvoke.FindContours(cannyEdges, contours, null, RetrType.Tree, ChainApproxMethod.ChainApproxSimple);

        int count = contours.Size;
        for (int i = 0; i < count; i++)
        {
            using (VectorOfPoint contour = contours[i])
            using (VectorOfPoint approxContour = new VectorOfPoint())
            {
                // Polygon approximation with a 5% of perimeter tolerance.
                CvInvoke.ApproxPolyDP(contour, approxContour, CvInvoke.ArcLength(contour, true) * 0.05, true);

                // A 30x30 px square has area ~900; accept a band around that,
                // and require exactly 4 vertices after approximation.
                double area = CvInvoke.ContourArea(approxContour);
                if (area > 800 && area < 1000 && approxContour.Size == 4)
                {
                    // Verify every corner angle is close to 90 degrees.
                    bool isRectangle = true;
                    Point[] pts = approxContour.ToArray();
                    LineSegment2D[] edges = PointCollection.PolyLine(pts, true);
                    for (int j = 0; j < edges.Length; j++)
                    {
                        double angle = Math.Abs(edges[(j + 1) % edges.Length].GetExteriorAngleDegree(edges[j]));
                        if (angle < 75 || angle > 94)
                        {
                            isRectangle = false;
                            break;
                        }
                    }

                    if (isRectangle)
                        boxList.Add(CvInvoke.MinAreaRect(approxContour));
                }
            }
        }
    }

    // Draw the accepted boxes in red on a colour copy of the processed image.
    var resultimg = new Image<Bgr, byte>(img.Width, img.Height);
    CvInvoke.CvtColor(img, resultimg, ColorConversion.Gray2Bgr);
    foreach (RotatedRect box in boxList)
    {
        CvInvoke.Polylines(resultimg, Array.ConvertAll(box.GetVertices(), Point.Round), true, new Bgr(Color.Red).MCvScalar, 2);
    }

    imageBox1.Image = resultimg;
    resultimg.Save("result_img.jpg");
}
输入图片:
答案 0（得分：3）
由于您正在寻找一个非常特定的对象,您可以使用以下算法:
对于每个轮廓：
a. 计算其最小面积外接矩形 box
b. 计算 box 的面积：barea
c. 计算轮廓本身的面积：carea
d. 应用若干约束，确保该轮廓正是你要找的正方形
步骤 d 中的约束是：
比率 carea / barea 应该很高（比如高于 0.9），这意味着该轮廓属于一个近似矩形的连通块（blob）。
box 的宽高比应该非常接近 1，这意味着 box 基本上是一个正方形。
正方形的边长应该接近 30 像素，以排除图像中其他更小或更大的方块。
我运行的结果是:
下面是代码。抱歉它是 C++ 写的，但由于全部是 OpenCV 函数调用，你应该能很容易地把它移植到 C#。至少，你可以把它当作参考：
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main()
{
// Load image
Mat1b img = imread("path_to_image", IMREAD_GRAYSCALE);
// Create the output image
Mat3b out;
cvtColor(img, out, COLOR_GRAY2BGR);
// Create debug image
Mat3b dbg = out.clone();
// Binarize (to remove jpeg arifacts)
img = img > 200;
// Invert image
img = ~img;
// Find connected components
vector<vector<Point>> contours;
findContours(img.clone(), contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
vector<RotatedRect> squares;
// For each contour
for (int i = 0; i < contours.size(); ++i)
{
// Find rotated bounding box
RotatedRect box = minAreaRect(contours[i]);
// Compute the area of the contour
double carea = contourArea(contours[i]);
// Compute the area of the box
double barea = box.size.area();
// Constraint #1
if ((carea / barea) > 0.9)
{
drawContours(dbg, contours, i, Scalar(0, 0, 255), 7);
// Constraint #2
if (min(box.size.height, box.size.width) / max(box.size.height, box.size.width) > 0.95)
{
drawContours(dbg, contours, i, Scalar(255, 0, 0), 5);
// Constraint #3
if (box.size.width > 25 && box.size.width < 35)
{
drawContours(dbg, contours, i, Scalar(0, 255, 0), 3);
// Found the square!
squares.push_back(box);
}
}
}
// Draw output
for (int i = 0; i < squares.size(); ++i)
{
Point2f pts[4];
squares[i].points(pts);
for (int j = 0; j < 4; ++j)
{
line(out, pts[j], pts[(j + 1) % 4], Scalar(0,255,0), 5);
}
}
}
// Resize for better visualization
resize(out, out, Size(), 0.25, 0.25);
resize(dbg, dbg, Size(), 0.25, 0.25);
// Show images
imshow("Steps", dbg);
imshow("Result", out);
waitKey();
return 0;
}