I'm trying to create a program that automatically recognizes lottery ball numbers.
I have already detected the moment of the draw and separated the balls; my problem now is that I can't recognize the number on each ball.
Here is the original image:
Here is the image after I find the contours:
Now, for each contour, I try to determine whether it is a digit and, if so, which one. This is where my application fails.
*It's important to note that the balls can be at various angles and the lighting can vary, both of which affect the quality of the photos.
Here is an example of a contour my program finds:
Here is my code for identifying the number:
private void identifyNumber(Mat inFile) {
    System.out.println("\nRunning identifyNumber");
    System.out.println("-------------------------");

    int match_method = Imgproc.TM_SQDIFF;
    Mat img = inFile;
    Mat bestImage = new Mat(), rotImg;
    int bestDegree = 0, bestNumber = 0;
    double lowerstFornumber, lowest = 1E30;
    String templateNumber;

    for (int k = 0; k <= 9; k++) {
        lowerstFornumber = 1E30;
        for (int i = -90; i <= 90; i = i + 5) {
            templateNumber = "C:\\pics\\drawProcessing\\numbers\\" + k + ".png";
            Mat templ = Highgui.imread(templateNumber);
            rotImg = rotateImage(img, i);

            int result_cols = rotImg.cols() - templ.cols() + 1;
            int result_rows = rotImg.rows() - templ.rows() + 1;
            Mat result = new Mat(result_rows, result_cols, CvType.CV_32FC1);
            Imgproc.matchTemplate(rotImg, templ, result, match_method);

            MinMaxLocResult mmr = Core.minMaxLoc(result);
            Point matchLoc;
            if (match_method == Imgproc.TM_SQDIFF || match_method == Imgproc.TM_SQDIFF_NORMED) {
                matchLoc = mmr.minLoc;
            } else {
                matchLoc = mmr.maxLoc;
            }
            double minValue = mmr.minVal;
            // System.out.println(i + ",maxVal:" + maxValue);

            if (lowerstFornumber > minValue) {
                lowerstFornumber = minValue;
            }
            if (lowest > minValue) {
                lowest = minValue;
                bestImage = rotImg;
                bestDegree = i;
                bestNumber = arr[k]; // arr maps the template index to the ball number (defined elsewhere)
            }
        }
        System.out.println("lowerstFornumber " + arr[k] + " :" + lowerstFornumber);
    }
    System.out.println("bestDegree:" + bestDegree);
    System.out.println("bestNumber:" + bestNumber);
    System.out.println("_lowest:" + lowest);
    Highgui.imwrite("C:\\pics\\drawProcessing\\out-best.jpg", bestImage);
}
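
(rotateImage is not shown in the question; a minimal sketch of such a helper, assuming a plain rotation about the image centre, could look like this:)

private Mat rotateImage(Mat src, double angleDeg) {
    // Assumed implementation, not the original code: rotate around the image centre
    // by angleDeg degrees, keeping the original image size.
    Point center = new Point(src.cols() / 2.0, src.rows() / 2.0);
    Mat rot = Imgproc.getRotationMatrix2D(center, angleDeg, 1.0);
    Mat dst = new Mat();
    Imgproc.warpAffine(src, dst, rot, src.size());
    return dst;
}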
Sometimes it finds the number and sometimes it doesn't. Is it even possible? (I need 100% accuracy.) Am I doing something wrong?
Answer 0 (score 0):
What if you try affine-invariant descriptors for the boxes? You could even start with simpler descriptors such as SIFT or SURF, computed for each region and matched against a database. It should be fast, since the scale does not appear to change. SIFT and SURF may give you some results, but for something more robust you could use ASIFT.
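
For illustration, a minimal sketch of that "compute a descriptor per region and match against a database" idea, written with the same OpenCV 2.x Java bindings as the question and using ORB in place of SIFT/SURF (those sit in the nonfree module); the file names below are placeholders:

import org.opencv.core.*;
import org.opencv.features2d.*;
import org.opencv.highgui.Highgui;

public class DescriptorMatchSketch {

    // Average ORB match distance between a ball region and one digit template;
    // lower means more similar.
    static double matchScore(Mat region, Mat templ) {
        FeatureDetector detector = FeatureDetector.create(FeatureDetector.ORB);
        DescriptorExtractor extractor = DescriptorExtractor.create(DescriptorExtractor.ORB);
        DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMING);

        MatOfKeyPoint kpRegion = new MatOfKeyPoint(), kpTempl = new MatOfKeyPoint();
        Mat descRegion = new Mat(), descTempl = new Mat();
        detector.detect(region, kpRegion);
        extractor.compute(region, kpRegion, descRegion);
        detector.detect(templ, kpTempl);
        extractor.compute(templ, kpTempl, descTempl);
        if (descRegion.empty() || descTempl.empty()) return Double.MAX_VALUE;

        MatOfDMatch matches = new MatOfDMatch();
        matcher.match(descRegion, descTempl, matches);
        double sum = 0;
        DMatch[] ms = matches.toArray();
        for (DMatch m : ms) sum += m.distance;
        return ms.length == 0 ? Double.MAX_VALUE : sum / ms.length;
    }

    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Mat region = Highgui.imread("ballRegion.png", Highgui.CV_LOAD_IMAGE_GRAYSCALE);
        double best = Double.MAX_VALUE;
        int bestDigit = -1;
        for (int k = 0; k <= 9; k++) {
            // One template per digit in the database, e.g. 0.png ... 9.png
            Mat templ = Highgui.imread(k + ".png", Highgui.CV_LOAD_IMAGE_GRAYSCALE);
            double s = matchScore(region, templ);
            if (s < best) { best = s; bestDigit = k; }
        }
        System.out.println("best digit: " + bestDigit);
    }
}

A single template per digit is a rather thin database; in practice you would store several templates per digit under different lighting, but the matching call stays the same.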
Answer 1 (score 0):
It's not in Java, but it illustrates the idea:
#include <iostream>
#include <vector>
#include <string>
#include <fstream>
#include <cstdio>   // for sprintf
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;
//----------------------------------------------------------------------
//
//----------------------------------------------------------------------
void DetectContour(Mat& img, Mat& res)
{
    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    Mat edges = img.clone();
    //Canny(img, edges, 50, 190, 3);
    img.copyTo(edges);
    findContours(edges, contours, hierarchy, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_NONE, Point());
    if (contours.size() > 0)
    {
        for (int i = 0; i < contours.size(); i++)
        {
            vector<Point> approx;
            approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true) * 0.02, true);
            double area = contourArea(Mat(approx));
            if (area > 200)
                drawContours(res, contours, i, Scalar(255, 0, 0), cv::FILLED, 8);
        }
    }
}
//----------------------------------------------------------------------
//
//----------------------------------------------------------------------
int main(int argc, char **argv)
{
    cv::namedWindow("result");
    Mat img = imread("ball.png");

    // Prepare mask
    Mat mask = Mat::zeros(img.size(), CV_8UC1);
    Mat img_gray;
    cv::cvtColor(img, img_gray, cv::COLOR_BGR2GRAY);
    Mat res = Mat(img.size(), CV_8UC1);
    res = 255;

    vector<Vec3f> circles;
    /// Apply the Hough Transform to find the circles
    HoughCircles(img_gray, circles, cv::HOUGH_GRADIENT, 1, img_gray.rows/8, 140, 70, 0, 0);

    /// Draw the circles detected (filled, to build the ball mask)
    for (size_t i = 0; i < circles.size(); i++)
    {
        Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
        int radius = cvRound(circles[i][2]);
        circle(mask, center, radius, Scalar(255, 255, 255), -1, 8, 0);
    }

    img.copyTo(res, mask);
    cv::cvtColor(res, res, cv::COLOR_BGR2GRAY);
    threshold(res, res, 80, 255, cv::THRESH_BINARY_INV);

    mask = 0;
    DetectContour(res, mask);
    mask.copyTo(res);

    int element_size = 10;
    Mat element = getStructuringElement(cv::MORPH_ELLIPSE, Size(2*element_size + 1, 2*element_size + 1), Point(element_size, element_size));
    int element_size2 = 5;
    Mat element2 = getStructuringElement(cv::MORPH_ELLIPSE, Size(2*element_size2 + 1, 2*element_size2 + 1), Point(element_size2, element_size2));
    cv::dilate(res, res, element2);
    cv::erode(res, res, element);

    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    findContours(res, contours, hierarchy, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_NONE, Point());

    for (int i = 0; i < contours.size(); ++i)
    {
        RotatedRect box = minAreaRect(contours[i]);
        Point2f vtx[4];
        box.points(vtx);

        float w = 100;
        float h = 100;

        // Create a column vector with the coordinates of each point (on the field plane)
        cv::Mat xField;
        xField.create(4, 1, CV_32FC2);
        xField.at<Point2f>(0) = vtx[0];
        xField.at<Point2f>(1) = vtx[1];
        xField.at<Point2f>(2) = vtx[2];
        xField.at<Point2f>(3) = vtx[3];

        // Same thing for xImage, but with pixel coordinates instead of field coordinates, same order as in xField
        cv::Mat xImage;
        xImage.create(4, 1, CV_32FC2);
        xImage.at<Point2f>(0) = cv::Point2f(0, 0);
        xImage.at<Point2f>(1) = cv::Point2f(w, 0);
        xImage.at<Point2f>(2) = cv::Point2f(w, h);
        xImage.at<Point2f>(3) = cv::Point2f(0, h);

        // Compute the homography matrix and rectify the box to a w x h patch
        cv::Mat H = cv::findHomography(xField, xImage);
        xField.release();
        xImage.release();

        Mat warped;
        warpPerspective(img, warped, H, Size(w, h));
        H.release();

        char win_name[255];
        sprintf(win_name, "number_image %d", i);
        namedWindow(win_name);
        imshow(win_name, warped);
        // cv::waitKey(0);

        for (int j = 0; j < 4; j++)
        {
            line(img, vtx[j], vtx[(j+1)%4], Scalar(0, 255, 0), 1, LINE_AA);
        }
    }

    imshow("result", img);
    cv::waitKey(0);
    cv::destroyAllWindows();
}
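
Since the question is in Java, the box-rectification step at the heart of this snippet might look roughly like the following with the OpenCV Java bindings (a sketch only: the helper name is made up, and getPerspectiveTransform is used instead of findHomography, which for exactly four point pairs gives the same transform):

import org.opencv.core.*;
import org.opencv.imgproc.Imgproc;

public class WarpBoxSketch {

    // Rectify the minimum-area box around one contour into an upright w x h patch.
    static Mat warpBoxUpright(Mat img, MatOfPoint contour) {
        RotatedRect box = Imgproc.minAreaRect(new MatOfPoint2f(contour.toArray()));
        Point[] vtx = new Point[4];
        box.points(vtx);

        float w = 100, h = 100;
        MatOfPoint2f src = new MatOfPoint2f(vtx[0], vtx[1], vtx[2], vtx[3]);
        MatOfPoint2f dst = new MatOfPoint2f(new Point(0, 0), new Point(w, 0),
                                            new Point(w, h), new Point(0, h));

        Mat H = Imgproc.getPerspectiveTransform(src, dst);
        Mat warped = new Mat();
        Imgproc.warpPerspective(img, warped, H, new Size(w, h));
        return warped;
    }
}

The warped patch can then be fed to whatever digit matcher you use (template matching, descriptors, OCR), with most of the rotation already removed.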