I wrote a JNI function that uses findContours to detect the corners of the source image, and then uses the returned point array to extract the largest contour with warpPerspective. When I use a source image from the drawable folder everything works fine, but when I run the same algorithm inside the onCameraFrame method it gives me a strange black image.
Here is the code I wrote:
The JNI function:
#include <jni.h>
#include <numeric>
#include <string>
#include <vector>
#include <opencv2/opencv.hpp>

extern "C" jobjectArray
Java_com_grimg_testtt_Main2Activity_getQuadrilateral(
        JNIEnv *env,
        jobject /* this */,
        jlong image) {
    std::vector<std::string> vec; // (unused)
    cv::Mat *frame = (cv::Mat *) image;

    // Mask used only to visualise the approximated polygon.
    cv::Mat approxPoly_mask(frame->rows, frame->cols, CV_8UC1);
    approxPoly_mask = cv::Scalar(0);

    // External contours of the (binary) input image.
    std::vector<std::vector<cv::Point>> contours;
    findContours(*frame, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_NONE);

    // Sort contour indices by contour length, largest first.
    std::vector<int> indices(contours.size());
    std::iota(indices.begin(), indices.end(), 0);
    sort(indices.begin(), indices.end(), [&contours](int lhs, int rhs) {
        return contours[lhs].size() > contours[rhs].size();
    });

    // Convex hull of the largest contour, approximated down to a polygon.
    std::vector<std::vector<cv::Point>> hull(1);
    cv::convexHull(cv::Mat(contours[indices[0]]), hull[0], false);
    std::vector<std::vector<cv::Point>> polygon(1);
    approxPolyDP(hull[0], polygon[0], 20, true);
    drawContours(approxPoly_mask, polygon, 0, cv::Scalar(255));

    // Return the polygon vertices to Java as "x,y" strings.
    jobjectArray ret = (jobjectArray) env->NewObjectArray(
            polygon[0].size(),
            env->FindClass("java/lang/String"),
            env->NewStringUTF(""));
    //if (polygon[0].size() >= 4) // we found the 4 corners
    //{
    for (int n = 0; n < polygon[0].size(); n++) {
        std::string point_str = std::to_string(polygon[0][n].x) + ","
                                + std::to_string(polygon[0][n].y);
        env->SetObjectArrayElement(ret, n, env->NewStringUTF(point_str.c_str()));
    }
    //}
    return (ret);
}
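On the Java side, the binding for this native function is declared roughly as in the minimal sketch below; the library name "native-lib" and the Activity base class are assumptions (use whatever your CMake target and activity setup actually are), only the package and class name are fixed by the mangled JNI name:

package com.grimg.testtt;

import android.app.Activity;

public class Main2Activity extends Activity {

    static {
        // Assumed library name; replace with your actual CMake/ndk-build target.
        System.loadLibrary("native-lib");
    }

    // Java counterpart of Java_com_grimg_testtt_Main2Activity_getQuadrilateral:
    // takes the native address of a Mat (Mat.getNativeObjAddr()) and returns
    // the polygon corners as "x,y" strings.
    private native String[] getQuadrilateral(long image);
}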
The Java extraction function:
private Mat detect_and_extract_largest_contour(Mat source_mat) {
    // cppArray, card_corners and pt are fields declared elsewhere in the activity.

    // Target size with the height scaled to 500 px. Note that the image is
    // never actually resized to this size below: cvtColor reallocates
    // input_grey to source_mat's size and to a single channel.
    double ratio = source_mat.size().height / 500;
    int height = Double.valueOf(source_mat.size().height / ratio).intValue();
    int width = Double.valueOf(source_mat.size().width / ratio).intValue();
    Size size = new Size(width, height);

    Mat input_grey = new Mat(size, CvType.CV_8UC4);
    Imgproc.cvtColor(source_mat, input_grey, Imgproc.COLOR_RGB2GRAY);

    Mat threshold1; // (unused)
    Mat edges = new Mat();
    Imgproc.GaussianBlur(input_grey, input_grey, new Size(3, 3), 3);
    Imgproc.Canny(input_grey, edges, 50, 150, 3, false);

    // Ask the native side for the corners of the largest contour,
    // returned as "x,y" strings.
    cppArray = getQuadrilateral(edges.getNativeObjAddr());
    card_corners = new ArrayList<>();
    for (int i = 0; i < cppArray.length; i++) {
        String[] separated = cppArray[i].split(",");
        pt = new Point();
        pt.x = Double.parseDouble(separated[0]);
        pt.y = Double.parseDouble(separated[1]);
        card_corners.add(pt);
    }

    Mat warpedCard = new Mat(source_mat.rows(), source_mat.cols(), CvType.CV_8UC3);
    if (card_corners.size() == 4) {
        // Destination corners covering the whole output image; findHomography
        // pairs them index-by-index with the detected corners.
        final Point p1 = new Point(warpedCard.cols(), warpedCard.rows());
        final Point p2 = new Point(0, warpedCard.rows());
        final Point p3 = new Point(0, 0);
        final Point p4 = new Point(warpedCard.cols(), 0);
        LinkedList<Point> sceneList = new LinkedList<Point>();
        sceneList.addLast(p1);
        sceneList.addLast(p2);
        sceneList.addLast(p3);
        sceneList.addLast(p4);
        MatOfPoint2f scene = new MatOfPoint2f();
        scene.fromList(sceneList);
        MatOfPoint2f obj = new MatOfPoint2f();
        obj.fromList(card_corners);

        // Homography from the detected quadrilateral to the full image, then warp.
        Mat homography = Calib3d.findHomography(obj, scene);
        Imgproc.warpPerspective(input_grey, warpedCard, homography,
                new Size(warpedCard.cols(), warpedCard.rows()));
    }
    return warpedCard;
}
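For reference, the still-image path that works (mentioned at the top) presumably looks something like the sketch below; R.drawable.card, imageView and the processDrawable() helper are placeholders, and loading via org.opencv.android.Utils.bitmapToMat is an assumption about how the drawable is converted to a Mat:

// Hypothetical helper inside the activity showing the working drawable path.
// Uses android.graphics.BitmapFactory and org.opencv.android.Utils.
private void processDrawable() {
    Bitmap bmp = BitmapFactory.decodeResource(getResources(), R.drawable.card); // placeholder resource
    Mat source = new Mat();
    Utils.bitmapToMat(bmp, source); // yields an RGBA Mat (CV_8UC4)

    Mat warped = detect_and_extract_largest_contour(source);

    // When the four corners are found, warpPerspective was applied to the
    // grayscale image, so `warped` is single-channel here; matToBitmap
    // accepts CV_8UC1 as well as CV_8UC3/CV_8UC4.
    Bitmap out = Bitmap.createBitmap(warped.cols(), warped.rows(),
            Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(warped, out);
    imageView.setImageBitmap(out); // placeholder ImageView
}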
The onCameraFrame code:
mat = inputFrame.rgba();
// Transpose + flip(..., 1) rotates the frame 90 degrees, then resize brings
// it back to the original frame size before running the detection.
Mat mRgbaT = mat.t();
Core.flip(mat.t(), mRgbaT, 1);
Imgproc.resize(mRgbaT, mRgbaT, mat.size());
return detect_and_extract_largest_contour(mRgbaT);
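For context, that snippet sits inside the standard CvCameraViewListener2 callback; a minimal sketch of the wrapper, assuming mat is a Mat field managed in onCameraViewStarted/onCameraViewStopped and the rest of the camera setup is unchanged, is:

// Minimal sketch of the camera listener callbacks around the snippet above.
@Override
public void onCameraViewStarted(int width, int height) {
    mat = new Mat();
}

@Override
public void onCameraViewStopped() {
    mat.release();
}

@Override
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
    mat = inputFrame.rgba();
    Mat mRgbaT = mat.t();
    Core.flip(mat.t(), mRgbaT, 1);
    Imgproc.resize(mRgbaT, mRgbaT, mat.size());
    return detect_and_extract_largest_contour(mRgbaT);
}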
What is going wrong here, and how can I get an accurate result when using the camera as well?
Thanks.