Cleaning up a segmented image in OpenCV

Time: 2016-03-31 21:26:38

Tags: c++ opencv image-segmentation

I have this original image:

[original image]

After segmentation I get this image:

[segmented image]

As you can see, it is still not perfectly segmented. Any suggestions on how to further "clean up" this segmented image? Here is my code:

#include <opencv2/opencv.hpp>

using namespace cv;
using namespace std;

Mat COLOR_MAX(Scalar(65, 255, 255));
Mat COLOR_MIN(Scalar(15, 45, 45));

int main(int argc, char** argv)
{
    Mat src, src2, hsv_img, mask, gray_img, initial_thresh, second_thresh, add_res, and_thresh, xor_thresh, result_thresh, rr_thresh, final_thresh;

    // Load source images
    src = imread("banana2.jpg");
    src2 = imread("Balanced_Image1.jpg");
    imshow("Original Image", src);
    cvtColor(src, hsv_img, CV_BGR2HSV);
    imshow("HSV Image", hsv_img);

    //imwrite("HSV Image.jpg", hsv_img);

    // Colour mask in HSV space
    inRange(hsv_img, COLOR_MIN, COLOR_MAX, mask);
    imshow("Mask Image", mask);

    // Adaptive threshold on the grayscale image
    cvtColor(src, gray_img, CV_BGR2GRAY);
    adaptiveThreshold(gray_img, initial_thresh, 255, ADAPTIVE_THRESH_GAUSSIAN_C, CV_THRESH_BINARY_INV, 257, 2);
    imshow("AdaptiveThresh Image", initial_thresh);

    // Combine the two masks and clean them up morphologically
    add(mask, initial_thresh, add_res);
    erode(add_res, add_res, Mat(), Point(-1, -1), 1);
    dilate(add_res, add_res, Mat(), Point(-1, -1), 5);
    imshow("Bitwise Res", add_res);

    // Otsu threshold and bitwise combinations
    threshold(gray_img, second_thresh, 150, 255, CV_THRESH_BINARY_INV | CV_THRESH_OTSU);
    imshow("TreshImge", second_thresh);
    bitwise_and(add_res, second_thresh, and_thresh);
    imshow("andthresh", and_thresh);
    bitwise_xor(add_res, second_thresh, xor_thresh);
    imshow("xorthresh", xor_thresh);
    bitwise_or(and_thresh, xor_thresh, result_thresh);
    imshow("Result image", result_thresh);
    bitwise_and(add_res, result_thresh, final_thresh);
    imshow("Final Thresh", final_thresh);

    // Mask the original image with the final threshold
    erode(final_thresh, final_thresh, Mat(), Point(-1, -1), 6);
    bitwise_or(src, src, rr_thresh, final_thresh);
    imshow("Segmented Image", rr_thresh);
    imwrite("Segmented Image.jpg", rr_thresh);
    waitKey(0);
    return 1;
}

1 Answer:

Answer 0 (score: 1)

Sorry for the Python; it is easier to prototype with, and it should not be too hard to port to C++.

import cv2
import numpy as np


img = cv2.imread("banana.jpg", 0)
edges = cv2.Canny(img, 10, 100)

kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
dilated = cv2.dilate(edges,kernel,iterations = 1)


contours, hierarchy = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)


for i,contour in enumerate(contours):
    area = cv2.contourArea(contour)
    if area > 1000.0:
        cv2.drawContours(img, contours, i, (0,255,255), 2)

cv2.imwrite('banana_out.png', img)

I used Canny edge detection because the edges of the banana look quite sharp. I then filled some of the gaps in the mask using dilation. This could probably be done better.
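One way it might be done better, for instance, is to bridge the gaps with a morphological closing instead of a plain dilation, so the edge map is filled without growing outward as much. A minimal sketch of that idea in C++ (the file name, thresholds and kernel size are just placeholders copied from the snippets here, not a tested replacement):

#include <opencv2/opencv.hpp>

int main()
{
    // Grayscale input, same as in the Python prototype above
    cv::Mat gray = cv::imread("banana.jpg", cv::IMREAD_GRAYSCALE);

    cv::Mat edges;
    cv::Canny(gray, edges, 10, 100);

    // Closing = dilation followed by erosion: it bridges small gaps in the
    // edge map without thickening the contours as much as dilation alone
    cv::Mat kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(5, 5));
    cv::Mat closed;
    cv::morphologyEx(edges, closed, cv::MORPH_CLOSE, kernel);

    cv::imwrite("edges_closed.png", closed);
    return 0;
}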

Input:

Edges:

Dilated edges:

Output:

C++ version

#include <opencv2/opencv.hpp>


int main(int argc, char** argv)
{
    cv::Mat img = cv::imread("./banana.jpg");

    cv::Mat gray_img;
    cv::cvtColor(img, gray_img, CV_BGR2GRAY);

    cv::Mat edges;
    cv::Canny(gray_img, edges, 10, 100);

    cv::Mat kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(5, 5));
    cv::Mat dilated;
    cv::dilate(edges, dilated, kernel);


    std::vector<std::vector<cv::Point>> contours;
    std::vector<cv::Vec4i> hierarchy;

    cv::findContours(dilated, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);

    cv::Mat mask = cv::Mat::zeros(gray_img.size(), CV_8UC1);

    double const MIN_CONTOUR_AREA(1000.0);
    for (int i(0); i < contours.size(); ++i) {
        double area = cv::contourArea(contours[i]);

        if (area >= MIN_CONTOUR_AREA) {
            cv::drawContours(mask, contours, i, cv::Scalar(255, 255, 255), CV_FILLED);
        }
    }

    cv::Mat eroded;
    cv::erode(mask, eroded, kernel);

    cv::Mat masked_object;
    cv::bitwise_and(img, img, masked_object, eroded);

    cv::imwrite("banana_out.png", masked_object);

    return 1;
}

Output:

Maybe if you intersect my mask with your mask you would get even better results...
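For completeness, a minimal sketch of that intersection, assuming both masks were computed from the same input image and are 8-bit single-channel; the file name contour_mask.png is hypothetical, standing in for the eroded contour mask produced by the C++ version above:

#include <opencv2/opencv.hpp>

int main()
{
    cv::Mat src = cv::imread("banana2.jpg");

    // HSV colour mask, as in the question
    cv::Mat hsv, colour_mask;
    cv::cvtColor(src, hsv, cv::COLOR_BGR2HSV);
    cv::inRange(hsv, cv::Scalar(15, 45, 45), cv::Scalar(65, 255, 255), colour_mask);

    // Contour-based mask from the Canny approach, saved beforehand as an
    // 8-bit single-channel image (hypothetical file name)
    cv::Mat contour_mask = cv::imread("contour_mask.png", cv::IMREAD_GRAYSCALE);

    // Intersection: keep only the pixels both masks agree on
    cv::Mat combined;
    cv::bitwise_and(colour_mask, contour_mask, combined);

    // Cut the object out of the original colour image with the combined mask
    cv::Mat segmented;
    cv::bitwise_and(src, src, segmented, combined);
    cv::imwrite("combined_out.png", segmented);
    return 0;
}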