My project doesn't switch between two different tracking algorithms

Time: 2017-05-17 14:04:13

Tags: c++ opencv

Hello, I am trying to track an object with OpenCV in C++ using two methods. The first method uses the absolute-difference (absdiff) algorithm; the second tracks colors using dilation and erosion. Both work perfectly on their own. When you press "T" on the keyboard it uses the absdiff algorithm and tracks the object, and when you press "t" again it stops tracking. But when I press "p" on the keyboard to use the dilate-and-erode method it starts tracking the ROI region, and when I press "p" again it does not break out of that method, so I can never get back to using the absdiff method.

        #include <opencv\cv.h>
        #include <opencv\highgui.h>
        #include <opencv2/core/core.hpp>
        #include <opencv2/highgui/highgui.hpp>
        #include <iostream>
        #include <opencv2/imgproc/imgproc.hpp>

        using namespace cv;
        using namespace std;

        //our sensitivity value to be used in the absdiff() function
        const static int SENSITIVITY_VALUE = 20;
        //size of blur used to smooth the intensity image output from absdiff() function
        const static int BLUR_SIZE = 10;
           //we'll have just one object to search for
        //and keep track of its position.
        int theObject[2] = { 0,0 };
        //bounding rectangle of the object, we will use the center of this as its position.
        Rect objectBoundingRectangle = Rect(0, 0, 0, 0);

        int H_MIN = 0;
        int H_MAX = 256;
        int S_MIN = 0;
        int S_MAX = 256;
        int V_MIN = 0;
        int V_MAX = 256;

        const string windowName = "Original Image";
        const string windowName1 = "HSV Image";
        const string windowName2 = "Thresholded Image";
        const string windowName3 = "After Morphological Operations";
        const string trackbarWindowName = "Trackbars";

    void on_trackbar( int, void* )
    {
        //This function gets called whenever a
        //trackbar position is changed
    }
    string intToString(int number){


        std::stringstream ss;
        ss << number;
        return ss.str();
    }
    void createTrackbars(){
        //create window for trackbars


        namedWindow(trackbarWindowName,0);
        //create memory to store trackbar name on window
        char TrackbarName[50];
        sprintf( TrackbarName, "H_MIN %d", H_MIN);
        sprintf( TrackbarName, "H_MAX %d", H_MAX);
        sprintf( TrackbarName, "S_MIN %d", S_MIN);
        sprintf( TrackbarName, "S_MAX %d", S_MAX);
        sprintf( TrackbarName, "V_MIN %d", V_MIN);
        sprintf( TrackbarName, "V_MAX %d", V_MAX);
        //create trackbars and insert them into window
        //3 parameters are: the address of the variable that is changing when the trackbar is moved(eg.H_LOW),
        //the max value the trackbar can move (eg. H_HIGH), 
        //and the function that is called whenever the trackbar is moved(eg. on_trackbar)
        //                                  ---->    ---->     ---->      
        createTrackbar( "H_MIN", trackbarWindowName, &H_MIN, H_MAX, on_trackbar );
        createTrackbar( "H_MAX", trackbarWindowName, &H_MAX, H_MAX, on_trackbar );
        createTrackbar( "S_MIN", trackbarWindowName, &S_MIN, S_MAX, on_trackbar );
        createTrackbar( "S_MAX", trackbarWindowName, &S_MAX, S_MAX, on_trackbar );
        createTrackbar( "V_MIN", trackbarWindowName, &V_MIN, V_MAX, on_trackbar );
        createTrackbar( "V_MAX", trackbarWindowName, &V_MAX, V_MAX, on_trackbar );


    }
        void morphOps(Mat &thresh) {

            //create structuring element that will be used to "dilate" and "erode" image.
            //the element chosen here is a 3px by 3px rectangle

            Mat erodeElement = getStructuringElement(MORPH_RECT, Size(3, 3));
            //dilate with larger element so make sure object is nicely visible
            Mat dilateElement = getStructuringElement(MORPH_RECT, Size(8, 8));

            erode(thresh, thresh, erodeElement);
            erode(thresh, thresh, erodeElement);

            dilate(thresh, thresh, dilateElement);
            dilate(thresh, thresh, dilateElement);

        }
        void trackFilteredObject(int &x, int &y, Mat threshold, Mat &cameraFeed) {

            Mat temp;
            threshold.copyTo(temp);
            //these two vectors needed for output of findContours
            vector< vector<Point> > contours;
            vector<Vec4i> hierarchy;
            //find contours of filtered image using openCV findContours function
            findContours(temp, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);

            bool objectFound = false;
            if (hierarchy.size() > 0) {
                int numObjects = hierarchy.size();
                //if number of objects greater than MAX_NUM_OBJECTS we have a noisy filter
            }
            int maxX = 0, minX = cameraFeed.cols, maxY = 0, minY = cameraFeed.rows;
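            //scan every contour point to find the bounding box of everything that was detected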

            for (int i = 0; i<contours.size(); i++)
                for (int j = 0; j<contours[i].size(); j++)
                {
                    Point p = contours[i][j];

                    maxX = max(maxX, p.x);
                    minX = min(minX, p.x);

                    maxY = max(maxY, p.y);
                    minY = min(minY, p.y);
                }

            rectangle(cameraFeed, Point(minX, minY), Point(maxX, maxY), Scalar(0));
            //find the center of the rectangle
            int m = (minX + maxX) / 2;
            int m2 = (minY + maxY) / 2;
            //calculate the arc tangent

            double angle;
            angle = atan(m / m2);
            //draw the circle
            circle(cameraFeed, Point(m, m2), 5, Scalar(255, 255, 255), CV_FILLED, 8, 0);
        }

        void searchForMovement(Mat thresholdImage, Mat &cameraFeed){
    //notice how we use the '&' operator for objectDetected and cameraFeed. This is because we wish
    //to take the values passed into the function and manipulate them, rather than just working with a copy.
    //eg. we draw to the cameraFeed to be displayed in the main() function.
    bool objectDetected = false;
    Mat temp;
    thresholdImage.copyTo(temp);
    //these two vectors needed for output of findContours
    vector< vector<Point> > contours;
    vector<Vec4i> hierarchy;
    //find contours of filtered image using openCV findContours function
    //findContours(temp,contours,hierarchy,CV_RETR_CCOMP,CV_CHAIN_APPROX_SIMPLE );// retrieves all contours
    findContours(temp,contours,hierarchy,CV_RETR_EXTERNAL,CV_CHAIN_APPROX_SIMPLE );// retrieves external contours

    //if contours vector is not empty, we have found some objects
    if(contours.size()>0)objectDetected=true;
    else objectDetected = false;

    if(objectDetected){
        //the largest contour is found at the end of the contours vector
        //we will simply assume that the biggest contour is the object we are looking for.
        vector< vector<Point> > largestContourVec;
        largestContourVec.push_back(contours.at(contours.size()-1));
        //make a bounding rectangle around the largest contour then find its centroid
        //this will be the object's final estimated position.
        objectBoundingRectangle = boundingRect(largestContourVec.at(0));
        int xpos = objectBoundingRectangle.x+objectBoundingRectangle.width/2;
        int ypos = objectBoundingRectangle.y+objectBoundingRectangle.height/2;

        //update the objects positions by changing the 'theObject' array values
        theObject[0] = xpos , theObject[1] = ypos;
    }
    //make some temp x and y variables so we dont have to type out so much
    int x = theObject[0];
    int y = theObject[1];

            int maxX = 0, minX = cameraFeed.cols, maxY=0, minY = cameraFeed.rows;
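        //same scan as in trackFilteredObject: bounding box over every contour point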

        for(int i=0; i<contours.size(); i++)
            for(int j=0; j<contours[i].size(); j++)
            {
                Point p = contours[i][j];

                maxX = max(maxX, p.x);
                minX = min(minX, p.x);

                maxY = max(maxY, p.y);
                minY = min(minY, p.y);
            }
        rectangle(cameraFeed, Point(minX, minY), Point(maxX, maxY), Scalar(0));

        //find the center of the rectangle
        int m = (minX + maxX) / 2;
        int m2 = (minY + maxY) / 2;

        double angle;
        angle = atan(m / m2);

        circle(cameraFeed, Point(m, m2), 5, Scalar(255, 255, 255), CV_FILLED, 8, 0);
        }

        int main(int argc, char* argv[]) {

    //some boolean variables for added functionality
    bool objectDetected = false;
    //these two can be toggled by pressing 'd' or 't'
    bool debugMode = false;
    bool trackingEnabled = false;
    bool colordetection=false;
    //set up the matrices that we will need
    //the two frames we will be comparing
    Mat frame1,frame2;
    //their grayscale images (needed for absdiff() function)
    Mat grayImage1,grayImage2;
    //resulting difference image
    Mat differenceImage;
    //thresholded difference image (for use in findContours() function)
    Mat thresholdImage;
    //video capture object.
    VideoCapture capture;

            capture.open(0);

            if (!capture.isOpened()) {
                cout << "ERROR ACQUIRING VIDEO FEED\n";
                getchar();
                return -1;
            }

            while (1) {

        //read first frame
        capture.read(frame1);
        //convert frame1 to gray scale for frame differencing
        cv::cvtColor(frame1,grayImage1,COLOR_BGR2GRAY);
        //copy second frame
        capture.read(frame2);
        //convert frame2 to gray scale for frame differencing
        cv::cvtColor(frame2,grayImage2,COLOR_BGR2GRAY);
        //perform frame differencing with the sequential images. This will output an "intensity image"
        //do not confuse this with a threshold image, we will need to perform thresholding afterwards.
        cv::absdiff(grayImage1,grayImage2,differenceImage);
        //threshold intensity image at a given sensitivity value
        cv::threshold(differenceImage,thresholdImage,SENSITIVITY_VALUE,255,THRESH_BINARY);
        if(debugMode==true){
            //show the difference image and threshold image
            cv::imshow("Difference Image",differenceImage);
            cv::imshow("Threshold Image", thresholdImage);
        }else{
            //if not in debug mode, destroy the windows so we don't see them anymore
            cv::destroyWindow("Difference Image");
            cv::destroyWindow("Threshold Image");
        }
        //blur the image to get rid of the noise. This will output an intensity image
        cv::blur(thresholdImage,thresholdImage,cv::Size(BLUR_SIZE,BLUR_SIZE));
        //threshold again to obtain binary image from blur output
        cv::threshold(thresholdImage,thresholdImage,SENSITIVITY_VALUE,255,THRESH_BINARY);
        if(debugMode==true){
            //show the threshold image after it's been "blurred"

            imshow("Final Threshold Image",thresholdImage);

        }
        else {
            //if not in debug mode, destroy the windows so we don't see them anymore
            cv::destroyWindow("Final Threshold Image");
        }

                if (colordetection == true) {
                    //some boolean variables for different functionality within this
                    //program
                    bool trackObjects = true;
                    bool useMorphOps = true;
                    //Matrix to store each frame of the webcam feed
                    Mat cameraFeed;
                    //matrix storage for HSV image
                    Mat HSV;
                    //matrix storage for binary threshold image
                    //x and y values for the location of the object
                    int x = 0, y = 0;
                    //create slider bars for HSV filtering
                    createTrackbars();
                    //start an infinite loop where webcam feed is copied to cameraFeed matrix
                    //all of our operations will be performed within this loop
                    while (1) {
                        //store image to matrix
                        capture.read(cameraFeed);
                        //convert frame from BGR to HSV colorspace
                        cvtColor(cameraFeed, HSV, COLOR_BGR2HSV);
                        //filter HSV image between values and store filtered image to
                        //threshold matrix
                        inRange(HSV, Scalar(H_MIN, S_MIN, V_MIN), Scalar(H_MAX, S_MAX, V_MAX), thresholdImage);
                        //perform morphological operations on thresholded image to eliminate noise
                        //and emphasize the filtered object(s)
                        if (useMorphOps)
                            morphOps(thresholdImage);
                        //pass in thresholded frame to our object tracking function
                        //this function will return the x and y coordinates of the
                        //filtered object
                        if (trackObjects)
                            trackFilteredObject(x, y, thresholdImage, cameraFeed);

                        //show frames 
                        cv::imshow("Thresholded Image", thresholdImage);
                        cv::imshow("HSV Image", HSV);
                        cv::imshow("Original Image", cameraFeed);

                        //delay 30ms so that screen can refresh.
                        //image will not appear without this waitKey() command
                        waitKey(30);
                    }

                }
                else {
                    cv::destroyWindow("Original Image");
                    cv::destroyWindow("HSV Image");
                    cv::destroyWindow("Thresholded Image");
                    cv::destroyWindow("Trackbars");

                }

                if (trackingEnabled) {
                    searchForMovement(thresholdImage, frame1);
                }

                imshow("Frame1", frame1);


                switch (waitKey(10)) {

                case 27: //'esc' key has been pressed, exit program.
                    return 0;
                case 116: //'t' has been pressed. this will toggle tracking
                    trackingEnabled = !trackingEnabled;
                    if (trackingEnabled == false) cout << "Tracking disabled." << endl;
                    else cout << "Tracking enabled." << endl;
                    break;
                case 100: //'d' has been pressed. this will toggle debug mode
                    debugMode = !debugMode;
                    if (debugMode == false) cout << "Debug mode disabled." << endl;
                    else cout << "Debug mode enabled." << endl;
                    break;
                case 112: //'p' has been pressed. this will toggle to color tracking using dilate and erode
                    colordetection = !colordetection;
                    if (colordetection == false) cout << "Tracking not switched" << endl;
                    else cout << "Tracking switched" << endl;
                    break;
                }
            }
            return 0;
        }

1 Answer:

Answer 0: (score: 1)

When the colordetection variable is true you enter an infinite while (1) loop, so after the first time you press "p" and toggle the value of colordetection the program never reaches the switch statement again. Putting waitKey(30); at the end of that loop does not break out of it: it just waits for a key press, whether one arrives or not, then ignores it and goes back to the top of the loop.
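
For example, here is a minimal sketch of the most direct fix (it assumes you keep your current structure and only changes the end of the inner loop): read the return value of waitKey() and break out when "p" is pressed again:

    // inside the inner while (1) loop of the colordetection branch,
    // replace the plain waitKey(30); with something like this:
    int key = waitKey(30);
    if (key == 'p') {
        colordetection = false;   // hand control back to the absdiff branch
        break;                    // leave the inner loop and return to the outer one
    }
    if (key == 27) return 0;      // keep 'esc' working inside this loop as well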

You could move the switch statement into a separate function and call that function at the end of the loop described above, in place of the call you have there now.
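
A rough sketch of that idea (the helper name handleKeys and its exact signature are only illustrative, not something already in your code):

    // hypothetical helper that owns all keyboard handling;
    // returns false when the program should exit
    bool handleKeys(bool &trackingEnabled, bool &debugMode, bool &colordetection) {
        switch (waitKey(10)) {
        case 27:                    // 'esc' pressed, tell the caller to quit
            return false;
        case 't':                   // toggle absdiff-based tracking
            trackingEnabled = !trackingEnabled;
            break;
        case 'd':                   // toggle the debug windows
            debugMode = !debugMode;
            break;
        case 'p':                   // toggle color-based tracking
            colordetection = !colordetection;
            break;
        }
        return true;
    }

Calling handleKeys(...) at the end of the inner while (1) loop, and breaking out of that loop as soon as colordetection becomes false, lets a second press of "p" hand control back to the absdiff method in main().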