用其他 C++ 头文件替换 bits 内部头文件

时间:2016-03-01 21:19:38

标签: c++ xcode header

我正在尝试编译一个OpenCV开源代码项目,但它正在使用这些头文件......

#include <bits/stl_list.h>
#include <bits/stl_vector.h>

我正在使用 Xcode,它无法解析这些头文件。是否可以用下面的头文件替换它们……

#include <list.h>
#include <vector.h>

当我这样做时,引用 bits 头文件的那部分代码会编译失败。看起来这些代码仍然只是使用像 list&lt;int&gt; 这样的标准容器,这让我觉得应该可以通过替换头文件来让代码编译通过。

是否可以把 bits 头文件安装到我的 C++ 库中,以便让这个项目编译通过?

main.cpp

#include <iostream>
#include "opencv/cv.h"
#include <opencv2/opencv.hpp>
#include <bits/stl_list.h>
#include <bits/stl_vector.h>
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/core/core.hpp"
#include "My_Q.h"
using namespace std;
using namespace cv;
//=========controller parameters=================
const int MAX_CORNERS = 100;
const int frame_queue_size = 50 ;
const float threshold_percent = 0.1;

const bool draw_floor_line = true;

const bool webcam_mode = false;
const string movie_address = "sample_videos/1p1.mp4";//1p1,people,2p1
//======camera calibration parameters============
const float fku = 1000;
const float v0 = 100;
const float h = 1;
//=======global variables========================
int number_of_people;

CvPoint2D32f* features;
int features_number=0;

int* setNumbers;
int* height_from_floor;

void MyLine( Mat img, Point start, Point end )
{
    int thickness = 1;
    int lineType = 8;
    line( img,
         start,
         end,
         Scalar( 0,255, 255 ),
         thickness,
         lineType );
}
CvPoint2D32f* find_features(Mat image,vector <Rect> Rect_list,
                            int & features_number)
{
    //cout<<"4.2.1"<<endl;
    list<CvPoint2D32f> features;
    list<int> set_number;
    list<int> dist2B;

    int RectNumber = Rect_list.size();
    int i, j;
    int All_features_number=0;
    //cout<<"4.2.2"<<endl;
    for (i=0; i<RectNumber; i++)
    {
        Rect r = Rect_list[i];
        ////=================cropping=================================
        //cout<<"4.2.3"<<endl;
        cout<<r.x<<','<<r.y<<endl;
        //Mat cropped_rectangle(image, r);
        Mat cropped_rectangle ;
        image(r).copyTo(cropped_rectangle);//crop r to cropped_rectangle
        //cout<<"4.2.4"<<endl;
        ////=================extracting good features=================
        IplImage* imgA = new IplImage(cropped_rectangle);//convert matrix
        //cropped_rectangle to IplImage
        CvSize img_sz = cvGetSize(imgA);
        IplImage* eig_image = cvCreateImage( img_sz, IPL_DEPTH_32F, 1 );
        //scratch image
        IplImage* tmp_image =cvCreateImage(img_sz,IPL_DEPTH_32F,1);
        //scratch image
        int corner_count = MAX_CORNERS;
        //The corner_count indicates the maximum number of points
        // for which there
        //is space to return. After the routine exits, corner_count
        //is overwritten by the number
        //of points that were actually found
        //cout<<"4.2.5"<<endl;
        CvPoint2D32f* cornersA = new CvPoint2D32f[ MAX_CORNERS ];
        // good feature points
        //cout<<"4.2.6"<<endl;
        cvGoodFeaturesToTrack(//fine good features of rectangle
                              imgA,
                              eig_image,
                              tmp_image,
                              cornersA,
                              &corner_count,
                              0.01,
                              5.0,
                              0,
                              3,
                              0,
                              0.04
                              );
        //========================

        //========================
        //cout<<"4.2.7"<<endl;
        ////===mapping good features to the original picture and add them to the list
        //of features and set their set(the rectangle they are belonging)===
        for(int k = 0;k<corner_count;k++)
        {
            int distance = r.height - cornersA[k].y;
            dist2B.push_back(distance);

            cornersA[k].x= r.x+ cornersA[k].x;//mapping
            cornersA[k].y= r.y+ cornersA[k].y;//mapping
            features.push_back(cornersA[k]);//adding

            set_number.push_back(i);

        }
        //cout<<"4.2.8"<<endl;
        All_features_number+=corner_count;

    }
    CvPoint2D32f* features_array = new CvPoint2D32f[All_features_number];
    setNumbers = new int[All_features_number];
    height_from_floor = new int [All_features_number];
    int iter = 0;
    //cout<<"4.2.9"<<endl;
    while (features.size() > 0)
    //copy the features list to the final features array
    {
        CvPoint2D32f F_point = features.front();
        features_array[iter].x = F_point.x;
        features_array[iter].y = F_point.y;
        features.pop_front();

        int SN = set_number.front();
        setNumbers[iter] = SN;
        set_number.pop_front();

        int d = dist2B.front();
        height_from_floor[iter] = d;
        dist2B.pop_front();
        iter++;
    }
    //cout<<"4.2.10"<<endl;
    features_number=All_features_number;
    return features_array;
}
int people_floor(int p, bool & status)
{
    int sum=0;
    int m=0;
    for(int i = 0 ; i< features_number;i++)
    if (setNumbers[i]== p)
    {
        sum+=(features[i].y+height_from_floor[i]);
        m++;
    }
    if(m==0)
    {
        status = false;
        return 0;
    }
    status = true;
    return(sum/m);
}
int scaled_people_floor(int p,bool &status)
{
    int y = people_floor(p,status);
    if(!status)
    return -1;
    float sum=0;
    float m = 0;
    for(int i = 0;i<features_number;i++)
    {
        if(setNumbers[i]==setNumbers[p])
        {
            for(int j=i+1;j<features_number;j++)
            {
                if(setNumbers[j]==setNumbers[p])
                {
                    if(height_from_floor[i]-height_from_floor[j]<2)
                    continue;
                    float t = (features[i].y-features[j].y)/(height_from_floor[i]-height_from_floor[j]);
                    if(t<0)
                    t=-t;
                    sum+= t;
                    m++;
                }
            }
        }
    }
    if(m==0)
    {
        return(y);
    }
    float alpha = sum/m;
    return(alpha*y);
}
float compute_distance(int v)
{
    return((fku*h)/(v-v0));
}
int main (int argc, const char * argv[])
{
    int screen_width;
    int screen_height;
    VideoCapture cap;
    if(!webcam_mode)
    {
        VideoCapture cap1(movie_address);
        screen_width = cap1.get(CV_CAP_PROP_FRAME_WIDTH);
        screen_height =cap1.get(CV_CAP_PROP_FRAME_HEIGHT);
        cap = cap1;
    }
    else
    {
        VideoCapture cap2(0);
        screen_width = 640;
        screen_height =480;
        cap2.set(CV_CAP_PROP_FRAME_WIDTH, screen_width);//1024,320,640,160
        cap2.set(CV_CAP_PROP_FRAME_HEIGHT, screen_height);//768,240,480,120
        cap = cap2;
    }


    if (!cap.isOpened())
    return -1;
    namedWindow("video capture", CV_WINDOW_AUTOSIZE);

    Mat current_frame;
    Mat next_frame;
    Mat current_frame_copy;
    HOGDescriptor hog;
    hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());
    int Threshold=0;
    while(1)//main loop:
    {

        vector<Rect> found, found_filtered;
        CvPoint2D32f* tracked_features;
        ////****************************************************************************
        ////**********************detection loop: **********************************
        while(1)//detection loop
        {
            cout<<"start detection loop\n";
            ////--------delete info of previous iteration---------
            found_filtered.clear();
            //--------------get next frame------------------------
            cap >> current_frame;
            if(! current_frame.data )// Check for invalid input
            {
                cout<<"no frame"<<endl;
                return 0;
            }
            current_frame.copyTo(current_frame_copy);
            //convert it to the gray
            cvtColor(current_frame, current_frame, CV_BGR2GRAY);
            //cout<<"2"<<endl;
            //---------------detection with HOG algorithm---------
            hog.detectMultiScale(current_frame,
                                 found,
                                 0,
                                 Size(8,8),
                                 Size(32,32),
                                 1.05,
                                 2);
            cout<<"HOG detection Done"<<endl;
            //filter redundant rectangles
            for (int i=0; i<found.size(); i++)
            {
                Rect r = found[i];
                int j;
                for (j=0; j<found.size(); j++)
                if (j!=i && (r & found[j])==r)
                break;
                if (j==found.size())
                found_filtered.push_back(r);
            }
            number_of_people = found_filtered.size();
            //cout<<"4"<<endl;
            //----------extracting good features for tracking------
            if(number_of_people>0)//if at least one person found:
            {
                // cout<<"4.1"<<endl;
                for(int i=0;i<found_filtered.size();i++)
                //bound rectangles to the screen
                {
                    if(found_filtered[i].x<0)
                    found_filtered[i].x =0;
                    if(found_filtered[i].y<0)
                    found_filtered[i].y =0;
                    if(found_filtered[i].x+found_filtered[i].width>screen_width)
                    found_filtered[i].width =screen_width-found_filtered[i].x;
                    if(found_filtered[i].y+found_filtered[i].height>screen_height)
                    found_filtered[i].height =screen_height-found_filtered[i].y;
                }
                try// find the list of features of the people
                {
                    // cout<<"4.2"<<endl;
                    features = find_features(current_frame,
                                             found_filtered,
                                             features_number);
                    // cout<<"4.3"<<endl;
                }
                catch(Exception exp)
                {
                    cout<<"exception find features error"<<endl;
                    continue;
                }

                break;
            }
            //cout<<"4.4.1"<<endl;

            //------------showing detection
            imshow("video capture", current_frame_copy);
            waitKey(10);
            cout<<"end detection loop"<<endl;
        }
        ////===========setting the breaking threshold for the tracking=========
        //cout<<"4.5"<<endl;
        Threshold = features_number*threshold_percent;
        //cout<<"4.6, feature_number="<<features_number
        //  <<",Threshold="<<Threshold<<endl;
        My_Q Queue(frame_queue_size,1);
        ////****************************************************************************
        ////***********************//tracking loop: ********************************
        while(1)//tracking loop
        {
            cout<<"tracking loop start"<<endl;

            ////==================check for breaking conditions===============
            Queue.AddNew(features_number);
            if(Queue.BreakCondition())
            {
                cout<<"features # are not changing for a while!"<<endl;
                break;
            }
            if(features_number< Threshold)
            //check for enough number of features to track
            {
                cout<<"features less than threshold"<<endl;
                break;
                //go to the detection loop when number of
                //tracked features is less than an arbitrary threshold
            }
            ////======================reading the next frame================
            current_frame_copy.setTo(Scalar(0,0,0));
            //cout<<"t00"<<endl;

            cap >> next_frame;//get the next frame
            //cout<<"t01"<<endl;
            if(! next_frame.data )// Check for invalid input
            {
                cout<<"Frame not captured"<<endl;
                return 0;
                //continue;
            }
            next_frame.copyTo(current_frame_copy);
            cvtColor(next_frame, next_frame, CV_BGR2GRAY);
            //convert it to gray scale
            //cout<<"t02";
            ////=====================tracking using LK ==============================
            try
            {
                ////-----------LK parameters-------------
                char status[features_number];
                //shows the status of points. 0 means
                // feature lost; 1 means found
                float feature_errors[features_number];
                //list of lost features
                tracked_features= new CvPoint2D32f[ features_number ];
                IplImage* imgA = new IplImage(current_frame);
                //convert initial matrix image to IplImage
                IplImage* imgB = new IplImage(next_frame);
                //convert final matrix image to IplImage
                CvSize pyr_sz = cvSize( imgA->width+8, imgB->height/3 );
                IplImage* pyrA = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
                IplImage* pyrB = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
                int win_size = 10;
                cout<<"before LK"<<endl;
                //--------------------------------------
                cvCalcOpticalFlowPyrLK(
                                       imgA,//initial image
                                       imgB,
                                       //final image,both should be single-channel, 8-bit images.
                                       pyrA,
                                       //buffer allocated to store the pyramid images
                                       pyrB,
                                       features,
                                       ////the array contains the points
                                       //for which the motion is to be found
                                       tracked_features,////similar array into
                                       //which the computed new locations of the points
                                       ////from features are to be placed
                                       features_number,////feature_number is the number
                                       //of points in the features list
                                       cvSize( win_size,win_size ),
                                       5,
                                       status,
                                       feature_errors,
                                       cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 ),
                                       0
                                       );
                cout<<"LK done"<<endl;
                ////============updating the info after tracking===================
                //updating feature points
                //removing info (set_number/distance to bottom) of lost features
                int new_feature_number = 0;
                int * temp_set_number = new int[features_number];
                int * temp_distance = new int[features_number];
                int j = 0;
                for(int i = 0 ;i<features_number;i++)
                {
                    if(status[i] == 1)
                    {
                        new_feature_number++;
                        //counting number of features tracked successfully

                        temp_set_number[j]=setNumbers[i];
                        temp_distance[j]= height_from_floor[i];
                        j++;
                    }
                }
                setNumbers = temp_set_number;
                height_from_floor = temp_distance;

                features_number = new_feature_number;
                cout<<"features_number="<<features_number<<endl;
                delete[] features;
                features = new CvPoint2D32f[ features_number ] ;
                for(int i = 0;i<features_number;i++)//update features
                {
                    features[i]=tracked_features[i];
                }
                delete[] tracked_features;
                next_frame.copyTo(current_frame);
                cout<<"info updated after LK"<<endl;
            }
            catch(Exception exp)
            {
                cout<<"LK-exception"<<endl;
                break;
            }
            ////****************************************************************************
            ////****************************computing distance******************************
            for(int p=0;p<number_of_people;p++)
            {
                bool status;
                int v = scaled_people_floor(p,status);
                //int v = people_floor(p);
                if(status)
                {
                    float d = compute_distance(v);
                    if(draw_floor_line)
                    MyLine(current_frame_copy,Point(0,v),Point(screen_width,v));

                    string dist_text = static_cast<ostringstream*>( &(ostringstream() << d) )->str();
                    dist_text = dist_text.substr(0,5);
                    putText(current_frame_copy, "distance: "+dist_text+"m", Point(screen_width-150,screen_height-p*50-20),
                            CV_FONT_NORMAL, 0.5,CV_RGB((125-p*100)%255,(p*100)%255,(255-p*100)%255),1,1);
                }

            }
            cout<<"computing distance"<<endl;

            ////================showing points ======================

            for(int i=0;i<features_number;i++)
            {
                int p =  setNumbers[i];
                circle(current_frame_copy,
                       features[i],
                       1,
                       CV_RGB((125-p*100)%255,(p*100)%255,(255-p*100)%255),
                       3);
            }
            imshow("video capture", current_frame_copy);
            waitKey(10);
            cout<<"tracking loop end"<<endl;
        }
        cout<<"main loop end"<<endl;
    }
    return 0;
}

0 个答案:

没有答案