How do I publish a message of type vector&lt;Point2d&gt; to a topic in ROS?

Date: 2015-10-31 23:28:28

Tags: c++ ros

I have written C++ code that splits a video feed into frames, runs HSV segmentation, finds contours, and then performs PCA on them, which gives two eigenvectors and their corresponding eigenvalues.

I then find the larger of the two eigenvalues, take the eigenvector corresponding to it, and store it in a vector of type vector&lt;Point2d&gt;. All of this works fine, but I can't seem to publish that vector to a ROS topic.

My question is: how do I publish this vector&lt;Point2d&gt; to a ROS topic? Since I do all of the computation in the cpp file, I don't think I can create a msg file for the vector.
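To make the goal concrete, here is a minimal standalone sketch of one possible direction: copying the points into an existing message type such as geometry_msgs/Polygon (whose points field is an array of Point32) instead of writing a custom msg file. This is only an assumption about what might fit, not a confirmed answer; the node name "vector_publisher", the helper publishPoints(), and the placeholder data are made up for illustration, and geometry_msgs would need to be a dependency of the package.

// Hypothetical standalone node: publishes a vector<Point2d> by copying it into
// a geometry_msgs/Polygon message (its "points" field is an array of Point32).
#include <ros/ros.h>
#include <geometry_msgs/Polygon.h>
#include <geometry_msgs/Point32.h>
#include <opencv2/core/core.hpp>
#include <vector>

void publishPoints(ros::Publisher &pub, const std::vector<cv::Point2d> &pts)
{
    geometry_msgs::Polygon msg;
    for (size_t i = 0; i < pts.size(); ++i)
    {
        geometry_msgs::Point32 p;
        p.x = pts[i].x;  // Point32 stores float32, so some precision is lost
        p.y = pts[i].y;
        p.z = 0.0f;
        msg.points.push_back(p);
    }
    pub.publish(msg);
}

int main(int argc, char **argv)
{
    ros::init(argc, argv, "vector_publisher");  // hypothetical node name
    ros::NodeHandle node;
    ros::Publisher pub = node.advertise<geometry_msgs::Polygon>("vector", 1000);

    std::vector<cv::Point2d> example(1, cv::Point2d(0.5, -0.5));  // placeholder data
    ros::Rate rate(10);
    while (ros::ok())
    {
        publishPoints(pub, example);
        ros::spinOnce();
        rate.sleep();
    }
    return 0;
}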

My full code is below; the ROS code is in main().

#include<opencv2/highgui/highgui.hpp>
#include "opencv2/core/core_c.h"
#include "opencv2/core/core.hpp"
#include "opencv2/flann/miniflann.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/video/video.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/ml/ml.hpp"
#include "opencv2/highgui/highgui_c.h"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/contrib/contrib.hpp"
#include "opencv2/opencv.hpp"

#include <unistd.h>
#include <iostream>
#include <algorithm>

#include "ros/ros.h"
#include "std_msgs/String.h"
#include <vector> 

using namespace cv;
using namespace std;


vector<Point2d> getOrientation(vector<Point> &pts, Mat &img) //Takes references to the contour points and the image from main() as arguments
{
    //if (pts.size() == 0) return false;

    //First the data need to be arranged in a matrix with size n x 2, where n is the number of data points we have. Then we can perform that PCA analysis. The calculated mean (i.e. center of mass) is stored in the "pos" variable and the eigenvectors and eigenvalues are stored in the corresponding std::vector’s.

    //Construct a buffer called data_pts used by the pca analysis
    Mat data_pts = Mat(pts.size(), 2, CV_64FC1); //pts.size() rows, 2 columns, matrix type will be CV_64FC1(Type to hold inputs of "double")
    for (int i = 0; i < data_pts.rows; ++i)
    {
        data_pts.at<double>(i, 0) = pts[i].x;
        data_pts.at<double>(i, 1) = pts[i].y;
    }


    //Perform PCA analysis. Principal Component Analysis allows us to find the direction along which our data varies the most. In fact, the result of running PCA on the set of points in the diagram consist of 2 vectors called eigenvectors which are the principal components of the data set.
    PCA pca_analysis(data_pts, Mat(), CV_PCA_DATA_AS_ROW);

    //Store the position of the object
    Point pos = Point(pca_analysis.mean.at<double>(0, 0),
                      pca_analysis.mean.at<double>(0, 1));

    //Store the eigenvalues and eigenvectors
    vector<Point2d> eigen_vecs(2);
    vector<double> eigen_val(2);
    for (int i = 0; i < 2; ++i)
    {
        eigen_vecs[i] = Point2d(pca_analysis.eigenvectors.at<double>(i, 0), pca_analysis.eigenvectors.at<double>(i, 1));

        eigen_val[i] = pca_analysis.eigenvalues.at<double>(0, i);
    cout << "Eigen Vector: "<< eigen_vecs[i] << endl;
    cout << "Eigen Value: " << eigen_val[i] << endl;
    }


// Find a way to acquire highest Eigen Value and the Vector Associated with it and find a way to pass it on to the motor controller(The Pic 24) 
    double valueMAX = *max_element(eigen_val.begin(), eigen_val.end());
    int index = find(eigen_val.begin(), eigen_val.end(), valueMAX) - eigen_val.begin();

    cout << "\nMax value is: " << valueMAX << endl;
    cout << "\nThe index of the largest value is: " << index << endl;

    vector<Point2d> correctVector(2);
    for (int i = 0; i < 2; i++)
    {
        if (i == index)
        {
            correctVector[0] = eigen_vecs[i];
        }
    }

    cout << " \nThe vector that corresponds with the value is: " << correctVector[0] << endl;

    float degrees = ((atan2(eigen_vecs[0].y, eigen_vecs[0].x) * 180) / 3.14159265);
    cout <<" \nThe degrees using ArcTangent of the vector(x,y) is: " << degrees << endl;

     //ros::Publisher vector_pub = node.advertise<std_msgs::String>("vector", 1000);

    // Draw the principal components, each eigenvector is multiplied by its eigenvalue and translated to the mean position.
    circle(img, pos, 3, CV_RGB(255, 0, 255), 2);
    line(img, pos, pos + 0.02 * Point(eigen_vecs[0].x * eigen_val[0], eigen_vecs[0].y * eigen_val[0]) , CV_RGB(255, 255, 0));
    line(img, pos, pos + 0.02 * Point(eigen_vecs[1].x * eigen_val[1], eigen_vecs[1].y * eigen_val[1]) , CV_RGB(0, 255, 255));

    //return degrees;
    return correctVector;
}
 int main(int argc, char **argv)
 {
    VideoCapture cap(0); //capture the video from web cam/USB cam. (0) for webcam, (1) for USB.

    if ( !cap.isOpened() )  // if not success, exit program
    {
         cout << "Cannot open the web cam" << endl;
         return -1;
    }

  //Creating values for Hue, Saturation, and Value. Found that the color red will be near 110-180 Hue, 60-255 Saturation, and 0-255 Value. This combination seems to pick up any red object I give it, as well as a few pink ones too.

  int count = 0;

  int iLowH = 113;
  int iHighH = 179;

  int iLowS = 60; 
  int iHighS = 255;

  int iLowV = 0;
  int iHighV = 255;

/**
   * The ros::init() function needs to see argc and argv so that it can perform
   * any ROS arguments and name remapping that were provided at the command line.
   * For programmatic remappings you can use a different version of init() which takes
   * remappings directly, but for most command-line programs, passing argc and argv is
   * the easiest way to do it.  The third argument to init() is the name of the node.
   *
   * You must call one of the versions of ros::init() before using any other
   * part of the ROS system.
*/

  // Initiate a new ROS node named "OpenCV"
  ros::init(argc, argv, "OpenCV");

  /**
   * NodeHandle is the main access point to communications with the ROS system.
   * The first NodeHandle constructed will fully initialize this node, and the last
   * NodeHandle destructed will close down the node.
   */

  // creating a node handle: It is the reference assigned to a new node. EVERY NODE MUST HAVE A REFERENCE!
  ros::NodeHandle node;





    while (true)
    {
        Mat frame;

        bool bSuccess = cap.read(frame); // read a new frame from video

         if (!bSuccess) //if not success, break loop
        {
             cout << "Cannot read a frame from video stream" << endl;
             break;
        }

   Mat imgHSV;

   cvtColor(frame, imgHSV, COLOR_BGR2HSV); //Convert the captured frame from BGR to HSV

   Mat imgThresholded;

   inRange(imgHSV, Scalar(iLowH, iLowS, iLowV), Scalar(iHighH, iHighS, iHighV), imgThresholded); //Threshold the image

  //morphological opening (remove small objects from the foreground)
  erode(imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)) );
  dilate( imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)) ); 

   //morphological closing (fill small holes in the foreground)
  dilate( imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)) ); 
  erode(imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)) );

 // Find all objects of interest, find all contours in the threshold image
        vector<vector<Point> > contours; //vector of vectors of points
        vector<Vec4i> hierarchy; //vector of 4-integer elements
        vector<Point2d> result;

    //findContours essentially traces all continuous points captured by the thresholding. I feel this gives accuracy to the upcoming Eigen analysis and also helps in detecting what is actually an object versus what is not. The arguments to findContours() are the following: (binary image, contour retrieval mode, contour approximation method)

    findContours(imgThresholded, contours, hierarchy, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);

        ros::Publisher vector_pub = node.advertise<std_msgs::UInt8MultiArray("vector", 1000); // THIS IS THE ERROR IN MY CODE, HOW DO I PUBLISH THE VECTOR OF TYPE vector<Point2d>!?!?


    // For each object
        for (size_t i = 0; i < contours.size(); ++i)
    {

        // Calculate the area of each contour
            double area = contourArea(contours[i]);

        // Ignore if too small or too large
            if (area < 1e2 || 1e5 < area) continue;

        // Draw the contour for visualisation purposes
            drawContours(frame, contours, i, CV_RGB(255, 0, 0), 2, 8, hierarchy, 0);
            count++;
            cout << "This is frame: " << count << endl;
            //usleep(500000);

            result = getOrientation(contours[i], frame);
            cout << "THE VECTOR BEING PASSED TO THE TOPIC IS: " << result << endl;
    }


   imshow("Thresholded Image", imgThresholded); //show the thresholded image with Contours.
  imshow("Original", frame); //show the original image

        if (waitKey(30) == 27) //wait for 'esc' key press for 30ms. If 'esc' key is pressed, break loop
       {
            cout << "esc key is pressed by user" << endl;
            break; 
       }
    }
   return 0;
}
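For reference, at the line in main() marked as the error, another possibility (again only a hedged sketch, not a confirmed approach) would be to flatten the Point2d values into a std_msgs/Float64MultiArray. The helper name publishAsMultiArray() and the interleaved [x0, y0, x1, y1, ...] layout are made up for illustration.

// Hypothetical helper: flattens the Point2d list into [x0, y0, x1, y1, ...]
// and publishes it as a std_msgs/Float64MultiArray.
#include <ros/ros.h>
#include <std_msgs/Float64MultiArray.h>
#include <opencv2/core/core.hpp>
#include <vector>

void publishAsMultiArray(ros::Publisher &pub, const std::vector<cv::Point2d> &pts)
{
    std_msgs::Float64MultiArray msg;
    for (size_t i = 0; i < pts.size(); ++i)
    {
        msg.data.push_back(pts[i].x);  // x and y interleaved
        msg.data.push_back(pts[i].y);
    }
    pub.publish(msg);
}

// The advertise call at the marked line would then become, for example:
//   ros::Publisher vector_pub = node.advertise<std_msgs::Float64MultiArray>("vector", 1000);
// and inside the contour loop: publishAsMultiArray(vector_pub, result);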

0 Answers:

No answers yet.