Creating a ROS C++ node for the camera (rosrun)

Asked: 2016-08-10 11:42:38

Tags: ros

I already have a catkin_ws workspace with several ROS nodes and topics, all of which use the camera, meaning the camera is already engaged by those nodes. Separately, I have a C++ program (example.cpp) that takes the camera as input, but it only runs standalone as ./example -camera and contains no ROS code at all.

Now I want to convert example.cpp into a ROS C++ node: call ros::init() and so on, subscribe to the camera that is already running, and get the output video in real time.

I have tried to work through the whole thing, but I cannot figure out how to get the camera video from the node that is already running; in other words, I want a file I can launch with rosrun that picks up the camera at runtime. Right now the code opens the camera directly, like this (see the sketch after this snippet):

 cv::VideoCapture video;
 bool useCamera = true;

 if( useCamera )
  video.open(0);
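Something along these lines is roughly what I have in mind. This is only a minimal sketch, assuming the cv_bridge and image_transport packages and a placeholder topic name /camera/image_raw (it would have to be whatever topic the already-running camera node actually publishes):

#include <ros/ros.h>
#include <image_transport/image_transport.h>
#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/image_encodings.h>
#include <opencv2/highgui/highgui.hpp>

// Callback: convert each incoming ROS image message to a cv::Mat and display it
void imageCallback(const sensor_msgs::ImageConstPtr& msg)
{
  cv_bridge::CvImageConstPtr cv_ptr;
  try
  {
    // Share the data as BGR8 so it can be used directly with OpenCV
    cv_ptr = cv_bridge::toCvShare(msg, sensor_msgs::image_encodings::BGR8);
  }
  catch (cv_bridge::Exception& e)
  {
    ROS_ERROR("cv_bridge exception: %s", e.what());
    return;
  }

  // This is where processImage(...) from example.cpp would run on cv_ptr->image
  cv::imshow("camera view", cv_ptr->image);
  cv::waitKey(1);
}

int main(int argc, char** argv)
{
  ros::init(argc, argv, "example_node");
  ros::NodeHandle nh;

  image_transport::ImageTransport it(nh);
  // "/camera/image_raw" is a placeholder; use whatever topic the running camera node publishes
  image_transport::Subscriber sub = it.subscribe("/camera/image_raw", 1, imageCallback);

  ros::spin();
  return 0;
}

The idea would be that processImage(...) from example.cpp runs on cv_ptr->image inside the callback instead of on frames grabbed by cv::VideoCapture, but I am not sure this is the right way to hook into the camera that the other nodes are already using. For reference, the full example.cpp that I want to convert is below: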




#include <opencv/cv.h>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>   // atoi
#include <string.h>   // strcmp
#include <iostream>
#include <time.h>

#include "../src/MSAC.h"

#include "../src/pedro.cpp"
using namespace std;
using namespace cv;

int uheight = 240;
int uwidth = 320;
int vthres = 60;
Size size(uwidth, uheight);
cv::Mat inputImg;
// std::vector<cv::Mat> vps;            // vector of vps: vps[vpNum], with vpNum=0...numDetectedVps

// Returns a binary mask (CV_8UC1) of the pixels whose entry in arr_label equals the requested label

Mat labelled_image(int label,int *arr_label[],Mat image)
{
  Mat temp(image.size(),CV_8UC1,Scalar(0));
  for(int i=0;i<image.rows;i++)
  {
    for(int j=0;j<image.cols;j++)
    {
      if(label==arr_label[i][j])
      {
        temp.at<uchar>(i,j)=255;
      }
    }
  }

  return(temp);
}

void help()
{
  cout << "/*\n"
  << " **************************************************************************************************\n"
  << " * Vanishing point detection using Hough and MSAC \n"
  << " * ----------------------------------------------------\n"
  << " * \n"
  << " * Author:Marcos Nieto\n"
  << " * www.marcosnieto.net\n"
  << " * marcos.nieto.doncel@gmail.com\n"
  << " * \n"
  << " * Date:01/12/2011\n"
  << " **************************************************************************************************\n"
  << " * \n"
  << " * Usage: \n"
  << " *        -numVps     # Number of vanishing points to detect (at maximum) \n"
  << " *        -mode       # Estimation mode (default is NIETO): LS (Least Squares), NIETO\n"
  << " *        -video      # Specifies video file as input (if not specified, camera is used) \n"
  << " *        -image      # Specifies image file as input (if not specified, camera is used) \n"
  << " *        -verbose    # Actives verbose: ON, OFF (default)\n"
  << " *        -play       # ON: the video runs until the end; OFF: frame by frame (key press event)\n"
  << " *        -resizedWidth   # Specifies the desired width of the image (the height is computed to keep aspect ratio)\n"
  << " * Example:\n"
  << " *        vanishingPoint.exe -numVps 2 -video myVideo.avi -verbose ON\n"
  << " *        vanishingPoint.exe -numVps 2 -image myImage.jpg\n"
  << " *        vanishingPoint.exe -numVps 1 -play OFF -resizedWidth 640\n"
  << " * \n"
  << " * Keys:\n"
  << " *        Esc: Quit\n"
  << " */\n" << endl;
}

/** This function contains the actions performed for each image*/
void processImage(MSAC &msac, int numVps, cv::Mat &imgGRAY, cv::Mat &outputImg)
{
  cv::Mat imgCanny;

  // Canny
  cv::Canny(imgGRAY, imgCanny, 40, 65, 3);

  // Hough
  vector<vector<cv::Point> > lineSegments;
  vector<cv::Point> aux;
  #ifndef USE_PPHT
  vector<Vec2f> lines;
  cv::HoughLines( imgCanny, lines, 1, CV_PI/180, 130);

  for(size_t i=0; i< lines.size(); i++)
  {
    float rho = lines[i][0];
    float theta = lines[i][1];

    double a = cos(theta), b = sin(theta);
    double x0 = a*rho, y0 = b*rho;

    Point pt1, pt2;
    pt1.x = cvRound(x0 + 1000*(-b));
    pt1.y = cvRound(y0 + 1000*(a));
    pt2.x = cvRound(x0 - 1000*(-b));
    pt2.y = cvRound(y0 - 1000*(a));

    aux.clear();
    aux.push_back(pt1);
    aux.push_back(pt2);
    lineSegments.push_back(aux);

    line(outputImg, pt1, pt2, CV_RGB(0, 0, 0), 1, 8);

  }
  #else
  vector<Vec4i> lines;
  int houghThreshold = 70;
  if(imgGRAY.cols*imgGRAY.rows < 400*400)
  houghThreshold = 100;

  cv::HoughLinesP(imgCanny, lines, 1, CV_PI/180, houghThreshold, 10,10);

  while(lines.size() > MAX_NUM_LINES)
  {
    lines.clear();
    houghThreshold += 10;
    cv::HoughLinesP(imgCanny, lines, 1, CV_PI/180, houghThreshold, 10, 10);
  }
  for(size_t i=0; i<lines.size(); i++)
  {
    Point pt1, pt2;
    pt1.x = lines[i][0];
    pt1.y = lines[i][1];
    pt2.x = lines[i][2];
    pt2.y = lines[i][3];
    line(outputImg, pt1, pt2, CV_RGB(0,0,0), 2);
    /*circle(outputImg, pt1, 2, CV_RGB(255,255,255), CV_FILLED);
    circle(outputImg, pt1, 3, CV_RGB(0,0,0),1);
    circle(outputImg, pt2, 2, CV_RGB(255,255,255), CV_FILLED);
    circle(outputImg, pt2, 3, CV_RGB(0,0,0),1);*/

    // Store into vector of pairs of Points for msac
    aux.clear();
    aux.push_back(pt1);
    aux.push_back(pt2);
    lineSegments.push_back(aux);
  }

  #endif

  // Multiple vanishing points
  std::vector<cv::Mat> vps;         // vector of vps: vps[vpNum], with vpNum=0...numDetectedVps
  std::vector<std::vector<int> > CS;    // index of Consensus Set for all vps: CS[vpNum] is a vector containing indexes of lineSegments belonging to Consensus Set of vp numVp
  std::vector<int> numInliers;

  std::vector<std::vector<std::vector<cv::Point> > > lineSegmentsClusters;

  // Call msac function for multiple vanishing point estimation
  msac.multipleVPEstimation(lineSegments, lineSegmentsClusters, numInliers, vps, numVps);
  for(int v=0; v<vps.size(); v++)
  {
    printf("VP %d (%.3f, %.3f, %.3f)", v, vps[v].at<float>(0,0), vps[v].at<float>(1,0), vps[v].at<float>(2,0));
    fflush(stdout);
    double vpNorm = cv::norm(vps[v]);
    if(fabs(vpNorm - 1) < 0.001)
    {
      printf("(INFINITE)");
      fflush(stdout);
    }
    printf("\n");
  }

  // Draw line segments according to their cluster
  msac.drawCS(outputImg, lineSegmentsClusters, vps);

  // View
  namedWindow("Output", CV_WINDOW_KEEPRATIO);
  imshow("Output", outputImg);
  // imwrite("vanishingPoint.jpg", outputImg);

  // Store the 'y' values of the vanishing points and pick the 'y' value down to which the image will be processed
  float one = 0.0f, two = 0.0f;
  int x1 = 1, x2 = 1;
  vthres = 60;
  int tmax = uheight-120;
  if(vps.size() == 1)
  {
    x1 = vps[0].at<float>(0,0);
    if(x1 > 0 && x1 < uwidth){
      one = vps[0].at<float>(1,0);   // y component (vps[0] is a 3x1 column vector)
      if(one <0) one = vthres;
      if(one > tmax) vthres = tmax;
      else vthres = one;
    }
  }
  else if(vps.size() == 2)
  {
    x1 = vps[0].at<float>(0,0);
    x2 = vps[1].at<float>(0,0);
    if(x1 > 0 && x1 < uwidth)
    {
      one = vps[0].at<float>(1,0);
      if(one <0) one = vthres;
    }
    else one = vthres;
    if(x2 > 0 && x2 < uwidth)
    {
      two = vps[1].at<float>(1,0);
      if(two<0) two = vthres;
    }
    else two = vthres;

    if(one > tmax && two > tmax) vthres = tmax;
    else if(one > tmax || two > tmax) vthres = (one > two) ? two : one;
    else vthres = (one > two) ? one : two;
  }
  // else: keep the default vthres
  cout << "\nvanishing point: " << vthres << endl;


  // Resizing image considering vanishing point
  Mat img(inputImg, Rect(0,vthres,uwidth,uheight-vthres) );
  //    resize(img,img,size);
  cout << "\nUpdated Image size: (" << img.cols << " x " << img.rows << ')' <<  endl;

  /*Result of graph-segmentation*/
  cv::Mat output(img.size(),img.type());

  /*labelled image i.e each segment is assigned a particular label*/
  cv::Mat label_image(img.size(),img.type(),Scalar(0));

  /*Parameters for graph-segmentation*/
  int k = 800;
  int min_size = 50;

  pedro p = pedro(img, k, min_size);

  // cv::imshow("input image",img);

  p.Evaluate();

  cout<<"number of segments = "<<p.num_ccs<<endl;

  p.output_seg.copyTo(output);

  namedWindow(" segmented output ",CV_WINDOW_KEEPRATIO);
  cv::imshow(" segmented output ",output);
  // imwrite("segmented.jpg", output);

  /** To check a particular segment, pass its label number to labelled_image() below **/

  // Storing labels in a Mat object labels
  Mat labels(img.size(),CV_8UC1,Scalar(0));
  for(int i=0;i<img.rows;i++){
    for(int j=0;j<img.cols;j++){
      labels.at<uchar>(i,j) = p.labels_seg[i][j];
    }
  }

  // detecting road label
  float total = 0.0;
  Mat label1(labels, Rect(100, uheight-vthres-15, 150, 15));
  Mat label2(labels, Rect(110, uheight-vthres-20, 120, 20));
  Mat label3(labels, Rect(120, uheight-vthres-30, 80, 30));

  total = ((sum(label1)[0] / 2250) + (sum(label2)[0] / 2400) + (sum(label3)[0] / 2400)) / 3;
  // total = ceil(total);

  int label_no = total;
  cout << "\nlabel: " << label_no << "\ttotal: " << total << endl;
  label_image = labelled_image(label_no, p.labels_seg, img); //Suppose you want label 1

  cv::namedWindow("labelimage", CV_WINDOW_KEEPRATIO);
  cv::imshow("labelimage",label_image);
  // imwrite("boundary.jpg", label_image);

  // Find contours
  vector<vector<Point> > contours;
  vector<Vec4i> hierarchy;
  RNG rng(12345);
  findContours( label_image, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );

  // Draw contours
  Mat drawing = Mat::zeros( label_image.size(), CV_8UC3 );
  for( int i = 0; i< contours.size(); i++ )
  {
    Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
    drawContours( drawing, contours, i, color, 2, 8, hierarchy, 0, Point() );
  }

  // display contours
  namedWindow("Contours", CV_WINDOW_KEEPRATIO);
  imshow("Contours", drawing);
  // imwrite("drawing.jpg", drawing);

  // vector for boundary points
  vector<int>bdp;

  // finding boundary points
  for(int i= 0; i < contours.size(); i++)
  {
    for(int j= 0; j < contours[i].size();j++) // run until j < contours[i].size();
    {
      if(contours[i][j].y == uheight-vthres-2){
        bdp.push_back(contours[i][j].x);
      }
    }
  }

  // sdist is safe-distance
  int right = 0, left = 0;
  int sdist = 10;
  for(int i=0; i < bdp.size(); i++)
  {
    cout << "\nCorner Point: (" << bdp[i] << ", " << uheight-vthres-2 << ")" << endl;
    if(bdp[i]-(uwidth/2) > 0) right = bdp[i]- (uwidth/2) -sdist;
    else left = -1 * ((uwidth/2) - bdp[i] - sdist);
  }

  // displaying boundary distances
  cout << "\nLeft Distance: " << left << "\tRight Distance: " << right << endl;

}

int main(int argc, char** argv)
{
  // time starts now
  clock_t tStart = clock();

  // Images
  Mat imgGRAY;
  Mat outputImg;

  // Other variables
  char *videoFileName = 0;
  char *imageFileName = 0;
  cv::VideoCapture video;
  bool useCamera = true;
  int mode = MODE_NIETO;
  int numVps = 1;
  bool playMode = true;
  bool stillImage = false;
  bool verbose = false;

  int procWidth = -1;
  int procHeight = -1;
  cv::Size procSize;

  // Start showing help
  // help();

  // Parse arguments
  if(argc < 2)
  return -1;
  for(int i=1; i<argc; i++)
  {
    const char* s = argv[i];

    if(strcmp(s, "-video" ) == 0)
    {
      // Input video is a video file
      videoFileName = argv[++i];
      useCamera = false;
    }
    else if(strcmp(s,"-image") == 0)
    {
      // Input is a image file
      imageFileName = argv[++i];
      stillImage = true;
      useCamera = false;
    }
    else if(strcmp(s, "-resizedWidth") == 0)
    {
      procWidth = atoi(argv[++i]);
    }
    else if(strcmp(s, "-verbose" ) == 0)
    {
      const char* ss = argv[++i];
      if(strcmp(ss, "ON") == 0 || strcmp(ss, "on") == 0
      || strcmp(ss, "TRUE") == 0 || strcmp(ss, "true") == 0
      || strcmp(ss, "YES") == 0 || strcmp(ss, "yes") == 0 )
      verbose = true;
    }
    else if(strcmp(s, "-play" ) == 0)
    {
      const char* ss = argv[++i];
      if(strcmp(ss, "OFF") == 0 || strcmp(ss, "off") == 0
      || strcmp(ss, "FALSE") == 0 || strcmp(ss, "false") == 0
      || strcmp(ss, "NO") == 0 || strcmp(ss, "no") == 0
      || strcmp(ss, "STEP") == 0 || strcmp(ss, "step") == 0)
      playMode = false;
    }
    else if(strcmp(s, "-mode" ) == 0)
    {
      const char* ss = argv[++i];
      if(strcmp(ss, "LS") == 0)
      mode = MODE_LS;
      else if(strcmp(ss, "NIETO") == 0)
      mode = MODE_NIETO;
      else
      {
        perror("ERROR: Only LS or NIETO modes are supported\n");
      }
    }
    else if(strcmp(s,"-numVps") == 0)
    {
      numVps = atoi(argv[++i]);
    }
  }

  // Open video input
  if( useCamera )
  video.open(0);
  else
  {
    if(!stillImage)
    // video.open(videoFileName);
    video = VideoCapture(videoFileName);
  }

  // Check video input
  int width = 0, height = 0, fps = 0, fourcc = 0;

  if(!stillImage)
  {
    if( !video.isOpened() )
    {
      printf("ERROR: can not open camera or video file\n");
      return -1;
    }
    else
    {
      // Show video information
      width = (int) video.get(CV_CAP_PROP_FRAME_WIDTH);
      height = (int) video.get(CV_CAP_PROP_FRAME_HEIGHT);
      fps = (int) video.get(CV_CAP_PROP_FPS);
      fourcc = (int) video.get(CV_CAP_PROP_FOURCC);

      if(!useCamera)
      printf("Input video: (%d x %d) at %d fps, fourcc = %d\n", width, height, fps, fourcc);
      else
      printf("Input camera: (%d x %d) at %d fps\n", width, height, fps);
    }
  }
  else
  {
    inputImg = cv::imread(imageFileName);
    if(inputImg.empty())
    return -1;

    resize(inputImg,inputImg,size);
    // size of image
    width = inputImg.cols;
    height = inputImg.rows;

    printf("Input image: (%d x %d)\n", width, height);

    playMode = false;
  }

  // Resize
  if(procWidth != -1)
  {

    procHeight = height*((double)procWidth/width);
    procSize = cv::Size(procWidth, procHeight);

    printf("Resize to: (%d x %d)\n", procWidth, procHeight);
  }
  else
  procSize = cv::Size(width, height);

  // Create and init MSAC
  MSAC msac;
  msac.init(mode, procSize, verbose);


  int frameNum=0;
  for( ;; )
  {
    if(!stillImage)
    {
      printf("\n-------------------------\nFRAME #%6d\n", frameNum);
      frameNum++;

      // Get current image
      video >> inputImg;
    }

    if( inputImg.empty() )
    break;

    resize(inputImg,inputImg,size);

    // Resize to processing size
    //      cv::resize(inputImg, inputImg, procSize);

    // Color Conversion
    if(inputImg.channels() == 3)
    {
      cv::cvtColor(inputImg, imgGRAY, CV_BGR2GRAY);
      inputImg.copyTo(outputImg);
    }
    else
    {
      inputImg.copyTo(imgGRAY);
      cv::cvtColor(inputImg, outputImg, CV_GRAY2BGR);
    }
    // ++++++++++++++++++++++++++++++++++++++++
    // Process
    // ++++++++++++++++++++++++++++++++++++++++
    processImage(msac, numVps, imgGRAY, outputImg);

    printf("Time taken: %.2fs\n", (double)(clock() - tStart)/CLOCKS_PER_SEC);

    // Wait for a key: 1 ms in play mode, until a key is pressed in step mode
    char q;
    if(playMode)
    q = (char)cv::waitKey(1);
    else
    q = (char)cv::waitKey(0);

    if( q == 27 )
    {
      printf("\nStopped by user request\n");
      break;
    }

    if(stillImage)
    break;
  }

  if(!stillImage)
  video.release();

  return 0;

}
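As for building and running it, my understanding (with placeholder names) is that the package's package.xml and CMakeLists.txt would need roscpp, cv_bridge, image_transport and sensor_msgs listed as dependencies, and that after catkin_make I would start the node with rosrun <package_name> <node_name> while the existing camera node keeps publishing. Please correct me if that is not the right way to hook into the camera that is already in use.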

0 Answers

No answers yet