I want to use GStreamer to capture video from an IP camera and compress it into an H.264 stream, then receive that H.264 stream with OpenCV + GStreamer on an NVIDIA TX1. This is my GStreamer pipeline:
gst-launch-1.0 -ve rtspsrc location=rtsp://admin:12345@192.168.1.64/Streaming/Channels/1 ! nvvidconv flip-method=6 ! 'video/x-raw(memory:NVMM), width=(int)960, height=(int)540, format=(string)I420, framerate=(fraction)30/1' ! omxh264enc control-rate=2 bitrate=4000000 ! 'video/x-h264, stream-format=(string)byte-stream' ! h264parse ! queue ! omxh264dec ! nvvidconv ! 'video/x-raw, format=(string)UYVY' ! videoconvert ! jpegenc quality=30 ! rtpjpegpay ! udpsink host=$CLIENT_IP port=5000 sync=false async=false
The pipeline above captures the camera feed, compresses it into a 30 fps, 960×540 H.264 stream, and sends it over UDP to port 5000 on the board (the final stage re-encodes the decoded frames as RTP/JPEG before udpsink). It runs successfully. Here is my client code:
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <iostream>

using namespace cv;

int main(int, char**)
{
    VideoCapture input("./stream.sdp");
    if(!input.isOpened()){ // check if we succeeded
        std::cout << "open failed" << std::endl;
        return -1;
    }

    Mat img, img_gray;
    OrbFeatureDetector detector(7000);
    vector<KeyPoint> img_keypoints, car_keypoints;
    Mat img_descriptors, car_descriptors;

    // Grab the first frame and cut out a template region to track.
    input.read(img);
    Mat car;
    img(Rect(400, 320, 150, 100)).copyTo(car);

    detector(car, Mat(), car_keypoints, car_descriptors);
    drawKeypoints(car, car_keypoints, car);

    for(;;)
    {
        if(!input.read(img))
            break;
        detector(img, Mat(), img_keypoints, img_descriptors);
        drawKeypoints(img, img_keypoints, img);

        // Match template descriptors (query) against the current frame (train).
        BFMatcher matcher;
        vector<DMatch> matches;
        matcher.match(car_descriptors, img_descriptors, matches);

        vector<Point2f> car_points, img_points;
        for(size_t i = 0; i < matches.size(); ++i){
            car_points.push_back(car_keypoints[matches[i].queryIdx].pt);
            img_points.push_back(img_keypoints[matches[i].trainIdx].pt); // trainIdx indexes img_keypoints
        }
        std::cout << "car points count = " << car_points.size() << std::endl;

        if(car_points.size() >= 4){
            // Estimate the homography mapping the template into the current frame.
            Matx33f H = findHomography(car_points, img_points, CV_RANSAC);

            vector<Point> car_border, img_border;
            car_border.push_back(Point(0, 0));
            car_border.push_back(Point(0, car.rows));
            car_border.push_back(Point(car.cols, car.rows));
            car_border.push_back(Point(car.cols, 0));
            for (size_t i = 0; i < car_border.size(); ++i){
                Vec3f p = H * Vec3f(car_border[i].x, car_border[i].y, 1);
                img_border.push_back(Point(p[0]/p[2], p[1]/p[2]));
            }
            polylines(img, img_border, true, CV_RGB(255, 255, 0));

            Mat img_matches;
            drawMatches(car, car_keypoints, img, img_keypoints, matches, img_matches);
            imshow("img_matches", img_matches);
        }
        // imshow("car", car);
        // imshow("img", img);
        if(waitKey(27) >= 0) break;
    }
    // the camera will be deinitialized automatically in VideoCapture destructor
    return 0;
}
My CMakeLists.txt configuration file is as follows:
project(hello)
find_package(OpenCV REQUIRED)
include_directories(${OpenCV_INCLUDE_DIRS})
add_executable(cv_hello hello.cpp)
target_link_libraries(cv_hello ${OpenCV_LIBS})
The client code compiles successfully, but when it runs, VideoCapture input("./stream.sdp")
fails to open the sdp file and prints "open failed". Here is my stream.sdp file:
c=IN IP4 127.0.0.1
m=video 5000 RTP/AVP 96
a=rtpmap:96 JPEG/4000000
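What I am ultimately after is something like the sketch below: opening the incoming RTP/JPEG stream by handing a GStreamer pipeline string to VideoCapture instead of the .sdp path. To be clear, this is only my assumption of how it should look, not something that works yet: it requires OpenCV to have been built with GStreamer support, and the caps values (clock-rate, encoding-name) are my guesses based on the sender pipeline above.

#include <opencv2/highgui/highgui.hpp>
#include <opencv2/opencv.hpp>
#include <iostream>

int main()
{
    // Assumed receive-side pipeline matching the sender: RTP/JPEG on UDP port 5000,
    // depayloaded, JPEG-decoded, converted, and handed to OpenCV through appsink.
    const char* pipeline =
        "udpsrc port=5000 caps=\"application/x-rtp, media=(string)video, "
        "clock-rate=(int)90000, encoding-name=(string)JPEG\" ! "
        "rtpjpegdepay ! jpegdec ! videoconvert ! appsink";

    cv::VideoCapture input(pipeline); // only works if OpenCV was built with GStreamer support
    if (!input.isOpened()) {
        std::cout << "open failed" << std::endl;
        return -1;
    }

    cv::Mat frame;
    while (input.read(frame)) {
        cv::imshow("frame", frame);
        if (cv::waitKey(1) >= 0) break;
    }
    return 0;
}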
I have tried using an absolute path to the sdp file, and I have also tried setting the environment variable
export PKG_CONFIG_PATH=/home/ubuntu/ffmpeg_build/lib/pkgconfig : $PKG_CONFIG_PATH
to add the FFmpeg decoder, but neither solved the problem.
I am using OpenCV 2.4.13 and gstreamer-1.0 on the TX1.
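For completeness: as far as I know, the video I/O backends compiled into an OpenCV build can be listed with cv::getBuildInformation() (available in 2.4), so a minimal check like the one below should show whether the libopencv I am linking against actually has GStreamer and FFMPEG support.

#include <opencv2/core/core.hpp>
#include <iostream>

int main()
{
    // Prints the full build configuration, including the "Video I/O" section
    // (GStreamer / FFMPEG / V4L), for the OpenCV libraries this program links against.
    std::cout << cv::getBuildInformation() << std::endl;
    return 0;
}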