使用Opencv和JavaCamera进行对象跟踪

时间:2016-11-07 19:43:51

标签: android opencv flip

当我使用后置摄像头抓取帧时,Android 应用程序默认是横向(landscape)的;为了获取输入帧,我使用了

Core.flip(currentFrame, currentFrame, 1);//flip around Y-axis

使用opencv进行一些图像增强和findcontour后,

我有以下问题:

a. 物体向左移动时,drawCircle 向下移动;b. 物体向右移动时,drawCircle 向上移动。
c. 物体向上移动时,drawCircle 向左移动;d. 物体向下移动时,drawCircle 向右移动。

换句话说,drawCircle 的输出相对于源图像顺时针旋转了 90 度。

代码如下所示:

package com.mtyiuaa.writingintheair;
import android.support.v7.app.AppCompatActivity;
import android.os.Bundle;
import android.util.Log;
import android.view.SurfaceView;
import android.view.View;
import android.content.Intent;
import android.view.ViewDebug;
import android.widget.Button;
import java.util.ArrayList;
import java.util.List;
import org.opencv.android.BaseLoaderCallback;
import org.opencv.android.CameraBridgeViewBase;
import org.opencv.android.JavaCameraView;
import org.opencv.android.LoaderCallbackInterface;
import org.opencv.android.OpenCVLoader;
import org.opencv.core.Core;
import org.opencv.core.Point;
import org.opencv.core.MatOfPoint;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.Scalar;
import org.opencv.core.Size;
import org.opencv.core.Rect;
import org.opencv.imgproc.Imgproc;
import org.opencv.imgproc.Moments;
import org.opencv.highgui.VideoCapture;

public class MainActivity extends AppCompatActivity implements   CameraBridgeViewBase.CvCameraViewListener2{

// Local copies of OpenCV threshold-type codes (mirror Imgproc.THRESH_*).
private static final int THRESH_BINARY = 1;
private static final  int THRESH_TOZERO = 4;

// Log tag for this activity.
private static String TAG = "MainActivity";

// Camera preview surface that delivers frames via CvCameraViewListener2.
JavaCameraView javaCameraView;
// Second preview surface; never enabled in the visible code.
JavaCameraView javaCameraView2;
VideoCapture videoCapture;
// Working buffers: allocated in onCameraViewStarted(), freed in onCameraViewStopped().
Mat mRgba;
Mat temp;
Mat previousFrame;
Mat GpreviousFrame; // gray-level frame of previous Frame
Mat currentFrame;
Mat GcurrentFrame; // gray-level frame of current Frame
Mat diffFrame;
Mat imgGray;
Mat imgHSV;
Mat imgCanny;
Mat inputFrame;
Mat FlipFrame;
Mat outputFrame;
Mat imgthresholding;
Mat imgNormalization;
Mat imgGaussianSmothing;
// Upper bound written into thresholded pixels by Imgproc.threshold().
int max_Binary_value = 255;
// Threshold value passed to Imgproc.threshold().
int thresh = 20;
// True while the camera view is delivering frames.
Boolean CameraActive;
Boolean firstIteration= true;
// Last known object position as {x, y}.
int[] theObject = {0,0};

// Tracked-object centroid; note the parameters of TrackFilteredObject() shadow these fields.
int x=0, y=0;
// Expected camera frame dimensions in pixels (used for clamping in drawObject()).
int FRAME_WIDTH = 1280;
int FRAME_HEIGHT = 720;
//max number of objects to be detected in frame
int MAX_NUM_OBJECTS=50;
//Minimum and Maximum object area
int MIN_OBJECT_AREA = 20*20;
int MAX_OBJECT_AREA = (int) ((FRAME_HEIGHT*FRAME_WIDTH)/1.5);


//MatOfPoint allcontours = new MatOfPoint();
//bounding rectangle of the object, we will use the center of this as its position.

// Receives the result of the asynchronous OpenCV initialisation and starts
// the camera preview once the native library is available.
BaseLoaderCallback mLoaderCallBack = new BaseLoaderCallback(this) {
    @Override
    public void onManagerConnected(int status) {
        if (status == BaseLoaderCallback.SUCCESS) {
            // OpenCV is ready: begin delivering frames to onCameraFrame().
            javaCameraView.enableView();
            //javaCameraView2.enableView();
        } else {
            super.onManagerConnected(status);
        }
    }
};

// NOTE(review): this static initializer is empty. Apps that bundle the OpenCV
// native library statically usually call System.loadLibrary() here; this app
// relies on OpenCVLoader.initDebug()/initAsync() in onResume() instead — confirm
// that is intentional.
static{

}
//JavaCameraView javaCameraView;


@Override
protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.activity_main);
    javaCameraView = (JavaCameraView)findViewById(R.id.java_camera_view);
    javaCameraView.setVisibility(SurfaceView.VISIBLE);
    javaCameraView.setCvCameraViewListener(this);


@Override
protected void onPause(){
    super.onPause();
    // Stop processing and release the camera while the activity is backgrounded.
    if (javaCameraView == null) return;
    CameraActive = false;
    javaCameraView.disableView();
}

@Override
protected void onDestroy(){
    super.onDestroy(); // let the base class tear down first
    // Make sure the camera is released when the activity is destroyed.
    if (javaCameraView == null) return;
    javaCameraView.disableView();
}

@Override
protected void onResume(){
    super.onResume(); //call based class
    // Try the statically linked OpenCV library first; fall back to the
    // asynchronous OpenCV Manager service if it is not present.
    if (!OpenCVLoader.initDebug()) {
        Log.i(TAG, "OpenCV not loaded");
        //recall opencvLoader if not loaded
        OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_10, this, mLoaderCallBack);
    } else {
        Log.i(TAG, "OpenCV loaded successfully");
        //grab a new instance by using Basecallbackloader
        mLoaderCallBack.onManagerConnected(LoaderCallbackInterface.SUCCESS);
    }
}

@Override
public void onCameraViewStarted(int width, int height) {
    // Allocate every working buffer once, when the camera preview starts.
    // Mat::Mat(int rows, int cols, int type) — rows = height, cols = width.
    CameraActive = true;

    // BUG FIX: the original used CV_8SC4/CV_8SC1 (signed 8-bit). Camera RGBA
    // frames and the single-channel outputs of cvtColor/inRange/threshold are
    // unsigned 8-bit (CV_8U), so the buffers must be CV_8UC4/CV_8UC1.

    // 4-channel RGBA buffers.
    mRgba = new Mat(height, width, CvType.CV_8UC4);
    FlipFrame = new Mat(height, width, CvType.CV_8UC4);
    previousFrame = new Mat(height, width, CvType.CV_8UC4);
    currentFrame = new Mat(height, width, CvType.CV_8UC4);
    diffFrame = new Mat(height, width, CvType.CV_8UC4);

    // Single-channel (gray / mask) buffers.
    GcurrentFrame = new Mat(height, width, CvType.CV_8UC1);
    GpreviousFrame = new Mat(height, width, CvType.CV_8UC1);
    imgGray = new Mat(height, width, CvType.CV_8UC1);
    imgHSV = new Mat(height, width, CvType.CV_8UC1);
    imgCanny = new Mat(height, width, CvType.CV_8UC1);
    imgGaussianSmothing = new Mat(height, width, CvType.CV_8UC1);
    imgthresholding = new Mat(height, width, CvType.CV_8UC1);
    imgNormalization = new Mat(height, width, CvType.CV_8UC1);
    inputFrame = new Mat(height, width, CvType.CV_8UC1);
    outputFrame = new Mat(height, width, CvType.CV_8UC1);
    temp = new Mat(height, width, CvType.CV_8UC1);
}

@Override
public void onCameraViewStopped() {
    // Free every native buffer allocated in onCameraViewStarted().
    Mat[] buffers = {
        mRgba, FlipFrame, previousFrame, currentFrame, diffFrame,
        GcurrentFrame, GpreviousFrame, imgGray, imgHSV, imgCanny,
        imgGaussianSmothing, imgthresholding, imgNormalization,
        inputFrame, outputFrame, temp
    };
    for (Mat buffer : buffers) {
        buffer.release();
    }
    CameraActive = false;
}

@Override
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
    // BUG FIX: the original wrapped this body in while(CameraActive), which
    // never yields control back to the camera pipeline (the callback must
    // process ONE frame and return), allocated new Mats every iteration
    // without releasing them (native memory leak), and always returned null
    // so nothing was ever displayed. Process the frame once and return it.

    currentFrame = inputFrame.rgba();
    Core.flip(currentFrame, currentFrame, 1); // mirror around the Y axis

    // Segment the target colour: RGB -> HSV -> colour mask -> denoise.
    Mat binary = new Mat();
    RGB2HSV(currentFrame).copyTo(binary);
    FilterHSVImage(binary).copyTo(binary);
    //CannyDetector(binary).copyTo(binary);
    MorphOperation(binary).copyTo(binary);

    List<MatOfPoint> contours = new ArrayList<>();
    Mat hierarchy = new Mat();
    Imgproc.findContours(binary, contours, hierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);

    Mat largest = new Mat();
    binary.copyTo(largest);
    FindLargestContours(largest, contours);

    // Release per-frame temporaries to avoid leaking native memory.
    binary.release();
    hierarchy.release();
    largest.release();

    return currentFrame;
}



/**
 * Runs the Canny edge detector on the given frame.
 * Goal: an edge image is less sensitive to lighting conditions.
 * Writes into the shared imgCanny buffer and returns it.
 */
public Mat CannyDetector(Mat inputFrame) {
    final double lowerThreshold = 50;
    final double upperThreshold = 150;
    Imgproc.Canny(inputFrame, imgCanny, lowerThreshold, upperThreshold);
    return imgCanny;
}




/** Converts an RGB frame to gray-level, writing into the shared imgGray buffer. */
private Mat RGB2Gray (Mat inputFrame){
    final int conversion = Imgproc.COLOR_RGB2GRAY;
    Imgproc.cvtColor(inputFrame, imgGray, conversion);
    return imgGray;
}

/**
 * Converts an RGB frame to HSV colour space, writing into the shared imgHSV buffer.
 * NOTE(review): COLOR_RGB2HSV expects a 3-channel input — the camera delivers
 * RGBA; confirm the alpha channel is handled upstream.
 */
private Mat RGB2HSV (Mat inputFrame){
    final int conversion = Imgproc.COLOR_RGB2HSV;
    Imgproc.cvtColor(inputFrame, imgHSV, conversion);
    return imgHSV;
}

/**
 * Keeps only pixels whose HSV value falls in the target colour range
 * (hue 0-10, i.e. red-ish), producing a binary mask in imgthresholding.
 * NOTE: red also wraps around at hue 160-179; that second range is not filtered here.
 */
private Mat FilterHSVImage(Mat inputFrame){
    Scalar lowerBound = new Scalar(0, 100, 100);
    Scalar upperBound = new Scalar(10, 255, 255);
    Core.inRange(inputFrame, lowerBound, upperBound, imgthresholding);
    return imgthresholding;
}

/**
 * Cleans the binary mask in place with a morphological open: erode twice with
 * a small 3x3 element to remove speckle noise, then dilate twice with a
 * larger 8x8 element to restore / emphasise the remaining object.
 *
 * BUG FIX: the original called dilate() with the 3x3 "erodeElement" and
 * erode() with the 8x8 "dilateElement" — the inverse of the intended
 * operation, which grows noise first and then shrinks the object.
 * Also releases the structuring elements, which previously leaked.
 */
private Mat MorphOperation (Mat inputFrame){
    Mat erodeElement = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(3,3));
    Mat dilateElement = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(8,8));

    Imgproc.erode(inputFrame, inputFrame, erodeElement);
    Imgproc.erode(inputFrame, inputFrame, erodeElement);

    Imgproc.dilate(inputFrame, inputFrame, dilateElement);
    Imgproc.dilate(inputFrame, inputFrame, dilateElement);

    erodeElement.release();
    dilateElement.release();
    return inputFrame;
}



/**
 * Applies a to-zero threshold: pixels below {@code thresh} become 0, the rest
 * keep their value. Result is written into the shared imgthresholding buffer.
 */
private Mat Threshold(Mat inputFrame){
    final int mode = Imgproc.THRESH_TOZERO;
    Imgproc.threshold(inputFrame, imgthresholding, thresh, max_Binary_value, mode);
    return imgthresholding;
}

/**
 * Applies a binary threshold: pixels above {@code thresh} become
 * max_Binary_value, the rest become 0. Writes into imgthresholding.
 */
private Mat ThresholdToBinary(Mat inputFrame){
    final int mode = Imgproc.THRESH_BINARY;
    Imgproc.threshold(inputFrame, imgthresholding, thresh, max_Binary_value, mode);
    return imgthresholding;
}
/**
 * Min-max normalises the frame so its values span [min, max].
 * Writes into the shared imgNormalization buffer.
 */
private Mat Normalization(Mat inputFrame, double min, double max){
    final int normType = Core.NORM_MINMAX;
    Core.normalize(inputFrame, imgNormalization, min, max, normType);
    return imgNormalization;
}


// Draws a tracking marker (circle + cross-hair clamped to the frame edges +
// coordinate label) at (x, y) on inputFrame, copies the result to outputFrame
// and returns it.
//
// NOTE(review): the reported symptom — the marker moving 90 degrees rotated
// relative to the object — originates from the coordinates passed in here.
// Because the landscape camera frame is rotated relative to the portrait
// display, the accepted fix is to swap x and y when drawing (and mirror as
// needed) — TODO confirm against the device/camera orientation.
private Mat drawObject(int x, int y, Mat inputFrame) {
    Point point = new Point(x, y);
    Point pointA = new Point(x, y - 25); // cross-hair arm endpoints, 25 px long
    Point pointB = new Point(x, y + 25);
    Point pointC = new Point(x - 25, y);
    Point pointD = new Point(x + 25, y);
    Scalar scalar = new Scalar(255, 0, 0); // marker colour (red in RGBA)


        Core.circle(inputFrame,point,20,scalar,2);

        // Each arm is clamped at the frame border when the marker is near an edge.
        if(y-25>0) Core.line(inputFrame,point,pointA,scalar,2);
        else Core.line(inputFrame,point,new Point(x,0),scalar,2);
        if(y+25<FRAME_HEIGHT) Core.line(inputFrame,point,pointB,scalar,2);
        else Core.line(inputFrame,point,new Point(x,FRAME_HEIGHT),scalar,2);
        if(x-25>0)Core.line(inputFrame,point,pointC,scalar,2);
        else Core.line(inputFrame,point,new Point(0,y),scalar,2);
        if(x+25<FRAME_WIDTH) Core.line(inputFrame,point,pointD,scalar,2);
        else Core.line(inputFrame,point,new Point(FRAME_WIDTH,y),scalar,2);
        Core.putText(inputFrame, "Tracking object at (" + Integer.toString(x)+" , "+ Integer.toString(y)+ ")",point, 1, 1,scalar, 2);
       // putText(inputFrame,intToString(x)+","+intToString(y),Point(x,y+30),1,1,Scalar(0,255,0),2);
        Log.i(TAG, "Draw x at "+Integer.toString(x)+ "  Draw y at "+ Integer.toString(y));

    inputFrame.copyTo(outputFrame);
    return outputFrame;
}


/**
 * Finds the largest plausible blob in the binary {@code filteredImage} and
 * records its centroid in the fields {@code this.x}/{@code this.y}; when an
 * object is found, draws the tracking marker onto {@code sourceImage}.
 *
 * BUG FIX: the original assigned the centroid to the parameters x/y, which
 * are pass-by-value copies in Java that shadow the fields of the same name,
 * so the result was silently discarded and objectFound was never used.
 * Also releases the temporary Mats, which previously leaked native memory.
 */
private void TrackFilteredObject (int x, int y, Mat filteredImage, Mat sourceImage){
    boolean objectFound = false;
    Mat temp3 = new Mat();
    filteredImage.copyTo(temp3);
    List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
    Mat hierarchy = new Mat();
    Imgproc.findContours(temp3,contours,hierarchy,Imgproc.RETR_CCOMP, Imgproc.CHAIN_APPROX_SIMPLE);

    double refArea = 0;

    if (hierarchy.size().height>0 && hierarchy.size().width>0){
        // Walk the top-level contours via hierarchy "next sibling" links.
        for (int index = 0; index >= 0; index =(int)hierarchy.get(index,0)[0]){

            Moments moment = Imgproc.moments(contours.get(index), true);
            double area = moment.get_m00();

            // Below 20x20 px it is probably noise; near the full frame it is
            // probably a bad filter. Keep only the largest qualifying blob by
            // carrying the best area seen so far in refArea.
            if (area > MIN_OBJECT_AREA && area < MAX_OBJECT_AREA && area > refArea) {
                this.x = (int) (moment.get_m10()/area); // centroid = m10/m00
                this.y = (int) (moment.get_m01()/area); // centroid = m01/m00
                objectFound = true;
                refArea = area;
            } else objectFound = false;
        }
        if (objectFound) {
            drawObject(this.x, this.y, sourceImage);
        }
    }

    temp3.release();
    hierarchy.release();
}



    }

1 个答案:

答案 0 :(得分:0)

将 x 与 y 互换即可,非常简单。