Using SURF or ORB algorithms with OpenCV on Android

Asked: 2015-09-08 16:44:05

Tags: android opencv java-native-interface image-recognition

I am new to OpenCV, so I will try to explain my problem as well as I can. (Sorry for my bad English.)

Goal

An Android app using OpenCV that has a collection of saved or downloaded images (wherever they come from), recognizes each of those images in the camera frames, and draws a rectangle around every image it finds in the frame.

Something like this example, but in my case, when there are multiple image matches, all of them should be drawn on the same frame.
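
As I understand it, the pipeline such an app needs is: detect keypoints in the stored image and in the camera frame, compute descriptors, match them, filter the matches, estimate a homography, and project the stored image's corners into the frame. A minimal sketch of that pipeline in OpenCV 2.4 C++ (locateObject and all names in it are illustrative, not code from my project):

#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/calib3d/calib3d.hpp>

using namespace cv;

// Sketch: find one trained image in a camera frame and return its corners
// projected into the frame (empty when not found).
std::vector<Point2f> locateObject(const Mat& object, const Mat& frame) {
    ORB orb(500);                              // detector + extractor in one
    std::vector<KeyPoint> kpObj, kpFrame;
    Mat descObj, descFrame;
    orb(object, noArray(), kpObj, descObj);    // detect + compute
    orb(frame, noArray(), kpFrame, descFrame);

    BFMatcher matcher(NORM_HAMMING);           // Hamming distance for ORB
    std::vector<std::vector<DMatch> > knn;
    matcher.knnMatch(descObj, descFrame, knn, 2);

    std::vector<Point2f> src, dst;
    for (size_t i = 0; i < knn.size(); i++) {  // Lowe's ratio test
        if (knn[i].size() == 2 && knn[i][0].distance < 0.75f * knn[i][1].distance) {
            src.push_back(kpObj[knn[i][0].queryIdx].pt);
            dst.push_back(kpFrame[knn[i][0].trainIdx].pt);
        }
    }

    std::vector<Point2f> corners;
    if (src.size() >= 4) {                     // homography needs 4+ points
        Mat H = findHomography(src, dst, CV_RANSAC);
        std::vector<Point2f> objCorners(4);
        objCorners[0] = Point2f(0, 0);
        objCorners[1] = Point2f((float)object.cols, 0);
        objCorners[2] = Point2f((float)object.cols, (float)object.rows);
        objCorners[3] = Point2f(0, (float)object.rows);
        perspectiveTransform(objCorners, corners, H);
    }
    return corners;
}

Running this once per stored image on each frame, and drawing lines between the returned corners, is essentially what my code below tries to do.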

Problem

I have been reading a lot of posts and working through the OpenCV samples. I got this working in a desktop executable, but when I port it to an Android app it does not work correctly.

Post 1, Post 2, Post 3 …

I have tried SURF (compiling the nonfree library for Android), ORB, and the FLANN matcher, but I only get different, inaccurate lines in every direction, or just points.
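
From what I have read, part of the problem may be the matcher pairing: ORB produces binary descriptors, while FlannBasedMatcher's default KD-tree index expects float descriptors. A minimal sketch of the two matcher setups that are meant to work with ORB in OpenCV 2.4 (both lines are illustrative, not code from my project):

#include <opencv2/features2d/features2d.hpp>

using namespace cv;

// Option 1: brute-force matching in Hamming space, the natural fit for ORB.
BFMatcher bfMatcher(NORM_HAMMING);

// Option 2: FLANN with an LSH index, which does support binary descriptors
// (the parameters are the commonly used ones from the documentation).
FlannBasedMatcher lshMatcher(new flann::LshIndexParams(12, 20, 2));

The workaround my code below uses instead is converting the descriptors to CV_32F before handing them to the default FlannBasedMatcher.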

My attempts:

Java activity, based on OpenCV sample 2:

package org.opencv.jc.tct;

import org.opencv.android.BaseLoaderCallback;
import org.opencv.android.CameraBridgeViewBase;
import org.opencv.android.CameraBridgeViewBase.CvCameraViewFrame;
import org.opencv.android.CameraBridgeViewBase.CvCameraViewListener2;
import org.opencv.android.LoaderCallbackInterface;
import org.opencv.android.OpenCVLoader;
import org.opencv.core.CvType;
import org.opencv.core.Mat;

import android.app.Activity;
import android.os.Bundle;
import android.util.Log;
import android.view.WindowManager;
public class Tutorial2Activity extends Activity implements CvCameraViewListener2 {
private static final String    TAG = "TCT";


private static final int       CORRECT            = 0;
private static final int       FAILED             = 1;

private Mat                    mRgba;
private Mat                    mIntermediateMat;
private Mat                    mGray;
private int                    res = -1;
private CameraBridgeViewBase   mOpenCvCameraView;

private BaseLoaderCallback  mLoaderCallback = new BaseLoaderCallback(this) {
    @Override
    public void onManagerConnected(int status) {
        switch (status) {
            case LoaderCallbackInterface.SUCCESS:
            {
                Log.i(TAG, "OpenCV loaded successfully");

                // Load native library after(!) OpenCV initialization
                System.loadLibrary("mixed_sample");
                //ImageInterface.imageBase(); // TODO: load the images downloaded from the internet
                mOpenCvCameraView.enableView();
            } break;
            default:
            {
                super.onManagerConnected(status);
            } break;
        }
    }
};

public Tutorial2Activity() {
    Log.i(TAG, "Instantiated new " + this.getClass());
}

/** Called when the activity is first created. */
@Override
public void onCreate(Bundle savedInstanceState) {
    Log.i(TAG, "called onCreate");
    super.onCreate(savedInstanceState);


    getWindow().addFlags(WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON);

    setContentView(R.layout.tutorial2_surface_view);

    mOpenCvCameraView = (CameraBridgeViewBase) findViewById(R.id.tutorial2_activity_surface_view);
    mOpenCvCameraView.setCvCameraViewListener(this);
    // note: this thread starts before OpenCV has been initialised by
    // initAsync() in onResume(); creating Mats in ImageInterface before the
    // native library is loaded can crash. Starting it from onManagerConnected()
    // would be safer.
    Thread th = new Thread(new Runnable() {

        @Override
        public void run() {
            res = ImageInterface.imageBase();
            if(res == CORRECT)
                ImageInterface.NativeDataTest();
        }
    });

    th.start();

}

@Override
public void onPause()
{
    super.onPause();
    if (mOpenCvCameraView != null)
        mOpenCvCameraView.disableView();
}

@Override
public void onResume()
{
    super.onResume();
    OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_11, this, mLoaderCallback);
}

public void onDestroy() {
    super.onDestroy();
    if (mOpenCvCameraView != null)
        mOpenCvCameraView.disableView();
}

public void onCameraViewStarted(int width, int height) {
    mRgba = new Mat(height, width, CvType.CV_8UC4);

    mGray = new Mat(height, width, CvType.CV_8UC1);

}

public void onCameraViewStopped() {
    mRgba.release();
    mGray.release();

}

public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
    mRgba = inputFrame.rgba();
    mGray = inputFrame.gray();
    if(res == CORRECT){
        ImageInterface.imageFrame(mGray.getNativeObjAddr(), mRgba.getNativeObjAddr());
    }else if(res == FAILED){
        Log.i(TAG, "Base images are not ready");
    }
    return mGray;
}


}

The ImageInterface class:

package org.opencv.jc.tct;
import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;

import org.opencv.android.Utils;
import org.opencv.core.Mat;
import org.opencv.imgproc.Imgproc;

import android.content.Context;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.os.Environment;
import android.util.Log;

public class ImageInterface {
private static final String    TAG = "TCT";
private static long[]          imaArray;
// Keep strong references to the downloaded Mats: only their native addresses
// are handed to JNI, and without these references the garbage collector could
// finalize the Mats and free the native memory behind those addresses.
private static List<Mat>       retainedMats = new ArrayList<Mat>();

public static int imageBase(){
    int nativeRes = -1;
    getBitmapFromURL();
    // the null check must come before the length access
    if(imaArray != null && imaArray.length > 0){
        nativeRes = ProcessImagesBase(imaArray);
    }

    return nativeRes;
}

public static void imageFrame(long matAddrGr, long matAddrRgba){
    ProcessImageFrame( matAddrGr, matAddrRgba);
}

public static void NativeDataTest(){
    TestData();
}


// download data from internet //
private static void getBitmapFromURL() {

    String[] imageUrl = {"http://s7.postimg.org/3yz6bb87f/libro2.jpg"};
    imaArray = new long[imageUrl.length];

    for(int i = 0; i < imageUrl.length; i++){
        try {
            URL url = new URL(imageUrl[i]);
            HttpURLConnection connection = (HttpURLConnection) url.openConnection();
            connection.setDoInput(true);
            connection.connect();
            InputStream input = connection.getInputStream();

            Bitmap myBitmap = BitmapFactory.decodeStream(input);
            if(myBitmap != null){
                Mat mat = new Mat();
                Utils.bitmapToMat(myBitmap, mat);
                if(!mat.empty()){
                    // bitmapToMat produces an RGBA Mat; convertTo() only changes
                    // the depth, so cvtColor is needed to get a 1-channel image.
                    Imgproc.cvtColor(mat, mat, Imgproc.COLOR_RGBA2GRAY);
                    Log.i("ImageInterface", "Image downloaded, type: " + mat.type());
                }else
                    Log.i("ImageInterface", "Mat " + i + " is empty");
                retainedMats.add(mat);
                imaArray[i] = mat.getNativeObjAddr();
                Log.i("ImageInterface", "BITMAP " + i + " filled");
            }else
                Log.i("ImageInterface", "BITMAP is empty");

        } catch (IOException e) {
            e.printStackTrace();
        }
    }
    Log.i("ImageInterface", "Address array length: " + imaArray.length);
}

//Get App Data Folder in Android
public File getDataFolder(Context context) {
    File dataDir = null;
    if (Environment.getExternalStorageState().equals(Environment.MEDIA_MOUNTED)) {
        dataDir = new File(Environment.getExternalStorageDirectory(), "files");
        if (!dataDir.isDirectory()) {
            dataDir.mkdirs();
        }
    }

    // guard against the unmounted case, where dataDir is still null
    if (dataDir == null || !dataDir.isDirectory()) {
        dataDir = context.getFilesDir();
    }

    return dataDir;
}


  public void writeDataFolder(Context context){

    String[] pathList = {"http://localhost/libro1.jpg",
                         "http://localhost/libro2.jpg"};

    for(int i = 0; i < pathList.length; i++){
        InputStream inputStream = null;
        FileOutputStream outputStream = null;
        try {
            URL wallpaperURL = new URL(pathList[i]);
            inputStream = new BufferedInputStream(wallpaperURL.openStream(), 10240);

            File cacheDir = context.getCacheDir();
            // note: every download overwrites the same cache file; use a
            // per-URL name if all images need to be kept.
            File cacheFile = new File(cacheDir, "localFileName.jpg");
            outputStream = new FileOutputStream(cacheFile);

            byte[] buffer = new byte[1024];
            int dataSize;
            while ((dataSize = inputStream.read(buffer)) != -1) {
                outputStream.write(buffer, 0, dataSize);
            }
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            try {
                if (inputStream != null) inputStream.close();
                if (outputStream != null) outputStream.close();
            } catch (IOException ignored) {
            }
        }
    }
}
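
// These native methods are implemented in the C++ JNI file below; the JNI
// naming convention Java_org_opencv_jc_tct_ImageInterface_<method> ties the
// two sides together.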
private native static int ProcessImagesBase(long[] arrayImage); 
private native static void ProcessImageFrame(long matAddrGr, long matAddrRgba);
private native static void TestData();
}

C++ JNI:

// Headers, LOG macros, and the extern "C" wrapper below were omitted from the
// snippet as posted; these are the usual ones for an Android JNI file.
#include <jni.h>
#include <android/log.h>
#include <algorithm>
#include <cstdlib>
#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/calib3d/calib3d.hpp>

#define LOGI(...) __android_log_print(ANDROID_LOG_INFO, "TCT", __VA_ARGS__)
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR, "TCT", __VA_ARGS__)

using namespace std;
using namespace cv;

extern "C" {

JNIEXPORT jint JNICALL Java_org_opencv_jc_tct_ImageInterface_ProcessImagesBase(JNIEnv* env, jobject, jlongArray traindataaddr);
JNIEXPORT void JNICALL Java_org_opencv_jc_tct_ImageInterface_ProcessImageFrame(JNIEnv*, jobject, jlong addrGray, jlong addrRgba);
JNIEXPORT void JNICALL Java_org_opencv_jc_tct_ImageInterface_TestData(JNIEnv*, jobject);

void trainDetector();

void trainExtractor();

void trainMatches(Mat& descriptors_scene,vector<vector<vector<DMatch> > >& matches);

void getGoodMatches(vector<vector<vector<DMatch> > >& matches, vector<vector<DMatch> >& tr_good_matches);

void perspectiveScene(vector<vector<DMatch> >& tr_good_matches, vector<KeyPoint>& keypoints_scene, Mat& img_scene);

const bool testing = false;
const int CORRECT  = 0;
const int FAIL     = 1;

static vector<vector<KeyPoint> > train_keypoints;
static vector<Mat> train_descriptors;
static vector<Mat> trainImages;

OrbFeatureDetector detector(400);
OrbDescriptorExtractor extractor;
FlannBasedMatcher matcher;
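
// Shared across all JNI calls. ORB is capped at 400 keypoints per image, and
// the FLANN matcher is created with its default float index, which is why
// trainMatches() converts the binary ORB descriptors to CV_32F before matching.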

JNIEXPORT jint JNICALL Java_org_opencv_jc_tct_ImageInterface_ProcessImagesBase(JNIEnv* env,jobject, jlongArray traindataaddr) {

    jint result = -1;

    jsize a_len = env->GetArrayLength(traindataaddr);
    jlong *traindata = env->GetLongArrayElements(traindataaddr,0);

    // Mat's copy constructor only bumps a refcount, so these copies are cheap,
    // but push_back into a shared vector is not thread-safe; the original
    // "#pragma omp parallel for" was dropped to keep this loop serial.
    for(int k = 0; k < a_len; k++)
    {
        Mat& newimage = *(Mat*)traindata[k];
        trainImages.push_back(newimage);
    }
    // do the required manipulation on the images;
    env->ReleaseLongArrayElements(traindataaddr,traindata,0);


    trainDetector();
    trainExtractor();

    if (!train_keypoints.empty()){
        LOGI("Created Keypoints!!!");
        result = CORRECT;
    }
    else{
        LOGE("Error creating the keypoints");
        result = FAIL;
    }
    return result;
}

JNIEXPORT void JNICALL Java_org_opencv_jc_tct_ImageInterface_ProcessImageFrame(JNIEnv*,
        jobject, jlong addrGray, jlong addrRgba) {

    Mat& img_scene = *(Mat*) addrGray;
    Mat& mRgb = *(Mat*) addrRgba;

    vector<KeyPoint> keypoints_scene;
    detector.detect(img_scene, keypoints_scene);

    Mat descriptors_scene;
    extractor.compute(img_scene, keypoints_scene, descriptors_scene);

    //-- Step 3: Matching descriptor vectors using FLANN matcher
    if(!descriptors_scene.empty()){
        vector<vector<vector<DMatch> > > matches;
        trainMatches(descriptors_scene, matches);

        if(!matches.empty()){
            LOGI("Matches [0]: %d",matches[0].size());
            vector<vector<DMatch> > tr_good_matches;
            getGoodMatches( matches, tr_good_matches);

            if(!tr_good_matches.empty()){
                LOGI("GOOD MATCHES FRAME size %d",tr_good_matches[0].size());
                perspectiveScene(tr_good_matches, keypoints_scene, img_scene);
            }else{
                LOGE("MATCHES FRAME emtpy!");
            }
        }else
            LOGE("MATCHES FRAME empty!");
    }else{
        LOGE("MAT Descriptor FRAME empty");
    }


    // Mat img_matches;
    // drawMatches( img_object, keypoints_object, img_scene, keypoints_scene,
    //              good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
    //              vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );


 }

JNIEXPORT void JNICALL Java_org_opencv_jc_tct_ImageInterface_TestData(JNIEnv*,jobject){

    if(!trainImages.empty())
        LOGI("TEST Trainimages %d",trainImages.size());
    else
        LOGI("TEST TrainImages empty");

}

void trainDetector() {
    LOGI("Train Detector");
    // note: push_back into a shared vector is not thread-safe, so the original
    // "#pragma omp parallel for" was dropped; the loop stays serial.
    for (int i = 0; i < (int)trainImages.size(); i++) {

        vector<KeyPoint> obj_kp;
        detector.detect(trainImages[i], obj_kp);

        if (obj_kp.empty() && testing) {
            LOGE("Error: There are no keypoints. Func: trainDetector");
        }
        // Push even when empty so that train_keypoints[i] stays aligned with
        // trainImages[i] (trainExtractor indexes both by i).
        train_keypoints.push_back(obj_kp);
    }

    LOGI("Trainimages size %d ", (int)trainImages.size());
}

void trainExtractor() {

    for (int i = 0; i < (int)trainImages.size(); i++) {

        // Use a stack Mat; the original "new Mat()" was never deleted.
        Mat obj_desc;
        extractor.compute(trainImages[i], train_keypoints[i], obj_desc);

        if (!obj_desc.empty()) {
            train_descriptors.push_back(obj_desc);
        } else if (testing) {
            // note: skipping an image here puts train_descriptors out of step
            // with trainImages/train_keypoints, which the later functions
            // index in parallel.
            LOGI("Error: Problem with descriptors. Func: trainExtractor");
        }
    }

    LOGE("Train descriptors: %d", (int)train_descriptors.size());
}

void getGoodMatches(vector<vector<vector<DMatch> > >& matches, vector<vector<DMatch> >& tr_good_matches) {
    for (int i = 0; i < (int)train_descriptors.size(); i++) {
        Mat obj_desc = train_descriptors[i];
        vector<DMatch> gm;

        for (int j = 0; j < min(obj_desc.rows - 1, (int)matches[i].size()); j++) {
            // Lowe's ratio test: the best match must be clearly better than the
            // second best. The original compared the two distances without a
            // ratio factor, which accepts almost every match.
            if (matches[i][j].size() >= 2
                    && matches[i][j][0].distance < 0.75f * matches[i][j][1].distance) {
                gm.push_back(matches[i][j][0]);
            }
        }

        tr_good_matches.push_back(gm);
    }
}

void perspectiveScene(vector<vector<DMatch> >& tr_good_matches, vector<KeyPoint>& keypoints_scene, Mat& img_scene) {
    LOGI("PERS FUNCTION");
    //-- Localize the object
    std::vector<Point2f> obj;
    std::vector<Point2f> scene;
    int R, G, B;
    for (int i = 0; i < tr_good_matches.size(); i++) {
        LOGI("PF: For train size[%d]: %d",i,tr_good_matches[i].size());
        obj.clear();
        scene.clear();
        if (tr_good_matches[i].size() >= 4) {
            for (int j = 0; j < tr_good_matches[i].size(); j++) {
                //-- Get the keypoints from the good matches
                obj.push_back(train_keypoints[i][tr_good_matches[i][j].queryIdx].pt);
                scene.push_back(keypoints_scene[tr_good_matches[i][j].trainIdx].pt);
            }
            LOGI("Obj size: %d, scene size: %d",obj.size(),scene.size());
            if (!obj.empty() && !scene.empty()) {
                LOGI("OBJ size: %d, scene size: %d",obj.size(),scene.size());

                Mat H = findHomography(obj, scene, CV_RANSAC);

                //-- Get the corners from the image_1 ( the object to be "detected" )
                vector<Point2f> obj_corners(4);
                obj_corners[0] = cvPoint(0, 0);
                obj_corners[1] = cvPoint(trainImages[i].cols, 0);
                obj_corners[2] = cvPoint(trainImages[i].cols,
                        trainImages[i].rows);
                obj_corners[3] = cvPoint(0, trainImages[i].rows);
                vector<Point2f> scene_corners(4);

                perspectiveTransform(obj_corners, scene_corners, H);
                //getPerspectiveTransform(obj_corners,scene_corners);
                R = rand() % 256;
                G = rand() % 256;
                B = rand() % 256;

                //-- Draw lines between the corners (the mapped object in the scene - image_2 )
                line(img_scene, scene_corners[0], scene_corners[1],
                        Scalar(R, G, B), 4);
                line(img_scene, scene_corners[1], scene_corners[2],
                        Scalar(R, G, B), 4);
                line(img_scene, scene_corners[2], scene_corners[3],
                        Scalar(R, G, B), 4);
                line(img_scene, scene_corners[3], scene_corners[0],
                        Scalar(R, G, B), 4);

            } else{
                LOGE("Error: Problem with goodmatches. Func: perspectiveScene.");
            }
        }
    }
}

void trainMatches( Mat& descriptors_scene, vector<vector<vector<DMatch> > >& matches) {

    for (int i = 0; i < train_descriptors.size(); i++) {
        vector<vector<DMatch> > obj_matches;
        Mat desc = train_descriptors[i];

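        // FLANN's default KD-tree index only works on float descriptors, so the
        // binary ORB descriptors are converted to CV_32F below; matching with
        // BFMatcher(NORM_HAMMING) would avoid this conversion entirely.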
        if(desc.type() != CV_32F){
            desc.convertTo(desc,CV_32F);
        }

        if(descriptors_scene.type() != CV_32F){
            descriptors_scene.convertTo(descriptors_scene,CV_32F);
        }

        matcher.knnMatch(desc, descriptors_scene, obj_matches, 2);

        if (!obj_matches.empty()) {
            matches.push_back(obj_matches);

        } else if (testing) {
            LOGE("Error: Problem with matches. Func: trainMatches");
        }
    }
}
}  // extern "C"
Can you tell me where I am going wrong?

Thanks for your help!

1 Answer:

Answer 0 (score: 0)

I tried multi-object detection on Android using a SURF detector, descriptor extractor, and FLANN-based matcher. I use JNI and it works, but the frame rate is less than 0.5 fps. I don't know how to improve the frame rate.

I put some images in a "detect" folder on the SD card. When I press Load in the menu, the images in the "detect" folder are loaded and their trainDescriptors are computed in a separate thread.

After the images are loaded, press Start in the menu to begin detecting objects.
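
One idea that may help the frame rate (an untested assumption on my part, not something the code below does): run detection on a downscaled copy of each frame and scale the detected corners back up before drawing, since SIFT/SURF detection cost grows with image area. A sketch, where kScale and the helper names are illustrative:

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>

using namespace cv;

// Tuning assumption: 0.5 halves each dimension, i.e. a 4x smaller image.
const float kScale = 0.5f;

// Shrink the grayscale frame before running detect/compute/match on it.
Mat shrinkFrame(const Mat& gray) {
    Mat small;
    resize(gray, small, Size(), kScale, kScale, INTER_AREA);
    return small;
}

// Scale the corners found on the small frame back to full-frame coordinates
// before drawing them with line().
void upscaleCorners(std::vector<Point2f>& corners) {
    for (size_t i = 0; i < corners.size(); i++) {
        corners[i].x /= kScale;
        corners[i].y /= kScale;
    }
}

Other common levers are capping the number of keypoints per frame and only matching every Nth frame.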

<强> processframe.cpp

#include <stdio.h>
#include<jni.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/features2d/features2d.hpp>
#include "opencv2/nonfree/nonfree.hpp"



using namespace std;
using namespace cv;

vector<Mat> trainImages;
Ptr<FeatureDetector> featureDetector=FeatureDetector::create("SIFT" );
 Ptr<DescriptorExtractor> descriptorExtractor=DescriptorExtractor::create("SIFT");
Ptr<DescriptorMatcher> descriptorMatcher=DescriptorMatcher::create("FlannBased");//new BFMatcher(NORM_HAMMING,true);//
vector<vector<KeyPoint> > trainKeypoints;
vector<Mat> trainDescriptors;
vector<Point2f> scene_corners(4);
vector<Point2f> obj_corner(4);
vector<KeyPoint> queryKeypoints;
Mat queryDescriptors ;
vector<vector<DMatch> > matches;
vector<DMatch > good_matches;
int imagefound=-1;
Mat H;
char buffer[50];

int reprojectionThreshold=3;

bool refineMatchesWithHomography(const vector<KeyPoint>& querykeypoints,
                                 const vector<vector<KeyPoint> >& trainkeypoints,
                                 const vector<Mat>& trainimages,  // kept for the existing call site
                                 float reprojectionthreshold,
                                 vector<DMatch>& goodmatches,
                                 int& imagefound, Mat& Homo)
{
    vector<Point2f> src;
    vector<Point2f> dst;

    if (goodmatches.size() < 8)
    {
        imagefound = -1;
        return false;
    }

    for (size_t i = 0; i < goodmatches.size(); i++)
    {
        DMatch imatch = goodmatches[i];
        src.push_back(trainkeypoints[imatch.imgIdx][imatch.trainIdx].pt);
        dst.push_back(querykeypoints[imatch.queryIdx].pt);
        // imgIdx already names the train image this match came from; the
        // original inner loop over trainimages could read past the end of
        // goodmatches, so record the index directly instead.
        imagefound = imatch.imgIdx;
    }

    vector<unsigned char> maskmatch(src.size());
    Homo = findHomography(src, dst, CV_RANSAC, reprojectionthreshold, maskmatch);

    // Keep only the RANSAC inliers.
    vector<DMatch> inliers;
    for (size_t i = 0; i < maskmatch.size(); i++)
    {
        if (maskmatch[i])
            inliers.push_back(goodmatches[i]);
    }
    goodmatches.swap(inliers);

    return goodmatches.size() > 8;
}

 extern "C" {

 JNIEXPORT jboolean JNICALL Java_com_example_cameraobjectdetection_loadImages_NativesendPaths(JNIEnv* env, jobject obj, jobjectArray stringArray);
 JNIEXPORT jint JNICALL Java_com_example_cameraobjectdetection_NativeFrameProcessor_process(JNIEnv* env, jobject obj, jlong mrgba, jlong mprocessed);

JNIEXPORT jboolean JNICALL Java_com_example_cameraobjectdetection_loadImages_NativesendPaths(JNIEnv* env, jobject obj, jobjectArray stringArray)
{
    int stringCount = env->GetArrayLength(stringArray);
    for (int i = 0; i < stringCount; i++) {
        jstring string = (jstring) env->GetObjectArrayElement(stringArray, i);
        const char* rawstring = env->GetStringUTFChars(string, 0);
        std::string s(rawstring);
        Mat img = imread(s);
        cvtColor(img, img, CV_BGR2GRAY);
        trainImages.push_back(img);
        env->ReleaseStringUTFChars(string, rawstring);
    }
    if (!trainImages.empty())
    {
        featureDetector->detect(trainImages, trainKeypoints);
        descriptorExtractor->compute(trainImages, trainKeypoints, trainDescriptors);
        descriptorMatcher->add(trainDescriptors);
        descriptorMatcher->train();
    }
    else
    {
        return false;
    }

    return true;
}

JNIEXPORT jint JNICALL Java_com_example_cameraobjectdetection_NativeFrameProcessor_process(JNIEnv* env, jobject obj, jlong mrgba, jlong mprocessed)
{
    Mat& mRgb = *(Mat*)mrgba;
    Mat& mProcess = *(Mat*)mprocessed;

    cvtColor(mRgb, mRgb, CV_BGR2GRAY);
    mProcess = mRgb;

    vector<KeyPoint> queryKeypoints;
    featureDetector->detect(mRgb, queryKeypoints);

    Mat queryDescriptors;
    descriptorExtractor->compute(mRgb, queryKeypoints, queryDescriptors);

    vector<vector<DMatch> > matches;
    descriptorMatcher->knnMatch(queryDescriptors, matches, 2);

    // Lowe's ratio test against the two nearest neighbours.
    vector<DMatch> good_matches;
    for (int i = 0; i < min(queryDescriptors.rows - 1, (int)matches.size()); i++)
    {
        if ((matches[i][0].distance < 0.6 * (matches[i][1].distance))
                && ((int)matches[i].size() <= 2 && (int)matches[i].size() > 0))
        {
            good_matches.push_back(matches[i][0]);
        }
    }

    bool foundhomography = refineMatchesWithHomography(queryKeypoints, trainKeypoints, trainImages,
                                                       reprojectionThreshold, good_matches, imagefound, H);

    if (foundhomography)
    {
        if (good_matches.size() > 8 && imagefound != -1)
        {
            obj_corner[0] = cvPoint(0, 0);
            obj_corner[1] = cvPoint(trainImages[imagefound].cols, 0);
            obj_corner[2] = cvPoint(trainImages[imagefound].cols, trainImages[imagefound].rows);
            obj_corner[3] = cvPoint(0, trainImages[imagefound].rows);

            perspectiveTransform(obj_corner, scene_corners, H);

            sprintf(buffer, "image is: %d", imagefound);
            putText(mProcess, buffer, Point(10, 20), FONT_HERSHEY_COMPLEX_SMALL, 1, Scalar(0, 255, 255));

            line(mProcess, scene_corners[0], scene_corners[1], Scalar(0, 255, 0), 4);
            line(mProcess, scene_corners[1], scene_corners[2], Scalar(0, 255, 0), 4);
            line(mProcess, scene_corners[2], scene_corners[3], Scalar(0, 255, 0), 4);
            line(mProcess, scene_corners[3], scene_corners[0], Scalar(0, 255, 0), 4);
        }
    }

    // The declared return type is jint; report which train image was found
    // (-1 when nothing matched) so the Java side can log it.
    return imagefound;
}
}  // extern "C"

<强> MainActivity.java

package com.example.cameraobjectdetection;

import org.opencv.android.BaseLoaderCallback;
import org.opencv.android.LoaderCallbackInterface;
import org.opencv.android.OpenCVLoader;
import org.opencv.android.CameraBridgeViewBase.CvCameraViewFrame;
import org.opencv.android.CameraBridgeViewBase.CvCameraViewListener2;
import org.opencv.core.Mat;
import android.app.Activity;
import android.os.Bundle;
import android.util.Log;
import android.view.Menu;
import android.view.MenuItem;
import android.view.SurfaceView;
import android.widget.SeekBar;
import android.widget.Toast;


public class MainActivity extends Activity implements CvCameraViewListener2 {

Zoomcameraview zoomcameraview;
loadImages loadimages;
boolean start;
NativeFrameProcessor nativeframeprocessor;
Mat mRgba;
Mat mProcessed;
boolean started=false;

static {
    try {
        // Load opencv_java and the nonfree module before any OpenCV classes
        // are touched; SIFT/SURF live in the nonfree library.
        System.loadLibrary("opencv_java");
        System.loadLibrary("nonfree");
    } catch (UnsatisfiedLinkError e) {
        System.err.println("Native code library failed to load.\n" + e);
    }
}

private BaseLoaderCallback mLoaderCallback = new BaseLoaderCallback(this) {
    @Override
    public void onManagerConnected(int status) {
        switch (status) {
            case LoaderCallbackInterface.SUCCESS:
                System.loadLibrary("process");
                zoomcameraview.enableFpsMeter();
                zoomcameraview.enableView();
                nativeframeprocessor=new NativeFrameProcessor();
                break;
            default:
                super.onManagerConnected(status);
                break;
        }
    }
};


@Override
protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);

    setContentView(R.layout.activity_main);
    zoomcameraview = (Zoomcameraview)findViewById(R.id.ZoomCameraView);
    zoomcameraview.setVisibility(SurfaceView.VISIBLE);
    zoomcameraview.setZoomControl((SeekBar) findViewById(R.id.CameraZoomControls));
    zoomcameraview.setCvCameraViewListener(this);
    loadimages=new loadImages(this);



}


@Override
public boolean onCreateOptionsMenu(Menu menu) {
    // Inflate the menu; this adds items to the action bar if it is present.
    getMenuInflater().inflate(R.menu.main, menu);
    return true;
}

@Override
public boolean onOptionsItemSelected(MenuItem item) {
    // Handle action bar item clicks here. The action bar will
    // automatically handle clicks on the Home/Up button, so long
    // as you specify a parent activity in AndroidManifest.xml.
    int id = item.getItemId();

    switch (id) {
    case R.id.load:
        loadimages.execute();

        break;
    case R.id.start:
        start=true;
        break;


    default:
        break;
    }
    return super.onOptionsItemSelected(item);
}



@Override
public void onPause()
{
    super.onPause();
    if (zoomcameraview!= null)
        zoomcameraview.disableView();
}

public void onDestroy() {
    super.onDestroy();
    if (zoomcameraview != null)
        zoomcameraview.disableView();
}

@Override
public void onResume()
{
    super.onResume();
    OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_3, this,mLoaderCallback );
}

@Override
public void onCameraViewStarted(int width, int height) {
    mRgba = new Mat();
    mProcessed = new Mat();
}

@Override
public void onCameraViewStopped() {
}
@Override
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
    mRgba = inputFrame.rgba();
    if (!start) {
        nativeframeprocessor.noprocessFrame(mRgba, mProcessed);
    } else {
        nativeframeprocessor.processFrame(mRgba, mProcessed);
    }
    return mProcessed;
}
}

<强> loadImages.java

package com.example.cameraobjectdetection;
import java.io.File;
import android.content.Context;
import android.os.AsyncTask;
import android.os.Environment;
import android.util.Log;
import android.widget.Toast; 
public class loadImages  extends AsyncTask<Void, Void, Void>{

File[] files;
Context context;
String[] paths;
boolean nativesent;
public loadImages(Context _context)
{
    context=_context;

    String ExternalStorageDirectoryPath = Environment.getExternalStorageDirectory().getAbsolutePath();
    String targetPath = ExternalStorageDirectoryPath + "/detect";

    File targetDirector = new File(targetPath);
    files = targetDirector.listFiles();
    // listFiles() returns null when the "detect" folder does not exist.
    if (files == null) files = new File[0];
    paths = new String[files.length];

}

@Override
protected Void doInBackground(Void... params) {
    int i = 0;
    for (File file : files) {
        Log.d("images", file.getAbsolutePath());
        paths[i] = file.getAbsolutePath();
        i++;
    }

    nativesent=NativesendPaths(paths);
     Log.d("images pathsent",String.valueOf(nativesent));

    return null;
}

@Override
   protected void onPostExecute(Void result) {
      super.onPostExecute(result);
      Log.d("images","loaded");
      if(nativesent)
        Toast.makeText(context,"Loaded",Toast.LENGTH_LONG).show();

   }


 public native boolean NativesendPaths(String[] paths);

 }

<强> NativeFrameProcessor.java

package com.example.cameraobjectdetection;
import org.opencv.core.Mat;
import android.util.Log;
public class NativeFrameProcessor 
{

public NativeFrameProcessor()
{

}
public void processFrame( Mat mRgba, Mat mProcessed)
{

  int image=process(mRgba.getNativeObjAddr(), mProcessed.getNativeObjAddr());   
    if(image!=-1)
    {
        Log.d("Image found","Image is:"+String.valueOf(image));
    }


}


public void noprocessFrame(Mat mRgba, Mat mProcessed)
{
    mRgba.copyTo(mProcessed);   
}

 public native int process(long matAddrRgba, long matAddrProcessed);

 }