如何使用OpenCV在Android中修复“仅黑框接收”

时间:2019-05-25 08:13:36

标签: java android c++ opencv java-native-interface

我正在为使用python和openCV的移动应用程序开发类似于inkHunter的增强现实功能。尽管有一些过大的技巧,但代码仍然可以按我预期的那样很好地工作。我需要制作一个android应用程序,我知道我需要将该python代码转换为C ++并使用ndk在android中运行它,因为它具有实时过程。我能够将openCV库加载到我的android项目中,并在本机类和MainActivity之间传递数据。然后,我将python代码转换为C ++(我不太熟悉),然后运行了该项目。但这只会给我黑框。该程序未显示任何错误,但我没有得到预期的输出。

我正在尝试使用Android Studio 3.3.2OpenCV4Android 4.1.0,我使用templateMatching方法从捕获的帧中检测输入模板,然后使用alpha混合将png粘贴到检测到的区域,最后将该区域添加到单应性框架。

这是我的代码,

MainActivity.java

public class MainActivity extends AppCompatActivity implements CameraBridgeViewBase.CvCameraViewListener2 {



    private static String TAG = "MainActivity";
    private JavaCameraView javaCameraView;

    // Used to load the 'native-lib' library on application startup.
    static {
        System.loadLibrary("native-lib");
        System.loadLibrary("opencv_java4");
    }

    private Mat mRgba;



    BaseLoaderCallback mLoaderCallback = new BaseLoaderCallback(this) {
        @Override
        public void onManagerConnected(int status) {
            switch(status){
                case BaseLoaderCallback.SUCCESS:{
                    javaCameraView.enableView();
                    break;
                }
                default:{
                    super.onManagerConnected(status);
                    break;
                }
            }
        }
    };

    private Mat temp, tattoo;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);

        javaCameraView = (JavaCameraView)findViewById(R.id.java_camera_view);
        javaCameraView.setVisibility(SurfaceView.VISIBLE);
        javaCameraView.setCvCameraViewListener(this);

        AssetManager assetManager = getAssets();

        try {
            InputStream is = assetManager.open("temp.jpg");
            Bitmap bitmap = BitmapFactory.decodeStream(is);
            Bitmap bmp32 = bitmap.copy(Bitmap.Config.ARGB_8888, true);
            temp = new Mat(bitmap.getHeight(), bitmap.getWidth(), CvType.CV_8UC4);
            Utils.bitmapToMat(bmp32, temp);

        } catch (IOException e) {
            e.printStackTrace();
        }

        try {
            InputStream isTattoo = assetManager.open("tattoo2.png");
            Bitmap bitmapTattoo = BitmapFactory.decodeStream(isTattoo);
            Bitmap bmp32Tattoo = bitmapTattoo.copy(Bitmap.Config.ARGB_8888, true);
            tattoo = new Mat(bitmapTattoo.getHeight(), bitmapTattoo.getWidth(), CvType.CV_8UC4);
            Utils.bitmapToMat(bmp32Tattoo, tattoo);

        } catch (IOException e) {
            e.printStackTrace();
        }




    }

    @Override
    protected void onPause(){
        super.onPause();
        if(javaCameraView != null){
            javaCameraView.disableView();
        }
    }

    @Override
    protected void onDestroy(){
        super.onDestroy();
        if(javaCameraView != null){
            javaCameraView.disableView();
        }
    }

    @Override
    protected void onResume(){
        super.onResume();
        if(OpenCVLoader.initDebug()){
            Log.i(TAG, "OpenCV Loaded successfully ! ");
            mLoaderCallback.onManagerConnected(LoaderCallbackInterface.SUCCESS);
        }else{
            Log.i(TAG, "OpenCV not loaded ! ");
            OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION, this, mLoaderCallback);
        }
    }

    @Override
    public void onCameraViewStarted(int width, int height) {
        mRgba = new Mat(height, width, CvType.CV_8UC4);

    }

    @Override
    public void onCameraViewStopped() {
        mRgba.release();

    }

    @Override
    public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
        mRgba = inputFrame.rgba();

        augmentation(mRgba.getNativeObjAddr(), temp.getNativeObjAddr(), tattoo.getNativeObjAddr());

        return mRgba;
    }


    public native void augmentation(long matAddrRgba, long tempC, long tattooDesign);
}

native-lib.cpp

#include <jni.h>
#include <string>
#include <opencv2/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/opencv.hpp>

using namespace cv;
using namespace std;

extern "C" {



// Alpha Blending using direct pointer access
Mat& alphaBlendDirectAccess(Mat& alpha, Mat& foreground, Mat& background, Mat& outImage)
{

    int numberOfPixels = foreground.rows * foreground.cols * foreground.channels();

    float* fptr = reinterpret_cast<float*>(foreground.data);
    float* bptr = reinterpret_cast<float*>(background.data);
    float* aptr = reinterpret_cast<float*>(alpha.data);
    float* outImagePtr = reinterpret_cast<float*>(outImage.data);

    int i,j;
    for ( j = 0; j < numberOfPixels; ++j, outImagePtr++, fptr++, aptr++, bptr++)
    {
        *outImagePtr = (*fptr)*(*aptr) + (*bptr)*(1 - *aptr);
    }

    return outImage;
}


Mat& alphaBlend(Mat& foreg, Mat& backgg)
{

    // Read background image
    Mat background = backgg;// cropped frame
    Size sizeBackground = background.size();

    // Read in the png foreground asset file that contains both rgb and alpha information
    // Mat foreGroundImage = imread("foreGroundAssetLarge.png", -1); //resized tattoo
    Mat foreGroundImage = foreg;
    // resize the foreGroundImage to background image size
    resize(foreGroundImage, foreGroundImage, Size(sizeBackground.width,sizeBackground.height));
    Mat bgra[4];
    split(foreGroundImage, bgra);//split png foreground

    // Save the foregroung RGB content into a single Mat
    vector<Mat> foregroundChannels;
    foregroundChannels.push_back(bgra[0]);
    foregroundChannels.push_back(bgra[1]);
    foregroundChannels.push_back(bgra[2]);
    Mat foreground = Mat::zeros(foreGroundImage.size(), CV_8UC3);
    merge(foregroundChannels, foreground);

    // Save the alpha information into a single Mat
    vector<Mat> alphaChannels;
    alphaChannels.push_back(bgra[3]);
    alphaChannels.push_back(bgra[3]);
    alphaChannels.push_back(bgra[3]);
    Mat alpha = Mat::zeros(foreGroundImage.size(), CV_8UC3);
    merge(alphaChannels, alpha);



    // Convert Mat to float data type
    foreground.convertTo(foreground, CV_32FC3);
    background.convertTo(background, CV_32FC3);
    alpha.convertTo(alpha, CV_32FC3, 1.0/255); // keeps the alpha values betwen 0 and 1

    // Number of iterations to average the performane over
    int numOfIterations = 1; //1000;



    // Alpha blending using direct Mat access with for loop
    Mat outImage = Mat::zeros(foreground.size(), foreground.type());

    for (int i=0; i<numOfIterations; i++) {
        outImage = alphaBlendDirectAccess(alpha, foreground, background, outImage);
    }

    imshow("alpha blended image", outImage/255);
    outImage = outImage/255;
    outImage.convertTo(outImage, CV_8U); // Convert float to Mat data type

    return outImage;
}




Mat& applyHomography(Mat& convertedOutImage, Mat& initialFrame, int startX, int startY, int endX, int endY)
{

    struct userdata{
        Mat im;
        vector<Point2f> points;
    };

    // Read in the image.
    Mat im_src = convertedOutImage;
    Size size = im_src.size();

    // Create a vector of points.
    vector<Point2f> pts_src;
    pts_src.push_back(Point2f(0,0));
    pts_src.push_back(Point2f(size.width - 1, 0));
    pts_src.push_back(Point2f(size.width - 1, size.height -1));
    pts_src.push_back(Point2f(0, size.height - 1 ));



    // Destination image
    Mat im_dst = initialFrame;
    vector<Point2f> pts_dst;
    pts_dst.push_back(Point2f(startX, startY));
    pts_dst.push_back(Point2f(endX, startY));
    pts_dst.push_back(Point2f(endX, endY));
    pts_dst.push_back(Point2f(startX, endY));


    Mat im_temp = im_dst.clone();


    // Calculate Homography between source and destination points
    Mat h = findHomography(pts_src, pts_dst);

    // Warp source image
    warpPerspective(im_src, im_temp, h, im_dst.size());


    // Black out polygonal area in destination image.
    fillConvexPoly(im_dst, pts_dst, Scalar(0), LINE_AA);

    // Add warped source image to destination image.
    im_dst = im_dst + im_temp;



    return im_dst;
}


JNIEXPORT void JNICALL
Java_com_example_inkmastertest_MainActivity_augmentation(JNIEnv *env, jobject, jlong addrRgba, jlong tempC, jlong tattooDesign);

JNIEXPORT void JNICALL
Java_com_example_inkmastertest_MainActivity_augmentation(JNIEnv *env, jobject, jlong addrRgba, jlong tempC, jlong tattooDesign) {

    Mat& img = *(Mat*)addrRgba;
    Mat target_img = img.clone();

    Mat& template1 = *(Mat*)tempC;
    Mat& tattooDes = *(Mat*)tattooDesign;


    // Contains the description of the match
    typedef struct Match_desc{
        bool init;
        double maxVal;
        Point maxLoc;
        double scale;
        Match_desc(): init(0){}
    } Match_desc;

    Mat template_mat;
    template_mat = template1; // Read image
    cvtColor(template_mat, template_mat, COLOR_BGR2GRAY); // Convert to Gray
    Canny(template_mat, template_mat, 50, 50*4); // Find edges


    // Find size
    int tW, tH;
    tW = template_mat.cols;
    tH = template_mat.rows;



    Mat target_gray, target_resized, target_edged;

    cvtColor(target_img, target_gray, COLOR_BGR2GRAY); // Convert to Gray

    const float SCALE_START = 1;
    const float SCALE_END = 0.2;
    const int SCALE_POINTS = 20;

    Match_desc found;
    for(float scale = SCALE_START; scale >= SCALE_END; scale -= (SCALE_START - SCALE_END)/SCALE_POINTS){
        resize(target_gray, target_resized, Size(0,0), scale, scale);// Resize

        // Break if target image becomes smaller than template
        if(tW > target_resized.cols || tH > target_resized.rows) break;


        Canny(target_resized, target_edged, 50, 50*4); // Find edges

        // Match template
        Mat result;
        matchTemplate(target_edged, template_mat, result, TM_CCOEFF);

        double maxVal; Point maxLoc;
        minMaxLoc(result, NULL, &maxVal, NULL, &maxLoc);

        // If better match found
        if( found.init == false || maxVal > found.maxVal ){
            found.init = true;
            found.maxVal = maxVal;
            found.maxLoc = maxLoc;
            found.scale = scale;
        }


    }

    int startX, startY, endX, endY;
    startX = found.maxLoc.x / found.scale;
    startY = found.maxLoc.y / found.scale;

    endX= (found.maxLoc.x + tW) / found.scale;
    endY= (found.maxLoc.y + tH) / found.scale;

    // draw a bounding box around the detected result and display the image
    rectangle(target_img, Point(startX, startY), Point(endX, endY), Scalar(0, 0, 255), 3);


    Rect myROI(startX, startY, endX, endY);
    Mat cropped = target_img(myROI);

    Mat alphaBlended = alphaBlend(tattooDes , cropped);
    Mat homographyApplied = applyHomography(alphaBlended, target_img, startX, startY, endX, endY);

    img = homographyApplied;


}





}

如果我可以跳过单应性会更好,但是我不知道如何对两种不同尺寸的图像进行alpha混合。 我的预期输出是显示输入检测到的模板区域上的png(tattoo2.png)。如果您能帮助我,我将不胜感激。请让我知道是否需要提及其他任何内容。谢谢。

0 个答案:

没有答案