How to run a C++ file from NodeJS

Date: 2016-03-28 16:15:37

Tags: android c++ json opencv express

I have a small C++ program that can recognize objects in an image. I also have a REST API (developed with express.js running on NodeJS) that downloads images sent from a mobile device into a specific folder.

The idea is to link the C++ program (main.cpp) with NodeJS. That is, when an image is sent to the server and downloaded, NodeJS must run the C++ program (main.cpp) to identify the objects in the image sent by the mobile device, and then send the response back to the mobile device as the same image marked up with the objects that were found.
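What I imagine on the Node side looks roughly like this — a sketch only, not working code. It assumes the C++ program has been compiled to a command-line binary (here called "detector", a placeholder) that takes an object image, a scene image, and an output path; the `/uploads` route and the multipart field name "image" come from the Android code further below, everything else (folders, port) is made up:

    // server.ts — sketch: "detector" is a placeholder for the compiled C++
    // binary, assumed to take <object_img> <scene_img> <output_img> and
    // write the annotated result image to <output_img>.
    import express from "express";
    import multer from "multer";
    import { execFile } from "child_process";
    import * as path from "path";

    const app = express();
    // Store the multipart "image" field (the field name the Android client uses) in ./uploads
    const upload = multer({ dest: "uploads/" });

    app.post("/uploads", upload.single("image"), (req, res) => {
        if (!req.file) {
            return res.status(400).json({ response: "no image received" });
        }
        const sceneImg = req.file.path;            // the image sent by the mobile device
        const objectImg = "reference/object.jpg";  // hypothetical reference image
        const resultImg = path.join("results", req.file.filename + ".jpg"); // assumes ./results exists

        // Run the compiled C++ program as a child process.
        execFile("./detector", [objectImg, sceneImg, resultImg], (err) => {
            if (err) {
                return res.status(500).json({ response: "detection failed" });
            }
            // The Android client below parses a JSON body with a "response"
            // key, so reply with JSON rather than with the raw image.
            res.json({ response: "objects detected, result saved as " + resultImg });
        });
    });

    app.listen(3000);

To send the annotated picture itself back instead, res.sendFile(path.resolve(resultImg)) could replace the res.json call, but then the client code below would have to stop parsing the body as JSON.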

Thanks for your help.

The C++ code:

// main.cpp
// NOTE: this entry point launches a Qt GUI window, which will not run on a
// headless server; for the NodeJS use case a command-line entry point that
// takes image paths as arguments would be needed instead. Only the headers
// main.cpp actually uses are kept; the OpenCV headers belong in mainwindow.cpp.
#include "mainwindow.h"
#include <QApplication>

int main(int argc, char *argv[])
{
    QApplication a(argc, argv);
    MainWindow w;
    w.show();

    return a.exec();
}

Here is the processing class, mainwindow.cpp:

#include "mainwindow.h"
#include "ui_mainwindow.h"
#include "opencv2/opencv.hpp"
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include <opencv2/features2d.hpp>
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/xfeatures2d/nonfree.hpp"
#include "opencv2/xfeatures2d.hpp"
#include "opencv2/imgcodecs.hpp"
using namespace cv;
void readme();

MainWindow::MainWindow(QWidget *parent) :
    QMainWindow(parent),
    ui(new Ui::MainWindow)
{
    ui->setupUi(this);


    // NOTE: hard-coded test paths; for server use they would have to come from argv
    cv::Mat img_object = cv::imread("/home/emile/Pictures/jeux.jpg");
    cv::Mat img_scene = cv::imread("/home/emile/Pictures/jeux_vectorss.jpg");
    int minHessian = 400;
    // minHessian is a SURF parameter (SIFT::create's first argument is the max
    // feature count, not a Hessian threshold), so SURF matches the intent here,
    // as in the OpenCV features2d/homography tutorial this code follows.
    cv::Ptr<Feature2D> detector = xfeatures2d::SURF::create(minHessian);

    std::vector<KeyPoint> keypoints_object, keypoints_scene;
    detector->detect(img_object, keypoints_object);
    detector->detect(img_scene, keypoints_scene);

    //-- Step 2: Calculate descriptors (feature vectors)

    Mat descriptors_object, descriptors_scene;

    detector->compute( img_object, keypoints_object, descriptors_object );
    detector->compute( img_scene, keypoints_scene, descriptors_scene );

    //-- Step 3: Matching descriptor vectors using FLANN matcher
    FlannBasedMatcher matcher;
    std::vector< DMatch > matches;
    matcher.match( descriptors_object, descriptors_scene, matches );

    double max_dist = 0; double min_dist = 100;

    //-- Quick calculation of max and min distances between keypoints
    for( int i = 0; i < descriptors_object.rows; i++ )
    { double dist = matches[i].distance;
      if( dist < min_dist ) min_dist = dist;
      if( dist > max_dist ) max_dist = dist;
    }

    printf("-- Max dist : %f \n", max_dist );
    printf("-- Min dist : %f \n", min_dist );

    //-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
    //BFMatcher good_matcher;
    std::vector< DMatch > good_matches;

    for( int i = 0; i < descriptors_object.rows; i++ )
    { if( matches[i].distance < 3*min_dist )
       { good_matches.push_back( matches[i]); }
    }

    Mat img_matches;
    std::vector<char> vec;
    drawMatches( img_object, keypoints_object, img_scene, keypoints_scene,
                 good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
                 vec, DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

    //-- Localize the object
    std::vector<Point2f> obj;
    std::vector<Point2f> scene;

    for( size_t i = 0; i < good_matches.size(); i++ )
    {
      //-- Get the keypoints from the good matches
      obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
      scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
    }

    Mat H = findHomography( obj, scene, RANSAC ); // cv::RANSAC; CV_RANSAC is the legacy C-API name

    //-- Get the corners from the image_1 ( the object to be "detected" )
    std::vector<Point2f> obj_corners(4);
    obj_corners[0] = Point2f( 0, 0 );                             obj_corners[1] = Point2f( img_object.cols, 0 );
    obj_corners[2] = Point2f( img_object.cols, img_object.rows ); obj_corners[3] = Point2f( 0, img_object.rows );
    std::vector<Point2f> scene_corners(4);

    perspectiveTransform( obj_corners, scene_corners, H);

    //-- Draw lines between the corners (the mapped object in the scene - image_2 )
    line( img_matches, scene_corners[0] + Point2f( img_object.cols, 0), scene_corners[1] + Point2f( img_object.cols, 0), Scalar(0, 255, 0), 4 );
    line( img_matches, scene_corners[1] + Point2f( img_object.cols, 0), scene_corners[2] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
    line( img_matches, scene_corners[2] + Point2f( img_object.cols, 0), scene_corners[3] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
    line( img_matches, scene_corners[3] + Point2f( img_object.cols, 0), scene_corners[0] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );

    //-- Show detected matches (GUI display; on a headless server this would be
    //   replaced with imwrite() so the annotated image can be sent to the client)
    imshow( "Object detection", img_matches );

    waitKey(0);
}
void readme()
{ std::cout << " Usage: ./SURF_descriptor <img1> <img2>" << std::endl; }
MainWindow::~MainWindow()
{
    delete ui;
}
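
An alternative to spawning a separate binary would be to compile the matching code above (with the Qt window, imshow and waitKey removed) into a native Node addon using node-gyp and node-addon-api, so NodeJS can call into the C++ directly. Usage would look roughly like this — the addon file and its detect() function are hypothetical, nothing here exists yet:

    // Sketch only: "detector.node" and detect() are hypothetical — they assume
    // the OpenCV code above has been wrapped with node-addon-api and built by
    // node-gyp into ./build/Release/detector.node.
    const detector = require("./build/Release/detector.node");

    // Hypothetical call: takes the object and scene image paths, returns the
    // path of the annotated result image.
    const resultPath: string = detector.detect(
        "reference/object.jpg",
        "uploads/scene.jpg"
    );
    console.log("annotated image written to", resultPath);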

The Android client, MainActivity.java:

// Imports the class needs (Ion is the koushikdutta ion networking library)
import android.app.Activity;
import android.content.ActivityNotFoundException;
import android.content.CursorLoader;
import android.content.Intent;
import android.database.Cursor;
import android.net.Uri;
import android.os.Bundle;
import android.provider.MediaStore;
import android.util.Log;
import android.view.View;
import android.widget.Button;
import android.widget.ImageView;
import android.widget.Toast;

import com.koushikdutta.async.future.Future;
import com.koushikdutta.async.future.FutureCallback;
import com.koushikdutta.ion.Ion;
import com.koushikdutta.ion.Response;

import org.json.JSONException;
import org.json.JSONObject;

import java.io.File;

public class MainActivity extends Activity {
    Button imgsel,upload;
    ImageView img;
    String path;


    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);
        img = (ImageView)findViewById(R.id.img);
        Ion.getDefault(this).configure().setLogging("ion-sample", Log.DEBUG);
        imgsel = (Button)findViewById(R.id.selimg);
        upload =(Button)findViewById(R.id.uploadimg);
        upload.setVisibility(View.INVISIBLE);



        upload.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                File f = new File(path);


                Future<Response<String>> uploading = Ion.with(MainActivity.this)
                        .load("http://xxx.xxx.xx.xxx:xxxx/uploads")
                        .setMultipartFile("image", f)
                        .asString()
                        .withResponse()
                        .setCallback(new FutureCallback<Response<String>>() {
                            @Override
                            public void onCompleted(Exception e, Response<String> result) {
                                if (e != null) { // upload failed before a response arrived
                                    e.printStackTrace();
                                    return;
                                }
                                try {
                                    JSONObject jobj = new JSONObject(result.getResult());
                                    Toast.makeText(getApplicationContext(), jobj.getString("response"), Toast.LENGTH_SHORT).show();
                                } catch (JSONException e1) {
                                    e1.printStackTrace();
                                }
                            }
                        });
            }

        });

        imgsel.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                Intent fintent = new Intent(Intent.ACTION_GET_CONTENT);
                fintent.setType("image/*");
                try {
                    startActivityForResult(fintent, 100);
                } catch (ActivityNotFoundException e) {

                }
            }
        });
    }
    @Override
    protected void onActivityResult(int requestCode, int resultCode, Intent data) {
        if (data == null)
            return;
        switch (requestCode) {
            case 100:
                if (resultCode == RESULT_OK) {
                    path = getPathFromURI(data.getData());
                    img.setImageURI(data.getData());
                    upload.setVisibility(View.VISIBLE);

                }
        }
    }
    private String getPathFromURI(Uri contentUri) {
        String[] proj = { MediaStore.Images.Media.DATA };
        CursorLoader loader = new CursorLoader(getApplicationContext(), contentUri, proj, null, null, null);
        Cursor cursor = loader.loadInBackground();
        int column_index = cursor.getColumnIndexOrThrow(MediaStore.Images.Media.DATA);
        cursor.moveToFirst();
        String imagePath = cursor.getString(column_index);
        cursor.close(); // close the cursor instead of leaking it
        return imagePath;
    }

}

0 Answers:

No answers yet.