如何将3D模型放置在人脸地标的顶部,例如人脸滤镜应用程序?

时间:2019-03-26 18:47:30

标签: c++ opencv computer-vision augmented-reality openscenegraph

我正在使用两个库:用于计算机视觉功能的OpenCV和用于计算机图形功能的OpenSceneGraph,因为该软件的主要目的是增强现实——创建一个类似于Snapchat中的面部滤镜。到目前为止,我已经完成了面部标志检测(计算机视觉部分),并借助OpenSceneGraph的函数将3D模型叠加到OpenCV的相机画面上。问题是:当我试图将3D模型放置在人脸地标上时,由于模型的坐标系与OpenCV人脸地标的坐标系不同,因此无法正常工作。那么,有什么方法可以将模型精确地放置在面部标志之上?

我尝试根据面部界标的坐标点来更改模型的位置,但是没有成功——即使我将坐标除以10或20也是如此,因为面部界标坐标的数值尺度与模型位置的数值尺度相差很大。请注意,模型的位置有三个坐标分量:x、y和z,而面部界标的点只有x和y。

main.cpp

#include <iostream>
#include <osgViewer/Viewer>
#include <osgDB/ReadFile>
#include <osg/PositionAttitudeTransform>

#include "opencv2/highgui/highgui.hpp"
#include "opencv2/core/core.hpp"
#include "OpenCVFuncs.hpp"

#include "/home/bardawil/Desktop/OSG-OpenCV-ARDemo/include/BackgroundCamera.h"
#include "/home/bardawil/Desktop/OSG-OpenCV-ARDemo/include/VirtualCamera.h"

using namespace cv;
using namespace cv::face;
using namespace std;

// Initialization: global state shared between setup (in main) and the render loop.

    // ** OSG Stuff **
    // Window and background-texture dimensions; assigned concrete values in main().
    int screenWidth, screenHeight, textureWidth, textureHeight;
    // Create viewer
    osgViewer::Viewer viewer;
    // Main Camera (the viewer's own camera, later driven by VirtualCamera)
    osg::ref_ptr<osg::Camera>  camera = viewer.getCamera();
    // Background-Camera (OpenCV Feed) — renders the webcam frame behind the 3D scene
    BackgroundCamera bgCamera;
    // Load glass Model as Example Scene
    // NOTE(review): readNodeFile returns null if the .obj is missing — not checked anywhere.
    osg::ref_ptr<osg::Node> glassModel = osgDB::readNodeFile("priestene test.obj");
    // Model position initial value (hand-tuned; not derived from landmarks yet)
    osg::Vec3 modelPosition(0, 100, 10);
    // Model scale initial value (uniform 150x on all axes)
    osg::Vec3 modelScale(150, 150, 150);

    // ** OpenCV Stuff **
    // Video Capture initialization (from desktop camera, device index 0)
    cv::VideoCapture cap(0);

    // Scratch buffer for the grayscale copy of each frame (reused every iteration)
    Mat gray;

    // Load Face Detector (Haar cascade, frontal faces)
    CascadeClassifier faceDetector("/home/bardawil/Desktop/OSG-OpenCV-ARDemo/haarcascade_frontalface_alt2.xml");

    // Create an instance of Facemark (LBF landmark detector; model loaded in main)
    Ptr<Facemark> facemark = FacemarkLBF::create();

    // Project-defined struct carrying the current frame; presumably consumed by
    // OpenCVFuncs helpers — TODO confirm against OpenCVFuncs.hpp.
    struct faceParams faceStruct;
    // struct eyesLM eyes;



int main( int argc, char** argv )
{
    // Load the trained LBF landmark model before entering the render loop.
    facemark->loadModel("/home/bardawil/Desktop/OSG-OpenCV-ARDemo/lbfmodel.yaml");

    screenWidth = 640;
    screenHeight = 480;

    textureWidth = 640;
    textureHeight = 480;

    // OSG: open a windowed viewer matching the camera-feed resolution.
    viewer.setUpViewInWindow(50, 50, screenWidth, screenHeight);

    // Virtual camera wrapping the viewer's camera; its pose is meant to be
    // driven by the AR pose estimation (currently fed constants below).
    VirtualCamera* vCamera = new VirtualCamera(camera);

    // Orthographic camera that textures the OpenCV frame behind the scene.
    osg::Camera* backgroundCamera = bgCamera.createCamera(textureWidth, textureHeight);

    osg::Group* glassesGroup = new osg::Group();
    // Transform node controlling position/scale of the glasses model.
    osg::PositionAttitudeTransform* position = new osg::PositionAttitudeTransform();

    glassesGroup->addChild(position);
    position->addChild(glassModel);

    // Set Position of Model
    position->setPosition(modelPosition);

    // Set Scale of Model
    position->setScale(modelScale);

    // Scene graph: background (video quad) in render bin 1, model in bin 2, so
    // the model is always drawn on top of the camera feed.
    osg::ref_ptr<osg::Group> group = new osg::Group;
    osg::Node* background = backgroundCamera;
    osg::Node* foreground = glassesGroup;
    background->getOrCreateStateSet()->setRenderBinDetails(1, "RenderBin");
    foreground->getOrCreateStateSet()->setRenderBinDetails(2, "RenderBin");
    group->addChild(background);
    group->addChild(foreground);
    // The video quad must ignore depth or it would occlude the 3D model.
    background->getOrCreateStateSet()->setMode(GL_DEPTH_TEST, osg::StateAttribute::OFF);
    foreground->getOrCreateStateSet()->setMode(GL_DEPTH_TEST, osg::StateAttribute::ON);

    // Hand the assembled scene graph to the viewer.
    viewer.setSceneData(group.get());

    if (!cap.isOpened())
    {
        std::cout << "Webcam cannot open!\n";
        // BUGFIX: return a nonzero exit code on failure (was 0 == success).
        return 1;
    }

    while (!viewer.done())
    {
        cv::Mat frame;
        // BUGFIX: grab the frame *before* publishing it. The original assigned
        // the still-empty Mat to faceStruct.frame and only then called
        // cap.read(frame); cap.read reallocates the Mat's buffer, so
        // faceStruct.frame kept referencing the empty pre-read data.
        // Also skip failed/empty grabs — cvtColor throws on an empty Mat.
        if (!cap.read(frame) || frame.empty())
            continue;
        faceStruct.frame = frame;

        vector<Rect> faces;

        // Convert frame to grayscale because faceDetector requires a
        // grayscale image.
        cvtColor(frame, gray, COLOR_BGR2GRAY);

        // Detect faces (scale step 1.05, 6 neighbours, min size 30x30).
        faceDetector.detectMultiScale(gray, faces, 1.05, 6, 0 | CASCADE_SCALE_IMAGE, Size(30, 30));

        // BUGFIX: only run the landmark fit when at least one face was found;
        // fitting against an empty face list is wasted work per frame.
        if (!faces.empty())
        {
            vector< vector<Point2f> > landmarks;

            // Run landmark detector on the detected face rectangles.
            bool success = facemark->fit(frame, faces, landmarks);

            if (success)
            {
                // eyes = drawLandmarks(frame, landmarks[0]);
                // Position parameters: Roll, Pitch, Heading, X, Y, Z.
                // TODO: derive these from landmarks (e.g. via cv::solvePnP)
                // instead of constants — this is the open question of the post.
                vCamera->updatePosition(0, 0, 0, 0, 0, 0);
            }
        }

        // Upload the new frame as the background texture and render one frame.
        bgCamera.update(frame);
        viewer.frame();
    }
    return 0;
}

0 个答案:

没有答案