Face recognition from video and comparison with an image using Emgu 3.2.0

Date: 2017-08-18 14:31:01

Tags: c# .net image-processing face-recognition emgucv

Dear developers, this is my first time asking a question here.

Last week my supervisor gave me a task: develop software that compares the face of a visitor appearing on a camera with the photo stored on their membership card. After digging into it, it seems impossible to do this with only one photo, but that is not the point. I even wrote working code using C# and the Emgu library, but the problem is accuracy. I have tried all of EMGU.CV.EigenFaceRecognizer, EMGU.CV.FisherFaceRecognizer and EMGU.CV.LBPHFaceRecognizer, and all of them produce many misidentifications. After I loaded 10 photos of my European self and 10 photos of my Chinese classmate, the program mostly showed my name for both of us; only the Eigen recognizer occasionally flickered and showed both our names for him. So far I have fixed every bug and error myself, but now there are no errors left and I still don't know what to do. Thank you for your attention.
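
To make the accuracy problem measurable, I also started dumping the raw Predict() distances instead of only the winning names. Below is a minimal, self-contained sketch of that probe; the TestFaces folders and the LoadFaces helper are placeholders invented for this post, not part of the actual project:

using System;
using System.IO;
using System.Linq;
using Emgu.CV;
using Emgu.CV.Face;
using Emgu.CV.Structure;

class RecognizerProbe
{
    //Load every jpg in a folder with the same preprocessing as the main pipeline
    static Image<Gray, byte>[] LoadFaces(string folder)
    {
        return Directory.GetFiles(folder, "*.jpg")
            .Select(f =>
            {
                Image<Gray, byte> face = new Image<Gray, byte>(f).Resize(100, 100, Emgu.CV.CvEnum.Inter.Cubic);
                face._EqualizeHist();
                return face;
            })
            .ToArray();
    }

    static void Main()
    {
        //Two hypothetical folders with pre-cropped faces, 10 images each
        Image<Gray, byte>[] personA = LoadFaces("TestFaces\\personA");
        Image<Gray, byte>[] personB = LoadFaces("TestFaces\\personB");

        Image<Gray, byte>[] images = personA.Concat(personB).ToArray();
        int[] labels = Enumerable.Repeat(0, personA.Length)
            .Concat(Enumerable.Repeat(1, personB.Length)).ToArray();

        FaceRecognizer recognizer = new EigenFaceRecognizer(80, double.PositiveInfinity);
        recognizer.Train(images, labels);

        //Print the true label, predicted label and distance for every image;
        //seeing how the two clusters separate is what makes a sensible Eigen_threshold apparent
        for (int i = 0; i < images.Length; i++)
        {
            FaceRecognizer.PredictionResult r = recognizer.Predict(images[i]);
            Console.WriteLine("true=" + labels[i] + " predicted=" + r.Label + " distance=" + (int)r.Distance);
        }
    }
}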

Here is my main class:

using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Drawing.Imaging;
using System.Linq;
using System.Text;
using System.Windows.Forms;

using System.Threading;
using System.Threading.Tasks;
using System.IO;
using System.Xml;
using System.Runtime.InteropServices;
using System.Security.Principal;
using Microsoft.Win32.SafeHandles;

using Emgu.CV;
using Emgu.CV.Structure;
using Emgu.CV.CvEnum;
using Emgu.Util;
using Emgu.CV.UI;

namespace Ilovewinforms
{
    public partial class Form1 : Form
    {
    public Form1()
    {
        InitializeComponent();

        //Load previously trained faces and labels for each image
        if (Eigen_Recog.IsTrained)
        {
            message_bar.Text = "Training Data loaded";
        }
        else
        {
            message_bar.Text = "No training data found, please train program using Train menu option";
        }

        Face = new CascadeClassifier("haarcascade_frontalface_default.xml");
        ENC_Parameters.Param[0] = ENC;
        Image_Encoder_JPG = GetEncoder(ImageFormat.Jpeg);
    }

    private void Form1_Load(object sender, EventArgs e)
    {
        try
        {
            capturecam = new Capture();
        }
        catch (NullReferenceException exception)
        {
            MessageBox.Show(exception.Message);
            return;
        }
        Application.Idle += new EventHandler(broadcastFunction);
        //Application.Idle += new EventHandler(photoGrabber);
        CapturingProcess = true;
    }

    #region Detection
    #region Detection Variables
    //******************************DETECTION GLOBAL VARIABLES*******************************************//

    Capture capturecam = null; //instance for capture using webcam
    bool CapturingProcess = false; //boolean stating the capturing process status
    Image<Bgr, Byte> imgOrg; //image type RGB (or Bgr as we say in Open CV)    
    Image<Gray, byte> result, TrainedFace = null; //used to store the result image and trained face

    private static readonly CascadeClassifier _cascadeClassifier = new CascadeClassifier("haarcascade_frontalface_default.xml");

    //Classifier with default training location
    Classifier_Train Eigen_Recog = new Classifier_Train();

    //MCvFont font = new MCvFont(FONT.CV_FONT_HERSHEY_COMPLEX, 0.5, 0.5); //Our font for writing within the frame
    //CvInvoke.PutText(img, "Hello, world", new System.Drawing.Point(10, 80), FontFace.HersheyComplex, 1.0, new Bgr(0, 255, 0).MCvScalar); //Our font for writing within the frame

    Bitmap[] ExtFaces;
    int faceNo = 0;

    //***************************************************************************************************//
    #endregion

    //Video capture and broadcast
    void broadcastFunction(object sender, EventArgs e)
    {
        imgOrg = capturecam.QueryFrame().ToImage<Bgr, byte>();
        faceDetection(imgOrg);

        //videoPictureBox.Image = imgOrg.ToBitmap();            
        //if (imgOrg == null) return;                   
    }

    void faceDetection(Image<Bgr, Byte> imgOrg)
    {
        //Convert it to Grayscale

        if (imgOrg != null)
        {
            gray_frame = imgOrg.Convert<Gray, Byte>();
            //Face Detector
            Rectangle[] facesDetected = Face.DetectMultiScale(gray_frame, 1.2, 10, new Size(50, 50), Size.Empty);

            //Action for each element detected
            Parallel.For(0, facesDetected.Length, i =>
            {
                try
                {
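                    //Shrink the Haar rectangle so mostly face (not hair/background) remains;
                    //the fractions are empirical, the same values as used in photoGrabber below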
                    facesDetected[i].X += (int)(facesDetected[i].Height * 0.15);
                    facesDetected[i].Y += (int)(facesDetected[i].Width * 0.22);
                    facesDetected[i].Height -= (int)(facesDetected[i].Height * 0.3);
                    facesDetected[i].Width -= (int)(facesDetected[i].Width * 0.35);

                    //Take a local copy so parallel iterations don't race on the shared 'result' field
                    Image<Gray, byte> result = imgOrg.Copy(facesDetected[i]).Convert<Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.Inter.Cubic);
                    result._EqualizeHist();
                    //draw the face detected in the 0th (gray) channel with blue color
                    imgOrg.Draw(facesDetected[i], new Bgr(Color.Blue), 2);

                    if (Eigen_Recog.IsTrained)
                    {
                        string name = Eigen_Recog.Recognise(result);
                        int match_value = (int)Eigen_Recog.Get_Eigen_Distance;

                        //Draw the label next to each face detected and recognized
                        CvInvoke.PutText(imgOrg, name + " ", new System.Drawing.Point(facesDetected[i].X - 2, facesDetected[i].Y - 2), FontFace.HersheyComplex, 1.0, new Bgr(255, 0, 0).MCvScalar);
                        //Marshal the control update onto the UI thread; the parallel workers must not touch WinForms controls directly
                        Faces_Found_Panel.BeginInvoke((MethodInvoker)(() => ADD_Face_Found(result, name, match_value)));
                    }

                }
                catch
                {
                    //Ignore the sporadic failures the parallel loop raises when a
                    //cropped rectangle has no data left to process
                }
            });
            //Show the faces processed and recognized
            videoPictureBox.Image = imgOrg.ToBitmap();
        }
    }

    //ADD Picture box and label to a panel for each face
    int faces_count = 0;
    int faces_panel_Y = 0;
    int faces_panel_X = 0;

    void Clear_Faces_Found()
    {
        this.Faces_Found_Panel.Controls.Clear();
        faces_count = 0;
        faces_panel_Y = 0;
        faces_panel_X = 0;
    }

    void ADD_Face_Found(Image<Gray, Byte> img_found, string name_person, int match_value)
    {
        PictureBox PI = new PictureBox();
        PI.Location = new Point(faces_panel_X, faces_panel_Y);
        PI.Height = 80;
        PI.Width = 80;
        PI.SizeMode = PictureBoxSizeMode.StretchImage;
        PI.Image = img_found.ToBitmap();
        Label LB = new Label();
        LB.Text = name_person + " " + match_value.ToString();
        LB.Location = new Point(faces_panel_X, faces_panel_Y + 80);
        LB.Width = 80;
        LB.Height = 15;

        Faces_Found_Panel.Controls.Add(PI);
        Faces_Found_Panel.Controls.Add(LB);
        faces_count++;
        if (faces_count == 2)
        {
            faces_panel_X = 0;
            faces_panel_Y += 100;
            faces_count = 0;
        }
        else faces_panel_X += 85;

        if (Faces_Found_Panel.Controls.Count > 10)
        {
            Clear_Faces_Found();
        }

    }
    #endregion


    #region Training

    #region Training Variables
    //******************************RECOGNITION GLOBAL VARIABLES*****************************************//  

    Image<Gray, byte> gray_frame = null;

    //Classifier
    CascadeClassifier Face = new CascadeClassifier("haarcascade_frontalface_default.xml");

    //Saving Jpg
    List<Image<Gray, byte>> ImagestoWrite = new List<Image<Gray, byte>>();
    EncoderParameters ENC_Parameters = new EncoderParameters(1);
    EncoderParameter ENC = new EncoderParameter(System.Drawing.Imaging.Encoder.Quality, 100);
    ImageCodecInfo Image_Encoder_JPG;

    //Saving XAML Data file
    List<string> NamestoWrite = new List<string>();
    List<string> NamesforFile = new List<string>();
    XmlDocument docu = new XmlDocument();

    //Variables
    Form1 Parent;
    private Image<Bgr, byte> img;

    //For acquiring 10 images in a row
    List<Image<Gray, byte>> resultImages = new List<Image<Gray, byte>>();
    int results_list_pos = 0;
    int num_faces_to_aquire = 10;
    bool RECORD = false;

    //***************************************************************************************************//
    #endregion
    void photoGrabber(object sender, EventArgs e)
    {
        //Get the current frame from the capture device
        //currentFrame = grabber.QueryFrame().Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);

        // Converting the master image to a bitmap
        Bitmap masterImage = (Bitmap)photoPictureBox.Image;

        // Normalizing it to grayscale
        img = new Image<Bgr, byte>(masterImage);

        //Convert it to Grayscale
        if (img != null)
        {
            gray_frame = img.Convert<Gray, Byte>();

            //Face Detector
            //MCvAvgComp[][] facesDetected = gray_frame.DetectHaarCascade(Face, 1.2, 10, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(20, 20)); //old method
            Rectangle[] facesDetected = Face.DetectMultiScale(gray_frame, 1.2, 10, new Size(50, 50), Size.Empty);

            //Action for each element detected
            for (int i = 0; i < facesDetected.Length; i++)// (Rectangle face_found in facesDetected)
            {
                //This focuses in on the face from the Haar results; it's not perfect, but it removes a majority
                //of the background noise
                facesDetected[i].X += (int)(facesDetected[i].Height * 0.15);
                facesDetected[i].Y += (int)(facesDetected[i].Width * 0.22);
                facesDetected[i].Height -= (int)(facesDetected[i].Height * 0.3);
                facesDetected[i].Width -= (int)(facesDetected[i].Width * 0.35);

                result = img.Copy(facesDetected[i]).Convert<Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.Inter.Cubic);
                result._EqualizeHist();
                facePictureBox.Image = result.ToBitmap();
                //draw the face detected in the 0th (gray) channel with blue color
                img.Draw(facesDetected[i], new Bgr(Color.Blue), 2);

            }      

            photoPictureBox.Image = img.ToBitmap();
        }
    }

    private bool save_training_data(Image face_data)
    {
        try
        {
            Random rand = new Random();
            bool file_create = true;
            string facename = "face_" + nameTextBox.Text + "_" + rand.Next().ToString() + ".jpg";
            while (file_create)
            {

                if (!File.Exists(Application.StartupPath + "/TrainedFaces/" + facename))
                {
                    file_create = false;
                }
                else
                {
                    facename = "face_" + nameTextBox.Text + "_" + rand.Next().ToString() + ".jpg";
                }
            }


            if (Directory.Exists(Application.StartupPath + "/TrainedFaces/"))
            {
                face_data.Save(Application.StartupPath + "/TrainedFaces/" + facename, ImageFormat.Jpeg);
            }
            else
            {
                Directory.CreateDirectory(Application.StartupPath + "/TrainedFaces/");
                face_data.Save(Application.StartupPath + "/TrainedFaces/" + facename, ImageFormat.Jpeg);
            }
            if (File.Exists(Application.StartupPath + "/TrainedFaces/TrainedLabels.xml"))
            {
                //File.AppendAllText(Application.StartupPath + "/TrainedFaces/TrainedLabels.txt", NAME_PERSON.Text + "\n\r");
                bool loading = true;
                while (loading)
                {
                    try
                    {
                        docu.Load(Application.StartupPath + "/TrainedFaces/TrainedLabels.xml");
                        loading = false;
                    }
                    catch
                    {
                        docu = null;
                        docu = new XmlDocument();
                        Thread.Sleep(10);
                    }
                }

                //Get the root element
                XmlElement root = docu.DocumentElement;

                XmlElement face_D = docu.CreateElement("FACE");
                XmlElement name_D = docu.CreateElement("NAME");
                XmlElement file_D = docu.CreateElement("FILE");

                //Add the values for each nodes
                //name.Value = textBoxName.Text;
                //age.InnerText = textBoxAge.Text;
                //gender.InnerText = textBoxGender.Text;
                name_D.InnerText = nameTextBox.Text;
                file_D.InnerText = facename;

                //Construct the Person element
                //person.Attributes.Append(name);
                face_D.AppendChild(name_D);
                face_D.AppendChild(file_D);

                //Add the New person element to the end of the root element
                root.AppendChild(face_D);

                //Save the document
                docu.Save(Application.StartupPath + "/TrainedFaces/TrainedLabels.xml");
                //XmlElement child_element = docu.CreateElement("FACE");
                //docu.AppendChild(child_element);
                //docu.Save("TrainedLabels.xml");
            }
            else
            {
                FileStream FS_Face = File.OpenWrite(Application.StartupPath + "/TrainedFaces/TrainedLabels.xml");
                using (XmlWriter writer = XmlWriter.Create(FS_Face))
                {
                    writer.WriteStartDocument();
                    writer.WriteStartElement("Faces_For_Training");

                    writer.WriteStartElement("FACE");
                    writer.WriteElementString("NAME", nameTextBox.Text);
                    writer.WriteElementString("FILE", facename);
                    writer.WriteEndElement();

                    writer.WriteEndElement();
                    writer.WriteEndDocument();
                }
                FS_Face.Close();
            }

            return true;
        }
        catch (Exception ex)
        {
            return false;
        }

    }        

    //Delete all the old training data by simply deleting the folder
    private void DeleteButton_Click(object sender, EventArgs e)
    {
        if (Directory.Exists(Application.StartupPath + "/TrainedFaces/"))
        {
            Directory.Delete(Application.StartupPath + "/TrainedFaces/", true);
            Directory.CreateDirectory(Application.StartupPath + "/TrainedFaces/");
        }
    }

    private void addButton_Click(object sender, EventArgs e)
    {
        //if (resultImages.Count == num_faces_to_aquire)
        //{
            if (!save_training_data(facePictureBox.Image))
                MessageBox.Show("Error in saving file info. Training data not saved", "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
        //}

    }

    private ImageCodecInfo GetEncoder(ImageFormat format)
    {
        ImageCodecInfo[] codecs = ImageCodecInfo.GetImageDecoders();
        foreach (ImageCodecInfo codec in codecs)
        {
            if (codec.FormatID == format.Guid)
            {
                return codec;
            }
        }
        return null;
    }

    }
}

Here is the face-training class:

using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;

using Emgu.CV.UI;
using Emgu.CV;
using Emgu.CV.Structure;
using Emgu.CV.CvEnum;
using Emgu.CV.Face;

using System.IO;
using System.Xml;
using System.Runtime.InteropServices;
using System.Threading;
using System.Windows.Forms;
using System.Xml.Serialization;
using System.Drawing.Imaging;
using System.Drawing;

/// <summary>
/// Designed to keep the EigenObjectRecognizer training code out of the main form
/// </summary>
class Classifier_Train : IDisposable
{

    #region Variables

    //Eigen
    //EigenObjectRecognizer recognizer;
    FaceRecognizer recognizer;

    //training variables
    List<Image<Gray, byte>> trainingImages = new List<Image<Gray, byte>>();//Images
    //TODO: see if this can be combined in Dictionary format; this will remove support for old data
    List<string> Names_List = new List<string>(); //labels
    List<int> Names_List_ID = new List<int>();
    int ContTrain, NumLabels;
    float Eigen_Distance = 0;
    string Eigen_label;
    int Eigen_threshold = 2000;

    //Class Variables
    string Error;
    bool _IsTrained = false;

    //"EMGU.CV.EigenFaceRecognizer"
    //"EMGU.CV.FisherFaceRecognizer"
    //"EMGU.CV.LBPHFaceRecognizer"

    public string Recognizer_Type = "EMGU.CV.EigenFaceRecognizer";
    #endregion

    #region Constructors
    /// <summary>
    /// Default Constructor, looks in (Application.StartupPath + "\\TrainedFaces") for training data.
    /// </summary>
    public Classifier_Train()
    {
        _IsTrained = LoadTrainingData(Application.StartupPath + "\\TrainedFaces");
    }

    /// <summary>
    /// Takes a string path pointing to a different location for training data
    /// </summary>
    /// <param name="Training_Folder"></param>
    public Classifier_Train(string Training_Folder)
    {
        _IsTrained = LoadTrainingData(Training_Folder);
    }
    #endregion

    #region Public
    /// <summary>
    /// Retrains the recognizer without resetting variables like recognizer type.
    /// </summary>
    /// <returns></returns>
    public bool Retrain()
    {
        return _IsTrained = LoadTrainingData(Application.StartupPath + "\\TrainedFaces");
    }
    /// <summary>
    /// Retrains the recognizer without resetting variables like recognizer type.
    /// Takes a string path to a different location for training data.
    /// </summary>
    /// <returns></returns>
    public bool Retrain(string Training_Folder)
    {
        return _IsTrained = LoadTrainingData(Training_Folder);
    }

    /// <summary>
    /// <para>Return(True): If training data has been located and the Eigen Recogniser has been trained</para>
    /// <para>Return(False): If NO training data has been located or an error in training has occurred</para>
    /// </summary>
    public bool IsTrained
    {
        get { return _IsTrained; }
    }

    /// <summary>
    /// Recognise a Grayscale Image using the trained Eigen Recogniser
    /// </summary>
    /// <param name="Input_image"></param>
    /// <returns></returns>
    public string Recognise(Image<Gray, byte> Input_image, int Eigen_Thresh = -1)
    {
        if (_IsTrained)
        {
            FaceRecognizer.PredictionResult ER = recognizer.Predict(Input_image);

            if (ER.Label == -1)
            {
                Eigen_label = "Unknown";
                Eigen_Distance = 0;
                return Eigen_label;
            }
            else
            {
                Eigen_label = Names_List[ER.Label];
                Eigen_Distance = (float)ER.Distance;
                if (Eigen_Thresh > -1) Eigen_threshold = Eigen_Thresh;

                //Only use the post-prediction threshold rule if we are using an Eigen recognizer,
                //since for Fisher and LBPH the threshold set in the constructor already works correctly
                switch (Recognizer_Type)
                {
                    case ("EMGU.CV.EigenFaceRecognizer"):
                        if (Eigen_Distance > Eigen_threshold) return Eigen_label;
                        else return "Unknown";
                    case ("EMGU.CV.FisherFaceRecognizer"):
                    case ("EMGU.CV.LBPHFaceRecognizer"):
                    default:
                        return Eigen_label; //the threshold set in training controls unknowns
                }
            }
        }
        else return "";
    }

    /// <summary>
    /// Returns a string containing the recognised person's name
    /// </summary>
    public string Get_Eigen_Label
    {
        get
        {
            return Eigen_label;
        }
    }

    /// <summary>
    /// Returns a float confidence value for potential false classifications
    /// </summary>
    public float Get_Eigen_Distance
    {
        get
        {
            //get eigenDistance
            return Eigen_Distance;
        }
    }

    /// <summary>
    /// Returns a string containing any error that has occurred
    /// </summary>
    public string Get_Error
    {
        get { return Error; }
    }


    /// <summary>
    /// Dispose of the class and call the Garbage Collector
    /// </summary>
    public void Dispose()
    {
        recognizer = null;
        trainingImages = null;
        Names_List = null;
        Error = null;
        GC.Collect();
    }

    #endregion

    #region Private
    /// <summary>
    /// Loads the training data given a (string) folder location
    /// </summary>
    /// <param name="Folder_location"></param>
    /// <returns></returns>
    private bool LoadTrainingData(string Folder_location)
    {
        if (File.Exists(Folder_location + "\\TrainedLabels.xml"))
        {
            try
            {
                //message_bar.Text = "";
                Names_List.Clear();
                Names_List_ID.Clear();
                trainingImages.Clear();
                FileStream filestream = File.OpenRead(Folder_location + "\\TrainedLabels.xml");
                long filelength = filestream.Length;
                byte[] xmlBytes = new byte[filelength];
                filestream.Read(xmlBytes, 0, (int)filelength);
                filestream.Close();

                MemoryStream xmlStream = new MemoryStream(xmlBytes);

                using (XmlReader xmlreader = XmlTextReader.Create(xmlStream))
                {
                    while (xmlreader.Read())
                    {
                        if (xmlreader.IsStartElement())
                        {
                            switch (xmlreader.Name)
                            {
                                case "NAME":
                                    if (xmlreader.Read())
                                    {
                                        Names_List_ID.Add(Names_List.Count); //0, 1, 2, 3....
                                        Names_List.Add(xmlreader.Value.Trim());
                                        NumLabels += 1;
                                    }
                                    break;
                                case "FILE":
                                    if (xmlreader.Read())
                                    {
                                        //Load from the supplied folder so the training data can be moved (was hard-coded to StartupPath)
                                        trainingImages.Add(new Image<Gray, byte>(Folder_location + "\\" + xmlreader.Value.Trim()));
                                    }
                                    break;
                            }
                        }
                    }
                }
                ContTrain = NumLabels;

                if (trainingImages.ToArray().Length != 0)
                {

                    //Eigen face recognizer
                    //Parameters:
                    //      num_components – The number of components (read: Eigenfaces) kept for this Principal
                    //          Component Analysis. As a hint: there's no rule for how many components (read: Eigenfaces)
                    //          should be kept for good reconstruction capabilities. It depends on your input data,
                    //          so experiment with the number. Keeping 80 components should almost always be sufficient.
                    //
                    //      threshold – The threshold applied in the prediction. This still has issues, as it works inversely to the LBPH and Fisher methods:
                    //          if you use 0.0, recognizer.Predict will always return -1 (unknown); if you use 5000, for example, unknowns won't be recognised.
                    //          As in previous versions, I ignore the built-in threshold methods and always allow a match to be found (i.e. double.PositiveInfinity),
                    //          then use the eigen distance that is returned to eliminate unknowns.
                    //
                    //NOTE: The following causes the confusion, since two rules are used.
                    //--------------------------------------------------------------------------------------------------------------------------------------
                    //Eigen uses
                    //          0 - X = unknown
                    //          > X = Recognised
                    //
                    //Fisher and LBPH use
                    //          0 - X = Recognised
                    //          > X = Unknown
                    //
                    // Where X = threshold value
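                    //Worked example of the two rules above (illustrative numbers only, not measured):
                    //  Eigen with Eigen_threshold = 2000: distance 2500 -> kept as a match, distance 1500 -> "Unknown"
                    //  LBPH  with ctor threshold = 100:   distance 60 -> match, distance 140 -> Predict returns label -1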


                    switch (Recognizer_Type)
                    {
                        case ("EMGU.CV.LBPHFaceRecognizer"):
                            recognizer = new LBPHFaceRecognizer(1, 8, 8, 8, 100);//50
                            break;
                        case ("EMGU.CV.FisherFaceRecognizer"):
                            recognizer = new FisherFaceRecognizer(0, 3500);//4000
                            break;
                        case ("EMGU.CV.EigenFaceRecognizer"):
                        default:
                            recognizer = new EigenFaceRecognizer(80, double.PositiveInfinity);
                            break;
                    }

                    recognizer.Train(trainingImages.ToArray(), Names_List_ID.ToArray());
                    // Recognizer_Type = recognizer.GetType();
                    // string v = recognizer.ToString(); //EMGU.CV.FisherFaceRecognizer || EMGU.CV.EigenFaceRecognizer || EMGU.CV.LBPHFaceRecognizer

                    return true;
                }
                else return false;
            }
            catch (Exception ex)
            {
                Error = ex.ToString();
                return false;
            }
        }
        else return false;
    }

    #endregion
}
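
Finally, since the real goal is a one-to-one check against the single membership-card photo rather than a gallery search, this is the shape of what I am ultimately trying to get right. It is only a rough sketch: LBPH is used because its distance rule (0..threshold = recognised, see the notes in LoadTrainingData) suits verification, and the maxDistance value of 70 is a made-up placeholder that would need tuning on real data:

using Emgu.CV;
using Emgu.CV.Face;
using Emgu.CV.Structure;

static class CardVerifier
{
    //One-to-one verification: train LBPH on the lone card photo, then
    //accept or reject the live face purely on the returned distance
    public static bool VerifyAgainstCard(Image<Gray, byte> cardFace,
                                         Image<Gray, byte> liveFace,
                                         double maxDistance = 70) //placeholder threshold
    {
        LBPHFaceRecognizer lbph = new LBPHFaceRecognizer(1, 8, 8, 8, double.PositiveInfinity);
        lbph.Train(new Image<Gray, byte>[] { cardFace }, new int[] { 0 });

        FaceRecognizer.PredictionResult result = lbph.Predict(liveFace);
        //For LBPH a smaller distance means a closer match
        return result.Label == 0 && result.Distance <= maxDistance;
    }
}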

0 Answers:

No answers yet