Is there a way to increase the Emgu haarcascade square size when detecting faces?

Asked: 2015-09-24 06:42:39

Tags: c# emgucv windows-applications face-detection haar-classifier

I am using the following code to capture faces: http://www.codeproject.com/Articles/239849/Multiple-face-detection-and-recognition-in-real. It works as I expect, but I want to increase the height and width of the detection area.


Here is the sample code:

public partial class FrmPrincipal : Form
{
    //Declaration of all variables, vectors and haarcascades
    Image<Bgr, Byte> currentFrame;
    Capture grabber;
    HaarCascade face;
    HaarCascade eye;
    MCvFont font = new MCvFont(FONT.CV_FONT_HERSHEY_TRIPLEX, 0.5d, 0.5d);
    Image<Gray, byte> result, TrainedFace = null;
    Image<Gray, byte> gray = null;
    List<Image<Gray, byte>> trainingImages = new List<Image<Gray, byte>>();
    List<string> labels = new List<string>();
    List<string> NamePersons = new List<string>();
    int ContTrain, NumLabels, t;
    string name, names = null;


    public FrmPrincipal()
    {
        InitializeComponent();
        //Load haarcascades for face detection
        face = new HaarCascade("haarcascade_frontalface_default.xml");
        //eye = new HaarCascade("haarcascade_eye.xml");
        try
        {
            //Load previously trained faces and the labels for each image
            string Labelsinfo = File.ReadAllText(Application.StartupPath + "/TrainedFaces/TrainedLabels.txt");
            string[] Labels = Labelsinfo.Split('%');
            NumLabels = Convert.ToInt16(Labels[0]);
            ContTrain = NumLabels;
            string LoadFaces;

            for (int tf = 1; tf < NumLabels+1; tf++)
            {
                LoadFaces = "face" + tf + ".bmp";
                trainingImages.Add(new Image<Gray, byte>(Application.StartupPath + "/TrainedFaces/" + LoadFaces));
                labels.Add(Labels[tf]);
            }

        }
        catch(Exception e)
        {
            //MessageBox.Show(e.ToString());
            MessageBox.Show("Nothing in binary database, please add at least a face (simply train the prototype with the Add Face button).", "Trained faces load", MessageBoxButtons.OK, MessageBoxIcon.Exclamation);
        }

    }


    private void button1_Click(object sender, EventArgs e)
    {
        //Initialize the capture device
        grabber = new Capture();
        grabber.QueryFrame();
        //Initialize the FrameGrabber event
        Application.Idle += new EventHandler(FrameGrabber);
        button1.Enabled = false;
    }


    private void button2_Click(object sender, System.EventArgs e)
    {
        try
        {
            //Trained face counter
            ContTrain = ContTrain + 1;

            //Get a gray frame from capture device
            gray = grabber.QueryGrayFrame().Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);

            //Face Detector
            MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(
                face,
                1.2,
                10,
                Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                new Size(20, 20));

            //Action for each element detected
            foreach (MCvAvgComp f in facesDetected[0])
            {
                TrainedFace = currentFrame.Copy(f.rect).Convert<Gray, byte>();
                break;
            }

            //Resize the detected face so it is compared at the same size as the
            //test image, using cubic interpolation
            TrainedFace = TrainedFace.Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
            trainingImages.Add(TrainedFace);
            labels.Add(textBox1.Text);

            //Show face added in gray scale
            imageBox1.Image = TrainedFace;

            //Write the number of trained faces to a text file for later loading
            File.WriteAllText(Application.StartupPath + "/TrainedFaces/TrainedLabels.txt", trainingImages.ToArray().Length.ToString() + "%");

            //Write the labels of the trained faces to a text file for later loading
            for (int i = 1; i < trainingImages.ToArray().Length + 1; i++)
            {
                trainingImages.ToArray()[i - 1].Save(Application.StartupPath + "/TrainedFaces/face" + i + ".bmp");
                File.AppendAllText(Application.StartupPath + "/TrainedFaces/TrainedLabels.txt", labels.ToArray()[i - 1] + "%");
            }

            MessageBox.Show(textBox1.Text + "'s face detected and added :)", "Training OK", MessageBoxButtons.OK, MessageBoxIcon.Information);
        }
        catch
        {
            MessageBox.Show("Enable the face detection first", "Training Fail", MessageBoxButtons.OK, MessageBoxIcon.Exclamation);
        }
    }


    void FrameGrabber(object sender, EventArgs e)
    {
        label3.Text = "0";
        //label4.Text = "";
        NamePersons.Add("");

        //Get the current frame from the capture device
        currentFrame = grabber.QueryFrame().Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);

        //Convert it to grayscale
        gray = currentFrame.Convert<Gray, Byte>();

        //Face detector
        MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(
            face,
            1.4,
            10,
            Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
            new Size(20, 20));

        //Action for each element detected
        foreach (MCvAvgComp f in facesDetected[0])
        {
            t = t + 1;
            result = currentFrame.Copy(f.rect).Convert<Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
            //Draw a rectangle around the detected face in red
            currentFrame.Draw(f.rect, new Bgr(Color.Red), 2);

            if (trainingImages.ToArray().Length != 0)
            {
                //TermCriteria for face recognition, with the number of trained images as maxIteration
                MCvTermCriteria termCrit = new MCvTermCriteria(ContTrain, 0.001);

                //Eigen face recognizer
                EigenObjectRecognizer recognizer = new EigenObjectRecognizer(
                    trainingImages.ToArray(),
                    labels.ToArray(),
                    3000,
                    ref termCrit);

                name = recognizer.Recognize(result);

                //Draw the label for each face detected and recognized
                currentFrame.Draw(name, ref font, new Point(f.rect.X - 2, f.rect.Y - 2), new Bgr(Color.LightGreen));
            }

            NamePersons[t - 1] = name;
            NamePersons.Add("");

            //Set the number of faces detected on the scene
            label3.Text = facesDetected[0].Length.ToString();

            /*
            //Set the region of interest on the faces

            gray.ROI = f.rect;
            MCvAvgComp[][] eyesDetected = gray.DetectHaarCascade(
                eye,
                1.1,
                10,
                Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                new Size(20, 20));
            gray.ROI = Rectangle.Empty;

            foreach (MCvAvgComp ey in eyesDetected[0])
            {
                Rectangle eyeRect = ey.rect;
                eyeRect.Offset(f.rect.X, f.rect.Y);
                currentFrame.Draw(eyeRect, new Bgr(Color.Blue), 2);
            }
            */
        }
        t = 0;

        //Concatenate the names of the persons recognized
        for (int nnn = 0; nnn < facesDetected[0].Length; nnn++)
        {
            names = names + NamePersons[nnn] + ", ";
        }
        //Show the faces processed and recognized
        imageBoxFrameGrabber.Image = currentFrame;
        label4.Text = names;
        names = "";
        //Clear the list (vector) of names
        NamePersons.Clear();
    }

    private void button3_Click(object sender, EventArgs e)
    {
        Process.Start("Donate.html");
    }
}

Please suggest the best solution for this. If there isn't one, what would be an alternative approach? Thanks in advance!

1 Answer:

Answer 0 (score: 1)

This is fairly trivial, but it requires some understanding of the code. Once you have the detection result in the variable "f", you should change this line

currentFrame.Draw(f.rect, new Bgr(Color.Red), 2);

so that it draws a new, larger rectangle based on the position of the detected one. Note that the detection result itself does not change; you are only changing what is shown to the user. It should look something like this:

            Rectangle newFaceRect = f.rect;
            //Move the top-left corner up and left by a quarter of the detected size...
            newFaceRect.X = f.rect.X - f.rect.Width / 4;
            newFaceRect.Y = f.rect.Y - f.rect.Height / 4;
            //...and grow the rectangle to 1.5 times the detected width and height
            newFaceRect.Width = (int)(f.rect.Width * 1.5);
            newFaceRect.Height = (int)(f.rect.Height * 1.5);
            currentFrame.Draw(newFaceRect, new Bgr(Color.Black), 2);
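
Two caveats. For faces near the image border the enlarged rectangle can extend past the frame edges, and if you want the recognizer to actually crop the enlarged region rather than just display it, the same rectangle has to be used in the Copy call. Here is a minimal sketch of that variant, assuming it replaces the draw/crop lines inside the FrameGrabber loop (Rectangle.Inflate and Rectangle.Intersect are the standard System.Drawing helpers):

            //Grow the detected rectangle by a quarter of its size on each side
            //(1.5x total), equivalent to the manual arithmetic above
            Rectangle newFaceRect = f.rect;
            newFaceRect.Inflate(f.rect.Width / 4, f.rect.Height / 4);

            //Clamp to the frame bounds so Copy() cannot receive an
            //out-of-range rectangle for faces near the border
            newFaceRect.Intersect(new Rectangle(0, 0, currentFrame.Width, currentFrame.Height));

            //Use the enlarged region both for the overlay and for the
            //crop that is fed to the recognizer
            currentFrame.Draw(newFaceRect, new Bgr(Color.Black), 2);
            result = currentFrame.Copy(newFaceRect).Convert<Gray, byte>()
                                 .Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);

If you go that route, the training crop in button2_Click would need the same treatment, so that the stored faces cover the same wider area as the ones being recognized.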