Using OpenCV Cascade - using only the haartraining XML file

Date: 2012-02-19 08:40:54

Tags: c++ visual-c++ opencv

I am trying to implement the Viola-Jones face detection algorithm on the CUDA platform (I know openCV already does this; I am doing it for my school... :).

My first stage is to implement the algorithm on the CPU.

I am using the openCV library, and I know openCV already knows how to do face detection. In order to understand it, I want to go back to basics and do it my own way.

I created the integral sum representation and the squared-sum integral representation using openCV functions.
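
For reference, this is roughly how those integral images can be built with the legacy C API that my code below uses - a minimal sketch assuming an 8-bit grayscale input; buildIntegralImages is just an illustrative wrapper, not a function from my code:

#include <opencv/cv.h>   // legacy C API umbrella header (OpenCV 2.x)

// Builds the three integral representations for an 8-bit grayscale CvMat.
// The names sum/sqsum/tilted match the parameters of the function further below.
void buildIntegralImages(const CvMat* image, CvMat** sum, CvMat** sqsum, CvMat** tilted)
{
    // cvIntegral expects the output matrices to be (rows+1) x (cols+1)
    *sum    = cvCreateMat(image->rows + 1, image->cols + 1, CV_64FC1);  // integral image
    *sqsum  = cvCreateMat(image->rows + 1, image->cols + 1, CV_64FC1);  // squared-sum integral
    *tilted = cvCreateMat(image->rows + 1, image->cols + 1, CV_64FC1);  // tilted (45 deg) integral
    cvIntegral(image, *sum, *sqsum, *tilted);
}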

I iterated through the cascade: over the stages, the classifiers and the rects. I normalize each window, calculate the sum for each classifier and compare it with the threshold. Sadly, it seems I am missing something, because I cannot detect any faces.

It seems I need a better understanding of the cascade XML file.

Here is an example:

          <!-- tree 158 -->
      <_>
        <!-- root node -->
        <feature>
          <rects>
            <_>3 6 2 2 -1.</_>
            <_>3 6 1 1 2.</_>
            <_>4 7 1 1 2.</_></rects>
          <tilted>0</tilted></feature>
        <threshold>2.3729570675641298e-003</threshold>
        <left_val>0.4750812947750092</left_val>
        <right_val>0.7060170769691467</right_val></_></_>
    <_>
      <!-- tree 159 -->
      <_>
        <!-- root node -->
        <feature>
          <rects>
            <_>16 6 3 2 -1.</_>
            <_>16 7 3 1 2.</_></rects>
          <tilted>0</tilted></feature>
        <threshold>-1.4541699783876538e-003</threshold>
        <left_val>0.3811730146408081</left_val>
        <right_val>0.5330739021301270</right_val></_></_></trees>
  <stage_threshold>79.2490768432617190</stage_threshold>
  <parent>16</parent>
  <next>-1</next></_>
<_>

I would like to understand what left_val and right_val mean. What do the parent and next values mean? How do I calculate the normalized sum for each classifier? Am I doing something wrong? See my code attached below.

Basically this is what I am doing. I wanted to offer a bounty on this question, but I do not have enough reputation. Any help is greatly appreciated. Thanks in advance, S

int RunHaarClassifierCascadeSum(CascadeClassifier * face_cascade, CvMat*  image , CvMat* sum , CvMat* sqsum,
                            CvMat* tilted,CvSize *scaningWindowSize, int iteratorRow, int iteratorCol )

{

// Normalize the current scanning window - Detection window
// Variance(x) = E(x^2) - (E(x))^2 = detectionWindowSquereExpectancy - detectionWindowExpectancy^2 
// Expectancy(x) = E(x) = sum_of_pixels / size_of_window

double detectionWindowTotalSize = scaningWindowSize->height * scaningWindowSize->width;

// calculate the detection Window Expectancy , e.g the E(x) 
double sumDetectionWindowPoint1,sumDetectionWindowPoint2,sumDetectionWindowPoint3,sumDetectionWindowPoint4;     //  ______________________
sumDetectionWindowPoint1 = cvGetReal2D(sum,iteratorRow,iteratorCol);                                            //  |R1                R2|
sumDetectionWindowPoint2 = cvGetReal2D(sum,iteratorRow+scaningWindowSize->width,iteratorCol);                   //  |                    |   Sum = R4-R2-R3+R1
sumDetectionWindowPoint3 = cvGetReal2D(sum,iteratorRow,iteratorCol+scaningWindowSize->height);                  //  |R3________________R4|
sumDetectionWindowPoint4 = cvGetReal2D(sum,iteratorRow+scaningWindowSize->width,iteratorCol+scaningWindowSize->height);
double detectionWindowSum = calculateSum(sumDetectionWindowPoint1,sumDetectionWindowPoint2,sumDetectionWindowPoint3,sumDetectionWindowPoint4);
const double detectionWindowExpectancy = detectionWindowSum / detectionWindowTotalSize;     // E(x) 

// calculate the Square detection Window Expectancy , e.g the E(x^2) 
double squareSumDetectionWindowPoint1,squareSumDetectionWindowPoint2,squareSumDetectionWindowPoint3,squareSumDetectionWindowPoint4;     //  ______________________
squareSumDetectionWindowPoint1 = cvGetReal2D(sqsum,iteratorRow,iteratorCol);                                            //  |R1                R2|
squareSumDetectionWindowPoint2 = cvGetReal2D(sqsum,iteratorRow+scaningWindowSize->width,iteratorCol);                   //  |                    |   Sum = R4-R2-R3+R1
squareSumDetectionWindowPoint3 = cvGetReal2D(sqsum,iteratorRow,iteratorCol+scaningWindowSize->height);                  //  |R3________________R4|
squareSumDetectionWindowPoint4 = cvGetReal2D(sqsum,iteratorRow+scaningWindowSize->width,iteratorCol+scaningWindowSize->height);
double detectionWindowSquareSum = calculateSum(squareSumDetectionWindowPoint1,squareSumDetectionWindowPoint2,squareSumDetectionWindowPoint3,squareSumDetectionWindowPoint4);
const double detectionWindowSquareExpectancy = detectionWindowSquareSum / detectionWindowTotalSize;     // E(x^2)

const double detectionWindowVariance = detectionWindowSquareExpectancy - std::pow(detectionWindowExpectancy,2);  // Variance(x) = E(x^2) - (E(x))^2 
const  double detectionWindowStandardDeviation = std::sqrt(detectionWindowVariance);

if (detectionWindowVariance<=0)
    return -1 ; // Error 

// Normalize the cascade window to the normal scale window
double normalizeScaleWidth = double(scaningWindowSize->width / face_cascade->oldCascade->orig_window_size.width);
double normalizeScaleHeight = double(scaningWindowSize->height / face_cascade->oldCascade->orig_window_size.height);

// Calculate the cascade for each one of the windows
for( int stageIterator=0; stageIterator< face_cascade->oldCascade->count; stageIterator++ )      // Stage iterator
{

    CvHaarStageClassifier* pCvHaarStageClassifier = face_cascade->oldCascade->stage_classifier + stageIterator;
    for (int CvHaarStageClassifierIterator=0;CvHaarStageClassifierIterator<pCvHaarStageClassifier->count;CvHaarStageClassifierIterator++)    // Classifier iterator
    {
        CvHaarClassifier* classifier = pCvHaarStageClassifier->classifier + CvHaarStageClassifierIterator;
        float classifierSum=0.;

        for( int CvHaarClassifierIterator = 0; CvHaarClassifierIterator < classifier->count;CvHaarClassifierIterator++ )    // Feature iterator
        {
            CvHaarFeature * pCvHaarFeature = classifier->haar_feature;

            // Tilted (45-degree) features are not handled here - skip this classifier
            if (pCvHaarFeature->tilted==1)
                break; 

            for( int CvHaarFeatureIterator = 0; CvHaarFeatureIterator< CV_HAAR_FEATURE_MAX; CvHaarFeatureIterator++ )   // 3 Features iterator 
            {
                CvRect * currentRect = &(pCvHaarFeature->rect[CvHaarFeatureIterator].r);
                // Normalize the rect to the scaling window scale
                CvRect normalizeRec;
                normalizeRec.x = (int)(currentRect->x*normalizeScaleWidth); 
                normalizeRec.y = (int)(currentRect->y*normalizeScaleHeight);
                normalizeRec.width = (int)(currentRect->width*normalizeScaleWidth); 
                normalizeRec.height = (int)(currentRect->height*normalizeScaleHeight); 

                double sumRectPoint1,sumRectPoint2,sumRectPoint3,sumRectPoint4;                             //  ______________________
                sumRectPoint1 = cvGetReal2D(sum,normalizeRec.x,normalizeRec.y);                             //  |R1                R2|
                sumRectPoint2 = cvGetReal2D(sum,normalizeRec.x+normalizeRec.width,normalizeRec.y);          //  |                    |   Sum = R4-R2-R3+R1
                sumRectPoint3 = cvGetReal2D(sum,normalizeRec.x,normalizeRec.y+normalizeRec.height);         //  |R3________________R4|
                sumRectPoint4 = cvGetReal2D(sum,normalizeRec.x+normalizeRec.width,normalizeRec.y+normalizeRec.height);

                double nonNormalizeRect = calculateSum(sumRectPoint1,sumRectPoint2,sumRectPoint3,sumRectPoint4);        //
                double sumMean = detectionWindowExpectancy*(normalizeRec.width*normalizeRec.height);                    // sigma(Pi) = normalizeRect  = (sigma(Pi- rect) - sigma(mean)) / detectionWindowStandardDeviation
                double normalizeRect = (nonNormalizeRect - sumMean)/detectionWindowStandardDeviation;                   //

                classifierSum += (normalizeRect*(pCvHaarFeature->rect[CvHaarFeatureIterator].weight));
            }
        }
 //             if (classifierSum > (*(classifier->threshold)) )
 //                 return 0;       // That's not a face !  
        if (classifierSum > ((*(classifier->threshold))*detectionWindowStandardDeviation) )
            return -stageIterator;      // Not a face - failed at this stage number

    }
}
return 1;   // That's a face 
}

1 Answer:

Answer 0 (score: 1):

You need to make some major changes. First, classifier->threshold is the threshold for each feature. classifier->alpha points to an array of 2 elements - left_val and right_val (as far as I know). You should add something like this after the classifier loop -

double a = classifier->alpha[0];   // left_val
double b = classifier->alpha[1];   // right_val
double t = *(classifier->threshold);
stage_sum += classifierSum < t ? a : b;
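
For reference, the CvHaarClassifier struct from the legacy OpenCV C API (reproduced here from the 2.x headers to the best of my knowledge) - alpha is where the left_val/right_val leaf values from the XML end up:

typedef struct CvHaarClassifier
{
    int count;                   // number of nodes in this weak classifier (1 for a stump)
    CvHaarFeature* haar_feature; // the <rects>/<tilted> data of each node
    float* threshold;            // per-node <threshold>
    int* left;                   // child node indices (not relevant for stumps)
    int* right;
    float* alpha;                // leaf values: <left_val> and <right_val>
} CvHaarClassifier;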

Then compare stage_sum with CvHaarStageClassifier::threshold, which is the stage threshold, while looping through the stage_classifiers[i]. If the window passes all of them, then it is a face! If you use haarcascade_frontalface_alt.xml, 'parent' and 'next' are of no use here; it is just a stump-based cascade and not a tree-based one.
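
As a concrete illustration of that flow, here is a minimal sketch of how the fix could plug into the loop structure from the question. It is not OpenCV's actual implementation: getFeatureSum() is a hypothetical placeholder standing in for the normalized rect-sum computation (classifierSum) in the question's code, and runCascadeOnWindowSketch is an illustrative name.

#include <opencv2/objdetect/objdetect.hpp>  // legacy CvHaar* structs (OpenCV 2.x)

// Hypothetical helper: stands in for the normalized rect-sum code from the question.
double getFeatureSum(const CvHaarFeature* feature);

// Minimal sketch of the per-window cascade evaluation with the fix applied.
// Returns 1 for a face, or minus the index of the stage that rejected the window.
int runCascadeOnWindowSketch(const CvHaarClassifierCascade* cascade)
{
    for (int stageIterator = 0; stageIterator < cascade->count; stageIterator++)
    {
        const CvHaarStageClassifier* stage = cascade->stage_classifier + stageIterator;
        double stage_sum = 0.;

        for (int classifierIterator = 0; classifierIterator < stage->count; classifierIterator++)
        {
            const CvHaarClassifier* classifier = stage->classifier + classifierIterator;

            // Normalized, weighted rect sum of this weak classifier's feature
            // (the same quantity as classifierSum in the question's code).
            double classifierSum = getFeatureSum(classifier->haar_feature);

            // Per-feature threshold; alpha[0]/alpha[1] are left_val/right_val.
            double t = *(classifier->threshold);
            stage_sum += classifierSum < t ? classifier->alpha[0] : classifier->alpha[1];
        }

        // A window is rejected as soon as one stage's sum falls below the stage threshold.
        if (stage_sum < stage->threshold)
            return -stageIterator;   // not a face, failed at this stage
    }
    return 1;   // passed every stage - it's a face
}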