I'm building a face recognition application with OpenCV. I can detect faces, but I have very few resources to go on, and I'm stuck at this point:
// These vectors hold the images and corresponding labels.
vector<Mat> images;
vector<int> labels;
Mat testSample = images[images.size() - 1];
int testLabel = labels[labels.size() - 1];
images.pop_back();
labels.pop_back();
Ptr<FaceRecognizer> model = createEigenFaceRecognizer();
model->train(images, labels);
// The following line predicts the label of a given test image:
int predictedLabel = model->predict(testSample);
string result_message = format("Predicted class = %d / Actual class = %d.", predictedLabel, testLabel);
cout << result_message << endl;
// Here is how to get the eigenvalues of this Eigenfaces model:
Mat eigenvalues = model->getMat("eigenvalues");
// And we can do the same to display the Eigenvectors (read Eigenfaces):
Mat W = model->getMat("eigenvectors");
// Get the sample mean from the training data
Mat mean = model->getMat("mean");
Could someone please explain where I should put the detected faces and where I compare them against the database? Do I have to put them all into vector<Mat> images;
or somewhere else?
How does the field vector<int> labels;
work? What should I put in it?
It would be great if someone could show me a simple example. The detected faces are passed in as parameters from a Java class.
Thanks!
Answer 0 (score: 0)
You need to import the FaceRecognizer class that ships with the javacv library (com.googlecode.javacv.cpp.opencv_contrib.FaceRecognizer).
Below is the class code; it contains all the functions needed to train and test faces:
package org.opencv.javacv.facerecognition;
import static com.googlecode.javacv.cpp.opencv_highgui.*;
import static com.googlecode.javacv.cpp.opencv_core.*;
import static com.googlecode.javacv.cpp.opencv_imgproc.*;
import static com.googlecode.javacv.cpp.opencv_contrib.*;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FilenameFilter;
import java.util.ArrayList;
import org.opencv.android.Utils;
import org.opencv.core.Mat;
import com.googlecode.javacv.cpp.opencv_imgproc;
import com.googlecode.javacv.cpp.opencv_contrib.FaceRecognizer;
import com.googlecode.javacv.cpp.opencv_core.IplImage;
import com.googlecode.javacv.cpp.opencv_core.MatVector;
import android.graphics.Bitmap;
import android.os.Environment;
import android.util.Log;
import android.widget.Toast;
public class PersonRecognizer {
public final static int MAXIMG = 100;
FaceRecognizer faceRecognizer;
String mPath;
int count=0;
labels labelsFile;
static final int WIDTH= 128;
static final int HEIGHT= 128;
private int mProb=999;
PersonRecognizer(String path)
{
faceRecognizer = com.googlecode.javacv.cpp.opencv_contrib.createLBPHFaceRecognizer(2,8,8,8,200);
// path=Environment.getExternalStorageDirectory()+"/facerecog/faces/";
mPath=path;
labelsFile= new labels(mPath);
}
void changeRecognizer(int nRec)
{
switch(nRec) {
case 0: faceRecognizer = com.googlecode.javacv.cpp.opencv_contrib.createLBPHFaceRecognizer(1,8,8,8,100);
break;
case 1: faceRecognizer = com.googlecode.javacv.cpp.opencv_contrib.createFisherFaceRecognizer();
break;
case 2: faceRecognizer = com.googlecode.javacv.cpp.opencv_contrib.createEigenFaceRecognizer();
break;
}
train();
}
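// add(): saves one detected face, scaled to WIDTH x HEIGHT, as "<description>-<count>.jpg" under mPath; train() later reads these files back.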
void add(Mat m, String description) {
Bitmap bmp= Bitmap.createBitmap(m.width(), m.height(), Bitmap.Config.ARGB_8888);
Utils.matToBitmap(m,bmp);
bmp= Bitmap.createScaledBitmap(bmp, WIDTH, HEIGHT, false);
FileOutputStream f;
try {
f = new FileOutputStream(mPath+description+"-"+count+".jpg",true);
count++;
bmp.compress(Bitmap.CompressFormat.JPEG, 100, f);
f.close();
} catch (Exception e) {
Log.e("error",e.getCause()+" "+e.getMessage());
e.printStackTrace();
}
}
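// train(): loads every *.jpg under mPath, derives the person's name from the "<name>-<n>.jpg" file name, converts each image to grayscale and trains the recognizer with the matching numeric labels.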
public boolean train() {
File root = new File(mPath);
Log.i("mPath",mPath);
FilenameFilter pngFilter = new FilenameFilter() {
public boolean accept(File dir, String name) {
return name.toLowerCase().endsWith(".jpg");
};
};
File[] imageFiles = root.listFiles(pngFilter);
MatVector images = new MatVector(imageFiles.length);
int[] labels = new int[imageFiles.length];
int counter = 0;
int label;
IplImage img=null;
IplImage grayImg;
int i1=mPath.length();
for (File image : imageFiles) {
String p = image.getAbsolutePath();
img = cvLoadImage(p);
if (img==null)
Log.e("Error","Error cVLoadImage");
Log.i("image",p);
int i2=p.lastIndexOf("-");
int i3=p.lastIndexOf(".");
int icount=Integer.parseInt(p.substring(i2+1,i3));
if (count<icount) count++;
String description=p.substring(i1,i2);
if (labelsFile.get(description)<0)
labelsFile.add(description, labelsFile.max()+1);
label = labelsFile.get(description);
grayImg = IplImage.create(img.width(), img.height(), IPL_DEPTH_8U, 1);
cvCvtColor(img, grayImg, CV_BGR2GRAY);
images.put(counter, grayImg);
labels[counter] = label;
counter++;
}
// train only when there is data for at least two different people
if (counter>0 && labelsFile.max()>1)
faceRecognizer.train(images, labels);
labelsFile.Save();
return true;
}
public boolean canPredict()
{
if (labelsFile.max()>1)
return true;
else
return false;
}
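// predict(): converts the face Mat to a grayscale IplImage, runs the recognizer, stores the raw match distance in mProb and returns the recognized name (or "Unknown").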
public String predict(Mat m) {
if (!canPredict())
return "";
int n[] = new int[1];
double p[] = new double[1];
IplImage ipl = MatToIplImage(m,WIDTH, HEIGHT);
faceRecognizer.predict(ipl, n, p);
if (n[0]!=-1)
mProb=(int)p[0];
else
mProb=-1;
if (n[0] != -1)
return labelsFile.get(n[0]);
else
return "Unkown";
}
IplImage MatToIplImage(Mat m,int width,int height)
{
Bitmap bmp=Bitmap.createBitmap(m.width(), m.height(), Bitmap.Config.ARGB_8888);
Utils.matToBitmap(m, bmp);
return BitmapToIplImage(bmp,width, height);
}
IplImage BitmapToIplImage(Bitmap bmp, int width, int height) {
if ((width != -1) || (height != -1)) {
Bitmap bmp2 = Bitmap.createScaledBitmap(bmp, width, height, false);
bmp = bmp2;
}
IplImage image = IplImage.create(bmp.getWidth(), bmp.getHeight(),
IPL_DEPTH_8U, 4);
bmp.copyPixelsToBuffer(image.getByteBuffer());
IplImage grayImg = IplImage.create(image.width(), image.height(),
IPL_DEPTH_8U, 1);
cvCvtColor(image, grayImg, opencv_imgproc.CV_BGR2GRAY);
return grayImg;
}
protected void SaveBmp(Bitmap bmp,String path)
{
FileOutputStream file;
try {
file = new FileOutputStream(path , true);
bmp.compress(Bitmap.CompressFormat.JPEG,100,file);
file.close();
}
catch (Exception e) {
// TODO Auto-generated catch block
Log.e("",e.getMessage()+e.getCause());
e.printStackTrace();
}
}
public void load() {
train();
}
public int getProb() {
// TODO Auto-generated method stub
return mProb;
}
}
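The helper class labels (Labels in the second answer) is referenced above but never shown. As a rough idea of what it has to do, here is a minimal sketch of such a helper, assuming it simply keeps a name-to-id map and persists it as a text file under mPath; the file name labels.txt and all implementation details are guesses inferred from how get(String), get(int), add, max and Save are called above, not the original project's code.

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.HashMap;
import java.util.Map;

// Hypothetical sketch of the labels helper used by PersonRecognizer above.
public class labels {
    private final HashMap<String, Integer> byName = new HashMap<String, Integer>();
    private final String mPath;

    public labels(String path) {
        mPath = path;
        // Reload a previously saved mapping, if any (on the first run the file does not exist yet).
        try {
            BufferedReader r = new BufferedReader(new FileReader(mPath + "labels.txt"));
            String line;
            while ((line = r.readLine()) != null) {
                String[] parts = line.split(";");
                byName.put(parts[0], Integer.parseInt(parts[1]));
            }
            r.close();
        } catch (Exception ignored) { }
    }

    // Numeric id for a person's name, or -1 if unknown (train() checks for < 0).
    public int get(String name) {
        Integer id = byName.get(name);
        return (id == null) ? -1 : id;
    }

    // Reverse lookup: the name belonging to a predicted id (used by predict()).
    public String get(int id) {
        for (Map.Entry<String, Integer> e : byName.entrySet())
            if (e.getValue() == id) return e.getKey();
        return "";
    }

    public void add(String name, int id) {
        byName.put(name, id);
    }

    // Highest id handed out so far; 0 when the map is empty.
    public int max() {
        int m = 0;
        for (int v : byName.values()) if (v > m) m = v;
        return m;
    }

    public void Save() {
        try {
            PrintWriter w = new PrintWriter(new FileWriter(mPath + "labels.txt"));
            for (Map.Entry<String, Integer> e : byName.entrySet())
                w.println(e.getKey() + ";" + e.getValue());
            w.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}

With a mapping like this, the labels simply number the people: every image saved as "alice-3.jpg" gets whatever integer id was assigned to "alice". That also answers the vector<int> labels question for the C++ version: one integer per image, identical for all images of the same person.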
Answer 1 (score: 0)
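The class below appears to be the same idea rewritten against the modern OpenCV Java bindings (org.opencv.face.LBPHFaceRecognizer and friends from the contrib modules of OpenCV 3/4), working directly with Android Bitmaps and org.opencv.core.Mat instead of javacv's IplImage: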
package org.tensorflow.lite.examples.test;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FilenameFilter;
import java.util.ArrayList;
import java.util.List;
import org.opencv.android.Utils;
import org.opencv.core.CvException;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.Scalar;
import org.opencv.face.EigenFaceRecognizer;
import org.opencv.face.FaceRecognizer;
import org.opencv.face.FisherFaceRecognizer;
import org.opencv.face.LBPHFaceRecognizer;
import org.opencv.imgproc.Imgproc;
import android.graphics.Bitmap;
import android.util.Log;
public class PersonRecognizer {
// FaceRecognizer faceRecognizer = LBPHFaceRecognizer.create(1, 6, 6, 6, 13.5);
//2,2,12,12,15)
// FaceRecognizer faceRecognizer = FisherFaceRecognizer.create(100,100.0);
// FaceRecognizer faceRecognizer = FisherFaceRecognizer.create(0, 123.0);
// FaceRecognizer faceRecognizer = EigenFaceRecognizer.create(10, 123.0);
// FaceRecognizer faceRecognizer = EigenFaceRecognizer.create(1,190.5);
// FaceRecognizer faceRecognizer = LBPHFaceRecognizer.create(1,8,8,8,123.0)
FaceRecognizer faceRecognizer = LBPHFaceRecognizer.create(2, 8, 8, 8, 64);
// FaceRecognizer faceRecognizer = LBPHFaceRecognizer.create(2,2,12,12,15);
String mPath;
int count=0;
Labels labelsFile;
static final int WIDTH= 224;
static final int HEIGHT= 224;
private int mProb=999;
PersonRecognizer(String path) {
// path=Environment.getExternalStorageDirectory()+"/facerecog/faces/";
mPath=path;
labelsFile= new Labels(mPath);
}
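// add(): same role as in the javacv version, but takes a Bitmap, converts it to grayscale via OpenCV Mats, and saves it as "<description>-<count>.jpg" under mPath.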
void add(Bitmap m, String description) {
Mat mat = new Mat();
Utils.bitmapToMat(m, mat);
Mat mGray = new Mat();
Imgproc.cvtColor(mat,mGray,Imgproc.COLOR_RGB2GRAY);
Bitmap bmp = null;
FileOutputStream f;
Mat tmp = new Mat (HEIGHT, WIDTH, CvType.CV_8U, new Scalar(4));
try {
Imgproc.cvtColor(mGray, tmp, Imgproc.COLOR_GRAY2RGBA, 4);
bmp = Bitmap.createBitmap(tmp.cols(), tmp.rows(), Bitmap.Config.ARGB_8888);
Utils.matToBitmap(tmp, bmp);
f = new FileOutputStream(mPath+description+"-"+count+".jpg",true);
count++;
bmp.compress(Bitmap.CompressFormat.JPEG, 100, f);
f.close();
}
catch (CvException e) {
Log.d("Exception",e.getMessage());
}
catch (Exception e) {
Log.e("error908",e.getCause()+" "+e.getMessage());
e.printStackTrace();
}
}
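// train(): same as above, but the org.opencv.face API expects the labels as a CV_32SC1 Mat rather than an int[].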
public boolean train() {
File root = new File(mPath);
FilenameFilter pngFilter = new FilenameFilter() {
public boolean accept(File dir, String name) {
return name.toLowerCase().endsWith(".jpg");
};
};
File[] imageFiles = root.listFiles(pngFilter);
List<Mat> images = new ArrayList<>();
int[] labels = new int[imageFiles.length];
int counter = 0;
int label;
int i1=mPath.length();
for (File image : imageFiles) {
Log.d("TAG11", "train: collecting");
String p = image.getAbsolutePath();
Mat imagexnm = org.opencv.imgcodecs.Imgcodecs.imread(p, 0);
int i2=p.lastIndexOf("-");
int i3=p.lastIndexOf(".");
int icount=Integer.parseInt(p.substring(i2+1,i3));
if (count<icount) count++;
String description=p.substring(i1,i2);
if (labelsFile.get(description)<0) {
Log.d("TAG11", "train: add label");
labelsFile.add(description, labelsFile.max()+1);
}
label = labelsFile.get(description);
images.add(imagexnm);
labels[counter] = label;
counter++;
}
if (counter>0)
if (labelsFile.max()>1)
{
Mat labelsmat = new Mat(labels.length, 1, CvType.CV_32SC1);
for(int i=0;i<labels.length;i++)
{
labelsmat.put(i,0,labels[i]);
}
faceRecognizer.train(images, labelsmat);
labelsFile.Save();
}
return true;
}
public boolean canPredict()
{
if (labelsFile.max()>1)
return true;
else
return false;
}
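// predict(): converts the Bitmap to a grayscale Mat and runs the recognizer; the match distance is kept in mProb.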
public String predict(Bitmap m) {
if (!canPredict())
return "-1";
int n[] = new int[1];
double p[] = new double[1];
Mat mat = new Mat();
Utils.bitmapToMat(m, mat);
Mat mGray = new Mat();
Imgproc.cvtColor(mat,mGray,Imgproc.COLOR_RGB2GRAY);
faceRecognizer.predict(mGray, n, p);
if (n[0]!=-1)
mProb=(int)p[0];
else
mProb=-1;
// if ((n[0] != -1)&&(p[0]<95))
if (n[0] != -1)
return labelsFile.get(n[0]);
else
return "Unknown";
}
protected void SaveBmp(Bitmap bmp,String path)
{
FileOutputStream file;
try {
file = new FileOutputStream(path , true);
bmp.compress(Bitmap.CompressFormat.JPEG,100,file);
file.close();
}
catch (Exception e) {
// TODO Auto-generated catch block
Log.e("",e.getMessage()+e.getCause());
e.printStackTrace();
}
}
public void load() {
train();
}
public int getProb() {
// TODO Auto-generated method stub
return mProb;
}
}
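To tie this back to the original question: you do not fill vector<Mat> images / vector<int> labels by hand at prediction time. You store one grayscale face image per training sample via add(), the label is simply the integer assigned to that person's name, and every newly detected face goes to predict(). Below is a rough usage sketch, assuming the detected face has already been cropped into a Bitmap called face, the storage path is writable, and the path itself mirrors the one commented out in the constructors above; none of these assumptions come from the original answers.

// Assumed context: inside an Activity/handler, after OpenCV has been initialized
// and face detection has produced a cropped Bitmap `face`.
PersonRecognizer recognizer = new PersonRecognizer(
        Environment.getExternalStorageDirectory() + "/facerecog/faces/");
recognizer.load();                      // trains from any images already saved under that path

// Enrollment: save a few samples of the same person, then retrain.
recognizer.add(face, "alice");          // writes alice-<n>.jpg to the faces directory
recognizer.train();

// Recognition of a freshly detected face:
if (recognizer.canPredict()) {          // needs training data for at least two people
    String who = recognizer.predict(face);   // the person's name, or "Unknown"
    int distance = recognizer.getProb();     // LBPH distance; smaller means a closer match
    Log.d("FaceRec", "Recognized " + who + " (distance " + distance + ")");
}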