我想在iPhone上实现与下面代码相同的脸部追踪功能。该代码是macOS代码,但我希望在iPhone上实现同样的效果。
关于iphone中的脸部追踪的任何想法。
答案 0(得分:1)
您必须使用OPENCV来检测面部并将其导入您的代码。在这种方法中,我使用矩形/椭圆来表示检测到的面部
// Detects faces in the supplied image with an OpenCV Haar cascade, strokes a
// rectangle/ellipse for each detection on an offscreen canvas, publishes the
// last face rect through the app delegate, and returns the face region
// cropped out of the original image (or the untouched input when no face is
// found or the cascade cannot be loaded).
//
// Fixes over the original answer code:
//  * the full-size IplImage and the half-size working copy are always
//    released (both leaked on every call / on the failure path),
//  * a missing cascade now returns early instead of falling through with a
//    NULL cascade, an uninitialized CGContext, and an uninitialized CGRect,
//  * face_rect is initialized, so zero detections no longer crops a rect of
//    garbage stack memory,
//  * the y coordinate is scaled back up like x/width/height (detection ran
//    at half resolution), matching the commented-out correct version; the
//    undocumented *1.25 height fudge is removed.
- (UIImage *)opencvFaceDetect:(UIImage *)originalImage {
    cvSetErrMode(CV_ErrModeParent);

    IplImage *image = [self CreateIplImageFromUIImage:originalImage];

    // Detect on a half-size copy for speed; cvPyrDown smooths with a 5x5
    // Gaussian and downsamples in one step.
    IplImage *small_image = cvCreateImage(cvSize(image->width / 2, image->height / 2),
                                          IPL_DEPTH_8U, 3);
    cvPyrDown(image, small_image, CV_GAUSSIAN_5x5);
    cvReleaseImage(&image);  // full-size copy is no longer needed
    int scale = 2;

    // Load the Haar cascade XML shipped in the app bundle.
    NSString *path = [[NSBundle mainBundle] pathForResource:@"haarcascade_frontalface_default"
                                                     ofType:@"xml"];
    CvHaarClassifierCascade *cascade =
        (CvHaarClassifierCascade *)cvLoad([path cStringUsingEncoding:NSASCIIStringEncoding],
                                          NULL, NULL, NULL);
    if (!cascade) {
        // Without a classifier there is nothing to detect; bail out cleanly
        // instead of continuing with NULL pointers.
        NSLog(@"ERROR: Could not load classifier cascade\n");
        cvReleaseImage(&small_image);
        return originalImage;
    }

    // Scratch storage for the detector's result sequence.
    CvMemStorage *storage = cvCreateMemStorage(0);
    cvClearMemStorage(storage);

    CvSeq *faces = cvHaarDetectObjects(small_image, cascade, storage,
                                       1.1f, 3, 0, cvSize(20, 20));
    cvReleaseImage(&small_image);

    // Canvas matching the original image, used to stroke the detections and
    // to convert detection rects into device space.
    CGImageRef sourceRef = originalImage.CGImage;
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef contextRef =
        CGBitmapContextCreate(NULL, originalImage.size.width, originalImage.size.height,
                              8, originalImage.size.width * 4, colorSpace,
                              kCGImageAlphaPremultipliedLast | kCGBitmapByteOrderDefault);
    CGContextDrawImage(contextRef,
                       CGRectMake(0, 0, originalImage.size.width, originalImage.size.height),
                       sourceRef);
    CGContextSetLineWidth(contextRef, 4);
    CGContextSetRGBStrokeColor(contextRef, 1.0, 1.0, 1.0, 0.5);

    // Tracks the last detected face; stays CGRectZero when nothing is found
    // so the caller gets the untouched image back.
    CGRect face_rect = CGRectZero;

    for (int i = 0; i < faces->total; i++) {
        NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];

        // Detection ran at half resolution, so every component — including
        // y, which the original forgot — must be scaled back up.
        CvRect cvrect = *(CvRect *)cvGetSeqElem(faces, i);
        face_rect = CGContextConvertRectToDeviceSpace(
            contextRef, CGRectMake(cvrect.x * scale, cvrect.y * scale,
                                   cvrect.width * scale, cvrect.height * scale));

        // Publish the most recent face rect through the app delegate.
        facedetectapp = (FaceDetectAppDelegate *)[[UIApplication sharedApplication] delegate];
        facedetectapp.grabcropcoordrect = face_rect;
        NSLog(@" FACE off %f %f %f %f",
              facedetectapp.grabcropcoordrect.origin.x,
              facedetectapp.grabcropcoordrect.origin.y,
              facedetectapp.grabcropcoordrect.size.width,
              facedetectapp.grabcropcoordrect.size.height);

        CGContextStrokeRect(contextRef, face_rect);
        CGContextStrokeEllipseInRect(contextRef, face_rect);

        [pool release];
    }

    UIImage *returnImage;
    if (CGRectIsEmpty(face_rect)) {
        // No face detected — hand back the original input unchanged.
        returnImage = originalImage;
    } else {
        // NOTE(review): like the original answer, this crops the *original*
        // image; the stroked canvas is only used for coordinate conversion
        // and drawing, and is then discarded.
        CGImageRef cropped = CGImageCreateWithImageInRect([originalImage CGImage], face_rect);
        returnImage = [UIImage imageWithCGImage:cropped];
        CGImageRelease(cropped);
    }

    CGContextRelease(contextRef);
    CGColorSpaceRelease(colorSpace);
    cvReleaseMemStorage(&storage);
    cvReleaseHaarClassifierCascade(&cascade);

    return returnImage;
}
答案 1(得分:0)
看看这篇文章。它包括一个演示项目,并解释了如何在处理实时视频时获得最佳性能。
Computer vision with iOS Part 2: Face tracking in live video