Browsing images and face detection

Time: 2013-11-04 07:59:46

Tags: android image-processing face-detection

I'm having trouble detecting faces in an image picked from the gallery. I know the problem is that I don't know how to apply the face detection code I've been testing to the picked image; the sample code I'm testing against was written for a locally stored image. I believe I'm close, but can you help me?

First, I created a gallery method:

protected void gallery() {
    Intent intent = new Intent();
    intent.setType("image/*");
    intent.setAction("android.intent.action.GET_CONTENT");
    startActivityForResult(Intent.createChooser(intent, "Choose An Image"), 1);
}
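
Side note: as far as I know, the action string above is just the value of the Intent.ACTION_GET_CONTENT constant, so (if I'm not mistaken) the same method could be sketched with the constant instead:

// Equivalent picker, using the framework constant rather than the raw action string
protected void gallery() {
    Intent intent = new Intent(Intent.ACTION_GET_CONTENT);
    intent.setType("image/*");
    startActivityForResult(Intent.createChooser(intent, "Choose An Image"), 1);
}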

I'm still learning about intents and so on, but as far as I understand it I need an intent to open Android's gallery: I use setAction to ask for content, and the gallery passes its result back through an intent as well. With that in mind, I try to pull the picked image's URI out of that intent. So this is what I do next:

protected void onActivityResult(int requestCode, int resultCode, Intent intent) {
    super.onActivityResult(requestCode, resultCode, intent);
    if(requestCode == 1 && resultCode == RESULT_OK)
    {
        Uri uri = intent.getData();
        try {
            InputStream is = getContentResolver().openInputStream(uri);
            Bitmap bitmap = BitmapFactory.decodeStream(is);
            ImageView image = (ImageView)findViewById(R.id.img_view);
            image.setImageBitmap(bitmap);

        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}

So this is the part that confuses me. I'm guessing the InputStream holds the image data? Well, I tried to apply the face detection code inside that same try-catch; I figured that right after image.setImageBitmap(bitmap) is done is the time to run face detection. Here is the face detection code:

protected void onActivityResult(int requestCode, int resultCode, Intent intent) {
    super.onActivityResult(requestCode, resultCode, intent);
    if(requestCode == 1 && resultCode == RESULT_OK)
    {
        Uri uri = intent.getData();
        try {
            InputStream is = getContentResolver().openInputStream(uri);
            Bitmap bitmap = BitmapFactory.decodeStream(is);
            ImageView image = (ImageView)findViewById(R.id.image_view);
            image.setImageBitmap(bitmap);

            BitmapFactory.Options options = new BitmapFactory.Options();
            options.inPreferredConfig=Bitmap.Config.RGB_565;
            bitmap = BitmapFactory.decodeResource(getResources(), R.id.img_view, options);

            imageWidth = bitmap.getWidth();
            imageHeight = bitmap.getHeight();
            detectedFaces = new FaceDetector.Face[NUM_FACES];
            faceDetector= new FaceDetector(imageWidth, imageHeight, NUM_FACES);
            NUM_FACE_DETECTED = faceDetector.findFaces(bitmap, detectedFaces);
            mIL.invalidate();
        } catch (Exception e) {
            e.printStackTrace();
        }   
    }
}

I don't know how to change "mFaceBitmap = BitmapFactory.decodeResource(getResources(), R.drawable.smilingfaces, options);" from a locally stored image to the image that I think is sitting in the InputStream (or is it? where does the selected image actually live?). I came up with the idea of pointing it at the ImageView instead, since the image is in the layout, but I don't really understand how everything gets passed around and works together. Anyway, that snippet is supposed to detect the faces, and then onDraw() draws squares around the detected faces. I wasn't sure where to put it, so I put it outside onActivityResult():
protected void onDraw(Canvas canvas) {

    Paint myPaint = new Paint();
    myPaint.setColor(Color.RED);
    myPaint.setStyle(Paint.Style.STROKE);
    myPaint.setStrokeWidth(3);
    myPaint.setDither(true);

    for (int count = 0; count < NUM_FACE_DETECTED; count++) {
        Face face = detectedFaces[count];
        PointF midPoint = new PointF();
        face.getMidPoint(midPoint);

        eyeDistance = face.eyesDistance();
        canvas.drawRect(midPoint.x-eyeDistance, midPoint.y-eyeDistance, midPoint.x+eyeDistance, midPoint.y+eyeDistance, myPaint);   
    }
}
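
In case it helps explain what I'm aiming for, here is a minimal sketch of the substitution I'm asking about. My guess is that the detector should simply be fed the same bitmap that was decoded from the picked URI (in RGB_565, which I gather FaceDetector expects), with no decodeResource() call at all. This is untested; the fields (NUM_FACES, detectedFaces, faceDetector) and R.id.img_view are just the ones from my code above:

// Sketch of what I think onActivityResult() should boil down to:
// decode the picked Uri once, show it, and run the detector on that same bitmap.
private int detectFacesFromUri(Uri uri) throws FileNotFoundException {
    BitmapFactory.Options options = new BitmapFactory.Options();
    options.inPreferredConfig = Bitmap.Config.RGB_565;   // FaceDetector wants RGB_565

    InputStream is = getContentResolver().openInputStream(uri);
    Bitmap bitmap = BitmapFactory.decodeStream(is, null, options);

    ImageView image = (ImageView) findViewById(R.id.img_view);
    image.setImageBitmap(bitmap);

    detectedFaces = new FaceDetector.Face[NUM_FACES];
    faceDetector = new FaceDetector(bitmap.getWidth(), bitmap.getHeight(), NUM_FACES);
    return faceDetector.findFaces(bitmap, detectedFaces);
}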

Any suggestions? I'm really close to getting this working!

1 Answer:

Answer 0 (score: 2):

I understand what you're really after. I'll write out the full code for you and then go through it.

In this code I use an image view in the layout and two classes: an Activity class and a custom view class that displays the image.

I create two buttons: one to pick an image from the gallery and display it (the image to run face detection on), and a second one to detect the faces in the picked image.

First, the layout, activity_main.xml:

<?xml version="1.0" encoding="utf-8"?>
<FrameLayout xmlns:android="http://schemas.android.com/apk/res/android"
    android:layout_width="fill_parent"
    android:layout_height="fill_parent" >

    <com.simpleapps.facedetection.MyView
        android:id="@+id/faceview"
        android:layout_width="fill_parent"
        android:layout_height="fill_parent" />

    <LinearLayout
        android:orientation="horizontal"
        android:layout_width="fill_parent"
        android:layout_height="fill_parent"
        android:layout_gravity="top" >

        <ImageView
            android:id="@+id/gallery"
            android:layout_width="wrap_content"
            android:layout_height="wrap_content"
            android:layout_marginRight="10dp"
            android:layout_weight="1"
            android:background="@drawable/gallery" />

        <ImageView
            android:id="@+id/detectf"
            android:layout_width="wrap_content"
            android:layout_height="wrap_content"
            android:layout_marginRight="10dp"
            android:layout_weight="1"
            android:background="@drawable/detect" />

    </LinearLayout>
</FrameLayout>

Now the Activity class:

MainActivity.java

public class MainActivity extends Activity {

    public MyView faceview;

    public static Bitmap defaultBitmap;

    // Screen size is read once here and used by MyView when scaling the bitmap.
    public static int screenWidth;
    public static int screenHeight;

    private ImageView gallery, detectf;
    private Uri imageURI;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        requestWindowFeature(Window.FEATURE_NO_TITLE);
        getWindow().setFlags(WindowManager.LayoutParams.FLAG_FULLSCREEN,
                WindowManager.LayoutParams.FLAG_FULLSCREEN);
        setRequestedOrientation(ActivityInfo.SCREEN_ORIENTATION_PORTRAIT);

        setContentView(R.layout.activity_main);

        DisplayMetrics displaymetrics = new DisplayMetrics();
        getWindowManager().getDefaultDisplay().getMetrics(displaymetrics);
        screenHeight = displaymetrics.heightPixels;
        screenWidth = displaymetrics.widthPixels;

        faceview = (MyView) findViewById(R.id.faceview);
        gallery = (ImageView) findViewById(R.id.gallery);
        detectf = (ImageView) findViewById(R.id.detectf);

        // Show a default image (R.drawable.face) until the user picks one.
        // FaceDetector only works on RGB_565 bitmaps, so decode in that config.
        BitmapFactory.Options bitmapFatoryOptions = new BitmapFactory.Options();
        bitmapFatoryOptions.inPreferredConfig = Bitmap.Config.RGB_565;
        defaultBitmap = BitmapFactory.decodeResource(getResources(), R.drawable.face, bitmapFatoryOptions);
        faceview.setImage(defaultBitmap);

        // First button: open the gallery so the user can pick an image.
        gallery.setOnClickListener(new OnClickListener() {
            public void onClick(View v) {
                Intent intent = new Intent(Intent.ACTION_GET_CONTENT);
                intent.setType("image/*");
                startActivityForResult(intent, 0);
            }
        });

        // Second button: run face detection on whatever image is currently shown.
        detectf.setOnClickListener(new OnClickListener() {
            public void onClick(View v) {
                faceview.facedetect();
            }
        });
    }

    @Override
    public void onActivityResult(int requestCode, int resultCode, Intent data) {
        super.onActivityResult(requestCode, resultCode, data);

        if (resultCode == Activity.RESULT_OK) {
            if (requestCode == 0) {
                imageURI = data.getData();
                try {
                    // Decode the picked image straight from its content Uri, again in RGB_565.
                    BitmapFactory.Options bitmapFatoryOptions = new BitmapFactory.Options();
                    bitmapFatoryOptions.inPreferredConfig = Bitmap.Config.RGB_565;

                    Bitmap b = BitmapFactory.decodeStream(
                            getContentResolver().openInputStream(imageURI), null, bitmapFatoryOptions);

                    // Hand the bitmap to the view; setImage() also triggers a redraw.
                    faceview.setImage(b);
                } catch (FileNotFoundException e) {
                    e.printStackTrace();
                }
            }
        } else {
            // The user backed out of the picker; keep showing the current image.
            Log.e("result", "BAD");
        }
    }
}
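
One caveat: inPreferredConfig is only a hint, so depending on the source image the decoded bitmap may not actually come back in RGB_565, and FaceDetector.findFaces() only works on RGB_565 bitmaps (and, if I remember correctly, the image width must be even). If detection keeps returning 0 for pictures that clearly contain a face, a small helper like this (hypothetical, not part of the code above) can force the conversion before the bitmap is handed to the view:

// Hypothetical helper: make sure a bitmap really is RGB_565 before face detection.
private static Bitmap toRgb565(Bitmap src) {
    if (src.getConfig() == Bitmap.Config.RGB_565) {
        return src;
    }
    // copy() returns a new bitmap in the requested config; false = not mutable.
    return src.copy(Bitmap.Config.RGB_565, false);
}

You would then pass toRgb565(b) to faceview instead of b in onActivityResult().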

Now the View class.

MyView.java

public class MyView extends View {

    private FaceDetector.Face[] detectedFaces;
    private int NUMBER_OF_FACES = 10;
    private FaceDetector faceDetector;
    private int NUMBER_OF_FACE_DETECTED;
    private FaceDetector.Face face1;
    private float eyeDistance;

    public Paint myPaint;

    public Bitmap resultBmp;
    public Bitmap myBitmap;

    public PointF midPoint1;

    private int w, h;
    private float x, y;

    public MyView(Context context, AttributeSet attrs) {
        super(context, attrs);
    }

    public void setImage(Bitmap bitmap) {
        myBitmap = bitmap;
        midPoint1 = null;   // forget any box from a previously detected face
        invalidate();
    }

    public void facedetect() {
        myPaint = new Paint();
        myPaint.setColor(Color.GREEN);
        myPaint.setStyle(Paint.Style.STROKE);
        myPaint.setStrokeWidth(3);

        // Detection runs on resultBmp, the scaled copy that onDraw() put on screen,
        // so the returned coordinates line up with what the user sees.
        detectedFaces = new FaceDetector.Face[NUMBER_OF_FACES];
        faceDetector = new FaceDetector(resultBmp.getWidth(), resultBmp.getHeight(), NUMBER_OF_FACES);
        NUMBER_OF_FACE_DETECTED = faceDetector.findFaces(resultBmp, detectedFaces);

        System.out.println("faces detected are " + NUMBER_OF_FACE_DETECTED);

        for (int count = 0; count < NUMBER_OF_FACE_DETECTED; count++) {
            if (count == 0) {
                // Remember the first face's mid point and eye distance for onDraw().
                face1 = detectedFaces[count];
                midPoint1 = new PointF();
                face1.getMidPoint(midPoint1);
                eyeDistance = face1.eyesDistance();
            }
        }

        invalidate();

        if (NUMBER_OF_FACE_DETECTED == 0) {
            Toast.makeText(getContext(), "no faces detected", Toast.LENGTH_LONG).show();
        } else {
            Toast.makeText(getContext(), "faces detected " + NUMBER_OF_FACE_DETECTED, Toast.LENGTH_LONG).show();
        }
    }

    protected void onDraw(Canvas canvas) {
        if (myBitmap != null) {
            w = myBitmap.getWidth();
            h = myBitmap.getHeight();

            // Scale the bitmap to the screen width, keeping its aspect ratio,
            // and draw it centred on the canvas.
            int widthofBitMap = MainActivity.screenWidth;
            int heightofBitMap = widthofBitMap * h / w;
            float dx = (MainActivity.screenWidth - widthofBitMap) / 2f;
            float dy = (MainActivity.screenHeight - heightofBitMap) / 2f;

            resultBmp = Bitmap.createScaledBitmap(myBitmap, widthofBitMap, heightofBitMap, true);
            canvas.drawBitmap(resultBmp, dx, dy, null);

            // Draw a box around the first detected face, shifted by the same
            // offset the bitmap itself was drawn at.
            if (midPoint1 != null && myPaint != null) {
                canvas.drawRect(dx + midPoint1.x - eyeDistance, dy + midPoint1.y - eyeDistance,
                        dx + midPoint1.x + eyeDistance, dy + midPoint1.y + eyeDistance, myPaint);
            }
        }
    }

    @Override
    public boolean onTouchEvent(MotionEvent event) {
        // Just track the last touch position; kept from the original code, not used for detection.
        switch (event.getAction()) {
            case MotionEvent.ACTION_DOWN:
            case MotionEvent.ACTION_MOVE:
                x = event.getX();
                y = event.getY();
                break;
            case MotionEvent.ACTION_UP:
            default:
                break;
        }
        invalidate();
        return true;
    }
}
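
The facedetect() loop above only remembers the first face. If you want a box around every detected face, one possible variation (a sketch, not part of the code above) is to keep all the midpoints and eye distances in lists and loop over them in onDraw():

// Hypothetical extension inside MyView: remember every detected face, not just the first.
// (Uses java.util.List / java.util.ArrayList.)
private final List<PointF> midPoints = new ArrayList<PointF>();
private final List<Float> eyeDistances = new ArrayList<Float>();

private void rememberFaces() {
    midPoints.clear();
    eyeDistances.clear();
    for (int i = 0; i < NUMBER_OF_FACE_DETECTED; i++) {
        PointF mid = new PointF();
        detectedFaces[i].getMidPoint(mid);
        midPoints.add(mid);
        eyeDistances.add(detectedFaces[i].eyesDistance());
    }
}

// Then, in onDraw(), after drawing resultBmp (dx/dy are the same offsets used to place the bitmap):
//     for (int i = 0; i < midPoints.size(); i++) {
//         PointF mid = midPoints.get(i);
//         float d = eyeDistances.get(i);
//         canvas.drawRect(dx + mid.x - d, dy + mid.y - d, dx + mid.x + d, dy + mid.y + d, myPaint);
//     }

Call rememberFaces() right after findFaces() in facedetect().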

It took me a while to write this code. I hope it helps. If you run into any errors, just ask.