Saving the image of a face detected in real time (face tracking) with the 'android-vision' library

Date: 2017-12-15 21:18:04

Tags: java android android-vision

For my university thesis I need an Android program that can detect and recognize faces in real time. I have read about the 'android-vision' library and tested its sample code:

https://github.com/googlesamples/android-vision/tree/master/visionSamples/FaceTracker/app/src/main/java/com/google/android/gms/samples/vision/face/facetracker

My modified code:

package com.google.android.gms.samples.vision.face.facetracker;

import android.content.Context;
import android.graphics.Bitmap;
import android.graphics.Canvas;
import android.graphics.Color;
import android.graphics.Paint;
import android.os.AsyncTask;
import android.os.Environment;
import android.util.Log;
import android.widget.Toast;

import com.google.android.gms.samples.vision.face.facetracker.ui.camera.GraphicOverlay;
import com.google.android.gms.vision.face.Face;

import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.net.Socket;
import java.text.SimpleDateFormat;
import java.util.Date;

/**
 * Graphic instance for rendering face position, orientation, and landmarks within an associated
 * graphic overlay view.
 */
class FaceGraphic extends GraphicOverlay.Graphic
{
    private static final float FACE_POSITION_RADIUS = 10.0f;
    private static final float ID_TEXT_SIZE = 40.0f;
    private static final float ID_Y_OFFSET = 50.0f;
    private static final float ID_X_OFFSET = -50.0f;
    private static final float BOX_STROKE_WIDTH = 5.0f;

    public Canvas canvas1;
    public Face face;
    int i = 0;
    int flag = 0;

    private static final int COLOR_CHOICES[] = {
        Color.BLUE,
        Color.CYAN,
        Color.GREEN,
        Color.MAGENTA,
        Color.RED,
        Color.WHITE,
        Color.YELLOW
    };
    private static int mCurrentColorIndex = 0;

    private Paint mFacePositionPaint;
    private Paint mIdPaint;
    private Paint mBoxPaint;

    private volatile Face mFace;
    private int mFaceId;
    private float mFaceHappiness;
    public Bitmap myBitmap;
    FaceGraphic(GraphicOverlay overlay)
    {
        super(overlay);

        mCurrentColorIndex = (mCurrentColorIndex + 1) % COLOR_CHOICES.length;
        final int selectedColor = COLOR_CHOICES[mCurrentColorIndex];

        mFacePositionPaint = new Paint();
        mFacePositionPaint.setColor(selectedColor);

        mIdPaint = new Paint();
        mIdPaint.setColor(selectedColor);
        mIdPaint.setTextSize(ID_TEXT_SIZE);

        mBoxPaint = new Paint();
        mBoxPaint.setColor(selectedColor);
        mBoxPaint.setStyle(Paint.Style.STROKE);
        mBoxPaint.setStrokeWidth(BOX_STROKE_WIDTH);
    }

    void setId(int id)
    {
        mFaceId = id;
        flag = 1;
    }


    /**
     * Updates the face instance from the detection of the most recent frame.  Invalidates the
     * relevant portions of the overlay to trigger a redraw.
     */
    void updateFace(Face face)
    {
        mFace = face;
        postInvalidate();
    }

    /**
     * Draws the face annotations for position on the supplied canvas.
     */
    @Override
    public void draw(Canvas canvas)
    {
        face = mFace;
        if (face == null)
        {
            return;
        }

        // Draws a circle at the position of the detected face, with the face's track id below.
        float x = translateX(face.getPosition().x + face.getWidth() / 2);
        float y = translateY(face.getPosition().y + face.getHeight() / 2);
 //       canvas.drawCircle(x, y, FACE_POSITION_RADIUS, mFacePositionPaint);
        canvas.drawText("id: " + mFaceId, x + ID_X_OFFSET, y + ID_Y_OFFSET, mIdPaint);
  //      canvas.drawText("happiness: " + String.format("%.2f", face.getIsSmilingProbability()), x - ID_X_OFFSET, y - ID_Y_OFFSET, mIdPaint);
  //      canvas.drawText("right eye: " + String.format("%.2f", face.getIsRightEyeOpenProbability()), x + ID_X_OFFSET * 2, y + ID_Y_OFFSET * 2, mIdPaint);
  //      canvas.drawText("left eye: " + String.format("%.2f", face.getIsLeftEyeOpenProbability()), x - ID_X_OFFSET*2, y - ID_Y_OFFSET*2, mIdPaint);

        // Draws a bounding box around the face.
        float xOffset = scaleX(face.getWidth() / 2.0f);
        float yOffset = scaleY(face.getHeight() / 2.0f);
        float left = x - xOffset;
        float top = y - yOffset;
        float right = x + xOffset;
        float bottom = y + yOffset;
        canvas.drawRect(left, top, right, bottom, mBoxPaint);

        Log.d("MyTag", "hello " + i);
        i++;

        if (flag == 1)
        {
            flag = 0;
            canvas1 = canvas;
            // send face image to server for recognition
            new MyAsyncTask().execute("ppppp");

        }
    }


    class MyAsyncTask extends AsyncTask<String, Void, String>
    {
        private Context context;

        public MyAsyncTask()
        {
            // TODO Auto-generated constructor stub
            //context = applicationContext;
        }

        protected String doInBackground(String... params)
        {
            try
            {

                Log.d("MyTag", "face.getWidth() " + face.getWidth());
                Bitmap temp_bitmap = Bitmap.createBitmap((int) face.getWidth(), (int) face.getHeight(), Bitmap.Config.RGB_565);
                // Fails at runtime: canvas1 is the hardware-accelerated Canvas captured in draw()
                canvas1.setBitmap(temp_bitmap);


            }
            catch (Exception e)
            {
                Log.e("MyTag", "I got an error", e);
                e.printStackTrace();
            }
            Log.d("MyTag", "doInBackground");
            return null;
        }

        protected void onPostExecute(String result) {
            Log.d("MyTag", "onPostExecute " + result);
            // tv2.setText(s);

        }

    }

}

It gives me this error:

12-16 03:08:00.310 22926-23044/com.google.android.gms.samples.vision.face.facetracker E/MyTag: I got an error
    java.lang.UnsupportedOperationException
        at android.view.HardwareCanvas.setBitmap(HardwareCanvas.java:39)
        at com.google.android.gms.samples.vision.face.facetracker.FaceGraphic$MyAsyncTask.doInBackground(FaceGraphic.java:175)
        at com.google.android.gms.samples.vision.face.facetracker.FaceGraphic$MyAsyncTask.doInBackground(FaceGraphic.java:158)

This code can detect faces in real time. For the recognition part I plan to use JavaCV (https://github.com/bytedeco/javacv). If I can capture the face in a Bitmap, I can save it as a .jpg image and then recognize it. Could you give me some advice on how to save the detected face? Thank you.
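For reference, the save step on its own is only a few lines once a valid Bitmap exists; a minimal sketch, assuming myBitmap holds the face image and external storage is writable:

// Minimal sketch: write a Bitmap to a JPEG file. The path is illustrative;
// on newer Android versions prefer an app-specific directory instead.
File file = new File(Environment.getExternalStorageDirectory(), "face.jpg");
FileOutputStream out = new FileOutputStream(file);
myBitmap.compress(Bitmap.CompressFormat.JPEG, 90, out);
out.close();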

1 Answer:

Answer 0 (score: 0):

TL;DR: capture a Frame, process it, then save/export it.

From the Android source (android.view.HardwareCanvas):

@Override
public void setBitmap(Bitmap bitmap) {
    throw new UnsupportedOperationException();
}

This means that the Canvas handed to draw() is hardware accelerated and does not support the setBitmap(Bitmap bitmap) method.
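If all that is needed is a Canvas that can draw into a Bitmap, construct a software Canvas instead of reusing the one handed to draw(); a minimal sketch (names illustrative), with the caveat that such a Bitmap contains only what you draw into it, never the camera pixels:

// A Bitmap-backed Canvas is software rendered, so drawing into it works.
// Caveat: the Bitmap holds only what you draw, not the camera preview.
Bitmap faceBitmap = Bitmap.createBitmap((int) face.getWidth(), (int) face.getHeight(), Bitmap.Config.RGB_565);
Canvas softwareCanvas = new Canvas(faceBitmap);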

Your implementation has several problems.

First: you are spawning lots of AsyncTasks, many of them useless/redundant

If you are using the com.google.android.gms.vision.* classes, you can receive around 30 events per second. When an event fires, the frame you capture afterwards is almost certainly not the frame that was evaluated. You are racing against your conditions.
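To make the redundancy concrete: draw() runs for every preview frame, so the code above can spawn a new task before the previous one has finished. A minimal guard, sketched against the question's MyAsyncTask (the field and method names are illustrative), simply drops the extra requests:

// import java.util.concurrent.atomic.AtomicBoolean;  (top of FaceGraphic.java)

// Illustrative guard inside FaceGraphic: allow only one capture task in
// flight; per-frame requests that arrive while it runs are dropped.
private final AtomicBoolean captureInFlight = new AtomicBoolean(false);

void maybeStartCapture() {
    if (captureInFlight.compareAndSet(false, true)) {
        new MyAsyncTask() {
            @Override
            protected void onPostExecute(String result) {
                super.onPostExecute(result);
                captureInFlight.set(false); // ready for the next capture
            }
        }.execute("capture");
    }
}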

Second: using a Canvas to set a Bitmap

When you use a class, always check its documentation and its ancestors, and finally its implementation.

An ImageView will do exactly what you expect: it receives a Bitmap and then sets it. All the race conditions are handled by the OS, and redundant requests are discarded by the Main Looper.
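For example, assuming the layout contains an ImageView named preview (an assumption; the FaceTracker sample has no such view):

// Hand the Bitmap to the ImageView on the main thread; redundant updates
// are coalesced by the Main Looper rather than by your own bookkeeping.
preview.post(new Runnable() {
    @Override
    public void run() {
        preview.setImageBitmap(faceBitmap);
    }
});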

Finally

If what you need is "take a picture when someone smiles with their eyes closed", then you need to invert your logic. Use the source to produce frames, then process each frame, and if a frame meets your criteria, save it.
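A minimal sketch of that inversion, assuming the camera source delivers NV21 frames as in the FaceTracker sample; SavingDetector, its constructor arguments, and meetsCriteria are illustrative names, not part of the library:

import android.graphics.ImageFormat;
import android.graphics.Rect;
import android.graphics.YuvImage;
import android.util.Log;
import android.util.SparseArray;

import com.google.android.gms.vision.Detector;
import com.google.android.gms.vision.Frame;
import com.google.android.gms.vision.face.Face;

import java.io.File;
import java.io.FileOutputStream;
import java.nio.ByteBuffer;

// Illustrative wrapper (not part of the library): it runs the real detector,
// inspects the result, and saves the exact frame that produced it, so there
// is no race against the overlay's draw() cycle.
class SavingDetector extends Detector<Face> {
    private final Detector<Face> delegate;
    private final File outputDir;

    SavingDetector(Detector<Face> delegate, File outputDir) {
        this.delegate = delegate;
        this.outputDir = outputDir;
    }

    @Override
    public SparseArray<Face> detect(Frame frame) {
        SparseArray<Face> faces = delegate.detect(frame);
        if (faces.size() > 0 && meetsCriteria(faces.valueAt(0))) {
            // For camera frames this buffer holds the full NV21 image data.
            ByteBuffer buffer = frame.getGrayscaleImageData();
            byte[] nv21 = new byte[buffer.remaining()];
            buffer.get(nv21);
            buffer.rewind(); // leave the buffer as we found it
            saveAsJpeg(nv21, frame.getMetadata().getWidth(), frame.getMetadata().getHeight());
        }
        return faces;
    }

    private boolean meetsCriteria(Face face) {
        return face.getIsSmilingProbability() > 0.7f; // replace with your condition
    }

    private void saveAsJpeg(byte[] nv21, int width, int height) {
        try {
            YuvImage yuv = new YuvImage(nv21, ImageFormat.NV21, width, height, null);
            File file = new File(outputDir, System.currentTimeMillis() + ".jpg");
            FileOutputStream out = new FileOutputStream(file);
            // Compress the whole frame; pass the face's bounding box to crop instead.
            yuv.compressToJpeg(new Rect(0, 0, width, height), 90, out);
            out.close();
        } catch (Exception e) {
            Log.e("SavingDetector", "could not save frame", e);
        }
    }
}

You would then build the CameraSource with new SavingDetector(new FaceDetector.Builder(context).build(), outputDir) in place of the bare FaceDetector. A production version should also forward isOperational() and release() to the delegate, and move the disk I/O off the detection thread.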

This codelabs project does almost what you want, and it explains the relevant details very well.