Face recognition in video, Flutter

Time: 2020-07-10 12:47:56

Tags: flutter dart mobile

I have been working on a Flutter app recently and I need to run face detection inside the app, to pick up the faces recorded in a video. However, I could not find anything that helped me with this.

I have already tried mlKit, firebase_ml_vision, flutter_face_recognition and a few other approaches, but with none of them was I able to get Flutter to detect the faces in the recorded video.
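As far as I understand, the FaceDetector in firebase_ml_vision only processes still images, so a possible workaround would be to extract frames from the recorded video first and run the detector on each frame. Below is a minimal sketch of that idea, assuming the third-party video_thumbnail package (which my code further down does not use) to grab a single frame; detectFacesInVideoFrame is just an illustrative helper name:

    import 'dart:io';

    import 'package:firebase_ml_vision/firebase_ml_vision.dart';
    // Assumed third-party dependency for pulling a frame out of the video.
    import 'package:video_thumbnail/video_thumbnail.dart';

    /// Extracts one frame from [video] at [timeMs] milliseconds and runs the
    /// firebase_ml_vision face detector on that frame.
    Future<List<Face>> detectFacesInVideoFrame(File video, {int timeMs = 0}) async {
      // Grab a single frame as a JPEG file; any frame-extraction method would do.
      final String framePath = await VideoThumbnail.thumbnailFile(
        video: video.path,
        imageFormat: ImageFormat.JPEG,
        timeMs: timeMs,
        quality: 90,
      );

      // From here on it is the same flow as detecting faces in a normal photo.
      final FirebaseVisionImage visionImage =
          FirebaseVisionImage.fromFilePath(framePath);
      final FaceDetector detector = FirebaseVision.instance
          .faceDetector(FaceDetectorOptions(mode: FaceDetectorMode.accurate));

      final List<Face> faces = await detector.processImage(visionImage);
      await detector.close();
      return faces;
    }

Running this for several timestamps spread across the clip would approximate scanning the whole recording.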

My code is below: the first block is the controller, the second is the main class.

import 'dart:io';
import 'package:firebase_ml_vision/firebase_ml_vision.dart';

class FaceDetectorController {

  // Runs face detection on the selected video file and returns the faces found.
  Future<List<Face>> pickVideo(File video) async {
    final FirebaseVisionImage visionImage = FirebaseVisionImage.fromFile(video);
    print('VisionImage: $video');

    final FaceDetectorOptions faceDetectorOptions = FaceDetectorOptions(
      enableTracking: true,
      enableLandmarks: true,
      enableContours: false,
      enableClassification: true,
      minFaceSize: 0.1,
      mode: FaceDetectorMode.accurate,
    );

    final FaceDetector faceDetector =
        FirebaseVision.instance.faceDetector(faceDetectorOptions);

    // Run the detector once and await the detected faces.
    final List<Face> faces = await faceDetector.processImage(visionImage);
    print('Quantidade de Faces: ${faces.length}');

    for (Face face in faces) {
      var boundingBox = face.boundingBox;
      final double rotY = face.headEulerAngleY;
      final double rotZ = face.headEulerAngleZ;
      print('Rotação do Rosto: $rotY, $rotZ');

      if (face.trackingId != null) {
        final int id = face.trackingId;
        print('Rosto: $id');
      }

      final FaceLandmark leftEar = face.getLandmark(FaceLandmarkType.leftEar);
      if (leftEar != null) {
        var leftEarPos = leftEar.position;
        print('posição da orelha esquerda: ${leftEarPos}');
      }

      if (face.smilingProbability != null) {
        var smileProb = face.smilingProbability;
        print('Sorrindo: $smileProb');
      }

      if (face.leftEyeOpenProbability != null) {
        var leftEyeOpen = face.leftEyeOpenProbability;
        print('Olho Esquerdo aberto: $leftEyeOpen');
      }
      if (face.rightEyeOpenProbability != null) {
        var rightEyeOpen = face.rightEyeOpenProbability;
        print('Olho Direito aberto: $rightEyeOpen');
      }
    }

    // Free the native detector and hand the detected faces back to the caller.
    await faceDetector.close();
    return faces;
  }
}

// ----------------------------------------------------------------------------------------------

import 'dart:io';
import 'package:firebase_ml_vision/firebase_ml_vision.dart';
import 'package:flutter/material.dart';
import 'package:file_picker/file_picker.dart';
import 'package:image_picker/image_picker.dart';
import 'package:flutter_app_face/face_detector_controller.dart';

class MyFaceDetectorView extends StatefulWidget {
  @override
  _MyFaceDetectorViewState createState() => _MyFaceDetectorViewState();
}

class _MyFaceDetectorViewState extends State<MyFaceDetectorView> {
  final _picker = ImagePicker();
  File _imageFile;
  File _videoFile;

  @override
  Widget build(BuildContext context) {
    return Scaffold(
      appBar: AppBar(
        title: Text(
          "Detector de Face",
          style: TextStyle(fontSize: 30),
        ),
        centerTitle: true,
      ),
      body: myFaceView(),
    );
  }

  Widget myFaceView() {
    return SingleChildScrollView(
      child: Container(
        child: Column(
          children: <Widget>[
            Container(
              child: Container(
                  child: Column(
                children: [
                  Padding(
                    padding: EdgeInsets.all(8.0),
                    child: Text(
                      "Selecione um arquivo",
                      textAlign: TextAlign.center,
                      style: TextStyle(fontSize: 20),
                    ),
                  ),
                  Row(
                    mainAxisAlignment: MainAxisAlignment.center,
                    children: [
                      Padding(
                        padding: EdgeInsets.all(10),
                        child: CircleAvatar(
                          child: IconButton(
                            icon: Icon(Icons.video_library),
                            onPressed: () async => pickVideo(),
                          ),
                        ),
                      ),
                    ],
                  ),
                  Padding(
                    padding: EdgeInsets.only(bottom: 18),
                    child: RaisedButton(
                      child: Text("Escanear", style: TextStyle(fontSize: 16)),
                      onPressed: scanner,
                      color: Colors.orange,
                    ),
                  ),
                ],
              )),
            ),
          ],
        ),
      ),
    );
  }

  Future<void> scanner() async {
    // Nothing to scan until a video has been picked.
    if (_videoFile == null) return;

    final FaceDetectorController faceDetec = FaceDetectorController();
    final List<Face> faces = await faceDetec.pickVideo(_videoFile);

    // Warn the user when the detector finds no faces in the selected video.
    if (faces.isEmpty) {
      return _alert();
    }
    print("escaneado");
  }

  Future<void> _alert() {
    return showDialog<void>(
      context: context,
      builder: (BuildContext context) {
        return AlertDialog(
          title: Text('Nenhum rosto encontrado'),
          content: const Text(
              'Não encontramos nada no seu arquivo, tente novamente.'),
          actions: <Widget>[
            FlatButton(
              child: Text('Ok'),
              onPressed: () {
                Navigator.of(context).pop();
              },
            ),
          ],
        );
      },
    );
  }

  Future<void> pickImage() async {
    final PickedFile picFile =
        await _picker.getImage(source: ImageSource.camera);
    // getImage returns null if the user cancels the camera.
    if (picFile == null) return;

    setState(() {
      this._imageFile = File(picFile.path);
    });
/*
Padding(
  padding: EdgeInsets.all(10),
  child: CircleAvatar(
    child: IconButton(
        icon: Icon(Icons.insert_photo),
        onPressed: () async => pickImage(),
        ),
      ),
   ),

*/
  }

  Future<void> pickVideo() async {
    //final videoFile = await FilePicker.getFile(type: FileType.video);
    final PickedFile videoFile = await _picker.getVideo(
      source: ImageSource.gallery,
      maxDuration: Duration(seconds: 5),
    );

    // getVideo returns null if the user cancels the picker.
    if (videoFile == null) return;

    setState(() {
      this._videoFile = File(videoFile.path);
    });
  }
}

0 Answers:

No answers yet.