我接手了一个人脸识别项目:为了防止欺诈,需要用户录制一段视频。用户录制一个约 5 秒的视频,代码从中提取他的面部并发送给银行。结果我的代码报出了一个非常奇怪的错误。代码从相册中获取视频,但把它保存成了 .jpg,并且在按下"扫描"按钮时出现如下错误:
E/flutter (10653): [ERROR:flutter/lib/ui/ui_dart_state.cc(166)] Unhandled Exception: 'package:firebase_ml_vision/src/firebase_vision.dart': Failed assertion: line 111 pos 12: 'imageFile != null': is not true.
我到处查找资料,却没有找到任何有用的解决办法。
import 'dart:io';
import 'package:flutter/material.dart';
import 'package:file_picker/file_picker.dart';
import 'package:image_picker/image_picker.dart';
import 'package:firebase_ml_vision/firebase_ml_vision.dart';
import 'package:flutter_app_face/face_detector_controller.dart';
/// Screen that lets the user pick a video and run face detection on it.
class MyFaceDetectorView extends StatefulWidget {
  @override
  _MyFaceDetectorViewState createState() {
    return _MyFaceDetectorViewState();
  }
}
class _MyFaceDetectorViewState extends State<MyFaceDetectorView> {
  final _picker = ImagePicker();

  // Last video picked from the gallery; null until the user picks one
  // (or if they cancel the picker).
  File _videoFile;

  @override
  Widget build(BuildContext context) {
    return Scaffold(
      appBar: AppBar(
        title: Text(
          "Face Detector",
          style: TextStyle(fontSize: 30),
        ),
        centerTitle: true,
      ),
      body: myFaceView(),
    );
  }

  /// Builds the body: a "pick video" button and a "Scan" button.
  Widget myFaceView() {
    return SingleChildScrollView(
      child: Container(
        child: Column(
          children: <Widget>[
            Container(
              child: Container(
                  child: Column(
                children: [
                  Padding(
                    padding: EdgeInsets.all(8.0),
                    child: Text(
                      "Select a file",
                      textAlign: TextAlign.center,
                      style: TextStyle(fontSize: 20),
                    ),
                  ),
                  Row(
                    mainAxisAlignment: MainAxisAlignment.center,
                    children: [
                      Padding(
                        padding: EdgeInsets.all(10),
                        child: CircleAvatar(
                          child: IconButton(
                            icon: Icon(Icons.video_library),
                            onPressed: () async => await pickVideo(),
                          ),
                        ),
                      ),
                    ],
                  ),
                  Padding(
                    padding: EdgeInsets.only(bottom: 18),
                    child: RaisedButton(
                      child: Text("Scan", style: TextStyle(fontSize: 16)),
                      onPressed: scanner,
                      color: Colors.orange,
                    ),
                  ),
                ],
              )),
            ),
          ],
        ),
      ),
    );
  }

  /// Runs face detection on the previously picked video file.
  ///
  /// BUG FIX: the original declared `List<Face> faces;` and immediately
  /// null-checked it (always null, dead branch), then unconditionally passed
  /// `_videoFile` to the controller. When no video had been picked yet,
  /// `_videoFile` was null, which triggered the reported
  /// "'imageFile != null': is not true" assertion inside firebase_ml_vision.
  Future<void> scanner() async {
    if (_videoFile == null) {
      // Nothing picked yet (or the picker was cancelled) — tell the user
      // instead of crashing inside the ML Vision plugin.
      return _alert();
    }
    FaceDetectorController faceDetec = FaceDetectorController();
    await faceDetec.pickVideo(_videoFile);
    print("scanned");
  }

  /// Shows a dialog telling the user no usable file/faces were found.
  Future<void> _alert() {
    return showDialog<void>(
      context: context,
      builder: (BuildContext context) {
        return AlertDialog(
          title: Text('No faces found'),
          // BUG FIX: the original single-quoted string contained an
          // unescaped apostrophe ('We didn't …') and did not compile.
          content: const Text(
              "We didn't find anything in your file, please try again."),
          actions: <Widget>[
            FlatButton(
              child: Text('Ok'),
              onPressed: () {
                Navigator.of(context).pop();
              },
            ),
          ],
        );
      },
    );
  }

  /// Lets the user pick a <=5s video from the gallery and stores it.
  Future<void> pickVideo() async {
    final PickedFile videoFile = await _picker.getVideo(
      source: ImageSource.gallery,
      maxDuration: Duration(seconds: 5),
    );
    // BUG FIX: getVideo returns null when the user cancels the picker;
    // the original dereferenced videoFile.path unconditionally.
    if (videoFile == null) {
      return;
    }
    setState(() {
      this._videoFile = File(videoFile.path);
    });
    print("Finish");
  }
}
控制器(FaceDetectorController):
import 'dart:io';
import 'package:firebase_ml_vision/firebase_ml_vision.dart';
class FaceDetectorController {
  /// Runs ML Vision face detection on [video] and returns the detected faces.
  ///
  /// NOTE(review): firebase_ml_vision's [FirebaseVisionImage.fromFile]
  /// expects a still IMAGE file. Passing a raw video file is the likely
  /// root cause of the asker's problem — a frame (e.g. a .jpg) should be
  /// extracted from the video before calling this. TODO confirm with the
  /// plugin documentation.
  ///
  /// BUG FIXES vs. original:
  ///  - guards against a null [video] (the source of the
  ///    "'imageFile != null'" assertion),
  ///  - calls processImage() ONCE (the original ran detection twice and
  ///    discarded the first result),
  ///  - closes the detector to release native resources,
  ///  - returns the faces so the caller can react (previously nothing was
  ///    returned, so the UI could never know whether faces were found).
  Future<List<Face>> pickVideo(File video) async {
    if (video == null) {
      print('No file selected');
      return <Face>[];
    }

    final FirebaseVisionImage visionImage = FirebaseVisionImage.fromFile(video);
    print('VisionImage: $video');

    final FaceDetectorOptions faceDetectorOptions = FaceDetectorOptions(
      enableTracking: true,
      enableLandmarks: true,
      enableContours: false,
      enableClassification: true,
      minFaceSize: 0.1,
      mode: FaceDetectorMode.accurate,
    );
    final FaceDetector faceDetector =
        FirebaseVision.instance.faceDetector(faceDetectorOptions);

    try {
      // Single detection pass (the original invoked processImage twice).
      final List<Face> faces = await faceDetector.processImage(visionImage);
      print('Number of Faces: ${faces.length}');

      // Diagnostic logging for each detected face.
      for (Face face in faces) {
        final double rotY = face.headEulerAngleY;
        final double rotZ = face.headEulerAngleZ;
        print('Face Rotation: $rotY, $rotZ');

        if (face.trackingId != null) {
          final int id = face.trackingId;
          print('Face: $id');
        }

        final FaceLandmark leftEar = face.getLandmark(FaceLandmarkType.leftEar);
        if (leftEar != null) {
          print('left ear position: ${leftEar.position}');
        }

        if (face.smilingProbability != null) {
          print('Smile: ${face.smilingProbability}');
        }
        if (face.leftEyeOpenProbability != null) {
          print('Left eye open: ${face.leftEyeOpenProbability}');
        }
        if (face.rightEyeOpenProbability != null) {
          print('Right eye open: ${face.rightEyeOpenProbability}');
        }
      }
      return faces;
    } finally {
      // Release the native detector even if processing throws.
      faceDetector.close();
    }
  }
}
答案 0(得分:0)
您是否尝试过python的deepface软件包?它会访问您的网络摄像头并实时应用人脸识别。
/// Maps a Firestore [QuerySnapshot] to a list of [Student]s,
/// converting each document's 'grades' entries into [Mark]s.
///
/// BUG FIX: the original called the Mark constructor with `name = value`
/// pairs and a stray `;` inside the argument list — named arguments use
/// `:` in Dart, so the snippet did not compile. Also guards against a
/// document with no 'grades' field (previously a null forEach crash).
List<Student> _firebaseStudentsFromSnapshot(QuerySnapshot snapshot) {
  return snapshot.documents.map((doc) {
    final List<dynamic> gradeEntries = doc.data['grades'] ?? const [];
    final List<Mark> marks = [
      for (final element in gradeEntries)
        Mark(
          mark: element['mark'],
          grade: element['grade'],
          markedBy: element['markedBy'],
          feedback: element['feedback'],
        ),
    ];
    return Student(
      name: doc.data['name'] ?? '',
      subject: doc.data['subject'] ?? '',
      marks: marks,
    );
  }).toList();
}
在这里,您需要将面部图像数据库存储在 C:/my_db 文件夹中。