OpenCV(4.2.0) C:\projects\opencv-python\opencv\modules\videoio\src\cap_images.cpp:253: error: (-5:Bad argument) CAP_IMAGES: can't find starting number (in the name of file): head_pose_poc.webm in function 'cv::icvExtractPattern'
[ERROR - System]Cannot read from source: head_pose_poc.webm
I am trying to run head pose estimation with OpenCV and dlib, but I get the error above. I am on Windows and the error shows up in the command prompt. I don't know what is wrong here. This is the code:
#!/usr/bin/env python3
import os
import cv2
import sys
import dlib
import argparse
import numpy as np
# helper modules
from drawFace import draw
import reference_world as world
PREDICTOR_PATH = os.path.join("models", "shape_predictor_68_face_landmarks.dat")
if not os.path.isfile(PREDICTOR_PATH):
    print("[ERROR] USE models/downloader.sh to download the predictor")
    sys.exit()
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--focal",
                    type=float,
                    help="Calibrated focal length of the camera")
args = parser.parse_args()
def main(source=0):
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(PREDICTOR_PATH)

    cap = cv2.VideoCapture(source)

    while True:
        GAZE = "Face Not Found"
        ret, img = cap.read()
        if not ret:
            print(f"[ERROR - System]Cannot read from source: {source}")
            break

        faces = detector(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), 0)
        face3Dmodel = world.ref3DModel()

        for face in faces:
            shape = predictor(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), face)
            draw(img, shape)

            refImgPts = world.ref2dImagePoints(shape)
            height, width, channels = img.shape
            focalLength = args.focal * width
            cameraMatrix = world.cameraMatrix(focalLength, (height / 2, width / 2))
            mdists = np.zeros((4, 1), dtype=np.float64)
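            # zero distortion coefficients: the camera is assumed to have no lens distortion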
            # calculate rotation and translation vector using solvePnP
            success, rotationVector, translationVector = cv2.solvePnP(
                face3Dmodel, refImgPts, cameraMatrix, mdists)

            noseEndPoints3D = np.array([[0, 0, 1000.0]], dtype=np.float64)
            noseEndPoint2D, jacobian = cv2.projectPoints(
                noseEndPoints3D, rotationVector, translationVector, cameraMatrix, mdists)

            # draw nose line
            p1 = (int(refImgPts[0, 0]), int(refImgPts[0, 1]))
            p2 = (int(noseEndPoint2D[0, 0, 0]), int(noseEndPoint2D[0, 0, 1]))
            cv2.line(img, p1, p2, (110, 220, 0),
                     thickness=2, lineType=cv2.LINE_AA)
            # calculating euler angles
            rmat, jac = cv2.Rodrigues(rotationVector)
            angles, mtxR, mtxQ, Qx, Qy, Qz = cv2.RQDecomp3x3(rmat)
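            # RQDecomp3x3 returns Euler angles in degrees; angles[1] is the left/right (yaw) rotation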
            if angles[1] < -15:
                GAZE = "Looking: Left"
            elif angles[1] > 15:
                GAZE = "Looking: Right"
            else:
                GAZE = "Forward"

        cv2.putText(img, GAZE, (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 80), 2)
        cv2.imshow("Head Pose", img)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
if __name__ == "__main__":
    main("head_pose_poc.webm")
Answer 0 (score: 0)
Change the call at the bottom of the script to:
if __name__ == "__main__": main("head_pose_poc.webm")
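
The cap_images.cpp / icvExtractPattern line in the traceback generally means OpenCV fell back to its image-sequence reader because no video backend managed to open the file, so it is worth confirming that the path actually resolves and that the FFMPEG backend can open it. A quick standalone check along these lines (the absolute path and the explicit backend choice are only a suggestion, assuming head_pose_poc.webm sits next to the script):

import os
import cv2

path = os.path.abspath("head_pose_poc.webm")   # resolve relative to the current working directory
print("file exists:", os.path.isfile(path))

cap = cv2.VideoCapture(path, cv2.CAP_FFMPEG)   # request the FFMPEG backend explicitly
print("opened:", cap.isOpened())
cap.release()

If isOpened() is still False even with an absolute path, the OpenCV build in use probably cannot decode this WebM, and re-encoding the clip to a more commonly supported format such as MP4/H.264 is a usual workaround.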