Tracking faces in a local video with the Vision framework

Asked: 2017-06-28 22:02:16

Tags: ios cocoa-touch ios11 coreml apple-vision

I am trying to use the Vision framework to detect faces in a locally recorded video. Most of the available samples demonstrate face detection on a live camera feed.

  • How can I do face detection on a local video and place a rectangle on each detected face at runtime using the Vision / CoreML frameworks?

1 Answer:

Answer 0 (score: 4)

  • Wait for your videoItem to become ready to play
  • Add a video output to it
  • Add a periodic time observer that fires roughly once per frame
  • On each tick, extract the new pixel buffer and process it with Vision / CoreML as needed
  • If you use the Vision framework, use VNSequenceRequestHandler instead of VNImageRequestHandler

import UIKit
import AVFoundation
import CoreML
import Vision

class ViewController: UIViewController {
  var player: AVPlayer!
  var videoOutput: AVPlayerItemVideoOutput?

  override func viewDidLoad() {
    super.viewDidLoad()

    // `localURL` is the file URL of the locally recorded video (not shown here).
    let player = AVPlayer(url: localURL)
    player.play()

    // Observe the item's status so the video output can be attached
    // once the item is ready to play.
    player.currentItem?.addObserver(
      self,
      forKeyPath: #keyPath(AVPlayerItem.status),
      options: [.initial, .old, .new],
      context: nil)

    // Fire roughly once per frame (30 fps) on a background queue.
    // [weak self] avoids a retain cycle, since the player retains this block.
    player.addPeriodicTimeObserver(
      forInterval: CMTime(value: 1, timescale: 30),
      queue: DispatchQueue(label: "videoProcessing", qos: .background),
      using: { [weak self] _ in
        self?.doThingsWithFaces()
    })
    self.player = player
  }

  override func observeValue(forKeyPath keyPath: String?, of object: Any?, change: [NSKeyValueChangeKey : Any]?, context: UnsafeMutableRawPointer?) {
    guard let keyPath = keyPath, let item = object as? AVPlayerItem
      else { return }

    switch keyPath {
    case #keyPath(AVPlayerItem.status):
      // The video output can only be attached once the item is ready to play.
      if item.status == .readyToPlay {
        self.setUpOutput()
      }
    default: break
    }
  }

  func setUpOutput() {
    guard self.videoOutput == nil else { return }
    let videoItem = player.currentItem!
    if videoItem.status != AVPlayerItemStatus.readyToPlay {
      // The output should only be added once the item is ready to play,
      // see https://forums.developer.apple.com/thread/27589#128476
      return
    }

    // Request a pixel format that Vision / CoreML can consume directly.
    let pixelBuffAttributes = [
      kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange,
      ] as [String: Any]

    let videoOutput = AVPlayerItemVideoOutput(pixelBufferAttributes: pixelBuffAttributes)
    videoItem.add(videoOutput)
    self.videoOutput = videoOutput
  }

  func getNewFrame() -> CVPixelBuffer? {
    guard let videoOutput = videoOutput, let currentItem = player.currentItem else { return nil }

    // Only copy a buffer when the output actually has a new frame for the current time.
    let time = currentItem.currentTime()
    if !videoOutput.hasNewPixelBuffer(forItemTime: time) { return nil }
    guard let buffer = videoOutput.copyPixelBuffer(forItemTime: time, itemTimeForDisplay: nil)
      else { return nil }
    return buffer
  }

  func doThingsWithFaces() {
    guard let buffer = getNewFrame() else { return }
    // Run your CoreML / Vision requests on the pixel buffer here;
    // a face-detection sketch using VNSequenceRequestHandler follows below.
  }
}
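
For the Vision part, here is a minimal sketch of what doThingsWithFaces() could look like, using VNSequenceRequestHandler with VNDetectFaceRectanglesRequest as suggested above. The drawRectangles(for:) helper, the "face" layer name, and the direct mapping of the normalized bounding boxes onto view.bounds are assumptions made for illustration (they ignore videoGravity / letterboxing), so adapt them to however you actually display the video.

// These live on the view controller. Reuse one sequence handler across frames:
// it is designed for a sequence of related images, such as consecutive video frames.
let sequenceHandler = VNSequenceRequestHandler()

func doThingsWithFaces() {
  guard let buffer = getNewFrame() else { return }

  let request = VNDetectFaceRectanglesRequest { [weak self] request, error in
    guard error == nil,
      let faces = request.results as? [VNFaceObservation] else { return }
    // UI work must happen on the main queue; the time observer runs on a background queue.
    DispatchQueue.main.async {
      self?.drawRectangles(for: faces)
    }
  }

  do {
    try sequenceHandler.perform([request], on: buffer)
  } catch {
    print("Vision error: \(error)")
  }
}

// Hypothetical helper (not part of Vision): converts the normalized, bottom-left-origin
// bounding boxes into the view's coordinate space and draws a red border around each face.
// Assumes the video frame fills the whole view.
func drawRectangles(for faces: [VNFaceObservation]) {
  // Drop the rectangles from the previous frame.
  for layer in view.layer.sublayers ?? [] where layer.name == "face" {
    layer.removeFromSuperlayer()
  }

  let size = view.bounds.size
  for face in faces {
    let box = face.boundingBox  // normalized (0...1), origin at the bottom-left
    let rect = CGRect(x: box.minX * size.width,
                      y: (1 - box.maxY) * size.height,
                      width: box.width * size.width,
                      height: box.height * size.height)

    let layer = CALayer()
    layer.name = "face"
    layer.frame = rect
    layer.borderColor = UIColor.red.cgColor
    layer.borderWidth = 2
    view.layer.addSublayer(layer)
  }
}

Note the y-flip: Vision reports bounding boxes in a coordinate system whose origin is at the bottom-left, while UIKit's origin is at the top-left.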