如何在Swift中处理现有视频的帧

时间:2017-06-27 14:57:05

标签: ios iphone swift avfoundation

目前我正在尝试使用OpenCV处理现有视频的帧。是否有任何AV阅读器库包含在播放视频时处理帧的委托方法?我知道如何通过使用AVCaptureVideoDataOutput和captureOutput委托方法在实时AVCaptureSession期间处理帧。播放视频有类似的东西吗?

任何帮助都将不胜感激。

4 个答案:

答案 0 :(得分:1)

这是解决方案。感谢Tim Bull的回答,我使用AVAssetReader / AssetReaderOutput

完成了此任务

以下函数在按钮点击时被调用,用于开始播放视频并使用OpenCV处理每一帧:

/// Decodes every frame of the bundled "output_10_34_34.mp4" on a background
/// queue so each decoded sample buffer can be handed to OpenCV.
/// Side effects: prints and calls `exit(0)` when the bundled resource or the
/// Documents directory is unavailable.
func processVids() {
    // Locate the bundled source video.
    guard let pathOfOrigVid = Bundle.main.path(forResource: "output_10_34_34", ofType: "mp4") else {
        // BUG FIX: the original message named "video.m4v", which is not the
        // resource being looked up.
        print("output_10_34_34.mp4 not found\n")
        exit(0)
    }

    // Output URL in the Documents directory. Unused in this function; kept for
    // the follow-up step that writes the processed "grayVideo.mp4".
    var path: URL? = nil
    do {
        path = try FileManager.default.url(for: .documentDirectory, in: .userDomainMask, appropriateFor: nil, create: false)
        path = path?.appendingPathComponent("grayVideo.mp4")
    } catch {
        print("Unable to make URL to Movies path\n")
        exit(0)
    }

    let movie = AVURLAsset(url: URL(fileURLWithPath: pathOfOrigVid), options: nil)
    // BUG FIX: the original indexed tracks[0] unconditionally, which crashes on
    // an asset with no video track (e.g. audio-only).
    guard let track = movie.tracks(withMediaType: AVMediaTypeVideo).first else {
        print("No video track found\n")
        return
    }

    let reader: AVAssetReader
    do {
        reader = try AVAssetReader(asset: movie)
    } catch {
        // BUG FIX: the original continued with a nil reader after this catch;
        // there is nothing to decode, so bail out.
        print("Problem initializing AVReader\n")
        return
    }

    // Decode to 32-bit ARGB pixel buffers so OpenCV can consume them directly.
    let settings: [String: Any] = [
        String(kCVPixelBufferPixelFormatTypeKey): NSNumber(value: kCVPixelFormatType_32ARGB),
        String(kCVPixelBufferIOSurfacePropertiesKey): [:]
    ]

    let rout = AVAssetReaderTrackOutput(track: track, outputSettings: settings)
    reader.add(rout)
    reader.startReading()

    DispatchQueue.global().async {
        // BUG FIX: the original called copyNextSampleBuffer() twice per
        // iteration (once in the nil check, once for the value), silently
        // dropping every other frame. Copy exactly once per iteration.
        // When the output is exhausted the reader's status leaves .reading,
        // so the loop terminates without an artificial sleep.
        while reader.status == .reading {
            guard let sbuff = rout.copyNextSampleBuffer() else { continue }
            // `sbuff` is the frame to perform OpenCV processing on.
            _ = sbuff
        }
    }
}

答案 1 :(得分:0)

AVAssetReader / AVAssetReaderOutput是您正在寻找的。查看copyNextSampleBuffer方法。

https://developer.apple.com/documentation/avfoundation/avassetreaderoutput

答案 2 :(得分:0)

您可以使用AVVideoComposition

如果您想使用CoreImage处理帧,可以通过调用init(asset:applyingCIFiltersWithHandler:)方法创建实例。

或者您可以创建自定义compositor(合成器)

  

您可以通过实现遵循AVVideoCompositing协议的自定义视频合成器来做到这一点;自定义视频合成器在播放和其他操作期间会收到每个视频源的像素缓冲区,并可以对它们执行任意图形操作以产生视觉输出。

有关详细信息,请参阅官方文档。在那里你可以找到一个示例(但该示例是用Objective-C编写的)。

答案 3 :(得分:0)

对于需要通过OpenCV处理视频帧的人。

解码视频:

/// Delegate callbacks for `ARVideoReader`: one call per decoded frame plus a
/// final completion call. (The name `readerDidFinished` is ungrammatical but is
/// public API, so it is kept as-is.)
@objc public protocol ARVideoReaderDelegate : NSObjectProtocol {
    /// Called once per sample buffer pulled from the reader. `sampleBuffer` may
    /// be nil (the reader output returns nil at end of stream); `frameCount` is
    /// the zero-based index of this callback.
    func reader(_ reader:ARVideoReader!, newFrameReady sampleBuffer:CMSampleBuffer?, _ frameCount:Int)
    /// Called once after reading stops, with the total number of frames delivered.
    func readerDidFinished(_ reader:ARVideoReader!, totalFrameCount:Int)
}
/// Synchronously decodes a video asset's frames as 32BGRA sample buffers and
/// forwards each one to `_delegate`. Call `startReading()` from a background
/// queue: it blocks until the asset is fully read.
@objc open class ARVideoReader: NSObject {
    // The asset to decode. Implicitly unwrapped to preserve the original
    // @objc-visible interface.
    var _asset: AVURLAsset!
    @objc var _delegate: ARVideoReaderDelegate?

    /// Stores the asset; decoding does not begin until `startReading()`.
    @objc public init!(urlAsset asset:AVURLAsset){
        _asset = asset
        super.init()
    }

    /// Reads every video frame, invoking the delegate once per frame and once
    /// more on completion. Blocks the calling thread.
    @objc open func startReading() -> Void {
        guard let reader = try? AVAssetReader(asset: _asset) else { return }

        // BUG FIX: the original force-unwrapped the first video track, so an
        // audio-only asset crashed. (`.compactMap { $0 }` on a non-optional
        // array was also a no-op and is removed.)
        guard let videoTrack = _asset.tracks(withMediaType: .video).first else { return }

        // Decode to 32BGRA so the buffers map directly onto an OpenCV Mat.
        let options = [kCVPixelBufferPixelFormatTypeKey : Int(kCVPixelFormatType_32BGRA)]
        let readerOutput = AVAssetReaderTrackOutput(track: videoTrack, outputSettings: options as [String : Any])
        reader.add(readerOutput)
        reader.startReading()

        var count = 0
        // The nominalFrameRate check is loop-invariant, so hoist it out of the
        // loop condition; a zero frame rate skips decoding entirely, matching
        // the original behavior.
        if videoTrack.nominalFrameRate != 0 {
            while reader.status == .reading {
                // BUG FIX: only deliver real frames. The original forwarded the
                // terminal nil buffer to the delegate and counted it as a frame,
                // inflating totalFrameCount by one.
                guard let sampleBuffer = readerOutput.copyNextSampleBuffer() else { break }
                _delegate?.reader(self, newFrameReady: sampleBuffer, count)
                count += 1
            }
        }
        _delegate?.readerDidFinished(self, totalFrameCount: count)
    }
}

在委托的回调中:

// Convert a 32BGRA CMSampleBuffer into a 3-channel BGR cv::Mat, dropping the
// alpha channel. The pixel buffer is locked read-only for the duration of the
// copy and unlocked before the Mat is used.
CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
size_t width = CVPixelBufferGetWidth(imageBuffer);
size_t height = CVPixelBufferGetHeight(imageBuffer);
CVPixelBufferLockBaseAddress(imageBuffer, kCVPixelBufferLock_ReadOnly);
// Use unsigned char: pixel components are 0-255 and cv::Vec3b stores uchar.
unsigned char *baseBuffer = (unsigned char *)CVPixelBufferGetBaseAddress(imageBuffer);

cv::Mat cvImage = cv::Mat((int)height, (int)width, CV_8UC3);

cv::MatIterator_<cv::Vec3b> it_start = cvImage.begin<cv::Vec3b>();
cv::MatIterator_<cv::Vec3b> it_end = cvImage.end<cv::Vec3b>();
long cur = 0;
// CoreVideo may pad each row: bytesPerRow >= width * 4 for 32BGRA.
size_t padding = CVPixelBufferGetBytesPerRow(imageBuffer) - width * 4;
// BUG FIX: the original initialized `offset` to `padding`, shifting every read
// forward by one row's padding (row 0 begins at byte 0 of the base address).
size_t offset = 0;
while (it_start != it_end) {
    // Byte index of the current pixel: 4 bytes per pixel plus accumulated
    // row padding. Layout is B, G, R, A.
    long p_idx = cur * 4 + offset;
    unsigned char b = baseBuffer[p_idx];
    unsigned char g = baseBuffer[p_idx + 1];
    unsigned char r = baseBuffer[p_idx + 2];
    *it_start = cv::Vec3b(b, g, r);
    cur++;
    it_start++;
    // A full row has been consumed: skip that row's trailing padding bytes.
    if (cur % width == 0) {
        offset += padding;
    }
}
CVPixelBufferUnlockBaseAddress(imageBuffer, kCVPixelBufferLock_ReadOnly);
// `cvImage` now holds the frame in BGR order; process it with OpenCV here.