I'm using a media capture library called NextLevel that spits out a CMSampleBuffer for every frame. I want to take that buffer, feed it into GPUImage2 through a rawDataInput, pass it through some filters, and read it back from a rawDataOutput at the end of the chain...

CMSampleBuffer bytes -> rawDataInput -> someFilter -> someOtherFilter -> rawDataOutput -> build a CVPixelBuffer for something else.

The question is: how do I convert the CMSampleBuffer into a [UInt8] array so that rawDataInput can accept it?
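For reference, here is a minimal sketch of the chain I'm after (I'm assuming a RawDataOutput can be constructed directly, and SaturationAdjustment is just a stand-in for my real filters):

let rawInput = RawDataInput()
let someFilter = SaturationAdjustment()
let rawOutput = RawDataOutput()

// The filtered RGBA bytes come back here; this is where I'd build the CVPixelBuffer.
rawOutput.dataAvailableCallback = { bytes in
    // ...
}

rawInput --> someFilter --> rawOutput
// per frame: rawInput.uploadBytes(frameBytes, size: frameSize, pixelFormat: .rgba)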
I have the following code, but it is incredibly slow... frames do make it all the way through the chain to the rawDataOutput's dataAvailableCallback, but only at about 1 frame per second. I found this code online and don't really understand what it is doing mathematically, but I suspect it is very inefficient:
let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)!
CVPixelBufferLockBaseAddress(pixelBuffer, CVPixelBufferLockFlags(rawValue: 0))

let lumaBaseAddress = CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0)
let chromaBaseAddress = CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 1)

let width = CVPixelBufferGetWidth(pixelBuffer)
let height = CVPixelBufferGetHeight(pixelBuffer)

let lumaBytesPerRow = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 0)
let chromaBytesPerRow = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 1)

let lumaBuffer = lumaBaseAddress?.assumingMemoryBound(to: UInt8.self)
let chromaBuffer = chromaBaseAddress?.assumingMemoryBound(to: UInt8.self)

var rgbaImage = [UInt8](repeating: 0, count: 4 * width * height)

// Per-pixel BT.601 YCbCr -> RGB conversion of the biplanar buffer, done on the CPU.
// Note: each pixel is written in B, G, R, A order even though it is uploaded as .rgba below.
for x in 0 ..< width {
    for y in 0 ..< height {
        let lumaIndex = x + y * lumaBytesPerRow
        let chromaIndex = (y / 2) * chromaBytesPerRow + (x / 2) * 2
        let yp = lumaBuffer?[lumaIndex]
        let cb = chromaBuffer?[chromaIndex]
        let cr = chromaBuffer?[chromaIndex + 1]
        let ri = Double(yp!) + 1.402 * (Double(cr!) - 128)
        let gi = Double(yp!) - 0.34414 * (Double(cb!) - 128) - 0.71414 * (Double(cr!) - 128)
        let bi = Double(yp!) + 1.772 * (Double(cb!) - 128)
        let r = UInt8(min(max(ri, 0), 255))
        let g = UInt8(min(max(gi, 0), 255))
        let b = UInt8(min(max(bi, 0), 255))
        rgbaImage[(x + y * width) * 4]     = b
        rgbaImage[(x + y * width) * 4 + 1] = g
        rgbaImage[(x + y * width) * 4 + 2] = r
        rgbaImage[(x + y * width) * 4 + 3] = 255
    }
}

self.rawInput.uploadBytes(rgbaImage, size: Size(width: Float(width), height: Float(height)), pixelFormat: .rgba)
CVPixelBufferUnlockBaseAddress(pixelBuffer, CVPixelBufferLockFlags(rawValue: 0))
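Is there a faster way to do this conversion? One idea is to let Accelerate/vImage do the YpCbCr-to-RGBA conversion instead of a per-pixel Swift loop. A rough, untested sketch of what I mean (a hypothetical helper; I'm assuming full-range BT.601 to match the coefficients above, and I'm not certain about the pixel-range values or the permute map for RGBA ordering):

import Accelerate

// Hypothetical helper: converts a biplanar 4:2:0 YpCbCr pixel buffer to tightly packed RGBA bytes.
func rgbaBytes(from pixelBuffer: CVPixelBuffer) -> [UInt8]? {
    CVPixelBufferLockBaseAddress(pixelBuffer, .readOnly)
    defer { CVPixelBufferUnlockBaseAddress(pixelBuffer, .readOnly) }

    let width = CVPixelBufferGetWidth(pixelBuffer)
    let height = CVPixelBufferGetHeight(pixelBuffer)

    var lumaBuffer = vImage_Buffer(data: CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0),
                                   height: vImagePixelCount(height),
                                   width: vImagePixelCount(width),
                                   rowBytes: CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 0))
    var chromaBuffer = vImage_Buffer(data: CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 1),
                                     height: vImagePixelCount(height / 2),
                                     width: vImagePixelCount(width / 2),
                                     rowBytes: CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 1))

    // Full-range pixel values assumed (kCVPixelFormatType_420YpCbCr8BiPlanarFullRange).
    var pixelRange = vImage_YpCbCrPixelRange(Yp_bias: 0, CbCr_bias: 128,
                                             YpRangeMax: 255, CbCrRangeMax: 255,
                                             YpMax: 255, YpMin: 0,
                                             CbCrMax: 255, CbCrMin: 0)
    var conversionInfo = vImage_YpCbCrToARGB()
    guard vImageConvert_YpCbCrToARGB_GenerateConversion(kvImage_YpCbCrToARGBMatrix_ITU_R_601_4,
                                                        &pixelRange, &conversionInfo,
                                                        kvImage420Yp8_CbCr8, kvImageARGB8888,
                                                        vImage_Flags(kvImageNoFlags)) == kvImageNoError else { return nil }

    var rgba = [UInt8](repeating: 0, count: width * height * 4)
    // permuteMap reorders the converter's native ARGB output (A=0, R=1, G=2, B=3) into RGBA.
    let permuteMap: [UInt8] = [1, 2, 3, 0]
    let error = rgba.withUnsafeMutableBytes { dest -> vImage_Error in
        var destBuffer = vImage_Buffer(data: dest.baseAddress,
                                       height: vImagePixelCount(height),
                                       width: vImagePixelCount(width),
                                       rowBytes: width * 4)
        return vImageConvert_420Yp8_CbCr8ToARGB8888(&lumaBuffer, &chromaBuffer, &destBuffer,
                                                    &conversionInfo, permuteMap, 255,
                                                    vImage_Flags(kvImageNoFlags))
    }
    return error == kvImageNoError ? rgba : nil
}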
Update 1
I'm using a camera library called NextLevel to retrieve camera frames (CMSampleBuffers) and feed them into the filter chain, in this case a RawDataInput, as an array of UInt8 bytes. Because NextLevel uses luminance/chrominance whenever possible, I commented out the 5 lines at https://github.com/NextLevel/NextLevel/blob/master/Sources/NextLevel.swift#L1106 as @rythmic fishman suggested in the comments. But then the code above breaks, so I replaced it with the following:
let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)!
CVPixelBufferLockBaseAddress(pixelBuffer, CVPixelBufferLockFlags(rawValue: 0))

let width = CVPixelBufferGetWidth(pixelBuffer)
let height = CVPixelBufferGetHeight(pixelBuffer)
let baseAddress = CVPixelBufferGetBaseAddress(pixelBuffer)
let int8Buffer = CVPixelBufferGetBaseAddress(pixelBuffer)?.assumingMemoryBound(to: UInt8.self)

var rgbaImage = [UInt8](repeating: 0, count: 4 * width * height)
for i in 0 ..< (width * height * 4) {
    rgbaImage[i] = UInt8((int8Buffer?[i])!)
}

self.rawInput.uploadBytes(rgbaImage, size: Size(width: Float(width), height: Float(height)), pixelFormat: .rgba)
CVPixelBufferUnlockBaseAddress(pixelBuffer, CVPixelBufferLockFlags(rawValue: 0))
This code works when NextLevel isn't using luminance/chrominance, but the frames are still very slow when displayed with a GPUImage RenderView at the end of the filter chain.
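One thing I notice about that snippet is that it assumes the pixel buffer is tightly packed; CVPixelBuffers often have row padding, so bytesPerRow can be larger than width * 4. A minimal sketch of a stride-aware copy (assuming NextLevel is configured to deliver kCVPixelFormatType_32BGRA, and uploading as .bgra):

let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)!
CVPixelBufferLockBaseAddress(pixelBuffer, CVPixelBufferLockFlags(rawValue: 0))

let width = CVPixelBufferGetWidth(pixelBuffer)
let height = CVPixelBufferGetHeight(pixelBuffer)
let bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer)
let srcBase = CVPixelBufferGetBaseAddress(pixelBuffer)!.assumingMemoryBound(to: UInt8.self)

// Copy only the visible width * 4 bytes of each row, skipping any padding at the end of the row.
var bgraImage = [UInt8](repeating: 0, count: width * height * 4)
bgraImage.withUnsafeMutableBufferPointer { dst in
    for row in 0 ..< height {
        memcpy(dst.baseAddress! + row * width * 4,
               srcBase + row * bytesPerRow,
               width * 4)
    }
}

self.rawInput.uploadBytes(bgraImage, size: Size(width: Float(width), height: Float(height)), pixelFormat: .bgra)
CVPixelBufferUnlockBaseAddress(pixelBuffer, CVPixelBufferLockFlags(rawValue: 0))

That removes the per-byte Swift loop, but it still makes a CPU copy of every frame, so I'm not sure it's enough on its own.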
Update 2
So I decided to build a custom RawDataInput.swift based on GPUImage2's Camera.swift. Since the Camera class takes frames from the native camera as CMSampleBuffers, I figured: well, NextLevel is handing me exactly the same kind of buffer, so I can copy the GPUImage2 Camera class implementation, strip out everything I don't need, and keep just one method that receives a CMSampleBuffer and processes it. It turned out to work perfectly... except there is a lag (no dropped frames, just lag). I don't know where the bottleneck is. I've read that processing/modifying CMSampleBuffers coming from the native camera and then displaying them can cause the kind of delay mentioned in this question: How to keep low latency during the preview of video coming from AVFoundation?
I made a video of the lag I'm experiencing... https://www.youtube.com/watch?v=5DQRnOTi4wk
The preview in the top corner comes from NextLevel's previewLayer (an AVCaptureVideoPreviewLayer), and the filtered preview is a GPUImage2 RenderView at the end of the chain... running at 1920px resolution with 7 filters on an iPhone 6. This lag does not happen with the GPUImage2 Camera class.
Here is the custom RawDataInput I put together:
#if os(Linux)
#if GLES
    import COpenGLES.gles2
#else
    import COpenGL
#endif
#else
#if GLES
    import OpenGLES
#else
    import OpenGL.GL3
#endif
#endif

import AVFoundation

public enum PixelFormat {
    case bgra
    case rgba
    case rgb
    case luminance

    func toGL() -> Int32 {
        switch self {
        case .bgra: return GL_BGRA
        case .rgba: return GL_RGBA
        case .rgb: return GL_RGB
        case .luminance: return GL_LUMINANCE
        }
    }
}

// TODO: Replace with texture caches where appropriate
public class RawDataInput: ImageSource {
    public let targets = TargetContainer()
    let frameRenderingSemaphore = DispatchSemaphore(value:1)
    let cameraProcessingQueue = DispatchQueue.global(priority:DispatchQueue.GlobalQueuePriority.default)
    let captureAsYUV:Bool = true
    let yuvConversionShader:ShaderProgram?
    var supportsFullYUVRange:Bool = false

    public init() {
        if captureAsYUV {
            supportsFullYUVRange = false
            let videoOutput = AVCaptureVideoDataOutput()
            let supportedPixelFormats = videoOutput.availableVideoCVPixelFormatTypes
            for currentPixelFormat in supportedPixelFormats! {
                if ((currentPixelFormat as! NSNumber).int32Value == Int32(kCVPixelFormatType_420YpCbCr8BiPlanarFullRange)) {
                    supportsFullYUVRange = true
                }
            }
            if (supportsFullYUVRange) {
                yuvConversionShader = crashOnShaderCompileFailure("Camera"){try sharedImageProcessingContext.programForVertexShader(defaultVertexShaderForInputs(2), fragmentShader:YUVConversionFullRangeFragmentShader)}
            } else {
                yuvConversionShader = crashOnShaderCompileFailure("Camera"){try sharedImageProcessingContext.programForVertexShader(defaultVertexShaderForInputs(2), fragmentShader:YUVConversionVideoRangeFragmentShader)}
            }
        } else {
            yuvConversionShader = nil
        }
    }

    public func uploadPixelBuffer(_ cameraFrame: CVPixelBuffer) {
        guard (frameRenderingSemaphore.wait(timeout:DispatchTime.now()) == DispatchTimeoutResult.success) else { return }

        let bufferWidth = CVPixelBufferGetWidth(cameraFrame)
        let bufferHeight = CVPixelBufferGetHeight(cameraFrame)
        CVPixelBufferLockBaseAddress(cameraFrame, CVPixelBufferLockFlags(rawValue:CVOptionFlags(0)))

        sharedImageProcessingContext.runOperationAsynchronously{
            let cameraFramebuffer:Framebuffer
            let luminanceFramebuffer:Framebuffer
            let chrominanceFramebuffer:Framebuffer
            if sharedImageProcessingContext.supportsTextureCaches() {
                var luminanceTextureRef:CVOpenGLESTexture? = nil
                let _ = CVOpenGLESTextureCacheCreateTextureFromImage(kCFAllocatorDefault, sharedImageProcessingContext.coreVideoTextureCache, cameraFrame, nil, GLenum(GL_TEXTURE_2D), GL_LUMINANCE, GLsizei(bufferWidth), GLsizei(bufferHeight), GLenum(GL_LUMINANCE), GLenum(GL_UNSIGNED_BYTE), 0, &luminanceTextureRef)
                let luminanceTexture = CVOpenGLESTextureGetName(luminanceTextureRef!)
                glActiveTexture(GLenum(GL_TEXTURE4))
                glBindTexture(GLenum(GL_TEXTURE_2D), luminanceTexture)
                glTexParameteri(GLenum(GL_TEXTURE_2D), GLenum(GL_TEXTURE_WRAP_S), GL_CLAMP_TO_EDGE)
                glTexParameteri(GLenum(GL_TEXTURE_2D), GLenum(GL_TEXTURE_WRAP_T), GL_CLAMP_TO_EDGE)
                luminanceFramebuffer = try! Framebuffer(context:sharedImageProcessingContext, orientation:.portrait, size:GLSize(width:GLint(bufferWidth), height:GLint(bufferHeight)), textureOnly:true, overriddenTexture:luminanceTexture)

                var chrominanceTextureRef:CVOpenGLESTexture? = nil
                let _ = CVOpenGLESTextureCacheCreateTextureFromImage(kCFAllocatorDefault, sharedImageProcessingContext.coreVideoTextureCache, cameraFrame, nil, GLenum(GL_TEXTURE_2D), GL_LUMINANCE_ALPHA, GLsizei(bufferWidth / 2), GLsizei(bufferHeight / 2), GLenum(GL_LUMINANCE_ALPHA), GLenum(GL_UNSIGNED_BYTE), 1, &chrominanceTextureRef)
                let chrominanceTexture = CVOpenGLESTextureGetName(chrominanceTextureRef!)
                glActiveTexture(GLenum(GL_TEXTURE5))
                glBindTexture(GLenum(GL_TEXTURE_2D), chrominanceTexture)
                glTexParameteri(GLenum(GL_TEXTURE_2D), GLenum(GL_TEXTURE_WRAP_S), GL_CLAMP_TO_EDGE)
                glTexParameteri(GLenum(GL_TEXTURE_2D), GLenum(GL_TEXTURE_WRAP_T), GL_CLAMP_TO_EDGE)
                chrominanceFramebuffer = try! Framebuffer(context:sharedImageProcessingContext, orientation:.portrait, size:GLSize(width:GLint(bufferWidth / 2), height:GLint(bufferHeight / 2)), textureOnly:true, overriddenTexture:chrominanceTexture)
            } else {
                glActiveTexture(GLenum(GL_TEXTURE4))
                luminanceFramebuffer = sharedImageProcessingContext.framebufferCache.requestFramebufferWithProperties(orientation:.portrait, size:GLSize(width:GLint(bufferWidth), height:GLint(bufferHeight)), textureOnly:true)
                luminanceFramebuffer.lock()
                glBindTexture(GLenum(GL_TEXTURE_2D), luminanceFramebuffer.texture)
                glTexImage2D(GLenum(GL_TEXTURE_2D), 0, GL_LUMINANCE, GLsizei(bufferWidth), GLsizei(bufferHeight), 0, GLenum(GL_LUMINANCE), GLenum(GL_UNSIGNED_BYTE), CVPixelBufferGetBaseAddressOfPlane(cameraFrame, 0))

                glActiveTexture(GLenum(GL_TEXTURE5))
                chrominanceFramebuffer = sharedImageProcessingContext.framebufferCache.requestFramebufferWithProperties(orientation:.portrait, size:GLSize(width:GLint(bufferWidth / 2), height:GLint(bufferHeight / 2)), textureOnly:true)
                chrominanceFramebuffer.lock()
                glBindTexture(GLenum(GL_TEXTURE_2D), chrominanceFramebuffer.texture)
                glTexImage2D(GLenum(GL_TEXTURE_2D), 0, GL_LUMINANCE_ALPHA, GLsizei(bufferWidth / 2), GLsizei(bufferHeight / 2), 0, GLenum(GL_LUMINANCE_ALPHA), GLenum(GL_UNSIGNED_BYTE), CVPixelBufferGetBaseAddressOfPlane(cameraFrame, 1))
            }

            cameraFramebuffer = sharedImageProcessingContext.framebufferCache.requestFramebufferWithProperties(orientation:.portrait, size:luminanceFramebuffer.sizeForTargetOrientation(.portrait), textureOnly:false)

            let conversionMatrix:Matrix3x3
            if (self.supportsFullYUVRange) {
                conversionMatrix = colorConversionMatrix601FullRangeDefault
            } else {
                conversionMatrix = colorConversionMatrix601Default
            }
            convertYUVToRGB(shader:self.yuvConversionShader!, luminanceFramebuffer:luminanceFramebuffer, chrominanceFramebuffer:chrominanceFramebuffer, resultFramebuffer:cameraFramebuffer, colorConversionMatrix:conversionMatrix)

            //ONLY RGBA
            //let cameraFramebuffer:Framebuffer = sharedImageProcessingContext.framebufferCache.requestFramebufferWithProperties(orientation:.portrait, size:GLSize(width:GLint(bufferWidth), height:GLint(bufferHeight)), textureOnly:true)
            //glBindTexture(GLenum(GL_TEXTURE_2D), cameraFramebuffer.texture)
            //glTexImage2D(GLenum(GL_TEXTURE_2D), 0, GL_RGBA, GLsizei(bufferWidth), GLsizei(bufferHeight), 0, GLenum(GL_BGRA), GLenum(GL_UNSIGNED_BYTE), CVPixelBufferGetBaseAddress(cameraFrame))

            CVPixelBufferUnlockBaseAddress(cameraFrame, CVPixelBufferLockFlags(rawValue:CVOptionFlags(0)))

            self.updateTargetsWithFramebuffer(cameraFramebuffer)
            self.frameRenderingSemaphore.signal()
        }
    }

    public func uploadBytes(_ bytes:[UInt8], size:Size, pixelFormat:PixelFormat, orientation:ImageOrientation = .portrait) {
        let dataFramebuffer = sharedImageProcessingContext.framebufferCache.requestFramebufferWithProperties(orientation:orientation, size:GLSize(size), textureOnly:true, internalFormat:pixelFormat.toGL(), format:pixelFormat.toGL())

        glActiveTexture(GLenum(GL_TEXTURE1))
        glBindTexture(GLenum(GL_TEXTURE_2D), dataFramebuffer.texture)
        glTexImage2D(GLenum(GL_TEXTURE_2D), 0, GL_RGBA, size.glWidth(), size.glHeight(), 0, GLenum(pixelFormat.toGL()), GLenum(GL_UNSIGNED_BYTE), bytes)

        updateTargetsWithFramebuffer(dataFramebuffer)
    }

    public func transmitPreviousImage(to target:ImageConsumer, atIndex:UInt) {
        // TODO: Determine if this is necessary for the raw data uploads
        //        if let buff = self.dataFramebuffer {
        //            buff.lock()
        //            target.newFramebufferAvailable(buff, fromSourceIndex:atIndex)
        //        }
    }
}
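For completeness, this is roughly how I'm feeding it. The wrapper below is hypothetical (the actual NextLevel delegate method name may differ); it just forwards each sample buffer's pixel buffer:

let rawDataInput = RawDataInput()

// Called for every video frame NextLevel delivers (callback name assumed).
func handleVideoSampleBuffer(_ sampleBuffer: CMSampleBuffer) {
    guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
    rawDataInput.uploadPixelBuffer(pixelBuffer)
}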
I just don't understand why there is this lag when it's no different from the GPUImage2 Camera class. NextLevel isn't doing any extra processing on those frames, it's just passing them through, so why the lag?