我正在通过对等iOS设备获取jpeg数据流。我以以下方式使用CoreImage和Metal + MTKView处理并显示它。我收到jpeg数据并将其转换为CIImage。接下来,我在CIImage上应用适当的转换并将其渲染到CVPixelBuffer。然后,我通过直通着色器将此CVPixelBuffer显示给MTKView(Metal处理和MTKView不使用CIImage代码部分使用的ciContext)。然后,我将像素缓冲区转换为YUV,进行一些图像处理,并在MTKView中也显示该数据。问题是它有时会变得过于缓慢和卡顿。下面的代码中是否存在使用CoreImage和Metal引起的瓶颈?如何使这个管道更好?
/// Receives one JPEG frame from the peer device and hands it to the
/// rendering pipeline.
///
/// The original dispatched the decode *and* the entire Core Image render
/// chain (`displayImage`) onto the MAIN queue. `CIImage(data:)` defers most
/// of the JPEG decode until render time, so all of the heavy work landed on
/// the UI thread — a direct cause of the observed stutter. Frames are now
/// processed off the main thread.
///
/// - Parameter data: JPEG-encoded frame bytes received from the peer.
private func handleImageData(_ data: Data) {
    // NOTE(review): a dedicated *serial* queue property would be better than
    // the global concurrent queue — it guarantees frame ordering and makes it
    // easy to drop frames (skip enqueue while one is in flight) so bursts
    // cannot snowball latency. Added here as a comment because a stored
    // property cannot be introduced from inside this method.
    DispatchQueue.global(qos: .userInteractive).async {
        if let image = CIImage(data: data) {
            self.displayImage(image)
        }
    }
}
/// Transforms a decoded frame, renders it into a pooled pixel buffer, and
/// feeds it to both the RGB display path and the YUV processing path.
///
/// Improvements over the original:
/// - Uses `CIImage.transformed(by:)` instead of instantiating a
///   `CIAffineTransform` `CIFilter` on every frame — removes the per-frame
///   filter lookup, KVC `setValue` calls, and two force-casts/unwraps.
/// - Checks the `CVReturn` status of `CVPixelBufferPoolCreatePixelBuffer`
///   instead of ignoring it, and avoids force-unwrapping the pool.
///
/// - Parameter image: The decoded frame as a `CIImage`.
private func displayImage(_ image: CIImage) {
    // Equivalent to the CIAffineTransform filter, with no per-frame object churn.
    let sourceImage = image.transformed(by: self.framesTransform)

    // Render the CIImage into a recycled buffer from the pool.
    guard let pool = self.ciPixelBufferPool else { return }
    var pixelBuffer: CVPixelBuffer? = nil
    let status = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pool, &pixelBuffer)
    guard status == kCVReturnSuccess, let pixBuf = pixelBuffer else {
        // Pool exhausted or misconfigured — drop this frame rather than proceed
        // with a nil buffer. (Pool exhaustion here is itself a back-pressure
        // signal worth logging when diagnosing the slowdown.)
        return
    }

    self.ciContext.render(sourceImage, to: pixBuf)
    self.displayPixelBufferOnMTKView(pixBuf)

    // Convert the pixel buffer to YUV, as some downstream filters operate
    // only on YUV data.
    if let yuvPixelBuffer = self.rgb2yuvFrameRenderer?.copyRenderedPixelBuffer(pixBuf) {
        self.processYUVPixelBuffer(yuvPixelBuffer)
    }
}
这里是Metal着色器代码:
// Compute kernel: converts an RGB input texture to biplanar 4:2:0 YCbCr.
// Every in-bounds thread writes one luma sample to textureY; one thread per
// 2x2 block (even x, even y) additionally writes one chroma sample to the
// half-resolution textureCbCr. Chroma is point-sampled from the top-left
// pixel of the block — NOTE(review): averaging the 2x2 block would reduce
// chroma aliasing, at the cost of three extra reads per chroma write.
kernel void kernelRGBtoYUV(texture2d<half, access::read> inputTexture [[ texture(0) ]],
                           texture2d<half, access::write> textureY [[ texture(1) ]],
                           texture2d<half, access::write> textureCbCr [[ texture(2) ]],
                           constant ColorConversion &colorConv [[ buffer(0) ]],
                           uint2 gid [[thread_position_in_grid]])
{
    // Make sure we don't read or write outside of the texture.
    if ((gid.x >= inputTexture.get_width()) || (gid.y >= inputTexture.get_height())) {
        return;
    }
    float3 inputColor = float3(inputTexture.read(gid).rgb);
    // yuv holds (Y, Cb, Cr) per the caller-supplied conversion matrix/offset.
    float3 yuv = colorConv.matrix*inputColor + colorConv.offset;
    textureY.write(half(yuv.x), gid);
    if (gid.x % 2 == 0 && gid.y % 2 == 0) {
        // Build the chroma value inside the branch: the original computed it
        // for every thread, but only one thread in four ever writes it.
        // uv = (Cb, Cr, Y, Y); presumably only .rg land in an rg-format
        // chroma plane — TODO confirm textureCbCr's pixel format.
        half4 uv = half4(yuv.gbrr);
        textureCbCr.write(uv, uint2(gid.x / 2, gid.y / 2));
    }
}