I am capturing frames from ARKit and getting a CVPixelBuffer from each one:
func session(_ session: ARSession, didUpdate frame: ARFrame) {
    if self.detectionFrame != nil {
        return
    }
    self.detectionFrame = frame
    // Retain the image buffer for Vision processing.
    let pixelBuffer = frame.capturedImage
    DispatchQueue.global().async {
        self.recognizeText(from: pixelBuffer)
    }
}
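For reference, detectionFrame acts here as a simple "busy" flag. Below is a minimal sketch of how it might be declared and cleared once recognition finishes; both the property declaration and the reset are my assumptions and are not shown in the original question:

var detectionFrame: ARFrame? // assumed property: non-nil while a frame is being processed

func session(_ session: ARSession, didUpdate frame: ARFrame) {
    guard detectionFrame == nil else { return } // skip new frames while one is still being recognized
    detectionFrame = frame
    let pixelBuffer = frame.capturedImage
    DispatchQueue.global().async {
        self.recognizeText(from: pixelBuffer)
        DispatchQueue.main.async {
            // ARSession delivers delegate callbacks on the main queue by default,
            // so clearing the flag there avoids racing with session(_:didUpdate:).
            self.detectionFrame = nil
        }
    }
}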
In recognizeText I initialize Tesseract and hand it the image after converting it to a UIImage:
func recognizeText(from image: CVPixelBuffer) {
    // 1
    if let tesseract = MGTesseract(language: "jpn+jpn_vert") {
        // 2
        tesseract.engineMode = .tesseractCubeCombined
        // 3
        tesseract.pageSegmentationMode = .auto
        // 4
        let ciImage = CIImage(cvPixelBuffer: image)
        tesseract.image = UIImage(ciImage: ciImage)
        // 5
        tesseract.recognize()
        // 6
        let text = tesseract.recognizedText
        print(text ?? "")
    }
}
This always results in
Thread 15: EXC_BAD_ACCESS (code=1, address=0x0)
inside
- (Pix *)pixForImage:(UIImage *)image
{
    int width = image.size.width;
    int height = image.size.height;
    CGImage *cgImage = image.CGImage;
    CFDataRef imageData = CGDataProviderCopyData(CGImageGetDataProvider(cgImage));
    const UInt8 *pixels = CFDataGetBytePtr(imageData); // <<< EXC_BAD_ACCESS
    size_t bitsPerPixel = CGImageGetBitsPerPixel(cgImage);
    size_t bytesPerPixel = bitsPerPixel / 8;
    size_t bytesPerRow = CGImageGetBytesPerRow(cgImage);
What am I doing wrong?
Answer (score: 0)
Found the missing piece: to convert the buffer to a UIImage, you need to provide a CIContext and the buffer's size.
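For context on why the original code crashes there: a UIImage created with UIImage(ciImage:) is only backed by a CIImage, so its cgImage property is nil. Inside pixForImage that nil is passed to CGImageGetDataProvider, CGDataProviderCopyData then returns NULL, and CFDataGetBytePtr dereferences the null pointer (which matches address=0x0 in the crash). A minimal check, reusing the same pixel buffer, shows the missing backing:

let ciImage = CIImage(cvPixelBuffer: pixBuffer)
let wrapped = UIImage(ciImage: ciImage)
// A CIImage-backed UIImage exposes no CGImage, so Tesseract's pixForImage
// has no pixel data to copy.
print(wrapped.cgImage == nil) // prints "true"

Rendering the CIImage through a CIContext produces a real CGImage with actual pixel data, which is what the fix below does: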
let ciImage = CIImage(cvPixelBuffer: pixBuffer)
let ciContext = CIContext(options: nil)
let bufferRect = CGRect(x: 0, y: 0,
                        width: CVPixelBufferGetWidth(pixBuffer),
                        height: CVPixelBufferGetHeight(pixBuffer))
if let videoImage = ciContext.createCGImage(ciImage, from: bufferRect) {
    self.processedImage = UIImage(cgImage: videoImage)
    tesseract.image = self.processedImage
    // 5
    tesseract.recognize()
    // 6
    let text = tesseract.recognizedText
    print(text ?? "")
}
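One refinement worth noting (my suggestion, not part of the original answer): CIContext is relatively expensive to create, so for per-frame OCR it is better to create a single context and reuse it rather than allocating a new one on every call. A minimal sketch, where ocrContext and cgImage(from:) are hypothetical names for a shared context and a small helper:

// Create the context once (for example as a property on the class that owns the ARSession)
// and reuse it for every frame.
let ocrContext = CIContext(options: nil)

func cgImage(from pixelBuffer: CVPixelBuffer) -> CGImage? {
    let ciImage = CIImage(cvPixelBuffer: pixelBuffer)
    // The extent of a CIImage made from a pixel buffer already matches the buffer's dimensions,
    // so it can stand in for the hand-built CGRect in the answer above.
    return ocrContext.createCGImage(ciImage, from: ciImage.extent)
}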