Memory leak in CoreImage / CoreVideo

Asked: 2012-06-26 09:51:10

Tags: ios memory-leaks core-image core-video

I'm building an iOS app that does basic motion detection. I grab the raw frames from AVCaptureVideoDataOutput, convert the CMSampleBufferRef to a UIImage, resize the UIImage, and then convert it to a CVPixelBufferRef. As far as I can tell with Instruments, the leak is in the last part, where I convert the CGImage to a CVPixelBufferRef.

Here is the code I use:

- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection 
{
    videof = [[ASMotionDetect alloc] initWithSampleImage:[self resizeSampleBuffer:sampleBuffer]];
    // ASMotionDetect is my class for detection and I use videof to calculate the movement
}

-(UIImage*)resizeSampleBuffer:(CMSampleBufferRef) sampleBuffer {
    UIImage *img;
    CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer); 
    CVPixelBufferLockBaseAddress(imageBuffer,0);        // Lock the image buffer 

    uint8_t *baseAddress = (uint8_t *)CVPixelBufferGetBaseAddressOfPlane(imageBuffer, 0);   // Get information about the image 
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer); 
    size_t width = CVPixelBufferGetWidth(imageBuffer); 
    size_t height = CVPixelBufferGetHeight(imageBuffer); 
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB(); 

    CGContextRef newContext = CGBitmapContextCreate(baseAddress, width, height, 8, bytesPerRow, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst); 
    CGImageRef newImage = CGBitmapContextCreateImage(newContext); 
    CGContextRelease(newContext); 

    CGColorSpaceRelease(colorSpace); 
    CVPixelBufferUnlockBaseAddress(imageBuffer,0); 
    /* CVBufferRelease(imageBuffer); */  // do not call this! CMSampleBufferGetImageBuffer follows the Get rule, so this buffer is not ours to release

    img = [UIImage imageWithCGImage:newImage];
    CGImageRelease(newImage);
    newContext = nil;
    img = [self resizeImageToSquare:img];
    return img;
}

-(UIImage*)resizeImageToSquare:(UIImage*)_temp {
    UIImage *img;
    int w = _temp.size.width;
    int h = _temp.size.height;
    CGRect rect;
    if (w>h) {
        rect = CGRectMake((w-h)/2,0,h,h);
    } else {
        rect = CGRectMake(0, (h-w)/2, w, w);
    }
    //
    img = [self crop:_temp inRect:rect];
    return img;
}

-(UIImage*) crop:(UIImage*)image inRect:(CGRect)rect{
    UIImage *sourceImage = image;
    CGRect selectionRect = rect;
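    // TransformCGRectForUIImageOrientation is a helper defined elsewhere in the project;
    // it maps the crop rect into the image's oriented pixel space.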
    CGRect transformedRect = TransformCGRectForUIImageOrientation(selectionRect, sourceImage.imageOrientation, sourceImage.size);
    CGImageRef resultImageRef = CGImageCreateWithImageInRect(sourceImage.CGImage, transformedRect);
    UIImage *resultImage = [[UIImage alloc] initWithCGImage:resultImageRef scale:1.0 orientation:image.imageOrientation];
    CGImageRelease(resultImageRef);
    return resultImage;
}

In my detection class I have:

- (id)initWithSampleImage:(UIImage*)sampleImage {
  if ((self = [super init])) {
    _frame = new CVMatOpaque();
    _histograms = new CVMatNDOpaque[kGridSize *
                                    kGridSize];
    [self extractFrameFromImage:sampleImage];
  }
  return self;
}

- (void)extractFrameFromImage:(UIImage*)sampleImage {
    CGImageRef imageRef = [sampleImage CGImage];
    CVImageBufferRef imageBuffer = [self pixelBufferFromCGImage:imageRef];
    CVPixelBufferLockBaseAddress(imageBuffer, 0);
  // Collect some information required to extract the frame.
    void *baseAddress = CVPixelBufferGetBaseAddress(imageBuffer);
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
    size_t height = CVPixelBufferGetHeight(imageBuffer);
    size_t width = CVPixelBufferGetWidth(imageBuffer);

  // Extract the frame, convert it to grayscale, and shove it in _frame.
    cv::Mat frame(height, width, CV_8UC4, baseAddress, bytesPerRow);
    cv::cvtColor(frame, frame, CV_BGR2GRAY);
    _frame->matrix = frame;
    CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
    CGImageRelease(imageRef);
}

- (CVPixelBufferRef) pixelBufferFromCGImage: (CGImageRef) image
{
    CVPixelBufferRef pxbuffer = NULL;
    int width = CGImageGetWidth(image)*2;
    int height = CGImageGetHeight(image)*2;

    NSMutableDictionary *attributes = [NSMutableDictionary dictionaryWithObjectsAndKeys:[NSNumber numberWithInt:kCVPixelFormatType_32ARGB], kCVPixelBufferPixelFormatTypeKey, [NSNumber numberWithInt:width], kCVPixelBufferWidthKey, [NSNumber numberWithInt:height], kCVPixelBufferHeightKey, nil];
    CVPixelBufferPoolRef pixelBufferPool; 
    CVReturn theError = CVPixelBufferPoolCreate(kCFAllocatorDefault, NULL, (__bridge CFDictionaryRef) attributes, &pixelBufferPool);
    NSParameterAssert(theError == kCVReturnSuccess);
    CVReturn status = CVPixelBufferPoolCreatePixelBuffer(NULL, pixelBufferPool, &pxbuffer);
    NSParameterAssert(status == kCVReturnSuccess && pxbuffer != NULL);

    CVPixelBufferLockBaseAddress(pxbuffer, 0);
    void *pxdata = CVPixelBufferGetBaseAddress(pxbuffer);
    NSParameterAssert(pxdata != NULL);
    CGColorSpaceRef rgbColorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef context = CGBitmapContextCreate(pxdata, width,
                                                 height, 8, width*4, rgbColorSpace, 
                                                 kCGImageAlphaNoneSkipFirst);
    NSParameterAssert(context);
/* here is the problem: */
    CGContextDrawImage(context, CGRectMake(0, 0, width, height), image);
    CGColorSpaceRelease(rgbColorSpace);
    CGContextRelease(context);

    CVPixelBufferUnlockBaseAddress(pxbuffer, 0);

    return pxbuffer;
}

Using Instruments I found that the problem is with the CVPixelBufferRef allocations, but I don't understand why - can anybody see the problem?

Thanks

1 Answer:

Answer 0 (score: 1)

In -pixelBufferFromCGImage:, neither pxbuffer nor pixelBufferPool is ever released. That makes sense for pxbuffer, because it is the return value, but not for pixelBufferPool - a new pool is created, and leaked, on every call to the method.

The quick fix should be to:

  1. release pixelBufferPool in -pixelBufferFromCGImage:
  2. release pxbuffer (the return value of -pixelBufferFromCGImage:) in -extractFrameFromImage:

You should also rename -pixelBufferFromCGImage: to -createPixelBufferFromCGImage: to make it clear that it returns a retained object.
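
Putting the fixes together, a minimal sketch of the two repaired methods might look like this (assuming ARC, so only the CF/CV objects need manual release calls; this is a sketch against the code above, not a tested drop-in implementation):

- (CVPixelBufferRef)createPixelBufferFromCGImage:(CGImageRef)image
{
    CVPixelBufferRef pxbuffer = NULL;
    int width = (int)CGImageGetWidth(image) * 2;    // the *2 scaling is kept from the original code
    int height = (int)CGImageGetHeight(image) * 2;

    NSDictionary *attributes = @{ (__bridge id)kCVPixelBufferPixelFormatTypeKey : @(kCVPixelFormatType_32ARGB),
                                  (__bridge id)kCVPixelBufferWidthKey : @(width),
                                  (__bridge id)kCVPixelBufferHeightKey : @(height) };
    CVPixelBufferPoolRef pixelBufferPool = NULL;
    CVReturn theError = CVPixelBufferPoolCreate(kCFAllocatorDefault, NULL, (__bridge CFDictionaryRef)attributes, &pixelBufferPool);
    NSParameterAssert(theError == kCVReturnSuccess);
    CVReturn status = CVPixelBufferPoolCreatePixelBuffer(NULL, pixelBufferPool, &pxbuffer);
    NSParameterAssert(status == kCVReturnSuccess && pxbuffer != NULL);
    // Fix 1: the buffer keeps its backing memory alive on its own,
    // so the pool can be released here instead of leaking once per call.
    CVPixelBufferPoolRelease(pixelBufferPool);

    CVPixelBufferLockBaseAddress(pxbuffer, 0);
    void *pxdata = CVPixelBufferGetBaseAddress(pxbuffer);
    NSParameterAssert(pxdata != NULL);
    CGColorSpaceRef rgbColorSpace = CGColorSpaceCreateDeviceRGB();
    // Ask the buffer for its bytes-per-row: pool-allocated buffers may pad rows beyond width*4.
    CGContextRef context = CGBitmapContextCreate(pxdata, width, height, 8,
                                                 CVPixelBufferGetBytesPerRow(pxbuffer),
                                                 rgbColorSpace, kCGImageAlphaNoneSkipFirst);
    NSParameterAssert(context);
    CGContextDrawImage(context, CGRectMake(0, 0, width, height), image);
    CGColorSpaceRelease(rgbColorSpace);
    CGContextRelease(context);
    CVPixelBufferUnlockBaseAddress(pxbuffer, 0);

    return pxbuffer;    // returned at +1: the caller must call CVPixelBufferRelease
}

- (void)extractFrameFromImage:(UIImage*)sampleImage {
    // sampleImage.CGImage follows the Get rule, so it must not be CGImageRelease'd here.
    CVPixelBufferRef imageBuffer = [self createPixelBufferFromCGImage:sampleImage.CGImage];
    CVPixelBufferLockBaseAddress(imageBuffer, 0);
    void *baseAddress = CVPixelBufferGetBaseAddress(imageBuffer);
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
    size_t height = CVPixelBufferGetHeight(imageBuffer);
    size_t width = CVPixelBufferGetWidth(imageBuffer);

    cv::Mat frame((int)height, (int)width, CV_8UC4, baseAddress, bytesPerRow);
    // cvtColor allocates a fresh grayscale buffer, so frame no longer aliases the pixel buffer after this line.
    cv::cvtColor(frame, frame, CV_BGR2GRAY);
    _frame->matrix = frame;
    CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
    // Fix 2: balance the create above; without this, one buffer leaked per captured frame.
    CVPixelBufferRelease(imageBuffer);
}

As a design note, a pool that is created and torn down on every call buys nothing: either keep pixelBufferPool in an ivar and reuse it across frames, or drop the pool entirely and allocate the buffer directly with CVPixelBufferCreate.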