A strange problem. I grab frames from a video file (.mov) and write them into another file with AVAssetWriter, without any explicit processing. Effectively I just copy each frame from one memory buffer to another and flush it through a PixelBufferAdaptor. Then I take the resulting file, delete the original, put the resulting file in the original's place, and do the same thing again. The odd part is that the file size keeps growing! Can anybody explain why?
if (adaptor.assetWriterInput.readyForMoreMediaData == YES) {
    CMTime lastTime = CMTimeMake(fcounter++, 30);
    CMTime presentTime = CMTimeAdd(lastTime, frameTime);
    CMSampleBufferRef framebuffer = nil;
    CGImageRef frameImg = nil;
    if ([asr status] == AVAssetReaderStatusReading) {
        // copyNextSampleBuffer follows the Create/Copy rule: release the buffer below.
        framebuffer = [asset_reader_output copyNextSampleBuffer];
        frameImg = [self imageFromSampleBuffer:framebuffer withColorSpace:rgbColorSpace];
    }
    if (frameImg && screenshot) {
        CVReturn stat = CVPixelBufferLockBaseAddress(screenshot, 0);
        pxdata = CVPixelBufferGetBaseAddress(screenshot);
        bufferSize = CVPixelBufferGetDataSize(screenshot);
        // Get the number of bytes per row for the pixel buffer.
        bytesPerRow = CVPixelBufferGetBytesPerRow(screenshot);
        // Get the pixel buffer width and height.
        width = CVPixelBufferGetWidth(screenshot);
        height = CVPixelBufferGetHeight(screenshot);
        // Create a Quartz direct-access data provider that uses data we supply.
        CGDataProviderRef dataProvider = CGDataProviderCreateWithData(NULL, pxdata, bufferSize, NULL);
        // Inspect the source frame's pixel format (not used further below).
        CGImageAlphaInfo ai = CGImageGetAlphaInfo(frameImg);
        size_t bpx = CGImageGetBitsPerPixel(frameImg);
        CGColorSpaceRef fclr = CGImageGetColorSpace(frameImg);
        // Create a bitmap image from data supplied by the data provider.
        CGImageRef cgImage = CGImageCreate(width, height, 8, 32, bytesPerRow, rgbColorSpace,
                                           kCGImageAlphaNoneSkipLast | kCGBitmapByteOrder32Big,
                                           dataProvider, NULL, true, kCGRenderingIntentDefault);
        CGDataProviderRelease(dataProvider);
        // Draw the decoded frame into the pixel buffer that gets appended.
        stat = CVPixelBufferLockBaseAddress(finalPixelBuffer, 0);
        pxdata = CVPixelBufferGetBaseAddress(finalPixelBuffer);
        bytesPerRow = CVPixelBufferGetBytesPerRow(finalPixelBuffer);
        CGContextRef context = CGBitmapContextCreate(pxdata, imgsize.width, imgsize.height, 8,
                                                     bytesPerRow, rgbColorSpace, kCGImageAlphaNoneSkipLast);
        CGContextDrawImage(context,
                           CGRectMake(0, 0, CGImageGetWidth(frameImg), CGImageGetHeight(frameImg)),
                           frameImg);
        const CGFloat myMaskingColors[6] = { 0, 0, 0, 1, 0, 0 };
        CGImageRef myColorMaskedImage = CGImageCreateWithMaskingColors(cgImage, myMaskingColors);
        //CGContextDrawImage(context, CGRectMake(0, 0, CGImageGetWidth(myColorMaskedImage), CGImageGetHeight(myColorMaskedImage)), myColorMaskedImage);
        [adaptor appendPixelBuffer:finalPixelBuffer withPresentationTime:presentTime];
        // Release everything created in this iteration so the loop does not leak,
        // and unlock both pixel buffers.
        CGImageRelease(myColorMaskedImage);
        CGImageRelease(cgImage);
        CGContextRelease(context);
        CVPixelBufferUnlockBaseAddress(finalPixelBuffer, 0);
        CVPixelBufferUnlockBaseAddress(screenshot, 0);
    }
    if (frameImg) CGImageRelease(frameImg);
    if (framebuffer) CFRelease(framebuffer);
}
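One thing the snippet never shows is closing the writing session once the reader runs dry; without it the output file stays incomplete. A minimal teardown sketch, assuming videoWriter is the AVAssetWriter behind the adaptor (the name is a placeholder, not from the question's code):

    // Once the reader has no more samples, finish the session so the
    // writer can flush its queue and finalize the output file.
    [adaptor.assetWriterInput markAsFinished];
    [videoWriter finishWritingWithCompletionHandler:^{
        if (videoWriter.status == AVAssetWriterStatusFailed) {
            NSLog(@"Writing failed: %@", videoWriter.error);
        }
    }];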
Answer (score: 0):

Every pass re-encodes the frames, and when no bit rate is specified the H.264 encoder falls back on its own default, which is evidently higher than what the source file was encoded at, so each generation comes out larger. Pin the compression settings down yourself:
NSDictionary *codecSettings = [NSDictionary dictionaryWithObjectsAndKeys:
                               [NSNumber numberWithInt:1100000], AVVideoAverageBitRateKey,
                               [NSNumber numberWithInt:5], AVVideoMaxKeyFrameIntervalKey,
                               nil];
NSDictionary *videoSettings = [NSDictionary dictionaryWithObjectsAndKeys:
                               AVVideoCodecH264, AVVideoCodecKey,
                               [NSNumber numberWithInt:[SharedApplicationData sharedData].overlayView.frame.size.width], AVVideoWidthKey,
                               [NSNumber numberWithInt:[SharedApplicationData sharedData].overlayView.frame.size.height], AVVideoHeightKey,
                               codecSettings, AVVideoCompressionPropertiesKey,
                               nil];
AVAssetWriterInput *writerInput = [AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeVideo
                                                                     outputSettings:videoSettings];
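For context, this is roughly how that input and the pixel buffer adaptor would be wired to the writer; outputURL is a placeholder for wherever the generated file goes, not a name from the question:

    // Sketch: attach the configured input to a writer and start the session.
    NSError *error = nil;
    AVAssetWriter *videoWriter = [AVAssetWriter assetWriterWithURL:outputURL
                                                          fileType:AVFileTypeQuickTimeMovie
                                                             error:&error];
    AVAssetWriterInputPixelBufferAdaptor *adaptor =
        [AVAssetWriterInputPixelBufferAdaptor assetWriterInputPixelBufferAdaptorWithAssetWriterInput:writerInput
                                                                         sourcePixelBufferAttributes:nil];
    [videoWriter addInput:writerInput];
    [videoWriter startWriting];
    [videoWriter startSessionAtSourceTime:kCMTimeZero];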
Now the file size still grows, but far more slowly. There is a trade-off between file size and video quality: shrinking the file costs quality.
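If the goal is a size that stays roughly stable across generations, one option (an untested sketch; asset stands for the AVAsset being read) is to reuse the source track's estimated data rate instead of the hard-coded 1100000:

    // AVAssetTrack's estimatedDataRate reports the track's bit rate in bits per second;
    // feeding it back into the writer keeps each pass close to the source's size.
    AVAssetTrack *videoTrack = [[asset tracksWithMediaType:AVMediaTypeVideo] firstObject];
    float sourceBitRate = videoTrack.estimatedDataRate;

    NSDictionary *codecSettings = @{
        AVVideoAverageBitRateKey : @((int)sourceBitRate),
        AVVideoMaxKeyFrameIntervalKey : @5
    };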