我使用下面的代码渲染一个带有红色矩形的简单视频。使用_CanvasSize = CGSizeMake(320,200);一切正常。但是,如果我将大小更改为_CanvasSize = CGSizeMake(321,200),则视频会撕裂;或(100,100)。 有谁知道为什么我应该选择哪种尺寸? (我使用XCode 7.3.1 iOS 9 SDK)。
NSString *fileNameOut = @"temp.mp4";
NSString *directoryOut = @"tmp/";
NSString *outFile = [NSString stringWithFormat:@"%@%@",directoryOut,fileNameOut];
NSString *path = [NSHomeDirectory() stringByAppendingPathComponent:[NSString stringWithFormat:@"%@",outFile]];
NSURL *videoTempURL = [NSURL fileURLWithPath:[NSString stringWithFormat:@"%@%@", NSTemporaryDirectory(), fileNameOut]];
// WARNING: AVAssetWriter does not overwrite files for us, so remove the destination file if it already exists
NSFileManager *fileManager = [NSFileManager defaultManager];
[fileManager removeItemAtPath:[videoTempURL path] error:NULL];
CGSize _CanvasSize;// = CGSizeMake(size.width, size.height);
NSError *error = nil;
NSInteger FPS = 30;
AVAssetWriter* VIDCtrl = [[AVAssetWriter alloc] initWithURL:[NSURL fileURLWithPath:path] fileType:AVFileTypeMPEG4 error:&error];
if (!VIDCtrl || error)
{
NSLog(@"Can NOT Create Video Writer");
return;
}
_CanvasSize = CGSizeMake(321, 200);
NSDictionary *videoSettings = [NSDictionary dictionaryWithObjectsAndKeys:
AVVideoCodecH264, AVVideoCodecKey,
[NSNumber numberWithInt:_CanvasSize.width], AVVideoWidthKey,
[NSNumber numberWithInt:_CanvasSize.height], AVVideoHeightKey,
nil];
AVAssetWriterInput* writerInput = [AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeVideo
outputSettings:videoSettings];
AVAssetWriterInputPixelBufferAdaptor *adaptor = [AVAssetWriterInputPixelBufferAdaptor assetWriterInputPixelBufferAdaptorWithAssetWriterInput:writerInput
sourcePixelBufferAttributes:nil];
NSParameterAssert(writerInput);
NSParameterAssert([VIDCtrl canAddInput:writerInput]);
[VIDCtrl addInput:writerInput];
[VIDCtrl startWriting];
[VIDCtrl startSessionAtSourceTime:kCMTimeZero];
CVPixelBufferRef buffer = NULL;
double ftime = 600.0 / FPS;
double currenttime = 0;
double frametime = 1.0 / FPS;
int i = 0;
while (1)
{
// Check if the writer is ready for more data, if not, just wait
if(writerInput.readyForMoreMediaData){
CMTime frameTime = CMTimeMake(ftime, 600);
// CMTime = Value and Timescale.
// Timescale = the number of tics per second you want
// Value is the number of tics
// For us - each frame we add will be 1/4th of a second
// Apple recommend 600 tics per second for video because it is a
// multiple of the standard video rates 24, 30, 60 fps etc.
CMTime lastTime=CMTimeMake(i*ftime, 600);
CMTime presentTime=CMTimeAdd(lastTime, frameTime);
if (i == 0) {presentTime = CMTimeMake(0, 600);}
// This ensures the first frame starts at 0.
buffer = NULL;
if (i < 30)
{
NSLog(@"%d %d",i, presentTime.value);
CGSize sz = _CanvasSize;
int height = sz.height, width = sz.width;
NSDictionary *options = [NSDictionary dictionaryWithObjectsAndKeys:
[NSNumber numberWithBool:YES], kCVPixelBufferCGImageCompatibilityKey,
[NSNumber numberWithBool:YES], kCVPixelBufferCGBitmapContextCompatibilityKey,
nil];
CVPixelBufferRef pxbuffer = NULL;
if (!pxbuffer)
{
CVReturn status = CVPixelBufferCreate(kCFAllocatorDefault, width,
height, kCVPixelFormatType_32ARGB, (__bridge CFDictionaryRef) options,
&pxbuffer);
NSParameterAssert(status == kCVReturnSuccess && pxbuffer != NULL);
}
CVPixelBufferLockBaseAddress(pxbuffer, 0);
void *pxdata = CVPixelBufferGetBaseAddress(pxbuffer);
NSParameterAssert(pxdata != NULL);
NSUInteger bytesPerPixel = 4;
NSUInteger bytesPerRow = bytesPerPixel * sz.width;
NSUInteger bitsPerComponent = 8;
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef gc = CGBitmapContextCreate(pxdata, sz.width, sz.height,
bitsPerComponent, bytesPerRow, colorSpace,
kCGImageAlphaNoneSkipFirst);
UIGraphicsPushContext(gc);
CGContextTranslateCTM(gc, 0, sz.height);
CGContextScaleCTM(gc, 1.0, -1.0);
CGContextSetFillColorWithColor(gc, [UIColor whiteColor].CGColor);
CGContextFillRect(gc, (CGRect){0,0,sz});
CGContextSetStrokeColorWithColor(gc, [UIColor redColor].CGColor);
CGContextStrokeRect(gc, CGRectMake(10, 10, 30, 30));
CGColorSpaceRelease(colorSpace);
CGContextRelease(gc);
CVPixelBufferUnlockBaseAddress(pxbuffer, 0);
buffer = pxbuffer;
i++;
}
currenttime+=frametime;
if (buffer)
{
// Give the CGImage to the AVAssetWriter to add to your video
[adaptor appendPixelBuffer:buffer withPresentationTime:presentTime];
// CVBufferRelease(buffer);
CFRelease(buffer);
}
else
{
//Finish the session:
// This is important to be done exactly in this order
[writerInput markAsFinished];
// WARNING: finishWriting in the solution above is deprecated.
// You now need to give a completion handler.
[VIDCtrl finishWritingWithCompletionHandler:^{
NSLog(@"Finished writing...checking completion status...");
if (VIDCtrl.status != AVAssetWriterStatusFailed && VIDCtrl.status == AVAssetWriterStatusCompleted)
{
NSLog(@"Video writing succeeded To %@",path);
} else
{
NSLog(@"Video writing failed: %@", VIDCtrl.error);
}
}]; // end videoWriter finishWriting Block
CVPixelBufferPoolRelease(adaptor.pixelBufferPool);
NSLog (@"Done");
break;
}
}
}
感谢您阅读我的问题!
答案 0 :(得分:1)
好的,经过一天的测试。视频的宽度应该可以被16整除。(32,320,144,480,1280,1920等......)