I am trying to create a square video. I am using a 640x480 camera view for this, but I cannot crop the video from the top. I have tried a lot without success, so please suggest how to do this: which transform would help?
Answer 0 (score: 4)
Usage:
Create a property (or any other variable) to hold the video transcoder:
self.videoTranscoder = [SCVideoTranscoder new];
self.videoTranscoder.asset = PUT_UR_AVASSET_HERE;
self.videoTranscoder.outputURL = PUT_A_FILE_URL_HERE;
__weak typeof(self) weakSelf = self;
self.videoTranscoder.completionBlock = ^(BOOL success){
//PUT YOUR CODE HERE WHEN THE TRANSCODING IS DONE...
};
[self.videoTranscoder start];
You can cancel the transcoding process simply by calling [transcoder cancel].
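For completeness, here is a minimal sketch of how the two placeholders above might be filled in (the file name and the use of the temporary directory are just assumptions for illustration):
// movieFileURL is a placeholder for a file URL pointing at your recorded movie.
AVAsset *asset = [AVAsset assetWithURL:movieFileURL];
NSURL *outputURL = [NSURL fileURLWithPath:[NSTemporaryDirectory() stringByAppendingPathComponent:@"square-video.mp4"]];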
SCVideoTranscoder.h file:
#import <Foundation/Foundation.h>
#import <AVFoundation/AVFoundation.h>
@interface SCVideoTranscoder : NSObject
@property (nonatomic, strong) AVAsset *asset;
@property (nonatomic, assign) BOOL cancelled;
@property (nonatomic, strong) NSURL *outputURL;
@property (nonatomic, strong) void (^completionBlock)(BOOL success);
- (void)start;
- (void)cancel;
@end
SCVideoTranscoder.m file. In it you can see that I set the video to 480x480; you can change that to whatever value you like!
#import "SCVideoTranscoder.h"
@interface SCVideoTranscoder()
@property (nonatomic, strong) dispatch_queue_t mainSerializationQueue;
@property (nonatomic, strong) dispatch_queue_t rwAudioSerializationQueue;
@property (nonatomic, strong) dispatch_queue_t rwVideoSerializationQueue;
@property (nonatomic, strong) dispatch_group_t dispatchGroup;
@property (nonatomic, strong) AVAssetReader* assetReader;
@property (nonatomic, strong) AVAssetWriter* assetWriter;
@property (nonatomic, strong) AVAssetReaderTrackOutput *assetReaderAudioOutput;
@property (nonatomic, strong) AVAssetReaderTrackOutput *assetReaderVideoOutput;
@property (nonatomic, strong) AVAssetWriterInput *assetWriterAudioInput;
@property (nonatomic, strong) AVAssetWriterInput *assetWriterVideoInput;
@property (nonatomic, assign) BOOL audioFinished;
@property (nonatomic, assign) BOOL videoFinished;
@end
@implementation SCVideoTranscoder
- (void)start
{
NSString *serializationQueueDescription = [NSString stringWithFormat:@"%@ serialization queue", self];
// Create the main serialization queue.
self.mainSerializationQueue = dispatch_queue_create([serializationQueueDescription UTF8String], NULL);
NSString *rwAudioSerializationQueueDescription = [NSString stringWithFormat:@"%@ rw audio serialization queue", self];
// Create the serialization queue to use for reading and writing the audio data.
self.rwAudioSerializationQueue = dispatch_queue_create([rwAudioSerializationQueueDescription UTF8String], NULL);
NSString *rwVideoSerializationQueueDescription = [NSString stringWithFormat:@"%@ rw video serialization queue", self];
// Create the serialization queue to use for reading and writing the video data.
self.rwVideoSerializationQueue = dispatch_queue_create([rwVideoSerializationQueueDescription UTF8String], NULL);
self.cancelled = NO;
// Asynchronously load the tracks of the asset you want to read.
[self.asset loadValuesAsynchronouslyForKeys:@[@"tracks"] completionHandler:^{
// Once the tracks have finished loading, dispatch the work to the main serialization queue.
dispatch_async(self.mainSerializationQueue, ^{
// Due to asynchronous nature, check to see if user has already cancelled.
if (self.cancelled)
return;
BOOL success = YES;
NSError *localError = nil;
// Check for success of loading the asset's tracks.
success = ([self.asset statusOfValueForKey:@"tracks" error:&localError] == AVKeyValueStatusLoaded);
if (success)
{
// If the tracks loaded successfully, make sure that no file exists at the output path for the asset writer.
NSFileManager *fm = [NSFileManager defaultManager];
NSString *localOutputPath = [self.outputURL path];
if ([fm fileExistsAtPath:localOutputPath])
success = [fm removeItemAtPath:localOutputPath error:&localError];
}
if (success)
{
success = [self setupAssetReaderAndAssetWriter:&localError];
}
if (success)
{
[self startAssetReaderAndWriter:&localError];
}
else
{
// Report any failure (track loading, output-file removal, or reader/writer setup) to the caller.
[self readingAndWritingDidFinishSuccessfully:NO withError:localError];
}
});
}];
}
- (BOOL)setupAssetReaderAndAssetWriter:(NSError **)outError
{
// Create and initialize the asset reader.
self.assetReader = [[AVAssetReader alloc] initWithAsset:self.asset error:outError];
BOOL success = (self.assetReader != nil);
if (success)
{
// If the asset reader was successfully initialized, do the same for the asset writer.
self.assetWriter = [[AVAssetWriter alloc] initWithURL:self.outputURL fileType:AVFileTypeMPEG4 error:outError];
success = (self.assetWriter != nil);
}
if (success)
{
// If the reader and writer were successfully initialized, grab the audio and video asset tracks that will be used.
AVAssetTrack *assetAudioTrack = nil, *assetVideoTrack = nil;
NSArray *audioTracks = [self.asset tracksWithMediaType:AVMediaTypeAudio];
if ([audioTracks count] > 0)
assetAudioTrack = [audioTracks objectAtIndex:0];
NSArray *videoTracks = [self.asset tracksWithMediaType:AVMediaTypeVideo];
if ([videoTracks count] > 0)
assetVideoTrack = [videoTracks objectAtIndex:0];
if (assetAudioTrack)
{
// If there is an audio track to read, set the decompression settings to Linear PCM and create the asset reader output.
NSDictionary *decompressionAudioSettings = @{ AVFormatIDKey : [NSNumber numberWithUnsignedInt:kAudioFormatLinearPCM] };
self.assetReaderAudioOutput = [AVAssetReaderTrackOutput assetReaderTrackOutputWithTrack:assetAudioTrack outputSettings:decompressionAudioSettings];
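// NOTE: the time range below limits the transcode to the first 20 seconds (debug)
// or 10 seconds (release); remove it to transcode the whole asset. As written, it
// is also only applied when the asset has an audio track.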
#if defined(DEBUG) && DEBUG
self.assetReader.timeRange = CMTimeRangeMake(kCMTimeZero, CMTimeMake(20, 1));
#else
self.assetReader.timeRange = CMTimeRangeMake(kCMTimeZero, CMTimeMake(10, 1));
#endif
[self.assetReader addOutput:self.assetReaderAudioOutput];
// Then, set the compression settings to 128kbps AAC and create the asset writer input.
AudioChannelLayout stereoChannelLayout = {
.mChannelLayoutTag = kAudioChannelLayoutTag_Stereo,
.mChannelBitmap = 0,
.mNumberChannelDescriptions = 0
};
NSData *channelLayoutAsData = [NSData dataWithBytes:&stereoChannelLayout length:offsetof(AudioChannelLayout, mChannelDescriptions)];
NSDictionary *compressionAudioSettings = @{
AVFormatIDKey : [NSNumber numberWithUnsignedInt:kAudioFormatMPEG4AAC],
AVEncoderBitRateKey : [NSNumber numberWithInteger:128000],
AVSampleRateKey : [NSNumber numberWithInteger:44100],
AVChannelLayoutKey : channelLayoutAsData,
AVNumberOfChannelsKey : [NSNumber numberWithUnsignedInteger:2]
};
self.assetWriterAudioInput = [AVAssetWriterInput assetWriterInputWithMediaType:[assetAudioTrack mediaType] outputSettings:compressionAudioSettings];
[self.assetWriter addInput:self.assetWriterAudioInput];
}
if (assetVideoTrack)
{
// If there is a video track to read, set the decompression settings for YUV and create the asset reader output.
NSDictionary *decompressionVideoSettings = @{
(id)kCVPixelBufferPixelFormatTypeKey : [NSNumber numberWithUnsignedInt:kCVPixelFormatType_422YpCbCr8],
(id)kCVPixelBufferIOSurfacePropertiesKey : [NSDictionary dictionary]
};
self.assetReaderVideoOutput = [AVAssetReaderTrackOutput assetReaderTrackOutputWithTrack:assetVideoTrack outputSettings:decompressionVideoSettings];
[self.assetReader addOutput:self.assetReaderVideoOutput];
CMFormatDescriptionRef formatDescription = NULL;
// Grab the video format descriptions from the video track and grab the first one if it exists.
NSArray *videoFormatDescriptions = [assetVideoTrack formatDescriptions];
if ([videoFormatDescriptions count] > 0)
formatDescription = (__bridge CMFormatDescriptionRef)[videoFormatDescriptions objectAtIndex:0];
CGSize trackDimensions = {
.width = 0.0,
.height = 0.0,
};
CGAffineTransform videoTransform = [assetVideoTrack preferredTransform];
// If the video track had a format description, grab the track dimensions from there. Otherwise, grab them directly from the track itself.
if (formatDescription)
trackDimensions = CMVideoFormatDescriptionGetPresentationDimensions(formatDescription, false, false);
else
trackDimensions = [assetVideoTrack naturalSize];
int width = 480;
int height = 480;
int bitrate = 1000000;
NSDictionary *compressionSettings = @{
AVVideoAverageBitRateKey: [NSNumber numberWithInt:bitrate],AVVideoMaxKeyFrameIntervalKey: @(150),
AVVideoProfileLevelKey: AVVideoProfileLevelH264BaselineAutoLevel,
AVVideoAllowFrameReorderingKey: @NO,
AVVideoH264EntropyModeKey: AVVideoH264EntropyModeCAVLC,
AVVideoExpectedSourceFrameRateKey: @(30)
};
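// SCDeviceModel is the answer author's own helper class (not shown in this answer);
// a speedgrade of 0 presumably marks a slow/older device, which gets a smaller
// 320x320 output with simpler compression settings.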
if ([SCDeviceModel speedgrade]==0) {
width = 320;
height = 320;
compressionSettings = @{
AVVideoAverageBitRateKey: [NSNumber numberWithInt:bitrate],
AVVideoProfileLevelKey: AVVideoProfileLevelH264BaselineAutoLevel,
AVVideoAllowFrameReorderingKey: @NO};
}
NSDictionary *videoSettings =
@{
AVVideoCodecKey: AVVideoCodecH264,
AVVideoScalingModeKey: AVVideoScalingModeResizeAspectFill,
AVVideoWidthKey: [NSNumber numberWithInt:width],
AVVideoHeightKey: [NSNumber numberWithInt:height],
AVVideoCompressionPropertiesKey: compressionSettings};
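// AVVideoScalingModeResizeAspectFill scales the source to fill the square output
// size and crops whatever overflows, which is what actually makes the video
// square; note that the crop is centered rather than anchored to the top edge.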
// Create the asset writer input and add it to the asset writer.
self.assetWriterVideoInput = [AVAssetWriterInput assetWriterInputWithMediaType:[assetVideoTrack mediaType] outputSettings:videoSettings];
self.assetWriterVideoInput.transform = videoTransform;
[self.assetWriter addInput:self.assetWriterVideoInput];
}
}
return success;
}
- (void)startAssetReaderAndWriter:(NSError **)outError
{
BOOL success = YES;
// Attempt to start the asset reader.
success = [self.assetReader startReading];
if (!success)
*outError = [self.assetReader error];
if (success)
{
// If the reader started successfully, attempt to start the asset writer.
success = [self.assetWriter startWriting];
if (!success)
*outError = [self.assetWriter error];
}
if (success)
{
// If the asset reader and writer both started successfully, create the dispatch group where the reencoding will take place and start a sample-writing session.
self.dispatchGroup = dispatch_group_create();
[self.assetWriter startSessionAtSourceTime:kCMTimeZero];
self.audioFinished = NO;
self.videoFinished = NO;
if (self.assetWriterAudioInput)
{
// If there is audio to reencode, enter the dispatch group before beginning the work.
dispatch_group_enter(self.dispatchGroup);
// Specify the block to execute when the asset writer is ready for audio media data, and specify the queue to call it on.
[self.assetWriterAudioInput requestMediaDataWhenReadyOnQueue:self.rwAudioSerializationQueue usingBlock:^{
// Because the block is called asynchronously, check to see whether its task is complete.
if (self.audioFinished)
return;
BOOL completedOrFailed = NO;
// If the task isn't complete yet, make sure that the input is actually ready for more media data.
while ([self.assetWriterAudioInput isReadyForMoreMediaData] && !completedOrFailed)
{
// Get the next audio sample buffer, and append it to the output file.
CMSampleBufferRef sampleBuffer = [self.assetReaderAudioOutput copyNextSampleBuffer];
if (sampleBuffer != NULL)
{
BOOL success = [self.assetWriterAudioInput appendSampleBuffer:sampleBuffer];
CFRelease(sampleBuffer);
sampleBuffer = NULL;
completedOrFailed = !success;
}
else
{
completedOrFailed = YES;
}
}
if (completedOrFailed)
{
// Mark the input as finished, but only if we haven't already done so, and then leave the dispatch group (since the audio work has finished).
BOOL oldFinished = self.audioFinished;
self.audioFinished = YES;
if (oldFinished == NO)
{
[self.assetWriterAudioInput markAsFinished];
}
dispatch_group_leave(self.dispatchGroup);
}
}];
}
if (self.assetWriterVideoInput)
{
// If we had video to reencode, enter the dispatch group before beginning the work.
dispatch_group_enter(self.dispatchGroup);
// Specify the block to execute when the asset writer is ready for video media data, and specify the queue to call it on.
[self.assetWriterVideoInput requestMediaDataWhenReadyOnQueue:self.rwVideoSerializationQueue usingBlock:^{
// Because the block is called asynchronously, check to see whether its task is complete.
if (self.videoFinished)
return;
BOOL completedOrFailed = NO;
// If the task isn't complete yet, make sure that the input is actually ready for more media data.
while ([self.assetWriterVideoInput isReadyForMoreMediaData] && !completedOrFailed)
{
// Get the next video sample buffer, and append it to the output file.
CMSampleBufferRef sampleBuffer = [self.assetReaderVideoOutput copyNextSampleBuffer];
if (sampleBuffer != NULL)
{
BOOL success = [self.assetWriterVideoInput appendSampleBuffer:sampleBuffer];
CFRelease(sampleBuffer);
sampleBuffer = NULL;
completedOrFailed = !success;
}
else
{
completedOrFailed = YES;
}
}
if (completedOrFailed)
{
// Mark the input as finished, but only if we haven't already done so, and then leave the dispatch group (since the video work has finished).
BOOL oldFinished = self.videoFinished;
self.videoFinished = YES;
if (oldFinished == NO)
{
[self.assetWriterVideoInput markAsFinished];
}
dispatch_group_leave(self.dispatchGroup);
}
}];
}
// Set up the notification that the dispatch group will send when the audio and video work have both finished.
dispatch_group_notify(self.dispatchGroup, self.mainSerializationQueue, ^{
BOOL finalSuccess = YES;
NSError *finalError = nil;
// Check to see if the work has finished due to cancellation.
if (self.cancelled)
{
// If so, cancel the reader and writer, and treat the run as unsuccessful so
// that finishWriting is not called on a cancelled writer below.
[self.assetReader cancelReading];
[self.assetWriter cancelWriting];
finalSuccess = NO;
}
else
{
// If cancellation didn't occur, first make sure that the asset reader didn't fail.
if ([self.assetReader status] == AVAssetReaderStatusFailed)
{
finalSuccess = NO;
finalError = [self.assetReader error];
}
// If the asset reader didn't fail, attempt to stop the asset writer and check for any errors.
}
if (finalSuccess)
{
[self.assetWriter finishWritingWithCompletionHandler:^{
NSError *finalError = nil;
if(self.assetWriter.status == AVAssetWriterStatusFailed)
{
finalError = self.assetWriter.error;
}
[self readingAndWritingDidFinishSuccessfully:finalError==nil
withError:finalError];
}];
return;
}
// Call the method to handle completion, and pass in the appropriate parameters to indicate whether reencoding was successful.
[self readingAndWritingDidFinishSuccessfully:finalSuccess withError:finalError];
});
}
else
{
// If either the reader or the writer failed to start, report the failure
// instead of silently returning.
[self readingAndWritingDidFinishSuccessfully:NO withError:(outError ? *outError : nil)];
}
}
- (void)readingAndWritingDidFinishSuccessfully:(BOOL)success withError:(NSError *)error
{
if (!success)
{
// If the reencoding process failed, we need to cancel the asset reader and writer.
[self.assetReader cancelReading];
[self.assetWriter cancelWriting];
dispatch_async(dispatch_get_main_queue(), ^{
if (self.completionBlock) {
self.completionBlock(NO);
}
});
}
else
{
// Reencoding was successful, reset booleans.
self.cancelled = NO;
self.videoFinished = NO;
self.audioFinished = NO;
dispatch_async(dispatch_get_main_queue(), ^{
if (self.completionBlock) {
self.completionBlock(YES);
}
});
}
}
- (void)cancel
{
// Handle cancellation asynchronously, but serialize it with the main queue.
dispatch_async(self.mainSerializationQueue, ^{
// If we had audio data to reencode, we need to cancel the audio work.
if (self.assetWriterAudioInput)
{
// Handle cancellation asynchronously again, but this time serialize it with the audio queue.
dispatch_async(self.rwAudioSerializationQueue, ^{
// Update the Boolean property indicating the task is complete and mark the input as finished if it hasn't already been marked as such.
BOOL oldFinished = self.audioFinished;
self.audioFinished = YES;
if (oldFinished == NO)
{
[self.assetWriterAudioInput markAsFinished];
}
// Leave the dispatch group since the audio work is finished now.
dispatch_group_leave(self.dispatchGroup);
});
}
if (self.assetWriterVideoInput)
{
// Handle cancellation asynchronously again, but this time serialize it with the video queue.
dispatch_async(self.rwVideoSerializationQueue, ^{
// Update the Boolean property indicating the task is complete and mark the input as finished if it hasn't already been marked as such.
BOOL oldFinished = self.videoFinished;
self.videoFinished = YES;
if (oldFinished == NO)
{
[self.assetWriterVideoInput markAsFinished];
}
// Leave the dispatch group, since the video work is finished now.
dispatch_group_leave(self.dispatchGroup);
});
}
// Set the cancelled Boolean property to YES to cancel any work on the main queue as well.
self.cancelled = YES;
});
}
@end
Answer 1 (score: 0)
Change
[composition addMutableTrackWithMediaType:AVMediaTypeVideo preferredTrackID:kCMPersistentTrackID_Invalid];
to
AVMutableCompositionTrack *theTrack = [composition addMutableTrackWithMediaType:AVMediaTypeVideo preferredTrackID:kCMPersistentTrackID_Invalid];
and fix your time ranges, height, width, and so on accordingly.
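If you need control over which part of the frame survives the crop (the question asks about cropping from the top), an AVMutableVideoComposition with a layer-instruction transform is one way to do it. The following is a minimal sketch rather than a drop-in solution: asset, outputURL, and the 640x480-to-480x480 geometry are assumptions you would replace with your own values.
AVAssetTrack *videoTrack = [[asset tracksWithMediaType:AVMediaTypeVideo] firstObject];
AVMutableVideoComposition *videoComposition = [AVMutableVideoComposition videoComposition];
videoComposition.renderSize = CGSizeMake(480.0, 480.0); // the square output size
videoComposition.frameDuration = CMTimeMake(1, 30);
AVMutableVideoCompositionInstruction *instruction = [AVMutableVideoCompositionInstruction videoCompositionInstruction];
instruction.timeRange = CMTimeRangeMake(kCMTimeZero, asset.duration);
AVMutableVideoCompositionLayerInstruction *layerInstruction = [AVMutableVideoCompositionLayerInstruction videoCompositionLayerInstructionWithAssetTrack:videoTrack];
// The translation decides which 480x480 region of the source ends up in the
// output: (0, 0) keeps the top-left corner; for a landscape 640x480 source,
// (-80, 0) centers the crop horizontally. For a portrait 480x640 source you
// would translate in y instead.
[layerInstruction setTransform:CGAffineTransformMakeTranslation(0.0, 0.0) atTime:kCMTimeZero];
instruction.layerInstructions = @[layerInstruction];
videoComposition.instructions = @[instruction];
// Export with the video composition attached.
AVAssetExportSession *exporter = [[AVAssetExportSession alloc] initWithAsset:asset presetName:AVAssetExportPresetHighestQuality];
exporter.videoComposition = videoComposition;
exporter.outputURL = outputURL; // a file URL of your choice
exporter.outputFileType = AVFileTypeMPEG4;
[exporter exportAsynchronouslyWithCompletionHandler:^{
// Check exporter.status and exporter.error here.
}];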