I'm trying to decode AAC audio to PCM audio on iOS. What is the best way to do this? Any sample code would be very helpful... Is there a simple API for this?
Answer 0 (score: 7)
I have sample code that does this.
To start, configure the input/output ASBDs (AudioStreamBasicDescription) and create the converter:
- (void)setupAudioConverter {
    // Output format: interleaved signed 16-bit mono PCM at 44.1 kHz.
    AudioStreamBasicDescription outFormat;
    memset(&outFormat, 0, sizeof(outFormat));
    outFormat.mSampleRate       = 44100;
    outFormat.mFormatID         = kAudioFormatLinearPCM;
    outFormat.mFormatFlags      = kLinearPCMFormatFlagIsSignedInteger;
    outFormat.mBytesPerPacket   = 2;
    outFormat.mFramesPerPacket  = 1;
    outFormat.mBytesPerFrame    = 2;
    outFormat.mChannelsPerFrame = 1;
    outFormat.mBitsPerChannel   = 16;
    outFormat.mReserved         = 0;

    // Input format: AAC-LC, mono, 1024 frames per packet. AAC packet sizes
    // vary, so bytes-per-packet/frame and bits-per-channel stay at 0.
    AudioStreamBasicDescription inFormat;
    memset(&inFormat, 0, sizeof(inFormat));
    inFormat.mSampleRate       = 44100;
    inFormat.mFormatID         = kAudioFormatMPEG4AAC;
    inFormat.mFormatFlags      = kMPEG4Object_AAC_LC;
    inFormat.mBytesPerPacket   = 0;
    inFormat.mFramesPerPacket  = 1024;
    inFormat.mBytesPerFrame    = 0;
    inFormat.mChannelsPerFrame = 1;
    inFormat.mBitsPerChannel   = 0;
    inFormat.mReserved         = 0;

    OSStatus status = AudioConverterNew(&inFormat, &outFormat, &_audioConverter);
    if (status != 0) {
        printf("setup converter error, status: %i\n", (int)status);
    }
}
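The code above assumes that AudioToolbox is imported and that the converter lives in an _audioConverter instance variable. A minimal sketch of such an interface (the class name AACDecoder is just an example, not part of the original code) could be:

#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>

@interface AACDecoder : NSObject {
    AudioConverterRef _audioConverter;   // created lazily by setupAudioConverter
}
- (void)decodeAudioFrame:(NSData *)frame withPts:(NSInteger)pts;
@end

When you are done decoding, release the converter with AudioConverterDispose(_audioConverter).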
After that, write the input callback for the audio converter (the code below uses C++ features such as const_cast, so the file should be compiled as Objective-C++, i.e. given a .mm extension):
// Custom status code returned by the input callback to tell the converter
// that no more input is available (any value that does not collide with a
// real Core Audio error code will do).
static const OSStatus kNoMoreDataErr = 'MOAR';

struct PassthroughUserData {
    UInt32 mChannels;
    UInt32 mDataSize;
    const void* mData;
    AudioStreamPacketDescription mPacket;
};

OSStatus inInputDataProc(AudioConverterRef aAudioConverter,
                         UInt32* aNumDataPackets /* in/out */,
                         AudioBufferList* aData /* in/out */,
                         AudioStreamPacketDescription** aPacketDesc,
                         void* aUserData)
{
    PassthroughUserData* userData = (PassthroughUserData*)aUserData;
    if (!userData->mDataSize) {
        *aNumDataPackets = 0;
        return kNoMoreDataErr;
    }

    // The whole compressed frame is handed over as a single packet.
    *aNumDataPackets = 1;

    if (aPacketDesc) {
        userData->mPacket.mStartOffset = 0;
        userData->mPacket.mVariableFramesInPacket = 0;
        userData->mPacket.mDataByteSize = userData->mDataSize;
        *aPacketDesc = &userData->mPacket;
    }

    aData->mBuffers[0].mNumberChannels = userData->mChannels;
    aData->mBuffers[0].mDataByteSize = userData->mDataSize;
    aData->mBuffers[0].mData = const_cast<void*>(userData->mData);

    // No more data to provide following this run.
    userData->mDataSize = 0;

    return noErr;
}
The frame decoding method:
- (void)decodeAudioFrame:(NSData *)frame withPts:(NSInteger)pts {
    if (!_audioConverter) {
        [self setupAudioConverter];
    }

    PassthroughUserData userData = { 1, (UInt32)frame.length, [frame bytes] };
    NSMutableData *decodedData = [NSMutableData new];

    const uint32_t MAX_AUDIO_FRAMES = 128;
    const uint32_t maxDecodedSamples = MAX_AUDIO_FRAMES * 1;

    do {
        uint8_t *buffer = (uint8_t *)malloc(maxDecodedSamples * sizeof(short int));

        AudioBufferList decBuffer;
        decBuffer.mNumberBuffers = 1;
        decBuffer.mBuffers[0].mNumberChannels = 1;
        decBuffer.mBuffers[0].mDataByteSize = maxDecodedSamples * sizeof(short int);
        decBuffer.mBuffers[0].mData = buffer;

        UInt32 numFrames = MAX_AUDIO_FRAMES;

        AudioStreamPacketDescription outPacketDescription;
        memset(&outPacketDescription, 0, sizeof(AudioStreamPacketDescription));
        outPacketDescription.mDataByteSize = MAX_AUDIO_FRAMES;
        outPacketDescription.mStartOffset = 0;
        outPacketDescription.mVariableFramesInPacket = 0;

        OSStatus rv = AudioConverterFillComplexBuffer(_audioConverter,
                                                      inInputDataProc,
                                                      &userData,
                                                      &numFrames /* in/out */,
                                                      &decBuffer,
                                                      &outPacketDescription);
        if (rv && rv != kNoMoreDataErr) {
            NSLog(@"Error decoding audio stream: %d\n", (int)rv);
            free(buffer);
            break;
        }

        if (numFrames) {
            // mDataByteSize has been updated to the number of bytes actually decoded.
            [decodedData appendBytes:decBuffer.mBuffers[0].mData
                              length:decBuffer.mBuffers[0].mDataByteSize];
        }
        // The decoded samples have been copied into decodedData, so the
        // scratch buffer can be released before the next iteration.
        free(buffer);

        if (rv == kNoMoreDataErr) {
            break;
        }
    } while (true);

    //void *pData = (void *)[decodedData bytes];
    //audioRenderer->Render(&pData, decodedData.length, pts);
}
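For reference, a caller could feed raw AAC packets (without ADTS headers) into this method as in the sketch below; the decoder object, the aacFrames array, and the pts step are assumptions for illustration:

AACDecoder *decoder = [[AACDecoder alloc] init];
NSInteger pts = 0;
for (NSData *frame in aacFrames) {       // one raw AAC access unit per NSData
    [decoder decodeAudioFrame:frame withPts:pts];
    pts += 1024;                         // each AAC packet decodes to 1024 PCM frames
}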
Answer 1 (score: 1)
You need to use Core Audio. Look up the Core Audio Overview in Apple's documentation.
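For example, if the AAC data sits in a file (.m4a/.aac) rather than arriving as a raw stream, Extended Audio File Services in Core Audio's AudioToolbox wraps the same converter machinery. A minimal sketch, assuming a file URL and fixed 16-bit mono 44.1 kHz output (the helper name DecodeAACFileToPCM is made up for illustration):

#import <AudioToolbox/AudioToolbox.h>

static NSData *DecodeAACFileToPCM(NSURL *url) {
    ExtAudioFileRef file = NULL;
    if (ExtAudioFileOpenURL((__bridge CFURLRef)url, &file) != noErr) {
        return nil;
    }

    // Ask for interleaved signed 16-bit mono PCM at 44.1 kHz on the read side;
    // the file's own (AAC) format is decoded for us.
    AudioStreamBasicDescription pcm = {0};
    pcm.mSampleRate       = 44100;
    pcm.mFormatID         = kAudioFormatLinearPCM;
    pcm.mFormatFlags      = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
    pcm.mChannelsPerFrame = 1;
    pcm.mBitsPerChannel   = 16;
    pcm.mBytesPerFrame    = 2;
    pcm.mFramesPerPacket  = 1;
    pcm.mBytesPerPacket   = 2;
    ExtAudioFileSetProperty(file, kExtAudioFileProperty_ClientDataFormat, sizeof(pcm), &pcm);

    NSMutableData *out = [NSMutableData data];
    while (true) {
        uint8_t buf[4096];
        AudioBufferList list;
        list.mNumberBuffers = 1;
        list.mBuffers[0].mNumberChannels = 1;
        list.mBuffers[0].mDataByteSize = sizeof(buf);
        list.mBuffers[0].mData = buf;

        UInt32 frames = sizeof(buf) / pcm.mBytesPerFrame;
        if (ExtAudioFileRead(file, &frames, &list) != noErr || frames == 0) {
            break;
        }
        [out appendBytes:buf length:frames * pcm.mBytesPerFrame];
    }
    ExtAudioFileDispose(file);
    return out;
}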