Convert audio from the iPhone microphone to iLBC

Date: 2016-04-27 10:41:59

Tags: objective-c iphone audio audio-recording audio-converter

I want to record audio from the iPhone microphone, convert it to iLBC, and then stream it to a remote server. But AudioConverterFillComplexBuffer always returns 1768846202. I know this means kAudioConverterErr_InvalidInputSize, but I don't know which input is wrong.
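For reference, 1768846202 is the four-character code 'insz', which is how kAudioConverterErr_InvalidInputSize prints. A minimal helper for turning an OSStatus into that readable form could look like this (not part of the original code):

#import <Foundation/Foundation.h>

// Logs an OSStatus both as a decimal value and as its four-character code
// (e.g. 1768846202 -> 'insz' == kAudioConverterErr_InvalidInputSize).
static void LogOSStatus(OSStatus status)
{
    char cc[5];
    cc[0] = (status >> 24) & 0xFF;
    cc[1] = (status >> 16) & 0xFF;
    cc[2] = (status >> 8)  & 0xFF;
    cc[3] =  status        & 0xFF;
    cc[4] = '\0';
    NSLog(@"OSStatus %d ('%s')", (int)status, cc);
}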

I have searched for related articles, such as Stream audio from iOS, Record audio on iPhone with smallest file size, and AudioUnit PCM compression to iLBC and decompression to PCM, but none of them solved my problem.

Here is my conversion function:

-(AudioBuffer) doConvert: (AudioBuffer)pcmData
{
    char *outputBuffer = NULL;
    OSStatus status;

    UInt32 theOutputBufSize = pcmData.mDataByteSize;//32768;
    outputBuffer = (char*)malloc(sizeof(char) * theOutputBufSize);

    /* Create the output buffer list */
    AudioBufferList outBufferList;
    outBufferList.mNumberBuffers = 1;
    outBufferList.mBuffers[0].mNumberChannels = 1;
    outBufferList.mBuffers[0].mDataByteSize   = theOutputBufSize;
    outBufferList.mBuffers[0].mData           = outputBuffer;

    //Converting
    //UInt32 ioOutputDataPackets = numOutputPackets;
    UInt32 numOutputDataPackets = 1;
    AudioStreamPacketDescription outPacketDesc[1];
    status = AudioConverterFillComplexBuffer(audioConverterDecode,
                                             encodeProc,
                                             &pcmData,
                                             &numOutputDataPackets,
                                             &outBufferList,
                                             outPacketDesc);
    //outBufferList.mBuffers[0].mDataByteSize   = theOutputBufSize;
    [self hasError:status:__FILE__:__LINE__];

    /* Set the output data */
    AudioBuffer outData;
    outData.mNumberChannels = 1;
    outData.mData           = outBufferList.mBuffers[0].mData;
    outData.mDataByteSize   = outBufferList.mBuffers[0].mDataByteSize;

    return outData;
}
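The encodeProc input callback used above is not shown in the question. For 16-bit mono LPCM input it would typically look like the sketch below; the names and the assumption that the user data is the AudioBuffer passed into doConvert: are mine, not the asker's actual code:

// Sketch of an AudioConverterComplexInputDataProc for 16-bit mono LPCM.
// inUserData is assumed to be the &pcmData passed to AudioConverterFillComplexBuffer above.
static OSStatus encodeProc(AudioConverterRef inConverter,
                           UInt32 *ioNumberDataPackets,
                           AudioBufferList *ioData,
                           AudioStreamPacketDescription **outDataPacketDescription,
                           void *inUserData)
{
    AudioBuffer *pcm = (AudioBuffer *)inUserData;

    // Hand the whole PCM buffer to the converter in one call.
    ioData->mNumberBuffers = 1;
    ioData->mBuffers[0].mNumberChannels = 1;
    ioData->mBuffers[0].mData           = pcm->mData;
    ioData->mBuffers[0].mDataByteSize   = pcm->mDataByteSize;

    // For LPCM one packet is one frame, and one mono 16-bit frame is 2 bytes,
    // so the reported packet count must match the byte size exactly;
    // a mismatch is one way to end up with kAudioConverterErr_InvalidInputSize.
    *ioNumberDataPackets = pcm->mDataByteSize / 2;

    // NOTE: real code must track what has already been supplied and report
    // 0 packets once the buffer is exhausted, or the converter keeps pulling.

    // LPCM input needs no packet descriptions.
    if (outDataPacketDescription)
        *outDataPacketDescription = NULL;

    return noErr;
}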

My init function:

-(void)initDecoder
{
    NSLog(@"initDecoder");
    AudioStreamBasicDescription srcFormat, dstFormat;
    //AudioConverterRef   converter = NULL;
    char *outputBuffer = NULL;
    OSStatus status;

    //output format (iLBC, 20 ms mode: 160 frames and 38 bytes per packet)
    dstFormat.mSampleRate       = 8000.0;
    dstFormat.mFormatID         = kAudioFormatiLBC;
    dstFormat.mChannelsPerFrame = 1;
    dstFormat.mBytesPerPacket   = 38;//50;
    dstFormat.mFramesPerPacket  = 160;//240;
    dstFormat.mBytesPerFrame    = 0;
    dstFormat.mBitsPerChannel   = 0;
    dstFormat.mFormatFlags      = 0;

    //source format (packed 16-bit signed integer mono PCM)
    srcFormat.mSampleRate       = SAMPLE_RATE;   //This is 48000
    srcFormat.mFormatID         = kAudioFormatLinearPCM;
    srcFormat.mFormatFlags      = kAudioFormatFlagIsPacked | kAudioFormatFlagIsSignedInteger;
    srcFormat.mFramesPerPacket  = 1;
    srcFormat.mChannelsPerFrame = 1;
    srcFormat.mBitsPerChannel   = 16;
    srcFormat.mBytesPerPacket   = 2;
    srcFormat.mBytesPerFrame    = 2;
    srcFormat.mReserved         = 0;

    status = AudioConverterNew(&srcFormat, &dstFormat, &audioConverterDecode);
    [self hasError:status:__FILE__:__LINE__];
}
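Once AudioConverterNew succeeds, the converter itself can report how large one encoded iLBC packet can be, which is a more reliable way to size the output buffer than reusing the PCM byte size. A minimal sketch, assuming the audioConverterDecode ivar from above:

// Ask the converter for the maximum size of a single output packet.
// For iLBC this is the packet size of the chosen mode (38 or 50 bytes).
UInt32 maxOutputPacketSize = 0;
UInt32 propSize = sizeof(maxOutputPacketSize);
OSStatus propStatus = AudioConverterGetProperty(audioConverterDecode,
                                                kAudioConverterPropertyMaximumOutputPacketSize,
                                                &propSize,
                                                &maxOutputPacketSize);
if (propStatus == noErr) {
    NSLog(@"maximum iLBC packet size: %u bytes", (unsigned int)maxOutputPacketSize);
}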

1 Answer:

Answer 0 (score: 1):

I changed some of the code from this and then found my solution.

createAudioConvert:

AudioStreamBasicDescription inputFormat = *(CMAudioFormatDescriptionGetStreamBasicDescription(CMSampleBufferGetFormatDescription(sampleBuffer))); // input audio format
AudioStreamBasicDescription outputFormat; 

memset(&outputFormat, 0, sizeof(outputFormat));
outputFormat.mSampleRate       = 8000;
outputFormat.mFormatID         = kAudioFormatiLBC;
outputFormat.mChannelsPerFrame = 1;

// use AudioFormat API to fill out the rest of the description
UInt32 size = sizeof(outputFormat);
AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &size, &outputFormat);


// force the 30 ms iLBC mode: 50 bytes / 240 frames per packet
outputFormat.mBytesPerPacket  = 50;
outputFormat.mFramesPerPacket = 240;


AudioClassDescription *desc = [self getAudioClassDescriptionWithType:kAudioFormatiLBC
                                                     fromManufacturer:kAppleSoftwareAudioCodecManufacturer];
if (AudioConverterNewSpecific(&inputFormat, &outputFormat, 1, desc, &m_converter) != noErr)
{
    printf("AudioConverterNewSpecific failed\n");
    return NO;
}

return YES;
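The getAudioClassDescriptionWithType:fromManufacturer: helper is not included in the answer; a common implementation enumerates the installed encoders with the AudioFormat API, roughly like this sketch:

// Returns the AudioClassDescription of an installed encoder matching the given
// format (e.g. kAudioFormatiLBC) and manufacturer
// (e.g. kAppleSoftwareAudioCodecManufacturer), or NULL if none is found.
- (AudioClassDescription *)getAudioClassDescriptionWithType:(UInt32)type
                                           fromManufacturer:(UInt32)manufacturer
{
    static AudioClassDescription desc;

    UInt32 encoderSpecifier = type;
    UInt32 size = 0;
    OSStatus st = AudioFormatGetPropertyInfo(kAudioFormatProperty_Encoders,
                                             sizeof(encoderSpecifier),
                                             &encoderSpecifier,
                                             &size);
    if (st != noErr) {
        return NULL;
    }

    unsigned int count = size / sizeof(AudioClassDescription);
    AudioClassDescription descriptions[count];
    st = AudioFormatGetProperty(kAudioFormatProperty_Encoders,
                                sizeof(encoderSpecifier),
                                &encoderSpecifier,
                                &size,
                                descriptions);
    if (st != noErr) {
        return NULL;
    }

    // Pick the codec that matches both the requested format and manufacturer.
    for (unsigned int i = 0; i < count; i++) {
        if (descriptions[i].mSubType == type &&
            descriptions[i].mManufacturer == manufacturer) {
            desc = descriptions[i];
            return &desc;
        }
    }
    return NULL;
}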

encoderAAC (the aacData output buffer and its aacLen length are passed in by the caller):

if ([self createAudioConvert:sampleBuffer] != YES)
{
    return NO;
}

CMBlockBufferRef blockBuffer = nil;
AudioBufferList  inBufferList;
if (CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(sampleBuffer, NULL, &inBufferList, sizeof(inBufferList), NULL, NULL, 0, &blockBuffer) != noErr)
{
    printf("CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer failed");
    return NO;
}

AudioBufferList outBufferList;
outBufferList.mNumberBuffers              = 1;
outBufferList.mBuffers[0].mNumberChannels = 1;//2;
outBufferList.mBuffers[0].mDataByteSize   = *aacLen;  // capacity of the caller-supplied output buffer
outBufferList.mBuffers[0].mData           = aacData;  // caller-supplied output buffer

UInt32 outputDataPacketSize               = 1;        // request one iLBC packet per call

OSStatus err = AudioConverterFillComplexBuffer(m_converter, inputDataProc, &inBufferList, &outputDataPacketSize, &outBufferList, NULL);
printf("AudioConverterFillComplexBuffer\n");


if ( err != noErr)
{
    printf("AudioConverterFillComplexBuffer failed\n");
    return NO;
}

*aacLen = outBufferList.mBuffers[0].mDataByteSize; // number of encoded bytes actually produced
CFRelease(blockBuffer);
return YES;

The callback function:

OSStatus inputDataProc(AudioConverterRef inConverter, UInt32 *ioNumberDataPackets, AudioBufferList *ioData, AudioStreamPacketDescription **outDataPacketDescription, void *inUserData)
{
    AudioBufferList bufferList = *(AudioBufferList*)inUserData;

    // Hand the captured PCM buffer straight to the converter.
    ioData->mNumberBuffers = 1;
    ioData->mBuffers[0].mNumberChannels = 1;
    ioData->mBuffers[0].mData           = bufferList.mBuffers[0].mData;
    ioData->mBuffers[0].mDataByteSize   = bufferList.mBuffers[0].mDataByteSize;

    // 16-bit mono LPCM: one packet == one frame == 2 bytes.
    UInt32 maxPackets = bufferList.mBuffers[0].mDataByteSize / 2;
    *ioNumberDataPackets = maxPackets;

    return noErr;
}
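For completeness, the sampleBuffer fed into createAudioConvert: and the encoder typically comes from an AVCaptureAudioDataOutput delegate. The wrapper selector and buffer size below are assumptions (the answer does not show the call site), so treat this only as a usage sketch:

#import <AVFoundation/AVFoundation.h>

// Hypothetical call site: encode each captured buffer and send the iLBC bytes on.
// The selector encoderAAC:aacData:aacLen: is a guess at how the snippet above is wrapped.
- (void)captureOutput:(AVCaptureOutput *)output
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
       fromConnection:(AVCaptureConnection *)connection
{
    char ilbcData[1024];
    UInt32 ilbcLen = sizeof(ilbcData);
    if ([self encoderAAC:sampleBuffer aacData:ilbcData aacLen:&ilbcLen]) {
        // stream ilbcLen bytes of iLBC data to the remote server here
    }
}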