I am getting error -50 (invalid parameters) from AudioUnitRender in the following context. I am using this Pitch Detector sample app as my starting point, and it works fine. The only major difference in my project is that I am also using the Remote I/O unit for audio output. The audio output works fine. Here are my input callback and my initialization code (error checking removed for brevity). I know it is a lot, but error -50 gives very little information about what the problem actually is.
Input callback:
OSStatus inputCallback(void                       *inRefCon,
                       AudioUnitRenderActionFlags *ioActionFlags,
                       const AudioTimeStamp       *inTimeStamp,
                       UInt32                      inBusNumber,
                       UInt32                      inNumberFrames,
                       AudioBufferList            *ioData) {
    WBAudio *audioObject = (WBAudio *)inRefCon;
    AudioUnit rioUnit = audioObject->m_audioUnit;
    OSStatus renderErr;
    UInt32 bus1 = 1; // input bus of the Remote I/O unit
    renderErr = AudioUnitRender(rioUnit, ioActionFlags, inTimeStamp,
                                bus1, inNumberFrames,
                                audioObject->m_inBufferList);
    if (renderErr < 0) {
        return renderErr; // breaks here
    }
    return noErr;
} // end inputCallback()
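Since the error checking was removed for brevity, one way to trace which call fails is a small checker along these lines (a sketch; CheckStatus is a hypothetical helper, not part of the project above):

#include <stdio.h>
#include <string.h>
#include <ctype.h>
#include <CoreFoundation/CoreFoundation.h>

// Print a failing OSStatus either as a four-character code (many Core
// Audio errors are, e.g. 'fmt?') or as a plain integer otherwise.
static void CheckStatus(OSStatus error, const char *operation) {
    if (error == noErr) return;
    char code[5] = {0};
    UInt32 bigEndian = CFSwapInt32HostToBig((UInt32)error);
    memcpy(code, &bigEndian, 4);
    if (isprint((unsigned char)code[0]) && isprint((unsigned char)code[1]) &&
        isprint((unsigned char)code[2]) && isprint((unsigned char)code[3])) {
        fprintf(stderr, "Error: %s ('%s')\n", operation, code);
    } else {
        fprintf(stderr, "Error: %s (%d)\n", operation, (int)error);
    }
}

Called as CheckStatus(renderErr, "AudioUnitRender"); the -50 here prints as a plain integer, since it is not a printable four-character code.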
Initialization:
- (id) init {
    self = [super init];
    if (!self) return nil;

    OSStatus result;

    //! Initialize a buffer list for rendering input
    size_t bytesPerSample;
    bytesPerSample = sizeof(SInt16);
    m_inBufferList = (AudioBufferList *)malloc(sizeof(AudioBuffer));
    m_inBufferList->mNumberBuffers = 1;
    m_inBufferList->mBuffers[0].mNumberChannels = 1;
    m_inBufferList->mBuffers[0].mDataByteSize = 512 * bytesPerSample;
    m_inBufferList->mBuffers[0].mData = calloc(512, bytesPerSample);

    //! Initialize an audio session to get buffer size
    result = AudioSessionInitialize(NULL, NULL, NULL, NULL);

    UInt32 audioCategory = kAudioSessionCategory_PlayAndRecord;
    result = AudioSessionSetProperty(kAudioSessionProperty_AudioCategory,
                                     sizeof(audioCategory), &audioCategory);

    // Set preferred buffer size
    Float32 preferredBufferSize = static_cast<float>(m_pBoard->m_uBufferSize) / m_pBoard->m_fSampleRate;
    result = AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration,
                                     sizeof(preferredBufferSize), &preferredBufferSize);

    // Get actual buffer size
    Float32 audioBufferSize;
    UInt32 size = sizeof(audioBufferSize);
    result = AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareIOBufferDuration,
                                     &size, &audioBufferSize);

    result = AudioSessionSetActive(true);

    //! Create our Remote I/O component description
    AudioComponentDescription desc;
    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_RemoteIO;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;

    //! Find the corresponding component
    AudioComponent outputComponent = AudioComponentFindNext(NULL, &desc);

    //! Create the component instance
    result = AudioComponentInstanceNew(outputComponent, &m_audioUnit);

    //! Enable audio output
    UInt32 flag = 1;
    result = AudioUnitSetProperty(m_audioUnit, kAudioOutputUnitProperty_EnableIO,
                                  kAudioUnitScope_Output, kOutputBus, &flag, sizeof(flag));

    //! Enable audio input
    result = AudioUnitSetProperty(m_audioUnit, kAudioOutputUnitProperty_EnableIO,
                                  kAudioUnitScope_Input, kInputBus, &flag, sizeof(flag));

    //! Create our audio stream description
    m_audioFormat.mSampleRate = m_pBoard->m_fSampleRate;
    m_audioFormat.mFormatID = kAudioFormatLinearPCM;
    m_audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    m_audioFormat.mFramesPerPacket = 1;
    m_audioFormat.mChannelsPerFrame = 1;
    m_audioFormat.mBitsPerChannel = 16;
    m_audioFormat.mBytesPerPacket = 2;
    m_audioFormat.mBytesPerFrame = 2;

    //! Set the stream format
    result = AudioUnitSetProperty(m_audioUnit, kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Input, kOutputBus,
                                  &m_audioFormat, sizeof(m_audioFormat));
    result = AudioUnitSetProperty(m_audioUnit, kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Output, kInputBus,
                                  &m_audioFormat, sizeof(m_audioFormat));

    //! Set the render callback
    AURenderCallbackStruct renderCallbackStruct = {0};
    renderCallbackStruct.inputProc = renderCallback;
    renderCallbackStruct.inputProcRefCon = m_pBoard;
    result = AudioUnitSetProperty(m_audioUnit, kAudioUnitProperty_SetRenderCallback,
                                  kAudioUnitScope_Global, kOutputBus,
                                  &renderCallbackStruct, sizeof(renderCallbackStruct));

    //! Set the input callback
    AURenderCallbackStruct inputCallbackStruct = {0};
    inputCallbackStruct.inputProc = inputCallback;
    inputCallbackStruct.inputProcRefCon = self;
    result = AudioUnitSetProperty(m_audioUnit, kAudioOutputUnitProperty_SetInputCallback,
                                  kAudioUnitScope_Input, kOutputBus,
                                  &inputCallbackStruct, sizeof(inputCallbackStruct));

    //! Initialize the unit
    result = AudioUnitInitialize(m_audioUnit);

    return self;
}
Answer 0 (score: 0)
Error -50 in the developer documentation means a parameter error; make sure you are passing the right parameters to AudioUnitRender. Check the stream format and your unit.
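As a sketch of that kind of check (assuming the single-buffer, 16-bit mono setup from the question; negotiated and propSize are new locals introduced here), two things worth verifying around the AudioUnitRender call in the input callback are the advertised buffer size and the format the unit actually negotiated:

// Keep mDataByteSize in sync with what the hardware delivers on this
// render cycle; a stale or too-small value is a classic cause of -50
// (paramErr). The backing mData allocation must be at least this large.
audioObject->m_inBufferList->mBuffers[0].mDataByteSize =
    inNumberFrames * sizeof(SInt16);

// One-off diagnostic: ask the unit which stream format it settled on
// for data coming out of the input bus (bus 1) and compare it with
// the ASBD set during initialization.
AudioStreamBasicDescription negotiated;
UInt32 propSize = sizeof(negotiated);
AudioUnitGetProperty(rioUnit, kAudioUnitProperty_StreamFormat,
                     kAudioUnitScope_Output, 1, &negotiated, &propSize);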
Answer 1 (score: 0)
I agree with Kurt Pattyn that the allocation of m_inBufferList is incorrect and is probably the bad parameter causing the -50 error. Except that I think for a single buffer it should be (AudioBufferList *)malloc(sizeof(AudioBufferList)). My evidence is the following sizes, and the code below from Adamson & Avila.
(lldb) p sizeof(AudioBufferList)
24
(lldb) po sizeof(AudioBuffer)
16
(lldb) po offsetof(AudioBufferList, mBuffers[0])
8
According to Chris Adamson and Kevin Avila in Learning Core Audio:
// Allocate an AudioBufferList plus enough space for
// array of AudioBuffers
UInt32 propsize = offsetof(AudioBufferList, mBuffers[0]) +
                  (sizeof(AudioBuffer) * player->streamFormat.mChannelsPerFrame);

// malloc buffer lists
player->inputBuffer = (AudioBufferList *)malloc(propsize);
player->inputBuffer->mNumberBuffers = player->streamFormat.mChannelsPerFrame;

// Pre-malloc buffers for AudioBufferLists
for (UInt32 i = 0; i < player->inputBuffer->mNumberBuffers; i++) {
    player->inputBuffer->mBuffers[i].mNumberChannels = 1;
    player->inputBuffer->mBuffers[i].mDataByteSize = bufferSizeBytes;
    player->inputBuffer->mBuffers[i].mData = malloc(bufferSizeBytes);
}
Last but not least, I just came across the following comment in this code :)
//credit to TheAmazingAudioEngine for an illustration of proper audiobufferlist allocation.
// Google leads to some really really bad allocation code...
[other code]
sampleABL = malloc(sizeof(AudioBufferList) + (bufferCnt-1)*sizeof(AudioBuffer));
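Applied to the mono, single-buffer case from the question, both idioms come out the same (a sketch reusing the 512-sample SInt16 buffer from the question's code), because AudioBufferList already declares a one-element mBuffers array:

// offsetof(AudioBufferList, mBuffers[0]) + 1 * sizeof(AudioBuffer)
// is 8 + 16 = 24, i.e. exactly sizeof(AudioBufferList) per the
// lldb sizes above.
m_inBufferList = (AudioBufferList *)malloc(sizeof(AudioBufferList));
m_inBufferList->mNumberBuffers = 1;
m_inBufferList->mBuffers[0].mNumberChannels = 1;
m_inBufferList->mBuffers[0].mDataByteSize = 512 * sizeof(SInt16);
m_inBufferList->mBuffers[0].mData = calloc(512, sizeof(SInt16));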
Answer 2 (score: -1)
You are allocating m_inBufferList as:
m_inBufferList = (AudioBufferList *)malloc(sizeof(AudioBuffer));
It should instead be:
m_inBufferList = (AudioBufferList *)malloc(sizeof(AudioBufferList) + sizeof(AudioBuffer) * numberOfBuffers); // numberOfBuffers in your case is 1
Perhaps this will solve your problem.
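If you adopt that allocation, the matching cleanup (hypothetical dealloc code, not shown in the question) would be:

free(m_inBufferList->mBuffers[0].mData);
free(m_inBufferList);
m_inBufferList = NULL;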