I use the following code to initialize my audio components.
-(void) startListeningWithCoreAudio
{
    NSError *error = nil;
    [[AVAudioSession sharedInstance] setCategory:AVAudioSessionCategoryPlayAndRecord error:&error];
    if (error)
        NSLog(@"error setting up audio session: %@", [error localizedDescription]);
    [[AVAudioSession sharedInstance] setDelegate:self];

    OSStatus status = AudioSessionSetActive(YES);
    checkStatus(status);

    // Find Apple's voice-processing I/O audio unit
    AudioComponentDescription desc;
    desc.componentType         = kAudioUnitType_Output;
    desc.componentSubType      = kAudioUnitSubType_VoiceProcessingIO;
    desc.componentFlags        = 0;
    desc.componentFlagsMask    = 0;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    AudioComponent inputComponent = AudioComponentFindNext( NULL, &desc );
    status = AudioComponentInstanceNew( inputComponent, &kAudioUnit );
    checkStatus( status );

    // Enable input (the microphone) on the input bus
    UInt32 flag = 1;
    status = AudioUnitSetProperty( kAudioUnit, kAudioOutputUnitProperty_EnableIO,
                                   kAudioUnitScope_Input, kInputBus, &flag, sizeof(flag) );
    checkStatus(status);

    // Define the format the mic delivers to us: 16-bit mono PCM at 16 kHz
    AudioStreamBasicDescription audioFormat;
    audioFormat.mSampleRate       = 16000.0;
    audioFormat.mFormatID         = kAudioFormatLinearPCM;
    audioFormat.mFormatFlags      = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    audioFormat.mFramesPerPacket  = 1;
    audioFormat.mChannelsPerFrame = 1;
    audioFormat.mBitsPerChannel   = 16;
    audioFormat.mBytesPerPacket   = 2;
    audioFormat.mBytesPerFrame    = 2;
    status = AudioUnitSetProperty( kAudioUnit, kAudioUnitProperty_StreamFormat,
                                   kAudioUnitScope_Output, kInputBus, &audioFormat, sizeof(audioFormat) );
    checkStatus(status);

    // Register the input callback
    AURenderCallbackStruct callbackStruct;
    callbackStruct.inputProc       = recordingCallback;
    callbackStruct.inputProcRefCon = self;
    status = AudioUnitSetProperty( kAudioUnit, kAudioOutputUnitProperty_SetInputCallback,
                                   kAudioUnitScope_Global, kInputBus, &callbackStruct, sizeof(callbackStruct) );
    checkStatus(status);

    // Bypass voice processing
    UInt32 audioBypassProcessing = [[NSUserDefaults standardUserDefaults] boolForKey:VOICE_BY_PASS_PROCESSING];
    status = AudioUnitSetProperty(kAudioUnit, kAUVoiceIOProperty_BypassVoiceProcessing,
                                  kAudioUnitScope_Global, kInputBus, &audioBypassProcessing, sizeof(audioBypassProcessing));
    checkStatus(status);

    // Automatic gain control
    UInt32 audioAGC = [[NSUserDefaults standardUserDefaults] boolForKey:VOICE_AGC];
    status = AudioUnitSetProperty(kAudioUnit, kAUVoiceIOProperty_VoiceProcessingEnableAGC,
                                  kAudioUnitScope_Global, kInputBus, &audioAGC, sizeof(audioAGC));
    checkStatus(status);

    // Ducking of non-voice audio
    UInt32 audioDucking = [[NSUserDefaults standardUserDefaults] boolForKey:VOICE_DUCKING];
    status = AudioUnitSetProperty(kAudioUnit, kAUVoiceIOProperty_DuckNonVoiceAudio,
                                  kAudioUnitScope_Global, kInputBus, &audioDucking, sizeof(audioDucking));
    checkStatus(status);

    // Voice-processing quality
    UInt32 quality = [[NSUserDefaults standardUserDefaults] integerForKey:VOICE_QUALITY];
    status = AudioUnitSetProperty(kAudioUnit, kAUVoiceIOProperty_VoiceProcessingQuality,
                                  kAudioUnitScope_Global, kInputBus, &quality, sizeof(quality));
    checkStatus(status);

    status = AudioUnitInitialize(kAudioUnit);
    checkStatus(status);
    status = AudioOutputUnitStart( kAudioUnit );
    checkStatus(status);

    // Route output to the speaker
    UInt32 audioRoute = (UInt32)kAudioSessionOverrideAudioRoute_Speaker;
    status = AudioSessionSetProperty(kAudioSessionProperty_OverrideAudioRoute, sizeof(audioRoute), &audioRoute);
    checkStatus(status);
}
-(void) stopListeningWithCoreAudio
{
    OSStatus status = AudioUnitUninitialize( kAudioUnit );
    checkStatus(status);
    status = AudioOutputUnitStop( kAudioUnit );
    checkStatus( status );

    // if(kAudioUnit)
    // {
    //     status = AudioComponentInstanceDispose(kAudioUnit);
    //     checkStatus(status);
    //     kAudioUnit = nil;
    // }

    status = AudioSessionSetActive(NO);
    checkStatus(status);

    NSError *error = nil;
    [[AVAudioSession sharedInstance] setCategory:AVAudioSessionCategorySoloAmbient error:&error];
    if (error)
        NSLog(@"error setting up audio session: %@", [error localizedDescription]);
}
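
The snippet above assumes a few definitions that are not shown: kInputBus, checkStatus, and recordingCallback. Minimal sketches of what they presumably look like follow (the originals may differ; this assumes kAudioUnit is visible at file scope, as its naming suggests):

#define kInputBus 1   // the input element of an I/O unit

// Log any failing Core Audio call
static void checkStatus(OSStatus status)
{
    if (status != noErr)
        NSLog(@"Core Audio error: %d", (int)status);
}

// Input callback: pull the captured 16-bit mono frames with AudioUnitRender
static OSStatus recordingCallback(void                       *inRefCon,
                                  AudioUnitRenderActionFlags *ioActionFlags,
                                  const AudioTimeStamp       *inTimeStamp,
                                  UInt32                      inBusNumber,
                                  UInt32                      inNumberFrames,
                                  AudioBufferList            *ioData)
{
    // One mono buffer; the format set above is 2 bytes per frame
    AudioBufferList bufferList;
    bufferList.mNumberBuffers              = 1;
    bufferList.mBuffers[0].mNumberChannels = 1;
    bufferList.mBuffers[0].mDataByteSize   = inNumberFrames * sizeof(SInt16);
    bufferList.mBuffers[0].mData           = malloc(bufferList.mBuffers[0].mDataByteSize);

    OSStatus status = AudioUnitRender(kAudioUnit, ioActionFlags, inTimeStamp,
                                      inBusNumber, inNumberFrames, &bufferList);
    if (status == noErr) {
        // ... hand bufferList.mBuffers[0].mData to the recorder/processor ...
    }
    free(bufferList.mBuffers[0].mData);
    return status;
}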
It works fine the first time: a button-press event calls startListeningWithCoreAudio, and it records/processes audio without problems. On another event, I call stopListeningWithCoreAudio to stop recording/processing.

The problem appears when I call startListeningWithCoreAudio again. Two of the calls it makes, AudioUnitInitialize and AudioOutputUnitStart, now return errors.

Can anyone help me figure out what the problem is?
Answer 0 (score: 2)
I found the solution. The problem arises if the following two functions are called back to back:
extern OSStatus AudioUnitUninitialize(AudioUnit inUnit)
extern OSStatus AudioComponentInstanceDispose(AudioComponentInstance inInstance)
So I invoke the dispose method on the main thread instead:
[self performSelectorOnMainThread:@selector(disposeCoreAudio) withObject:nil waitUntilDone:NO];
-(void) disposeCoreAudio
{
    OSStatus status = AudioComponentInstanceDispose(kAudioUnit);
    checkStatus(status);
    kAudioUnit = nil;
}
That fixed the issue. So the correct sequence is: stop the recorder, uninitialize it, and then dispose of it on the main thread.
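Put together, the teardown in that order looks roughly like this (a sketch adapted from the question's stopListeningWithCoreAudio; the session handling is unchanged):

-(void) stopListeningWithCoreAudio
{
    // 1. Stop the unit first, while it is still initialized
    OSStatus status = AudioOutputUnitStop( kAudioUnit );
    checkStatus( status );

    // 2. Then uninitialize it
    status = AudioUnitUninitialize( kAudioUnit );
    checkStatus( status );

    // 3. Finally dispose on the main thread, not back to back with uninitialize
    [self performSelectorOnMainThread:@selector(disposeCoreAudio)
                           withObject:nil
                        waitUntilDone:NO];

    status = AudioSessionSetActive(NO);
    checkStatus(status);
}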
Answer 1 (score: 1)
One possible problem is that your code tries to uninitialize a running audio unit before stopping it.
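Swapping the two calls so the unit is stopped before it is uninitialized would look like this:

    status = AudioOutputUnitStop( kAudioUnit );    // stop the running unit first
    checkStatus( status );
    status = AudioUnitUninitialize( kAudioUnit );  // then uninitialize it
    checkStatus( status );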