This is my first attempt at using CoreAudio. My goal is to capture microphone data, resample it to a new sample rate, and then capture the raw 16-bit PCM data.
My strategy is to build an AUGraph with the microphone -> a sample rate converter, and then have a callback that gets data from the output side of the converter (which I'm hoping gives me the mic data at the new sample rate?).
Right now my callback fires with a null AudioBufferList*, which is obviously not right. How should I set this up, and what am I doing wrong?
Code follows:
CheckError(NewAUGraph(&audioGraph), @"Creating graph");
CheckError(AUGraphOpen(audioGraph), @"Opening graph");

AUNode micNode, converterNode;
AudioUnit micUnit, converterUnit;

makeMic(&audioGraph, &micNode, &micUnit);

// get the input bus's stream description
UInt32 sizeASBD = sizeof(AudioStreamBasicDescription);
AudioStreamBasicDescription hwASBDin;
AudioUnitGetProperty(micUnit,
                     kAudioUnitProperty_StreamFormat,
                     kAudioUnitScope_Input,
                     kInputBus,
                     &hwASBDin,
                     &sizeASBD);

makeConverter(&audioGraph, &converterNode, &converterUnit, hwASBDin);

// connect mic output to converterNode
CheckError(AUGraphConnectNodeInput(audioGraph, micNode, 1, converterNode, 0),
           @"Connecting mic to converter");

// set callback on the output? maybe?
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = audioCallback;
callbackStruct.inputProcRefCon = (__bridge void*)self;
CheckError(AudioUnitSetProperty(micUnit,
                                kAudioOutputUnitProperty_SetInputCallback,
                                kAudioUnitScope_Global,
                                kInputBus,
                                &callbackStruct,
                                sizeof(callbackStruct)),
           @"Setting callback");

CheckError(AUGraphInitialize(audioGraph), @"AUGraphInitialize");

// activate audio session
NSError *err = nil;
AVAudioSession *audioSession = [AVAudioSession sharedInstance];
if (![audioSession setActive:YES error:&err]) {
    [self error:[NSString stringWithFormat:@"Couldn't activate audio session: %@", err]];
}

CheckError(AUGraphStart(audioGraph), @"AUGraphStart");
and
void makeMic(AUGraph *graph, AUNode *micNode, AudioUnit *micUnit) {
    AudioComponentDescription inputDesc;
    inputDesc.componentType = kAudioUnitType_Output;
    inputDesc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
    inputDesc.componentFlags = 0;
    inputDesc.componentFlagsMask = 0;
    inputDesc.componentManufacturer = kAudioUnitManufacturer_Apple;

    CheckError(AUGraphAddNode(*graph, &inputDesc, micNode),
               @"Adding mic node");
    CheckError(AUGraphNodeInfo(*graph, *micNode, 0, micUnit),
               @"Getting mic unit");

    // enable microphone for recording
    UInt32 flagOn = 1; // enable value
    CheckError(AudioUnitSetProperty(*micUnit,
                                    kAudioOutputUnitProperty_EnableIO,
                                    kAudioUnitScope_Input,
                                    kInputBus,
                                    &flagOn,
                                    sizeof(flagOn)),
               @"Enabling microphone");
}
and
void makeConverter(AUGraph *graph, AUNode *converterNode, AudioUnit *converterUnit, AudioStreamBasicDescription inFormat) {
    AudioComponentDescription sampleConverterDesc;
    sampleConverterDesc.componentType = kAudioUnitType_FormatConverter;
    sampleConverterDesc.componentSubType = kAudioUnitSubType_AUConverter;
    sampleConverterDesc.componentFlags = 0;
    sampleConverterDesc.componentFlagsMask = 0;
    sampleConverterDesc.componentManufacturer = kAudioUnitManufacturer_Apple;

    CheckError(AUGraphAddNode(*graph, &sampleConverterDesc, converterNode),
               @"Adding converter node");
    CheckError(AUGraphNodeInfo(*graph, *converterNode, 0, converterUnit),
               @"Getting converter unit");

    // describe desired output format: 16 kHz mono 16-bit PCM
    AudioStreamBasicDescription convertedFormat;
    convertedFormat.mSampleRate = 16000.0;
    convertedFormat.mFormatID = kAudioFormatLinearPCM;
    convertedFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    convertedFormat.mFramesPerPacket = 1;
    convertedFormat.mChannelsPerFrame = 1;
    convertedFormat.mBitsPerChannel = 16;
    convertedFormat.mBytesPerPacket = 2;
    convertedFormat.mBytesPerFrame = 2;

    // set format descriptions
    CheckError(AudioUnitSetProperty(*converterUnit,
                                    kAudioUnitProperty_StreamFormat,
                                    kAudioUnitScope_Input,
                                    0, // should be the only bus #
                                    &inFormat,
                                    sizeof(inFormat)),
               @"Setting format of converter input");
    CheckError(AudioUnitSetProperty(*converterUnit,
                                    kAudioUnitProperty_StreamFormat,
                                    kAudioUnitScope_Output,
                                    0, // should be the only bus #
                                    &convertedFormat,
                                    sizeof(convertedFormat)),
               @"Setting format of converter output");
}
Answer (score: 1)
Render callbacks serve as the source for an audio unit. If you set the kAudioOutputUnitProperty_SetInputCallback property on the remoteIO unit, you must call AudioUnitRender from within the callback you supply, and then you would have to do the sample rate conversion manually, which is ugly.
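For illustration, here is a minimal, untested sketch of what that route involves. Everything in it is an assumption layered on the question's code: the refCon is taken to hold the AudioUnit itself rather than self, a 16-bit mono client format is presumed to be set on bus 1, and the fixed-size buffer is a simplification:

static OSStatus audioCallback(void *inRefCon,
                              AudioUnitRenderActionFlags *ioActionFlags,
                              const AudioTimeStamp *inTimeStamp,
                              UInt32 inBusNumber,
                              UInt32 inNumberFrames,
                              AudioBufferList *ioData) {
    // assumption: inputProcRefCon was set to the AudioUnit, not to self
    AudioUnit micUnit = (AudioUnit)inRefCon;

    if (inNumberFrames > 4096) return noErr; // simplification: fixed-size buffer

    // ioData is NULL for input callbacks; we must supply our own buffer list
    // and pull the samples in with AudioUnitRender
    SInt16 samples[4096];
    AudioBufferList bufferList;
    bufferList.mNumberBuffers = 1;
    bufferList.mBuffers[0].mNumberChannels = 1;
    bufferList.mBuffers[0].mDataByteSize = (UInt32)(inNumberFrames * sizeof(SInt16));
    bufferList.mBuffers[0].mData = samples;

    OSStatus status = AudioUnitRender(micUnit, ioActionFlags, inTimeStamp,
                                      inBusNumber, inNumberFrames, &bufferList);
    // on success, samples[] holds the mic data; any remaining sample rate
    // conversion would still have to be done by hand here
    return status;
}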
There is an easier way. The remoteIO acts as two units: the input (mic) and the output (speaker). Create a graph with a remoteIO, then connect the mic to the speaker using the desired format. You can then get at the data with a renderNotify callback, which acts as a "tap".
I created a ViewController class to demonstrate:
#import "ViewController.h"
#import <AudioToolbox/AudioToolbox.h>
#import <AVFoundation/AVFoundation.h>
@implementation ViewController
- (void)viewDidLoad {
[super viewDidLoad];
//Set your audio session to allow recording
AVAudioSession *audioSession = [AVAudioSession sharedInstance];
[audioSession setCategory:AVAudioSessionCategoryPlayAndRecord error:NULL];
[audioSession setActive:1 error:NULL];
//Create graph and units
AUGraph graph = NULL;
NewAUGraph(&graph);
AUNode ioNode;
AudioUnit ioUnit = NULL;
AudioComponentDescription ioDescription = {0};
ioDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
ioDescription.componentType = kAudioUnitType_Output;
ioDescription.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
AUGraphAddNode(graph, &ioDescription, &ioNode);
AUGraphOpen(graph);
AUGraphNodeInfo(graph, ioNode, NULL, &ioUnit);
UInt32 enable = 1;
AudioUnitSetProperty(ioUnit,kAudioOutputUnitProperty_EnableIO,kAudioUnitScope_Input,1,&enable,sizeof(enable));
//Set the output of the ioUnit's input bus, and the input of it's output bus to the desired format.
//Core audio basically has implicite converters that we're taking advantage of.
AudioStreamBasicDescription asbd = {0};
asbd.mSampleRate = 16000.0;
asbd.mFormatID = kAudioFormatLinearPCM;
asbd.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
asbd.mFramesPerPacket = 1;
asbd.mChannelsPerFrame = 1;
asbd.mBitsPerChannel = 16;
asbd.mBytesPerPacket = 2;
asbd.mBytesPerFrame = 2;
AudioUnitSetProperty(ioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &asbd, sizeof(asbd));
AudioUnitSetProperty(ioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &asbd, sizeof(asbd));
//Connect output of the remoteIO's input bus to the input of it's output bus
AUGraphConnectNodeInput(graph, ioNode, 1, ioNode, 0);
//Add a render notify with a bridged reference to self (If using ARC)
AudioUnitAddRenderNotify(ioUnit, renderNotify, (__bridge void *)self);
//Start graph
AUGraphInitialize(graph);
AUGraphStart(graph);
CAShow(graph);
}
OSStatus renderNotify(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData){
//Filter anything that isn't a post render call on the input bus
if (*ioActionFlags != kAudioUnitRenderAction_PostRender || inBusNumber != 1) {
return noErr;
}
//Get a reference to self
ViewController *self = (__bridge ViewController *)inRefCon;
//Do stuff with audio
//Optionally mute the audio by setting it to zero;
for (int i = 0; i < ioData->mNumberBuffers; i++) {
memset(ioData->mBuffers[i].mData, 0, ioData->mBuffers[i].mDataByteSize);
}
return noErr;
}
@end
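To get at the raw 16-bit PCM the question asks for, the "Do stuff with audio" spot in renderNotify is where to read it. A minimal sketch under the format configured above (interleaved mono SInt16 at 16 kHz); processPCM:count: is a hypothetical method you would add to ViewController:

// sketch: replace "Do stuff with audio" with something like this;
// each buffer holds interleaved mono SInt16 samples at 16 kHz
for (int i = 0; i < ioData->mNumberBuffers; i++) {
    SInt16 *pcm = (SInt16 *)ioData->mBuffers[i].mData;
    UInt32 sampleCount = (UInt32)(ioData->mBuffers[i].mDataByteSize / sizeof(SInt16));
    [self processPCM:pcm count:sampleCount]; // hypothetical handler
}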