How to shift the phase of an audio unit's output by 180 degrees

Asked: 2015-10-12 16:55:03

Tags: ios audio core-audio audiounit phase

I'm trying to take audio from the microphone input, apply a 180-degree phase shift to that input stream, and play it back out.

Below is the code I use to initialize the session and capture the audio (the sample rate is set to 44.1 kHz):

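// Requires the AudioToolbox / AudioUnit frameworks. (The C AudioSession
// API used below was deprecated in iOS 7 in favor of AVAudioSession.)
// kInputBus and kOutputBus are assumed to be defined elsewhere as 1 and 0,
// the RemoteIO input and output bus numbers.
#import <AudioToolbox/AudioToolbox.h>
#import <AudioUnit/AudioUnit.h>
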
OSStatus status = noErr;

status = AudioSessionSetActive(true);
assert(status == noErr);

UInt32 category = kAudioSessionCategory_PlayAndRecord;
status = AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(UInt32), &category);
assert(status == noErr);

float aBufferLength = 0.002902; // In seconds


status = AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration,
                                 sizeof(aBufferLength), &aBufferLength);

assert(status == noErr);

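// describe the RemoteIO audio unit, which provides low-latency
// access to the microphone and the speaker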
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_RemoteIO;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;

// get AU component
AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);

// instantiate the audio unit from the component
status = AudioComponentInstanceNew(inputComponent, &_audioState->audioUnit);
assert(status == noErr);

// enable IO for recording on the input bus (bus 1)
UInt32 flag = 1;
status = AudioUnitSetProperty(_audioState->audioUnit,
                              kAudioOutputUnitProperty_EnableIO,
                              kAudioUnitScope_Input,
                              1, /*input*/
                              &flag,
                              sizeof(flag));
assert(status == noErr);

// enable IO for playback on the output bus (bus 0)
status = AudioUnitSetProperty(_audioState->audioUnit,
                              kAudioOutputUnitProperty_EnableIO,
                              kAudioUnitScope_Output,
                              0, /*output*/
                              &flag,
                              sizeof(flag));

assert(status == noErr);


// Fetch the actual hardware sample rate, in case we didn't get quite what we requested
Float64 achievedSampleRate;
UInt32 size = sizeof(achievedSampleRate);
status = AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareSampleRate, &size, &achievedSampleRate);
assert(status == noErr);
NSLog(@"Hardware sample rate is %f", achievedSampleRate);


// specify the stream format (16-bit signed mono PCM, used for both buses)
AudioStreamBasicDescription audioFormat;
audioFormat.mSampleRate = achievedSampleRate;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kAudioFormatFlagIsPacked | kAudioFormatFlagIsSignedInteger;
audioFormat.mFramesPerPacket = 1;
audioFormat.mChannelsPerFrame = 1;
audioFormat.mBitsPerChannel = 16;
audioFormat.mBytesPerPacket = 2;
audioFormat.mBytesPerFrame = 2;

// set the format on the output scope of the input bus (the mic data delivered to us)
status = AudioUnitSetProperty(_audioState->audioUnit,
                              kAudioUnitProperty_StreamFormat,
                              kAudioUnitScope_Output,
                              kInputBus,
                              &audioFormat,
                              sizeof(audioFormat));

assert(status == noErr);

// set the format on the input scope of the output bus (the data we supply for playback)
status = AudioUnitSetProperty(_audioState->audioUnit,
                              kAudioUnitProperty_StreamFormat,
                              kAudioUnitScope_Input,
                              kOutputBus,
                              &audioFormat,
                              sizeof(audioFormat));
assert(status == noErr);

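// Two callbacks are registered below: an input callback that fires when
// fresh microphone samples are available, and a render callback that
// fires when the output bus needs samples to play.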
AURenderCallbackStruct callbackStruct;
memset(&callbackStruct, 0, sizeof(AURenderCallbackStruct));
callbackStruct.inputProc = RenderCallback;
callbackStruct.inputProcRefCon = _audioState;

// set input callback
status = AudioUnitSetProperty(_audioState->audioUnit,
                              kAudioOutputUnitProperty_SetInputCallback,
                              kAudioUnitScope_Global,
                              kInputBus,
                              &callbackStruct,
                              sizeof(callbackStruct));
assert(status == noErr);

callbackStruct.inputProc = PlaybackCallback;
callbackStruct.inputProcRefCon = _audioState;

// set Render callback for output
status = AudioUnitSetProperty(_audioState->audioUnit,
                              kAudioUnitProperty_SetRenderCallback,
                              kAudioUnitScope_Global,
                              kOutputBus,
                              &callbackStruct,
                              sizeof(callbackStruct));
assert(status == noErr);

// tell the input bus NOT to allocate its own buffers;
// we supply our own buffer below instead
flag = 0;
status = AudioUnitSetProperty(_audioState->audioUnit,
                              kAudioUnitProperty_ShouldAllocateBuffer,
                              kAudioUnitScope_Output,
                              kInputBus,
                              &flag,
                              sizeof(flag));
assert(status == noErr);

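// scratch buffer for captured input: 256 frames of 16-bit mono
// (the 2.9 ms preferred buffer duration requested above comes to
// ~128 frames at 44.1 kHz, so 256 leaves some headroom)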
_audioState->audioBuffer.mNumberChannels = 1;
_audioState->audioBuffer.mDataByteSize = 256 * 2;
_audioState->audioBuffer.mData = malloc(256 * 2);

// initialize the audio unit
status = AudioUnitInitialize(_audioState->audioUnit);
assert(status == noErr);

Does anyone know how to shift the phase so as to create a destructively interfering waveform? I've heard about using vDSP for band-pass filtering, but I'm not sure...
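The naive approach, as I understand it, would be to negate every sample in the render callback, since multiplying a broadband signal by -1 shifts every frequency component by 180 degrees. A rough sketch (untested; AudioState is assumed to be the struct behind _audioState, and this variant pulls input directly in the render callback rather than going through the input callback):

static OSStatus PlaybackCallback(void *inRefCon,
                                 AudioUnitRenderActionFlags *ioActionFlags,
                                 const AudioTimeStamp *inTimeStamp,
                                 UInt32 inBusNumber,
                                 UInt32 inNumberFrames,
                                 AudioBufferList *ioData)
{
    AudioState *audioState = (AudioState *)inRefCon;

    // pull the latest microphone samples from the input bus (bus 1)
    OSStatus status = AudioUnitRender(audioState->audioUnit, ioActionFlags,
                                      inTimeStamp, 1, inNumberFrames, ioData);
    if (status != noErr) return status;

    // negate each 16-bit sample: y[n] = -x[n] is a 180-degree phase
    // shift at every frequency (note -INT16_MIN overflows, so
    // production code should clamp)
    SInt16 *samples = (SInt16 *)ioData->mBuffers[0].mData;
    for (UInt32 i = 0; i < inNumberFrames; i++) {
        samples[i] = -samples[i];
    }
    return noErr;
}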

1 Answer:

Answer (score: 1)

Unless you know the latency from the microphone to the input buffer and from the output buffer to the speaker, the frequencies you want to cancel, and have some assurance that those frequencies are stationary over that interval, you can't reliably produce a 180-degree phase shift for cancellation purposes. Instead, you will be trying to cancel sound that occurred a dozen or more milliseconds earlier, and if the frequency has changed in the meantime, you may end up adding to the sound rather than cancelling it. Also, if the distance between the sound source, the speaker, and the listener changes by a large enough fraction of a wavelength, the speaker output could end up doubling the loudness of the source instead of cancelling it. For a 1 kHz sound, that's a 6-inch movement.
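To make that concrete: an inverted copy of a tone at frequency f, played back after a round-trip delay tau, lands with a residual phase error of 360·f·tau degrees (mod 360). A quick check with an assumed 11.6 ms round trip (the latency figure is made up for illustration):

#include <math.h>
#include <stdio.h>

int main(void)
{
    double tau = 0.0116;                       // assumed round-trip delay, seconds
    double freqs[] = { 100.0, 440.0, 1000.0 }; // test frequencies, Hz
    for (int i = 0; i < 3; i++) {
        // cancellation needs this residual error to stay near 0 degrees
        double errorDeg = fmod(360.0 * freqs[i] * tau, 360.0);
        printf("%6.0f Hz: residual phase error %5.1f deg\n", freqs[i], errorDeg);
    }
    return 0;
}

At 1 kHz with that delay, the "cancelling" signal is 216 degrees off, which puts it only 36 degrees away from being fully in phase with the source, nearly doubling it instead of cancelling it.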

Active noise cancellation requires very accurate knowledge of the in-to-out time lag, including the microphone, input filter, and speaker responses, as well as the ADC/DAC latencies. Apple does not specify these, and they may well differ between iOS device models.

Given accurate knowledge of that in-to-out latency, plus an accurate analysis of the source signal's frequencies (via FFT, for instance), a phase shift somewhat different from 180 degrees may be needed at each frequency in order to attempt to cancel a stationary source.
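A sketch of what that per-frequency correction could look like using the Accelerate framework's vDSP FFT routines (illustration only: the block size, the latency value tau, and the name phaseRotateBlock are assumptions; the FFT setup would normally be created once rather than per block; and a real implementation would need windowing and overlap-add):

#include <Accelerate/Accelerate.h>
#include <math.h>

// rotate each FFT bin of one block by pi + 2*pi*f_k*tau: a 180-degree
// inversion plus a latency-dependent correction per frequency
static void phaseRotateBlock(float *signal, vDSP_Length log2n,
                             float sampleRate, float tau)
{
    vDSP_Length n = 1UL << log2n;
    FFTSetup setup = vDSP_create_fftsetup(log2n, kFFTRadix2);

    float real[n / 2], imag[n / 2];
    DSPSplitComplex split = { real, imag };

    // pack the real signal into split-complex form, then forward FFT
    vDSP_ctoz((const DSPComplex *)signal, 2, &split, 1, n / 2);
    vDSP_fft_zrip(setup, &split, 1, log2n, FFT_FORWARD);

    // bin 0 packs DC (realp) and Nyquist (imagp); a 180-degree shift
    // there is a plain sign flip
    split.realp[0] = -split.realp[0];
    split.imagp[0] = -split.imagp[0];

    for (vDSP_Length k = 1; k < n / 2; k++) {
        float fk = (float)k * sampleRate / (float)n;   // bin center frequency
        float theta = (float)M_PI + 2.0f * (float)M_PI * fk * tau;
        float c = cosf(theta), s = sinf(theta);
        float re = split.realp[k], im = split.imagp[k];
        split.realp[k] = re * c - im * s;              // multiply the bin
        split.imagp[k] = re * s + im * c;              // by e^(j*theta)
    }

    // inverse FFT, then undo vDSP's 2n scaling
    vDSP_fft_zrip(setup, &split, 1, log2n, FFT_INVERSE);
    vDSP_ztoc(&split, 1, (DSPComplex *)signal, 2, n / 2);
    float scale = 1.0f / (2.0f * (float)n);
    vDSP_vsmul(signal, 1, &scale, signal, 1, n);

    vDSP_destroy_fftsetup(setup);
}

Even then, as noted above, this only has a chance of working on frequencies that stay stationary over the whole in-to-out delay.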