I have a buffer of 8kHz samples and I'm trying to simply apply a lowpass filter to it. Meaning, I start with a buffer of 8kHz samples and I want to end up with a buffer of 8kHz LOWPASSED samples. If I hook up a lowpass unit, connect it to the default output unit, and feed it my buffer, it sounds perfect and is lowpassed correctly. But as soon as I remove the output unit and call AudioUnitRender directly on the lowpass audio unit, the resulting samples come out aliased and clipped.
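For reference, the playback version that sounds right is wired up roughly like this (a sketch from memory, not my exact code; error checking omitted, and it reuses the same player.lowPassUnit and render callback that are set up in the full listing below):

    AudioUnit outputUnit;
    AudioComponentDescription outputcd = {0};
    outputcd.componentType = kAudioUnitType_Output;
    outputcd.componentSubType = kAudioUnitSubType_DefaultOutput;
    outputcd.componentManufacturer = kAudioUnitManufacturer_Apple;
    AudioComponentInstanceNew(AudioComponentFindNext(NULL, &outputcd), &outputUnit);

    // Feed the lowpass unit's output into the default output's input.
    AudioUnitConnection connection = {0};
    connection.sourceAudioUnit = player.lowPassUnit; // same lowpass + render callback as below
    connection.sourceOutputNumber = 0;
    connection.destInputNumber = 0;
    AudioUnitSetProperty(outputUnit, kAudioUnitProperty_MakeConnection,
                         kAudioUnitScope_Input, 0, &connection, sizeof(connection));

    AudioUnitInitialize(player.lowPassUnit);
    AudioUnitInitialize(outputUnit);
    AudioOutputUnitStart(outputUnit);

Here's the offline version that misbehaves: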
#import "EffectMachine.h"
#import <AudioToolbox/AudioToolbox.h>
#import "AudioHelpers.h"
#import "Buffer.h"
@interface EffectMachine ()
@property (nonatomic, strong) Buffer *buffer;
@end
typedef struct EffectPlayer {
    NSUInteger index;
    AudioUnit lowPassUnit;
    __unsafe_unretained Buffer *buffer;
} EffectPlayer;
OSStatus EffectMachineCallbackRenderProc(void *inRefCon,
                                         AudioUnitRenderActionFlags *ioActionFlags,
                                         const AudioTimeStamp *inTimeStamp,
                                         UInt32 inBusNumber,
                                         UInt32 inNumberFrames,
                                         AudioBufferList *ioData);

OSStatus EffectMachineCallbackRenderProc(void *inRefCon,
                                         AudioUnitRenderActionFlags *ioActionFlags,
                                         const AudioTimeStamp *inTimeStamp,
                                         UInt32 inBusNumber,
                                         UInt32 inNumberFrames,
                                         AudioBufferList *ioData) {
    struct EffectPlayer *player = (struct EffectPlayer *)inRefCon;
    for (int i = 0; i < inNumberFrames; i++) {
        float sample;
        if (player->index < player->buffer.size) {
            sample = (float)player->buffer.samples[player->index];
            player->index += 1;
        } else {
            sample = 0;
        }
        ((float *)ioData->mBuffers[0].mData)[i] = sample;
        ((float *)ioData->mBuffers[1].mData)[i] = sample;
    }
    return noErr;
}
@implementation EffectMachine {
    EffectPlayer player;
}

- (instancetype)initWithBuffer:(Buffer *)buffer {
    if (self = [super init]) {
        self.buffer = buffer;
    }
    return self;
}
- (Buffer *)process {
    struct EffectPlayer initialized = {0};
    player = initialized;
    player.buffer = self.buffer;
    [self setupAudioUnits];
    Buffer *buffer = [self processedBuffer];
    [self cleanup];
    return buffer;
}
- (void)setupAudioUnits {
    AudioComponentDescription lowpasscd = {0};
    lowpasscd.componentType = kAudioUnitType_Effect;
    lowpasscd.componentSubType = kAudioUnitSubType_LowPassFilter;
    lowpasscd.componentManufacturer = kAudioUnitManufacturer_Apple;
    AudioComponent comp = AudioComponentFindNext(NULL, &lowpasscd);
    if (comp == NULL) NSLog(@"can't get lowpass unit");
    AudioComponentInstanceNew(comp, &player.lowPassUnit);
    AURenderCallbackStruct input;
    input.inputProc = EffectMachineCallbackRenderProc;
    input.inputProcRefCon = &player;
    CheckError(AudioUnitSetProperty(player.lowPassUnit,
                                    kAudioUnitProperty_SetRenderCallback,
                                    kAudioUnitScope_Input,
                                    0,
                                    &input,
                                    sizeof(input)),
               "AudioUnitSetProperty for callback failed");
    CheckError(AudioUnitSetParameter(player.lowPassUnit,
                                     kLowPassParam_CutoffFrequency,
                                     kAudioUnitScope_Global,
                                     0,
                                     1500,
                                     0), "AudioUnitSetParameter cutoff for lowpass failed");
    CheckError(AudioUnitSetParameter(player.lowPassUnit,
                                     kLowPassParam_Resonance,
                                     kAudioUnitScope_Global,
                                     0,
                                     0,
                                     0), "AudioUnitSetParameter resonance for lowpass failed");
    CheckError(AudioUnitInitialize(player.lowPassUnit),
               "Couldn't initialize lowpass unit");
}
- (Buffer *)processedBuffer {
    // An AudioBufferList declares a single mBuffers element, so a two-buffer
    // (stereo) list needs room for one extra AudioBuffer.
    AudioBufferList *bufferlist = malloc(sizeof(AudioBufferList) + sizeof(AudioBuffer));
    UInt32 blockSize = 1024;
    float *left = malloc(sizeof(float) * blockSize);
    float *right = malloc(sizeof(float) * blockSize);
    bufferlist->mBuffers[0].mData = left;
    bufferlist->mBuffers[1].mData = right;
    UInt32 size = sizeof(float) * blockSize;
    AudioTimeStamp inTimeStamp;
    memset(&inTimeStamp, 0, sizeof(AudioTimeStamp));
    inTimeStamp.mSampleTime = 0;
    AudioUnitRenderActionFlags flag = 0;
    NSUInteger length = ceil(self.buffer.size / (float)blockSize);
    double *processed = malloc(sizeof(double) * blockSize * length);
    for (int i = 0; i < length; i++) {
        bufferlist->mBuffers[0].mDataByteSize = size;
        bufferlist->mBuffers[1].mDataByteSize = size;
        bufferlist->mNumberBuffers = 2;
        inTimeStamp.mFlags = kAudioTimeStampSampleTimeValid;
        AudioUnitRender(player.lowPassUnit, &flag, &inTimeStamp, 0, blockSize, bufferlist);
        for (NSUInteger j = 0; j < blockSize; j++) {
            processed[j + (blockSize * i)] = left[j];
        }
        inTimeStamp.mSampleTime += blockSize;
    }
    Buffer *buffer = [[Buffer alloc] initWithSamples:processed size:self.buffer.size sampleRate:self.buffer.sampleRate];
    free(bufferlist);
    free(left);
    free(right);
    free(processed);
    return buffer;
}
- (void)cleanup {
    AudioOutputUnitStop(player.lowPassUnit);
    AudioUnitUninitialize(player.lowPassUnit);
    AudioComponentInstanceDispose(player.lowPassUnit);
}
@end
If I add a generic output and try to set an 8kHz ASBD on its input, I just get garbage noise as output... It looks like 0,0,0,0,0,17438231945853048031929171968.000000,0,0,0,-2548199532257382185315640279040.000000... Yikes!
I've tried putting ASBDs on the input and output of the lowpass unit, giving it an 8kHz sample rate property, and it did nothing. I've tried adding converter units (with ASBDs set to 8kHz) before, after, and both before and after the lowpass filter (in a chain), and that didn't work either.
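For reference, the 8kHz ASBD I've been trying to set looks roughly like this (a sketch; the exact format flags are my reconstruction of a packed, non-interleaved float format):

    AudioStreamBasicDescription asbd = {0};
    asbd.mSampleRate       = 8000.0;
    asbd.mFormatID         = kAudioFormatLinearPCM;
    asbd.mFormatFlags      = kAudioFormatFlagIsFloat | kAudioFormatFlagIsNonInterleaved | kAudioFormatFlagIsPacked;
    asbd.mChannelsPerFrame = 1;
    asbd.mFramesPerPacket  = 1;
    asbd.mBitsPerChannel   = 32;
    asbd.mBytesPerPacket   = sizeof(float);
    asbd.mBytesPerFrame    = sizeof(float);
    // e.g. applied to the lowpass unit's input:
    AudioUnitSetProperty(player.lowPassUnit, kAudioUnitProperty_StreamFormat,
                         kAudioUnitScope_Input, 0, &asbd, sizeof(asbd));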
As a side question: my buffer holds mono 8kHz samples, and if I set mNumberBuffers to 1 on my buffer list, my lowpass input render proc never gets called... Is there a way to avoid having to use stereo channels?
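(The mono variant that never triggers the render proc is set up roughly like this, reusing blockSize and left from processedBuffer above:)

    AudioBufferList *monoList = malloc(sizeof(AudioBufferList)); // the struct already holds one AudioBuffer
    monoList->mNumberBuffers = 1;
    monoList->mBuffers[0].mNumberChannels = 1;
    monoList->mBuffers[0].mDataByteSize = sizeof(float) * blockSize;
    monoList->mBuffers[0].mData = left;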
Answer (score: 0)
I used converters on both ends: the ASBD is set to 8000Hz mono floats for the input converter's input and for the output converter's output, while the lowpass unit runs with 44100.0 stereo on its input and output, and AudioUnitRender is called on the end converter, with no io unit involved in the offline render. In other words, the offline chain is: render callback → converter (8kHz mono in) → lowpass (44.1kHz stereo) → converter (8kHz mono out) → AudioUnitRender. For online rendering I put a converter unit before the io unit, so the render callback pulls from the 8K buffers during playback as well. It seems that the lower sample rate on the output ASBD requires a higher maximum frames per slice and smaller slices (the AudioUnitRender inNumberFrames), and that's why it wouldn't render before.
#import "ViewController.h"
#import <AudioToolbox/AudioToolbox.h>
@implementation ViewController {
    int sampleCount;
    int renderBufferHead;
    float *renderBuffer;
}
- (void)viewDidLoad {
    [super viewDidLoad];
    float sampleRate = 8000;
    int bufferSeconds = 3;
    sampleCount = sampleRate * bufferSeconds; // bufferSeconds' worth of samples
    float *originalSaw = generateSawWaveBuffer(440, sampleRate, sampleCount);
    renderBuffer = originalSaw;
    renderBufferHead = 0;
    AURenderCallbackStruct cbStruct = {renderCallback, (__bridge void *)self};
    // This does the offline render using the render callback; the callback just reads from renderBuffer at the sample rate.
    float *processedBuffer = offlineRender(sampleCount, sampleRate, &cbStruct);
    renderBufferHead = 0; // rewind render buffer after processing
    // Set up audio units to do a live render using the render callback at the sample rate, then self-destruct after a delay.
    // It plays originalSaw for bufferSeconds, then after a delay switches renderBuffer to point at processedBuffer.
    float secondsToPlayAudio = (bufferSeconds + 1) * 2;
    onlineRender(sampleRate, &cbStruct, secondsToPlayAudio);
    // Wait for the original to finish playing, then change the render callback's source buffer to the processed buffer.
    dispatch_after(dispatch_time(DISPATCH_TIME_NOW, (int64_t)((secondsToPlayAudio / 2) * NSEC_PER_SEC)), dispatch_get_main_queue(), ^{
        renderBuffer = processedBuffer;
        renderBufferHead = 0; // rewind render buffer
    });
    // Tear down after all rendering is done.
    dispatch_after(dispatch_time(DISPATCH_TIME_NOW, (int64_t)(secondsToPlayAudio * NSEC_PER_SEC)), dispatch_get_main_queue(), ^{
        free(originalSaw);
        free(processedBuffer);
    });
}
float *offlineRender(int count, double sampleRate, AURenderCallbackStruct *cbStruct) {
    AudioComponentInstance inConverter = getComponentInstance(kAudioUnitType_FormatConverter, kAudioUnitSubType_AUConverter);
    AudioComponentInstance lowPass = getComponentInstance(kAudioUnitType_Effect, kAudioUnitSubType_LowPassFilter);
    AudioComponentInstance outConverter = getComponentInstance(kAudioUnitType_FormatConverter, kAudioUnitSubType_AUConverter);
    AudioStreamBasicDescription asbd = getMonoFloatASBD(sampleRate);
    AudioUnitSetProperty(inConverter, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &asbd, sizeof(AudioStreamBasicDescription));
    AudioUnitSetProperty(outConverter, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &asbd, sizeof(AudioStreamBasicDescription));
    AudioUnitSetProperty(inConverter, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, cbStruct, sizeof(AURenderCallbackStruct));
    formatAndConnect(inConverter, lowPass);
    formatAndConnect(lowPass, outConverter);
    UInt32 maxFramesPerSlice = 4096;
    AudioUnitSetProperty(inConverter, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, sizeof(UInt32));
    AudioUnitSetProperty(lowPass, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, sizeof(UInt32));
    AudioUnitSetProperty(outConverter, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, sizeof(UInt32));
    AudioUnitInitialize(inConverter);
    AudioUnitInitialize(lowPass);
    AudioUnitInitialize(outConverter);
    AudioUnitSetParameter(lowPass, kLowPassParam_CutoffFrequency, kAudioUnitScope_Global, 0, 500, 0);
    AudioBufferList *bufferlist = malloc(sizeof(AudioBufferList) + sizeof(AudioBuffer)); // stereo buffer list: room for one extra AudioBuffer
    float *left = malloc(sizeof(float) * 4096);
    bufferlist->mBuffers[0].mData = left;
    bufferlist->mNumberBuffers = 1;
    AudioTimeStamp inTimeStamp;
    memset(&inTimeStamp, 0, sizeof(AudioTimeStamp));
    inTimeStamp.mFlags = kAudioTimeStampSampleTimeValid;
    inTimeStamp.mSampleTime = 0;
    float *buffer = malloc(sizeof(float) * count);
    int inNumberframes = 512;
    AudioUnitRenderActionFlags flag = 0;
    int framesRead = 0;
    while (count) {
        inNumberframes = MIN(inNumberframes, count);
        bufferlist->mBuffers[0].mDataByteSize = sizeof(float) * inNumberframes;
        printf("Offline Render %i frames\n", inNumberframes);
        AudioUnitRender(outConverter, &flag, &inTimeStamp, 0, inNumberframes, bufferlist);
        memcpy(buffer + framesRead, left, sizeof(float) * inNumberframes);
        inTimeStamp.mSampleTime += inNumberframes;
        count -= inNumberframes;
        framesRead += inNumberframes;
    }
    free(left);
    free(bufferlist);
    AudioUnitUninitialize(inConverter);
    AudioUnitUninitialize(lowPass);
    AudioUnitUninitialize(outConverter);
    return buffer;
}
OSStatus renderCallback(void *inRefCon,
                        AudioUnitRenderActionFlags *ioActionFlags,
                        const AudioTimeStamp *inTimeStamp,
                        UInt32 inBusNumber,
                        UInt32 inNumberFrames,
                        AudioBufferList *ioData) {
    ViewController *self = (__bridge ViewController *)inRefCon;
    float *left = ioData->mBuffers[0].mData;
    for (int i = 0; i < inNumberFrames; i++) {
        if (self->renderBufferHead >= self->sampleCount) {
            left[i] = 0;
        } else {
            left[i] = self->renderBuffer[self->renderBufferHead++];
        }
    }
    if (ioData->mNumberBuffers == 2) {
        memcpy(ioData->mBuffers[1].mData, left, sizeof(float) * inNumberFrames);
    }
    printf("render %f to %f\n", inTimeStamp->mSampleTime, inTimeStamp->mSampleTime + inNumberFrames);
    return noErr;
}
void onlineRender(double sampleRate, AURenderCallbackStruct *cbStruct, float duration) {
    AudioComponentInstance converter = getComponentInstance(kAudioUnitType_FormatConverter, kAudioUnitSubType_AUConverter);
    AudioComponentInstance ioUnit = getComponentInstance(kAudioUnitType_Output, kAudioUnitSubType_DefaultOutput);
    AudioStreamBasicDescription asbd = getMonoFloatASBD(sampleRate);
    AudioUnitSetProperty(converter, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &asbd, sizeof(AudioStreamBasicDescription));
    AudioUnitSetProperty(converter, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, cbStruct, sizeof(AURenderCallbackStruct));
    formatAndConnect(converter, ioUnit);
    AudioUnitInitialize(converter);
    AudioUnitInitialize(ioUnit);
    AudioOutputUnitStart(ioUnit);
    dispatch_after(dispatch_time(DISPATCH_TIME_NOW, (int64_t)(duration * NSEC_PER_SEC)), dispatch_get_main_queue(), ^{
        AudioOutputUnitStop(ioUnit);
        AudioUnitUninitialize(ioUnit);
        AudioUnitUninitialize(converter);
    });
}
float *generateSawWaveBuffer(float frequency, float sampleRate, int sampleCount) {
    // Ramps up and down between -1 and 1 (strictly a triangle wave, which is fine as a test signal).
    float *buffer = malloc(sizeof(float) * sampleCount);
    float increment = (frequency / sampleRate) * 2;
    int increasing = 1;
    float sample = 0;
    for (int i = 0; i < sampleCount; i++) {
        if (increasing) {
            sample += increment;
            if (sample >= 1) {
                increasing = 0;
            }
        } else {
            sample -= increment;
            if (sample < -1) {
                increasing = 1;
            }
        }
        buffer[i] = sample;
    }
    return buffer;
}
AudioComponentInstance getComponentInstance(OSType type, OSType subType) {
    AudioComponentDescription desc = {0};
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    desc.componentSubType = subType;
    desc.componentType = type;
    AudioComponent ioComponent = AudioComponentFindNext(NULL, &desc);
    AudioComponentInstance unit;
    AudioComponentInstanceNew(ioComponent, &unit);
    return unit;
}
AudioStreamBasicDescription getMonoFloatASBD(double sampleRate) {
    AudioStreamBasicDescription asbd = {0};
    asbd.mSampleRate = sampleRate;
    asbd.mFormatID = kAudioFormatLinearPCM;
    asbd.mFormatFlags = kAudioFormatFlagIsFloat | kAudioFormatFlagIsNonInterleaved | kAudioFormatFlagIsPacked;
    asbd.mFramesPerPacket = 1;
    asbd.mChannelsPerFrame = 1;
    asbd.mBitsPerChannel = 32;
    asbd.mBytesPerPacket = 4;
    asbd.mBytesPerFrame = 4;
    return asbd;
}
void formatAndConnect(AudioComponentInstance src, AudioComponentInstance dst) {
    // Match src's output format to dst's input format, then connect src -> dst.
    AudioStreamBasicDescription asbd;
    UInt32 propsize = sizeof(AudioStreamBasicDescription);
    AudioUnitGetProperty(dst, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &asbd, &propsize);
    AudioUnitSetProperty(src, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &asbd, sizeof(AudioStreamBasicDescription));
    AudioUnitConnection connection = {0};
    connection.destInputNumber = 0;
    connection.sourceAudioUnit = src;
    connection.sourceOutputNumber = 0;
    AudioUnitSetProperty(dst, kAudioUnitProperty_MakeConnection, kAudioUnitScope_Input, 0, &connection, sizeof(AudioUnitConnection));
}
@end