Turn off the microphone when the input volume/dB is low

Asked: 2014-10-22 08:03:39

Tags: ios objective-c iphone audio fft

I am using this real-time pitch detection project: https://github.com/fotock/PitchDetectorExample/tree/1c68491f9c9bff2e851f5711c47e1efe4092f4de

It works well for my purposes: it has a frequency label, and when you sing a pitch the label registers a frequency; when you sing a higher pitch, the frequency it shows increases.

The problem is that when I am not singing into the microphone, the frequency label still registers a frequency, usually around 70 Hz, but sometimes it jumps to 200 Hz even when the microphone is off.

Is there a way to open the microphone only when the volume/dB is loud enough? Something like an event listener that fires only when the microphone picks up a preset amplitude. Basically I need an audio gate: if the dB level is low and there is only line noise, the mic should stay closed.
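One approach (not something the PitchDetectorExample project provides) is to measure the RMS level of each incoming buffer, convert it to dB relative to full scale, and only let buffers above a threshold through to the pitch detector. Below is a minimal sketch of such a gate for the 16-bit mono samples this project uses; the helper name bufferExceedsNoiseGate and the -45 dB threshold are assumptions of mine and would need tuning against your actual line noise.

#import <Accelerate/Accelerate.h>
#import <math.h>

// Hypothetical helper: returns YES when the RMS level of a buffer of 16-bit
// mono samples, expressed in dB relative to full scale (32767), is louder
// than thresholdDB (e.g. -45.0f). Name and threshold are not from the project.
static BOOL bufferExceedsNoiseGate(const SInt16 *samples, int frames, float thresholdDB) {
    if (frames <= 0) return NO;

    // vDSP works on floats, so convert the SInt16 samples first.
    float *floatSamples = (float *) malloc(sizeof(float) * frames);
    vDSP_vflt16(samples, 1, floatSamples, 1, frames);

    // Root-mean-square amplitude of the buffer.
    float rms = 0.0f;
    vDSP_rmsqv(floatSamples, 1, &rms, frames);
    free(floatSamples);

    // Convert to dB full scale; the small epsilon avoids log10f(0) for silence.
    float dB = 20.0f * log10f(rms / 32767.0f + 1e-12f);
    return dB > thresholdDB;
}

The sketches after each code listing below show where such a check could be hooked in.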

Below is the pitch detection code from the app above, unabridged. I have tried, without success, to add vDSP code to it to read the amplitude of the input and switch the microphone on and off.

PitchDetector.m

#import "PitchDetector.h"
#import <Accelerate/Accelerate.h>

#define PD_SYSTEM_VERSION_GREATER_THAN_OR_EQUAL_TO(v)  ([[[UIDevice currentDevice] systemVersion] compare:v options:NSNumericSearch] != NSOrderedAscending)

@implementation PitchDetector
@synthesize lowBoundFrequency, hiBoundFrequency, sampleRate, delegate, running;

#pragma mark Initialize Methods


-(id) initWithSampleRate: (float) rate andDelegate: (id<PitchDetectorDelegate>) initDelegate {
    return [self initWithSampleRate:rate lowBoundFreq:40 hiBoundFreq:4500 andDelegate:initDelegate];
}

-(id) initWithSampleRate: (float) rate lowBoundFreq: (int) low hiBoundFreq: (int) hi andDelegate: (id<PitchDetectorDelegate>) initDelegate {
    self.lowBoundFrequency = low;
    self.hiBoundFrequency = hi;
    self.sampleRate = rate;
    self.delegate = initDelegate;

    bufferLength = self.sampleRate/self.lowBoundFrequency;    


    hann = (float*) malloc(sizeof(float)*bufferLength);
    vDSP_hann_window(hann, bufferLength, vDSP_HANN_NORM);

    sampleBuffer = (SInt16*) malloc(512);
    samplesInSampleBuffer = 0;

    result = (float*) malloc(sizeof(float)*bufferLength);

    return self;
}

#pragma  mark Insert Samples

- (void) addSamples:(SInt16 *)samples inNumberFrames:(int)frames {
    int newLength = frames;
    if(samplesInSampleBuffer>0) {
        newLength += samplesInSampleBuffer;
    }

    SInt16 *newBuffer = (SInt16*) malloc(sizeof(SInt16)*newLength);
    memcpy(newBuffer, sampleBuffer, samplesInSampleBuffer*sizeof(SInt16));
    memcpy(&newBuffer[samplesInSampleBuffer], samples, frames*sizeof(SInt16));

    free(sampleBuffer);
    sampleBuffer = newBuffer;
    samplesInSampleBuffer = newLength;

    if(samplesInSampleBuffer>(self.sampleRate/self.lowBoundFrequency)) {
        if(!self.running) {
            [self performSelectorInBackground:@selector(performWithNumFrames:) withObject:[NSNumber numberWithInt:newLength]];
            self.running = YES;
        }
        samplesInSampleBuffer = 0;
    } else {
        //printf("NOT ENOUGH SAMPLES: %d\n", newLength);
    }
}


#pragma mark Perform Auto Correlation

-(void) performWithNumFrames: (NSNumber*) numFrames;
{
    int n = numFrames.intValue; 
    float freq = 0;

    SInt16 *samples;
    if (PD_SYSTEM_VERSION_GREATER_THAN_OR_EQUAL_TO(@"7.1")) {
        @synchronized(self) {
            samples = malloc(sizeof(SInt16)*numFrames.intValue);
            // Copy the accumulated samples so the analysis runs on its own buffer.
            memcpy(samples, sampleBuffer, sizeof(SInt16)*numFrames.intValue);
        }
    } else {
        samples = sampleBuffer;
    }

    int returnIndex = 0;
    float sum;
    bool goingUp = false;
    float normalize = 0;

    for(int i = 0; i<n; i++) {
        sum = 0;
        for(int j = 0; j<n; j++) {
            sum += (samples[j]*samples[j+i])*hann[j];
        }
        if(i ==0 ) normalize = sum;
        result[i] = sum/normalize;
    }


    for(int i = 0; i<n-8; i++) {
        if(result[i]<0) {
            i+=2; // no peaks below 0, skip forward at a faster rate
        } else {
            if(result[i]>result[i-1] && goingUp == false && i >1) {

                //local min at i-1

                goingUp = true;

            } else if(goingUp == true && result[i]<result[i-1]) {

                //local max at i-1

                if(returnIndex==0 && result[i-1]>result[0]*0.95) {
                    returnIndex = i-1;
                    break; 
                    //############### NOTE ##################################
                    // My implementation breaks out of this loop when it finds the first peak.
                    // This is (probably) the greatest source of error, so if you would like to
                    // improve this algorithm, start here. the next else if() will trigger on 
                    // future local maxima (if you first take out the break; above this paragraph)
                    //#######################################################
                } else if(result[i-1]>result[0]*0.85) {
                }
                goingUp = false;
            }       
        }
    }

    freq = self.sampleRate/interp(result[returnIndex-1], result[returnIndex], result[returnIndex+1], returnIndex);
    if(freq >= self.lowBoundFrequency && freq <= self.hiBoundFrequency) {
        dispatch_async(dispatch_get_main_queue(), ^{
            [delegate updatedPitch:freq];
        }); 
    }
    self.running = NO;
}


float interp(float y1, float y2, float y3, int k);
float interp(float y1, float y2, float y3, int k) {

    float d, kp;
    d = (y3 - y1) / (2 * (2 * y2 - y1 - y3));
    //printf("%f = %d + %f\n", k+d, k, d);
    kp  =  k + d;
    return kp;
}
@end
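Assuming a helper like bufferExceedsNoiseGate above, one place the gate could go is at the top of addSamples:inNumberFrames:, so that quiet buffers are discarded before they are ever accumulated or autocorrelated. This is only a sketch of the idea, not code from the project:

// Sketch: ignore buffers whose level is below the gate threshold.
// -45 dBFS is an arbitrary starting value; tune it to sit above your line noise.
- (void) addSamples:(SInt16 *)samples inNumberFrames:(int)frames {
    if (!bufferExceedsNoiseGate(samples, frames, -45.0f)) {
        samplesInSampleBuffer = 0;   // also drop any partially filled buffer
        return;                      // gate closed: nothing reaches the detector
    }
    // ... original accumulation and autocorrelation code continues unchanged ...
}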

Here is the AudioController.m class that initializes the microphone, also unabridged. Looking at other examples of turning the microphone on and off based on dB, I have tried a number of frameworks, such as Audio Queue Services and the Accelerate framework, to no avail.

AudioController.m

#import "AudioController.h"

#define kOutputBus 0
#define kInputBus 1

@implementation AudioController
@synthesize rioUnit, audioFormat, delegate;


+ (AudioController *) sharedAudioManager
{
    static AudioController *sharedAudioManager;

    @synchronized(self)
    {
        if (!sharedAudioManager) {
            sharedAudioManager = [[AudioController alloc] init];
            [sharedAudioManager startAudio];
        }
        return sharedAudioManager;
    }
}


void checkStatus(OSStatus status);
void checkStatus(OSStatus status) {
    if(status!=0)
        printf("Error: %ld\n", status);
}

#pragma mark init

- (id)init
{
    OSStatus status;
    status = AudioSessionInitialize(NULL, NULL, NULL, (__bridge void*) self);
    checkStatus(status);

    // Describe audio component
    AudioComponentDescription desc;
    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_RemoteIO;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;

    // Get component
    AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);

    // Get audio units
    status = AudioComponentInstanceNew(inputComponent, &rioUnit);
    checkStatus(status);


    // Enable IO for recording
    UInt32 flag = 1;

    status = AudioUnitSetProperty(rioUnit,                                   
                                  kAudioOutputUnitProperty_EnableIO, 
                                  kAudioUnitScope_Input, 
                                  kInputBus,
                                  &flag,
                                  sizeof(flag));
    checkStatus(status);


    // Describe format
    audioFormat.mSampleRate= 44100.0;
    audioFormat.mFormatID= kAudioFormatLinearPCM;
    audioFormat.mFormatFlags= kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    audioFormat.mFramesPerPacket= 1;
    audioFormat.mChannelsPerFrame= 1;
    audioFormat.mBitsPerChannel= 16;
    audioFormat.mBytesPerPacket= 2;
    audioFormat.mBytesPerFrame= 2;

    // Apply format
    status = AudioUnitSetProperty(rioUnit, 
                                  kAudioUnitProperty_StreamFormat, 
                                  kAudioUnitScope_Output, 
                                  kInputBus, 
                                  &audioFormat, 
                                  sizeof(audioFormat));
    checkStatus(status);

    status = AudioUnitSetProperty(rioUnit, 
                                  kAudioUnitProperty_StreamFormat, 
                                  kAudioUnitScope_Input, 
                                  kOutputBus, 
                                  &audioFormat, 
                                  sizeof(audioFormat));
    checkStatus(status);

    // Set input callback
    AURenderCallbackStruct callbackStruct;
    callbackStruct.inputProc = recordingCallback;
    callbackStruct.inputProcRefCon = (__bridge void*)self;

    status = AudioUnitSetProperty(rioUnit, 
                                  kAudioOutputUnitProperty_SetInputCallback, 
                                  kAudioUnitScope_Global, 
                                  kInputBus, 
                                  &callbackStruct, 
                                  sizeof(callbackStruct));
    checkStatus(status);


    // Disable buffer allocation for the recorder
    flag = 0;
    status = AudioUnitSetProperty(rioUnit, kAudioUnitProperty_ShouldAllocateBuffer, kAudioUnitScope_Global, kInputBus, &flag, sizeof(flag));


    // Initialise
    UInt32 category = kAudioSessionCategory_PlayAndRecord;
    status = AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(category), &category);
    checkStatus(status);

    status = 0;
    status = AudioSessionSetActive(YES);
    checkStatus(status);

    status = AudioUnitInitialize(rioUnit);
    checkStatus(status);

    return self;
}

#pragma mark Recording Callback
static OSStatus recordingCallback(void *inRefCon, 
                                  AudioUnitRenderActionFlags *ioActionFlags, 
                                  const AudioTimeStamp *inTimeStamp, 
                                  UInt32 inBusNumber, 
                                  UInt32 inNumberFrames, 
                                  AudioBufferList *ioData) {

    AudioController *THIS = (__bridge AudioController*) inRefCon;

    THIS->bufferList.mNumberBuffers = 1;
    THIS->bufferList.mBuffers[0].mDataByteSize = sizeof(SInt16)*inNumberFrames;
    THIS->bufferList.mBuffers[0].mNumberChannels = 1;
    THIS->bufferList.mBuffers[0].mData = (SInt16*) malloc(sizeof(SInt16)*inNumberFrames);

    OSStatus status;

    status = AudioUnitRender(THIS->rioUnit, 
                             ioActionFlags, 
                             inTimeStamp, 
                             inBusNumber, 
                             inNumberFrames, 
                             &(THIS->bufferList));
    checkStatus(status);

    dispatch_async(dispatch_get_main_queue(), ^{
        [THIS.delegate  receivedAudioSamples:(SInt16*)THIS->bufferList.mBuffers[0].mData length:inNumberFrames];
    }); 

    return noErr;
}



-(void) startAudio
{
    OSStatus status = AudioOutputUnitStart(rioUnit);
    checkStatus(status);
    printf("Audio Initialized - sampleRate: %f\n", audioFormat.mSampleRate);
}

@end
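Alternatively, the gate could live in recordingCallback in AudioController.m, so that quiet buffers are never dispatched to the delegate at all, which comes closest to "turning the microphone off" when only line noise is present. Again assuming the hypothetical bufferExceedsNoiseGate helper, the dispatch at the end of the callback could be wrapped like this:

// Sketch: only forward the rendered buffer when it is louder than the gate.
SInt16 *data = (SInt16 *) THIS->bufferList.mBuffers[0].mData;
if (bufferExceedsNoiseGate(data, (int) inNumberFrames, -45.0f)) {
    dispatch_async(dispatch_get_main_queue(), ^{
        [THIS.delegate receivedAudioSamples:data length:inNumberFrames];
    });
}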

0 Answers:

There are no answers yet.