AudioUnit tone generator gives me a chirp at the end of every tone

Time: 2012-04-07 02:24:23

Tags: objective-c ios core-audio audiounit

I'm building an old-school music emulator for the old GWBasic PLAY command. To do this, I have a tone generator and a music player. Between each note played I get a chirp. Here are my two classes:

ToneGen.h

#import <Foundation/Foundation.h>

@interface ToneGen : NSObject
@property (nonatomic) id delegate;
@property (nonatomic) double frequency;
@property (nonatomic) double sampleRate;
@property (nonatomic) double theta;
- (void)play:(float)ms;
- (void)play;
- (void)stop;
@end

ToneGen.m

#import <AudioUnit/AudioUnit.h>
#import "ToneGen.h"

OSStatus RenderTone(
                    void *inRefCon, 
                    AudioUnitRenderActionFlags  *ioActionFlags, 
                    const AudioTimeStamp        *inTimeStamp, 
                    UInt32                      inBusNumber, 
                    UInt32                      inNumberFrames, 
                    AudioBufferList             *ioData);
void ToneInterruptionListener(void *inClientData, UInt32 inInterruptionState);


@interface ToneGen()
@property (nonatomic) AudioComponentInstance toneUnit;
@property (nonatomic) NSTimer *timer;
- (void)createToneUnit;
@end


@implementation ToneGen
@synthesize toneUnit = _toneUnit;
@synthesize timer = _timer;
@synthesize delegate = _delegate;
@synthesize frequency = _frequency;
@synthesize sampleRate = _sampleRate;
@synthesize theta = _theta;

- (id) init
{
    self = [super init];
    if (self)
    {
        self.sampleRate = 44100;
        self.frequency = 1440.0f;
        return self;
    }
    return nil;
}

- (void)play:(float)ms
{
    [self play];
    self.timer = [NSTimer scheduledTimerWithTimeInterval:(ms / 100) 
                                                  target:self 
                                                selector:@selector(stop) 
                                                userInfo:nil 
                                                 repeats:NO];
    [[NSRunLoop mainRunLoop] addTimer:self.timer forMode:NSRunLoopCommonModes];
}

- (void)play
{
    if (!self.toneUnit)
    {
        [self createToneUnit];

        // Stop changing parameters on the unit
        OSErr err = AudioUnitInitialize(self.toneUnit);
        if (err)
            DLog(@"Error initializing unit");

        // Start playback
        err = AudioOutputUnitStart(self.toneUnit);
        if (err)
            DLog(@"Error starting unit");
    }
}

- (void)stop
{
    [self.timer invalidate];
    self.timer = nil;

    if (self.toneUnit)
    {
        AudioOutputUnitStop(self.toneUnit);
        AudioUnitUninitialize(self.toneUnit);
        AudioComponentInstanceDispose(self.toneUnit);
        self.toneUnit = nil;
    }

    if(self.delegate && [self.delegate respondsToSelector:@selector(toneStop)]) {
        [self.delegate performSelector:@selector(toneStop)];
    }
}

- (void)createToneUnit
{
    AudioComponentDescription defaultOutputDescription;
    defaultOutputDescription.componentType = kAudioUnitType_Output;
    defaultOutputDescription.componentSubType = kAudioUnitSubType_DefaultOutput;
    defaultOutputDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
    defaultOutputDescription.componentFlags = 0;
    defaultOutputDescription.componentFlagsMask = 0;

    // Get the default playback output unit
    AudioComponent defaultOutput = AudioComponentFindNext(NULL, &defaultOutputDescription);
    if (!defaultOutput)
        DLog(@"Can't find default output");

    // Create a new unit based on this that we'll use for output
    OSErr err = AudioComponentInstanceNew(defaultOutput, &_toneUnit);
    if (err)
        DLog(@"Error creating unit");

    // Set our tone rendering function on the unit
    AURenderCallbackStruct input;
    input.inputProc = RenderTone;
    input.inputProcRefCon = (__bridge void*)self;
    err = AudioUnitSetProperty(self.toneUnit, 
                               kAudioUnitProperty_SetRenderCallback, 
                               kAudioUnitScope_Input,
                               0, 
                               &input, 
                               sizeof(input));
    if (err)
        DLog(@"Error setting callback");

    // Set the format to 32 bit, single channel, floating point, linear PCM
    const int four_bytes_per_float = 4;
    const int eight_bits_per_byte = 8;
    AudioStreamBasicDescription streamFormat;
    streamFormat.mSampleRate = self.sampleRate;
    streamFormat.mFormatID = kAudioFormatLinearPCM;
    streamFormat.mFormatFlags =
    kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved;
    streamFormat.mBytesPerPacket = four_bytes_per_float;
    streamFormat.mFramesPerPacket = 1;  
    streamFormat.mBytesPerFrame = four_bytes_per_float;     
    streamFormat.mChannelsPerFrame = 1; 
    streamFormat.mBitsPerChannel = four_bytes_per_float * eight_bits_per_byte;
    err = AudioUnitSetProperty (self.toneUnit,
                                kAudioUnitProperty_StreamFormat,
                                kAudioUnitScope_Input,
                                0,
                                &streamFormat,
                                sizeof(AudioStreamBasicDescription));
    if (err)
        DLog(@"Error setting stream format");
}

@end


OSStatus RenderTone(
                    void *inRefCon, 
                    AudioUnitRenderActionFlags  *ioActionFlags, 
                    const AudioTimeStamp        *inTimeStamp, 
                    UInt32                      inBusNumber, 
                    UInt32                      inNumberFrames, 
                    AudioBufferList             *ioData)

{
    // Fixed amplitude is good enough for our purposes
    const double amplitude = 0.25;

    // Get the tone parameters out of the view controller
    ToneGen *toneGen = (__bridge ToneGen *)inRefCon;
    double theta = toneGen.theta;
    double theta_increment = 2.0 * M_PI * toneGen.frequency / toneGen.sampleRate;

    // This is a mono tone generator so we only need the first buffer
    const int channel = 0;
    Float32 *buffer = (Float32 *)ioData->mBuffers[channel].mData;

    // Generate the samples
    for (UInt32 frame = 0; frame < inNumberFrames; frame++) 
    {
        buffer[frame] = sin(theta) * amplitude;

        theta += theta_increment;
        if (theta > 2.0 * M_PI)
        {
            theta -= 2.0 * M_PI;
        }
    }

    // Store the theta back in the view controller
    toneGen.theta = theta;

    return noErr;
}

void ToneInterruptionListener(void *inClientData, UInt32 inInterruptionState)
{
    ToneGen *toneGen = (__bridge ToneGen *)inClientData;
    [toneGen stop];
}

Music.h

#import <Foundation/Foundation.h>

@interface Music : NSObject
- (void) play:(NSString *)music;
- (void) stop;
@end

Music.m

#import "Music.h"
#import "ToneGen.h"

@interface Music()
@property (nonatomic, readonly) ToneGen *toneGen;
@property (nonatomic, assign) int octive;
@property (nonatomic, assign) int tempo;
@property (nonatomic, assign) int length;

@property (nonatomic, strong) NSData *music;
@property (nonatomic, assign) int dataPos;
@property (nonatomic, assign) BOOL isPlaying;

- (void)playNote;
@end

@implementation Music
@synthesize toneGen = _toneGen;
- (ToneGen*)toneGen
{
    if (_toneGen == nil)
    {
        _toneGen = [[ToneGen alloc] init];
        _toneGen.delegate = self;
    }
    return _toneGen;
}
@synthesize octive = _octive;
- (void)setOctive:(int)octive
{
    // Sanity check
    if (octive < 0)
        octive = 0;
    if (octive > 6)
        octive = 6;
    _octive = octive;
}
@synthesize tempo = _tempo;
- (void)setTempo:(int)tempo
{
    // Sanity check
    if (tempo < 30)
        tempo = 30;
    if (tempo > 255)
        tempo = 255;
    _tempo = tempo;
}
@synthesize length = _length;
- (void)setLength:(int)length
{
    // Sanity check
    if (length < 1)
        length = 1;
    if (length > 64)
        length = 64;
    _length = length;
}
@synthesize music = _music;
@synthesize dataPos = _dataPos;
@synthesize isPlaying = _isPlaying;


- (id)init
{
    self = [super init];
    if (self)
    {
        self.octive = 4;
        self.tempo = 120;
        self.length = 1;
        return self;
    }
    return nil;
}

- (void) play:(NSString *)music
{
    DLog(@"%@", music);
    self.music = [[music stringByReplacingOccurrencesOfString:@"+" withString:@"#"]
                  dataUsingEncoding: NSASCIIStringEncoding];
    self.dataPos = 0;
    self.isPlaying = YES;
    [self playNote];
}

- (void)stop
{
    self.isPlaying = NO;
}

- (void)playNote
{
    if (!self.isPlaying)
        return;

    if (self.dataPos >= self.music.length || self.music.length == 0) {
        self.isPlaying = NO;
        return;
    }

    unsigned char *data = (unsigned char*)[self.music bytes];
    unsigned int code = (unsigned int)data[self.dataPos];
    self.dataPos++;

    switch (code) {
        case 65: // A
        case 66: // B
        case 67: // C
        case 68: // D
        case 69: // E
        case 70: // F
        case 71: // G
            {
                // Peek at the next char to look for a sharp or flat
                bool sharp = NO;
                bool flat = NO;
                if (self.dataPos < self.music.length) {
                    unsigned int peak = (unsigned int)data[self.dataPos];
                    if (peak == 35) // #
                    {
                        self.dataPos++;
                        sharp = YES;
                    }
                    else if (peak == 45)  // -
                    {
                        self.dataPos++;
                        flat = YES;
                    }
                }

                // Peek ahead for a length change
                bool look = YES;
                int newLength = 0;
                while (self.dataPos < self.music.length && look) {
                    unsigned int peak = (unsigned int)data[self.dataPos];
                    if (peak >= 48 && peak <= 57)
                    {
                        peak -= 48;
                        newLength = newLength * 10 + peak;
                        self.dataPos++;
                    } else {
                        look = NO;
                    }
                }

                // Pick the note length
                int length = self.length;
                if (newLength != 0)
                {
                    DLog(@"InlineLength: %d", newLength);
                    length = newLength;
                }


                // Create the note string
                NSString *note = [NSString stringWithFormat:@"%c", code];
                if (sharp)
                    note = [note stringByAppendingFormat:@"#"];
                else if (flat)
                    note = [note stringByAppendingFormat:@"-"];

                // Set the tone generator freq
                [self setFreq:[self getNoteNumber:note]];

                // Play the note
                [self.toneGen play:(self.tempo / length)];
            }
            break;

        case 76: // L (length)
        {
            bool look = YES;
            int newLength = 0;
            while (self.dataPos < self.music.length && look) {
                unsigned int peak = (unsigned int)data[self.dataPos];
                if (peak >= 48 && peak <= 57)
                {
                    peak -= 48;
                    newLength = newLength * 10 + peak;
                    self.dataPos++;
                } else {
                    look = NO;
                }
            }
            self.length = newLength;
            DLog(@"Length: %d", self.length);
            [self playNote];
        }
            break;

        case 79: // O (octive)
            {
                bool look = YES;
                int newOctive = 0;
                while (self.dataPos < self.music.length && look) {
                    unsigned int peak = (unsigned int)data[self.dataPos];
                    if (peak >= 48 && peak <= 57)
                    {
                        peak -= 48;
                        newOctive = newOctive * 10 + peak;
                        self.dataPos++;
                    } else {
                        look = NO;
                    }
                }
                self.octive = newOctive;
                DLog(@"Octive: %d", self.self.octive);
                [self playNote];
            }
            break;

        case 84: // T (tempo)
            {
                bool look = YES;
                int newTempo = 0;
                while (self.dataPos < self.music.length && look) {
                    unsigned int peak = (unsigned int)data[self.dataPos];
                    if (peak >= 48 && peak <= 57)
                    {
                        peak -= 48;
                        newTempo = newTempo * 10 + peak;
                        self.dataPos++;
                    } else {
                        look = NO;
                    }
                }
                self.tempo = newTempo;
                DLog(@"Tempo: %d", self.self.tempo);
                [self playNote];
            }
            break;

        default:
            [self playNote];
            break;
    }
}


- (int)getNoteNumber:(NSString*)note
{
    note = [note uppercaseString];
    DLog(@"%@", note);

    if ([note isEqualToString:@"A"])
        return 0;
    else if ([note isEqualToString:@"A#"] || [note isEqualToString:@"B-"])
        return 1;
    else if ([note isEqualToString:@"B"] || [note isEqualToString:@"C-"])
        return 2;
    else if ([note isEqualToString:@"C"] || [note isEqualToString:@"B#"])
        return 3;
    else if ([note isEqualToString:@"C#"] || [note isEqualToString:@"D-"])
        return 4;
    else if ([note isEqualToString:@"D"])
        return 5;
    else if ([note isEqualToString:@"D#"] || [note isEqualToString:@"E-"])
        return 6;
    else if ([note isEqualToString:@"E"] || [note isEqualToString:@"F-"])
        return 7;
    else if ([note isEqualToString:@"F"] || [note isEqualToString:@"E#"])
        return 8;
    else if ([note isEqualToString:@"F#"] || [note isEqualToString:@"G-"])
        return 9;
    else if ([note isEqualToString:@"G"])
        return 10;
    else if ([note isEqualToString:@"G#"])
        return 11;

    // Fallback for unrecognized input so the method always returns a value
    return 0;
}

- (void)setFreq:(int)note
{
    float a = powf(2, self.octive);
    float b = powf(1.059463, note);
    float freq = roundf((275.0 * a * b) / 10);
    self.toneGen.frequency = freq;
}

- (void)toneStop
{
    [self playNote];
}

@end

To play a little tune, create a Music object and play...

[self.music play:@"T180 DF#A L2 A L4 O4 AA P4 F#F# P4 O3 D DF#A L2 A L4 O4 AA P4 GG P4 O3 C#C#EB L2 B L4 O4 BB P4 GG P4 O3 C#C#EB L2 B L4 O4 BB P4 F+F+ P4 O3 DDF#A L2 O4 D L4 O5 DD P4O4 AA P4 O3 DDF#A L2 O4 D L4 O5 DD P4O4 BB P4 EEG L8 B P8 ML B1 L4 MN G#A ML L3 O5 F#1L4 MN D O4 F# ML L2 F# MN L4 E ML L2 B MN L4 AD P8 D8 D4"];

Any ideas on how to get rid of the chirp between notes?

2 Answers:

Answer 0 (score: 3)

I think the bit that stops the audio output between notes is the culprit:

if (self.toneUnit)
{
    AudioOutputUnitStop(self.toneUnit);
    AudioUnitUninitialize(self.toneUnit);
    AudioComponentInstanceDispose(self.toneUnit);
    self.toneUnit = nil;
}

Just keep the tone unit alive and you'll cut down on the chirp. You'll need some other way of producing silence, perhaps letting RenderTone keep running but generate zero amplitude, as in the sketch below.
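
As an illustration only — this is a minimal sketch, and the silent flag is a hypothetical property that does not exist in the original ToneGen — stop could mark the generator silent instead of tearing the unit down, and RenderTone could write zeros while silent:

// In ToneGen: add a flag instead of disposing the unit on stop
@property (nonatomic) BOOL silent;

- (void)stop
{
    [self.timer invalidate];
    self.timer = nil;

    // Keep the audio unit running; just output silence from now on
    self.silent = YES;

    if (self.delegate && [self.delegate respondsToSelector:@selector(toneStop)]) {
        [self.delegate performSelector:@selector(toneStop)];
    }
}

// In RenderTone, before generating samples: fill the buffer with zeros while silent
if (toneGen.silent)
{
    memset(ioData->mBuffers[channel].mData, 0,
           ioData->mBuffers[channel].mDataByteSize);
    return noErr;
}

play would then reset self.silent = NO before (re)starting the timer.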

I was able to eliminate the remaining slight chirp on frequency changes by fading the amplitude down to zero, updating the frequency, and then fading back in again. That's admittedly something the old PC speaker couldn't do (short of someone stopping and quickly restarting it), but you can make the fades fast enough that there is no audible chirp.

Here is my fading RenderTone function (currently using evil global variables):

double currentFrequency=0;
double currentSampleRate=0;
double currentAmplitude=0;

OSStatus RenderTone(
                    void *inRefCon, 
                    AudioUnitRenderActionFlags  *ioActionFlags, 
                    const AudioTimeStamp        *inTimeStamp, 
                    UInt32                      inBusNumber, 
                    UInt32                      inNumberFrames, 
                    AudioBufferList             *ioData)

{
    // Fixed amplitude is good enough for our purposes
    const double amplitude = 0.5;

    // Get the tone parameters out of the view controller
    ToneGen *toneGen = (__bridge ToneGen *)inRefCon;
    double theta = toneGen.theta;

    BOOL fadingOut = NO;
    if ((currentFrequency != toneGen.frequency) || (currentSampleRate != toneGen.sampleRate))
    {
        if (currentAmplitude > DBL_EPSILON)
        {
            fadingOut = YES;
        }
        else
        {
            currentFrequency = toneGen.frequency;
            currentSampleRate = toneGen.sampleRate;
        }
    }

    double theta_increment = 2.0 * M_PI * currentFrequency /currentSampleRate;

    // This is a mono tone generator so we only need the first buffer
    const int channel = 0;
    Float32 *buffer = (Float32 *)ioData->mBuffers[channel].mData;

    // Generate the samples
    for (UInt32 frame = 0; frame < inNumberFrames; frame++) 
    {
        buffer[frame] = sin(theta) * currentAmplitude;
        //NSLog(@"amplitude = %f", currentAmplitude);

        theta += theta_increment;
        if (theta > 2.0 * M_PI)
        {
            theta -= 2.0 * M_PI;
        }
        if (fadingOut)
        {
            if (currentAmplitude > 0)
            {
                currentAmplitude -= 0.001;
                if (currentAmplitude < 0)
                    currentAmplitude = 0;
            }
        }
        else
        {
            if (currentAmplitude < amplitude)
            {
                currentAmplitude += 0.001;
                if (currentAmplitude > amplitude)
                    currentAmplitude = amplitude;
            }
        }

    }

    // Store the theta back in the view controller
    toneGen.theta = theta;

    return noErr;
}

Answer 1 (score: 1)

That little chirp is usually a mathematical artifact. The ear essentially analyzes the incoming signal in the frequency domain. A steady sine wave at 220 Hz, for example, sounds like an A. But when your sine wave isn't steady, extra frequencies appear because of the boundaries. In particular, starting or stopping the sound abruptly introduces very high frequencies, which you hear as a little pop.

The way I dealt with this in my synthesizer (in JavaScript rather than Obj-C, but the concept is the same here) was to fade the sound in over roughly 300 samples and fade it out over another 300 samples. Short of having no boundaries at all, there's no way to completely remove the boundary effects, but even a small, barely perceptible amount of fading makes them inaudible. A rough sketch of that idea follows.
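
As a sketch of that idea in the style of the RenderTone loop above (the 300-sample ramp, the envelope variable, and the noteEnding flag are illustrative assumptions, not code from either answer):

// Illustrative per-sample envelope: ramp the gain up over ~300 samples at
// the start of a note and back down over ~300 samples at the end.
static const int kFadeSamples = 300;   // assumed ramp length
static double envelope = 0.0;          // current gain, 0..1
static BOOL noteEnding = NO;           // assumed flag, set when the note should stop

for (UInt32 frame = 0; frame < inNumberFrames; frame++)
{
    buffer[frame] = sin(theta) * amplitude * envelope;

    theta += theta_increment;
    if (theta > 2.0 * M_PI)
        theta -= 2.0 * M_PI;

    // Linear fade-out while the note is ending, otherwise linear fade-in
    if (noteEnding)
        envelope = fmax(0.0, envelope - 1.0 / kFadeSamples);
    else
        envelope = fmin(1.0, envelope + 1.0 / kFadeSamples);
}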