Unexpected signal generation pattern on iOS

Asked: 2014-05-27 02:32:58

Tags: ios objective-c signal-processing core-audio

I want to generate a tone with the waveform pattern below (the last midpoint should end at the bottom, just like the preceding cycles).

(Image: expected waveform)

After changing the frequency from 44.44 Hz to 45.89 Hz, it becomes:

(Image: actual result)

Even though I have changed the buffer length from 1024 to 960, the buffer still appears to be 1024 samples long. As a result, a leftover tail is produced and the waveform ends at the midpoint instead of at the bottom.

Here is my code:

OSStatus RenderTone(
                    void *inRefCon,
                    AudioUnitRenderActionFlags  *ioActionFlags,
                    const AudioTimeStamp        *inTimeStamp,
                    UInt32                      inBusNumber,
                    UInt32                      inNumberFrames,
                    AudioBufferList             *ioData)

{
    // Fixed amplitude is good enough for our purposes
    const double amplitude =  2.7171;

    // Get the tone parameters out of the view controller
    ToneGeneratorViewController *viewController =
    (ToneGeneratorViewController *)inRefCon;
    double theta = viewController->theta; // 992 for 44.44 Hz, 959 for 45.89 Hz
    double theta_increment = viewController->sampleRate / viewController->frequency;
    int increment = ceil(theta_increment);
    NSLog(@"increment= %i" , increment);
    const int channel = 0;
    Float32 *buffer = (Float32 *)ioData->mBuffers[channel].mData;
    int squareIndex = 0;

    // Hard-coded pattern for one cycle: one long positive segment, then
    // alternating short negative and longer positive segments.
    for (UInt32 frame = 0; frame < 401; frame++)
    {
        buffer[frame] = amplitude;
    }
    for (UInt32 frame = 401; frame < 419; frame++)
    {
        buffer[frame] = -amplitude;
    }
    for (UInt32 frame = 419; frame < 468; frame++)
    {
        buffer[frame] = amplitude;
    }
    for (UInt32 frame = 468; frame < 487; frame++)
    {
        buffer[frame] = -amplitude;
    }
    for (UInt32 frame = 487; frame < 536; frame++)
    {
        buffer[frame] = amplitude;
    }
    for (UInt32 frame = 536; frame < 555; frame++)
    {
        buffer[frame] = -amplitude;
    }
    for (UInt32 frame = 555; frame < 604; frame++)
    {
        buffer[frame] = amplitude;
    }
    for (UInt32 frame = 604; frame < 622; frame++)
    {
        buffer[frame] = -amplitude;
    }
    for (UInt32 frame = 622; frame < 671; frame++)
    {
        buffer[frame] = amplitude;
    }
    for (UInt32 frame = 671; frame < 690; frame++)
    {
        buffer[frame] = -amplitude;
    }
    for (UInt32 frame = 690; frame < 739; frame++)
    {
        buffer[frame] = amplitude;
    }
    for (UInt32 frame = 739; frame < 757; frame++)
    {
        buffer[frame] = -amplitude;
    }
    for (UInt32 frame = 757; frame < 806; frame++)
    {
        buffer[frame] = amplitude;
    }
    for (UInt32 frame = 806; frame < 825; frame++)
    {
        buffer[frame] = -amplitude;
    }
    for (UInt32 frame = 825; frame < 874; frame++)
    {
        buffer[frame] = amplitude;
    }
    for (UInt32 frame = 874; frame < 892; frame++)
    {
        buffer[frame] = -amplitude;
    }
    for (UInt32 frame = 892; frame < 941; frame++)
    {
        buffer[frame] = amplitude;
    }
    for (UInt32 frame = 941; frame < increment; frame++)
    {
        buffer[frame] = -amplitude;
    }

    squareIndex += 1.0;
    if (squareIndex >= theta_increment) squareIndex -= theta_increment;
    viewController->theta = theta;

    return noErr;
}


void ToneInterruptionListener(void *inClientData, UInt32 inInterruptionState)
{
    ToneGeneratorViewController *viewController =
    (ToneGeneratorViewController *)inClientData;

    [viewController stop];
}

@implementation ToneGeneratorViewController

@synthesize frequencySlider;
@synthesize playButton;
@synthesize frequencyLabel;

- (IBAction)sliderChanged:(UISlider *)slider
{
    frequency = 45.9;
    frequencyLabel.text = [NSString stringWithFormat:@"%4.1f Hz", frequency];
}

- (void)createToneUnit
{
    // Configure the search parameters to find the default playback output unit
    // (called the kAudioUnitSubType_RemoteIO on iOS but
    // kAudioUnitSubType_DefaultOutput on Mac OS X)
    AudioComponentDescription defaultOutputDescription;
    defaultOutputDescription.componentType = kAudioUnitType_Output;
    defaultOutputDescription.componentSubType = kAudioUnitSubType_RemoteIO;
    defaultOutputDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
    defaultOutputDescription.componentFlags = 0;
    defaultOutputDescription.componentFlagsMask = 0;

    // Get the default playback output unit
    AudioComponent defaultOutput = AudioComponentFindNext(NULL, &defaultOutputDescription);
    NSAssert(defaultOutput, @"Can't find default output");

    // Create a new unit based on this that we'll use for output
    OSErr err = AudioComponentInstanceNew(defaultOutput, &toneUnit);
    NSAssert1(toneUnit, @"Error creating unit: %ld", err);

    // Set our tone rendering function on the unit
    AURenderCallbackStruct input;
    input.inputProc = RenderTone;
    input.inputProcRefCon = self;
    err = AudioUnitSetProperty(toneUnit,
                               kAudioUnitProperty_SetRenderCallback,
                               kAudioUnitScope_Input,
                               0,
                               &input,
                               sizeof(input));
    NSAssert1(err == noErr, @"Error setting callback: %ld", err);

    // Set the format to 32 bit, single channel, floating point, linear PCM
    const int four_bytes_per_float = 4;
    const int eight_bits_per_byte = 8;
    AudioStreamBasicDescription streamFormat;
    streamFormat.mSampleRate = sampleRate;
    streamFormat.mFormatID = kAudioFormatLinearPCM;
    streamFormat.mFormatFlags =
    kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved;
    streamFormat.mBytesPerPacket = four_bytes_per_float;
    streamFormat.mFramesPerPacket = 1;
    streamFormat.mBytesPerFrame = four_bytes_per_float;
    streamFormat.mChannelsPerFrame = 1;
    streamFormat.mBitsPerChannel = four_bytes_per_float * eight_bits_per_byte;
    err = AudioUnitSetProperty (toneUnit,
                                kAudioUnitProperty_StreamFormat,
                                kAudioUnitScope_Input,
                                0,
                                &streamFormat,
                                sizeof(AudioStreamBasicDescription));
    NSAssert1(err == noErr, @"Error setting stream format: %ld", err);
}

- (IBAction)togglePlay:(UIButton *)selectedButton
{
    if (toneUnit)
    {
        AudioOutputUnitStop(toneUnit);
        AudioUnitUninitialize(toneUnit);
        AudioComponentInstanceDispose(toneUnit);
        toneUnit = nil;

        [selectedButton setTitle:NSLocalizedString(@"Play", nil) forState:0];
    }
    else
    {
        [self createToneUnit];

        // Stop changing parameters on the unit
        OSErr err = AudioUnitInitialize(toneUnit);
        NSAssert1(err == noErr, @"Error initializing unit: %ld", err);

        // Start playback
        err = AudioOutputUnitStart(toneUnit);
        NSAssert1(err == noErr, @"Error starting unit: %ld", err);

        [selectedButton setTitle:NSLocalizedString(@"Stop", nil) forState:0];
    }
}

- (void)stop
{
    if (toneUnit)
    {
        [self togglePlay:playButton];
    }
}

- (void)viewDidLoad {
    [super viewDidLoad];

    [self sliderChanged:frequencySlider];
    sampleRate = 44100;

    OSStatus result = AudioSessionInitialize(NULL, NULL, ToneInterruptionListener, self);
    if (result == kAudioSessionNoError)
    {
        UInt32 sessionCategory = kAudioSessionCategory_MediaPlayback;
        AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(sessionCategory), &sessionCategory);
    }
    AudioSessionSetActive(true);
}

- (void)viewDidUnload {
    self.frequencyLabel = nil;
    self.playButton = nil;
    self.frequencySlider = nil;

    AudioSessionSetActive(false);
}

1 Answer:

Answer 0 (score: 3):

When Core Audio calls your RenderTone callback, it expects you to supply a specific number of audio frames for that buffer. The inNumberFrames parameter tells you what that number is.

(Core Audio allows some adjustment of the hardware buffer size, but it may change the value you request to suit itself, for example by rounding up to the next power of two.)
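As a rough illustration of that negotiation, here is a minimal sketch, using the same deprecated AudioSession C API that the question's viewDidLoad already relies on, of requesting a roughly 960-frame I/O buffer and then reading back what the hardware actually granted; the 960-frame target and the log statement are assumptions added for this example:

// Sketch: request a ~960-frame I/O buffer at 44.1 kHz, then check what was granted.
Float32 preferredDuration = 960.0f / 44100.0f;   // about 21.8 ms (assumed target)
AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration,
                        sizeof(preferredDuration), &preferredDuration);

Float32 grantedDuration = 0.0f;
UInt32 propertySize = sizeof(grantedDuration);
AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareIOBufferDuration,
                        &propertySize, &grantedDuration);
NSLog(@"Granted I/O buffer duration: %f s (~%d frames)",
      grantedDuration, (int)lround(grantedDuration * 44100.0));

Even when such a preference is honored approximately, the render callback must still handle whatever inNumberFrames it is actually given.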

So you cannot adjust the callback buffer size to fit exactly one period of the waveform you want to generate. Instead, you have to keep track of where you currently are within the waveform, generate as many samples as the callback asks for, and then pick up where you left off in the next callback.

In your example, if inNumberFrames is 1024, then in the first callback you would supply one complete 960-sample period plus the first 64 samples of the next period. In the second callback you would supply the remaining 896 samples of the second period and the first 128 samples of the third, and so on.
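To make that carry-over concrete, below is a minimal sketch of a render callback that keeps a running position inside a fixed cycle and resumes from it on the next call. The names cyclePosition, kCycleLength, and kHighSamples are hypothetical, introduced only for this example; a real version would store the position in the view controller (as the question's code does with theta) and derive the cycle length from sampleRate / frequency:

// Minimal sketch: a square-ish cycle of kCycleLength samples, of which the first
// kHighSamples are +amplitude and the rest are -amplitude. The static position
// survives between callbacks, so a cycle can straddle buffer boundaries.
static const UInt32 kCycleLength = 960;   // assumed period in samples
static const UInt32 kHighSamples = 900;   // assumed split point within one cycle
static UInt32 cyclePosition = 0;          // where the previous callback stopped

OSStatus RenderTone(void                        *inRefCon,
                    AudioUnitRenderActionFlags  *ioActionFlags,
                    const AudioTimeStamp        *inTimeStamp,
                    UInt32                      inBusNumber,
                    UInt32                      inNumberFrames,
                    AudioBufferList             *ioData)
{
    const Float32 amplitude = 0.25f;      // keep within [-1, 1] for Float32 PCM
    Float32 *buffer = (Float32 *)ioData->mBuffers[0].mData;

    // Fill exactly inNumberFrames samples, however many cycles that spans.
    for (UInt32 frame = 0; frame < inNumberFrames; frame++)
    {
        buffer[frame] = (cyclePosition < kHighSamples) ? amplitude : -amplitude;

        cyclePosition++;
        if (cyclePosition >= kCycleLength)
            cyclePosition = 0;            // wrap: the next cycle may start mid-buffer
    }
    return noErr;
}

With inNumberFrames = 1024, this fills one complete 960-sample cycle plus the first 64 samples of the next in the first call, then the remaining 896 plus another 128 in the second, matching the description above.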