iOS AudioUnit: garbage input and output callback error when writing to the right channel

Date: 2014-04-17 02:45:29

Tags: ios core-audio exc-bad-access audiounit

I am trying to output a sine wave on the left channel and silence on the right channel of an AudioUnit. When I try to write zeros to the right channel, I get the following error:

Thread 5: EXC_BAD_ACCESS(code=1, address=0x0)

The callback where this happens is below; the line that faults is marked with the comment // **** ERROR HERE **** at the end of the line.

Output callback

static OSStatus outputCallback(void *inRefCon,
                          AudioUnitRenderActionFlags    *ioActionFlags,
                          const AudioTimeStamp          *inTimeStamp,
                          UInt32                        inBusNumber,
                          UInt32                        inNumberFrames,
                          AudioBufferList               *ioData) {
    // Scope reference to GSFSensorIOController class
    GSFSensorIOController *THIS = (__bridge GSFSensorIOController *) inRefCon;

    // Communication out on left and right channel if new communication out
    AudioSampleType *outLeftSamples = (AudioSampleType *) ioData->mBuffers[0].mData;
    AudioSampleType *outRightSamples = (AudioSampleType *) ioData->mBuffers[1].mData;

    // Set up power tone attributes
    float freq = 20000.00f;
    float sampleRate = 44100.00f;
    float phase = THIS.sinPhase;
    float sinSignal;

    double phaseInc = 2 * M_PI * freq / sampleRate;

    for (UInt32 curFrame = 0; curFrame < inNumberFrames; ++curFrame) {
        // Generate power tone on left channel
        sinSignal = sin(phase);
        outLeftSamples[curFrame] = (SInt16) ((sinSignal * 32767.0f) /2);
        outRightSamples[curFrame] = (SInt16) (0);   // **** ERROR HERE ****
        phase += phaseInc;
        if (phase >= 2 * M_PI * freq) {
            phase = phase - (2 * M_PI * freq);
        }
    }

    // Save sine wave phase wave for next callback
    THIS.sinPhase = phase;

    return noErr;
}

When the error is thrown, curFrame = 0 and outRightSamples = NULL. This leads me to believe that I have set the channels up incorrectly. Here is where I set up the IO for the AudioUnit:

Audio Unit setup
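
(Note: kInputBus and kOutputBus are constants defined elsewhere in the project; for a RemoteIO unit the hardware-facing input is conventionally bus 1 and the output is bus 0.)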

// Audio component description
AudioComponentDescription desc;
desc.componentType          = kAudioUnitType_Output;
desc.componentSubType       = kAudioUnitSubType_RemoteIO;
desc.componentManufacturer  = kAudioUnitManufacturer_Apple;
desc.componentFlags         = 0;
desc.componentFlagsMask     = 0;

// Get component
AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);

// Mono ASBD
AudioStreamBasicDescription monoStreamFormat;
monoStreamFormat.mSampleRate          = 44100.00;
monoStreamFormat.mFormatID            = kAudioFormatLinearPCM;
monoStreamFormat.mFormatFlags         = kAudioFormatFlagsCanonical;
monoStreamFormat.mBytesPerPacket      = 2;
monoStreamFormat.mBytesPerFrame       = 2;
monoStreamFormat.mFramesPerPacket     = 1;
monoStreamFormat.mChannelsPerFrame    = 1;
monoStreamFormat.mBitsPerChannel      = 16;

// Stereo ASBD
AudioStreamBasicDescription stereoStreamFormat;
stereoStreamFormat.mSampleRate          = 44100.00;
stereoStreamFormat.mFormatID            = kAudioFormatLinearPCM;
stereoStreamFormat.mFormatFlags         = kAudioFormatFlagsCanonical;
stereoStreamFormat.mBytesPerPacket      = 4;
stereoStreamFormat.mBytesPerFrame       = 4;
stereoStreamFormat.mFramesPerPacket     = 1;
stereoStreamFormat.mChannelsPerFrame    = 2;
stereoStreamFormat.mBitsPerChannel      = 16;

OSErr err;
@try {
    // Get Audio units
    err = AudioComponentInstanceNew(inputComponent, &_ioUnit);
    NSAssert1(err == noErr, @"Error setting input component: %hd", err);

    // Enable input, which is disabled by default. Output is enabled by default
    UInt32 enableInput = 1;
    err = AudioUnitSetProperty(_ioUnit,
                         kAudioOutputUnitProperty_EnableIO,
                         kAudioUnitScope_Input,
                         kInputBus,
                         &enableInput,
                         sizeof(enableInput));
    NSAssert1(err == noErr, @"Error enable input: %hd", err);

    err = AudioUnitSetProperty(_ioUnit,
                         kAudioOutputUnitProperty_EnableIO,
                         kAudioUnitScope_Output,
                         kOutputBus,
                         &enableInput,
                         sizeof(enableInput));
    NSAssert1(err == noErr, @"Error setting output: %hd", err);

    // Apply format to input of ioUnit
    err = AudioUnitSetProperty(self.ioUnit,
                         kAudioUnitProperty_StreamFormat,
                         kAudioUnitScope_Input,
                         kOutputBus,
                         &monoStreamFormat,
                         sizeof(monoStreamFormat));
    NSAssert1(err == noErr, @"Error setting input ASBD: %hd", err);

    // Apply format to output of ioUnit
    err = AudioUnitSetProperty(self.ioUnit,
                         kAudioUnitProperty_StreamFormat,
                         kAudioUnitScope_Output,
                         kInputBus,
                         &stereoStreamFormat,
                         sizeof(stereoStreamFormat));
    NSAssert1(err == noErr, @"Error setting output ASBD: %hd", err);

    // Set input callback
    AURenderCallbackStruct callbackStruct;
    callbackStruct.inputProc = inputCallback;
    callbackStruct.inputProcRefCon = (__bridge void *)(self);
    err = AudioUnitSetProperty(self.ioUnit,
                         kAudioOutputUnitProperty_SetInputCallback,
                         kAudioUnitScope_Global,
                         kInputBus,
                         &callbackStruct,
                         sizeof(callbackStruct));
    NSAssert1(err == noErr, @"Error setting input callback: %hd", err);

    // Set output callback
    callbackStruct.inputProc = outputCallback;
    callbackStruct.inputProcRefCon = (__bridge void *)(self);
    err = AudioUnitSetProperty(self.ioUnit,
                         kAudioUnitProperty_SetRenderCallback,
                         kAudioUnitScope_Global,
                         kOutputBus,
                         &callbackStruct,
                         sizeof(callbackStruct));
    NSAssert1(err == noErr, @"Error setting output callback: %hd", err);

    // Disable buffer allocation
    UInt32 disableBufferAlloc = 0;
    err = AudioUnitSetProperty(self.ioUnit,
                               kAudioUnitProperty_ShouldAllocateBuffer,
                               kAudioUnitScope_Output,
                               kInputBus,
                               &disableBufferAlloc,
                               sizeof(disableBufferAlloc));

    // Allocate input buffers (1 channel, 16 bits per sample, thus 16 bits per frame and therefore 2 bytes per frame)
    _inBuffer.mNumberChannels = 1;
    _inBuffer.mDataByteSize = 512 * 2;
    _inBuffer.mData = malloc( 512 * 2 );

    // Initialize audio unit
    err = AudioUnitInitialize(self.ioUnit);
    NSAssert1(err == noErr, @"Error initializing unit: %hd", err);
    //AudioUnitInitialize(self.ioUnit);

    // Start audio IO
    err = AudioOutputUnitStart(self.ioUnit);
    NSAssert1(err == noErr, @"Error starting unit: %hd", err);
    //AudioOutputUnitStart(self.ioUnit);
}
@catch (NSException *exception) {
    NSLog(@"Failed with exception: %@", exception);
}

I also do not believe I am setting up the AudioUnit correctly, because I get random values for the input on the microphone line (that is, the values printed to the console from the input buffer do not change with ambient noise). Here is how I use the input callback:

Input callback

static OSStatus inputCallback(void *inRefCon,
                               AudioUnitRenderActionFlags   *ioActionFlags,
                               const AudioTimeStamp         *inTimeStamp,
                               UInt32                       inBusNumber,
                               UInt32                       inNumberFrames,
                               AudioBufferList              *ioData) {
    // Scope reference to GSFSensorIOController class
    GSFSensorIOController *THIS = (__bridge GSFSensorIOController *) inRefCon;

    // Set up buffer to hold input data
    AudioBuffer buffer;
    buffer.mNumberChannels = 1;
    buffer.mDataByteSize = inNumberFrames * 2;
    buffer.mData = malloc( inNumberFrames * 2 );

    // Place buffer in an AudioBufferList
    AudioBufferList bufferList;
    bufferList.mNumberBuffers = 1;
    bufferList.mBuffers[0] = buffer;

    // Grab the samples and place them in the buffer list
    AudioUnitRender(THIS.ioUnit,
                    ioActionFlags,
                    inTimeStamp,
                    inBusNumber,
                    inNumberFrames,
                    &bufferList);

    // Process data
    [THIS processIO:&bufferList];

    // Free allocated buffer
    free(bufferList.mBuffers[0].mData);

    return noErr;
}

I have searched through example projects for reference, and I cannot see where my implementation differs. Any help is greatly appreciated.

2 Answers:

Answer 0 (score: 0)

The default setting for the audio unit is likely interleaved stereo channel data, not separate left and right buffers.
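
If that is the case, both channels live in ioData->mBuffers[0], with samples alternating left, right, left, right, and so on. A minimal sketch of the render loop under that assumption (only the indexing differs from the question; the tone generation is the same, and the phase bookkeeping is omitted here):

// Interleaved stereo: one buffer, samples alternate L, R, L, R, ...
SInt16 *out = (SInt16 *) ioData->mBuffers[0].mData;

for (UInt32 curFrame = 0; curFrame < inNumberFrames; ++curFrame) {
    sinSignal = sin(phase);
    out[2 * curFrame]     = (SInt16) ((sinSignal * 32767.0f) / 2);  // left channel: tone
    out[2 * curFrame + 1] = 0;                                      // right channel: silence
    phase += phaseInc;
}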

Answer 1 (score: 0)

The problem here seems to be that you are writing to unallocated memory. ioData->mBuffers[1] is not valid for an interleaved format; the left and right channels are both interleaved in ioData->mBuffers[0]. If you want non-interleaved data, then mBytesPerFrame and mBytesPerPacket should be 2, not 4. That is probably also why you are failing on AudioUnitInitialize.
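
For illustration, a non-interleaved 16-bit stereo ASBD along those lines would look something like the sketch below. With kAudioFormatFlagIsNonInterleaved set, the per-frame and per-packet sizes describe a single channel, hence the 2-byte values:

// Non-interleaved 16-bit stereo: the sizes describe ONE channel's worth of data.
AudioStreamBasicDescription stereoStreamFormat = {0};
stereoStreamFormat.mSampleRate       = 44100.0;
stereoStreamFormat.mFormatID         = kAudioFormatLinearPCM;
stereoStreamFormat.mFormatFlags      = kAudioFormatFlagIsSignedInteger
                                     | kAudioFormatFlagIsPacked
                                     | kAudioFormatFlagIsNonInterleaved;
stereoStreamFormat.mBytesPerPacket   = 2;   // per channel, not per stereo frame
stereoStreamFormat.mBytesPerFrame    = 2;
stereoStreamFormat.mFramesPerPacket  = 1;
stereoStreamFormat.mChannelsPerFrame = 2;
stereoStreamFormat.mBitsPerChannel   = 16;

With this format, ioData->mBuffers[0] and ioData->mBuffers[1] each carry one channel, which is what the callback in the question expects.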

These formats are much easier to set up if you use the CAStreamBasicDescription utility class. See https://developer.apple.com/library/mac/samplecode/CoreAudioUtilityClasses/Introduction/Intro.html

Setting up the AudioStreamBasicDescription then becomes as simple as:

CAStreamBasicDescription stereoStreamFormat(44100.0, 2, CAStreamBasicDescription::kPCMFormatInt16, false);
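
The arguments here are sample rate, channel count, PCM sample format, and an interleaved flag; passing false for the last argument requests the non-interleaved layout that the callback in the question expects.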