由于错误的音频数据导致音频破裂

时间:2016-02-08 15:41:12

标签: macos audio core-audio

我正在使用CoreAudio低级API进行音频捕获。应用目标是MAC OSX,而不是iOS。

在测试过程中,我们会不时听到混杂在真实音频中的、非常恼人的噪音。这种现象随着时间的推移而加剧:起初几乎察觉不到,之后越来越明显。

分析Audacity下的捕获音频,表明音频数据包的结尾部分是错误的。

以下是示例图片: enter image description here

这种干扰每 40 ms 重复一次,而 40 ms 正是配置的数据包时长(以缓冲区采样数计)。

更新: 随着时间的推移,间隙变得越来越大。下面是 10 分钟后来自同一个捕获文件的另一个快照。间隙现在包含 1460 个采样,相当于数据包总时长 40ms 中的 33ms! enter image description here

CODE SNIPPETS:

捕获回调

// Input render callback registered on the AUHAL unit. Pulls the freshly
// captured frames into the pre-allocated m_InputBuffer and forwards them
// to the consumer callback (m_callbackFunc).
//
// NOTE(review): this runs on the real-time audio thread — m_callbackFunc
// must not allocate, lock, or do file/OS I/O, or it will cause dropouts.
OSStatus MacOS_AudioDevice::captureCallback(void *inRefCon,
                                            AudioUnitRenderActionFlags *ioActionFlags,
                                            const AudioTimeStamp *inTimeStamp,
                                            UInt32 inBusNumber,
                                            UInt32 inNumberFrames,
                                            AudioBufferList *ioData)
{
    MacOS_AudioDevice* _this = static_cast<MacOS_AudioDevice*>(inRefCon);

    // Pull the new audio data from the input side of the AUHAL.
    OSStatus err = AudioUnitRender(_this->m_AUHAL, ioActionFlags, inTimeStamp, inBusNumber, inNumberFrames, _this->m_InputBuffer);
    if (err != noErr)
    {
        // ... (error handling elided in the original snippet)

        return err;
    }

    // The HAL does not guarantee a fixed slice size: inNumberFrames may
    // legitimately differ from the configured buffer size. Discarding such
    // callbacks (instead of consuming all inNumberFrames frames) throws
    // away valid audio and produces exactly the kind of periodic gaps
    // described in the question — consider buffering partial slices here.
    if (_this->m_params.bufferSizeSamples != inNumberFrames)
    {
        // ... (the original code dropped the data here)

        return noErr;
    }

    // Deliver the captured audio to the registered consumer.
    // NOTE(review): bufferSizeBytes should be derived from inNumberFrames,
    // not the configured m_deviceBufferSizeBytes, once variable-sized
    // callbacks are handled instead of dropped.
    DeviceIOMessage message;
    message.bufferSizeBytes = _this->m_deviceBufferSizeBytes;
    message.buffer = _this->m_InputBuffer->mBuffers[0].mData;
    if (_this->m_callbackFunc)
    {
       _this->m_callbackFunc(_this, message);
    }

    // BUG FIX: the original fell off the end of a non-void function on the
    // success path (undefined behavior); report success explicitly.
    return noErr;
}

打开并启动捕获设备:

void MacOS_AudioDevice::openAUHALCapture()
{
    UInt32 enableIO;
    AudioStreamBasicDescription streamFormat;
    UInt32 size;
    SInt32 *channelArr;
    std::stringstream ss;
    AudioObjectPropertyAddress deviceBufSizeProperty =
    {
        kAudioDevicePropertyBufferFrameSize,
        kAudioDevicePropertyScopeInput,
        kAudioObjectPropertyElementMaster
    };

    // AUHAL
    AudioComponentDescription cd = {kAudioUnitType_Output, kAudioUnitSubType_HALOutput, kAudioUnitManufacturer_Apple, 0, 0};
    AudioComponent HALOutput = AudioComponentFindNext(NULL, &cd);
    verify_macosapi(AudioComponentInstanceNew(HALOutput, &m_AUHAL));

    verify_macosapi(AudioUnitInitialize(m_AUHAL));

    // enable input IO
    enableIO = 1;
    verify_macosapi(AudioUnitSetProperty(m_AUHAL, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, &enableIO, sizeof(enableIO)));

    // disable output IO
    enableIO = 0;
    verify_macosapi(AudioUnitSetProperty(m_AUHAL, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, &enableIO, sizeof(enableIO)));

    // Setup current device
    size = sizeof(AudioDeviceID);
    verify_macosapi(AudioUnitSetProperty(m_AUHAL, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, 0, &m_MacDeviceID, sizeof(AudioDeviceID)));

    // Set device native buffer length before setting AUHAL stream
    size = sizeof(m_originalDeviceBufferTimeFrames);
    verify_macosapi(AudioObjectSetPropertyData(m_MacDeviceID, &deviceBufSizeProperty, 0, NULL, size, &m_originalDeviceBufferTimeFrames));

    // Get device format
    size = sizeof(AudioStreamBasicDescription);
    verify_macosapi(AudioUnitGetProperty(m_AUHAL, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 1, &streamFormat, &size));

    // Setup channel map
    assert(m_params.numOfChannels <= streamFormat.mChannelsPerFrame);
    channelArr = new SInt32[streamFormat.mChannelsPerFrame];
    for (int i = 0; i < streamFormat.mChannelsPerFrame; i++)
        channelArr[i] = -1;
    for (int i = 0; i < m_params.numOfChannels; i++)
        channelArr[i] = i;

    verify_macosapi(AudioUnitSetProperty(m_AUHAL, kAudioOutputUnitProperty_ChannelMap, kAudioUnitScope_Input, 1, channelArr, sizeof(SInt32) * streamFormat.mChannelsPerFrame));
    delete [] channelArr;

    // Setup stream converters
    streamFormat.mFormatID = kAudioFormatLinearPCM;
    streamFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger;
    streamFormat.mFramesPerPacket = m_SamplesPerPacket;
    streamFormat.mBitsPerChannel = m_params.sampleDepthBits;
    streamFormat.mSampleRate = m_deviceSampleRate;
    streamFormat.mChannelsPerFrame = 1;
    streamFormat.mBytesPerFrame = 2;
    streamFormat.mBytesPerPacket = streamFormat.mFramesPerPacket * streamFormat.mBytesPerFrame;

    verify_macosapi(AudioUnitSetProperty(m_AUHAL, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &streamFormat, size));

    // Setup callbacks
    AURenderCallbackStruct input;
    input.inputProc = captureCallback;
    input.inputProcRefCon = this;
    verify_macosapi(AudioUnitSetProperty(m_AUHAL, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &input, sizeof(input)));

    // Calculate the size of the IO buffer (in samples)
    if (m_params.bufferSizeMS != -1)
    {
        unsigned int desiredSignalsInBuffer = (m_params.bufferSizeMS / (double)1000) * m_deviceSampleRate;

        // making sure the value stay in the device's supported range
        desiredSignalsInBuffer = std::min<unsigned int>(desiredSignalsInBuffer, m_deviceBufferFramesRange.mMaximum);
        desiredSignalsInBuffer = std::max<unsigned int>(m_deviceBufferFramesRange.mMinimum, desiredSignalsInBuffer);

        m_deviceBufferFrames = desiredSignalsInBuffer;
    }

    // Set device buffer length
    size = sizeof(m_deviceBufferFrames);
    verify_macosapi(AudioObjectSetPropertyData(m_MacDeviceID, &deviceBufSizeProperty, 0, NULL, size, &m_deviceBufferFrames));

    m_deviceBufferSizeBytes = m_deviceBufferFrames * streamFormat.mBytesPerFrame;
    m_deviceBufferTimeMS = 1000 * m_deviceBufferFrames/m_deviceSampleRate;

    // Calculate number of buffers from channels
    size = offsetof(AudioBufferList, mBuffers[0]) + (sizeof(AudioBuffer) * m_params.numOfChannels);

    // Allocate input buffer
    m_InputBuffer = (AudioBufferList *)malloc(size);
    m_InputBuffer->mNumberBuffers = m_params.numOfChannels;

    // Pre-malloc buffers for AudioBufferLists
    for(UInt32 i = 0; i< m_InputBuffer->mNumberBuffers ; i++)
    {
        m_InputBuffer->mBuffers[i].mNumberChannels = 1;
        m_InputBuffer->mBuffers[i].mDataByteSize = m_deviceBufferSizeBytes;
        m_InputBuffer->mBuffers[i].mData = malloc(m_deviceBufferSizeBytes);
    }

    // Update class properties
    m_params.sampleRateHz = streamFormat.mSampleRate;
    m_params.bufferSizeSamples = m_deviceBufferFrames;
    m_params.bufferSizeBytes = m_params.bufferSizeSamples * streamFormat.mBytesPerFrame;

}


// Start capturing on the AUHAL. A device that is already running, or has
// not been opened yet, is left untouched and reported as OK; a failed
// AudioOutputUnitStart is reported as ERROR.
eADMReturnCode MacOS_AudioDevice::start()
{
    eADMReturnCode ret = OK;
    LOGAPI(ret);

    // Nothing to do unless the device is open and not yet running.
    if (m_isStarted || !m_isOpen)
        return ret;

    if (AudioOutputUnitStart(m_AUHAL) != noErr)
    {
        ret = ERROR;
    }
    else
    {
        m_isStarted = true;
    }
    return ret;
}

知道是什么导致它以及如何解决?

提前致谢!

1 个答案:

答案 0 :(得分:2)

不注意或未完整处理每次音频回调收到的帧数,会导致周期性的毛刺或丢帧。有效的音频缓冲区并不总是包含预期数量或相同数量的采样(inNumberFrames 可能不等于 bufferSizeSamples,也可能不等于上一次完全有效的回调中的 inNumberFrames)。

这些类型的故障可能是由于尝试在某些型号的iOS设备上录制44.1k而导致的,这些设备仅支持硬件中的48k音频。

某些类型的故障也可能是由m_callbackFunc函数中的任何非硬实时代码引起的(例如任何同步文件读/写,OS调用,目标C消息调度,GC或内存分配/释放) )。