Using CoreAudio to access all inputs of a multi-channel audio interface

Asked: 2017-09-05 18:05:42

Tags: c++ macos audio signal-processing core-audio

I'm working from the CoreAudio sample code to talk to various audio interfaces. I took the CAPlayThrough sample (https://developer.apple.com/library/content/samplecode/CAPlayThrough/Introduction/Intro.html) and have been modifying small pieces of it. One of the main things I'm trying to accomplish is to access the input streams of all the input channels of my 4-channel audio interface simultaneously during playthrough. In my particular setup I have a Behringer U-Phoria 404 interface with 4 physical input channels that I want to access at the same time. In other words, if I plug microphones/instruments into channels 1, 2, 3 and 4 of the external interface, I want to be able to hear the output of all 4 channels at once, the way I would in a DAW such as Pro Tools, Logic, etc.

So I took the example from this technote (https://developer.apple.com/library/content/technotes/tn2091/_index.html) and went ahead and created my own channel map.

So what I did was write the following to set the channel map for my input AudioUnit:
    //Create channel map
    SInt32 *channelMap = NULL;
    UInt32 numOfChannels = this->streamFormat.mChannelsPerFrame;
    UInt32 mapSize = numOfChannels * sizeof(SInt32);
    channelMap = (SInt32 *)malloc(mapSize);

    // -1 means "no source": that client channel receives silence
    for (UInt32 i = 0; i < numOfChannels; i++)
    {
        channelMap[i] = -1;
    }
    channelMap[0] = 0;
    channelMap[1] = 1;
    channelMap[2] = 0;
    channelMap[3] = 1;
    AudioUnitSetProperty(mInputUnit,
                         kAudioOutputUnitProperty_ChannelMap,
                         kAudioUnitScope_Output,
                         1,
                         channelMap,
                         mapSize);
    free(channelMap);

The result is that I can only access data coming in on channels 1 and 2 of my interface. However, when I change the channel map to:

    channelMap[0] = 2;
    channelMap[1] = 3;

I can access data from channels 3 and 4 of the audio interface, but no longer from channels 1 and 2.
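
For reference, my current understanding of how the map is interpreted (an assumption on my part, not something I have confirmed): entry i of the map names the zero-based device channel that feeds client channel i, and -1 leaves that client channel silent. If that is right, pulling in all four inputs would also need the client-side stream format on the input unit's output scope (element 1) to be four channels wide, roughly like this:

    // Sketch, untested: identity map for a 4-channel client format.
    // Assumes asbd.mChannelsPerFrame has already been set to 4 on the
    // output scope of element 1 of the input AUHAL.
    UInt32 clientChannels = 4;
    UInt32 mapSize = clientChannels * sizeof(SInt32);
    SInt32 *channelMap = (SInt32 *)malloc(mapSize);
    for (UInt32 i = 0; i < clientChannels; i++)
        channelMap[i] = (SInt32)i;   // client channel i <- device channel i
    AudioUnitSetProperty(mInputUnit,
                         kAudioOutputUnitProperty_ChannelMap,
                         kAudioUnitScope_Output,
                         1,
                         channelMap,
                         mapSize);
    free(channelMap);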

So I figured I must also have to set a channel map for my output AudioUnit, and I set it like this:

    SInt32 *channelMap = NULL;
    UInt32 numOfChannels = this->streamFormat.mChannelsPerFrame;
    UInt32 mapSize = numOfChannels * sizeof(SInt32);
    channelMap = (SInt32 *)malloc(mapSize);

    for (UInt32 i = 0; i < numOfChannels; i++)
    {
        channelMap[i] = -1;
    }
    channelMap[0] = 0;
    channelMap[1] = 1;
    channelMap[2] = 0;
    channelMap[3] = 1;
    AudioUnitSetProperty(this->outputUnit,
                         kAudioOutputUnitProperty_ChannelMap,
                         kAudioUnitScope_Output,
                         1,
                         channelMap,
                         mapSize);
    free(channelMap);

The result is still the same.

What is the correct way to set the channel map? I have combed through Apple's documentation site and dug through their docs, but couldn't find anything more helpful than the technote linked above.

Update:

Besides modifying the channel map, these are some of the things I have tried so far:

  1. I thought that maybe each physical input on my 4-channel interface needs its own AudioUnit to represent it, with a channel map choosing which channels of the input stream it picks up. But when I set up multiple AudioUnits for input, whichever AudioUnit is set up last ends up acting as the default input. In addition, the AUGraph only lets me add one output node.

  2. Using an AudioUnit of type kAudioUnitSubType_StereoMixer to combine multiple streams with multiple output render callbacks. I was able to set up the render callbacks and wire them to the stereo mixer AudioUnit correctly, but I am not sure how to connect the input streams to the output (rough sketch below).
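
Roughly what I was attempting (the callback names OutputProcCh12 and OutputProcCh34 are hypothetical placeholders, and mGraph / mOutputNode stand for the graph and its output node; this is not working code):

    // Sketch: two stereo render callbacks feeding a stereo mixer node,
    // with the mixer connected to the graph's output node.
    AudioComponentDescription mixerDesc = {0};
    mixerDesc.componentType         = kAudioUnitType_Mixer;
    mixerDesc.componentSubType      = kAudioUnitSubType_StereoMixer;
    mixerDesc.componentManufacturer = kAudioUnitManufacturer_Apple;

    AUNode mixerNode;
    AUGraphAddNode(mGraph, &mixerDesc, &mixerNode);

    // one render callback per pair of input channels (hypothetical procs)
    AURenderCallbackStruct cb1 = { OutputProcCh12, this };
    AURenderCallbackStruct cb2 = { OutputProcCh34, this };
    AUGraphSetNodeInputCallback(mGraph, mixerNode, 0, &cb1);
    AUGraphSetNodeInputCallback(mGraph, mixerNode, 1, &cb2);

    // mixer output bus 0 -> output node input bus 0
    AUGraphConnectNodeInput(mGraph, mixerNode, 0, mOutputNode, 0);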

Update 2:

I should have added this earlier. This is from the CAPlayThrough sample listed above, where kAudioUnitProperty_StreamFormat is set up:

    //Get the Stream Format (Output client side)
    propertySize = sizeof(asbd_dev1_in);
    err = AudioUnitGetProperty(mInputUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 1, &asbd_dev1_in, &propertySize);
    checkErr(err);
    //printf("=====Input DEVICE stream format\n" ); 
    //asbd_dev1_in.Print();
    
    //Get the Stream Format (client side)
    propertySize = sizeof(asbd);
    err = AudioUnitGetProperty(mInputUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &asbd, &propertySize);       
    checkErr(err);
    //printf("=====current Input (Client) stream format\n");    
    //asbd.Print(); 
    
    //Get the Stream Format (Output client side)
    propertySize = sizeof(asbd_dev2_out);
    err = AudioUnitGetProperty(mOutputUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &asbd_dev2_out, &propertySize);
    checkErr(err);
    //printf("=====Output (Device) stream format\n");   
    //asbd_dev2_out.Print();
    
    //////////////////////////////////////
    //Set the format of all the AUs to the input/output devices channel count
    //For a simple case, you want to set this to the lower of count of the channels
    //in the input device vs output device
    //////////////////////////////////////
    asbd.mChannelsPerFrame = ((asbd_dev1_in.mChannelsPerFrame < asbd_dev2_out.mChannelsPerFrame) ? asbd_dev1_in.mChannelsPerFrame : asbd_dev2_out.mChannelsPerFrame);
    
    // We must get the sample rate of the input device and set it to the stream format of AUHAL
    propertySize = sizeof(Float64);
    AudioObjectPropertyAddress theAddress = { kAudioDevicePropertyNominalSampleRate,
                                              kAudioObjectPropertyScopeGlobal,
                                              kAudioObjectPropertyElementMaster };
    
    err = AudioObjectGetPropertyData(mInputDevice.mID, &theAddress, 0, NULL, &propertySize, &rate);
    checkErr(err);
    
    asbd.mSampleRate = rate;
    propertySize = sizeof(asbd);
    
    //Set the new formats to the AUs...
    err = AudioUnitSetProperty(mInputUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &asbd, propertySize);
    checkErr(err);  
    err = AudioUnitSetProperty(mVarispeedUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &asbd, propertySize);
    checkErr(err);
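
One thing I suspect, but have not confirmed, is that clamping asbd.mChannelsPerFrame to the smaller of the two devices (2 when the output is stereo) is exactly what limits me to two input channels at a time. This is how I would query the input device's real channel count instead (my own addition, not part of the sample):

    // Sketch: count the physical input channels the device exposes
    // (this should come back as 4 for the 4-input interface).
    AudioObjectPropertyAddress cfgAddress = { kAudioDevicePropertyStreamConfiguration,
                                              kAudioDevicePropertyScopeInput,
                                              kAudioObjectPropertyElementMaster };
    UInt32 cfgSize = 0;
    err = AudioObjectGetPropertyDataSize(mInputDevice.mID, &cfgAddress, 0, NULL, &cfgSize);
    checkErr(err);

    AudioBufferList *cfg = (AudioBufferList *)malloc(cfgSize);
    err = AudioObjectGetPropertyData(mInputDevice.mID, &cfgAddress, 0, NULL, &cfgSize, cfg);
    checkErr(err);

    UInt32 inputChannelCount = 0;
    for (UInt32 i = 0; i < cfg->mNumberBuffers; i++)
        inputChannelCount += cfg->mBuffers[i].mNumberChannels;
    free(cfg);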
    

This is how the sample sets up the input callback and the output callback.

Input callback setup:

    OSStatus err = noErr;
    AURenderCallbackStruct input;
    
    input.inputProc = InputProc;
    input.inputProcRefCon = this;
    
    //Setup the input callback. 
    err = AudioUnitSetProperty(mInputUnit, 
                              kAudioOutputUnitProperty_SetInputCallback, 
                              kAudioUnitScope_Global,
                              0,
                              &input, 
                              sizeof(input));
    

The input callback:

    OSStatus CAPlayThrough::InputProc(void *inRefCon,
                                      AudioUnitRenderActionFlags *ioActionFlags,
                                      const AudioTimeStamp *inTimeStamp,
                                      UInt32 inBusNumber,
                                      UInt32 inNumberFrames,
                                      AudioBufferList * ioData)
    {
        OSStatus err = noErr;

        CAPlayThrough *This = (CAPlayThrough *)inRefCon;
        if (This->mFirstInputTime < 0.)
            This->mFirstInputTime = inTimeStamp->mSampleTime;

        //Get the new audio data
        err = AudioUnitRender(This->mInputUnit,
                              ioActionFlags,
                              inTimeStamp,
                              inBusNumber,
                              inNumberFrames,      //# of frames requested
                              This->mInputBuffer); // Audio Buffer List to hold data
        checkErr(err);

        if (!err) {
            err = This->mBuffer->Store(This->mInputBuffer, Float64(inNumberFrames), SInt64(inTimeStamp->mSampleTime));
        }

        return err;
    }
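
Since InputProc renders into This->mInputBuffer, I assume that buffer also has to be allocated with one AudioBuffer per client channel, otherwise AudioUnitRender can never hand back more than two channels. A rough sketch of what I mean (512 is a placeholder; the real value would come from the device's kAudioDevicePropertyBufferFrameSize):

    // Sketch: allocate a non-interleaved AudioBufferList sized for the
    // client format's channel count (assumes Float32 samples).
    UInt32 bufferSizeFrames = 512;                      // placeholder value
    UInt32 channels = asbd.mChannelsPerFrame;
    UInt32 bytesPerBuffer = bufferSizeFrames * sizeof(Float32);

    AudioBufferList *abl = (AudioBufferList *)malloc(
        sizeof(AudioBufferList) + (channels - 1) * sizeof(AudioBuffer));
    abl->mNumberBuffers = channels;
    for (UInt32 i = 0; i < channels; i++) {
        abl->mBuffers[i].mNumberChannels = 1;           // one channel per buffer
        abl->mBuffers[i].mDataByteSize = bytesPerBuffer;
        abl->mBuffers[i].mData = malloc(bytesPerBuffer);
    }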
    

This is how the output callback is set up:

    output.inputProc = OutputProc;
    output.inputProcRefCon = this;
    
    err = AudioUnitSetProperty(mVarispeedUnit, 
                              kAudioUnitProperty_SetRenderCallback, 
                              kAudioUnitScope_Input,
                              0,
                              &output, 
                              sizeof(output));
    checkErr(err);  
    

And here is the output proc:

    OSStatus CAPlayThrough::OutputProc(void *inRefCon,
                                       AudioUnitRenderActionFlags *ioActionFlags,
                                       const AudioTimeStamp *TimeStamp,
                                       UInt32 inBusNumber,
                                       UInt32 inNumberFrames,
                                       AudioBufferList * ioData)
    {
        OSStatus err = noErr;
        CAPlayThrough *This = (CAPlayThrough *)inRefCon;
        Float64 rate = 0.0;
        AudioTimeStamp inTS, outTS;

        if (This->mFirstInputTime < 0.) {
            // input hasn't run yet -> silence
            MakeBufferSilent (ioData);
            return noErr;
        }

        //use the varispeed playback rate to offset small discrepancies in sample rate
        //first find the rate scalars of the input and output devices
        err = AudioDeviceGetCurrentTime(This->mInputDevice.mID, &inTS);
        // this callback may still be called a few times after the device has been stopped
        if (err)
        {
            MakeBufferSilent (ioData);
            return noErr;
        }

        err = AudioDeviceGetCurrentTime(This->mOutputDevice.mID, &outTS);
        checkErr(err);

        rate = inTS.mRateScalar / outTS.mRateScalar;
        err = AudioUnitSetParameter(This->mVarispeedUnit, kVarispeedParam_PlaybackRate, kAudioUnitScope_Global, 0, rate, 0);
        checkErr(err);

        //get Delta between the devices and add it to the offset
        if (This->mFirstOutputTime < 0.) {
            This->mFirstOutputTime = TimeStamp->mSampleTime;
            Float64 delta = (This->mFirstInputTime - This->mFirstOutputTime);
            This->ComputeThruOffset();
            //changed: 3865519 11/10/04
            if (delta < 0.0)
                This->mInToOutSampleOffset -= delta;
            else
                This->mInToOutSampleOffset = -delta + This->mInToOutSampleOffset;

            MakeBufferSilent (ioData);
            return noErr;
        }

        //copy the data from the buffers
        err = This->mBuffer->Fetch(ioData, inNumberFrames, SInt64(TimeStamp->mSampleTime - This->mInToOutSampleOffset));
        if (err != kCARingBufferError_OK)
        {
            MakeBufferSilent (ioData);
            SInt64 bufferStartTime, bufferEndTime;
            This->mBuffer->GetTimeBounds(bufferStartTime, bufferEndTime);
            This->mInToOutSampleOffset = TimeStamp->mSampleTime - bufferStartTime;
        }

        return noErr;
    }
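
For completeness, MakeBufferSilent is a small helper from the sample; as far as I remember it simply zeroes every buffer in the list, roughly:

    void MakeBufferSilent(AudioBufferList *ioData)
    {
        for (UInt32 i = 0; i < ioData->mNumberBuffers; i++)
            memset(ioData->mBuffers[i].mData, 0, ioData->mBuffers[i].mDataByteSize);
    }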
    

0 Answers
