播放流后,音频队列无法录制音频

时间:2012-10-20 14:30:48

标签: objective-c ios audio audioqueue

我正在使用Mat Gallager的Audio Streamer来播放音频流。在程序的另一个点(一个不同的控制器)我试图从设备的麦克风录制一些东西。

这是我的设置:

void SFIdentificator::startRecord()

{     int i,bufferByteSize;     UInt32大小;

try {
    numberOfPackets = 0;

    // specify the recording format
    SetupAudioFormat(kAudioFormatLinearPCM);

    AudioQueueNewInput(  &mRecordFormat,
                       MyInputBufferHandler,
                       this /* userData */,
                       CFRunLoopGetMain() /* run loop */, kCFRunLoopCommonModes /* run loop mode */,
                       0 /* flags */, &mQueue);
    mRecordPacket = 0;

    size = sizeof(mRecordFormat);
    AudioQueueGetProperty(mQueue, kAudioQueueProperty_StreamDescription, &mRecordFormat, &size);

    bufferByteSize = ComputeRecordBufferSize(&mRecordFormat, kBufferDurationSeconds);   // enough bytes for half a second


    size = sizeof(mRecordFormat);
    XThrowIfError(AudioQueueGetProperty(mQueue, kAudioQueueProperty_StreamDescription,
                                        &mRecordFormat, &size), "couldn't get queue's format");

    for (i = 0; i < kNumberRecordBuffers; ++i) {
        XThrowIfError(AudioQueueAllocateBuffer(mQueue, bufferByteSize, &mBuffers[i]), "AudioQueueAllocateBuffer failed");
        XThrowIfError(AudioQueueEnqueueBuffer(mQueue, mBuffers[i], 0, NULL), "AudioQueueEnqueueBuffer failed");
    }
    mIsRunning = true;

    XThrowIfError(AudioQueueStart(mQueue, NULL), "AudioQueueStart failed");


} catch (CAXException e) {
    char buf[256];
    fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
}catch (...) {
    fprintf(stderr, "An unknown error occurred\n");;
}

}

void SFIdentificator::SetupAudioFormat(UInt32 inFormatID)
{
    // Populate mRecordFormat for the requested format ID.
    // NOTE(review): the hardware sample rate / channel count queried below are
    // immediately overwritten with fixed values in the linear-PCM branch; the
    // queries are kept because their failures still surface via XThrowIfError.
    memset(&mRecordFormat, 0, sizeof(mRecordFormat));  // was garbled as "&amp;" in the paste

    UInt32 size = sizeof(mRecordFormat.mSampleRate);
    XThrowIfError(AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareSampleRate,
                                          &size, &mRecordFormat.mSampleRate),
                  "couldn't get hardware sample rate");

    size = sizeof(mRecordFormat.mChannelsPerFrame);
    XThrowIfError(AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareInputNumberChannels,
                                          &size, &mRecordFormat.mChannelsPerFrame),
                  "couldn't get input channel count");

    mRecordFormat.mFormatID = inFormatID;
    if (inFormatID == kAudioFormatLinearPCM) {
        // Signed 16-bit packed mono PCM at 32 kHz, one frame per packet.
        // (The original also computed mBytesPerPacket from mBitsPerChannel
        // while it was still 0 — a dead store overwritten below — and set
        // mFormatID a second time; both removed.)
        mRecordFormat.mFormatFlags      = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
        mRecordFormat.mSampleRate       = 32000.0;
        mRecordFormat.mChannelsPerFrame = 1;
        mRecordFormat.mBitsPerChannel   = 16;
        mRecordFormat.mFramesPerPacket  = 1;
        mRecordFormat.mBytesPerPacket   = mRecordFormat.mBytesPerFrame =
            mRecordFormat.mChannelsPerFrame * sizeof(SInt16);
    }
}

UInt32 SFIdentificator::ComputeRecordBufferSize(const AudioStreamBasicDescription *format, float seconds)
{
    // Return the buffer size (bytes) needed to hold `seconds` of audio in
    // `format`, capped at maxBufferSize (0x50000 = 320 KiB).
    static const int maxBufferSize = 0x50000;

    // CBR formats carry the packet size in the ASBD; for VBR it is 0, so ask
    // the queue for its upper bound instead.
    // NOTE(review): was declared `int` — AudioQueueGetProperty writes a
    // UInt32 through this pointer, so the declared type now matches.
    UInt32 maxPacketSize = format->mBytesPerPacket;
    if (maxPacketSize == 0) {
        UInt32 propSize = sizeof(maxPacketSize);
        AudioQueueGetProperty(mQueue, kAudioQueueProperty_MaximumOutputPacketSize,
                              &maxPacketSize, &propSize);
    }

    // packets/sec * bytes/packet * seconds. Use the caller-supplied `format`
    // rather than re-fetching DataFormat(): the parameter is the authoritative
    // source here (callers pass &mRecordFormat, so the value is identical).
    Float64 numBytesForTime = format->mSampleRate * maxPacketSize * seconds;
    return (UInt32)(numBytesForTime < maxBufferSize ? numBytesForTime : maxBufferSize);
}

似乎如果我先使用AudioStreamer类并尝试稍后记录某些内容,甚至不会调用Callback。但如果我不首先使用AudioStreamer,一切都很好。

有人能指出我正确的方向吗?

1 个答案:

答案 0(得分:0)

回答我自己的问题:问题出在 AudioSession 对象的 init() 调用上。AudioSession 每个会话只应初始化一次,再次初始化会返回错误——我原本以为正是这个错误导致无法录音。但事实上,即使第二次 init 失败了(第一次当然是成功的),我仍然可以正常录制音频;关键在于不要重复初始化 AudioSession。