I am using AudioQueue to record audio from the built-in microphone and send it over a socket. I have set the AudioQueue up to buffer 30 seconds of recording at a time and allocated the bufferSize accordingly. This is the function I use to set up the AudioDataFormat:
AudioStreamBasicDescription sRecordFormat;
FillOutASBDForLPCM(sRecordFormat,
                   16000,   // sample rate (Hz)
                   1,       // channels per frame
                   16,      // valid bits per channel
                   16,      // total bits per channel
                   false,   // is float
                   false    // is big-endian
                   );
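For reference, my understanding of what those arguments produce, written out as a hand-filled ASBD. This is only a sketch of what FillOutASBDForLPCM should yield for 16 kHz, mono, 16-bit signed-integer, little-endian, interleaved LPCM; the field values are my own reading, not output from the code above:

// Equivalent ASBD filled out by hand -- should match what FillOutASBDForLPCM produces
AudioStreamBasicDescription sRecordFormat = {0};
sRecordFormat.mSampleRate       = 16000;
sRecordFormat.mFormatID         = kAudioFormatLinearPCM;
sRecordFormat.mFormatFlags      = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
sRecordFormat.mChannelsPerFrame = 1;
sRecordFormat.mBitsPerChannel   = 16;
sRecordFormat.mBytesPerFrame    = 2;   // 1 channel * 16 bits
sRecordFormat.mFramesPerPacket  = 1;   // always 1 frame per packet for LPCM
sRecordFormat.mBytesPerPacket   = 2;   // mBytesPerFrame * mFramesPerPacket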
The following code computes the bufferSize that needs to be allocated to capture the audio:
int AQRecorder::ComputeRecordBufferSize(const AudioStreamBasicDescription *format, float seconds)
{
    int packets, frames, bytes = 0;
    try {
        frames = (int)ceil(seconds * format->mSampleRate);

        if (format->mBytesPerFrame > 0)
            bytes = frames * format->mBytesPerFrame;
        else {
            UInt32 maxPacketSize;
            if (format->mBytesPerPacket > 0)
                maxPacketSize = format->mBytesPerPacket;    // constant packet size
            else {
                UInt32 propertySize = sizeof(maxPacketSize);
                XThrowIfError(AudioQueueGetProperty(mQueue, kAudioQueueProperty_MaximumOutputPacketSize,
                                                    &maxPacketSize, &propertySize),
                              "couldn't get queue's maximum output packet size");
            }
            if (format->mFramesPerPacket > 0)
                packets = frames / format->mFramesPerPacket;
            else
                packets = frames;    // worst-case scenario: 1 frame in a packet
            if (packets == 0)        // sanity check
                packets = 1;
            bytes = packets * maxPacketSize;
        }
    } catch (CAXException e) {
        char buf[256];
        gLog << [[NSString stringWithFormat:@"Error:%s (%s)\n", e.mOperation, e.FormatError(buf)] UTF8String] << endl;
        return 0;
    }
    return bytes;
}
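For the LPCM format above, mBytesPerFrame is 2 (greater than 0), so only the first branch is taken. A quick worked example of the resulting sizes (my own arithmetic, not from the original post):

// bytes = ceil(seconds * mSampleRate) * mBytesPerFrame
int bytesFor30s  = (int)ceil(30.0 * 16000) * 2;   // 960000 bytes for a 30 s buffer
int bytesFor20ms = (int)ceil(0.02 * 16000) * 2;   //    640 bytes for a 20 ms buffer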
The following sample code allocates the buffers:
// allocate and enqueue buffers
bufferByteSize = ComputeRecordBufferSize(&mRecordFormat, kBufferDurationSeconds);  // enough bytes for 20 ms
for (i = 0; i < kNumberRecordBuffers; ++i) {
    XThrowIfError(AudioQueueAllocateBuffer(mQueue, bufferByteSize, &mBuffers[i]),
                  "AudioQueueAllocateBuffer failed");
    XThrowIfError(AudioQueueEnqueueBuffer(mQueue, mBuffers[i], 0, NULL),
                  "AudioQueueEnqueueBuffer failed");
}
Yes, you guessed it, most of this code is taken from the SpeakHere sample. Coming to the AudioCallback: I need to capture the buffer and send it over the socket to the other machine.
// ____________________________________________________________________________________
// AudioQueue callback function, called when an input buffer has been filled.
void AQRecorder::MyInputBufferHandler(void *                              inUserData,
                                      AudioQueueRef                       inAQ,
                                      AudioQueueBufferRef                 inBuffer,
                                      const AudioTimeStamp *              inStartTime,
                                      UInt32                              inNumPackets,
                                      const AudioStreamPacketDescription *inPacketDesc)
{
    AQRecorder *aqr = (AQRecorder *)inUserData;
    try {
        NSLog(@"Inside AudioBufferCallback no of packets [%u]", (unsigned int)inNumPackets);
        if (inNumPackets > 0) {
            // write packets to file
            // This is only for the test
            XThrowIfError(AudioFileWritePackets(aqr->mRecordFile, FALSE, inBuffer->mAudioDataByteSize,
                                                inPacketDesc, aqr->mRecordPacket, &inNumPackets, inBuffer->mAudioData),
                          "AudioFileWritePackets failed");
            aqr->mRecordPacket += inNumPackets;

            // hand the raw buffer to the listener that sends it over the socket
            if (aqr->pInputListener) {
                aqr->pInputListener(aqr->pClientUserData, inBuffer->mAudioData, (int)inBuffer->mAudioDataByteSize);
            }
        }

        // if we're not stopping, re-enqueue the buffer so that it gets filled again
        if (aqr->IsRunning())
            XThrowIfError(AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL), "AudioQueueEnqueueBuffer failed");
    } catch (CAXException e) {
        char buf[256];
        gLog << [[NSString stringWithFormat:@"Error:%s (%s)\n", e.mOperation, e.FormatError(buf)] UTF8String] << endl;
    }
}
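The callback only hands the raw bytes to pInputListener; the actual sending happens elsewhere. Below is a minimal sketch of what such a listener could look like over a plain POSIX socket. The name SocketInputListener and the variable gSocketFD are my assumptions for illustration, not part of the SpeakHere sample or my project code:

#include <sys/socket.h>

static int gSocketFD = -1;   // assumed to be an already-connected TCP socket

static void SocketInputListener(void *inClientData, const void *inData, int inDataByteSize)
{
    const char *p = (const char *)inData;
    int remaining = inDataByteSize;
    while (remaining > 0) {                        // send() may write fewer bytes than requested
        ssize_t sent = send(gSocketFD, p, remaining, 0);
        if (sent <= 0) break;                      // error or closed socket: give up on the rest
        p += sent;
        remaining -= (int)sent;
    }
}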
Now, when I look at the log the data is coming in, but it says the number of packets is something like 256 or 320, and when I pass the data to the other end it is not audible. Can anyone tell me what I need to do with the packetSize? I was under the impression that bufferSize was enough to send the data, but I am guessing there is something more to do with the packets.
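For what it's worth, those packet counts line up with the format above (my own arithmetic, not something stated in the post): with uncompressed LPCM one packet is one 2-byte frame, so 320 packets is 640 bytes, i.e. 320 / 16000 Hz = 20 ms of audio, and 256 packets is 512 bytes, roughly 16 ms.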
Answer 0 (score 0):
There were a few problems in the buffer management. This application needs to send data every 20 ms ± 5 ms, and in my case that particular requirement was not being handled.
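In other words (my reading of the answer, reusing the names from the SpeakHere-derived code above, so the constants here are assumptions): size each queue buffer for roughly 20 ms instead of 30 s, and keep a few buffers enqueued so the callback fires on that cadence. A minimal sketch:

// Assumed constants, sized for a ~20 ms delivery cadence rather than one 30 s buffer
static const float kBufferDurationSeconds = 0.02f;  // one filled buffer roughly every 20 ms
static const int   kNumberRecordBuffers   = 3;      // keep several buffers queued so capture never starves

bufferByteSize = ComputeRecordBufferSize(&mRecordFormat, kBufferDurationSeconds);  // 640 bytes for 16 kHz mono 16-bit
for (int i = 0; i < kNumberRecordBuffers; ++i) {
    XThrowIfError(AudioQueueAllocateBuffer(mQueue, bufferByteSize, &mBuffers[i]),
                  "AudioQueueAllocateBuffer failed");
    XThrowIfError(AudioQueueEnqueueBuffer(mQueue, mBuffers[i], 0, NULL),
                  "AudioQueueEnqueueBuffer failed");
}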