I am converting audio from the following format:
const int four_bytes_per_float = 4;
const int eight_bits_per_byte = 8;
_stereoGraphStreamFormat.mFormatID = kAudioFormatLinearPCM;
_stereoGraphStreamFormat.mFormatFlags = kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved;
_stereoGraphStreamFormat.mBytesPerPacket = four_bytes_per_float;
_stereoGraphStreamFormat.mFramesPerPacket = 1;
_stereoGraphStreamFormat.mBytesPerFrame = four_bytes_per_float;
_stereoGraphStreamFormat.mChannelsPerFrame = 2;
_stereoGraphStreamFormat.mBitsPerChannel = eight_bits_per_byte * four_bytes_per_float;
_stereoGraphStreamFormat.mSampleRate = 44100;
to this format:
interleavedAudioDescription.mFormatID = kAudioFormatLinearPCM;
interleavedAudioDescription.mFormatFlags = kAudioFormatFlagIsSignedInteger;
interleavedAudioDescription.mChannelsPerFrame = 2;
interleavedAudioDescription.mBytesPerPacket = sizeof(SInt16)*interleavedAudioDescription.mChannelsPerFrame;
interleavedAudioDescription.mFramesPerPacket = 1;
interleavedAudioDescription.mBytesPerFrame = sizeof(SInt16)*interleavedAudioDescription.mChannelsPerFrame;
interleavedAudioDescription.mBitsPerChannel = 8 * sizeof(SInt16);
interleavedAudioDescription.mSampleRate = 44100;
using the following code:
int32_t availableBytes = 0;
void* tailL = TPCircularBufferTail(inputBufferL(), &availableBytes);
void* tailR = TPCircularBufferTail(inputBufferR(), &availableBytes);
// If we have no data in the buffer, we simply return
if (availableBytes <= 0)
{
    return;
}
// ========== Non-Interleaved to Interleaved (Plus Samplerate Conversion) =========
// Get the number of frames available
UInt32 frames = availableBytes / this->mInputFormat.mBytesPerFrame;
pcmOutputBuffer->mBuffers[0].mDataByteSize = frames * interleavedAudioDescription.mBytesPerFrame;
struct complexInputDataProc_t data = (struct complexInputDataProc_t) { .self = this, .sourceL = tailL, .sourceR = tailR, .byteLength = availableBytes };
// Do the conversion
OSStatus result = AudioConverterFillComplexBuffer(interleavedAudioConverter,
                                                  complexInputDataProc,
                                                  &data,
                                                  &frames,
                                                  pcmOutputBuffer,
                                                  NULL);
// Tell the buffers how much data we consumed during the conversion so that it can be removed
TPCircularBufferConsume(inputBufferL(), availableBytes);
TPCircularBufferConsume(inputBufferR(), availableBytes);
// ========== Buffering Of Interleaved Samples =========
// If we got converted frames back from the converter, we want to add it to a separate buffer
if (frames > 0)
{
    // Make sure we have enough space in the buffer to store the new data
    TPCircularBufferHead(&pcmCircularBuffer, &availableBytes);
    if (availableBytes > pcmOutputBuffer->mBuffers[0].mDataByteSize)
    {
        // Add the newly converted data to the buffer
        TPCircularBufferProduceBytes(&pcmCircularBuffer, pcmOutputBuffer->mBuffers[0].mData, frames * interleavedAudioDescription.mBytesPerFrame);
    }
    else
    {
        printf("No Space in Buffer\n");
    }
}
However, I get the following output:
It should be a perfect sine wave, but as you can see it is not.
I have been working on this for days now and just cannot find where it is going wrong. Can anyone see something I might be missing?
EDIT:
Buffer initialization:
TPCircularBuffer pcmCircularBuffer;
static SInt16 pcmOutputBuf[BUFFER_SIZE];
pcmOutputBuffer = (AudioBufferList*)malloc(sizeof(AudioBufferList));
pcmOutputBuffer->mNumberBuffers = 1;
pcmOutputBuffer->mBuffers[0].mNumberChannels = 2;
pcmOutputBuffer->mBuffers[0].mData = pcmOutputBuf;
TPCircularBufferInit(&pcmCircularBuffer, BUFFER_SIZE);
Complex input data proc:
static OSStatus complexInputDataProc(AudioConverterRef inAudioConverter,
                                     UInt32 *ioNumberDataPackets,
                                     AudioBufferList *ioData,
                                     AudioStreamPacketDescription **outDataPacketDescription,
                                     void *inUserData) {
    struct complexInputDataProc_t *arg = (struct complexInputDataProc_t*)inUserData;
    BroadcastingServices::MP3Encoder *self = (BroadcastingServices::MP3Encoder*)arg->self;

    if ( arg->byteLength <= 0 )
    {
        *ioNumberDataPackets = 0;
        return 100; //kNoMoreDataErr;
    }

    UInt32 framesAvailable = arg->byteLength / self->interleavedAudioDescription.mBytesPerFrame;
    if (*ioNumberDataPackets > framesAvailable)
    {
        *ioNumberDataPackets = framesAvailable;
    }

    ioData->mBuffers[0].mData = arg->sourceL;
    ioData->mBuffers[0].mDataByteSize = arg->byteLength;
    ioData->mBuffers[1].mData = arg->sourceR;
    ioData->mBuffers[1].mDataByteSize = arg->byteLength;
    arg->byteLength = 0;

    return noErr;
}
Answer 0 (score: 2)
I see a few things that raise red flags.
1) As mentioned in the comments above, you are overwriting the availableBytes value you got for the left input with the one from the right input:
void* tailL = TPCircularBufferTail(inputBufferL(), &availableBytes);
void* tailR = TPCircularBufferTail(inputBufferR(), &availableBytes);
If the two input streams are changing asynchronously to this code, then you almost certainly have a race condition there.
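A minimal sketch of the fix, reusing the question's names, so treat it as illustrative rather than drop-in code:
int32_t availableBytesInL = 0;
int32_t availableBytesInR = 0;
void* tailL = TPCircularBufferTail(inputBufferL(), &availableBytesInL);
void* tailR = TPCircularBufferTail(inputBufferR(), &availableBytesInR);
// Only process the amount of data that is present in *both* channel buffers.
int32_t availableBytes = availableBytesInL < availableBytesInR ? availableBytesInL : availableBytesInR;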
2) Truncation errors: availableBytes is not necessarily a multiple of the bytes per frame. If it isn't, the following code could cause you to consume more bytes than you actually converted.
void* tailL = TPCircularBufferTail(inputBufferL(), &availableBytes);
void* tailR = TPCircularBufferTail(inputBufferR(), &availableBytes);
...
UInt32 frames = availableBytes / this->mInputFormat.mBytesPerFrame;
...
TPCircularBufferConsume(inputBufferL(), availableBytes);
TPCircularBufferConsume(inputBufferR(), availableBytes);
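A minimal sketch of the rounding, again reusing the question's names purely as an illustration: compute the whole-frame byte count once and use it for both the conversion and the consume calls.
UInt32 frames = availableBytes / this->mInputFormat.mBytesPerFrame;
int32_t wholeFrameBytes = frames * this->mInputFormat.mBytesPerFrame; // rounded down to a frame boundary
...
TPCircularBufferConsume(inputBufferL(), wholeFrameBytes);
TPCircularBufferConsume(inputBufferR(), wholeFrameBytes);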
3) If the output buffer is not ready to take all of the converted input, you simply throw that input away. That happens in this code:
if (availableBytes > pcmOutputBuffer->mBuffers[0].mDataByteSize)
{
    ...
}
else
{
    printf("No Space in Buffer\n");
}
I'd be very curious whether you ever see that printout.
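In other words, the free space in pcmCircularBuffer needs to be checked before the input is consumed, not after the conversion. A minimal sketch (framesToConvert is just a placeholder for however many frames you intend to convert; the complete version follows below):
int32_t spaceInOutput = 0;
TPCircularBufferHead(&pcmCircularBuffer, &spaceInOutput);
if (spaceInOutput < framesToConvert * interleavedAudioDescription.mBytesPerFrame)
{
    return; // leave the input in its ring buffers and try again next time
}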
Here is how I would suggest doing it. It will be pseudo-code, since I don't have what I'd need to compile and test it.
int32_t availableBytesInL = 0;
int32_t availableBytesInR = 0;
int32_t availableBytesOut = 0;
// figure out how many bytes are available in each buffer.
void* tailL = TPCircularBufferTail(inputBufferL(), &availableBytesInL);
void* tailR = TPCircularBufferTail(inputBufferR(), &availableBytesInR);
TPCircularBufferHead(&pcmCircularBuffer, &availableBytesOut);
// figure out how many full frames are available
UInt32 framesInL = availableBytesInL / mInputFormat.mBytesPerFrame;
UInt32 framesInR = availableBytesInR / mInputFormat.mBytesPerFrame;
UInt32 framesOut = availableBytesOut / interleavedAudioDescription.mBytesPerFrame;
// figure out how many frames to process this time.
UInt32 frames = min(min(framesInL, framesInR), framesOut);
if (frames == 0)
    return;
int32_t bytesConsumed = frames * mInputFormat.mBytesPerFrame;
struct complexInputDataProc_t data = (struct complexInputDataProc_t) {
.self = this, .sourceL = tailL, .sourceR = tailR, .byteLength = bytesConsumed };
// Do the conversion
OSStatus result = AudioConverterFillComplexBuffer(interleavedAudioConverter,
                                                  complexInputDataProc,
                                                  &data,
                                                  &frames,
                                                  pcmOutputBuffer,
                                                  NULL);
int32_t bytesProduced = frames * interleavedAudioDescription.mBytesPerFrame;
// Tell the buffers how much data we consumed during the conversion so that it can be removed
TPCircularBufferConsume(inputBufferL(), bytesConsumed);
TPCircularBufferConsume(inputBufferR(), bytesConsumed);
TPCircularBufferProduceBytes(&pcmCircularBuffer, pcmOutputBuffer->mBuffers[0].mData, bytesProduced);
Basically what I've done here is to figure out up front how many frames should be processed, making sure I only process as many as the output buffer can handle. If it were me, I'd also add some checking for buffer underruns on the output and buffer overruns on the input. Finally, I'm not entirely sure about the semantics of AudioConverterFillComplexBuffer with respect to the frames value passed in and out. It's conceivable that the number of frames out could be less than or greater than the number of frames in; although, since you're not doing a sample rate conversion, that probably won't happen here. I've attempted to account for that case by assigning bytesProduced after the conversion.
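For what it's worth, the frames argument to AudioConverterFillComplexBuffer is documented as in/out: on entry it is the output capacity in packets, and on return it holds the number of packets (frames, for PCM) actually produced. A small sketch of the post-conversion bookkeeping under that reading, using the names from the code above:
// 'frames' held the requested output capacity before the call; afterwards it is
// the number of frames the converter actually wrote into pcmOutputBuffer.
if (result == noErr || result == 100 /* the question's "no more data" sentinel */)
{
    int32_t bytesProduced = frames * interleavedAudioDescription.mBytesPerFrame;
    TPCircularBufferProduceBytes(&pcmCircularBuffer, pcmOutputBuffer->mBuffers[0].mData, bytesProduced);
}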
Hope that helps. If it doesn't, you have two other clues to go on: one, the dropouts are periodic, and two, they all look to be roughly the same size. If you can work out how many samples each of those corresponds to, you can look for those numbers in your code.
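As a rough worked example (the 10 ms figure here is invented; read the actual dropout length off your plot):
double dropoutSeconds = 0.010;                              // measured from the waveform
UInt32 dropoutFrames = (UInt32)(dropoutSeconds * 44100.0);  // ~441 frames at 44.1 kHz
UInt32 interleavedBytes = dropoutFrames * 4;                // 2 channels * sizeof(SInt16) = ~1764 bytes
UInt32 floatBytesPerChannel = dropoutFrames * 4;            // sizeof(float) per channel   = ~1764 bytes per channel
If a number in that neighbourhood matches one of your buffer or chunk sizes, that's the place to look.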
Answer 1 (score: 0)
I don't think your output buffer, pcmCircularBuffer, is big enough.
Try replacing
TPCircularBufferInit(&pcmCircularBuffer, BUFFER_SIZE);
with
TPCircularBufferInit(&pcmCircularBuffer, sizeof(pcmOutputBuf));
Even if that turns out to be the solution, I think there are a few problems with your code. I don't know exactly what you're doing; I'm guessing you're encoding MP3 (which is an uphill battle on iOS in itself; why not use hardware AAC?), but unless you have real-time requirements on both input and output, why use ring buffers at all? Also, I recommend using units to catch frame/byte size mismatches in your types at a glance, e.g. BUFFER_SIZE_IN_FRAMES.
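A minimal sketch of that naming convention (the names and sizes are illustrative, not values from the question):
enum { BUFFER_SIZE_IN_FRAMES = 4096 };
static const UInt32 kBytesPerInterleavedFrame = 2 /* channels */ * sizeof(SInt16);
static SInt16 pcmOutputBuf[BUFFER_SIZE_IN_FRAMES * 2 /* channels */];
// Size the ring buffer and the scratch buffer from the same frame count so they cannot silently disagree.
TPCircularBufferInit(&pcmCircularBuffer, BUFFER_SIZE_IN_FRAMES * kBytesPerInterleavedFrame);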
If that's not the solution, then I'd like to see the sine wave generator.