核心音频内存问题

时间:2012-03-21 10:25:52

标签: ios xcode core-audio

我的音频单元分析项目存在一些内存问题,每次呈现一个音频单元(或在其周围)时,它正在分配一堆未被释放的内存,导致内存使用膨胀,该应用程序最终崩溃。

在 Instruments(性能分析工具)中,我注意到带有以下调用栈的 32 字节 malloc 反复出现,并且一直保持活跃(未被释放):

BufferedAudioConverter::AllocateBuffers() ×6
BufferedInputAudioConverter::BufferedInputAudioConverter(StreamDescPair const&) ×3

有什么思路可以判断问题出在哪里吗?这块内存是在流程的哪个环节被分配的,又该如何安全地释放它?

非常感谢。

该代码基于一些非Apple示例代码,来自sleepyleaf.com的PitchDetector

下面摘录了一些可能与问题相关的代码……如果需要更多代码,请告诉我。

renderErr = AudioUnitRender(rioUnit, ioActionFlags, 
                            inTimeStamp, bus1, inNumberFrames, THIS->bufferList); //128 inNumberFrames
if (renderErr < 0) {
    return renderErr;
}

// Fill the buffer with our sampled data. If we fill our buffer, run the
// fft.
int read = bufferCapacity - index;
if (read > inNumberFrames) {
    memcpy((SInt16 *)dataBuffer + index, THIS->bufferList->mBuffers[0].mData, inNumberFrames*sizeof(SInt16));
    THIS->index += inNumberFrames;
} else {  DO ANALYSIS

memset(outputBuffer,0,n * sizeof(SInt16));


/// Builds the AU processing graph: creates a RemoteIO unit, enables mic
/// input / disables speaker output, installs the FFT render callback, sets
/// the stream formats, and allocates the capture buffer list.
/// Side effects: initializes the ivars `processingGraph`, `ioUnit`,
/// `streamFormat` (via -ASBDForSoundMode) and `bufferList`.
- (void)createAUProcessingGraph {
OSStatus err;
// Configure the search parameters to find the default playback output unit
// (called the kAudioUnitSubType_RemoteIO on iOS but
// kAudioUnitSubType_DefaultOutput on Mac OS X)
AudioComponentDescription ioUnitDescription;
ioUnitDescription.componentType = kAudioUnitType_Output;
ioUnitDescription.componentSubType = kAudioUnitSubType_RemoteIO;
ioUnitDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
ioUnitDescription.componentFlags = 0;
ioUnitDescription.componentFlagsMask = 0;

// Declare and instantiate an audio processing graph.
NewAUGraph(&processingGraph);

// Add an audio unit node to the graph, then instantiate the audio unit.
/*
 An AUNode is an opaque type that represents an audio unit in the context
 of an audio processing graph. You receive a reference to the new audio unit
 instance, in the ioUnit parameter, on output of the AUGraphNodeInfo
 function call.
 */
AUNode ioNode;
AUGraphAddNode(processingGraph, &ioUnitDescription, &ioNode);

AUGraphOpen(processingGraph); // indirectly performs audio unit instantiation

// Obtain a reference to the newly-instantiated I/O unit. Each Audio Unit
// requires its own configuration.
AUGraphNodeInfo(processingGraph, ioNode, NULL, &ioUnit);

// Initialize below.
AURenderCallbackStruct callbackStruct = {0};
UInt32 enableInput;
UInt32 enableOutput;

// Enable input (microphone) and disable output (speaker): this graph only
// analyzes incoming audio, it never plays anything back.
enableInput = 1; enableOutput = 0;
callbackStruct.inputProc = RenderFFTCallback;
callbackStruct.inputProcRefCon = (__bridge void*)self;

err = AudioUnitSetProperty(ioUnit, kAudioOutputUnitProperty_EnableIO,
                           kAudioUnitScope_Input,
                           kInputBus, &enableInput, sizeof(enableInput));
if (err != noErr) { NSLog(@"EnableIO (input) failed: %ld", (long)err); }

err = AudioUnitSetProperty(ioUnit, kAudioOutputUnitProperty_EnableIO,
                           kAudioUnitScope_Output,
                           kOutputBus, &enableOutput, sizeof(enableOutput));
if (err != noErr) { NSLog(@"EnableIO (output) failed: %ld", (long)err); }

err = AudioUnitSetProperty(ioUnit, kAudioOutputUnitProperty_SetInputCallback,
                           kAudioUnitScope_Input,
                           kOutputBus, &callbackStruct, sizeof(callbackStruct));
if (err != noErr) { NSLog(@"SetInputCallback failed: %ld", (long)err); }

// Set the stream format. -ASBDForSoundMode fills in `streamFormat` and
// returns the sample size in bytes, used below to size the capture buffer.
size_t bytesPerSample = [self ASBDForSoundMode];

// Output scope of the input bus: the format the hardware delivers to us.
err = AudioUnitSetProperty(ioUnit, kAudioUnitProperty_StreamFormat,
                           kAudioUnitScope_Output,
                           kInputBus, &streamFormat, sizeof(streamFormat));
if (err != noErr) { NSLog(@"StreamFormat (input bus) failed: %ld", (long)err); }

// Input scope of the output bus: the format we would feed to the speaker.
err = AudioUnitSetProperty(ioUnit, kAudioUnitProperty_StreamFormat,
                           kAudioUnitScope_Input,
                           kOutputBus, &streamFormat, sizeof(streamFormat));
if (err != noErr) { NSLog(@"StreamFormat (output bus) failed: %ld", (long)err); }

// Disable system buffer allocation. We'll do it ourselves.
UInt32 flag = 0;
err = AudioUnitSetProperty(ioUnit, kAudioUnitProperty_ShouldAllocateBuffer,
                           kAudioUnitScope_Output,
                           kInputBus, &flag, sizeof(flag));
if (err != noErr) { NSLog(@"ShouldAllocateBuffer failed: %ld", (long)err); }

// Allocate an AudioBufferList for use when listening.
// BUG FIX: the original allocated only sizeof(AudioBuffer), which is too
// small — AudioBufferList holds a mNumberBuffers header *plus* one inline
// AudioBuffer, so writing mNumberBuffers/mBuffers[0] overflowed the heap
// block. sizeof(AudioBufferList) covers the header and one buffer.
// TODO: Move into initialization...should only be required once (calling
// this method twice currently leaks the previous bufferList and its mData).
    bufferList = (AudioBufferList *)malloc(sizeof(AudioBufferList));
    bufferList->mNumberBuffers = 1;
    bufferList->mBuffers[0].mNumberChannels = 1;

    // 512 frames of mono samples; calloc zero-fills so the first render
    // callback never reads uninitialized memory.
    bufferList->mBuffers[0].mDataByteSize = 512*bytesPerSample;
    bufferList->mBuffers[0].mData = calloc(512, bytesPerSample);

}

1 个答案:

答案 0(得分:0):

我设法找到并解决了问题,该问题位于上面未公布的代码区域。

在下一步中,使用AudioConverter对象将输出缓冲区转换为不同的数字格式。但是,转换器对象没有被丢弃,并且保留在存储器中。我使用AudioConverterDispose修复它,如下所示:

err = AudioConverterNew(&inFormat, &outFormat, &converter);
err = AudioConverterConvertBuffer(converter, inSize, buf, &outSize, outputBuf);

err = AudioConverterDispose (converter);