所以我尝试用Accelerate.framework读取关于FFT的所有内容,并得到一个与MTAudioProcessingTap一起工作的示例,但我觉得我做错了,我的绘图点看起来不应该是这样的
#import "AudioTap.h"
#pragma mark - TapContext
// Per-tap state shared by the MTAudioProcessingTap C callbacks.
// Allocated in TapInit, retrieved via MTAudioProcessingTapGetStorage,
// and released in TapFinalize.
typedef struct TapContext {
void *audioTap; // unretained (__bridge) pointer back to the owning AudioTap instance
Float64 sampleRate; // filled in by TapPrepare from the stream's ASBD; NAN until then
UInt32 numSamples; // FFT length (4096 in TapInit)
FFTSetup fftSetup; // vDSP FFT setup for log2(numSamples)
COMPLEX_SPLIT split; // split-complex buffers for vDSP_fft_zrip — numSamples/2 floats each
float *window; // Hann window coefficients, length numSamples
float *inReal; // windowed input samples, length numSamples
} TapContext;
#pragma mark - AudioTap Callbacks
static void TapInit(MTAudioProcessingTapRef tap, void *clientInfo, void **tapStorageOut)
{
    // Build the per-tap context: vDSP buffers sized for a fixed 4096-point
    // real FFT (the split-complex halves each hold numSamples/2 floats).
    TapContext *ctx = calloc(1, sizeof(TapContext));
    ctx->audioTap = clientInfo;
    ctx->sampleRate = NAN; // unknown until TapPrepare supplies the format
    ctx->numSamples = 4096;

    vDSP_Length log2n = log2f((float)ctx->numSamples);
    int halfCount = ctx->numSamples / 2;

    ctx->inReal = (float *)malloc(ctx->numSamples * sizeof(float));
    ctx->split.realp = (float *)malloc(halfCount * sizeof(float));
    ctx->split.imagp = (float *)malloc(halfCount * sizeof(float));
    ctx->fftSetup = vDSP_create_fftsetup(log2n, FFT_RADIX2);

    // Precompute the Hann window once; applied to every frame in TapProcess.
    ctx->window = (float *)malloc(ctx->numSamples * sizeof(float));
    vDSP_hann_window(ctx->window, ctx->numSamples, vDSP_HANN_DENORM);

    *tapStorageOut = ctx;
}
static void TapPrepare(MTAudioProcessingTapRef tap, CMItemCount numberFrames, const AudioStreamBasicDescription *format)
{
    // Record the stream's sample rate and log two format traits that matter
    // for interpreting the buffers later in TapProcess.
    TapContext *ctx = (TapContext *)MTAudioProcessingTapGetStorage(tap);
    ctx->sampleRate = format->mSampleRate;

    BOOL nonInterleaved = (format->mFormatFlags & kAudioFormatFlagIsNonInterleaved) != 0;
    BOOL integerSamples = (format->mFormatFlags & kAudioFormatFlagIsSignedInteger) != 0;

    if (nonInterleaved) {
        NSLog(@"is Non Interleaved");
    }
    if (integerSamples) {
        NSLog(@"dealing with integers");
    }
}
static void TapProcess(MTAudioProcessingTapRef tap, CMItemCount numberFrames, MTAudioProcessingTapFlags flags,
AudioBufferList *bufferListInOut, CMItemCount *numberFramesOut, MTAudioProcessingTapFlags *flagsOut)
{
    // Pull source audio through the tap, compute a magnitude spectrum of the
    // first channel, and forward it to the owning AudioTap object.
    OSStatus status = MTAudioProcessingTapGetSourceAudio(tap, numberFrames, bufferListInOut, flagsOut, NULL, numberFramesOut);
    if (status != noErr) {
        NSLog(@"MTAudioProcessingTapGetSourceAudio: %d", (int)status);
        return;
    }

    TapContext *context = (TapContext *)MTAudioProcessingTapGetStorage(tap);

    // BUGFIX: use the FIRST buffer (index 0), not index 1 — for interleaved
    // audio there is only one buffer and mBuffers[1] is out of bounds.
    AudioBuffer *firstBuffer = &bufferListInOut->mBuffers[0];
    float *bufferData = firstBuffer->mData;
    if (bufferData == NULL) {
        return;
    }

    // vDSP_vmul below reads numSamples floats unconditionally; skip the FFT
    // when the callback delivered fewer samples to avoid reading past mData.
    // NOTE(review): this assumes mData holds float samples — if the track
    // decodes to SInt16/SInt32 (see TapPrepare's log), it must be converted
    // first (e.g. vDSP_vflt16 + scaling); confirm against the actual format.
    UInt32 availableSamples = firstBuffer->mDataByteSize / sizeof(float);
    if (availableSamples < context->numSamples) {
        return;
    }

    // Window the input, pack it into split-complex form, and run the
    // in-place real-to-complex forward FFT.
    vDSP_vmul(bufferData, 1, context->window, 1, context->inReal, 1, context->numSamples);
    vDSP_ctoz((COMPLEX *)context->inReal, 2, &context->split, 1, context->numSamples / 2);
    vDSP_Length log2n = log2f((float)context->numSamples);
    vDSP_fft_zrip(context->fftSetup, &context->split, 1, log2n, FFT_FORWARD);
    // zrip packs the Nyquist value into imagp[0]; zero it so bin magnitudes
    // computed below are not polluted by it.
    context->split.imagp[0] = 0.0;

    // BUGFIX: a real FFT of numSamples points yields only numSamples/2
    // usable bins — the original loop ran to numSamples and read realp/imagp
    // far past their allocated length (numSamples/2 floats each).
    UInt32 binCount = context->numSamples / 2;
    NSMutableArray *outData = [NSMutableArray arrayWithCapacity:binCount];
    [outData addObject:@0.0f]; // DC placeholder, matching the original output shape
    for (UInt32 i = 1; i < binCount; i++) {
        float power = context->split.realp[i] * context->split.realp[i] + context->split.imagp[i] * context->split.imagp[i];
        [outData addObject:@(sqrtf(power))];
    }

    AudioTap *audioTap = (__bridge AudioTap *)context->audioTap;
    [audioTap updateSpectrum:outData];
}
// Nothing to tear down per prepare/unprepare cycle: all buffers live for the
// tap's whole lifetime and are released in TapFinalize.
static void TapUnprepare(MTAudioProcessingTapRef tap)
{
}
static void TapFinalize(MTAudioProcessingTapRef tap)
{
    // Release everything allocated in TapInit.
    TapContext *context = (TapContext *)MTAudioProcessingTapGetStorage(tap);
    free(context->split.realp);
    free(context->split.imagp);
    free(context->inReal);
    free(context->window);
    // BUGFIX: the FFT setup must be destroyed with vDSP_destroy_fftsetup —
    // the original merely assigned nil, leaking the vDSP twiddle tables.
    vDSP_destroy_fftsetup(context->fftSetup);
    context->fftSetup = NULL;
    context->audioTap = NULL; // unretained back-pointer; nothing to release
    free(context);
}
#pragma mark - AudioTap Implementation
@implementation AudioTap

/// Designated initializer: keeps the track, remembers the requested frame
/// size, and immediately builds the processing tap and audio mix.
- (id)initWithTrack:(AVAssetTrack *)track frameSize:(UInt32)frameSize
{
    self = [super init];
    if (self) {
        _assetTrack = track;
        _frameSize = frameSize;
        [self setupAudioTap];
    }
    return self;
}

/// Creates the MTAudioProcessingTap backed by the C callbacks above and
/// attaches it to an AVAudioMix for _assetTrack (stored in _audioMix).
- (void)setupAudioTap
{
    MTAudioProcessingTapCallbacks callbacks;
    callbacks.version = kMTAudioProcessingTapCallbacksVersion_0;
    callbacks.init = TapInit;
    callbacks.prepare = TapPrepare;
    callbacks.process = TapProcess;
    callbacks.unprepare = TapUnprepare;
    callbacks.finalize = TapFinalize;
    callbacks.clientInfo = (__bridge void *)self;

    MTAudioProcessingTapRef tapRef = NULL;
    OSStatus err = MTAudioProcessingTapCreate(kCFAllocatorDefault, &callbacks,
                                              kMTAudioProcessingTapCreationFlag_PostEffects, &tapRef);
    if (err || !tapRef) {
        NSLog(@"Unable to create AudioProcessingTap.");
        return;
    }

    AVMutableAudioMixInputParameters *inputParams = [AVMutableAudioMixInputParameters
                                                     audioMixInputParametersWithTrack:_assetTrack];
    inputParams.audioTapProcessor = tapRef;

    // BUGFIX: MTAudioProcessingTapCreate follows the CF Create rule and
    // returns a +1 reference; audioTapProcessor retains it, so release our
    // reference here to avoid leaking the tap.
    CFRelease(tapRef);

    AVMutableAudioMix *audioMix = [AVMutableAudioMix audioMix];
    audioMix.inputParameters = @[inputParams];
    _audioMix = audioMix;
}

/// Called from the tap's real-time process callback; hops to the main queue
/// before forwarding the spectrum to the delegate.
- (void)updateSpectrum:(NSArray *)data
{
    // (Removed the original's @autoreleasepool wrapper — dispatch_async
    // blocks already run under GCD's own autorelease pool, so it was a no-op.)
    dispatch_async(dispatch_get_main_queue(), ^{
        // Reading _delegate captures self strongly until the block runs;
        // that is acceptable for a one-shot dispatch_async (no retain cycle).
        if (_delegate && [_delegate respondsToSelector:@selector(updateSpectrum:)]) {
            [_delegate updateSpectrum:data];
        }
    });
}

@end
我还读到 audioBuffer->mData 属性可能不是浮点数,而是其他类型(例如 SInt32 等)。如果确实如此,在做 FFT 之前应该如何确保把它正确转换为浮点数?
答案 0(得分:0)
绘图使用的点数与实际 FFT 幅度结果的长度不一致:实数 FFT 只产生 (2^log2n)/2 个有效频点,两者并不相同。