Can anyone help me record the iPhone's output sound with an Audio Unit?

Date: 2011-12-23 11:13:21

Tags: iphone ios5 core-audio audiounit

Here is my code. I use it to record the iPhone's output audio with an Audio Unit and then save the output to output.caf, but the output.caf file ends up empty. Does anyone know what I should do? The output audio file is empty.

This is the initialization of the audio unit:

-(void) initializaeOutputUnit
{
    OSStatus status;

    // Describe audio component
    AudioComponentDescription desc;
    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_RemoteIO;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;

    // Get component
    AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);

    // Get audio units
    status = AudioComponentInstanceNew(inputComponent, &audioUnit);

    // Enable IO for recording
    UInt32 flag = 1;
    status = AudioUnitSetProperty(audioUnit, 
                                  kAudioOutputUnitProperty_EnableIO, 
                                  kAudioUnitScope_Input, 
                                  kInputBus,
                                  &flag, 
                                  sizeof(flag));

    // Enable IO for playback
    status = AudioUnitSetProperty(audioUnit, 
                                  kAudioOutputUnitProperty_EnableIO, 
                                  kAudioUnitScope_Output, 
                                  kOutputBus,
                                  &flag, 
                                  sizeof(flag));

    // Describe format
    AudioStreamBasicDescription audioFormat={0};
    audioFormat.mSampleRate         = 44100.00;
    audioFormat.mFormatID           = kAudioFormatLinearPCM;
    audioFormat.mFormatFlags        = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    audioFormat.mFramesPerPacket    = 1;
    audioFormat.mChannelsPerFrame   = 1;
    audioFormat.mBitsPerChannel     = 16;
    audioFormat.mBytesPerPacket     = 2;
    audioFormat.mBytesPerFrame      = 2;

    // Apply format
    status = AudioUnitSetProperty(audioUnit, 
                                  kAudioUnitProperty_StreamFormat, 
                                  kAudioUnitScope_Output, 
                                  kInputBus, 
                                  &audioFormat, 
                                  sizeof(audioFormat));
    status = AudioUnitSetProperty(audioUnit, 
                                  kAudioUnitProperty_StreamFormat, 
                                  kAudioUnitScope_Input, 
                                  kOutputBus, 
                                  &audioFormat, 
                                  sizeof(audioFormat));


    // Set input callback
    AURenderCallbackStruct callbackStruct;
    callbackStruct.inputProc = recordingCallback;
    callbackStruct.inputProcRefCon = self;
    status = AudioUnitSetProperty(audioUnit, 
                                  kAudioOutputUnitProperty_SetInputCallback, 
                                  kAudioUnitScope_Global, 
                                  kInputBus, 
                                  &callbackStruct, 
                                  sizeof(callbackStruct));

    // Set output callback
    callbackStruct.inputProc = playbackCallback;
    callbackStruct.inputProcRefCon = self;
    status = AudioUnitSetProperty(audioUnit, 
                                  kAudioUnitProperty_SetRenderCallback, 
                                  kAudioUnitScope_Global, 
                                  kOutputBus,
                                  &callbackStruct, 
                                  sizeof(callbackStruct));

    // Disable buffer allocation for the recorder (optional - do this if we want to pass in our own)
    flag = 0;
    status = AudioUnitSetProperty(audioUnit, 
                                  kAudioUnitProperty_ShouldAllocateBuffer,
                                  kAudioUnitScope_Output, 
                                  kInputBus,
                                  &flag, 
                                  sizeof(flag));


    AudioUnitInitialize(audioUnit);
    AudioOutputUnitStart(audioUnit);


    // Initialize the audio file
    NSArray  *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
    NSString *documentsDirectory = [paths objectAtIndex:0];
    NSString *destinationFilePath = [[[NSString alloc] initWithFormat: @"%@/output.caf", documentsDirectory] autorelease];
    NSLog(@">>> %@", destinationFilePath);
    CFURLRef destinationURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, (CFStringRef)destinationFilePath, kCFURLPOSIXPathStyle, false);

    OSStatus setupErr = ExtAudioFileCreateWithURL(destinationURL, kAudioFileWAVEType, &audioFormat, NULL, kAudioFileFlags_EraseFile, &effectState.audioFileRef);  
    CFRelease(destinationURL);
    NSAssert(setupErr == noErr, @"Couldn't create file for writing");

    setupErr = ExtAudioFileSetProperty(effectState.audioFileRef, kExtAudioFileProperty_ClientDataFormat, sizeof(AudioStreamBasicDescription), &audioFormat);
    NSAssert(setupErr == noErr, @"Couldn't create file for format");

    setupErr =  ExtAudioFileWriteAsync(effectState.audioFileRef, 0, NULL);
    NSAssert(setupErr == noErr, @"Couldn't initialize write buffers for audio file");

   }

The recording callback:

static OSStatus recordingCallback       (void *                         inRefCon,
                                         AudioUnitRenderActionFlags *      ioActionFlags,
                                         const AudioTimeStamp *            inTimeStamp,
                                         UInt32                            inBusNumber,
                                         UInt32                            inNumberFrames,
                                         AudioBufferList *                 ioData) {
    NSLog(@"callback");
   if (*ioActionFlags == kAudioUnitRenderAction_PostRender&&inBusNumber==0) 
   {
        AudioBufferList *bufferList; // <- Fill this up with buffers (you will want to malloc it, as it's a dynamic-length list)

        EffectState *effectState = (EffectState *)inRefCon;
       AudioUnit rioUnit =[(MixerHostAudio*)inRefCon getAudioUnit];

        OSStatus status;
        NSLog(@"de5eal el call back ");
        // BELOW I GET THE ERROR
        status = AudioUnitRender( rioUnit,     
                                 ioActionFlags, 
                                 inTimeStamp, 
                                 inBusNumber, 
                                 inNumberFrames, 
                                 bufferList);

        if (noErr != status) { NSLog(@"AudioUnitRender error"); return noErr;}

        // Now, we have the samples we just read sitting in buffers in bufferList
        ExtAudioFileWriteAsync(effectState->audioFileRef, inNumberFrames, bufferList);

    }
    return noErr;     
}




// then stop Recording 
- (void) stopRecord
{

    AudioOutputUnitStop(audioUnit);
    AudioUnitUninitialize(audioUnit);
}

2 Answers:

Answer 0: (score: 14)

In initializaeOutputUnit you only created the audio file:

OSStatus setupErr = ExtAudioFileCreateWithURL(destinationURL, kAudioFileWAVEType, &audioFormat, NULL, kAudioFileFlags_EraseFile, &effectState.audioFileRef);

Passing 0 (frames) and NULL (audio buffer) only serves to initialize the internal buffers:

setupErr =  ExtAudioFileWriteAsync(effectState.audioFileRef, 0, NULL);

What goes wrong is in recordingCallback:

1) ioActionFlags is always 0 and inBusNumber is always 1, because that is how you set up the callback (kInputBus = 1):

if (*ioActionFlags == kAudioUnitRenderAction_PostRender&&inBusNumber==0)

So just remove the if statement.

2) From AudioUnitRender you will receive a -50 error, which is defined in CoreAudioTypes.h as kAudio_ParamError. This happens because bufferList is undefined and NULL!

 OSStatus status; 
 status = AudioUnitRender(THIS->mAudioUnit,     
                          ioActionFlags, 
                          inTimeStamp, 
                          kInputBus, 
                          inNumberFrames, 
                          &bufferList);

 if (noErr != status) {
      printf("AudioUnitRender error: %ld", status);   
      return noErr; 
 }
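As an aside (this helper is not part of the original answer), Core Audio errors are easier to read if you print the OSStatus both ways: many of them are four-character codes, while others, such as -50, are plain integers. A minimal sketch:

 #include <ctype.h>   // isprint
 #include <string.h>  // memcpy

 // Hypothetical helper: print an OSStatus as a four-character code when it is
 // printable (e.g. 'fmt?'), otherwise as a plain integer such as -50.
 // CFSwapInt32HostToBig comes with CoreFoundation/Foundation.
 static void LogOSStatus(const char *label, OSStatus status)
 {
     UInt32 bigEndian = CFSwapInt32HostToBig((UInt32)status);
     char fourCC[5];
     memcpy(fourCC, &bigEndian, 4);
     fourCC[4] = '\0';

     if (isprint((unsigned char)fourCC[0]) && isprint((unsigned char)fourCC[1]) &&
         isprint((unsigned char)fourCC[2]) && isprint((unsigned char)fourCC[3])) {
         printf("%s: '%s'\n", label, fourCC);
     } else {
         printf("%s: %d\n", label, (int)status);
     }
 }

For example, LogOSStatus("AudioUnitRender", status); in the callback above would print "AudioUnitRender: -50".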

You just need to define a valid AudioBuffer and pass it to AudioUnitRender. This is my working render callback:

  static OSStatus recordingCallback       (void *                      inRefCon,
                                           AudioUnitRenderActionFlags *      ioActionFlags,
                                           const AudioTimeStamp *            inTimeStamp,
                                           UInt32                            inBusNumber,
                                           UInt32                            inNumberFrames,
                                           AudioBufferList *                 ioData)  {
      double timeInSeconds = inTimeStamp->mSampleTime / kSampleRate;
      printf("\n%fs inBusNumber: %lu inNumberFrames: %lu ", timeInSeconds, inBusNumber, inNumberFrames);
      //printAudioUnitRenderActionFlags(ioActionFlags);

      AudioBufferList bufferList;

      SInt16 samples[inNumberFrames]; // A large enough size to not have to worry about buffer overrun
      memset (&samples, 0, sizeof (samples));

      bufferList.mNumberBuffers = 1;
      bufferList.mBuffers[0].mData = samples;
      bufferList.mBuffers[0].mNumberChannels = 1;
      bufferList.mBuffers[0].mDataByteSize = inNumberFrames*sizeof(SInt16);

      ViewController* THIS = (__bridge ViewController *)inRefCon;

      OSStatus status;
      status = AudioUnitRender(THIS->mAudioUnit,     
                               ioActionFlags, 
                               inTimeStamp, 
                               kInputBus, 
                               inNumberFrames, 
                               &bufferList);

      if (noErr != status) {

          printf("AudioUnitRender error: %ld", status); 
          return noErr;
      }

      // Now, we have the samples we just read sitting in buffers in bufferList
      ExtAudioFileWriteAsync(THIS->mAudioFileRef, inNumberFrames, &bufferList);

      return noErr;      
 }

In stopRecord you should also close the audio file with ExtAudioFileDispose:

  - (void)stopRecording:(NSTimer*)theTimer {
      printf("\nstopRecording\n");
      AudioOutputUnitStop(mAudioUnit);
      AudioUnitUninitialize(mAudioUnit);

      OSStatus status = ExtAudioFileDispose(mAudioFileRef);
      printf("OSStatus(ExtAudioFileDispose): %ld\n", status); 
 }

Full source code: http://pastebin.com/92Fyjaye

Answer 1: (score: 3)

The RemoteIO Audio Unit does not record audio output, only microphone input. The output is not connected to the microphone input.

If you are playing audio with RemoteIO, you can copy those same buffers in your output callback and pass them to a file writer in addition to playing them. But this only works for the raw audio content that your app itself plays through RemoteIO.
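Below is a minimal sketch of that idea (it is not part of the original answer): the same render callback that fills ioData for playback also hands those buffers to ExtAudioFileWriteAsync. The EffectState struct with its audioFileRef field, and the fillOutputBuffers() helper that produces the app's own audio, are placeholders for illustration.

 static OSStatus playbackCallback(void                        *inRefCon,
                                  AudioUnitRenderActionFlags  *ioActionFlags,
                                  const AudioTimeStamp        *inTimeStamp,
                                  UInt32                       inBusNumber,
                                  UInt32                       inNumberFrames,
                                  AudioBufferList             *ioData)
 {
     EffectState *effectState = (EffectState *)inRefCon;

     // 1) Fill ioData with whatever the app is playing (a synth, a file reader, ...).
     //    fillOutputBuffers() is a placeholder for that code.
     fillOutputBuffers(effectState, inNumberFrames, ioData);

     // 2) Hand the very same buffers to the file writer. ExtAudioFileWriteAsync
     //    can be called from the render thread once it has been primed with the
     //    0-frame / NULL call shown earlier in this thread.
     ExtAudioFileWriteAsync(effectState->audioFileRef, inNumberFrames, ioData);

     return noErr;
 }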