I have a strange problem playing a sound file (a wav file) with AVAudioPlayer on the iPhone while the app is in the background. I use the following code:
AVAudioPlayer *audioplayer;
NSError *error;
audioplayer = [[AVAudioPlayer alloc] initWithData:soundfile error:&error];
if (error) {
    NSLog(@"an error occurred while initializing the audioplayer...");
    NSLog(@"%@", [error localizedDescription]);
}
audioplayer.currentTime = 0;
if (![audioplayer prepareToPlay])
    NSLog(@"could not prepareToPlay");

audioplayer.volume = 1.0;

[[AVAudioSession sharedInstance] setCategory:AVAudioSessionCategoryPlayback error:nil];
[[AVAudioSession sharedInstance] setActive:YES error:&error];

if (![audioplayer play])
    NSLog(@"could not play sound");

audioplayer.delegate = [myApp sharedInstance];
This works fine while the app is in the foreground. However, once the app is moved to the background, [audioplayer prepareToPlay] returns NO.
This happens when "App plays audio" is not added to the "Required background modes". Is there a way to get a more precise error report out of [audioplayer prepareToPlay]? Or do you have any hints on what I am doing wrong or have forgotten?
Answer 0 (score: 1)
You need to initialize the audio session before preparing the AVAudioPlayer instance. Ideally, move the audio session calls into the application delegate's didFinishLaunchingWithOptions method.
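A minimal sketch of that ordering, assuming an ARC project linking AVFoundation; the `player` property, the `playSound:` method, and the `soundData` parameter are placeholders for illustration, not part of the answer:

// AppDelegate.m -- configure and activate the session once, at launch
#import <AVFoundation/AVFoundation.h>

- (BOOL)application:(UIApplication *)application
    didFinishLaunchingWithOptions:(NSDictionary *)launchOptions
{
    NSError *sessionError = nil;
    [[AVAudioSession sharedInstance] setCategory:AVAudioSessionCategoryPlayback
                                           error:&sessionError];
    [[AVAudioSession sharedInstance] setActive:YES error:&sessionError];
    if (sessionError) {
        NSLog(@"audio session setup failed: %@", [sessionError localizedDescription]);
    }
    return YES;
}

// Later, wherever playback is triggered -- the session is already active here.
// Assumes a strong AVAudioPlayer property named 'player' on this class.
- (void)playSound:(NSData *)soundData
{
    NSError *error = nil;
    self.player = [[AVAudioPlayer alloc] initWithData:soundData error:&error];
    if (error) {
        NSLog(@"player init failed: %@", [error localizedDescription]);
        return;
    }
    if (![self.player prepareToPlay]) {
        NSLog(@"could not prepareToPlay");
    }
    [self.player play];
}

With the category set and the session activated at launch, prepareToPlay is no longer racing against session activation by the time the app is in the background.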
Answer 1 (score: 0)
I'm not entirely sure this can be achieved with AVFoundation alone; you may need to use the AudioUnit framework and create a stream. Sending the contents of the .wav files into the audio buffer should be relatively simple.
This is how I do it in Piti Piti Pa. Another benefit is that you get better control over audio latency, so you can synchronize audio and visual animations (this is more noticeable when using Bluetooth).
Here is the code I use to initialize the audio unit:
+(BOOL)_createAudioUnitInstance
{
    // Describe audio component
    AudioComponentDescription desc;
    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_RemoteIO;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);

    // Get audio units
    OSStatus status = AudioComponentInstanceNew(inputComponent, &_audioUnit);
    [self _logStatus:status step:@"instantiate"];
    return (status == noErr);
}
+(BOOL)_setupAudioUnitOutput
{
    UInt32 flag = 1;
    OSStatus status = AudioUnitSetProperty(_audioUnit,
                                           kAudioOutputUnitProperty_EnableIO,
                                           kAudioUnitScope_Output,
                                           _outputAudioBus,
                                           &flag,
                                           sizeof(flag));
    [self _logStatus:status step:@"set output bus"];
    return (status == noErr);
}
+(BOOL)_setupAudioUnitFormat
{
    AudioStreamBasicDescription audioFormat = {0};
    audioFormat.mSampleRate = 44100.00;
    audioFormat.mFormatID = kAudioFormatLinearPCM;
    audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    audioFormat.mFramesPerPacket = 1;
    audioFormat.mChannelsPerFrame = 2;
    audioFormat.mBitsPerChannel = 16;
    audioFormat.mBytesPerPacket = 4;
    audioFormat.mBytesPerFrame = 4;

    OSStatus status = AudioUnitSetProperty(_audioUnit,
                                           kAudioUnitProperty_StreamFormat,
                                           kAudioUnitScope_Input,
                                           _outputAudioBus,
                                           &audioFormat,
                                           sizeof(audioFormat));
    [self _logStatus:status step:@"set audio format"];
    return (status == noErr);
}
+(BOOL)_setupAudioUnitRenderCallback
{
    AURenderCallbackStruct audioCallback;
    audioCallback.inputProc = playbackCallback;
    audioCallback.inputProcRefCon = (__bridge void *)(self);
    OSStatus status = AudioUnitSetProperty(_audioUnit,
                                           kAudioUnitProperty_SetRenderCallback,
                                           kAudioUnitScope_Global,
                                           _outputAudioBus,
                                           &audioCallback,
                                           sizeof(audioCallback));
    [self _logStatus:status step:@"set render callback"];
    return (status == noErr);
}
+(BOOL)_initializeAudioUnit
{
    OSStatus status = AudioUnitInitialize(_audioUnit);
    [self _logStatus:status step:@"initialize"];
    return (status == noErr);
}

+(void)start
{
    [self clearFeeds];
    [self _startAudioUnit];
}

+(void)stop
{
    [self _stopAudioUnit];
}

+(BOOL)_startAudioUnit
{
    OSStatus status = AudioOutputUnitStart(_audioUnit);
    [self _logStatus:status step:@"start"];
    return (status == noErr);
}

+(BOOL)_stopAudioUnit
{
    OSStatus status = AudioOutputUnitStop(_audioUnit);
    [self _logStatus:status step:@"stop"];
    return (status == noErr);
}

+(void)_logStatus:(OSStatus)status step:(NSString *)step
{
    if (status != noErr) {
        NSLog(@"AudioUnit failed to %@, error: %d", step, (int)status);
    }
}
#pragma mark - Mixer
static OSStatus playbackCallback(void *inRefCon,
                                 AudioUnitRenderActionFlags *ioActionFlags,
                                 const AudioTimeStamp *inTimeStamp,
                                 UInt32 inBusNumber,
                                 UInt32 inNumberFrames,
                                 AudioBufferList *ioData) {
    @autoreleasepool {
        AudioBuffer *audioBuffer = ioData->mBuffers;
        _lastPushedFrame = _nextFrame;
        [SIOAudioMixer _generateAudioFrames:inNumberFrames into:audioBuffer->mData];
    }
    return noErr;
}
Now you just need to extract the contents of the .wav files (it's easier if you export them in RAW format) and push them into the buffer through the callback.
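A minimal sketch of that missing piece, assuming the raw file holds interleaved 16-bit stereo PCM at 44.1 kHz (matching the stream format configured above). The `_pcmData` and `_readOffset` statics are hypothetical state, and the signature of _generateAudioFrames:into: is inferred from the call in playbackCallback, not shown in the original answer:

// Hypothetical backing store, filled once before playback starts, e.g.:
//   _pcmData = [NSData dataWithContentsOfURL:rawFileURL];
//   _readOffset = 0;
static NSData *_pcmData;
static NSUInteger _readOffset;

+(void)_generateAudioFrames:(UInt32)frameCount into:(void *)buffer
{
    const NSUInteger bytesPerFrame = 4;               // 2 channels * 16-bit samples
    NSUInteger bytesWanted = frameCount * bytesPerFrame;
    NSUInteger bytesLeft = _pcmData.length - _readOffset;
    NSUInteger bytesToCopy = MIN(bytesWanted, bytesLeft);

    // Copy as much PCM as is available, then pad the remainder with silence.
    memcpy(buffer, (const char *)_pcmData.bytes + _readOffset, bytesToCopy);
    if (bytesToCopy < bytesWanted) {
        memset((char *)buffer + bytesToCopy, 0, bytesWanted - bytesToCopy);
    }
    _readOffset += bytesToCopy;
}

In a production render path you would keep allocations and locks off the audio thread, but this shows the data flow from the raw PCM bytes into the buffer the callback hands you.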
I hope that helps!
Answer 2 (score: 0)
Set the AVAudioSession category in the AppDelegate like this (Swift 2):
do {
    try AVAudioSession.sharedInstance().setCategory(AVAudioSessionCategoryPlayAndRecord, withOptions: AVAudioSessionCategoryOptions.MixWithOthers)
} catch {
    self.fireAnAlert("Set Category Failed", theMessage: "Failed to set AVAudioSession Category")
}
Setting the options to "Mix With Others" is the important part!
Then, when you are about to play sound, make sure you call beginReceivingRemoteControlEvents and then set the AVAudioSession active, like this:
do {
    UIApplication.sharedApplication().beginReceivingRemoteControlEvents()
    try AVAudioSession.sharedInstance().setActive(true)
} catch {
    let e = error as NSError
    self.appDelegate?.fireAnAlert("Error", theMessage: "\(e.localizedDescription)")
}