So, I want to take a sound file, convert it into packets, and send them to another computer. I want the other computer to play the packets as they arrive.
I'm using AVAudioPlayer to try to play these packets, but I can't find a proper way to serialize the data on peer1 so that peer2 can play it.
The scenario is: peer1 has an audio file, splits it into many small packets, wraps them in NSData, and sends them to peer2. Peer2 receives the packets and plays them one by one as they arrive.
Does anyone know how to do this? Or whether it is even possible?
Edit:
Here is some code to illustrate what I'm trying to achieve.
// This code is part of peer1, the one who sends the data
- (void)sendData
{
    int packetId = 0;
    NSString *soundFilePath = [[NSBundle mainBundle] pathForResource:@"myAudioFile" ofType:@"wav"];
    NSData *soundData = [[NSData alloc] initWithContentsOfFile:soundFilePath];
    NSMutableArray *arraySoundData = [[NSMutableArray alloc] init];

    // Splitting the audio into 2 pieces.
    // This is only an illustration; the idea is to split the data
    // into multiple pieces depending on the size of the file to be sent.
    NSRange soundRange;
    soundRange.length = [soundData length] / 2;
    soundRange.location = 0;
    [arraySoundData addObject:[soundData subdataWithRange:soundRange]];
    soundRange.length = [soundData length] / 2;
    soundRange.location = [soundData length] / 2;
    [arraySoundData addObject:[soundData subdataWithRange:soundRange]];

    for (int i = 0; i < [arraySoundData count]; i++)
    {
        NSData *soundPacket = [arraySoundData objectAtIndex:i];
        if (soundPacket == nil)
        {
            NSLog(@"soundData is nil");
            return;
        }

        NSMutableData *message = [[NSMutableData alloc] init];
        NSKeyedArchiver *archiver = [[NSKeyedArchiver alloc] initForWritingWithMutableData:message];
        [archiver encodeInt:packetId++ forKey:PACKET_ID];
        [archiver encodeObject:soundPacket forKey:PACKET_SOUND_DATA];
        [archiver finishEncoding];

        NSError *error = nil;
        [connectionManager sendMessage:message error:&error];
        if (error) NSLog(@"send failed: %@", [error localizedDescription]);

        [message release];
        [archiver release];
    }

    [soundData release];
    [arraySoundData release];
}
// This is the code on peer2 that receives and plays the piece of audio in each packet
- (void)receiveData:(NSData *)data
{
    NSKeyedUnarchiver *unarchiver = [[NSKeyedUnarchiver alloc] initForReadingWithData:data];
    if ([unarchiver containsValueForKey:PACKET_ID])
        NSLog(@"DECODED PACKET_ID: %i", [unarchiver decodeIntForKey:PACKET_ID]);
    if ([unarchiver containsValueForKey:PACKET_SOUND_DATA])
    {
        NSLog(@"DECODED sound");
        NSData *sound = (NSData *)[unarchiver decodeObjectForKey:PACKET_SOUND_DATA];
        if (sound == nil)
        {
            NSLog(@"sound is nil!");
        }
        else
        {
            NSLog(@"sound is not nil!");
            // Always assign the result of init: it may return a different
            // object than alloc did, or nil on failure.
            AVAudioPlayer *audioPlayer = [[AVAudioPlayer alloc] initWithData:sound error:nil];
            if (audioPlayer)
            {
                [audioPlayer prepareToPlay];
                [audioPlayer play];
                // Note: the player is never released here; in real code, keep a
                // reference and release it in audioPlayerDidFinishPlaying:successfully:.
            }
            else
            {
                NSLog(@"Player couldn't load data");
            }
        }
    }
    [unarchiver release];
}
So, that's what I'm trying to achieve... what I really need to know is how to create the packets so that peer2 can play the audio.
It would be a kind of streaming. And yes, for now I'm not worried about the order in which the packets are received or played... I just need the sound sliced into pieces, and for peer2 to be able to play each piece as it arrives, without waiting for the whole file to be received.
Thanks!
Answer 0 (score: 8)
Here is the simplest class to play a file using Audio Queue Services (AQ). Note that you can play it from any point (just set currentPacketNumber).
// AudioFile.h
#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>

@interface AudioFile : NSObject {
    AudioFileID fileID;  // the identifier for the audio file to play
    AudioStreamBasicDescription format;
    UInt64 packetsCount;
    UInt32 maxPacketSize;
}

@property (readwrite) AudioFileID fileID;
@property (readwrite) AudioStreamBasicDescription format;  // needed for @synthesize format below
@property (readwrite) UInt64 packetsCount;
@property (readwrite) UInt32 maxPacketSize;

- (id)initWithURL:(CFURLRef)url;
- (AudioStreamBasicDescription *)audioFormatRef;

@end
// AudioFile.m
#import "AudioFile.h"

@implementation AudioFile

@synthesize fileID;
@synthesize format;
@synthesize maxPacketSize;
@synthesize packetsCount;

- (id)initWithURL:(CFURLRef)url {
    if (self = [super init]) {
        AudioFileOpenURL(url,
                         0x01,  // fsRdPerm, read only
                         0,     // no file type hint
                         &fileID);

        UInt32 sizeOfPlaybackFormatASBDStruct = sizeof format;
        AudioFileGetProperty(fileID,
                             kAudioFilePropertyDataFormat,
                             &sizeOfPlaybackFormatASBDStruct,
                             &format);

        UInt32 propertySize = sizeof(maxPacketSize);
        AudioFileGetProperty(fileID,
                             kAudioFilePropertyMaximumPacketSize,
                             &propertySize,
                             &maxPacketSize);

        propertySize = sizeof(packetsCount);
        AudioFileGetProperty(fileID,
                             kAudioFilePropertyAudioDataPacketCount,
                             &propertySize,
                             &packetsCount);
    }
    return self;
}

- (AudioStreamBasicDescription *)audioFormatRef {
    return &format;
}

- (void)dealloc {
    AudioFileClose(fileID);
    [super dealloc];
}

@end
// AQPlayer.h
#import <Foundation/Foundation.h>
#import "AudioFile.h"

#define AUDIOBUFFERS_NUMBER 3
#define MAX_PACKET_COUNT 4096

@interface AQPlayer : NSObject {
@public
    AudioQueueRef queue;
    AudioQueueBufferRef buffers[AUDIOBUFFERS_NUMBER];
    NSInteger bufferByteSize;
    AudioStreamPacketDescription packetDescriptions[MAX_PACKET_COUNT];
    AudioFile *audioFile;
    SInt64 currentPacketNumber;
    UInt32 numPacketsToRead;
}

@property (nonatomic) SInt64 currentPacketNumber;
@property (nonatomic, retain) AudioFile *audioFile;

- (id)initWithFile:(NSString *)file;
- (NSInteger)fillBuffer:(AudioQueueBufferRef)buffer;
- (void)play;

@end
// AQPlayer.m
#import "AQPlayer.h"

static void AQOutputCallback(void *inUserData, AudioQueueRef inAQ, AudioQueueBufferRef inBuffer) {
    AQPlayer *aqp = (AQPlayer *)inUserData;
    [aqp fillBuffer:inBuffer];
}

@implementation AQPlayer

@synthesize currentPacketNumber;
@synthesize audioFile;

- (id)initWithFile:(NSString *)file {
    if (self = [super init]) {
        audioFile = [[AudioFile alloc] initWithURL:(CFURLRef)[NSURL fileURLWithPath:file]];
        currentPacketNumber = 0;
        AudioQueueNewOutput([audioFile audioFormatRef], AQOutputCallback, self,
                            CFRunLoopGetCurrent(), kCFRunLoopCommonModes, 0, &queue);
        bufferByteSize = 4096;
        if (bufferByteSize < audioFile.maxPacketSize) bufferByteSize = audioFile.maxPacketSize;
        numPacketsToRead = bufferByteSize / audioFile.maxPacketSize;
        for (int i = 0; i < AUDIOBUFFERS_NUMBER; i++) {
            AudioQueueAllocateBuffer(queue, bufferByteSize, &buffers[i]);
        }
    }
    return self;
}

- (void)dealloc {
    [audioFile release];
    if (queue) {
        AudioQueueDispose(queue, YES);
        queue = nil;
    }
    [super dealloc];
}

- (void)play {
    for (int bufferIndex = 0; bufferIndex < AUDIOBUFFERS_NUMBER; ++bufferIndex) {
        [self fillBuffer:buffers[bufferIndex]];
    }
    AudioQueueStart(queue, NULL);
}

- (NSInteger)fillBuffer:(AudioQueueBufferRef)buffer {
    UInt32 numBytes;
    UInt32 numPackets = numPacketsToRead;
    BOOL isVBR = [audioFile audioFormatRef]->mBytesPerPacket == 0 ? YES : NO;

    AudioFileReadPackets(audioFile.fileID,
                         NO,
                         &numBytes,
                         isVBR ? packetDescriptions : 0,
                         currentPacketNumber,
                         &numPackets,
                         buffer->mAudioData);

    if (numPackets > 0) {
        buffer->mAudioDataByteSize = numBytes;
        AudioQueueEnqueueBuffer(queue,
                                buffer,
                                isVBR ? numPackets : 0,
                                isVBR ? packetDescriptions : 0);
        // Advance the read position; without this, the same packets
        // would be read and enqueued over and over again.
        currentPacketNumber += numPackets;
    }
    else {
        // End of present data; check whether all packets have been played.
        // If yes, stop playback and dispose of the queue.
        // If no, pause the queue until new data arrives, then start it again.
    }
    return numPackets;
}

@end
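For reference, here is a minimal sketch of how this class could be driven. The file name is an assumption, and since the queue is created with CFRunLoopGetCurrent(), the caller must be on a thread with a running run loop (e.g. the main thread):

// Hypothetical caller, e.g. a view controller on the main thread.
AQPlayer *player = [[AQPlayer alloc] initWithFile:
    [[NSBundle mainBundle] pathForResource:@"myAudioFile" ofType:@"wav"]];
player.currentPacketNumber = 0;  // start playback from any packet you like
[player play];
// Keep a reference to the player for as long as playback should run,
// then [player release]; when finished.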
Answer 1 (score: 5)
It seems you are solving the wrong task, because AVAudioPlayer can only play a whole audio file. You should instead use Audio Queue Services from the AudioToolbox framework to play audio on a packet-by-packet basis. In fact, you don't need to split the audio file into real sound packets; you can use any chunk of data, as in your own example above, but then you should read the received chunks using the Audio File Services or Audio File Stream Services functions (from AudioToolbox) and feed them into the audio queue buffers.
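To illustrate that second approach, here is a minimal, hypothetical sketch of feeding arbitrary received chunks to Audio File Stream Services. The callback names and the kAudioFileWAVEType hint are assumptions, and the audio-queue plumbing is left as comments:

#import <AudioToolbox/AudioToolbox.h>

// Called when the parser discovers a stream property, e.g. the data format.
static void MyPropertyProc(void *inClientData, AudioFileStreamID inStream,
                           AudioFileStreamPropertyID inPropertyID, UInt32 *ioFlags)
{
    if (inPropertyID == kAudioFileStreamProperty_DataFormat) {
        AudioStreamBasicDescription format;
        UInt32 size = sizeof(format);
        AudioFileStreamGetProperty(inStream, kAudioFileStreamProperty_DataFormat, &size, &format);
        // Create the audio queue with this format here (AudioQueueNewOutput).
    }
}

// Called whenever complete audio packets have been parsed out of the fed bytes.
static void MyPacketsProc(void *inClientData, UInt32 inNumberBytes, UInt32 inNumberPackets,
                          const void *inInputData, AudioStreamPacketDescription *inPacketDescriptions)
{
    // Copy the packets into an AudioQueueBuffer and enqueue it on the queue.
}

// On the receiving peer:
AudioFileStreamID stream;
AudioFileStreamOpen(NULL, MyPropertyProc, MyPacketsProc, kAudioFileWAVEType, &stream);

// Then, for every NSData chunk that arrives from peer1:
// AudioFileStreamParseBytes(stream, (UInt32)[data length], [data bytes], 0);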
If you still want to split the audio into real sound packets, you can do it easily with the Audio File Services functions. An audio file consists of a header, which holds properties such as the packet count, sample rate, and number of channels, followed by the raw sound data.
Open the audio file with AudioFileOpenURL and read its properties with the AudioFileGetProperty function. Basically you only need the kAudioFilePropertyDataFormat and kAudioFilePropertyAudioDataPacketCount properties:
AudioFileID fileID;                  // the identifier for the audio file
CFURLRef fileURL = ...;              // file URL
AudioStreamBasicDescription format;  // structure containing audio header info
UInt64 packetsCount;

AudioFileOpenURL(fileURL,
                 0x01,  // fsRdPerm, read only
                 0,     // no file type hint
                 &fileID);

UInt32 sizeOfPlaybackFormatASBDStruct = sizeof format;
AudioFileGetProperty(fileID,
                     kAudioFilePropertyDataFormat,
                     &sizeOfPlaybackFormatASBDStruct,
                     &format);

UInt32 propertySize = sizeof(packetsCount);  // declared here; it was undeclared above
AudioFileGetProperty(fileID, kAudioFilePropertyAudioDataPacketCount, &propertySize, &packetsCount);
You can then read any range of audio packets with:
OSStatus AudioFileReadPackets (
    AudioFileID                   inAudioFile,
    Boolean                       inUseCache,
    UInt32                        *outNumBytes,
    AudioStreamPacketDescription  *outPacketDescriptions,
    SInt64                        inStartingPacket,
    UInt32                        *ioNumPackets,
    void                          *outBuffer
);
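For example, a sketch of how peer1 might pull out one range of packets and wrap it in NSData for sending. It reuses fileID from the snippet above; the chunk size is a hypothetical choice, and sizing the buffer via kAudioFilePropertyMaximumPacketSize is an assumption that covers both CBR and VBR files:

// Read packets [startPacket, startPacket + packetsPerChunk) into an NSData chunk.
UInt32 maxPacketSize;
UInt32 size = sizeof(maxPacketSize);
AudioFileGetProperty(fileID, kAudioFilePropertyMaximumPacketSize, &size, &maxPacketSize);

SInt64 startPacket = 0;       // first packet of this chunk
UInt32 packetsPerChunk = 64;  // hypothetical chunk size
UInt32 numBytes = 0;
void *buffer = malloc(packetsPerChunk * maxPacketSize);

AudioFileReadPackets(fileID,
                     false,      // don't cache
                     &numBytes,
                     NULL,       // pass an AudioStreamPacketDescription array for VBR data
                     startPacket,
                     &packetsPerChunk,  // in: requested count; out: packets actually read
                     buffer);

NSData *chunk = [NSData dataWithBytes:buffer length:numBytes];
free(buffer);
// Send chunk to peer2; for VBR formats, send the packet descriptions too.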
Answer 2 (score: 0)
Apple has already written something that can do this: AUNetSend and AUNetReceive. AUNetSend is an effect AudioUnit that sends audio to an AUNetReceive AudioUnit on another computer.
Unfortunately, these AUs are not available on the iPhone.
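For completeness, a hypothetical macOS-only sketch of instantiating AUNetSend via the AudioComponent API; the component constants are real, but the surrounding graph setup is omitted and left as a comment:

#import <AudioUnit/AudioUnit.h>

// macOS only: AUNetSend publishes itself over Bonjour, and AUNetReceive
// on the other machine connects to it and pulls the audio.
AudioComponentDescription desc = {0};
desc.componentType = kAudioUnitType_Effect;
desc.componentSubType = kAudioUnitSubType_NetSend;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;

AudioComponent comp = AudioComponentFindNext(NULL, &desc);
AudioUnit netSend = NULL;
AudioComponentInstanceNew(comp, &netSend);
// Insert netSend into an AUGraph or render chain as a regular effect unit.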