Client cannot play the streamed data on iOS

Date: 2017-05-10 10:11:04

Tags: ios audio-streaming audiotoolbox gcdasyncsocket

With the code below, the server starts recording and streams the captured audio to the connected clients, and this part works.

AudioServer.h

#import <UIKit/UIKit.h>
#import <Foundation/Foundation.h>
#import "GCDAsyncSocket.h"
#import <AudioToolbox/AudioToolbox.h>

@interface AudioServer : NSObject<GCDAsyncSocketDelegate>
   @property (nonatomic, strong)GCDAsyncSocket * serverSocket;
   @property (nonatomic, strong)NSMutableArray *connectedClients;
   @property (nonatomic) AudioComponentInstance audioUnit;

  -(void) start;
  -(void) stop;
  -(void) writeDataToClients:(NSData*)data;
@end

AudioServer.m

#import "AudioServer.h"
#define kOutputBus 0
#define kInputBus 1

static OSStatus recordingCallback(void *inRefCon,
                              AudioUnitRenderActionFlags *ioActionFlags,
                              const AudioTimeStamp *inTimeStamp,
                              UInt32 inBusNumber,
                              UInt32 inNumberFrames,
                              AudioBufferList *ioData) {

// TODO: Use inRefCon to access our interface object to do stuff
// Then, use inNumberFrames to figure out how much data is available, and make
// that much space available in buffers in an AudioBufferList.

AudioServer *server = (__bridge AudioServer*)inRefCon;

AudioBufferList bufferList;

SInt16 samples[inNumberFrames]; // A large enough size to not have to worry about buffer overrun
memset (&samples, 0, sizeof (samples));

bufferList.mNumberBuffers = 1;
bufferList.mBuffers[0].mData = samples;
bufferList.mBuffers[0].mNumberChannels = 1;
bufferList.mBuffers[0].mDataByteSize = inNumberFrames*sizeof(SInt16);

// Then:
// Obtain recorded samples

OSStatus status;

status = AudioUnitRender(server.audioUnit,
                         ioActionFlags,
                         inTimeStamp,
                         inBusNumber,
                         inNumberFrames,
                         &bufferList);

NSData *dataToSend = [NSData dataWithBytes:bufferList.mBuffers[0].mData length:bufferList.mBuffers[0].mDataByteSize];


[server writeDataToClients:dataToSend];

return noErr;
}


@implementation AudioServer

-(id) init
{
  return [super init];
}

-(void) start
{


[UIApplication sharedApplication].idleTimerDisabled = YES;
// Create a new instance of AURemoteIO

AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_RemoteIO;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;

AudioComponent comp = AudioComponentFindNext(NULL, &desc);
AudioComponentInstanceNew(comp, &_audioUnit);

//  Enable input and output on AURemoteIO
//  Input is enabled on the input scope of the input element
//  Output is enabled on the output scope of the output element

UInt32 one = 1;
AudioUnitSetProperty(_audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, &one, sizeof(one));

AudioUnitSetProperty(_audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, &one, sizeof(one));

// Explicitly set the input and output client formats
// sample rate = 44100, num channels = 1, format = 16-bit signed integer

AudioStreamBasicDescription audioFormat = [self getAudioDescription];
AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &audioFormat, sizeof(audioFormat));
AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &audioFormat, sizeof(audioFormat));

// Set the MaximumFramesPerSlice property. This property is used to describe to an audio unit the maximum number
// of samples it will be asked to produce on any single given call to AudioUnitRender
UInt32 maxFramesPerSlice = 4096;
AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, sizeof(UInt32));

// Get the property value back from AURemoteIO. We are going to use this value to allocate buffers accordingly
UInt32 propSize = sizeof(UInt32);
AudioUnitGetProperty(_audioUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, &propSize);


AURenderCallbackStruct renderCallback;
renderCallback.inputProc = recordingCallback;
renderCallback.inputProcRefCon = (__bridge void *)(self);

AudioUnitSetProperty(_audioUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &renderCallback, sizeof(renderCallback));


// Initialize the AURemoteIO instance
AudioUnitInitialize(_audioUnit);

AudioOutputUnitStart(_audioUnit);

_connectedClients = [[NSMutableArray alloc] init];
_serverSocket = [[GCDAsyncSocket alloc] initWithDelegate:self delegateQueue:dispatch_get_main_queue()];

[self startAcceptingConnections];
 }

- (AudioStreamBasicDescription)getAudioDescription {
AudioStreamBasicDescription audioDescription = {0};
audioDescription.mFormatID          = kAudioFormatLinearPCM;
audioDescription.mFormatFlags       = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked | kAudioFormatFlagsNativeEndian;
audioDescription.mChannelsPerFrame  = 1;
audioDescription.mBytesPerPacket    = sizeof(SInt16)*audioDescription.mChannelsPerFrame;
audioDescription.mFramesPerPacket   = 1;
audioDescription.mBytesPerFrame     = sizeof(SInt16)*audioDescription.mChannelsPerFrame;
audioDescription.mBitsPerChannel    = 8 * sizeof(SInt16);
audioDescription.mSampleRate        = 44100.0;
return audioDescription;
}

-(void) startAcceptingConnections
{
NSError *error = nil;
if(_serverSocket)
    [_serverSocket acceptOnPort:2030 error:&error];
//TODO:- Change Here Port numbers
}


-(void)socketDidDisconnect:(GCDAsyncSocket *)sock withError:(NSError *)err
{
if(_connectedClients)
    [_connectedClients removeObject:sock];
}

- (void)socket:(GCDAsyncSocket *)socket didAcceptNewSocket:(GCDAsyncSocket *)newSocket {

NSLog(@"Accepted New Socket from %@:%hu", [newSocket connectedHost], [newSocket connectedPort]);

@synchronized(_connectedClients)
{
    dispatch_async(dispatch_get_main_queue(), ^{
        if(_connectedClients)
            [_connectedClients addObject:newSocket];
    });
}

NSError *error = nil;
if(_serverSocket)
    //[_serverSocket acceptOnPort:[SM_Utils serverPort] error:&error];
    [_serverSocket acceptOnPort:2030 error:&error];
//TODO:- Change Here Port numbers
}

-(void) writeDataToClients:(NSData *)data
{
if(_connectedClients)
{
    for (GCDAsyncSocket *socket in _connectedClients) {
        if([socket isConnected])
        {
            [socket writeData:data withTimeout:-1 tag:0];
        }
        else{
            if([_connectedClients containsObject:socket])
                [_connectedClients removeObject:socket];
        }
    }
}
}

-(void) stop
{
if(_serverSocket)
{
    _serverSocket = nil;
}
[UIApplication sharedApplication].idleTimerDisabled = NO;
AudioOutputUnitStop(_audioUnit);
}

-(void) dealloc
{
if(_serverSocket)
{
    _serverSocket = nil;
}
[UIApplication sharedApplication].idleTimerDisabled = NO;
AudioOutputUnitStop(_audioUnit);
}


@end
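
Note that the RemoteIO input bus only delivers microphone samples if the audio session has been configured and activated before -start is called. That step is not shown above; a minimal sketch (the category choice and the call site are assumptions, not part of the original code) could look like this:

#import <AVFoundation/AVFoundation.h>

// Hypothetical setup, called once before [audioServer start].
// PlayAndRecord is assumed so the device can record and play at the same time.
- (void)configureAudioSession
{
    NSError *error = nil;
    AVAudioSession *session = [AVAudioSession sharedInstance];

    if (![session setCategory:AVAudioSessionCategoryPlayAndRecord error:&error]) {
        NSLog(@"Error setting audio session category: %@", error);
    }
    if (![session setActive:YES error:&error]) {
        NSLog(@"Error activating audio session: %@", error);
    }
}

On iOS 7 and later the system will also prompt the user for microphone permission the first time recording starts.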

This is the client code:

AudioClient.h

#import <UIKit/UIKit.h>
#import <Foundation/Foundation.h>
#import "GCDAsyncSocket.h"
#import  <AudioToolbox/AudioToolbox.h>

#import "TPCircularBuffer.h"

@protocol AudioClientDelegate <NSObject>

    -(void) connected;
    -(void) animateSoundIndicator:(float) rms;

@end


@interface AudioClient : NSObject<GCDAsyncSocketDelegate>
{
    NSString *ipAddress;
    BOOL stopped;
}

    @property (nonatomic) TPCircularBuffer circularBuffer;
    @property (nonatomic) AudioComponentInstance audioUnit;
    @property (nonatomic, strong) GCDAsyncSocket *socket;
    @property (nonatomic, strong) id<AudioClientDelegate> delegate;

    -(id) initWithDelegate:(id)delegate;
    -(void) start:(NSString *)ip;
    -(void) stop;
    -(TPCircularBuffer *) outputShouldUseCircularBuffer;


@end

AudioClient.m

#define kOutputBus 0
#define kInputBus 1
#import "AudioClient.h"

@implementation AudioClient

static OSStatus OutputRenderCallback(void                        *inRefCon,
                                 AudioUnitRenderActionFlags  *ioActionFlags,
                                 const AudioTimeStamp        *inTimeStamp,
                                 UInt32                      inBusNumber,
                                 UInt32                      inNumberFrames,
                                 AudioBufferList             *ioData){


AudioClient *output = (__bridge AudioClient*)inRefCon;


TPCircularBuffer *circularBuffer = [output outputShouldUseCircularBuffer];
if( !circularBuffer ){
    AudioUnitSampleType *left  = (AudioUnitSampleType*)ioData->mBuffers[0].mData;
    for(int i = 0; i < inNumberFrames; i++ ){
        left[  i ] = 0.0f;
    }
    return noErr;
};

int32_t bytesToCopy = ioData->mBuffers[0].mDataByteSize;
SInt16* outputBuffer = ioData->mBuffers[0].mData;

int32_t availableBytes;
SInt16 *sourceBuffer = TPCircularBufferTail(circularBuffer, &availableBytes);

int32_t amount = MIN(bytesToCopy,availableBytes);
memcpy(outputBuffer, sourceBuffer, amount);

TPCircularBufferConsume(circularBuffer,amount);

NSLog(@"Bufferiiii");

return noErr;
}


-(id) initWithDelegate:(id)delegate
{
    self = [super init];
    if(self)
    {
        [self circularBuffer:&_circularBuffer withSize:24576*5];
        _delegate = delegate;
        stopped = NO;
    }
    return self;
}

-(void) start:(NSString *)ip
{
_socket = [[GCDAsyncSocket alloc] initWithDelegate:self delegateQueue: dispatch_get_main_queue()];

NSError *err;

ipAddress = ip;

[UIApplication sharedApplication].idleTimerDisabled = YES;

//if(![_socket connectToHost:ipAddress onPort:[SM_Utils serverPort] error:&err])
if(![_socket connectToHost:ipAddress onPort:2030 error:&err])
{

}

[self setupAudioUnit];
}

-(void) setupAudioUnit
{
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_RemoteIO;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;

AudioComponent comp = AudioComponentFindNext(NULL, &desc);

OSStatus status;

status = AudioComponentInstanceNew(comp, &_audioUnit);

if(status != noErr)
{
    NSLog(@"Error creating AudioUnit instance");
}

//  Enable output on AURemoteIO
//  Output is enabled on the output scope of the output element

UInt32 one = 1;

status = AudioUnitSetProperty(_audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, kOutputBus, &one, sizeof(one));

if(status != noErr)
{
    NSLog(@"Error enabling AudioUnit output bus");
}

// Explicitly set the input and output client formats
// sample rate = 44100, num channels = 1, format = 16-bit signed integer

AudioStreamBasicDescription audioFormat = [self getAudioDescription];

status = AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, kOutputBus, &audioFormat, sizeof(audioFormat));

if(status != noErr)
{
    NSLog(@"Error setting audio format");
}

AURenderCallbackStruct renderCallback;
renderCallback.inputProc = OutputRenderCallback;
renderCallback.inputProcRefCon = (__bridge void *)(self);

status = AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, kOutputBus, &renderCallback, sizeof(renderCallback));

if(status != noErr)
{
    NSLog(@"Error setting rendering callback");
}

// Initialize the AURemoteIO instance
status = AudioUnitInitialize(_audioUnit);

if(status != noErr)
{
    NSLog(@"Error initializing audio unit");
}
}

- (AudioStreamBasicDescription)getAudioDescription {


AudioStreamBasicDescription audioDescription = {0};
audioDescription.mFormatID          = kAudioFormatLinearPCM;
audioDescription.mFormatFlags       = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked | kAudioFormatFlagsNativeEndian;
audioDescription.mChannelsPerFrame  = 1;
audioDescription.mBytesPerPacket    = sizeof(SInt16)*audioDescription.mChannelsPerFrame;
audioDescription.mFramesPerPacket   = 1;
audioDescription.mBytesPerFrame     = sizeof(SInt16)*audioDescription.mChannelsPerFrame;
audioDescription.mBitsPerChannel    = 8 * sizeof(SInt16);
audioDescription.mSampleRate        = 44100.0;
return audioDescription;


}

-(void) socketDidDisconnect:(GCDAsyncSocket *)sock withError:(NSError *)err
{
if(!stopped)
    //if(![_socket connectToHost:ipAddress onPort:[SM_Utils serverPort] error:&err])
    if(![_socket connectToHost:ipAddress onPort:2030 error:&err])
    {

    }
}

-(void) socket:(GCDAsyncSocket *)socket didReadData:(NSData *)data withTag:(long)tag
{
if(data.length > 0)
{
    unsigned long len = [data length];

    SInt16* byteData = (SInt16*)malloc(len);
    memcpy(byteData, [data bytes], len);

    // RMS over the 16-bit samples (len/2 samples) for the level indicator
    double sum = 0.0;
    for(int i = 0; i < len/2; i++) {
        sum += byteData[i] * byteData[i];
    }

    double average = sum / (len/2);
    double rms = sqrt(average);

    [_delegate animateSoundIndicator:rms];
    free(byteData);

    Byte* soundData = (Byte*)malloc(len);
    memcpy(soundData, [data bytes], len);

    if(soundData)
    {
        AudioBufferList *theDataBuffer = (AudioBufferList*) malloc(sizeof(AudioBufferList) *1);
        theDataBuffer->mNumberBuffers = 1;
        theDataBuffer->mBuffers[0].mDataByteSize = (UInt32)len;
        theDataBuffer->mBuffers[0].mNumberChannels = 1;
        theDataBuffer->mBuffers[0].mData = (SInt16*)soundData;

        // TPCircularBufferProduceBytes copies the bytes into the ring buffer,
        // so the temporary buffers can be freed immediately afterwards.
        [self appendDataToCircularBuffer:&_circularBuffer fromAudioBufferList:theDataBuffer];

        free(theDataBuffer);
        free(soundData);
    }
}

[socket readDataToLength:18432 withTimeout:-1 tag:0];
}

-(void)circularBuffer:(TPCircularBuffer *)circularBuffer withSize:(int)size {
TPCircularBufferInit(circularBuffer,size);
}

-(void)appendDataToCircularBuffer:(TPCircularBuffer*)circularBuffer
          fromAudioBufferList:(AudioBufferList*)audioBufferList {
TPCircularBufferProduceBytes(circularBuffer,
                             audioBufferList->mBuffers[0].mData,
                             audioBufferList->mBuffers[0].mDataByteSize);
}

-(void)freeCircularBuffer:(TPCircularBuffer *)circularBuffer {
TPCircularBufferClear(circularBuffer);
TPCircularBufferCleanup(circularBuffer);
}

-(void) socket:(GCDAsyncSocket *)socket didConnectToHost:(NSString *)host port:(uint16_t)port
{
OSStatus status = AudioOutputUnitStart(_audioUnit);

if(status != noErr)
{
    NSLog(@"Error starting audio unit");
}

[socket readDataToLength:18432 withTimeout:-1 tag:0];

[_delegate connected];
}

-(TPCircularBuffer *) outputShouldUseCircularBuffer
{
return &_circularBuffer;
}

-(void) stop
{

OSStatus status = AudioOutputUnitStop(_audioUnit);

if(status != noErr)
{
    NSLog(@"Error stopping audio unit");
}

[UIApplication sharedApplication].idleTimerDisabled = NO;

TPCircularBufferClear(&_circularBuffer);
_audioUnit = nil;
stopped = YES;
}

-(void) dealloc {
OSStatus status = AudioOutputUnitStop(_audioUnit);

if(status != noErr)
{
    NSLog(@"Error stopping audio unit");
}

[UIApplication sharedApplication].idleTimerDisabled = NO;

TPCircularBufferClear(&_circularBuffer);
_audioUnit = nil;
stopped = YES;
}


@end
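
For reference, the client is driven from a view controller roughly like this; the controller class and the IP address below are placeholders, and only initWithDelegate:, start: and the AudioClientDelegate methods come from the code above:

#import "AudioClient.h"

// Hypothetical view controller hosting the client.
@interface ClientViewController : UIViewController <AudioClientDelegate>
@property (nonatomic, strong) AudioClient *audioClient;
@end

@implementation ClientViewController

- (void)viewDidLoad
{
    [super viewDidLoad];
    self.audioClient = [[AudioClient alloc] initWithDelegate:self];
    [self.audioClient start:@"192.168.1.10"];  // placeholder server address
}

- (void)connected
{
    NSLog(@"Connected to the audio server");
}

- (void)animateSoundIndicator:(float)rms
{
    // Drive a level meter or animation with the RMS value from the stream.
}

@end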

I want to stream songs from the device's music library in the same way as above, so I am using the following code:

Output.h

#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>

@class Output;

@protocol OutputDataSource <NSObject>

- (void)readFrames:(UInt32)frames
   audioBufferList:(AudioBufferList *)audioBufferList
    bufferSize:(UInt32 *)bufferSize songData:(NSData *)songData;

@end

@interface Output : NSObject

    @property (strong, nonatomic) id<OutputDataSource> outputDataSource;

    - (void)startOutputUnit;
    - (void)stopOutputUnit;
@end

Output.m

#import "Output.h"
#import "Utilities.m"
#import "AudioServer.h"

static OSStatus OutputRenderCallback (void *inRefCon,
                                  AudioUnitRenderActionFlags    * ioActionFlags,
                                  const AudioTimeStamp * inTimeStamp,
                                  UInt32 inOutputBusNumber,
                                  UInt32 inNumberFrames,
                                  AudioBufferList * ioData)
{

AudioBufferList bufferList;

SInt16 samples[inNumberFrames]; // A large enough size to not have to worry about buffer overrun
memset (&samples, 0, sizeof (samples));

bufferList.mNumberBuffers = 1;
bufferList.mBuffers[0].mData = samples;
bufferList.mBuffers[0].mNumberChannels = 1;
bufferList.mBuffers[0].mDataByteSize = inNumberFrames*sizeof(SInt16);

NSData *dataToSend = [NSData dataWithBytes:bufferList.mBuffers[0].mData length:bufferList.mBuffers[0].mDataByteSize];

Output *self = (__bridge Output*)inRefCon;

if (self.outputDataSource)
{


    if ([self.outputDataSource respondsToSelector:@selector(readFrames:audioBufferList:bufferSize:songData:)])
    {
        @autoreleasepool
        {
            UInt32 bufferSize;
            //[self.outputDataSource readFrames:inNumberFrames audioBufferList:ioData bufferSize:&bufferSize];
            [self.outputDataSource readFrames:inNumberFrames audioBufferList:ioData bufferSize:&bufferSize songData:dataToSend];
        }
    }                       
}

return noErr;
}

@interface Output()
    @property (nonatomic) AudioUnit audioUnit;
@end

@implementation Output

- (id)init
{
self = [super init];
if (!self) {
    return nil;
}

[self createAudioUnit];
return self;
}

#pragma mark - Audio Unit Setup
- (void)createAudioUnit
{
// create a component description
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_RemoteIO;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
// use the description to find the component we're looking for
AudioComponent defaultOutput = AudioComponentFindNext(NULL, &desc);
// create an instance of the component and have our _audioUnit property point to it
CheckError(AudioComponentInstanceNew(defaultOutput, &_audioUnit),
           "AudioComponentInstanceNew Failed");
// describe the output audio format... here we're using LPCM 32 bit floating point samples
AudioStreamBasicDescription outputFormat;
outputFormat.mFormatID = kAudioFormatLinearPCM;
outputFormat.mFormatFlags       = kAudioFormatFlagIsBigEndian | kAudioFormatFlagIsPacked | kAudioFormatFlagIsFloat;
outputFormat.mSampleRate        = 44100;
outputFormat.mChannelsPerFrame  = 2;
outputFormat.mBitsPerChannel    = 32;
outputFormat.mBytesPerPacket    = (outputFormat.mBitsPerChannel / 8) * outputFormat.mChannelsPerFrame;
outputFormat.mFramesPerPacket   = 1;
outputFormat.mBytesPerFrame     = outputFormat.mBytesPerPacket;
// set the audio format on the input scope (kAudioUnitScope_Input) of the output bus (0) of the output unit - got that?
CheckError(AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &outputFormat, sizeof(outputFormat)),
           "AudioUnitSetProperty StreamFormat Failed");
// set up a render callback struct consisting of our output render callback (above) and a reference to self (so we can access our outputDataSource reference from within the callback)
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = OutputRenderCallback;
callbackStruct.inputProcRefCon = (__bridge void*)self;
// add the callback struct to the output unit (again, that's to the input scope of the output bus)
CheckError(AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, &callbackStruct, sizeof(callbackStruct)),
           "AudioUnitSetProperty SetRenderCallback Failed");
// initialize the unit
CheckError(AudioUnitInitialize(_audioUnit),
           "AudioUnitInitializeFailed");
}

#pragma mark - Start/Stop
- (void)startOutputUnit
{
    CheckError(AudioOutputUnitStart(_audioUnit), "Audio Output Unit Failed To Start");
}

- (void)stopOutputUnit
{
    CheckError(AudioOutputUnitStop(_audioUnit), "Audio Output Unit Failed To Stop");
}

@end
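
The Output class only pulls samples through the OutputDataSource protocol; it does not show how the library song is actually decoded. One common approach (an assumption here, not part of the posted code) is to resolve the MPMediaItem's asset URL and decode it to linear PCM with AVAssetReader, for example:

#import <MediaPlayer/MediaPlayer.h>
#import <AVFoundation/AVFoundation.h>

// Hypothetical helper: builds an AVAssetReader that decodes a library song
// to interleaved 16-bit signed integer PCM. The decoded buffers could then
// be handed to the OutputDataSource / sent with writeDataToClients:.
// Note: this yields stereo; the client code above expects mono SInt16,
// so a downmix (or a matching client format) would still be required.
- (AVAssetReader *)readerForMediaItem:(MPMediaItem *)item error:(NSError **)error
{
    NSURL *assetURL = [item valueForProperty:MPMediaItemPropertyAssetURL];
    if (!assetURL) {
        return nil; // DRM-protected items expose no asset URL and cannot be read.
    }

    AVURLAsset *asset = [AVURLAsset URLAssetWithURL:assetURL options:nil];
    AVAssetReader *reader = [AVAssetReader assetReaderWithAsset:asset error:error];
    if (!reader) {
        return nil;
    }

    NSDictionary *settings = @{
        AVFormatIDKey:               @(kAudioFormatLinearPCM),
        AVSampleRateKey:             @44100.0,
        AVNumberOfChannelsKey:       @2,
        AVLinearPCMBitDepthKey:      @16,
        AVLinearPCMIsFloatKey:       @NO,
        AVLinearPCMIsBigEndianKey:   @NO,
        AVLinearPCMIsNonInterleaved: @NO
    };

    AVAssetTrack *track = [[asset tracksWithMediaType:AVMediaTypeAudio] firstObject];
    AVAssetReaderTrackOutput *output =
        [AVAssetReaderTrackOutput assetReaderTrackOutputWithTrack:track outputSettings:settings];
    [reader addOutput:output];

    // The caller starts decoding with [reader startReading] and pulls data
    // with [output copyNextSampleBuffer] in a loop.
    return reader;
}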

The problem is that the client receives the song data but does not play it, and I don't know what I am doing wrong. Please correct me; I am stuck here.

0 Answers:

There are no answers yet.