Sending audio to a web server using Audio Queues in iOS

Date: 2016-05-03 13:39:10

Tags: ios audioqueue data-stream

Hi, I have been trying to implement an app that streams audio recorded on an iOS device to a web service. I want the audio to be sent live, so I implemented it with Audio Queue Services. I can record audio, and AudioInputCallback gets called, but the request that should send the data never fires.

Please help me figure this out. I'm posting my sample code here.
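
For context, the code below refers to a `RecordState` struct, `NUM_BUFFERS`, and `refToSelf` that the post doesn't show; here is a minimal sketch of what they might look like, reconstructed purely from how the code uses them:

    // Sketch of the declarations the code below assumes (not part of the
    // original post; names and types reconstructed from usage).
    #import <AudioToolbox/AudioToolbox.h>

    #define NUM_BUFFERS 3

    typedef struct {
        AudioStreamBasicDescription dataFormat;
        AudioQueueRef               queue;
        AudioQueueBufferRef         buffers[NUM_BUFFERS];
        bool                        recording;
        SInt64                      currentPacket;
    } RecordState;

    static RecordState recordState;
    static void *refToSelf; // set to (__bridge void *)self, e.g. in viewDidLoad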

    - (void)setupAudioFormat:(AudioStreamBasicDescription *)format {
        // 16 kHz mono 16-bit signed-integer PCM, matching the
        // "audio/l16;rate=16000" Content-Type sent to the server.
        format->mSampleRate       = 16000.0;
        format->mFormatID         = kAudioFormatLinearPCM;
        format->mFormatFlags      = kLinearPCMFormatFlagIsSignedInteger |
                                    kLinearPCMFormatFlagIsPacked;
        format->mFramesPerPacket  = 1;
        format->mChannelsPerFrame = 1;
        format->mBitsPerChannel   = 16;
        format->mBytesPerFrame    = sizeof(SInt16); // 2 bytes: 1 channel * 16 bits
        format->mBytesPerPacket   = sizeof(SInt16); // 1 frame per packet
    }

    - (void)startRecording {
        [self setupAudioFormat:&recordState.dataFormat];

        recordState.currentPacket = 0;
        NSLog(@"Started recording");

        // Callbacks are delivered on the current run loop, so this thread's
        // run loop must keep running while recording.
        OSStatus status = AudioQueueNewInput(&recordState.dataFormat,
                                             AudioInputCallback,
                                             &recordState,
                                             CFRunLoopGetCurrent(),
                                             kCFRunLoopCommonModes,
                                             0,
                                             &recordState.queue);
        if (status == noErr) {
            for (int i = 0; i < NUM_BUFFERS; i++) {
                NSLog(@"Allocated buffer %d", i);
                // 256 bytes = 128 samples = 8 ms of 16 kHz 16-bit mono audio.
                AudioQueueAllocateBuffer(recordState.queue, 256, &recordState.buffers[i]);
                AudioQueueEnqueueBuffer(recordState.queue, recordState.buffers[i], 0, NULL);
            }

            recordState.recording = true;
            status = AudioQueueStart(recordState.queue, NULL);
        }
    }
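
One thing worth checking before starting the queue: recording on iOS needs an active AVAudioSession with microphone permission, or the queue may run without delivering usable input. A minimal sketch, assuming it runs before startRecording:

    // Sketch: activate a record-capable audio session and request microphone
    // permission before starting the audio queue.
    // Requires: #import <AVFoundation/AVFoundation.h>
    AVAudioSession *session = [AVAudioSession sharedInstance];
    NSError *error = nil;
    [session setCategory:AVAudioSessionCategoryRecord error:&error];
    [session setActive:YES error:&error];
    [session requestRecordPermission:^(BOOL granted) {
        if (granted) {
            // Safe to call startRecording from here.
        }
    }];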
    // This is the callback function
    void AudioInputCallback(void *inUserData, // custom user data (RecordState)
                            AudioQueueRef inAQ,
                            AudioQueueBufferRef inBuffer,
                            const AudioTimeStamp *inStartTime,
                            UInt32 inNumberPacketDescriptions,
                            const AudioStreamPacketDescription *inPacketDescs) {
        NSLog(@"Audio input called");
        RecordState *recordState = (RecordState *)inUserData;
        recordState->currentPacket += inNumberPacketDescriptions;

        SInt16 *samples = (SInt16 *)inBuffer->mAudioData;
        // Do something with the samples here if needed.

        // Copy the whole buffer (mAudioDataByteSize bytes, not sizeof(float))
        // before handing the buffer back to the queue for reuse.
        NSData *data = [NSData dataWithBytes:samples
                                      length:inBuffer->mAudioDataByteSize];
        AudioQueueEnqueueBuffer(recordState->queue, inBuffer, 0, NULL);

        // Create the request.
        NSMutableURLRequest *request = [NSMutableURLRequest requestWithURL:[NSURL URLWithString:@"https://stream.watsonplatform.net/speech-to-text/api/v1/recognize"]];
        NSString *loginString = @"e718d0a1-11e0-401c-a430-b552d692f04b:uXqASpwQDuHc";
        NSData *loginData = [loginString dataUsingEncoding:NSUTF8StringEncoding];
        // HTTP Basic auth: "Basic " followed by base64(user:password).
        NSString *base64LoginString = [NSString stringWithFormat:@"Basic %@",
                                       [loginData base64EncodedStringWithOptions:0]];

        request.HTTPMethod = @"POST";
        [request setValue:base64LoginString forHTTPHeaderField:@"Authorization"];
        [request setValue:@"audio/l16;rate=16000" forHTTPHeaderField:@"Content-Type"];
        request.HTTPBody = data;

        // Create URL connection and fire request.
        ViewController *recorder = (__bridge ViewController *)refToSelf;
        NSURLConnection *conn = [[NSURLConnection alloc] initWithRequest:request delegate:recorder];
    }
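
One possible reason the request never fires: the NSURLConnection is created on the thread driving the audio queue's callbacks, and a connection created this way is driven by that thread's run loop, which may not be servicing it; the `conn` object is also never touched again. NSURLSession schedules its work on its own queue and avoids that dependency. A sketch that would replace the last two lines above, reusing the same `request`:

    // Sketch: NSURLSession runs on its own background queue, so it does not
    // depend on the callback thread's run loop.
    NSURLSessionDataTask *task =
        [[NSURLSession sharedSession] dataTaskWithRequest:request
                                        completionHandler:^(NSData *body,
                                                            NSURLResponse *response,
                                                            NSError *error) {
            if (error != nil) {
                NSLog(@"Upload failed: %@", error);
            } else {
                NSLog(@"Response: %@", response);
            }
        }];
    [task resume];

Note also that each 256-byte buffer is only about 8 ms of audio, so this fires over a hundred POSTs per second, each carrying a tiny fragment; for genuinely live transcription, accumulating larger chunks per request, or using Watson Speech to Text's streaming (WebSocket) interface, is likely a better fit.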

0 Answers:

No answers yet.