I've been working on an Alexa app for iOS for a while, but I'm having trouble sending the microphone audio to the AVS API as a stream.
I did manage to pre-record an audio sample, send it as a whole, and get a response back.
I just want to know how to stream the data to AVS over an NSURLSession HTTP/2 connection.
Roughly, what I'm doing is calling a sendData method for every 320 bytes of audio data, since that is the chunk size Amazon recommends for streaming :)
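For reference, a per-chunk sender along these lines might look like the sketch below; the function name and parameters are illustrative placeholders, not the project's actual code. It assumes outputStream is the write end of a stream pair already attached to an open AVS request:

import Foundation

// Hypothetical sketch of a per-chunk sender. 320 bytes of 16-bit,
// 16 kHz mono PCM is roughly 10 ms of audio.
func sendData(_ chunk: Data, to outputStream: OutputStream) {
    chunk.withUnsafeBytes { (raw: UnsafeRawBufferPointer) in
        guard let base = raw.bindMemory(to: UInt8.self).baseAddress else { return }
        var written = 0
        // OutputStream.write may accept fewer bytes than offered, so loop
        // until the whole chunk has been handed off.
        while written < chunk.count {
            let n = outputStream.write(base + written, maxLength: chunk.count - written)
            if n <= 0 { return } // stream closed or errored
            written += n
        }
    }
}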
Cheers!
Answer 0 (score: 0)
You should send the JSON metadata headers only once, at the start of a speech request (i.e., the moment the microphone opens and recording begins).
You also want to use the same boundary value every time you call the sendData method for the same stream. Use the same HTTP/2 stream for the whole request, which means you'll need to restructure sendData internally to accommodate that. An example using uploadTask:withStreamedRequest might help (you will probably need to use it).
I'm not familiar with the Swift HTTP/2 APIs, so I don't know whether continuation frames are handled for you or whether you need to manage them yourself; that's something to watch out for. Good luck, and I hope this helps.
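To make the "metadata once, same boundary throughout" point concrete, here is a sketch of the multipart layout for a single Recognize request; the boundary value is a placeholder, and the part names follow the AVS SpeechRecognizer documentation:

// Sketch of the multipart body for one AVS Recognize request.
// The boundary is arbitrary but must match the Content-Type header
// and stay the same for the entire stream.
let boundary = "example-boundary" // placeholder value

var preamble = ""
preamble += "--\(boundary)\r\n"
preamble += "Content-Disposition: form-data; name=\"metadata\"\r\n"
preamble += "Content-Type: application/json; charset=UTF-8\r\n\r\n"
// ...the Recognize event JSON goes here, exactly once per request...
preamble += "\r\n--\(boundary)\r\n"
preamble += "Content-Disposition: form-data; name=\"audio\"\r\n"
preamble += "Content-Type: application/octet-stream\r\n\r\n"
// ...320-byte audio chunks follow on the same HTTP/2 stream, and the
// request ends with the closing "--\(boundary)--" terminator.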
Answer 1 (score: 0)
Like this:
public func send(event: AlexaEvent?) {
    self.queue.async { [weak self] in
        guard let self = self else { return }
        let urlStr = self.host.appending(AlexaConstant.ServiceUrl.eventsURL)
        var eventRequest = URLRequest(url: URL(string: urlStr)!)
        eventRequest.httpMethod = "POST"
        eventRequest.setValue("multipart/form-data; boundary=\(AlexaConstant.HttpBodyData.boundary)",
                              forHTTPHeaderField: "Content-Type")
        self.addAuthHeader(request: &eventRequest)
        guard event?.HTTPBodyData != nil else { return } // bail if the event can't produce a body
        // Don't set httpBody/httpBodyStream here: for a streamed upload task
        // they are ignored, and the session instead asks its delegate for the
        // body via urlSession(_:task:needNewBodyStream:).
        guard let task = self.session?.uploadTask(withStreamedRequest: eventRequest) else { return }
        self.state = .started(.init(task: task))
        task.resume()
    }
}
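For the needNewBodyStream callback to fire at all, self.session has to be created with this object as its delegate. A minimal sketch, e.g. in the class's initializer, assuming the class adopts URLSessionTaskDelegate:

// Build the session once with `self` as delegate so that
// urlSession(_:task:needNewBodyStream:) below is actually delivered.
self.session = URLSession(configuration: .default,
                          delegate: self,
                          delegateQueue: nil)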
Then bind the input and output streams in the URLSessionTaskDelegate callback:
func urlSession(_ session: URLSession, task: URLSessionTask,
                needNewBodyStream completionHandler: @escaping (InputStream?) -> Void) {
    let sendTimer = Timer(timeInterval: TimeInterval(1), target: self,
                          selector: #selector(didFire(sendTimer:)), userInfo: nil, repeats: true)
    let streamingState = StreamingState(task: task, sendTimer: sendTimer)
    self.state = .streaming(streamingState)

    // Build the multipart preamble: the JSON metadata part (sent once),
    // followed by the headers of the audio part.
    var bodyData = Data()
    let data = AlexaHttpBodyData.jsonContent()
    guard let jsonObj = try? JSONSerialization.jsonObject(with: data, options: []),
          let valueData = try? JSONSerialization.data(withJSONObject: jsonObj, options: []) else {
        completionHandler(nil) // hand the session a nil stream instead of silently hanging
        return
    }
    bodyData.append(AlexaHttpBodyData.boundaryBegin)
    bodyData.append(AlexaHttpBodyData.jsonHeaders)
    bodyData.append(AlexaHttpBodyData.jsonContent(data: valueData))
    bodyData.append(AlexaHttpBodyData.boundaryBegin)
    bodyData.append(AlexaHttpBodyData.AudioHeaders)

    // Bound stream pair: bytes written to outputStream become readable
    // by URLSession on bodyStream.
    let streams = Stream.boundPair(bufferSize: BufferSize)
    self.bodyStream = streams.inputStream
    self.outputStream = streams.outputStream
    outputStream?.delegate = self
    outputStream?.schedule(in: .current, forMode: .default)
    outputStream?.open()

    // Hand the read end to URLSession before writing anything.
    completionHandler(self.bodyStream)

    // Write the preamble, one audio chunk, and the closing boundary.
    // (A real implementation would keep writing chunks from the stream
    // delegate instead of finishing the body here.)
    _ = outputStream?.write(Array(bodyData), maxLength: bodyData.count)
    let chunk = self.audioQueue.dequeue()
    _ = outputStream?.write(Array(chunk), maxLength: chunk.count)
    let rnData = "\r\n".data(using: .utf8)!
    _ = outputStream?.write(Array(rnData), maxLength: rnData.count)
    _ = outputStream?.write(Array(AlexaHttpBodyData.boundaryEnd),
                            maxLength: AlexaHttpBodyData.boundaryEnd.count)
    stop(error: nil)
}
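Because outputStream?.delegate = self is set above, the natural place to keep feeding 320-byte chunks for the lifetime of the request is the StreamDelegate callback rather than one-shot writes. A sketch, assuming audioQueue.dequeue() returns Data (as in the code above) and the enclosing class adopts StreamDelegate:

func stream(_ aStream: Stream, handle eventCode: Stream.Event) {
    switch eventCode {
    case .hasSpaceAvailable:
        // Write the next audio chunk whenever the bound pair has room.
        let chunk = self.audioQueue.dequeue()
        _ = self.outputStream?.write(Array(chunk), maxLength: chunk.count)
    case .errorOccurred, .endEncountered:
        self.stop(error: aStream.streamError)
    default:
        break
    }
}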
extension Stream {
    // Convenience wrapper around getBoundStreams: bytes written to the
    // returned output stream become readable on the returned input stream.
    // (No input stream needs to be passed in; getBoundStreams creates both.)
    static func boundPair(bufferSize: Int) -> (inputStream: InputStream?, outputStream: OutputStream?) {
        var inStream: InputStream?
        var outStream: OutputStream?
        Stream.getBoundStreams(withBufferSize: bufferSize, inputStream: &inStream, outputStream: &outStream)
        return (inStream, outStream)
    }
}