class VoiceRecogViewController: UIViewController, AVAudioPlayerDelegate, AVAudioRecorderDelegate {
var audioPlayer: AVAudioPlayer?
var audioRecorder: AVAudioRecorder?
var error: NSError?
var soundFileURL: URL?
var soundFilePath: String = ""
var data : NSData?
@IBOutlet weak var startRocordButton: UIButton!
@IBOutlet weak var stopRecordButton: UIButton!
@IBOutlet weak var playRecordButton: UIButton!
@IBOutlet weak var continueButton: UIButton!
override func viewDidLoad() {
super.viewDidLoad()
playRecordButton.isEnabled = false
stopRecordButton.isEnabled = false
let dirPaths =
NSSearchPathForDirectoriesInDomains(.documentDirectory,
.userDomainMask, true)
let docsDir = dirPaths[0]
soundFilePath = (docsDir as NSString).appendingPathComponent("sound.wav")
soundFileURL = URL(fileURLWithPath: soundFilePath)
let recordSettings = [AVEncoderAudioQualityKey: AVAudioQuality.min.rawValue,
AVEncoderBitRateKey: 16,
AVNumberOfChannelsKey: 2,
AVSampleRateKey: 44100.0] as [String : Any]
let audioSession = AVAudioSession.sharedInstance()
try! audioSession.setCategory(AVAudioSessionCategoryPlayAndRecord, with: [])
try! audioSession.setActive(true)
if let err = error {
print("audioSession error: \(err.localizedDescription)")
}
do {
audioRecorder = try AVAudioRecorder(url: soundFileURL!,
settings: recordSettings as [String : AnyObject])
} catch {
audioRecorder = nil
}
audioRecorder?.prepareToRecord()
// Do any additional setup after loading the view.
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
@IBAction func startRecord(_ sender: AnyObject) {
if audioRecorder?.isRecording == false {
playRecordButton.isEnabled = false
startRocordButton.isEnabled = false
stopRecordButton.isEnabled = true
audioRecorder?.record()
}
}
@IBAction func stopRecord(_ sender: AnyObject) {
stopRecordButton.isEnabled = false
playRecordButton.isEnabled = true
startRocordButton.isEnabled = true
if audioRecorder?.isRecording == true {
audioRecorder?.stop()
} else {
audioPlayer?.stop()
}
}
@IBAction func playRecord(_ sender: AnyObject) {
if audioRecorder?.isRecording == false {
stopRecordButton.isEnabled = true
startRocordButton.isEnabled = false
}
do {
try audioPlayer = AVAudioPlayer(contentsOf: soundFileURL!)
audioPlayer?.delegate = self
audioPlayer?.prepareToPlay()
audioPlayer?.play()
} catch {
print("audioPlayer error")
}
}
@IBAction func continueRegist(_ sender: AnyObject) {
let headers: HTTPHeaders = ["Authorization": "Token ___(**token**)_____",
"Accept": "application/json"]
data = NSData (contentsOf: soundFileURL!)
let parameters: Parameters = ["from_account_id": "3",
"to_account_id": "4",
"file": data!,
]
let URL = "http://leaofımjpüsmfweüdıpckfw"
Alamofire.request(URL, method: .put, parameters: parameters, headers: headers).responseJSON { response in
if let data = response.result.value {
print(data)
}
}
}
func audioPlayerDidFinishPlaying(_ player: AVAudioPlayer, successfully flag: Bool) {
startRocordButton.isEnabled = true
stopRecordButton.isEnabled = false
}
func audioPlayerDecodeErrorDidOccur(_ player: AVAudioPlayer, error: Error?) {
print("Audio Play Decode Error")
}
func audioRecorderDidFinishRecording(_ recorder: AVAudioRecorder, successfully flag: Bool) {
}
func audioRecorderEncodeErrorDidOccur(_ recorder: AVAudioRecorder, error: Error?) {
print("Audio Record Encode Error")
}
我在上面分享了我的代码。这里的重点是录制音频为.wav,在应用程序中播放,在continueRegist部分我想用alamofire调用put方法并将音频上传到我们的amazons3server。录制和播放音频部分完全正常。问题出在continueRegist方法中。调用方法后,我得到了正确的响应,看起来很成功。然后,我从我们的s3服务器的url检查它。音频似乎已经上传,但是当我下载并播放它时,它无法正常工作。我无法弄清楚问题出在哪里。
此外,当我尝试从Postman上传选择文件并提供正确的表单数据信息时,我可以听到我上传到s3服务器的声音。这可能有什么问题?
以下您可以通过Postman找到我的请求:
我在拍摄截图时忘了选择文件,但它只是一个.wav文件。
请不要犹豫,问我那些不满足你的问题。
希望你能帮助我。
谢谢!
答案 0 :(得分:3)
问题在于您的Alamofire请求:您正在使用JSON中的音频数据构建JSON。但是,您可以在Postman中检查请求是多部分表单数据的HTTP代码(右上角/下发送)。
如何实施多部分Alamofire:
它应该类似于我对appendBodyPart
语句不确定的内容。他们取决于你的情况
let audioData: NSData = ...//should be loaded from the file
Alamofire.Manager.upload(.PUT,
URL,
headers: headers,
multipartFormData: { multipartFormData in
multipartFormData.appendBodyPart(data: "3".dataUsingEncoding(NSUTF8StringEncoding), name: "from_account_id")
multipartFormData.appendBodyPart(data: "4".dataUsingEncoding(NSUTF8StringEncoding), name: "to_account_id")
multipartFormData.appendBodyPart(data: audioData, name: "file", fileName: "file", mimeType: "application/octet-stream")
},
encodingCompletion: { encodingResult in
switch encodingResult {
case .Success(let upload, _, _):
upload.responseJSON { response in
}
case .Failure(let encodingError):
// Error while encoding request:
}
})
答案 1 :(得分:0)
以下是上传音频文件的代码:
let uploadAudioURL = "http://<your post API url>"
let header : [String:String] = [
"Authorization" : "<your authorisation token>"
]
let voiceData = try? Data(contentsOf: <url of audio file to upload>)
let params : [String:String] = [
"length" : "39000",
"title" : "Trying to upload",
]
Alamofire.upload(
multipartFormData: { (multipartFormData) in
multipartFormData.append(voiceData!, withName: "voice", fileName: "TempRecordedAudioFile", mimeType: "audio/m4a")
for (key, value) in params {
multipartFormData.append(value.data(using: String.Encoding.utf8)!, withName: key)
}
},
usingThreshold : SessionManager.multipartFormDataEncodingMemoryThreshold,
to : uploadAudioURL,
method: .post,
headers: header){ (result) in
switch result {
case .success(let upload, _, _):
upload.uploadProgress(closure: { (Progress) in
print("Upload Progress: \(Progress.fractionCompleted)")
})
upload.responseJSON { response in
if let JSON = response.result.value {
print("Response : ",JSON)
}
}
case .failure(let encodingError):
print(encodingError)
}
}
希望它会有所帮助。
使用上面的代码段要注意的一些要点:
withName
设置为您的上传API参数名称。我使用voice
,您可能有不同的参数名称。upload.uploadProgress()
是可选的。如果您不需要,可以将其删除。