I'm trying to upload a file to my AWS bucket using AWS multipart upload. This works for smaller files, but now that I'm trying to upload a large file (split into 170 parts), I get the following error:
multiErr, upload part error: { [RequestTimeTooSkewed: The difference between the request time and the current time is too large.]
or this error:
multiErr, upload part error: { [TimeoutError: Connection timed out after 120000ms]
Any idea how to solve this? Here is my code:
var fs = require('fs');
var AWS = require('aws-sdk');
AWS.config.loadFromPath('./config.json');
var s3 = new AWS.S3();
// File
var fileName = 'atom.mov';
var filePath = './' + fileName;
var fileKey = fileName;
var buffer = fs.readFileSync(filePath);
// S3 Upload options
var bucket = 'test.bucket.1234';
// Upload
var startTime = new Date();
var partNum = 0;
var partSize = 1024 * 1024 * 5; // Minimum 5MB per chunk (except the last part) http://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadComplete.html
var numPartsLeft = Math.ceil(buffer.length / partSize);
var maxUploadTries = 3;
var multiPartParams = {
  Bucket: bucket,
  Key: fileKey,
  ContentType: 'application/mov'
};
var multipartMap = {
  Parts: []
};
function completeMultipartUpload(s3, doneParams) {
  s3.completeMultipartUpload(doneParams, function(err, data) {
    if (err) {
      console.log("An error occurred while completing the multipart upload");
      console.log(err);
    } else {
      var delta = (new Date() - startTime) / 1000;
      console.log('Completed upload in', delta, 'seconds');
      console.log('Final upload data:', data);
    }
  });
}
function uploadPart(s3, multipart, partParams, tryNum) {
  tryNum = tryNum || 1;
  s3.uploadPart(partParams, function(multiErr, mData) {
    if (multiErr) {
      console.log('multiErr, upload part error:', multiErr);
      if (tryNum < maxUploadTries) {
        console.log('Retrying upload of part: #', partParams.PartNumber);
        uploadPart(s3, multipart, partParams, tryNum + 1);
      } else {
        console.log('Failed uploading part: #', partParams.PartNumber);
      }
      return;
    }
    // Record the ETag for this part so completeMultipartUpload can reference it
    multipartMap.Parts[this.request.params.PartNumber - 1] = {
      ETag: mData.ETag,
      PartNumber: Number(this.request.params.PartNumber)
    };
    console.log("Completed part", this.request.params.PartNumber);
    console.log('mData', mData);
    if (--numPartsLeft > 0) return; // complete only when all parts are uploaded

    var doneParams = {
      Bucket: bucket,
      Key: fileKey,
      MultipartUpload: multipartMap,
      UploadId: multipart.UploadId
    };

    console.log("Completing upload...");
    completeMultipartUpload(s3, doneParams);
  });
}
// Multipart
console.log("Creating multipart upload for:", fileKey);
s3.createMultipartUpload(multiPartParams, function(mpErr, multipart) {
  if (mpErr) { console.log('Error!', mpErr); return; }
  console.log("Got upload ID", multipart.UploadId);

  // Grab each partSize chunk and upload it as a part
  for (var rangeStart = 0; rangeStart < buffer.length; rangeStart += partSize) {
    partNum++;
    var end = Math.min(rangeStart + partSize, buffer.length),
        partParams = {
          Body: buffer.slice(rangeStart, end),
          Bucket: bucket,
          Key: fileKey,
          PartNumber: String(partNum),
          UploadId: multipart.UploadId
        };

    // Send a single part
    console.log('Uploading part: #', partParams.PartNumber, ', Range start:', rangeStart);
    uploadPart(s3, multipart, partParams);
  }
});
Answer 0 (score: 0)
Try setting the timeout property in the config. This works for my 500MB+ files; 30 minutes may be overkill, but it works ;)
var aws = require('aws-sdk');
aws.config.update({
  region: "your region",
  accessKeyId: "your access key",
  secretAccessKey: "your secret",
  httpOptions: { timeout: 1800000 } // 30 minutes, in milliseconds
});
Answer 1 (score: 0)
s3.upload has a default timeout of 120000 ms (two minutes). You may want to increase it when uploading large files or when your connection is poor.
Set the default timeout using the S3 constructor:
const s3 = new AWS.S3({
  httpOptions: {
    timeout: 300000 // 5 minutes, in milliseconds
  }
  // ... other config
});
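For reference, a minimal sketch of calling s3.upload with a client configured this way (it reuses the bucket and file names from the question, and the partSize/queueSize values are only illustrative); s3.upload splits the file into parts and uploads them for you, so there is no need to track part numbers and ETags by hand:

var fs = require('fs');

var uploadParams = {
  Bucket: 'test.bucket.1234',              // bucket name from the question
  Key: 'atom.mov',                          // object key from the question
  Body: fs.createReadStream('./atom.mov')   // stream the file instead of buffering it all in memory
};

// partSize: chunk size in bytes (5 MB is the S3 minimum); queueSize: how many parts upload in parallel
s3.upload(uploadParams, { partSize: 5 * 1024 * 1024, queueSize: 4 }, function(err, data) {
  if (err) {
    console.log('Upload failed:', err);
    return;
  }
  console.log('Upload finished:', data.Location);
});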