I have an S3 bucket that users upload very large files to (1–10 GB). I then process the files and copy them out of the upload folder. This works fine for small files, but for files larger than 5 GB I get the following error:
The specified copy source is larger than the maximum allowable size for a copy source: 5368709120 (AWS::S3::Errors::InvalidRequest)
I originally wanted to copy the processed files to a separate bucket, but I'd be fine with keeping them in the same bucket under a different directory. I just need to get them out of the upload folder so users don't touch them (I want to archive them and eventually move them to Glacier as a very slow background process).
Thanks in advance!
Answer 0 (score: 3)
You can do this with a multipart copy. How exactly depends on the language and API you are using.
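For example, with the AWS SDK for JavaScript (v2) the flow is: start a multipart upload on the destination key, issue one UploadPartCopy request per byte range of the source, then complete the upload. A minimal sketch, assuming made-up bucket/key names and a 100 MB part size, with retries and error handling omitted:

// Sketch only: copies the source object in ranged parts via UploadPartCopy.
const AWS = require('aws-sdk');
const s3 = new AWS.S3();

async function multipartCopy(srcBucket, srcKey, dstBucket, dstKey) {
    // Size of the source object, needed to compute the byte ranges.
    const { ContentLength: size } = await s3.headObject({ Bucket: srcBucket, Key: srcKey }).promise();
    const partSize = 100 * 1024 * 1024; // every part except the last must be at least 5 MB

    const { UploadId } = await s3.createMultipartUpload({ Bucket: dstBucket, Key: dstKey }).promise();

    const parts = [];
    for (let start = 0, partNumber = 1; start < size; start += partSize, partNumber++) {
        const end = Math.min(start + partSize, size) - 1;
        const res = await s3.uploadPartCopy({
            Bucket: dstBucket,
            Key: dstKey,
            UploadId,
            PartNumber: partNumber,
            CopySource: `${srcBucket}/${srcKey}`,
            CopySourceRange: `bytes=${start}-${end}`
        }).promise();
        parts.push({ PartNumber: partNumber, ETag: res.CopyPartResult.ETag });
    }

    // Assemble the copied parts into the final destination object.
    await s3.completeMultipartUpload({
        Bucket: dstBucket,
        Key: dstKey,
        UploadId,
        MultipartUpload: { Parts: parts }
    }).promise();
}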
Answer 1 (score: 0)
Nowadays all you need to do is use a CLI command like this:
aws s3 cp s3://<source> s3://<destination>
It lets you operate on files larger than 5 GB. Another approach is described here: https://aws.amazon.com/ru/premiumsupport/knowledge-center/s3-multipart-upload-cli/
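For the original use case (getting processed files out of the upload prefix within the same bucket), a move works the same way; the bucket and key names below are just placeholders:

aws s3 mv s3://my-bucket/uploads/huge-file.bin s3://my-bucket/processed/huge-file.bin

The high-level s3 commands perform a multipart copy automatically for large objects, so the 5 GB limit of a single CopyObject request does not apply.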
Answer 2 (score: 0)
I wrote this function in a NodeJS project to copy files larger than 5 GB between buckets, and it should work for your use case as well (originally adapted from this gist).
function copyS3MP(from_bucket, from_key, to_bucket, to_key) {
    const AWS = require('aws-sdk');
    AWS.config.update({region: 'us-west-2'});

    let s3 = new AWS.S3();

    let head, uploadId, numParts, fileSize;
    let startTime = new Date();
    let partNum = 0;
    let partSize = 1024 * 1024 * 10; // 10 MB parts; every part except the last must be at least 5 MB
    let maxUploadTries = 3;

    let multiPartParams = {
        Bucket: to_bucket,
        Key: to_key,
        ContentType: getContentType(to_key) // helper (not shown) that maps the key to a MIME type
    };

    let multipartMap = {
        Parts: []
    };

    // Fetch the source object's metadata so we know its total size.
    function getHead() {
        return new Promise(async (resolve, reject) => {
            try {
                const h = await s3.headObject({
                    Bucket: from_bucket,
                    Key: from_key
                }).promise();
                resolve(h);
            } catch (e) {
                reject(e);
            }
        });
    }

    // Start the multipart upload on the destination and return its UploadId.
    function createMultipartUpload() {
        return new Promise(async (resolve, reject) => {
            try {
                s3.createMultipartUpload(multiPartParams, function(mpErr, multipart) {
                    if (mpErr) {
                        console.error(mpErr);
                        return reject(mpErr);
                    }
                    console.log('Got upload ID', multipart.UploadId);
                    return resolve(multipart.UploadId);
                });
            } catch (e) {
                reject(e);
            }
        });
    }

    // Copy one byte range of the source into the destination upload, with retries.
    function copyPart(start, partNum) {
        let tryNum = 1;

        function copyLogic(copyParams) {
            return new Promise((resolve, reject) => {
                s3.uploadPartCopy(copyParams, function(multiErr, mData) {
                    if (multiErr) {
                        console.log('Upload part error:', multiErr);
                        return reject(multiErr);
                    } else {
                        // UploadPartCopy returns the ETag nested under CopyPartResult.
                        multipartMap.Parts[this.request.params.PartNumber - 1] = {
                            ETag: mData.CopyPartResult.ETag,
                            PartNumber: Number(this.request.params.PartNumber)
                        };
                        console.log('Completed part', this.request.params.PartNumber);
                        console.log('mData', mData);
                        return resolve();
                    }
                }).on('httpUploadProgress', function(progress) { console.log(Math.round(progress.loaded / progress.total * 100) + '% done'); });
            });
        }

        return new Promise(async (resolve, reject) => {
            let end = Math.min(start + partSize, fileSize);
            try {
                let partParams = {
                    Bucket: to_bucket,
                    Key: to_key,
                    PartNumber: String(partNum),
                    UploadId: uploadId,
                    CopySource: `${from_bucket}/${from_key}`,
                    CopySourceRange: `bytes=${start}-${end - 1}`
                };
                while (tryNum <= maxUploadTries) {
                    try {
                        await copyLogic(partParams);
                        return resolve();
                    } catch (e) {
                        tryNum++;
                        if (tryNum <= maxUploadTries) {
                            console.log('Retrying copy of part: #', partParams.PartNumber);
                            await new Promise((r) => setTimeout(r, 1000)); // wait a second before retrying (replaces an external sleep helper)
                        } else {
                            console.log('Failed uploading part: #', partParams.PartNumber);
                            return reject(e);
                        }
                    }
                }
                resolve();
            } catch (e) {
                return reject(e);
            }
        });
    }

    // Tell S3 to assemble the copied parts into the final destination object.
    function completeMultipartUpload() {
        return new Promise((resolve, reject) => {
            let doneParams = {
                Bucket: to_bucket,
                Key: to_key,
                MultipartUpload: multipartMap,
                UploadId: uploadId
            };
            s3.completeMultipartUpload(doneParams, function(err, data) {
                if (err) {
                    return reject(err);
                }
                var delta = (new Date() - startTime) / 1000;
                console.log('Completed upload in', delta, 'seconds');
                console.log('Final upload data:', data);
                return resolve();
            });
        });
    }

    return new Promise(async (resolve, reject) => {
        try {
            head = await getHead();
            fileSize = head.ContentLength;
        } catch (e) {
            return reject(e);
        }

        numParts = Math.ceil(fileSize / partSize);
        console.log('Creating multipart upload for:', to_key);

        try {
            uploadId = await createMultipartUpload();
        } catch (e) {
            return reject(e);
        }

        // Copy the source one part at a time.
        for (let start = 0; start < fileSize; start += partSize) {
            partNum++;
            console.log("Part Num: " + partNum);
            try {
                await copyPart(start, partNum);
            } catch (e) {
                console.error(e);
                return reject(e);
            }
        }

        try {
            await completeMultipartUpload();
        } catch (e) {
            return reject(e);
        }

        resolve();
    });
}
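Calling it would look something like this (bucket and key names are made up); since S3 has no native move operation, you would delete the source key yourself once the copy resolves:

copyS3MP('my-bucket', 'uploads/huge-file.bin', 'my-bucket', 'processed/huge-file.bin')
    .then(() => console.log('Copy finished'))
    .catch((err) => console.error('Copy failed:', err));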