In Node.js I'm receiving a large file upload in chunks, and each chunk saves part of the file. I want to turn the data into a Buffer with new Buffer() and then upload it to Amazon S3. That works when there is only one chunk, but when there are several I can't figure out how to build the Buffer.
My current workaround is to write the chunks to a real file on my own server and then send the PATH of that file to Amazon S3.
How can I skip the file-creation step and send a buffer to Amazon S3 directly?
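For the multiple-chunk case described above, Buffer.concat can join the collected chunks into a single Buffer. Below is a minimal sketch; it assumes the chunks arrive as Buffer objects, and the aws-sdk call, bucket name, and key are placeholders that are not part of the original post:

var AWS = require('aws-sdk');   // assumption: aws-sdk v2, not mentioned in the question

var chunks = [];                // collect each incoming chunk (a Buffer) here
// ... inside your upload handler:
//   chunks.push(chunk);

// Once all chunks have arrived, join them into one Buffer:
var body = Buffer.concat(chunks);

// The buffer can then be sent to S3 without touching disk.
// Bucket, key, and credential configuration below are placeholders.
var s3 = new AWS.S3();
s3.upload({ Bucket: 'my-bucket', Key: 'uploads/my-file.bin', Body: body }, function (err, data) {
  if (err) return console.error(err);
  console.log('Uploaded to', data.Location);
});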
Answer 0 (score: 0)
I think you need to use the streaming-s3 module:
var streamingS3 = require('streaming-s3');

var uploadFile = function (fileReadStream, awsHeader, cb) {
  // set options for the streaming module
  var options = {
    concurrentParts: 2,
    waitTime: 20000,
    retries: 2,
    maxPartSize: 10 * 1024 * 1024
  };

  // create the uploader that streams the file to S3
  // (`aws` is expected to hold your access and secret keys)
  var uploader = new streamingS3(fileReadStream, aws.accessKey, aws.secretKey, awsHeader, options);

  // start uploading (important if a callback is not provided)
  uploader.begin();

  // handle progress and completion events
  uploader.on('data', function (bytesRead) {
    console.log(bytesRead, ' bytes read.');
  });

  uploader.on('part', function (number) {
    console.log('Part ', number, ' uploaded.');
  });

  // all parts uploaded, but the upload is not yet acknowledged
  uploader.on('uploaded', function (stats) {
    console.log('Upload stats: ', stats);
  });

  uploader.on('finished', function (response, stats) {
    console.log(response);
    cb(null, response);
  });

  uploader.on('error', function (err) {
    console.log('Upload error: ', err);
    cb(err);
  });
};
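To avoid writing a temporary file at all, you can feed the incoming chunks into a PassThrough stream and hand that stream to uploadFile. This is a hedged sketch, not from the original answer; the bucket name, key, and content type are placeholders:

var stream = require('stream');

// Collect the incoming upload chunks into a pass-through stream
// instead of writing them to a file on disk.
var pass = new stream.PassThrough();

// Wherever your upload chunks arrive (e.g. a 'data' handler):
//   pass.write(chunk);
// and when the last chunk has arrived:
//   pass.end();

var awsHeader = {
  Bucket: 'my-bucket',            // placeholder bucket name
  Key: 'uploads/my-file.bin',     // placeholder object key
  ContentType: 'binary/octet-stream'
};

uploadFile(pass, awsHeader, function (err, response) {
  if (err) return console.error('Upload failed:', err);
  console.log('Upload finished:', response);
});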