将大型Koa请求主体上传到AWS S3的正确方法是什么?

时间:2019-01-08 08:52:30

标签: javascript node.js amazon-s3 aws-sdk koa

我正在构建应用程序后端。客户端将文件作为请求正文发布到服务器,然后服务器将文件上传到AWS S3。服务器正在使用NodeJS和koa Web框架。

如果我使用正文解析库(例如 raw-body)将帖子正文缓存为 Buffer,则当文件很大时,缓冲区很大,并且会导致内存不足错误。
如果我直接将ctx.req(一个IncomingMessage对象)传递给S3.putObject,则AWS开发工具包会抛出无法确定流内容长度(Content-Length)的错误,看起来AWS开发工具包尝试获取流的长度,然后开始分段上传。

AWS SDK版本2.383.0(当前最新)
NodeJS 10.14.2

这时,我编写了一个函数,该函数以流的形式从IncomingMessage读取,等待数据事件填满大缓冲区(16MB),然后分段上传到S3,这很好地解决了问题,但是我仍在寻找更好的解决方案。

1 个答案:

答案 0 :(得分:0)

运行几个月后,我认为我的最终解决方案是稳定可靠的。

主要概念是从IncomingMessage流存储接收到缓冲区,缓冲区达到大小后,将当前部分放入S3,然后继续读取流直到结束。

// Default multipart chunk size: 16 MiB (the "large buffer" size described in
// the answer text). NOTE(review): the original code referenced an undeclared
// `uploadBufferMB` variable here, which throws a ReferenceError the first time
// the default is needed — confirm no module-level `uploadBufferMB` exists.
const DEFAULT_PART_SIZE_IN_BYTE = 16 * 1024 * 1024;

// Progress logger; fall back to a no-op when log4js is not loaded so the
// module stays usable in isolation (e.g. unit tests).
const uploaderLogger = typeof Log4js !== 'undefined'
    ? Log4js.getLogger('customUploader')
    : { debug: () => {} };

/**
 * Stream an incoming HTTP request body to S3 with the multipart-upload API,
 * holding at most roughly one part (plus the latest chunk) in memory.
 *
 * @param {AWS.S3} s3 - configured AWS SDK v2 S3 client
 * @param {string} bucket - destination bucket name
 * @param {string} key - destination object key
 * @param {stream.Readable} incomingMessage - request body stream
 * @param {number} [partSizeInByte] - size of each uploaded part; defaults to
 *     16 MiB. Real S3 requires every non-final part to be at least 5 MiB.
 * @returns {Promise<{size: number, hash: string}>} resolves with the total
 *     byte count and the MD5 hex digest of the uploaded body; rejects (after
 *     a best-effort abortMultipartUpload) on stream or S3 errors.
 */
function customMultiPartUpload(s3, bucket, key, incomingMessage, partSizeInByte) {
    return new Promise((resolve, reject) => {
        partSizeInByte = partSizeInByte || DEFAULT_PART_SIZE_IN_BYTE;
        uploaderLogger.debug(`part size is ${partSizeInByte}`);

        let uploadId = null;
        let partNumber = 0;
        let fileSize = 0;
        let failed = false;
        const parts = [];
        let reserveBuffer = Buffer.alloc(0);
        // Reused for every full-sized part; safe because parts are uploaded
        // strictly sequentially (each doUpload is awaited before the next copy).
        const sendBuffer = Buffer.alloc(partSizeInByte);
        const md5Hash = Crypto.createHash('md5');

        // Reject exactly once, aborting the multipart upload first so S3 does
        // not keep billing for orphaned parts. (The original code swallowed
        // errors, leaving the returned promise pending forever.)
        const fail = async (err) => {
            if (failed) {
                return;
            }
            failed = true;
            if (uploadId) {
                try {
                    await s3.abortMultipartUpload({
                        Bucket: bucket,
                        Key: key,
                        UploadId: uploadId
                    }).promise();
                } catch (abortErr) {
                    uploaderLogger.debug(`abort failed ${abortErr}`);
                }
            }
            reject(err);
        };

        // Upload one part, lazily creating the multipart upload on first use.
        const doUpload = async (uploadBuffer) => {
            if (!uploadId) {
                uploaderLogger.debug('multipart upload not initialized');
                const createData = await s3.createMultipartUpload({
                    Bucket: bucket,
                    Key: key
                }).promise();
                uploadId = createData.UploadId;
                uploaderLogger.debug(`uploadId ${uploadId}`);

                partNumber = 0;
            }
            fileSize += uploadBuffer.length;
            uploaderLogger.debug(`buffer length ${uploadBuffer.length}, total ${fileSize}`);

            partNumber += 1;
            uploaderLogger.debug(`part number ${partNumber}`);

            md5Hash.update(uploadBuffer);

            const partData = await s3.uploadPart({
                Bucket: bucket,
                Key: key,
                PartNumber: partNumber,
                UploadId: uploadId,
                Body: uploadBuffer
            }).promise();
            parts.push({
                PartNumber: partNumber,
                ETag: partData.ETag
            });
            uploaderLogger.debug(`etag ${partData.ETag}`);
        };

        incomingMessage.on('error', (err) => {
            // Stream errors (e.g. client disconnect) must also settle the promise.
            fail(err);
        });

        incomingMessage.on('data', async (chunkBuffer) => {
            if (failed) {
                return;
            }
            // Pause so no further 'data'/'end' events fire while the backlog
            // of full parts is uploaded; resume only once it is flushed.
            incomingMessage.pause();
            try {
                reserveBuffer = Buffer.concat([reserveBuffer, chunkBuffer]);
                while (reserveBuffer.length > partSizeInByte) {
                    reserveBuffer.copy(sendBuffer, 0, 0, partSizeInByte);
                    reserveBuffer = reserveBuffer.subarray(partSizeInByte);
                    await doUpload(sendBuffer);
                }
            } catch (err) {
                await fail(err);
                return;
            }
            incomingMessage.resume();
        });

        incomingMessage.on('end', async () => {
            if (failed) {
                return;
            }
            uploaderLogger.debug('stream end');
            try {
                // Flush the final (possibly short) part.
                if (reserveBuffer.length > 0) {
                    await doUpload(reserveBuffer);
                }

                if (uploadId) {
                    uploaderLogger.debug('uploadId not null');
                    await s3.completeMultipartUpload({
                        Bucket: bucket,
                        Key: key,
                        UploadId: uploadId,
                        MultipartUpload: {
                            Parts: parts
                        }
                    }).promise();
                    uploaderLogger.debug('multipart upload complete');
                }
            } catch (err) {
                await fail(err);
                return;
            }

            const hash = md5Hash.digest('hex');

            resolve({
                size: fileSize,
                hash: hash
            });
            uploaderLogger.debug(`return file size ${fileSize}, hash ${hash}`);
        });
    });
}

调整partSizeInByte以适合您的服务器内存使用情况:分段(part)过大时,服务器在并发处理许多请求的情况下可能导致内存不足(OOM);分段过小时,则可能低于S3对非最后一个分段的最小大小限制(5MB)。