I am using 'ssh2-sftp-client' for this job. It works fine if I first copy the file to the Lambda /tmp/ folder and then upload it to S3, but I want to point the read stream at S3 directly, without saving anything in the Lambda /tmp/ folder.
So basically:
let sftp = new Client();
sftp.connect({
    host: Config.host,
    port: Config.port,
    username: Config.user,
    password: Config.password
}).then(() => {
    return sftp.list('/photos');
}).then((files) => {
    return get(files);
}).then((streams) => {
    return streamToString(streams);
}).then((dataArray) => {
    return uploadToS3.putBatch(Config, dataArray);
}).then(() => {
    return cleanSFTP();
}).then(() => {
    context.done();
}).catch((err) => {
    console.log('Catch Error: ', err);
    context.fail();
});
Getting a read stream from SFTP:
function get(files) {
    // fileList and BASIC_URL are defined elsewhere in the Lambda
    return Promise.all(files.slice(0, 2).map((file) => {
        fileList.push(file.name);
        // resolves with a read stream for the remote file
        return sftp.get(BASIC_URL + '/' + file.name);
    }));
}
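Note that what sftp.get() resolves with depends on the ssh2-sftp-client version: the code here assumes a readable stream, while newer releases resolve with a Buffer when no destination argument is given. A minimal sketch under that newer-API assumption (the key handling is illustrative, not from the original code):

function get(files) {
    return Promise.all(files.slice(0, 2).map((file) => {
        fileList.push(file.name);
        // With no destination argument, recent ssh2-sftp-client versions
        // resolve with a Buffer holding the whole remote file
        return sftp.get(BASIC_URL + '/' + file.name)
            .then((buffer) => ({data: buffer, key: file.name}));
    }));
}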
Stream to string:
var streamToArray = require('stream-to-array');

module.exports = function (fileStreams) {
    return Promise.all(fileStreams.map((file) => {
        return fileToArray(file);
    }));
};

function fileToArray(fileStream) {
    return streamToArray(fileStream)
        .then((fileArray) => {
            return {data: fileArray, key: fileStream.path};
        })
        .catch((err) => {
            // Note: returning the error here resolves the promise instead of rejecting it
            return err;
        });
}
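For reference, stream-to-array resolves with an array of chunks (usually Buffers), so consumers typically concatenate the parts into a single Buffer before using the data. A minimal sketch of that pattern (fileToBuffer is a hypothetical variant of fileToArray above, not part of the original module):

function fileToBuffer(fileStream) {
    return streamToArray(fileStream)
        .then((parts) => {
            // Normalize each chunk to a Buffer, then join them into one Buffer
            var buffers = parts.map((part) => Buffer.isBuffer(part) ? part : Buffer.from(part));
            return {data: Buffer.concat(buffers), key: fileStream.path};
        });
}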
Uploading to S3:
var AWS = require('aws-sdk');
AWS.config.setPromisesDependency(require('bluebird'));

function putBatch(config, files) {
    var s3 = new AWS.S3();
    // Make all the putObject calls immediately;
    // Promise.all rejects if any request fails
    return Promise.all(files.map((file) => {
        if (file.key !== undefined && file.data !== undefined) {
            var params = {
                Bucket: config.bucket,
                Key: "preix/" + file.key,
                Body: file.data
            };
            return s3.putObject(params).promise();
        }
    }));
}

// exported so the handler can call uploadToS3.putBatch(Config, dataArray)
module.exports = { putBatch };
With this code the images do get copied to S3, but their sizes differ from the originals on the SFTP server and the images cannot be previewed.
Any idea?
Answer 0 (score: 1)
I used s3-sftp-bridge and it works seamlessly.
https://github.com/gilt/s3-sftp-bridge
Just give it an S3 location and an SFTP location with credentials, and it syncs automatically.
Hope that helps.
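For reference, the direct-streaming approach described in the question can also be sketched with the AWS SDK's s3.upload(), which accepts a readable stream as Body (unlike putObject, which needs the full body up front), so nothing has to be written to /tmp. This is only a sketch under the assumption that sftp.get() resolves with a readable stream, as in the question's code; Config, BASIC_URL, and the key prefix are carried over or illustrative:

var AWS = require('aws-sdk');
var s3 = new AWS.S3();

function streamFileToS3(file) {
    return sftp.get(BASIC_URL + '/' + file.name).then((readStream) => {
        var params = {
            Bucket: Config.bucket,
            Key: 'prefix/' + file.name,   // adjust the key prefix as needed
            Body: readStream              // s3.upload streams the body directly
        };
        // s3.upload handles the streaming/multipart details under the hood
        return s3.upload(params).promise();
    });
}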