Is there a way to copy large files in Node.js fast and with progress information?
Solution 1: fs.createReadStream().pipe(...) = useless, up to 5x slower than native cp
See: Fastest way to copy file in node.js; progress information can be added (e.g. with the npm package 'progress-stream').
The only problem with this approach is that it is easily 5x slower than "cp source dest". See the appendix below for the full test code.
Solution 2: rsync --info=progress2 = just as slow as solution 1 = useless (a sketch of driving it from Node.js follows below, after the questions)
Solution 3: My last resort: write a native module for Node.js, using "CoreUtils" (the Linux sources of cp and friends) or other functions, as shown in Fast file copy with progress
Does anyone know something better than solution 3? I would like to avoid native code, but it seems to fit best.
Thanks! Any package recommendations or hints (I have tried all the fs** ones) are welcome!
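For reference, driving solution 2 from Node.js looks roughly like this. A sketch only: the --info=progress2 output format is not a stable interface, so the percentage regex is an assumption and may need adjusting per rsync version:

const { spawn } = require('child_process');

function copyWithRsync(source, dest, onProgress) {
  return new Promise((resolve, reject) => {
    const rsync = spawn('rsync', ['--info=progress2', source, dest]);
    // progress2 rewrites one status line using carriage returns;
    // scrape the percentage out of whatever arrives on stdout
    rsync.stdout.on('data', (chunk) => {
      const match = /(\d+)%/.exec(chunk.toString());
      if (match) onProgress(Number(match[1]));
    });
    rsync.on('error', reject); // e.g. rsync binary not found
    rsync.on('close', (code) =>
      code === 0 ? resolve() : reject(new Error('rsync exited with ' + code)));
  });
}

// usage: copyWithRsync('test.log', 'newLog.log', p => console.log(p + '%'))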
Appendix:
Test code, using pipe and progress:
const fs = require('fs');
fs.createReadStream('test.log').pipe(fs.createWriteStream('newLog.log'));
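And a variant of the same pipe with progress information, sketched with the progress-stream package mentioned above (same options answer 3 below uses; the 5x timing refers to the plain pipe):

const fs = require('fs');
const progress = require('progress-stream');

const source = 'test.log';
const str = progress({
  length: fs.statSync(source).size, // progress-stream needs the length for percentages
  time: 100                         // emit a 'progress' event every 100 ms
});
str.on('progress', (p) => console.log(p.percentage.toFixed(2) + '%'));

fs.createReadStream(source).pipe(str).pipe(fs.createWriteStream('newLog.log'));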
Update: A fast C version (with detailed progress!) is implemented here: https://github.com/MidnightCommander/mc/blob/master/src/filemanager/file.c#L1480. That seems to be the best place to go :-)
Answer 0 (score: 2)
One thing that may be slowing down the process is console.log. Take a look at this code:
const fs = require('fs');

const sourceFile = 'large.exe'
const destFile = 'large_copy.exe'

console.time('copying')
fs.stat(sourceFile, function(err, stat){
  const filesize = stat.size
  let bytesCopied = 0

  const readStream = fs.createReadStream(sourceFile)
  readStream.on('data', function(buffer){
    bytesCopied += buffer.length
    const percentage = ((bytesCopied/filesize)*100).toFixed(2)
    console.log(percentage+'%') // run once with this and later with this line commented
  })
  readStream.on('end', function(){
    console.timeEnd('copying')
  })
  readStream.pipe(fs.createWriteStream(destFile));
})
Here are the execution times for copying a 400MB file:
with console.log: 692.950ms
without console.log: 382.540ms
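A middle ground, if you want progress without paying for a log line on every chunk, is to throttle the output, for example only logging when the integer percentage changes. A sketch, as a drop-in replacement for the 'data' handler above:

let lastLogged = -1;
readStream.on('data', function (buffer) {
  bytesCopied += buffer.length;
  const percentage = Math.floor((bytesCopied / filesize) * 100);
  if (percentage !== lastLogged) { // at most ~100 log lines per copy
    lastLogged = percentage;
    console.log(percentage + '%');
  }
});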
Answer 2 (score: 0)
I solved a similar problem (on Node v8/v10) by changing the buffer size. I believe the default buffer size is around 16 kB, which fills and empties quickly, but each operation needs a full trip around the event loop. I changed the buffer to 1 MB, and writing a 2 GB image dropped from roughly 30 minutes to 5 minutes, which sounds similar to what you are seeing. My image was also being decompressed on the fly, which may have exacerbated the problem. The documentation on stream buffering has been in the manual since at least Node v6: https://nodejs.org/api/stream.html#stream_buffering
Here are the key code components you can use:
let gzSize = 1; // do not initialize divisors to 0
let offset = 0; // bytes read so far (declaration was implicit in the original)
let outStream;

const hwm = { highWaterMark: 1024 * 1024 };
const inStream = fs.createReadStream( filepath, hwm );

// Capture the filesize for showing percentages
inStream.on( 'open', function fileOpen( fdin ) {
  inStream.pause(); // wait for fstat before starting
  fs.fstat( fdin, function( err, stats ) {
    gzSize = stats.size;
    // openTargetDevice does a complicated fopen() for the output.
    // This could simply be inStream.resume()
    openTargetDevice( gzSize, targetDeviceOpened );
  });
});

inStream.on( 'data', function shaData( data ) {
  const bytesRead = data.length;
  offset += bytesRead;
  console.log( `Read ${offset} of ${gzSize} bytes, ${Math.floor( offset * 100 / gzSize )}% ...` );
  // Write to the output file, etc.
});

// Once the target is open, I convert the fd to a stream and resume the input.
// For the purpose of example, note only that the output has the same buffer size.
function targetDeviceOpened( error, fd, device ) {
  if( error ) return exitOnError( error );
  const writeOpts = Object.assign( { fd }, hwm );
  outStream = fs.createWriteStream( undefined, writeOpts );
  outStream.on( 'open', function fileOpen( fdin ) {
    // In a simpler structure, this is in the fstat() callback.
    inStream.resume(); // we have the _input_ size, resume read
  });
  // [...]
}
I did not try to optimize these any further; the results were similar to what I got benchmarking with 'dd' on the command line.

I left in converting the file descriptor to a stream and the pause/resume logic, so you can see how they might be useful in more complicated situations than the simple fs.statSync() in the original post. Otherwise, this just adds the highWaterMark option to tulio's answer.
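Put differently, the minimal change is adding the highWaterMark option to the streams in answer 0. A sketch (1 MB worked for my media; tune it for yours):

const fs = require('fs');

const hwm = { highWaterMark: 1024 * 1024 }; // 1 MB buffers instead of the small default
const sourceFile = 'large.exe';
const destFile = 'large_copy.exe';

fs.stat(sourceFile, function (err, stat) {
  if (err) throw err;
  let bytesCopied = 0;
  const readStream = fs.createReadStream(sourceFile, hwm);
  readStream.on('data', function (buffer) {
    bytesCopied += buffer.length;
    console.log(((bytesCopied / stat.size) * 100).toFixed(2) + '%');
  });
  readStream.pipe(fs.createWriteStream(destFile, hwm));
});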
Answer 3 (score: 0)
This is what I would use now; it copies a single file with progress:
String.prototype.toHHMMSS = function () {
    var sec_num = parseInt(this, 10); // don't forget the second param
    var hours = Math.floor(sec_num / 3600);
    var minutes = Math.floor((sec_num - (hours * 3600)) / 60);
    var seconds = sec_num - (hours * 3600) - (minutes * 60);
    if (hours < 10) { hours = "0" + hours; }
    if (minutes < 10) { minutes = "0" + minutes; }
    if (seconds < 10) { seconds = "0" + seconds; }
    return hours + ':' + minutes + ':' + seconds;
}
var purefile = "20200811140938_0002.MP4";
var filename = "/sourceDir/" + purefile;
var output = "/destinationDir/" + purefile;

var progress = require('progress-stream');
var fs = require('fs');

const convertBytes = function (bytes) {
    const sizes = ["Bytes", "KB", "MB", "GB", "TB"];
    if (bytes == 0) {
        return "n/a";
    }
    const i = parseInt(Math.floor(Math.log(bytes) / Math.log(1024)));
    if (i == 0) {
        return bytes + " " + sizes[i];
    }
    return (bytes / Math.pow(1024, i)).toFixed(1) + " " + sizes[i];
}

var copiedFileSize = fs.statSync(filename).size;

var str = progress({
    length: copiedFileSize, // length(integer) - If you already know the length of the stream, then you can set it. Defaults to 0.
    time: 200,              // time(integer) - Sets how often progress events are emitted in ms. If omitted then the default is to do so every time a chunk is received.
    speed: 1,               // speed(integer) - Sets how long the speedometer needs to calculate the speed. Defaults to 5 sec.
    // drain: true          // drain(boolean) - In case you don't want to include a readstream after progress-stream, set to true to drain automatically. Defaults to false.
    // transferred: false   // transferred(integer) - If you want to set the size of previously downloaded data. Useful for a resumed download.
});

/* a progress event looks like this:
{
    percentage: 9.05,
    transferred: 949624,
    length: 10485760,
    remaining: 9536136,
    eta: 42,
    runtime: 3,
    delta: 295396,
    speed: 949624
}
*/

str.on('progress', function (progress) {
    console.log(progress.percentage + '%');
    console.log('elapsed: ' + progress.runtime.toString().toHHMMSS() + 's / remaining: ' + progress.eta.toString().toHHMMSS() + 's');
    console.log(convertBytes(progress.speed) + "/s" + ' ' + progress.speed);
});

// const hwm = { highWaterMark: 1024 * 1024 };
var hrstart = process.hrtime(); // measure the copy time

var rs = fs.createReadStream(filename)
    .pipe(str)
    .pipe(fs.createWriteStream(output, { emitClose: true }).on("close", () => {
        var hrend = process.hrtime(hrstart);
        var timeInSeconds = (hrend[0] * 1000000000 + hrend[1]) / 1000000000;
        var finalSpeed = convertBytes(copiedFileSize / timeInSeconds);
        console.log('Done: file copy: ' + finalSpeed + "/s");
        console.info('Execution time (hr): %ds %dms', hrend[0], hrend[1] / 1000000);
    }));
Answer 4 (score: 0)
I had the same problem. I wanted to copy large files as fast as possible and needed progress information, so I created a test utility that benchmarks different copy methods:
https://www.npmjs.com/package/copy-speed-test
You can run it simply with:
npx copy-speed-test --source someFile.zip --destination someNonExistentFolder
It uses child_process.exec() for a native copy, fs.copyFile to copy the file, and createReadStream with a variety of buffer sizes (you can change the buffer sizes by passing them on the command line; run npx copy-speed-test -h for more information).
Some things I learned:

- fs.copyFile is just as fast as the native copy
- createReadStream was almost as good as the other methods

That last point matters because the progress is based on the read stream, not the write stream. If you copy a 1.5 GB file and your buffer is 1 GB, the progress jumps to 66% immediately and then to 100%, and you then have to wait while the write stream finishes writing. I don't think you can display the progress of the write stream.
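That said, fs.WriteStream does expose a bytesWritten counter, so write-side progress can at least be approximated by polling it. A sketch (hypothetical file names, untested against the scenario below):

const fs = require('fs');

const source = 'big.bin';
const dest = 'big_copy.bin';
const size = fs.statSync(source).size;

const ws = fs.createWriteStream(dest);
const timer = setInterval(() => {
  // bytesWritten excludes data still queued in the stream's buffer,
  // so this reflects what has actually been handed to the OS
  console.log(((ws.bytesWritten / size) * 100).toFixed(1) + '% written');
}, 500);

fs.createReadStream(source).pipe(ws).on('finish', () => {
  clearInterval(timer);
  console.log('done');
});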
If you have the same problem, I suggest running these tests with file sizes similar to the ones you will be dealing with, and across similar media. My end use case was copying a file from an SD card plugged into a Raspberry Pi, over a network, to a NAS, so that is the scenario I ran the tests in.
I hope someone other than me finds this useful!