我正在尝试制作一个能够复制非常大文件的(前端)JavaScript(即,从文件输入元素中读取它们并使用StreamSaver.js“下载”它们)。
这是实际的代码:
<html>
<head>
<title>File copying</title>
</head>
<body>
<script src="https://cdn.jsdelivr.net/npm/web-streams-polyfill@2.0.2/dist/ponyfill.min.js"></script>
<script src="https://cdn.jsdelivr.net/npm/streamsaver@2.0.3/StreamSaver.min.js"></script>
<script type="text/javascript">
const streamSaver = window.streamSaver;
/**
 * Copies the file selected in #fileInput chunk-by-chunk into a
 * StreamSaver.js download named "<original> - Copy".
 *
 * Reads the file in 1 MiB slices so arbitrarily large files can be
 * copied without loading them fully into memory.
 */
async function copyFile() {
  const fileInput = document.getElementById("fileInput");
  const file = fileInput.files[0];
  if (!file) {
    alert('select a (large) file');
    return;
  }
  const newName = file.name + " - Copy";
  let remaining = file.size;
  let written = 0;
  const chunkSize = 1048576; // 1MB
  const writeStream = streamSaver.createWriteStream(newName);
  const writer = writeStream.getWriter();
  while (remaining > 0) {
    const readSize = chunkSize > remaining ? remaining : chunkSize;
    // BUG FIX: Blob.slice(start, end) takes an END OFFSET, not a length.
    // The original `file.slice(written, readSize)` returned an empty blob
    // on every iteration after the first (e.g. slice(1048576, 1048576)),
    // which is why `aBuff` appeared empty in the second loop pass.
    const blob = file.slice(written, written + readSize);
    const aBuff = await blob.arrayBuffer();
    await writer.write(new Uint8Array(aBuff));
    written += readSize;
    remaining -= readSize;
  }
  await writer.close();
}
</script>
<input type="file" id="fileInput"/>
<button onclick="copyFile()">Copy file</button>
</body>
</html>
在 while 的第二个循环中，aBuff 变量（即 blob.arrayBuffer() 的结果）似乎是一个空的 ArrayBuffer。
我读取文件的方式有误吗?我的意图是逐块读取一个(可能是巨大的)文件,并对每个块进行一些处理(在这种情况下,只需将其输出到StreamSaver.js的下载文件中)。当今的浏览器有什么更好的方法可用?
答案 0（得分：0）：
我会使用blob.stream()或new Response(blob).body
之类的方式来读取文件的所有块，并在需要时配合 TransformStream 进行处理。但是，如果你需要自定义切片大小或者更好的浏览器兼容性，那么可以创建自己的 blob -> ReadableStream 实用函数：
// Taken from https://www.npmjs.com/package/screw-filereader
/**
 * Wraps a Blob in a ReadableStream that yields its bytes as Uint8Array
 * chunks, so it can be piped (e.g. into a StreamSaver write stream).
 * Adapted from https://www.npmjs.com/package/screw-filereader
 *
 * BUG FIX: the original kept `var blob = this`, a leftover from the
 * package's Blob.prototype-method form. Called as a plain function in
 * strict mode, `this` is undefined, so the parameter was clobbered and
 * the function threw. The parameter is now used directly.
 *
 * @param {Blob} blob - source blob to stream.
 * @param {number} [chunkSize=1048576] - slice size in bytes (default 1 MiB).
 * @returns {ReadableStream<Uint8Array>} stream of the blob's bytes.
 */
function stream(blob, chunkSize = 1048576) {
  let position = 0;
  return new ReadableStream({
    pull(controller) {
      // slice() clamps past-the-end ranges, so the final chunk is short.
      const chunk = blob.slice(position, position + chunkSize);
      return chunk.arrayBuffer().then((buffer) => {
        position += buffer.byteLength;
        controller.enqueue(new Uint8Array(buffer));
        // `>=` instead of `==` so a logic slip can never loop forever.
        if (position >= blob.size) {
          controller.close();
        }
      });
    },
  });
}
stream(blob).pipeTo(writeStream)
这样,您可以将其通过管道传输到streamsaver,而不必手动编写每个块