I am trying to upload a Revit file to my bucket in chunks. My Revit file is roughly 13 MB. Here is my code:
function handleFileSelect(evt) {
    var files = evt.target.files;
    var file = files[0];
    var segmentSize = 1024 * 1024 * 5; // 5 MB
    var startingByte = 0;
    var endingByte = startingByte + segmentSize - 1;
    var segments = Math.ceil(file.size / segmentSize);
    var session = Math.floor(100000000 + Math.random() * -900000000);

    for (var i = 0; i < segments; i++) {
        var blob = file.slice(startingByte, endingByte);
        var url = 'https://developer.api.autodesk.com/oss/v2/buckets/' + 'linked_model' + '/objects/' + file.name + '/resumable';
        //console.log(url);
        var contentRange = 'bytes ' + startingByte + '-' + endingByte + '/' + file.size;

        $.ajax({
            type: 'PUT',
            url: url,
            data: blob,
            headers: {
                'Authorization': 'Bearer ' + token,
                'Content-Type': 'application/octet-stream',
                'Content-Range': contentRange,
                'Session-Id': session
            },
            crossDomain: true,
            processData: false,
            success: function (data) {
                console.log(i);
                startingByte = endingByte + 1;
                endingByte = startingByte + segmentSize - 1;
            },
            error: function (XMLHttpRequest, textStatus, errorThrown) {
                alert("Status: " + textStatus);
                alert("Error: " + errorThrown);
                console.log(startingByte);
                console.log(endingByte);
                console.log(file.size);
            }
        });
    }
}
It gives me the error: 416 (Requested Range Not Satisfiable).
Can anyone help?
Answer (score: 1)
I ran into the same 416 error, but in my case the problem was that I was trying to upload chunks smaller than 2 MB, which is not allowed (except for the last chunk).
Once I increased the chunk size to 5 MB, it started working. I just wrote a blog post about it: https://forge.autodesk.com/blog/nailing-large-files-uploads-forge-resumable-api
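To make that concrete, here is a quick sketch (plain JavaScript, reusing the roughly 13 MB file size from the question) of the Content-Range values a 5 MB chunking should produce. Note that the ranges are inclusive and only the final chunk is allowed to be smaller than 2 MB:

// Sketch: valid Content-Range values for a resumable upload.
// 13 MB file, 5 MB chunks: every chunk but the last is >= 2 MB.
const fileSize = 13 * 1024 * 1024   // 13631488 bytes
const chunkSize = 5 * 1024 * 1024   // 5242880 bytes
const nbChunks = Math.ceil(fileSize / chunkSize)

for (let i = 0; i < nbChunks; ++i) {
  const start = i * chunkSize
  // ranges are inclusive, so clamp the last chunk to fileSize - 1
  const end = Math.min(fileSize, (i + 1) * chunkSize) - 1
  console.log('Content-Range: bytes ' + start + '-' + end + '/' + fileSize)
}

// prints:
//   Content-Range: bytes 0-5242879/13631488
//   Content-Range: bytes 5242880-10485759/13631488
//   Content-Range: bytes 10485760-13631487/13631488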
By the way, I strongly recommend that you do not perform this kind of operation client side, as your snippet suggests: it means exposing a write-access token to the web page, which compromises the security of your app. You should first upload the file to your own server and then upload it securely to Forge, as explained in the post and the sample.
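To illustrate that flow, here is a minimal sketch. Express and multer are illustrative choices of mine (not part of the original sample), `svc` stands for an instance of the class that exposes the upload method below, and `get2LeggedToken` is a hypothetical helper wrapping the Forge OAuth endpoint:

// Illustrative sketch only: the page POSTs the file to your own server,
// which holds the Forge credentials and performs the chunked upload.
// Assumptions: `svc` is an instance of the class exposing
// uploadObjectChunked (shown below); `get2LeggedToken` is a hypothetical
// helper that fetches a 2-legged token server-side.
const express = require('express')
const multer = require('multer')

const app = express()
const upload = multer({ dest: 'uploads/' }) // saves the upload to server disk

app.post('/api/upload', upload.single('model'), async (req, res) => {
  try {
    const result = await svc.uploadObjectChunked(
      get2LeggedToken,            // token never leaves the server
      'linked_model',             // bucketKey from the question
      req.file.originalname,      // objectKey
      { path: req.file.path, size: req.file.size },
      { chunkSize: 5 * 1024 * 1024, concurrentUploads: 3 })

    res.json(result)

  } catch (err) {
    res.status(500).json({ error: err.toString() })
  }
})

app.listen(3000)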
Below is the core code that handles the chunking and the upload (in Node.js).
/////////////////////////////////////////////////////////
// Uploads object to bucket using resumable endpoint
//
// Note: `eachLimit` comes from the 'async' npm module;
// this is a class method, so `this.guid()` and
// `this._objectsAPI` (a Forge ObjectsApi instance) are
// provided by the surrounding class.
/////////////////////////////////////////////////////////
const fs = require('fs')
const { eachLimit } = require('async')

uploadObjectChunked (getToken, bucketKey, objectKey,
                     file, opts = {}) {

  return new Promise((resolve, reject) => {

    const chunkSize = opts.chunkSize || 5 * 1024 * 1024
    const nbChunks = Math.ceil(file.size / chunkSize)

    const chunksMap = Array.from({
      length: nbChunks
    }, (e, i) => i)

    // generates unique session ID
    const sessionId = this.guid()

    // prepare the upload tasks
    const uploadTasks = chunksMap.map((chunkIdx) => {

      const start = chunkIdx * chunkSize
      const end = Math.min(
        file.size, (chunkIdx + 1) * chunkSize) - 1
      const range = `bytes ${start}-${end}/${file.size}`
      const length = end - start + 1

      const readStream =
        fs.createReadStream(file.path, {
          start, end
        })

      const run = async () => {

        const token = await getToken()

        return this._objectsAPI.uploadChunk(
          bucketKey, objectKey,
          length, range, sessionId,
          readStream, {},
          { autoRefresh: false }, token)
      }

      return {
        chunkIndex: chunkIdx,
        run
      }
    })

    let progress = 0

    // runs the upload tasks in parallel; the number of
    // simultaneous uploads is defined by opts.concurrentUploads
    eachLimit(uploadTasks, opts.concurrentUploads || 3,
      (task, callback) => {

        task.run().then((res) => {

          if (opts.onProgress) {

            progress += 100.0 / nbChunks

            opts.onProgress({
              progress: Math.round(progress * 100) / 100,
              chunkIndex: task.chunkIndex
            })
          }

          callback()

        }, (err) => {

          console.log('error')
          console.log(err)

          callback(err)
        })
      }, (err) => {

        if (err) {
          return reject(err)
        }

        return resolve({
          fileSize: file.size,
          bucketKey,
          objectKey,
          nbChunks
        })
      })
  })
}
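For reference, a hypothetical call to this method could look like this (`svc` and `getToken` are assumptions based on the method's signature; `file` only needs `path` and `size`, since the chunks are streamed from disk):

// Hypothetical usage of uploadObjectChunked; names are illustrative.
const fs = require('fs')

const file = {
  path: './model.rvt',
  size: fs.statSync('./model.rvt').size
}

svc.uploadObjectChunked(getToken, 'linked_model', 'model.rvt', file, {
  chunkSize: 5 * 1024 * 1024, // keep chunks >= 2 MB (except the last one)
  concurrentUploads: 3,
  onProgress: ({ progress, chunkIndex }) => {
    console.log(`chunk ${chunkIndex} done - ${progress}%`)
  }
}).then((res) => {
  console.log('Upload complete:', res)
}).catch((err) => {
  console.error(err)
})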