I think I'm very close on this one. I have the following dropzone configuration:
Dropzone.options.myDZ = {
chunking: true,
chunkSize: 500000,
retryChunks: true,
retryChunksLimit: 3,
chunksUploaded: function(file, done) {
done();
}
};
However, it finishes after 1 chunk because of the done() call. I'm guessing that at this point I need to check whether all of the chunks have been uploaded and, if so, call done().
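A quick way to see when chunksUploaded actually fires is to log from inside it before calling done() (a minimal diagnostic sketch, not part of my real config):

Dropzone.options.myDZ = {
    chunking: true,
    chunkSize: 500000,
    chunksUploaded: function(file, done) {
        // Log once per file to confirm whether this fires after every chunk or only after the last one
        console.log("chunksUploaded:", file.upload.uuid,
                    "total chunks:", file.upload.totalChunkCount);
        done();
    }
};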
Here is the wiki on chunking: https://gitlab.com/meno/dropzone/wikis/faq#chunked-uploads
Here are the configuration options: http://www.dropzonejs.com/#configuration
Has anyone worked with dropzone before?
Answer (score: 8)
After struggling with this for a while, I can confirm (using the latest version of dropzone, 5.4.0) that chunksUploaded is only called once all of a file's chunks have been uploaded. Calling done() marks the file as a success. What is the size of the file you are trying to upload? If it is below chunkSize, the file won't actually be chunked (because forceChunking defaults to false) and chunksUploaded will never be called (source).
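If you want small files to take the chunked path too (so chunksUploaded always fires), you can turn on forceChunking; a minimal sketch of just the relevant options:

Dropzone.options.myDZ = {
    chunking: true,
    forceChunking: true, // chunk even when file.size < chunkSize
    chunkSize: 500000,
    chunksUploaded: function (file, done) {
        done(); // only reached after the last chunk has uploaded
    }
};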
Below is my working front-end implementation of chunking with dropzone.js. A few notes up front: myDropzone and currentFile are global variables declared outside of $(document).ready(), like so:
var currentFile = null;
var myDropzone = null;
This is because I need them in scope when error-handling the PUT request inside the chunksUploaded function (the done() passed in there does not accept an error message as an argument, so we have to handle failures ourselves, which requires those globals). I can elaborate if needed.
$(function () {
myDropzone = new Dropzone("#attachDZ", {
url: "/api/ChunkedUpload",
params: function (files, xhr, chunk) {
if (chunk) {
return {
dzUuid: chunk.file.upload.uuid,
dzChunkIndex: chunk.index,
dzTotalFileSize: chunk.file.size,
dzCurrentChunkSize: chunk.dataBlock.data.size,
dzTotalChunkCount: chunk.file.upload.totalChunkCount,
dzChunkByteOffset: chunk.index * this.options.chunkSize,
dzChunkSize: this.options.chunkSize,
dzFilename: chunk.file.name,
userID: <%= UserID %>,
};
}
},
parallelUploads: 1, // since we're using a global 'currentFile', we could have issues if parallelUploads > 1, so we'll make it = 1
maxFilesize: 1024, // max individual file size 1024 MB
chunking: true, // enable chunking
forceChunking: true, // forces chunking when file.size < chunkSize
parallelChunkUploads: true, // allows chunks to be uploaded in parallel (this is independent of the parallelUploads option)
chunkSize: 1000000, // chunk size 1,000,000 bytes (~1MB)
retryChunks: true, // retry chunks on failure
retryChunksLimit: 3, // retry maximum of 3 times (default is 3)
chunksUploaded: function (file, done) {
// All chunks have been uploaded. Perform any other actions
currentFile = file;
// This calls server-side code to merge all chunks for the currentFile
$.ajax({
type: "PUT",
url: "/api/ChunkedUpload?dzIdentifier=" + currentFile.upload.uuid
+ "&fileName=" + encodeURIComponent(currentFile.name)
+ "&expectedBytes=" + currentFile.size
+ "&totalChunks=" + currentFile.upload.totalChunkCount
+ "&userID=" + <%= UserID %>,
success: function (data) {
// Must call done() if successful
done();
},
error: function (msg) {
currentFile.accepted = false;
myDropzone._errorProcessing([currentFile], msg.responseText);
}
});
},
init: function() {
// This calls server-side code to delete temporary files created if the file failed to upload
// This also gets called if the upload is canceled
this.on('error', function(file, errorMessage) {
$.ajax({
type: "DELETE",
url: "/api/ChunkedUpload?dzIdentifier=" + file.upload.uuid
+ "&fileName=" + encodeURIComponent(file.name)
+ "&expectedBytes=" + file.size
+ "&totalChunks=" + file.upload.totalChunkCount
+ "&userID=" + <%= UserID %>,
success: function (data) {
// nothing
}
});
});
}
});
});
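For completeness, the "#attachDZ" selector above assumes a matching element in the page; any element works for programmatic instantiation since the url comes from the options (the markup below is just an illustrative assumption):

<div id="attachDZ" class="dropzone"></div>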
If anyone is interested in my server-side code, let me know and I'll post it. I'm using C#/ASP.NET.
EDIT: Added the server-side code
ChunkedUploadController.cs:
using System;
using System.Collections.Generic;
using System.Collections.Specialized;
using System.IO;
using System.Linq;
using System.Net;
using System.Net.Http;
using System.Threading.Tasks;
using System.Web;
using System.Web.Http;

// Note: tSysParm, tTempAtt, LogProxy, SafeDateTime and IsNullOrWS are application-specific helpers
public class ChunkedUploadController : ApiController
{
private class DzMeta
{
public int intChunkNumber = 0;
public string dzChunkNumber { get; set; }
public string dzChunkSize { get; set; }
public string dzCurrentChunkSize { get; set; }
public string dzTotalSize { get; set; }
public string dzIdentifier { get; set; }
public string dzFilename { get; set; }
public string dzTotalChunks { get; set; }
public string dzCurrentChunkByteOffset { get; set; }
public string userID { get; set; }
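// Two overloads: chunk metadata arrives either as a Dictionary or as HttpContext.Current.Request.Form (a NameValueCollection)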
public DzMeta(Dictionary<string, string> values)
{
dzChunkNumber = values["dzChunkIndex"];
dzChunkSize = values["dzChunkSize"];
dzCurrentChunkSize = values["dzCurrentChunkSize"];
dzTotalSize = values["dzTotalFileSize"];
dzIdentifier = values["dzUuid"];
dzFilename = values["dzFilename"]; // must match the param name sent by the client
dzTotalChunks = values["dzTotalChunkCount"];
dzCurrentChunkByteOffset = values["dzChunkByteOffset"];
userID = values["userID"];
int.TryParse(dzChunkNumber, out intChunkNumber);
}
public DzMeta(NameValueCollection values)
{
dzChunkNumber = values["dzChunkIndex"];
dzChunkSize = values["dzChunkSize"];
dzCurrentChunkSize = values["dzCurrentChunkSize"];
dzTotalSize = values["dzTotalFileSize"];
dzIdentifier = values["dzUuid"];
dzFilename = values["dzFilename"]; // must match the param name sent by the client
dzTotalChunks = values["dzTotalChunkCount"];
dzCurrentChunkByteOffset = values["dzChunkByteOffset"];
userID = values["userID"];
int.TryParse(dzChunkNumber, out intChunkNumber);
}
}
[HttpPost]
public async Task<HttpResponseMessage> UploadChunk()
{
HttpResponseMessage response = new HttpResponseMessage { StatusCode = HttpStatusCode.Created };
try
{
if (!Request.Content.IsMimeMultipartContent("form-data"))
{
//No Files uploaded
response.StatusCode = HttpStatusCode.BadRequest;
response.Content = new StringContent("No file uploaded or MIME multipart content not as expected!");
throw new HttpResponseException(response);
}
var meta = new DzMeta(HttpContext.Current.Request.Form);
var chunkDirBasePath = tSysParm.GetParameter("CHUNKUPDIR");
var path = string.Format(@"{0}\{1}", chunkDirBasePath, meta.dzIdentifier);
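// Zero-pad the chunk number and total so the plain string sort in CommitChunks (OrderBy) restores the correct chunk order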
var filename = string.Format(@"{0}.{1}.{2}.tmp", meta.dzFilename, (meta.intChunkNumber + 1).ToString().PadLeft(4, '0'), meta.dzTotalChunks.PadLeft(4, '0'));
Directory.CreateDirectory(path);
await Request.Content.LoadIntoBufferAsync(); // await instead of .Wait() to avoid deadlocking on the ASP.NET synchronization context
await Request.Content.ReadAsMultipartAsync(new CustomMultipartFormDataStreamProvider(path, filename)).ContinueWith((task) =>
{
if (task.IsFaulted || task.IsCanceled)
{
response.StatusCode = HttpStatusCode.InternalServerError;
response.Content = new StringContent("Chunk upload task is faulted or canceled!");
throw new HttpResponseException(response);
}
});
}
catch (HttpResponseException ex)
{
LogProxy.WriteError(ex.Response.Content.ToString(), ex);
}
catch (Exception ex)
{
LogProxy.WriteError("Error uploading/saving chunk to filesystem", ex);
response.StatusCode = HttpStatusCode.InternalServerError;
response.Content = new StringContent(string.Format("Error uploading/saving chunk to filesystem: {0}", ex.Message));
}
return response;
}
[HttpPut]
public HttpResponseMessage CommitChunks([FromUri]string dzIdentifier, [FromUri]string fileName, [FromUri]int expectedBytes, [FromUri]int totalChunks, [FromUri]int userID)
{
HttpResponseMessage response = new HttpResponseMessage { StatusCode = HttpStatusCode.OK };
string path = "";
try
{
var chunkDirBasePath = tSysParm.GetParameter("CHUNKUPDIR");
path = string.Format(@"{0}\{1}", chunkDirBasePath, dzIdentifier);
var dest = Path.Combine(path, HttpUtility.UrlDecode(fileName));
FileInfo info = null;
// Get all files in directory and combine in filestream
var files = Directory.EnumerateFiles(path).Where(s => !s.Equals(dest)).OrderBy(s => s);
// Check that the number of chunks is as expected
if (files.Count() != totalChunks)
{
response.Content = new StringContent(string.Format("Total number of chunks: {0}. Expected: {1}!", files.Count(), totalChunks));
throw new HttpResponseException(response);
}
// Merge chunks into one file
using (var fStream = new FileStream(dest, FileMode.Create))
{
foreach (var file in files)
{
using (var sourceStream = System.IO.File.OpenRead(file))
{
sourceStream.CopyTo(fStream);
}
}
fStream.Flush();
}
// Check that merged file length is as expected.
info = new FileInfo(dest);
if (info != null)
{
if (info.Length == expectedBytes)
{
// Save the file in the database
tTempAtt file = tTempAtt.NewInstance();
file.ContentType = MimeMapping.GetMimeMapping(info.Name);
file.File = System.IO.File.ReadAllBytes(info.FullName);
file.FileName = info.Name;
file.Title = info.Name;
file.TemporaryID = userID;
file.Description = info.Name;
file.User = userID;
file.Date = SafeDateTime.Now;
file.Insert();
}
else
{
response.Content = new StringContent(string.Format("Total file size: {0}. Expected: {1}!", info.Length, expectedBytes));
throw new HttpResponseException(response);
}
}
else
{
response.Content = new StringContent("Chunks failed to merge and file not saved!");
throw new HttpResponseException(response);
}
}
catch (HttpResponseException ex)
{
LogProxy.WriteError(ex.Response.Content.ToString(), ex);
response.StatusCode = HttpStatusCode.InternalServerError;
}
catch (Exception ex)
{
LogProxy.WriteError("Error merging chunked upload!", ex);
response.StatusCode = HttpStatusCode.InternalServerError;
response.Content = new StringContent(string.Format("Error merging chunked upload: {0}", ex.Message));
}
finally
{
// No matter what happens, we need to delete the temporary files if they exist
if (!path.IsNullOrWS() && Directory.Exists(path))
{
Directory.Delete(path, true);
}
}
return response;
}
[HttpDelete]
public HttpResponseMessage DeleteCanceledChunks([FromUri]string dzIdentifier, [FromUri]string fileName, [FromUri]int expectedBytes, [FromUri]int totalChunks, [FromUri]int userID)
{
HttpResponseMessage response = new HttpResponseMessage { StatusCode = HttpStatusCode.OK };
try
{
var chunkDirBasePath = tSysParm.GetParameter("CHUNKUPDIR");
var path = string.Format(@"{0}\{1}", chunkDirBasePath, dzIdentifier);
// Delete abandoned chunks if they exist
if (!path.IsNullOrWS() && Directory.Exists(path))
{
Directory.Delete(path, true);
}
}
catch (Exception ex)
{
LogProxy.WriteError("Error deleting canceled chunks", ex);
response.StatusCode = HttpStatusCode.InternalServerError;
response.Content = new StringContent(string.Format("Error deleting canceled chunks: {0}", ex.Message));
}
return response;
}
}
Finally, CustomMultipartFormDataStreamProvider.cs:
using System.Net.Http;
using System.Net.Http.Headers;

public class CustomMultipartFormDataStreamProvider : MultipartFormDataStreamProvider
{
private readonly string _filename;
public CustomMultipartFormDataStreamProvider(string path, string filename) : base(path)
{
_filename = filename;
}
public override string GetLocalFileName(HttpContentHeaders headers)
{
return _filename;
}
}
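Overriding GetLocalFileName is the key design choice here: it lets UploadChunk control each chunk's on-disk name, whereas the base MultipartFormDataStreamProvider would write every part to a random BodyPart_{GUID} file and break the ordered merge in CommitChunks.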