尝试使用blueimp jQuery文件上传插件上传大文件(大于1 GB)。使用maxChunkSize配置项,可以从客户端将文件分块上传。在服务器端,我们可以通过Content-Range和Content-Disposition请求头获得块大小和文件名。
我的服务器是Weblogic
,并在Servlet
中编写服务器端代码。
以下是我的问题:
答案 0 :(得分:4)
检查github上插件的wiki - 它有一个关于chunked文件上传的部分。
来自wiki:
示例PHP upload handler开箱即用地支持分块上传。
为了支持分块上传,上传处理程序使用了Content-Range请求头,插件会为每个传输的块发送该请求头。
检查上面链接的示例PHP代码。
服务器端:如何知道请求是否是最后一个块?
每个Content-Range
请求标头将包含该请求中包含的文件的字节范围以及文件的总字节数。因此,您可以检查范围的结束值与总字节数,以确定请求是否包含最后一个块。
查看W3C网站this部分中给出的示例。
服务器端如何将收到的所有块数据写入单个文件?
您可以收集数组中内存中的所有块,然后一次性将它们写入文件 - 但这对于较大的文件来说效率很低。 Java的IO API提供了通过提供初始偏移量来写入文件部分的方法。检查this问题。
如何识别分块请求与同一文件相关,因为每个块都将作为单独的请求发送?
检查每个请求中的Content-Range
标头 - 如果请求具有该标头,那么它就是众多块上传请求之一。使用标题的值,您可以确定该请求中包含文件的哪个部分/部分。
此外,请求中的Content-Disposition
标头将包含用于链接同一文件的各种请求的文件名。
答案 1 :(得分:2)
我可以通过工作代码回答吗?这是必要的客户端和服务器部件。见下面的解释。
客户端:
<!-- File input wired to the blueimp jQuery File Upload plugin; posts to "file.upload". -->
<input id="fileupload" type="file" name="files[]" data-url="file.upload" multiple>
<script>
// Generates an RFC 4122 version-4 (random) UUID string.
var uuid = function() {
return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g,
function(c) {
// 'x' -> random hex digit; 'y' -> one of 8,9,a,b (UUID variant bits).
var r = Math.random()*16|0, v = c == 'x' ? r : (r&0x3|0x8);
return v.toString(16);
});
};
$(function () {
$('#fileupload').fileupload({
dataType: 'json',
// Split uploads into ~1 MB chunks; each chunk becomes a separate POST
// carrying a Content-Range header.
maxChunkSize: 1000000,
done: function (e, data) {
// Server responds with {"files":[...]}; show each uploaded file's name.
$.each(data.result.files, function (index, file) {
$('<p/>').text(file.name).appendTo(document.body);
});
}
}).bind('fileuploadsubmit', function (e, data) {
// Attach one uploadId per file submission so the server can group
// the chunk requests that belong to the same file.
data.formData = {uploadId: uuid()};
});
});
</script>
WEB-INF/web.xml中:
<!-- ...other servlet blocks... -->
<!-- Registers the chunk-aware upload servlet. -->
<servlet>
<servlet-name>fileUploadServlet</servlet-name>
<servlet-class>your.package.FileUploadServlet</servlet-class>
</servlet>
<!-- ...other servlet-mapping blocks... -->
<!-- URL pattern must match the client's data-url attribute ("file.upload"). -->
<servlet-mapping>
<servlet-name>fileUploadServlet</servlet-name>
<url-pattern>/file.upload</url-pattern>
</servlet-mapping>
Servlet "FileUploadServlet":
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.regex.Pattern;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.commons.fileupload.FileItem;
import org.apache.commons.fileupload.FileItemFactory;
import org.apache.commons.fileupload.disk.DiskFileItemFactory;
import org.apache.commons.fileupload.servlet.ServletFileUpload;
import com.fasterxml.jackson.databind.ObjectMapper;
...
/**
 * Handles blueimp jQuery File Upload POSTs, both whole-file and chunked.
 *
 * Chunked requests carry a Content-Range header of the form
 * "bytes {from}-{to}/{total}". Each chunk is stored as a separate
 * "{name}.{from}.chunk" file under a per-upload directory keyed by the
 * client-supplied uploadId form field; once the accumulated chunk length
 * equals the declared total, the chunks are assembled into the final file.
 *
 * Responds with JSON ({"files":[...]}) as the plugin expects.
 *
 * @throws ServletException on a malformed Content-Range header, a missing
 *         uploadId field, a chunk-length mismatch, or any failure from the
 *         multipart/JSON machinery
 * @throws IOException on response I/O failure
 */
@Override
protected void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
    // Content-Range is present only on chunked requests: "bytes from-to/total".
    String range = request.getHeader("Content-Range");
    long fileFullLength = -1;
    long chunkFrom = -1;
    long chunkTo = -1;
    if (range != null) {
        if (!range.startsWith("bytes "))
            throw new ServletException("Unexpected range format: " + range);
        String[] fromToAndLength = range.substring(6).split(Pattern.quote("/"));
        fileFullLength = Long.parseLong(fromToAndLength[1]);
        String[] fromAndTo = fromToAndLength[0].split(Pattern.quote("-"));
        chunkFrom = Long.parseLong(fromAndTo[0]);
        chunkTo = Long.parseLong(fromAndTo[1]);
    }
    File tempDir = new File(System.getProperty("java.io.tmpdir")); // Configure according to
    File storageDir = tempDir;                                     // project server environment.
    String uploadId = null;
    FileItemFactory factory = new DiskFileItemFactory(10000000, tempDir);
    ServletFileUpload upload = new ServletFileUpload(factory);
    try {
        List<?> items = upload.parseRequest(request);
        Iterator<?> it = items.iterator();
        List<Map<String, Object>> ret = new ArrayList<Map<String, Object>>();
        while (it.hasNext()) {
            FileItem item = (FileItem) it.next();
            if (item.isFormField()) {
                // The client attaches uploadId via formData; multipart parts arrive
                // in order, so it is parsed before the file part it belongs to.
                if (item.getFieldName().equals("uploadId"))
                    uploadId = item.getString();
            } else {
                if (uploadId == null) // Fail fast instead of NPE-ing on the dir below.
                    throw new ServletException("Missing uploadId form field (must precede the file part)");
                // Strip any client-supplied path component: defends against path
                // traversal and against browsers that send full local paths.
                String fileName = new File(item.getName()).getName();
                Map<String, Object> fileInfo = new LinkedHashMap<String, Object>();
                File assembledFile = null;
                fileInfo.put("name", fileName);
                fileInfo.put("type", item.getContentType());
                File dir = new File(storageDir, uploadId);
                if (!dir.exists())
                    dir.mkdirs(); // mkdirs (not mkdir): also creates missing parents
                if (fileFullLength < 0) { // File is not chunked
                    fileInfo.put("size", item.getSize());
                    assembledFile = new File(dir, fileName);
                    item.write(assembledFile);
                } else { // File is chunked
                    byte[] bytes = item.get();
                    // Sanity check: payload length must match the declared range.
                    if (chunkFrom + bytes.length != chunkTo + 1)
                        throw new ServletException("Unexpected length of chunk: " + bytes.length +
                                " != " + (chunkTo + 1) + " - " + chunkFrom);
                    saveChunk(dir, fileName, chunkFrom, bytes, fileFullLength);
                    TreeMap<Long, Long> chunkStartsToLengths = getChunkStartsToLengths(dir, fileName);
                    long lengthSoFar = getCommonLength(chunkStartsToLengths);
                    fileInfo.put("size", lengthSoFar);
                    // All chunks are in once their combined length reaches the total.
                    if (lengthSoFar == fileFullLength) {
                        assembledFile = assembleAndDeleteChunks(dir, fileName,
                                new ArrayList<Long>(chunkStartsToLengths.keySet()));
                    }
                }
                if (assembledFile != null) {
                    fileInfo.put("complete", true);
                    fileInfo.put("serverPath", assembledFile.getAbsolutePath());
                    // Here you can do something with the fully assembled file.
                }
                ret.add(fileInfo);
            }
        }
        Map<String, Object> filesInfo = new LinkedHashMap<String, Object>();
        filesInfo.put("files", ret); // blueimp expects {"files":[...]}
        response.setContentType("application/json");
        response.getWriter().write(new ObjectMapper().writeValueAsString(filesInfo));
        response.getWriter().close();
    } catch (ServletException ex) {
        throw ex;
    } catch (Exception ex) {
        // FileItem.write and Jackson declare broad exception types; surface them
        // as ServletException with the cause preserved (no printStackTrace).
        throw new ServletException(ex);
    }
}
/**
 * Persists one received chunk as its own file, named
 * "{fileName}.{from}.chunk" inside {@code dir}.
 *
 * @param dir            per-upload directory for this file's chunks
 * @param fileName       name of the file being uploaded
 * @param from           zero-based byte offset of this chunk within the file
 * @param bytes          chunk payload
 * @param fileFullLength declared total file length (unused here; kept for
 *                       signature compatibility with the caller)
 * @throws IOException if the chunk file cannot be written
 */
private static void saveChunk(File dir, String fileName,
        long from, byte[] bytes, long fileFullLength) throws IOException {
    File chunkFile = new File(dir, fileName + "." + from + ".chunk");
    FileOutputStream out = new FileOutputStream(chunkFile);
    try {
        out.write(bytes);
    } finally {
        out.close();
    }
}
/**
 * Scans {@code dir} for chunk files belonging to {@code fileName}
 * (named "{fileName}.{start}.chunk") and maps each chunk's start offset to
 * its on-disk length, sorted by start offset.
 *
 * @param dir      per-upload directory holding the chunk files
 * @param fileName name of the file being uploaded
 * @return sorted map: chunk start offset -> chunk file length
 * @throws IOException if the directory cannot be listed (File.listFiles
 *         returns null on a missing directory or I/O error — previously
 *         this caused an NPE)
 */
private static TreeMap<Long, Long> getChunkStartsToLengths(File dir,
        String fileName) throws IOException {
    TreeMap<Long, Long> chunkStartsToLengths = new TreeMap<Long, Long>();
    File[] files = dir.listFiles();
    if (files == null)
        throw new IOException("Cannot list directory: " + dir);
    String prefix = fileName + ".";
    String suffix = ".chunk";
    for (File f : files) {
        String chunkFileName = f.getName();
        if (chunkFileName.startsWith(prefix) && chunkFileName.endsWith(suffix)) {
            // The start offset sits between the prefix and the ".chunk" suffix.
            long start = Long.parseLong(chunkFileName.substring(
                    prefix.length(), chunkFileName.length() - suffix.length()));
            chunkStartsToLengths.put(start, f.length());
        }
    }
    return chunkStartsToLengths;
}
/**
 * Sums the lengths of all chunks received so far.
 *
 * @param chunkStartsToLengths map of chunk start offset -> chunk length
 * @return combined byte count of every chunk in the map
 */
private static long getCommonLength(TreeMap<Long, Long> chunkStartsToLengths) {
    long total = 0;
    for (long length : chunkStartsToLengths.values()) {
        total += length;
    }
    return total;
}
/**
 * Concatenates all chunk files for {@code fileName} (in the ascending start
 * order supplied by the caller) into the final file, deleting each chunk
 * file once it has been copied.
 *
 * If the assembled file already exists — possible when chunk requests run
 * concurrently and more than one observes the full length — the existing
 * file is returned untouched.
 *
 * @param dir         per-upload directory holding the chunk files
 * @param fileName    name of the final assembled file
 * @param chunkStarts chunk start offsets, sorted ascending
 * @return the assembled file
 * @throws IOException if reading a chunk or writing the target fails
 */
private static File assembleAndDeleteChunks(File dir, String fileName,
        List<Long> chunkStarts) throws IOException {
    File assembledFile = new File(dir, fileName);
    if (assembledFile.exists()) // In case chunks come in a concurrent way
        return assembledFile;
    OutputStream out = new FileOutputStream(assembledFile);
    try {
        byte[] buffer = new byte[100000];
        for (long start : chunkStarts) {
            File chunkFile = new File(dir, fileName + "." + start + ".chunk");
            InputStream in = new FileInputStream(chunkFile);
            try {
                int read;
                while ((read = in.read(buffer)) != -1) {
                    out.write(buffer, 0, read);
                }
            } finally {
                in.close();
            }
            chunkFile.delete();
        }
    } finally {
        out.close();
    }
    return assembledFile;
}
这个想法是将块作为单独的文件写入,并在所有块文件的公共长度等于完整文件长度时组装它们。在客户端,您可以在上传开始时定义使用属性(这里为uploadId完成 - 每个文件的唯一ID)。对于文件的所有块,此uploadId都是相同的。任何问题?请告诉我。
[更新]有两个依赖项:https://commons.apache.org/proper/commons-fileupload/和https://github.com/FasterXML/jackson。