We have started using Amazon S3 multipart upload. We are sending an InputStream from a Java application running on Tomcat.
Where can I find a working example of a multipart upload to S3 that uses an InputStream rather than a File?
I have stripped our code down to demonstrate the problem. We appear to upload two parts, each filled with a fixed byte pattern, but after the upload completes on Amazon's side the assembled object contains different content.
The test below uploads two byte[] arrays and then downloads the complete content for comparison (a note on reading the downloaded stream follows the test).
@Test
public void AmazonWay() {
    final long partSize = 5 * 1024 * 1024; // 5 MB
    byte[][] multi = new byte[][]{
            AmazonAdaptor.initArray((byte) 1, (int) partSize),
            AmazonAdaptor.initArray((byte) 2, (int) partSize)
    };
    try {
        String filePathOrig = getTempPath("D:\\temp\\", "orig");
        writeArraysToFile(multi[0], multi[1], filePathOrig);

        ContentData contentData = storage.multiPartExampleSend2BigParts(multi);

        byte[] retrieveAllBytes = new byte[(int) (partSize * 2)];
        contentData.getInputStream().read(retrieveAllBytes);

        String filePath = getTempPath("D:\\temp\\", "fromAmazon");
        writeBytesToFile(filePath, retrieveAllBytes);

        // copyOfRange's end index is exclusive, so part 2 starts at index partSize
        byte[] part1 = Arrays.copyOfRange(retrieveAllBytes, 0, (int) partSize);
        byte[] part2 = Arrays.copyOfRange(retrieveAllBytes, (int) partSize, retrieveAllBytes.length);
        Assert.assertTrue(Arrays.equals(multi[0], part1));
        Assert.assertTrue(Arrays.equals(multi[1], part2));
    } catch (Exception e) {
        e.printStackTrace();
        fail("Amazon Exception - " + e.getMessage());
    }
}
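One caveat about the comparison above: a single InputStream.read(byte[]) call is not guaranteed to fill the whole buffer, especially when the data is streamed back from S3 over the network. A small helper along these lines (the name readFully is just for illustration) would read the expected number of bytes before comparing:

static byte[] readFully(java.io.InputStream in, int expectedLength) throws java.io.IOException {
    // read(byte[], off, len) may return fewer bytes than requested, so loop until the buffer is full
    byte[] buffer = new byte[expectedLength];
    int offset = 0;
    while (offset < expectedLength) {
        int read = in.read(buffer, offset, expectedLength - offset);
        if (read == -1) {
            throw new java.io.EOFException("Stream ended after " + offset + " of " + expectedLength + " bytes");
        }
        offset += read;
    }
    return buffer;
}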
The method below receives an array of byte arrays and uploads each one to S3 as a part, using the AWS SDK for Java. After completing the multipart upload, it retrieves the full object and returns its content, so that the test can verify that the assembled object equals the concatenation of the uploaded parts. (A minimal, self-contained sketch of the same flow appears after the code.)
public ContentData multiPartExampleSend2BigParts(byte[][] multi) throws Exception {
    final long partSize = 5 * 1024 * 1024; // 5 MB
    String existingBucketName = bucketNameDefault;
    String keyName = UUID.randomUUID().toString();

    // Create a list of PartETag objects. You get one of these for each uploaded part.
    List<PartETag> partETags = new ArrayList<PartETag>();

    // Step 1: Initialize the multipart upload.
    InitiateMultipartUploadRequest initRequest =
            new InitiateMultipartUploadRequest(existingBucketName, keyName);
    InitiateMultipartUploadResult initResponse =
            amazonS3Client.initiateMultipartUpload(initRequest);
    String uploadId = initResponse.getUploadId();
    String eTagComplete = null;
    String eTagRetrieve = null;
    try {
        System.out.println("-------------------------------");
        // Step 2: Upload each part from a ByteArrayInputStream.
        for (int iPart = 0; iPart < multi.length; iPart++) {
            byte[] part = multi[iPart];
            //partSize = part.length;

            // Create the request to upload a part.
            ByteArrayInputStream inputStream = new ByteArrayInputStream(part);
            UploadPartRequest uploadPartRequest = new UploadPartRequest()
                    .withBucketName(existingBucketName)
                    .withKey(keyName)
                    .withUploadId(initResponse.getUploadId())
                    .withPartNumber(iPart + 1)
                    .withPartSize(partSize)
                    .withInputStream(inputStream);

            // Upload the part and add its ETag to our list.
            System.out.println(String.format("Starting upload Part-%d, Size=%d", (iPart + 1), part.length));
            partETags.add(amazonS3Client.uploadPart(uploadPartRequest).getPartETag());
            System.out.println(String.format("End upload Part-%d, Size=%d", (iPart + 1), part.length));
        }

        // Step 3: Complete the multipart upload.
        CompleteMultipartUploadRequest request =
                new CompleteMultipartUploadRequest(existingBucketName, keyName, uploadId, partETags);
        CompleteMultipartUploadResult amazonResult = amazonS3Client.completeMultipartUpload(request);

        // Retrieve the assembled object and its metadata.
        GetObjectRequest getObjectRequest = new GetObjectRequest(existingBucketName, keyName);
        S3Object s3object = amazonS3Client.getObject(getObjectRequest);
        S3ObjectInputStream inputStream = s3object.getObjectContent();
        long objectDataSize = s3object.getObjectMetadata().getContentLength();
        String contentType = s3object.getObjectMetadata().getContentType();
        String amazonTag = s3object.getObjectMetadata().getETag();

        // Read the object content into a local buffer, then wrap the same stream in a ContentData.
        byte[] retrieveBytes = new byte[(int) objectDataSize];
        inputStream.read(retrieveBytes);
        ContentData data = new ContentDataImpl(inputStream, objectDataSize);
        return data;
    } catch (Exception e) {
        amazonS3Client.abortMultipartUpload(new AbortMultipartUploadRequest(
                existingBucketName, keyName, initResponse.getUploadId()));
        throw e;
    }
}
public static byte[] initArray(byte value, int size) {
    byte[] bytes = new byte[size];
    Arrays.fill(bytes, value);
    return bytes;
}
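For reference, here is a minimal, self-contained sketch of the same multipart flow driven entirely by ByteArrayInputStreams. It assumes the AWS SDK for Java 1.x with default credentials and region; the bucket name is a placeholder, and AmazonS3ClientBuilder can be swapped for however your amazonS3Client is constructed:

import java.io.ByteArrayInputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.UUID;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadResult;
import com.amazonaws.services.s3.model.PartETag;
import com.amazonaws.services.s3.model.UploadPartRequest;

public class MultipartFromStreamsSketch {

    public static void main(String[] args) {
        final String bucket = "my-example-bucket";           // placeholder bucket name
        final String key = UUID.randomUUID().toString();
        final int partSize = 5 * 1024 * 1024;                // 5 MB: minimum size for every part except the last

        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient(); // assumes default credentials/region

        // Two parts, each filled with a distinct byte value, mirroring the test above.
        byte[][] parts = new byte[][]{ fill((byte) 1, partSize), fill((byte) 2, partSize) };

        InitiateMultipartUploadResult init =
                s3.initiateMultipartUpload(new InitiateMultipartUploadRequest(bucket, key));
        List<PartETag> etags = new ArrayList<PartETag>();
        try {
            for (int i = 0; i < parts.length; i++) {
                UploadPartRequest req = new UploadPartRequest()
                        .withBucketName(bucket)
                        .withKey(key)
                        .withUploadId(init.getUploadId())
                        .withPartNumber(i + 1)                               // part numbers start at 1
                        .withInputStream(new ByteArrayInputStream(parts[i]))
                        .withPartSize(parts[i].length);                      // size of this part's stream
                etags.add(s3.uploadPart(req).getPartETag());
            }
            s3.completeMultipartUpload(
                    new CompleteMultipartUploadRequest(bucket, key, init.getUploadId(), etags));
        } catch (Exception e) {
            // On any failure, abort so S3 does not keep the partial upload around.
            s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, init.getUploadId()));
            throw new RuntimeException(e);
        }
    }

    private static byte[] fill(byte value, int size) {
        byte[] bytes = new byte[size];
        Arrays.fill(bytes, value);
        return bytes;
    }
}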