Concurrent access to Swisscom S3 Dynamic Storage

Asked: 2016-11-08 13:26:10

Tags: amazon-s3 parallel-processing upload load-testing swisscomdev

We are having a problem with Swisscom S3 Dynamic Storage. When we run concurrent CRUD test requests in 5 or more parallel threads, the storage service randomly returns 403 Forbidden instead of the correct response. When the same requests are executed sequentially, everything works fine.

The code I am using is shown below:

import com.amazonaws.ClientConfiguration;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.model.*;
import com.amazonaws.util.StringInputStream;
import org.apache.commons.io.IOUtils;
import org.junit.Test;

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

/**
 * Tutorial https://javatutorial.net/java-s3-example
 */
public class AmazonS3ManualTest {

    public static final String BUCKET_NAME = "??";
    private static String accessKey = "??";
    private static String secretKey = "??";

    @Test
    public void testOperations() throws IOException, InterruptedException {
        final int maxCount = 5;

        final AmazonS3Client amazonS3Client = getS3Client();

        final CountDownLatch latch = new CountDownLatch(maxCount);
        final ExecutorService executor = Executors.newFixedThreadPool(maxCount);
        for (int i = 0; i < maxCount; i++) {
            final int index = i;
            executor.submit(() -> {
                final String FolderOne = "testFolderOne" + index;
                final String FolderTwo = "testFolderTwo" + index;
                final String FolderCopy = "copyFolder" + index;

                try {
                    createFile(amazonS3Client, "/" + FolderOne + "/file.txt");
                    createFolder(amazonS3Client, FolderTwo + "/");

                    exists(amazonS3Client, FolderOne + "/file.txt");
                    exists(amazonS3Client, FolderTwo + "/");

                    copy(amazonS3Client, FolderOne + "/file.txt", FolderCopy + "/filecopy.txt");

                    delete(amazonS3Client, "/" + FolderOne);
                    delete(amazonS3Client, "/" + FolderTwo);

                    get(amazonS3Client, FolderCopy + "/filecopy.txt");
                    delete(amazonS3Client, "/" + FolderCopy + "/filecopy.txt");

                    isEmptyFolder(amazonS3Client, "/" + FolderCopy);
                    delete(amazonS3Client, "/" + FolderCopy);
                } catch (Exception e) {
                    e.printStackTrace();
                } finally {
                    latch.countDown(); // release the latch even when a request fails
                }
            });
        }

        if (!latch.await(300, TimeUnit.SECONDS)) {
            throw new RuntimeException("Waiting too long for the result");
        }
    }

    private void isEmptyFolder(AmazonS3Client amazonS3Client, String folder) {
        final ObjectListing objectListing = amazonS3Client.listObjects(BUCKET_NAME, folder);
        assert(objectListing.getObjectSummaries().isEmpty());
    }

    private void get(AmazonS3Client amazonS3Client, String file) throws IOException {
        GetObjectRequest request = new GetObjectRequest(BUCKET_NAME, file);
        final S3Object object = amazonS3Client.getObject(request);
        // close the content stream so the HTTP connection is returned to the pool
        try (S3ObjectInputStream objectContent = object.getObjectContent()) {
            final String s = IOUtils.toString(objectContent);
            assert(s.length() > 0);
        }
    }

    private void copy(AmazonS3Client amazonS3Client, String source, String target) {
        CopyObjectRequest request = new CopyObjectRequest(BUCKET_NAME, source, BUCKET_NAME, target);
        amazonS3Client.copyObject(request);
    }

    private void delete(AmazonS3Client amazonS3Client, String path) {
        deleteRecursive(amazonS3Client, path);
    }

    private void deleteRecursive(AmazonS3Client amazonS3Client, String path) {
        ObjectListing objects = amazonS3Client.listObjects(BUCKET_NAME, path);

        for (S3ObjectSummary objectSummary : objects.getObjectSummaries()) {
            if (objectSummary.getKey().equals(path)) {
                continue;
            }

            if (objectSummary.getKey().endsWith("/")) {
                deleteRecursive(amazonS3Client, objectSummary.getKey());
            } else {
                amazonS3Client.deleteObject(BUCKET_NAME, objectSummary.getKey());
            }
        }

        amazonS3Client.deleteObject(BUCKET_NAME, path);
    }

    private void exists(AmazonS3Client amazonS3Client, String folder) {
        GetObjectMetadataRequest request = new GetObjectMetadataRequest(BUCKET_NAME, folder);
        try {
            final ObjectMetadata objectMetadata = amazonS3Client.getObjectMetadata(request);
            assert(objectMetadata != null);
        } catch (AmazonS3Exception e) {
            if (e.getStatusCode() == 404) {
                assert(false);
                return;
            }
            throw e; // propagate unexpected errors (e.g. 403) instead of swallowing them
        }
    }

    private void createFolder(AmazonS3Client amazonS3Client, String folder) {
        // S3 has no real directories; a zero-byte object whose key ends with "/" serves as a folder marker
        final InputStream input = new ByteArrayInputStream(new byte[0]);
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(0);

        amazonS3Client.putObject(new PutObjectRequest(BUCKET_NAME, folder, input, metadata));
    }

    private void createFile(AmazonS3Client amazonS3Client, String fileName) throws IOException {
        ObjectMetadata omd = new ObjectMetadata();
        //omd.setContentType("text/html");
        omd.setHeader("filename", fileName);
        omd.setHeader("x-amz-server-side-encryption", "AES256");

        // upload file to folder and set it to public
        final StringInputStream testFile = new StringInputStream("Test");
        final PutObjectRequest putObjectRequest = new PutObjectRequest(BUCKET_NAME, fileName, testFile, omd);
        amazonS3Client.putObject(putObjectRequest.withCannedAcl(CannedAccessControlList.Private));

        testFile.close();
    }

    private AmazonS3Client getS3Client() {

        ClientConfiguration opts = new ClientConfiguration();
        opts.setSignerOverride("S3SignerType");  // NOT "AWS3SignerType"
        opts.setMaxConnections(100);

        final AmazonS3Client s3 = new AmazonS3Client(new BasicAWSCredentials(accessKey, secretKey), opts);
        s3.setEndpoint("ds31s3.swisscom.com");

        return s3;
    }
}

The exception we get is:

com.amazonaws.services.s3.model.AmazonS3Exception: The AWS Access Key Id you provided does not exist in our records. (Service: Amazon S3; Status Code: 403; Error Code: InvalidAccessKeyId; Request ID: null), S3 Extended Request ID: null

Could you please advise what we can do about this? The behavior is erratic and does not scale.

2 answers:

Answer 0 (score: 1)

I recreated the Dynamic Storage S3 service and re-ran the test above. The exception is no longer thrown. It seems the previously created storage had an infrastructure problem.

Answer 1 (score: 0)

We ran your code snippet 80 times in a row against Swisscom's S3 Dynamic Storage and could not reproduce the issue.

However, timing issues can occur when accessing an object immediately after uploading it. A PUT request may be load-balanced to a different node than a subsequent GET request. So if you download an object right after uploading it, add a short sleep or retry.
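As an illustration, here is a minimal retry helper that could be added to the test class above; the method name getWithRetry, the attempt count, and the 500 ms back-off are assumptions for this sketch, not part of the original answer:

    private S3Object getWithRetry(AmazonS3Client client, String key, int maxAttempts) throws InterruptedException {
        AmazonS3Exception last = null;
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try {
                // may fail right after a PUT if the GET is routed to a node
                // that has not seen the new object yet
                return client.getObject(BUCKET_NAME, key);
            } catch (AmazonS3Exception e) {
                if (e.getStatusCode() != 404 && e.getStatusCode() != 403) {
                    throw e; // unrelated failure, do not retry
                }
                last = e;
                Thread.sleep(500L * attempt); // linear back-off, illustrative value
            }
        }
        throw last;
    }

With a helper like this, the get() call in the test would tolerate the brief window in which the PUT and the GET land on different nodes.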