Example 11 with UploadPartResult

Use of com.amazonaws.services.s3.model.UploadPartResult in project elasticsearch by elastic.

The class TestAmazonS3, method uploadPart.

@Override
public UploadPartResult uploadPart(UploadPartRequest request) throws AmazonClientException, AmazonServiceException {
    if (shouldFail(request.getBucketName(), request.getKey(), writeFailureRate)) {
        long length = request.getPartSize();
        long partToRead = (long) (length * randomDouble());
        byte[] buffer = new byte[1024];
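        // Consume a random-length prefix of the part so the simulated failure lands partway through the upload.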
        for (long cur = 0; cur < partToRead; cur += buffer.length) {
            try (InputStream input = request.getInputStream()) {
                input.read(buffer, 0, (int) (partToRead - cur > buffer.length ? buffer.length : partToRead - cur));
            } catch (IOException ex) {
                throw new ElasticsearchException("cannot read input stream", ex);
            }
        }
        logger.info("--> random write failure on uploadPart method: throwing an exception for [bucket={}, key={}]", request.getBucketName(), request.getKey());
        AmazonS3Exception ex = new AmazonS3Exception("Random S3 write exception");
        ex.setStatusCode(400);
        ex.setErrorCode("RequestTimeout");
        throw ex;
    } else {
        return super.uploadPart(request);
    }
}
Also used : InputStream(java.io.InputStream) IOException(java.io.IOException) ElasticsearchException(org.elasticsearch.ElasticsearchException) AmazonS3Exception(com.amazonaws.services.s3.model.AmazonS3Exception)
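
The 400/"RequestTimeout" combination is deliberate: it is an error S3 clients generally treat as transient and retryable, so the test exercises the caller's retry path. As a companion, here is a minimal sketch of a caller-side retry loop (the helper name and attempt count are assumptions for illustration, not part of the elasticsearch code):

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.AmazonS3Exception;
import com.amazonaws.services.s3.model.UploadPartRequest;
import com.amazonaws.services.s3.model.UploadPartResult;

final class RetryingPartUploader {
    private static final int MAX_ATTEMPTS = 3; // illustrative retry budget

    static UploadPartResult uploadPartWithRetry(AmazonS3 s3, UploadPartRequest request) {
        AmazonS3Exception last = null;
        for (int attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) {
            try {
                return s3.uploadPart(request);
            } catch (AmazonS3Exception e) {
                // Retry only the transient RequestTimeout error the test simulates.
                if (!"RequestTimeout".equals(e.getErrorCode())) {
                    throw e;
                }
                last = e;
            }
        }
        throw last;
    }
}

Note that a real retry must also rewind the part's input stream (or build the request from a file), since the failed attempt may already have consumed some of it — exactly the situation the test's read loop sets up.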

Example 12 with UploadPartResult

Use of com.amazonaws.services.s3.model.UploadPartResult in project nifi by apache.

The class PutS3Object, method onTrigger.

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }
    final long startNanos = System.nanoTime();
    final String bucket = context.getProperty(BUCKET).evaluateAttributeExpressions(flowFile).getValue();
    final String key = context.getProperty(KEY).evaluateAttributeExpressions(flowFile).getValue();
    final String cacheKey = getIdentifier() + "/" + bucket + "/" + key;
    final AmazonS3Client s3 = getClient();
    final FlowFile ff = flowFile;
    final Map<String, String> attributes = new HashMap<>();
    final String ffFilename = ff.getAttributes().get(CoreAttributes.FILENAME.key());
    attributes.put(S3_BUCKET_KEY, bucket);
    attributes.put(S3_OBJECT_KEY, key);
    final Long multipartThreshold = context.getProperty(MULTIPART_THRESHOLD).asDataSize(DataUnit.B).longValue();
    final Long multipartPartSize = context.getProperty(MULTIPART_PART_SIZE).asDataSize(DataUnit.B).longValue();
    final long now = System.currentTimeMillis();
    /*
         * If necessary, run age off for existing uploads in AWS S3 and local state
         */
    ageoffS3Uploads(context, s3, now);
    /*
         * Then upload the FlowFile content, as a single object or as a multipart
         * upload depending on its size.
         */
    try {
        session.read(flowFile, new InputStreamCallback() {

            @Override
            public void process(final InputStream rawIn) throws IOException {
                try (final InputStream in = new BufferedInputStream(rawIn)) {
                    final ObjectMetadata objectMetadata = new ObjectMetadata();
                    objectMetadata.setContentDisposition(ff.getAttribute(CoreAttributes.FILENAME.key()));
                    objectMetadata.setContentLength(ff.getSize());
                    final String contentType = context.getProperty(CONTENT_TYPE).evaluateAttributeExpressions(ff).getValue();
                    if (contentType != null) {
                        objectMetadata.setContentType(contentType);
                        attributes.put(S3_CONTENT_TYPE, contentType);
                    }
                    final String expirationRule = context.getProperty(EXPIRATION_RULE_ID).evaluateAttributeExpressions(ff).getValue();
                    if (expirationRule != null) {
                        objectMetadata.setExpirationTimeRuleId(expirationRule);
                    }
                    final Map<String, String> userMetadata = new HashMap<>();
                    for (final Map.Entry<PropertyDescriptor, String> entry : context.getProperties().entrySet()) {
                        if (entry.getKey().isDynamic()) {
                            final String value = context.getProperty(entry.getKey()).evaluateAttributeExpressions(ff).getValue();
                            userMetadata.put(entry.getKey().getName(), value);
                        }
                    }
                    final String serverSideEncryption = context.getProperty(SERVER_SIDE_ENCRYPTION).getValue();
                    if (!serverSideEncryption.equals(NO_SERVER_SIDE_ENCRYPTION)) {
                        objectMetadata.setSSEAlgorithm(serverSideEncryption);
                        attributes.put(S3_SSE_ALGORITHM, serverSideEncryption);
                    }
                    if (!userMetadata.isEmpty()) {
                        objectMetadata.setUserMetadata(userMetadata);
                    }
                    if (ff.getSize() <= multipartThreshold) {
                        // ----------------------------------------
                        // single part upload
                        // ----------------------------------------
                        final PutObjectRequest request = new PutObjectRequest(bucket, key, in, objectMetadata);
                        request.setStorageClass(StorageClass.valueOf(context.getProperty(STORAGE_CLASS).getValue()));
                        final AccessControlList acl = createACL(context, ff);
                        if (acl != null) {
                            request.setAccessControlList(acl);
                        }
                        final CannedAccessControlList cannedAcl = createCannedACL(context, ff);
                        if (cannedAcl != null) {
                            request.withCannedAcl(cannedAcl);
                        }
                        try {
                            final PutObjectResult result = s3.putObject(request);
                            if (result.getVersionId() != null) {
                                attributes.put(S3_VERSION_ATTR_KEY, result.getVersionId());
                            }
                            if (result.getETag() != null) {
                                attributes.put(S3_ETAG_ATTR_KEY, result.getETag());
                            }
                            if (result.getExpirationTime() != null) {
                                attributes.put(S3_EXPIRATION_ATTR_KEY, result.getExpirationTime().toString());
                            }
                            if (result.getMetadata().getRawMetadata().keySet().contains(S3_STORAGECLASS_META_KEY)) {
                                attributes.put(S3_STORAGECLASS_ATTR_KEY, result.getMetadata().getRawMetadataValue(S3_STORAGECLASS_META_KEY).toString());
                            }
                            if (userMetadata.size() > 0) {
                                StringBuilder userMetaBldr = new StringBuilder();
                                for (String userKey : userMetadata.keySet()) {
                                    userMetaBldr.append(userKey).append("=").append(userMetadata.get(userKey));
                                }
                                attributes.put(S3_USERMETA_ATTR_KEY, userMetaBldr.toString());
                            }
                            attributes.put(S3_API_METHOD_ATTR_KEY, S3_API_METHOD_PUTOBJECT);
                        } catch (AmazonClientException e) {
                            getLogger().info("Failure completing upload flowfile={} bucket={} key={} reason={}", new Object[] { ffFilename, bucket, key, e.getMessage() });
                            throw (e);
                        }
                    } else {
                        // ----------------------------------------
                        // multipart upload
                        // ----------------------------------------
                        // load or create persistent state
                        // ------------------------------------------------------------
                        MultipartState currentState;
                        try {
                            currentState = getLocalStateIfInS3(s3, bucket, cacheKey);
                            if (currentState != null) {
                                if (currentState.getPartETags().size() > 0) {
                                    final PartETag lastETag = currentState.getPartETags().get(currentState.getPartETags().size() - 1);
                                    getLogger().info("Resuming upload for flowfile='{}' bucket='{}' key='{}' " + "uploadID='{}' filePosition='{}' partSize='{}' storageClass='{}' " + "contentLength='{}' partsLoaded={} lastPart={}/{}", new Object[] { ffFilename, bucket, key, currentState.getUploadId(), currentState.getFilePosition(), currentState.getPartSize(), currentState.getStorageClass().toString(), currentState.getContentLength(), currentState.getPartETags().size(), Integer.toString(lastETag.getPartNumber()), lastETag.getETag() });
                                } else {
                                    getLogger().info("Resuming upload for flowfile='{}' bucket='{}' key='{}' " + "uploadID='{}' filePosition='{}' partSize='{}' storageClass='{}' " + "contentLength='{}' no partsLoaded", new Object[] { ffFilename, bucket, key, currentState.getUploadId(), currentState.getFilePosition(), currentState.getPartSize(), currentState.getStorageClass().toString(), currentState.getContentLength() });
                                }
                            } else {
                                currentState = new MultipartState();
                                currentState.setPartSize(multipartPartSize);
                                currentState.setStorageClass(StorageClass.valueOf(context.getProperty(STORAGE_CLASS).getValue()));
                                currentState.setContentLength(ff.getSize());
                                persistLocalState(cacheKey, currentState);
                                getLogger().info("Starting new upload for flowfile='{}' bucket='{}' key='{}'", new Object[] { ffFilename, bucket, key });
                            }
                        } catch (IOException e) {
                            getLogger().error("IOException initiating cache state while processing flow files: " + e.getMessage());
                            throw (e);
                        }
                        // ------------------------------------------------------------
                        if (currentState.getUploadId().isEmpty()) {
                            final InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest(bucket, key, objectMetadata);
                            initiateRequest.setStorageClass(currentState.getStorageClass());
                            final AccessControlList acl = createACL(context, ff);
                            if (acl != null) {
                                initiateRequest.setAccessControlList(acl);
                            }
                            final CannedAccessControlList cannedAcl = createCannedACL(context, ff);
                            if (cannedAcl != null) {
                                initiateRequest.withCannedACL(cannedAcl);
                            }
                            try {
                                final InitiateMultipartUploadResult initiateResult = s3.initiateMultipartUpload(initiateRequest);
                                currentState.setUploadId(initiateResult.getUploadId());
                                currentState.getPartETags().clear();
                                try {
                                    persistLocalState(cacheKey, currentState);
                                } catch (Exception e) {
                                    getLogger().info("Exception saving cache state while processing flow file: " + e.getMessage());
                                    throw (new ProcessException("Exception saving cache state", e));
                                }
                                getLogger().info("Success initiating upload flowfile={} available={} position={} " + "length={} bucket={} key={} uploadId={}", new Object[] { ffFilename, in.available(), currentState.getFilePosition(), currentState.getContentLength(), bucket, key, currentState.getUploadId() });
                                if (initiateResult.getUploadId() != null) {
                                    attributes.put(S3_UPLOAD_ID_ATTR_KEY, initiateResult.getUploadId());
                                }
                            } catch (AmazonClientException e) {
                                getLogger().info("Failure initiating upload flowfile={} bucket={} key={} reason={}", new Object[] { ffFilename, bucket, key, e.getMessage() });
                                throw (e);
                            }
                        } else {
                            if (currentState.getFilePosition() > 0) {
                                try {
                                    final long skipped = in.skip(currentState.getFilePosition());
                                    if (skipped != currentState.getFilePosition()) {
                                        getLogger().info("Failure skipping to resume upload flowfile={} " + "bucket={} key={} position={} skipped={}", new Object[] { ffFilename, bucket, key, currentState.getFilePosition(), skipped });
                                    }
                                } catch (Exception e) {
                                    getLogger().info("Failure skipping to resume upload flowfile={} bucket={} " + "key={} position={} reason={}", new Object[] { ffFilename, bucket, key, currentState.getFilePosition(), e.getMessage() });
                                    throw (new ProcessException(e));
                                }
                            }
                        }
                        // upload parts
                        // ------------------------------------------------------------
                        long thisPartSize;
                        for (int part = currentState.getPartETags().size() + 1; currentState.getFilePosition() < currentState.getContentLength(); part++) {
                            if (!PutS3Object.this.isScheduled()) {
                                throw new IOException(S3_PROCESS_UNSCHEDULED_MESSAGE + " flowfile=" + ffFilename + " part=" + part + " uploadId=" + currentState.getUploadId());
                            }
                            thisPartSize = Math.min(currentState.getPartSize(), (currentState.getContentLength() - currentState.getFilePosition()));
                            UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(bucket).withKey(key).withUploadId(currentState.getUploadId()).withInputStream(in).withPartNumber(part).withPartSize(thisPartSize);
                            try {
                                UploadPartResult uploadPartResult = s3.uploadPart(uploadRequest);
                                currentState.addPartETag(uploadPartResult.getPartETag());
                                currentState.setFilePosition(currentState.getFilePosition() + thisPartSize);
                                try {
                                    persistLocalState(cacheKey, currentState);
                                } catch (Exception e) {
                                    getLogger().info("Exception saving cache state processing flow file: " + e.getMessage());
                                }
                                getLogger().info("Success uploading part flowfile={} part={} available={} " + "etag={} uploadId={}", new Object[] { ffFilename, part, in.available(), uploadPartResult.getETag(), currentState.getUploadId() });
                            } catch (AmazonClientException e) {
                                getLogger().info("Failure uploading part flowfile={} part={} bucket={} key={} " + "reason={}", new Object[] { ffFilename, part, bucket, key, e.getMessage() });
                                throw (e);
                            }
                        }
                        // complete multipart upload
                        // ------------------------------------------------------------
                        CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest(bucket, key, currentState.getUploadId(), currentState.getPartETags());
                        try {
                            CompleteMultipartUploadResult completeResult = s3.completeMultipartUpload(completeRequest);
                            getLogger().info("Success completing upload flowfile={} etag={} uploadId={}", new Object[] { ffFilename, completeResult.getETag(), currentState.getUploadId() });
                            if (completeResult.getVersionId() != null) {
                                attributes.put(S3_VERSION_ATTR_KEY, completeResult.getVersionId());
                            }
                            if (completeResult.getETag() != null) {
                                attributes.put(S3_ETAG_ATTR_KEY, completeResult.getETag());
                            }
                            if (completeResult.getExpirationTime() != null) {
                                attributes.put(S3_EXPIRATION_ATTR_KEY, completeResult.getExpirationTime().toString());
                            }
                            if (currentState.getStorageClass() != null) {
                                attributes.put(S3_STORAGECLASS_ATTR_KEY, currentState.getStorageClass().toString());
                            }
                            if (userMetadata.size() > 0) {
                                StringBuilder userMetaBldr = new StringBuilder();
                                for (String userKey : userMetadata.keySet()) {
                                    userMetaBldr.append(userKey).append("=").append(userMetadata.get(userKey));
                                }
                                attributes.put(S3_USERMETA_ATTR_KEY, userMetaBldr.toString());
                            }
                            attributes.put(S3_API_METHOD_ATTR_KEY, S3_API_METHOD_MULTIPARTUPLOAD);
                        } catch (AmazonClientException e) {
                            getLogger().info("Failure completing upload flowfile={} bucket={} key={} reason={}", new Object[] { ffFilename, bucket, key, e.getMessage() });
                            throw (e);
                        }
                    }
                }
            }
        });
        if (!attributes.isEmpty()) {
            flowFile = session.putAllAttributes(flowFile, attributes);
        }
        session.transfer(flowFile, REL_SUCCESS);
        final String url = s3.getResourceUrl(bucket, key);
        final long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
        session.getProvenanceReporter().send(flowFile, url, millis);
        getLogger().info("Successfully put {} to Amazon S3 in {} milliseconds", new Object[] { ff, millis });
        try {
            removeLocalState(cacheKey);
        } catch (IOException e) {
            getLogger().info("Error trying to delete key {} from cache: {}", new Object[] { cacheKey, e.getMessage() });
        }
    } catch (final ProcessException | AmazonClientException pe) {
        if (pe.getMessage().contains(S3_PROCESS_UNSCHEDULED_MESSAGE)) {
            getLogger().info(pe.getMessage());
            session.rollback();
        } else {
            getLogger().error("Failed to put {} to Amazon S3 due to {}", new Object[] { flowFile, pe });
            flowFile = session.penalize(flowFile);
            session.transfer(flowFile, REL_FAILURE);
        }
    }
}
Also used : CannedAccessControlList(com.amazonaws.services.s3.model.CannedAccessControlList) AccessControlList(com.amazonaws.services.s3.model.AccessControlList) InitiateMultipartUploadResult(com.amazonaws.services.s3.model.InitiateMultipartUploadResult) HashMap(java.util.HashMap) AmazonClientException(com.amazonaws.AmazonClientException) CompleteMultipartUploadResult(com.amazonaws.services.s3.model.CompleteMultipartUploadResult) PartETag(com.amazonaws.services.s3.model.PartETag) UploadPartResult(com.amazonaws.services.s3.model.UploadPartResult) Entry(java.util.Map.Entry) BufferedInputStream(java.io.BufferedInputStream) PutObjectRequest(com.amazonaws.services.s3.model.PutObjectRequest) FlowFile(org.apache.nifi.flowfile.FlowFile) PutObjectResult(com.amazonaws.services.s3.model.PutObjectResult) FileInputStream(java.io.FileInputStream) InputStream(java.io.InputStream) InitiateMultipartUploadRequest(com.amazonaws.services.s3.model.InitiateMultipartUploadRequest) UploadPartRequest(com.amazonaws.services.s3.model.UploadPartRequest) IOException(java.io.IOException) ProcessException(org.apache.nifi.processor.exception.ProcessException) AmazonS3Exception(com.amazonaws.services.s3.model.AmazonS3Exception) AmazonS3Client(com.amazonaws.services.s3.AmazonS3Client) AtomicLong(java.util.concurrent.atomic.AtomicLong) InputStreamCallback(org.apache.nifi.processor.io.InputStreamCallback) ObjectMetadata(com.amazonaws.services.s3.model.ObjectMetadata) Map(java.util.Map) CompleteMultipartUploadRequest(com.amazonaws.services.s3.model.CompleteMultipartUploadRequest)
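
Stripped of NiFi's persisted state and resume logic, the multipart path above is the standard initiate / upload-parts / complete sequence of the AWS SDK v1. A minimal sketch of that skeleton (the method and its file-based source are illustrative assumptions, not NiFi code):

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadResult;
import com.amazonaws.services.s3.model.PartETag;
import com.amazonaws.services.s3.model.UploadPartRequest;
import java.io.File;
import java.util.ArrayList;
import java.util.List;

static void multipartUpload(AmazonS3 s3, String bucket, String key, File file, long partSize) {
    // 1. Initiate: S3 hands back an upload ID that ties the parts together.
    InitiateMultipartUploadResult init =
            s3.initiateMultipartUpload(new InitiateMultipartUploadRequest(bucket, key));
    List<PartETag> etags = new ArrayList<>();
    long position = 0;
    // 2. Upload parts: every part except the last must meet S3's 5 MB minimum size.
    for (int part = 1; position < file.length(); part++) {
        long thisPartSize = Math.min(partSize, file.length() - position);
        etags.add(s3.uploadPart(new UploadPartRequest()
                .withBucketName(bucket).withKey(key)
                .withUploadId(init.getUploadId())
                .withFile(file).withFileOffset(position)
                .withPartNumber(part).withPartSize(thisPartSize)).getPartETag());
        position += thisPartSize;
    }
    // 3. Complete: S3 assembles the object from the collected part ETags.
    s3.completeMultipartUpload(
            new CompleteMultipartUploadRequest(bucket, key, init.getUploadId(), etags));
}

What PutS3Object adds on top of this skeleton is durability: the upload ID and part ETags are persisted after every part, so a restarted processor can skip() past already-uploaded bytes instead of starting over.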

Example 13 with UploadPartResult

Use of com.amazonaws.services.s3.model.UploadPartResult in project kafka-connect-storage-cloud by confluentinc.

The class S3SinkTaskTest, method testWriteRecordsSpanningMultiplePartsWithRetry.

@Test
public void testWriteRecordsSpanningMultiplePartsWithRetry() throws Exception {
    localProps.put(S3SinkConnectorConfig.FLUSH_SIZE_CONFIG, "10000");
    localProps.put(S3SinkConnectorConfig.S3_PART_RETRIES_CONFIG, "3");
    setUp();
    List<SinkRecord> sinkRecords = createRecords(11000);
    int totalBytes = calcByteSize(sinkRecords);
    final int parts = totalBytes / connectorConfig.getPartSize();
    // From time to time fail S3 upload part method
    final AtomicInteger count = new AtomicInteger();
    PowerMockito.doAnswer(new Answer<UploadPartResult>() {

        @Override
        public UploadPartResult answer(InvocationOnMock invocationOnMock) throws Throwable {
            if (count.getAndIncrement() % parts == 0) {
                throw new SdkClientException("Boom!");
            } else {
                return (UploadPartResult) invocationOnMock.callRealMethod();
            }
        }
    }).when(s3).uploadPart(Mockito.isA(UploadPartRequest.class));
    replayAll();
    task = new S3SinkTask();
    task.initialize(context);
    task.start(properties);
    verifyAll();
    task.put(sinkRecords);
    task.close(context.assignment());
    task.stop();
    long[] validOffsets = { 0, 10000 };
    verify(sinkRecords, validOffsets);
}
Also used : UploadPartRequest(com.amazonaws.services.s3.model.UploadPartRequest) SinkRecord(org.apache.kafka.connect.sink.SinkRecord) UploadPartResult(com.amazonaws.services.s3.model.UploadPartResult) SdkClientException(com.amazonaws.SdkClientException) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) InvocationOnMock(org.mockito.invocation.InvocationOnMock) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) Test(org.junit.Test)
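
The same periodic-failure stub works with plain Mockito when the client is a mock rather than a PowerMock spy on a real one. Inside a test method, one might write (a sketch under that assumption; the failure period of 3 is arbitrary):

import static org.mockito.Mockito.isA;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import com.amazonaws.SdkClientException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.UploadPartRequest;
import com.amazonaws.services.s3.model.UploadPartResult;
import java.util.concurrent.atomic.AtomicInteger;

AmazonS3 s3 = mock(AmazonS3.class);
AtomicInteger count = new AtomicInteger();
when(s3.uploadPart(isA(UploadPartRequest.class))).thenAnswer(invocation -> {
    if (count.getAndIncrement() % 3 == 0) {
        throw new SdkClientException("Boom!"); // transient failure the retry logic must absorb
    }
    return new UploadPartResult(); // stubbed success; no real network call
});

The test relies on S3_PART_RETRIES_CONFIG covering the injected failures, so each SdkClientException is absorbed by a retry and the final offset verification still passes.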

Example 14 with UploadPartResult

Use of com.amazonaws.services.s3.model.UploadPartResult in project bender by Nextdoor.

The class S3Transport, method sendStream.

protected void sendStream(InputStream input, String key, long streamSize) throws TransportException {
    /*
     * Create metadata
     */
    ObjectMetadata metadata = new ObjectMetadata();
    /*
     * Find if a multipart upload has already begun or start a new one.
     */
    MultiPartUpload upload;
    synchronized (multiPartUploads) {
        if (!multiPartUploads.containsKey(key)) {
            InitiateMultipartUploadRequest uploadRequest = new InitiateMultipartUploadRequest(bucketName, key);
            uploadRequest.setObjectMetadata(metadata);
            InitiateMultipartUploadResult res = client.initiateMultipartUpload(uploadRequest);
            upload = new MultiPartUpload(bucketName, key, res.getUploadId());
            multiPartUploads.put(key, upload);
        } else {
            upload = multiPartUploads.get(key);
        }
    }
    /*
     * Write out to S3. Note that the S3 client auto closes the input stream.
     */
    UploadPartRequest req = upload.getUploadPartRequest().withInputStream(input).withPartSize(streamSize);
    try {
        UploadPartResult res = client.uploadPart(req);
        upload.addPartETag(res.getPartETag());
    } catch (AmazonClientException e) {
        client.abortMultipartUpload(upload.getAbortMultipartUploadRequest());
        throw new TransportException("unable to put file: " + e, e);
    } finally {
        try {
            input.close();
        } catch (IOException e) {
            logger.warn("error encountered while closing input stream", e);
        }
    }
}
Also used : UploadPartResult(com.amazonaws.services.s3.model.UploadPartResult) InitiateMultipartUploadResult(com.amazonaws.services.s3.model.InitiateMultipartUploadResult) AmazonClientException(com.amazonaws.AmazonClientException) InitiateMultipartUploadRequest(com.amazonaws.services.s3.model.InitiateMultipartUploadRequest) UploadPartRequest(com.amazonaws.services.s3.model.UploadPartRequest) IOException(java.io.IOException) ObjectMetadata(com.amazonaws.services.s3.model.ObjectMetadata) TransportException(com.nextdoor.bender.ipc.TransportException)
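
sendStream only uploads one part per call; the PartETags accumulated on the MultiPartUpload object are consumed later, when the transport completes the upload. A sketch of that completion step using the raw SDK types (the helper and its arguments are assumptions; bender's MultiPartUpload class wraps this bookkeeping):

import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
import com.amazonaws.services.s3.model.PartETag;
import java.util.List;

static void completeUpload(AmazonS3Client client, String bucket, String key,
                           String uploadId, List<PartETag> etags) {
    // Until this call succeeds, the uploaded parts exist but the object does not;
    // abandoned parts keep accruing storage unless aborted or expired by lifecycle rules.
    client.completeMultipartUpload(
            new CompleteMultipartUploadRequest(bucket, key, uploadId, etags));
}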

Example 15 with UploadPartResult

Use of com.amazonaws.services.s3.model.UploadPartResult in project bender by Nextdoor.

The class S3TransporterTest, method testAmazonClientException.

@Test(expected = TransportException.class)
public void testAmazonClientException() throws TransportException, IllegalStateException, IOException {
    /*
     * Create mock client, requests, and replies
     */
    AmazonS3Client mockClient = mock(AmazonS3Client.class);
    UploadPartResult uploadResult = new UploadPartResult();
    uploadResult.setETag("foo");
    doThrow(new AmazonClientException("expected")).when(mockClient).uploadPart(any(UploadPartRequest.class));
    InitiateMultipartUploadResult initUploadResult = new InitiateMultipartUploadResult();
    initUploadResult.setUploadId("123");
    doReturn(initUploadResult).when(mockClient).initiateMultipartUpload(any(InitiateMultipartUploadRequest.class));
    /*
     * Fill buffer with mock data
     */
    S3TransportBuffer buffer = new S3TransportBuffer(1000, false, new S3TransportSerializer());
    InternalEvent mockIevent = mock(InternalEvent.class);
    doReturn("foo").when(mockIevent).getSerialized();
    /*
     * Create transport
     */
    Map<String, MultiPartUpload> multiPartUploads = new HashMap<String, MultiPartUpload>(0);
    S3Transport transport = new S3Transport(mockClient, "bucket", "basepath", false, multiPartUploads);
    /*
     * Do actual test
     */
    buffer.add(mockIevent);
    LinkedHashMap<String, String> partitions = new LinkedHashMap<String, String>();
    partitions.put(S3Transport.FILENAME_KEY, "a_filename");
    ArgumentCaptor<UploadPartRequest> argument = ArgumentCaptor.forClass(UploadPartRequest.class);
    try {
        transport.sendBatch(buffer, partitions, new TestContext());
    } catch (Exception e) {
        assertEquals(e.getCause().getClass(), AmazonClientException.class);
        throw e;
    }
}
Also used : InitiateMultipartUploadResult(com.amazonaws.services.s3.model.InitiateMultipartUploadResult) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) AmazonClientException(com.amazonaws.AmazonClientException) TestContext(com.nextdoor.bender.aws.TestContext) InitiateMultipartUploadRequest(com.amazonaws.services.s3.model.InitiateMultipartUploadRequest) UploadPartRequest(com.amazonaws.services.s3.model.UploadPartRequest) TransportException(com.nextdoor.bender.ipc.TransportException) IOException(java.io.IOException) InternalEvent(com.nextdoor.bender.InternalEvent) UploadPartResult(com.amazonaws.services.s3.model.UploadPartResult) AmazonS3Client(com.amazonaws.services.s3.AmazonS3Client) Test(org.junit.Test)
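
The ArgumentCaptor declared in the test is never used. A sketch of how it could be put to work (an addition for illustration, not part of the quoted test): verify that the failed part upload made the transport abort the multipart upload it had initiated.

import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.verify;

import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
import org.mockito.ArgumentCaptor;

ArgumentCaptor<AbortMultipartUploadRequest> abortCaptor =
        ArgumentCaptor.forClass(AbortMultipartUploadRequest.class);
verify(mockClient).abortMultipartUpload(abortCaptor.capture());
// The abort must reference the upload ID returned by the mocked initiate call.
assertEquals("123", abortCaptor.getValue().getUploadId());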

Aggregations

UploadPartResult (com.amazonaws.services.s3.model.UploadPartResult) 17
UploadPartRequest (com.amazonaws.services.s3.model.UploadPartRequest) 16
InitiateMultipartUploadRequest (com.amazonaws.services.s3.model.InitiateMultipartUploadRequest) 11
InitiateMultipartUploadResult (com.amazonaws.services.s3.model.InitiateMultipartUploadResult) 10
CompleteMultipartUploadRequest (com.amazonaws.services.s3.model.CompleteMultipartUploadRequest) 8
IOException (java.io.IOException) 8
AmazonClientException (com.amazonaws.AmazonClientException) 7
Test (org.junit.Test) 7
AmazonS3Client (com.amazonaws.services.s3.AmazonS3Client) 6
CompleteMultipartUploadResult (com.amazonaws.services.s3.model.CompleteMultipartUploadResult) 6
PartETag (com.amazonaws.services.s3.model.PartETag) 5
ArrayList (java.util.ArrayList) 5
AmazonS3 (com.amazonaws.services.s3.AmazonS3) 4
ObjectMetadata (com.amazonaws.services.s3.model.ObjectMetadata) 4
HashMap (java.util.HashMap) 4
AbortMultipartUploadRequest (com.amazonaws.services.s3.model.AbortMultipartUploadRequest) 3
CannedAccessControlList (com.amazonaws.services.s3.model.CannedAccessControlList) 3
PutObjectRequest (com.amazonaws.services.s3.model.PutObjectRequest) 3
PutObjectResult (com.amazonaws.services.s3.model.PutObjectResult) 3
InternalEvent (com.nextdoor.bender.InternalEvent) 3