Search in sources:

Example 21 with UploadPartResult

use of com.amazonaws.services.s3.model.UploadPartResult in project big-data-plugin by pentaho.

In class S3FileObjectTest: method testDoGetOutputStream.

@Test
public void testDoGetOutputStream() throws Exception {
    // Stub the multipart-upload handshake: initiate returns an upload id,
    // and every uploadPart returns a result carrying a part ETag.
    InitiateMultipartUploadResult initResult = mock(InitiateMultipartUploadResult.class);
    when(initResult.getUploadId()).thenReturn("foo");
    when(s3ServiceMock.initiateMultipartUpload(any())).thenReturn(initResult);
    PartETag partETag = mock(PartETag.class);
    UploadPartResult partResult = mock(UploadPartResult.class);
    when(partResult.getPartETag()).thenReturn(partETag);
    when(s3ServiceMock.uploadPart(any())).thenReturn(partResult);
    // Both append=false and append=true must produce a usable stream.
    assertNotNull(s3FileObjectBucketSpy.doGetOutputStream(false));
    OutputStream out = s3FileObjectBucketSpy.doGetOutputStream(true);
    assertNotNull(out);
    out.write(new byte[1024 * 1024 * 6]); // 6MB payload
    out.close();
    // Assumes kettle.properties 's3.vfs.partSize' is in [5MB, 6MB), so 6MB of
    // data is split across exactly two uploaded parts.
    verify(s3ServiceMock, times(2)).uploadPart(any());
    verify(s3ServiceMock, atMost(1)).completeMultipartUpload(any());
}
Also used : UploadPartResult(com.amazonaws.services.s3.model.UploadPartResult) InitiateMultipartUploadResult(com.amazonaws.services.s3.model.InitiateMultipartUploadResult) OutputStream(java.io.OutputStream) PartETag(com.amazonaws.services.s3.model.PartETag) Test(org.junit.Test)

Example 22 with UploadPartResult

use of com.amazonaws.services.s3.model.UploadPartResult in project android-simpl3r by jgilfelt.

In class Uploader: method start.

/**
 * Initiate (or resume) a multipart file upload to Amazon S3.
 * <p>
 * If a cached upload id exists for this file, the upload resumes after the
 * last part whose ETag was cached; otherwise a new multipart upload is
 * initiated. Parts are uploaded sequentially and their ETags cached so an
 * interrupted upload can be resumed later.
 *
 * @return the URL (location) of the successfully uploaded file
 */
public String start() {
    // initialize
    List<PartETag> partETags = new ArrayList<PartETag>();
    final long contentLength = file.length();
    long filePosition = 0;
    int startPartNumber = 1;
    userInterrupted = false;
    userAborted = false;
    bytesUploaded = 0;
    // check if we can resume an incomplete upload
    String uploadId = getCachedUploadId();
    if (uploadId != null) {
        // we can resume the upload
        Log.i(TAG, "resuming upload for " + uploadId);
        // get the cached etags
        List<PartETag> cachedEtags = getCachedPartEtags();
        partETags.addAll(cachedEtags);
        // calculate the start position for resume; each cached etag represents
        // one fully uploaded part of size partSize
        startPartNumber = cachedEtags.size() + 1;
        // widen to long before multiplying to avoid int overflow for large files
        filePosition = (long) (startPartNumber - 1) * partSize;
        bytesUploaded = filePosition;
        Log.i(TAG, "resuming at part " + startPartNumber + " position " + filePosition);
    } else {
        // initiate a new multi part upload
        Log.i(TAG, "initiating new upload");
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(s3bucketName, s3key);
        configureInitiateRequest(initRequest);
        InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
        uploadId = initResponse.getUploadId();
    }
    final AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(s3bucketName, s3key, uploadId);
    for (int k = startPartNumber; filePosition < contentLength; k++) {
        // the last part may be smaller than partSize
        long thisPartSize = Math.min(partSize, (contentLength - filePosition));
        Log.i(TAG, "starting file part " + k + " with size " + thisPartSize);
        UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(s3bucketName).withKey(s3key).withUploadId(uploadId).withPartNumber(k).withFileOffset(filePosition).withFile(file).withPartSize(thisPartSize);
        ProgressListener s3progressListener = new ProgressListener() {

            public void progressChanged(ProgressEvent progressEvent) {
                // TODO calling shutdown too brute force?
                if (userInterrupted) {
                    s3Client.shutdown();
                    throw new UploadIterruptedException("User interrupted");
                } else if (userAborted) {
                    // aborted requests cannot be resumed, so clear any cached etags
                    clearProgressCache();
                    s3Client.abortMultipartUpload(abortRequest);
                    s3Client.shutdown();
                    // NOTE(review): execution falls through here and keeps reporting
                    // progress on a shut-down client — consider returning instead.
                }
                bytesUploaded += progressEvent.getBytesTransfered();
                // Log.d(TAG, "bytesUploaded=" + bytesUploaded);
                // broadcast progress; use floating-point division so the percentage
                // is not truncated to an integer before rounding
                float fpercent = (bytesUploaded * 100f) / contentLength;
                int percent = Math.round(fpercent);
                if (progressListener != null) {
                    progressListener.progressChanged(progressEvent, bytesUploaded, percent);
                }
            }
        };
        uploadRequest.setProgressListener(s3progressListener);
        UploadPartResult result = s3Client.uploadPart(uploadRequest);
        partETags.add(result.getPartETag());
        // cache the part progress for this upload
        if (k == 1) {
            initProgressCache(uploadId);
        }
        // store part etag
        cachePartEtag(result);
        filePosition += thisPartSize;
    }
    // all parts uploaded — complete the multipart upload and clean up the cache
    CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(s3bucketName, s3key, uploadId, partETags);
    CompleteMultipartUploadResult result = s3Client.completeMultipartUpload(compRequest);
    bytesUploaded = 0;
    Log.i(TAG, "upload complete for " + uploadId);
    clearProgressCache();
    return result.getLocation();
}
Also used : InitiateMultipartUploadResult(com.amazonaws.services.s3.model.InitiateMultipartUploadResult) ArrayList(java.util.ArrayList) InitiateMultipartUploadRequest(com.amazonaws.services.s3.model.InitiateMultipartUploadRequest) UploadPartRequest(com.amazonaws.services.s3.model.UploadPartRequest) AbortMultipartUploadRequest(com.amazonaws.services.s3.model.AbortMultipartUploadRequest) CompleteMultipartUploadResult(com.amazonaws.services.s3.model.CompleteMultipartUploadResult) ProgressEvent(com.amazonaws.services.s3.model.ProgressEvent) PartETag(com.amazonaws.services.s3.model.PartETag) UploadPartResult(com.amazonaws.services.s3.model.UploadPartResult) ProgressListener(com.amazonaws.services.s3.model.ProgressListener) CompleteMultipartUploadRequest(com.amazonaws.services.s3.model.CompleteMultipartUploadRequest)

Example 23 with UploadPartResult

use of com.amazonaws.services.s3.model.UploadPartResult in project beam by apache.

In class S3WritableByteChannel: method flush.

private void flush() throws IOException {
    // Drain the staging buffer into a single S3 part upload.
    uploadBuffer.flip();
    int partLength = uploadBuffer.limit();
    ByteArrayInputStream partStream = new ByteArrayInputStream(uploadBuffer.array(), 0, partLength);
    UploadPartRequest partRequest =
        new UploadPartRequest()
            .withBucketName(path.getBucket())
            .withKey(path.getKey())
            .withUploadId(uploadId)
            .withPartNumber(partNumber++)
            .withPartSize(partLength)
            .withMD5Digest(Base64.encodeAsString(md5.digest()))
            .withInputStream(partStream);
    partRequest.setSSECustomerKey(config.getSSECustomerKey());
    UploadPartResult partResult;
    try {
        partResult = amazonS3.uploadPart(partRequest);
    } catch (AmazonClientException e) {
        // Surface AWS client failures through the channel's IOException contract.
        throw new IOException(e);
    }
    // Reset the staging state for the next part and record the ETag for completion.
    uploadBuffer.clear();
    md5.reset();
    eTags.add(partResult.getPartETag());
}
Also used : UploadPartResult(com.amazonaws.services.s3.model.UploadPartResult) ByteArrayInputStream(java.io.ByteArrayInputStream) AmazonClientException(com.amazonaws.AmazonClientException) UploadPartRequest(com.amazonaws.services.s3.model.UploadPartRequest) IOException(java.io.IOException)

Example 24 with UploadPartResult

use of com.amazonaws.services.s3.model.UploadPartResult in project beam by apache.

In class S3WritableByteChannelTest: method write.

private void write(AmazonS3 mockAmazonS3, Supplier channelSupplier, S3ResourceId path, String sseAlgorithm, String sseCustomerKeyMd5, SSEAwsKeyManagementParams sseAwsKeyManagementParams, long s3UploadBufferSizeBytes, boolean bucketKeyEnabled, boolean writeReadOnlyBuffer) throws IOException {
    // --- Arrange: stub initiateMultipartUpload with the requested SSE settings ---
    InitiateMultipartUploadResult initResult = new InitiateMultipartUploadResult();
    initResult.setUploadId("upload-id");
    if (sseAlgorithm != null) {
        initResult.setSSEAlgorithm(sseAlgorithm);
    }
    if (sseCustomerKeyMd5 != null) {
        initResult.setSSECustomerKeyMd5(sseCustomerKeyMd5);
    }
    if (sseAwsKeyManagementParams != null) {
        // KMS params imply the aws:kms algorithm; keep the local in sync for the asserts below.
        sseAlgorithm = "aws:kms";
        initResult.setSSEAlgorithm(sseAlgorithm);
    }
    initResult.setBucketKeyEnabled(bucketKeyEnabled);
    doReturn(initResult).when(mockAmazonS3).initiateMultipartUpload(any(InitiateMultipartUploadRequest.class));
    // Sanity-check the stub before exercising the channel (counts as one initiate call).
    InitiateMultipartUploadResult stubbedInit = mockAmazonS3.initiateMultipartUpload(new InitiateMultipartUploadRequest(path.getBucket(), path.getKey()));
    assertEquals(sseAlgorithm, stubbedInit.getSSEAlgorithm());
    assertEquals(bucketKeyEnabled, stubbedInit.getBucketKeyEnabled());
    assertEquals(sseCustomerKeyMd5, stubbedInit.getSSECustomerKeyMd5());
    // --- Arrange: stub uploadPart (counts as one uploadPart call) ---
    UploadPartResult partResult = new UploadPartResult();
    partResult.setETag("etag");
    if (sseCustomerKeyMd5 != null) {
        partResult.setSSECustomerKeyMd5(sseCustomerKeyMd5);
    }
    doReturn(partResult).when(mockAmazonS3).uploadPart(any(UploadPartRequest.class));
    UploadPartResult stubbedPart = mockAmazonS3.uploadPart(new UploadPartRequest());
    assertEquals(sseCustomerKeyMd5, stubbedPart.getSSECustomerKeyMd5());
    // --- Act: write contentSize bytes of 0xff through the channel ---
    int contentSize = 34_078_720;
    ByteBuffer uploadContent = ByteBuffer.allocate((int) (contentSize * 2.5));
    for (int i = 0; i < contentSize; i++) {
        uploadContent.put((byte) 0xff);
    }
    uploadContent.flip();
    S3WritableByteChannel channel = channelSupplier.get();
    int uploadedSize = channel.write(writeReadOnlyBuffer ? uploadContent.asReadOnlyBuffer() : uploadContent);
    assertEquals(contentSize, uploadedSize);
    CompleteMultipartUploadResult completeResult = new CompleteMultipartUploadResult();
    doReturn(completeResult).when(mockAmazonS3).completeMultipartUpload(any(CompleteMultipartUploadRequest.class));
    channel.close();
    // --- Assert: one initiate from the sanity check plus one from the channel ---
    verify(mockAmazonS3, times(2)).initiateMultipartUpload(notNull(InitiateMultipartUploadRequest.class));
    // One uploadPart from the sanity check plus one per filled buffer of the content.
    int partQuantity = (int) Math.ceil((double) contentSize / s3UploadBufferSizeBytes) + 1;
    verify(mockAmazonS3, times(partQuantity)).uploadPart(notNull(UploadPartRequest.class));
    verify(mockAmazonS3, times(1)).completeMultipartUpload(notNull(CompleteMultipartUploadRequest.class));
    verifyNoMoreInteractions(mockAmazonS3);
}
Also used : UploadPartResult(com.amazonaws.services.s3.model.UploadPartResult) InitiateMultipartUploadResult(com.amazonaws.services.s3.model.InitiateMultipartUploadResult) InitiateMultipartUploadRequest(com.amazonaws.services.s3.model.InitiateMultipartUploadRequest) UploadPartRequest(com.amazonaws.services.s3.model.UploadPartRequest) CompleteMultipartUploadResult(com.amazonaws.services.s3.model.CompleteMultipartUploadResult) ByteBuffer(java.nio.ByteBuffer) CompleteMultipartUploadRequest(com.amazonaws.services.s3.model.CompleteMultipartUploadRequest)

Example 25 with UploadPartResult

use of com.amazonaws.services.s3.model.UploadPartResult in project elasticsearch by elastic.

In class TestAmazonS3: method uploadPart.

@Override
public UploadPartResult uploadPart(UploadPartRequest request) throws AmazonClientException, AmazonServiceException {
    if (shouldFail(request.getBucketName(), request.getKey(), writeFailureRate)) {
        long length = request.getPartSize();
        long partToRead = (long) (length * randomDouble());
        byte[] buffer = new byte[1024];
        for (long cur = 0; cur < partToRead; cur += buffer.length) {
            try (InputStream input = request.getInputStream()) {
                input.read(buffer, 0, (int) (partToRead - cur > buffer.length ? buffer.length : partToRead - cur));
            } catch (IOException ex) {
                throw new ElasticsearchException("cannot read input stream", ex);
            }
        }
        logger.info("--> random write failure on uploadPart method: throwing an exception for [bucket={}, key={}]", request.getBucketName(), request.getKey());
        AmazonS3Exception ex = new AmazonS3Exception("Random S3 write exception");
        ex.setStatusCode(400);
        ex.setErrorCode("RequestTimeout");
        throw ex;
    } else {
        return super.uploadPart(request);
    }
}
Also used : InputStream(java.io.InputStream) IOException(java.io.IOException) ElasticsearchException(org.elasticsearch.ElasticsearchException) AmazonS3Exception(com.amazonaws.services.s3.model.AmazonS3Exception)

Aggregations

UploadPartResult (com.amazonaws.services.s3.model.UploadPartResult)33 UploadPartRequest (com.amazonaws.services.s3.model.UploadPartRequest)28 InitiateMultipartUploadRequest (com.amazonaws.services.s3.model.InitiateMultipartUploadRequest)22 InitiateMultipartUploadResult (com.amazonaws.services.s3.model.InitiateMultipartUploadResult)19 CompleteMultipartUploadRequest (com.amazonaws.services.s3.model.CompleteMultipartUploadRequest)17 ArrayList (java.util.ArrayList)16 PartETag (com.amazonaws.services.s3.model.PartETag)15 IOException (java.io.IOException)14 Test (org.junit.Test)13 AmazonClientException (com.amazonaws.AmazonClientException)11 CompleteMultipartUploadResult (com.amazonaws.services.s3.model.CompleteMultipartUploadResult)10 ByteArrayInputStream (java.io.ByteArrayInputStream)10 ObjectMetadata (com.amazonaws.services.s3.model.ObjectMetadata)9 AmazonS3 (com.amazonaws.services.s3.AmazonS3)8 InputStream (java.io.InputStream)7 AmazonS3Client (com.amazonaws.services.s3.AmazonS3Client)6 AbortMultipartUploadRequest (com.amazonaws.services.s3.model.AbortMultipartUploadRequest)6 CannedAccessControlList (com.amazonaws.services.s3.model.CannedAccessControlList)5 File (java.io.File)5 HashMap (java.util.HashMap)5