Use of com.amazonaws.services.s3.model.UploadPartResult in project crate by crate.
From the class S3BlobStoreContainerTests, method testExecuteMultipartUploadAborted.
@Test
public void testExecuteMultipartUploadAborted() {
    final String bucketName = randomAlphaOfLengthBetween(1, 10);
    final String blobName = randomAlphaOfLengthBetween(1, 10);
    final BlobPath blobPath = new BlobPath();
    final long blobSize = ByteSizeUnit.MB.toBytes(765);
    final long bufferSize = ByteSizeUnit.MB.toBytes(150);

    final S3BlobStore blobStore = mock(S3BlobStore.class);
    when(blobStore.bucket()).thenReturn(bucketName);
    when(blobStore.bufferSizeInBytes()).thenReturn(bufferSize);
    when(blobStore.getStorageClass()).thenReturn(randomFrom(StorageClass.values()));

    final AmazonS3 client = mock(AmazonS3.class);
    final AmazonS3Reference clientReference = new AmazonS3Reference(client);
    doAnswer(invocation -> {
        clientReference.incRef();
        return clientReference;
    }).when(blobStore).clientReference();

    final String uploadId = randomAlphaOfLength(25);

    // pick at random which of the three requests will fail
    final int stage = randomInt(2);
    final List<AmazonClientException> exceptions = Arrays.asList(
        new AmazonClientException("Expected initialization request to fail"),
        new AmazonClientException("Expected upload part request to fail"),
        new AmazonClientException("Expected completion request to fail"));

    if (stage == 0) {
        // Fail the initialization request
        when(client.initiateMultipartUpload(any(InitiateMultipartUploadRequest.class)))
            .thenThrow(exceptions.get(stage));
    } else if (stage == 1) {
        final InitiateMultipartUploadResult initResult = new InitiateMultipartUploadResult();
        initResult.setUploadId(uploadId);
        when(client.initiateMultipartUpload(any(InitiateMultipartUploadRequest.class))).thenReturn(initResult);
        // Fail the upload part request
        when(client.uploadPart(any(UploadPartRequest.class))).thenThrow(exceptions.get(stage));
    } else {
        final InitiateMultipartUploadResult initResult = new InitiateMultipartUploadResult();
        initResult.setUploadId(uploadId);
        when(client.initiateMultipartUpload(any(InitiateMultipartUploadRequest.class))).thenReturn(initResult);
        when(client.uploadPart(any(UploadPartRequest.class))).thenAnswer(invocationOnMock -> {
            final UploadPartRequest request = (UploadPartRequest) invocationOnMock.getArguments()[0];
            final UploadPartResult response = new UploadPartResult();
            response.setPartNumber(request.getPartNumber());
            response.setETag(randomAlphaOfLength(20));
            return response;
        });
        // Fail the completion request
        when(client.completeMultipartUpload(any(CompleteMultipartUploadRequest.class))).thenThrow(exceptions.get(stage));
    }

    final ArgumentCaptor<AbortMultipartUploadRequest> argumentCaptor =
        ArgumentCaptor.forClass(AbortMultipartUploadRequest.class);
    doNothing().when(client).abortMultipartUpload(argumentCaptor.capture());

    final IOException e = expectThrows(IOException.class, () -> {
        final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore);
        blobContainer.executeMultipartUpload(blobStore, blobName, new ByteArrayInputStream(new byte[0]), blobSize);
    });
    assertEquals("Unable to upload object [" + blobName + "] using multipart upload", e.getMessage());
    assertThat(e.getCause(), instanceOf(AmazonClientException.class));
    assertEquals(exceptions.get(stage).getMessage(), e.getCause().getMessage());

    if (stage == 0) {
        // initialization failed: no upload id was obtained, so nothing needs aborting
        verify(client, times(1)).initiateMultipartUpload(any(InitiateMultipartUploadRequest.class));
        verify(client, times(0)).uploadPart(any(UploadPartRequest.class));
        verify(client, times(0)).completeMultipartUpload(any(CompleteMultipartUploadRequest.class));
        verify(client, times(0)).abortMultipartUpload(any(AbortMultipartUploadRequest.class));
    } else {
        verify(client, times(1)).initiateMultipartUpload(any(InitiateMultipartUploadRequest.class));
        if (stage == 1) {
            verify(client, times(1)).uploadPart(any(UploadPartRequest.class));
            verify(client, times(0)).completeMultipartUpload(any(CompleteMultipartUploadRequest.class));
        } else {
            // a 765 MB blob with a 150 MB buffer yields 6 parts (5 full parts plus one 15 MB part)
            verify(client, times(6)).uploadPart(any(UploadPartRequest.class));
            verify(client, times(1)).completeMultipartUpload(any(CompleteMultipartUploadRequest.class));
        }
        verify(client, times(1)).abortMultipartUpload(any(AbortMultipartUploadRequest.class));

        final AbortMultipartUploadRequest abortRequest = argumentCaptor.getValue();
        assertEquals(bucketName, abortRequest.getBucketName());
        assertEquals(blobName, abortRequest.getKey());
        assertEquals(uploadId, abortRequest.getUploadId());
    }
}
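The test pins down two contracts of executeMultipartUpload: any AmazonClientException is rethrown wrapped in an IOException with a fixed message, and abortMultipartUpload is issued only once an upload id has actually been obtained (stage 0 fails before that point, so no abort is expected). A minimal sketch of that abort-on-failure pattern, written as a hypothetical standalone helper rather than crate's actual implementation:

import java.io.IOException;
import com.amazonaws.AmazonClientException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;

class MultipartUploadPattern { // hypothetical helper, not crate's S3BlobContainer
    static void upload(AmazonS3 client, String bucket, String key) throws IOException {
        String uploadId = null;
        try {
            uploadId = client.initiateMultipartUpload(
                new InitiateMultipartUploadRequest(bucket, key)).getUploadId();
            // ... uploadPart(...) per part, then completeMultipartUpload(...) ...
        } catch (AmazonClientException e) {
            if (uploadId != null) {
                // abort only after a successful initiation, matching the stage 0 assertions
                client.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, uploadId));
            }
            throw new IOException("Unable to upload object [" + key + "] using multipart upload", e);
        }
    }
}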
Use of com.amazonaws.services.s3.model.UploadPartResult in project crate by crate.
From the class S3BlobStoreContainerTests, method testExecuteMultipartUpload.
@Test
public void testExecuteMultipartUpload() throws IOException {
    final String bucketName = randomAlphaOfLengthBetween(1, 10);
    final String blobName = randomAlphaOfLengthBetween(1, 10);
    final BlobPath blobPath = new BlobPath();
    if (randomBoolean()) {
        IntStream.of(randomIntBetween(1, 5)).forEach(value -> blobPath.add("path_" + value));
    }

    final long blobSize = ByteSizeUnit.GB.toBytes(randomIntBetween(1, 128));
    final long bufferSize = ByteSizeUnit.MB.toBytes(randomIntBetween(5, 1024));

    final S3BlobStore blobStore = mock(S3BlobStore.class);
    when(blobStore.bucket()).thenReturn(bucketName);
    when(blobStore.bufferSizeInBytes()).thenReturn(bufferSize);

    final boolean serverSideEncryption = randomBoolean();
    when(blobStore.serverSideEncryption()).thenReturn(serverSideEncryption);

    final StorageClass storageClass = randomFrom(StorageClass.values());
    when(blobStore.getStorageClass()).thenReturn(storageClass);

    final CannedAccessControlList cannedAccessControlList =
        randomBoolean() ? randomFrom(CannedAccessControlList.values()) : null;
    if (cannedAccessControlList != null) {
        when(blobStore.getCannedACL()).thenReturn(cannedAccessControlList);
    }

    final AmazonS3 client = mock(AmazonS3.class);
    final AmazonS3Reference clientReference = new AmazonS3Reference(client);
    when(blobStore.clientReference()).thenReturn(clientReference);

    final ArgumentCaptor<InitiateMultipartUploadRequest> initArgCaptor =
        ArgumentCaptor.forClass(InitiateMultipartUploadRequest.class);
    final InitiateMultipartUploadResult initResult = new InitiateMultipartUploadResult();
    initResult.setUploadId(randomAlphaOfLength(10));
    when(client.initiateMultipartUpload(initArgCaptor.capture())).thenReturn(initResult);

    final ArgumentCaptor<UploadPartRequest> uploadArgCaptor = ArgumentCaptor.forClass(UploadPartRequest.class);

    final List<String> expectedEtags = new ArrayList<>();
    final long partSize = Math.min(bufferSize, blobSize);
    long totalBytes = 0;
    do {
        expectedEtags.add(randomAlphaOfLength(50));
        totalBytes += partSize;
    } while (totalBytes < blobSize);

    when(client.uploadPart(uploadArgCaptor.capture())).thenAnswer(invocationOnMock -> {
        final UploadPartRequest request = (UploadPartRequest) invocationOnMock.getArguments()[0];
        final UploadPartResult response = new UploadPartResult();
        response.setPartNumber(request.getPartNumber());
        response.setETag(expectedEtags.get(request.getPartNumber() - 1));
        return response;
    });

    final ArgumentCaptor<CompleteMultipartUploadRequest> compArgCaptor =
        ArgumentCaptor.forClass(CompleteMultipartUploadRequest.class);
    when(client.completeMultipartUpload(compArgCaptor.capture())).thenReturn(new CompleteMultipartUploadResult());

    final ByteArrayInputStream inputStream = new ByteArrayInputStream(new byte[0]);
    final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore);
    blobContainer.executeMultipartUpload(blobStore, blobName, inputStream, blobSize);

    final InitiateMultipartUploadRequest initRequest = initArgCaptor.getValue();
    assertEquals(bucketName, initRequest.getBucketName());
    assertEquals(blobPath.buildAsString() + blobName, initRequest.getKey());
    assertEquals(storageClass, initRequest.getStorageClass());
    assertEquals(cannedAccessControlList, initRequest.getCannedACL());
    if (serverSideEncryption) {
        assertEquals(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION, initRequest.getObjectMetadata().getSSEAlgorithm());
    }

    final Tuple<Long, Long> numberOfParts = S3BlobContainer.numberOfMultiparts(blobSize, bufferSize);
    final List<UploadPartRequest> uploadRequests = uploadArgCaptor.getAllValues();
    assertEquals(numberOfParts.v1().intValue(), uploadRequests.size());

    for (int i = 0; i < uploadRequests.size(); i++) {
        final UploadPartRequest uploadRequest = uploadRequests.get(i);
        assertEquals(bucketName, uploadRequest.getBucketName());
        assertEquals(blobPath.buildAsString() + blobName, uploadRequest.getKey());
        assertEquals(initResult.getUploadId(), uploadRequest.getUploadId());
        assertEquals(i + 1, uploadRequest.getPartNumber());
        assertEquals(inputStream, uploadRequest.getInputStream());
        if (i == (uploadRequests.size() - 1)) {
            assertTrue(uploadRequest.isLastPart());
            assertEquals(numberOfParts.v2().longValue(), uploadRequest.getPartSize());
        } else {
            assertFalse(uploadRequest.isLastPart());
            assertEquals(bufferSize, uploadRequest.getPartSize());
        }
    }

    final CompleteMultipartUploadRequest compRequest = compArgCaptor.getValue();
    assertEquals(bucketName, compRequest.getBucketName());
    assertEquals(blobPath.buildAsString() + blobName, compRequest.getKey());
    assertEquals(initResult.getUploadId(), compRequest.getUploadId());

    final List<String> actualETags = compRequest.getPartETags().stream()
        .map(PartETag::getETag)
        .collect(Collectors.toList());
    assertEquals(expectedEtags, actualETags);
}
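The part-count assertions delegate to S3BlobContainer.numberOfMultiparts, which splits the blob into full buffer-sized parts plus a possibly shorter final part. A sketch consistent with the assertions above, simplified to return a long[] {parts, lastPartSize} instead of the project's Tuple<Long, Long>:

// Sketch of the part-count arithmetic the assertions above rely on; the project's
// method returns a Tuple<Long, Long>, simplified here to a long[] {parts, lastPartSize}.
static long[] numberOfMultiparts(long totalSize, long partSize) {
    long fullParts = totalSize / partSize;
    long remaining = totalSize % partSize;
    if (remaining == 0) {
        // evenly divisible: every part, including the last, is partSize bytes
        return new long[] { fullParts, partSize };
    }
    // otherwise one extra, shorter, final part carries the remainder
    return new long[] { fullParts + 1, remaining };
}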
Use of com.amazonaws.services.s3.model.UploadPartResult in project beam by apache.
From the class S3WritableByteChannel, method flush.
private void flush() throws IOException {
    uploadBuffer.flip();
    ByteArrayInputStream inputStream = new ByteArrayInputStream(uploadBuffer.array(), 0, uploadBuffer.limit());

    UploadPartRequest request = new UploadPartRequest()
        .withBucketName(path.getBucket())
        .withKey(path.getKey())
        .withUploadId(uploadId)
        .withPartNumber(partNumber++)
        .withPartSize(uploadBuffer.limit())
        .withMD5Digest(Base64.encodeAsString(md5.digest()))
        .withInputStream(inputStream);
    request.setSSECustomerKey(config.getSSECustomerKey());

    UploadPartResult result;
    try {
        result = amazonS3.uploadPart(request);
    } catch (AmazonClientException e) {
        throw new IOException(e);
    }
    uploadBuffer.clear();
    md5.reset();
    eTags.add(result.getPartETag());
}
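Each flush() contributes one PartETag to eTags; when the channel is closed, those tags are handed to completeMultipartUpload to stitch the parts together. A hedged sketch of that completion step, reusing the field names from the snippet above (amazonS3, path, uploadId, eTags) but not claiming to be Beam's verbatim close() implementation:

// Sketch of the completion step that consumes the eTags collected by flush().
// Field names mirror the snippet above; this is not Beam's exact close().
private void completeUpload() throws IOException {
    CompleteMultipartUploadRequest request = new CompleteMultipartUploadRequest()
        .withBucketName(path.getBucket())
        .withKey(path.getKey())
        .withUploadId(uploadId)
        .withPartETags(eTags);
    try {
        amazonS3.completeMultipartUpload(request);
    } catch (AmazonClientException e) {
        throw new IOException(e);
    }
}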
Use of com.amazonaws.services.s3.model.UploadPartResult in project beam by apache.
From the class S3WritableByteChannelTest, method write.
private void write(
        AmazonS3 mockAmazonS3,
        Supplier<S3WritableByteChannel> channelSupplier,
        S3ResourceId path,
        String sseAlgorithm,
        String sseCustomerKeyMd5,
        SSEAwsKeyManagementParams sseAwsKeyManagementParams,
        long s3UploadBufferSizeBytes,
        boolean bucketKeyEnabled,
        boolean writeReadOnlyBuffer) throws IOException {
    InitiateMultipartUploadResult initiateMultipartUploadResult = new InitiateMultipartUploadResult();
    initiateMultipartUploadResult.setUploadId("upload-id");
    if (sseAlgorithm != null) {
        initiateMultipartUploadResult.setSSEAlgorithm(sseAlgorithm);
    }
    if (sseCustomerKeyMd5 != null) {
        initiateMultipartUploadResult.setSSECustomerKeyMd5(sseCustomerKeyMd5);
    }
    if (sseAwsKeyManagementParams != null) {
        sseAlgorithm = "aws:kms";
        initiateMultipartUploadResult.setSSEAlgorithm(sseAlgorithm);
    }
    initiateMultipartUploadResult.setBucketKeyEnabled(bucketKeyEnabled);
    doReturn(initiateMultipartUploadResult)
        .when(mockAmazonS3)
        .initiateMultipartUpload(any(InitiateMultipartUploadRequest.class));

    // direct sanity check of the stubbing (counted in the verifications below)
    InitiateMultipartUploadResult mockInitiateMultipartUploadResult =
        mockAmazonS3.initiateMultipartUpload(new InitiateMultipartUploadRequest(path.getBucket(), path.getKey()));
    assertEquals(sseAlgorithm, mockInitiateMultipartUploadResult.getSSEAlgorithm());
    assertEquals(bucketKeyEnabled, mockInitiateMultipartUploadResult.getBucketKeyEnabled());
    assertEquals(sseCustomerKeyMd5, mockInitiateMultipartUploadResult.getSSECustomerKeyMd5());

    UploadPartResult result = new UploadPartResult();
    result.setETag("etag");
    if (sseCustomerKeyMd5 != null) {
        result.setSSECustomerKeyMd5(sseCustomerKeyMd5);
    }
    doReturn(result).when(mockAmazonS3).uploadPart(any(UploadPartRequest.class));
    UploadPartResult mockUploadPartResult = mockAmazonS3.uploadPart(new UploadPartRequest());
    assertEquals(sseCustomerKeyMd5, mockUploadPartResult.getSSECustomerKeyMd5());

    int contentSize = 34_078_720;
    ByteBuffer uploadContent = ByteBuffer.allocate((int) (contentSize * 2.5));
    for (int i = 0; i < contentSize; i++) {
        uploadContent.put((byte) 0xff);
    }
    uploadContent.flip();

    S3WritableByteChannel channel = channelSupplier.get();
    int uploadedSize = channel.write(writeReadOnlyBuffer ? uploadContent.asReadOnlyBuffer() : uploadContent);
    assertEquals(contentSize, uploadedSize);

    CompleteMultipartUploadResult completeMultipartUploadResult = new CompleteMultipartUploadResult();
    doReturn(completeMultipartUploadResult)
        .when(mockAmazonS3)
        .completeMultipartUpload(any(CompleteMultipartUploadRequest.class));
    channel.close();

    verify(mockAmazonS3, times(2)).initiateMultipartUpload(notNull(InitiateMultipartUploadRequest.class));
    int partQuantity = (int) Math.ceil((double) contentSize / s3UploadBufferSizeBytes) + 1;
    verify(mockAmazonS3, times(partQuantity)).uploadPart(notNull(UploadPartRequest.class));
    verify(mockAmazonS3, times(1)).completeMultipartUpload(notNull(CompleteMultipartUploadRequest.class));
    verifyNoMoreInteractions(mockAmazonS3);
}
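Two of the verification counts include the direct stubbing sanity checks made earlier in the method: initiateMultipartUpload is expected twice (one direct call plus the channel's own initiation), and partQuantity adds 1 to the channel's flush count for the direct uploadPart call. A worked instance of that arithmetic, assuming a hypothetical 5 MiB buffer:

// Worked instance of the verification arithmetic; the 5 MiB buffer is an assumption
// for illustration, the test receives s3UploadBufferSizeBytes from its caller.
long s3UploadBufferSizeBytes = 5L * 1024 * 1024;
int contentSize = 34_078_720; // 32.5 MiB, as in the test
// the channel flushes 6 full buffers during write() plus the 2.5 MiB remainder on close()
int channelFlushes = (int) Math.ceil((double) contentSize / s3UploadBufferSizeBytes); // 7
int partQuantity = channelFlushes + 1; // 8: +1 for the direct uploadPart sanity check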
Use of com.amazonaws.services.s3.model.UploadPartResult in project android-simpl3r by jgilfelt.
From the class Uploader, method start.
/**
 * Initiate a multipart file upload to Amazon S3
 *
 * @return the URL of a successfully uploaded file
 */
public String start() {
    // initialize
    List<PartETag> partETags = new ArrayList<PartETag>();
    final long contentLength = file.length();
    long filePosition = 0;
    int startPartNumber = 1;
    userInterrupted = false;
    userAborted = false;
    bytesUploaded = 0;

    // check if we can resume an incomplete upload
    String uploadId = getCachedUploadId();
    if (uploadId != null) {
        // we can resume the upload
        Log.i(TAG, "resuming upload for " + uploadId);
        // get the cached etags
        List<PartETag> cachedEtags = getCachedPartEtags();
        partETags.addAll(cachedEtags);
        // calculate the start position for resume
        startPartNumber = cachedEtags.size() + 1;
        filePosition = (startPartNumber - 1) * partSize;
        bytesUploaded = filePosition;
        Log.i(TAG, "resuming at part " + startPartNumber + " position " + filePosition);
    } else {
        // initiate a new multipart upload
        Log.i(TAG, "initiating new upload");
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(s3bucketName, s3key);
        configureInitiateRequest(initRequest);
        InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
        uploadId = initResponse.getUploadId();
    }

    final AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(s3bucketName, s3key, uploadId);

    for (int k = startPartNumber; filePosition < contentLength; k++) {
        long thisPartSize = Math.min(partSize, (contentLength - filePosition));
        Log.i(TAG, "starting file part " + k + " with size " + thisPartSize);
        UploadPartRequest uploadRequest = new UploadPartRequest()
            .withBucketName(s3bucketName)
            .withKey(s3key)
            .withUploadId(uploadId)
            .withPartNumber(k)
            .withFileOffset(filePosition)
            .withFile(file)
            .withPartSize(thisPartSize);
        ProgressListener s3progressListener = new ProgressListener() {
            public void progressChanged(ProgressEvent progressEvent) {
                // TODO calling shutdown too brute force?
                if (userInterrupted) {
                    s3Client.shutdown();
                    throw new UploadIterruptedException("User interrupted");
                } else if (userAborted) {
                    // aborted requests cannot be resumed, so clear any cached etags
                    clearProgressCache();
                    s3Client.abortMultipartUpload(abortRequest);
                    s3Client.shutdown();
                }
                bytesUploaded += progressEvent.getBytesTransfered();
                // Log.d(TAG, "bytesUploaded=" + bytesUploaded);
                // broadcast progress
                float fpercent = ((bytesUploaded * 100) / contentLength);
                int percent = Math.round(fpercent);
                if (progressListener != null) {
                    progressListener.progressChanged(progressEvent, bytesUploaded, percent);
                }
            }
        };
        uploadRequest.setProgressListener(s3progressListener);

        UploadPartResult result = s3Client.uploadPart(uploadRequest);
        partETags.add(result.getPartETag());

        // cache the part progress for this upload
        if (k == 1) {
            initProgressCache(uploadId);
        }
        // store part etag
        cachePartEtag(result);
        filePosition += thisPartSize;
    }

    CompleteMultipartUploadRequest compRequest =
        new CompleteMultipartUploadRequest(s3bucketName, s3key, uploadId, partETags);
    CompleteMultipartUploadResult result = s3Client.completeMultipartUpload(compRequest);
    bytesUploaded = 0;
    Log.i(TAG, "upload complete for " + uploadId);
    clearProgressCache();
    return result.getLocation();
}
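A hypothetical usage sketch follows; the constructor signature and listener interface are assumptions inferred from the fields start() uses, not the project's documented API. Since start() blocks on network I/O until the upload completes, it should run off the Android main thread:

// Hypothetical usage sketch; constructor arguments and the listener type are
// assumptions based on the fields referenced in start() above.
AmazonS3Client s3Client = new AmazonS3Client(new BasicAWSCredentials(ACCESS_KEY, SECRET_KEY));
Uploader uploader = new Uploader(context, s3Client, "my-bucket", "backups/photo.jpg", file);
uploader.setProgressListener(new Uploader.UploadProgressListener() {
    @Override
    public void progressChanged(ProgressEvent progressEvent, long bytesUploaded, int percentUploaded) {
        Log.d("upload", bytesUploaded + " bytes uploaded (" + percentUploaded + "%)");
    }
});
String s3Location = uploader.start(); // blocking; returns the uploaded object's URL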