Use of com.amazonaws.services.s3.model.UploadPartResult in project bender by Nextdoor.
From the class S3TransporterTest, method testAmazonClientException:
@Test(expected = TransportException.class)
public void testAmazonClientException() throws TransportException, IllegalStateException, IOException {
/*
* Create mock client, requests, and replies
*/
AmazonS3Client mockClient = mock(AmazonS3Client.class);
UploadPartResult uploadResult = new UploadPartResult();
uploadResult.setETag("foo");
doThrow(new AmazonClientException("expected")).when(mockClient).uploadPart(any(UploadPartRequest.class));
InitiateMultipartUploadResult initUploadResult = new InitiateMultipartUploadResult();
initUploadResult.setUploadId("123");
doReturn(initUploadResult).when(mockClient).initiateMultipartUpload(any(InitiateMultipartUploadRequest.class));
/*
* Fill buffer with mock data
*/
S3TransportBuffer buffer = new S3TransportBuffer(1000, false, new S3TransportSerializer());
InternalEvent mockIevent = mock(InternalEvent.class);
doReturn("foo").when(mockIevent).getSerialized();
/*
* Create transport
*/
Map<String, MultiPartUpload> multiPartUploads = new HashMap<String, MultiPartUpload>(0);
S3Transport transport = new S3Transport(mockClient, "bucket", "basepath", false, multiPartUploads);
/*
* Do actual test
*/
buffer.add(mockIevent);
LinkedHashMap<String, String> partitions = new LinkedHashMap<String, String>();
partitions.put(S3Transport.FILENAME_KEY, "a_filename");
ArgumentCaptor<UploadPartRequest> argument = ArgumentCaptor.forClass(UploadPartRequest.class);
try {
transport.sendBatch(buffer, partitions, new TestContext());
} catch (Exception e) {
assertEquals(e.getCause().getClass(), AmazonClientException.class);
throw e;
}
}
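The stubbed uploadResult and the ArgumentCaptor declared above go unused here, because uploadPart is stubbed to throw. For contrast, a minimal sketch of the success-path wiring the same fixtures support (a sketch only: it assumes the transport propagates the initiated upload id into its UploadPartRequest, and reuses the buffer, partitions, and transport set up above):
// Success-path variant (not part of the Bender test): return the stubbed
// result instead of throwing, then capture and inspect the request.
doReturn(uploadResult).when(mockClient).uploadPart(any(UploadPartRequest.class));
transport.sendBatch(buffer, partitions, new TestContext());
verify(mockClient).uploadPart(argument.capture());
// Assumes the transport reuses the upload id returned by initiateMultipartUpload.
assertEquals("123", argument.getValue().getUploadId());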
Use of com.amazonaws.services.s3.model.UploadPartResult in project OpenSearch by opensearch-project.
From the class S3BlobStoreContainerTests, method testExecuteMultipartUpload:
public void testExecuteMultipartUpload() throws IOException {
final String bucketName = randomAlphaOfLengthBetween(1, 10);
final String blobName = randomAlphaOfLengthBetween(1, 10);
final BlobPath blobPath = new BlobPath();
if (randomBoolean()) {
IntStream.of(randomIntBetween(1, 5)).forEach(value -> blobPath.add("path_" + value));
}
final long blobSize = ByteSizeUnit.GB.toBytes(randomIntBetween(1, 128));
final long bufferSize = ByteSizeUnit.MB.toBytes(randomIntBetween(5, 1024));
final S3BlobStore blobStore = mock(S3BlobStore.class);
when(blobStore.bucket()).thenReturn(bucketName);
when(blobStore.bufferSizeInBytes()).thenReturn(bufferSize);
final boolean serverSideEncryption = randomBoolean();
when(blobStore.serverSideEncryption()).thenReturn(serverSideEncryption);
final StorageClass storageClass = randomFrom(StorageClass.values());
when(blobStore.getStorageClass()).thenReturn(storageClass);
final CannedAccessControlList cannedAccessControlList = randomBoolean() ? randomFrom(CannedAccessControlList.values()) : null;
if (cannedAccessControlList != null) {
when(blobStore.getCannedACL()).thenReturn(cannedAccessControlList);
}
final AmazonS3 client = mock(AmazonS3.class);
final AmazonS3Reference clientReference = new AmazonS3Reference(client);
when(blobStore.clientReference()).thenReturn(clientReference);
final ArgumentCaptor<InitiateMultipartUploadRequest> initArgCaptor = ArgumentCaptor.forClass(InitiateMultipartUploadRequest.class);
final InitiateMultipartUploadResult initResult = new InitiateMultipartUploadResult();
initResult.setUploadId(randomAlphaOfLength(10));
when(client.initiateMultipartUpload(initArgCaptor.capture())).thenReturn(initResult);
final ArgumentCaptor<UploadPartRequest> uploadArgCaptor = ArgumentCaptor.forClass(UploadPartRequest.class);
final List<String> expectedEtags = new ArrayList<>();
final long partSize = Math.min(bufferSize, blobSize);
long totalBytes = 0;
do {
expectedEtags.add(randomAlphaOfLength(50));
totalBytes += partSize;
} while (totalBytes < blobSize);
when(client.uploadPart(uploadArgCaptor.capture())).thenAnswer(invocationOnMock -> {
final UploadPartRequest request = (UploadPartRequest) invocationOnMock.getArguments()[0];
final UploadPartResult response = new UploadPartResult();
response.setPartNumber(request.getPartNumber());
response.setETag(expectedEtags.get(request.getPartNumber() - 1));
return response;
});
final ArgumentCaptor<CompleteMultipartUploadRequest> compArgCaptor = ArgumentCaptor.forClass(CompleteMultipartUploadRequest.class);
when(client.completeMultipartUpload(compArgCaptor.capture())).thenReturn(new CompleteMultipartUploadResult());
final ByteArrayInputStream inputStream = new ByteArrayInputStream(new byte[0]);
final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore);
blobContainer.executeMultipartUpload(blobStore, blobName, inputStream, blobSize);
final InitiateMultipartUploadRequest initRequest = initArgCaptor.getValue();
assertEquals(bucketName, initRequest.getBucketName());
assertEquals(blobPath.buildAsString() + blobName, initRequest.getKey());
assertEquals(storageClass, initRequest.getStorageClass());
assertEquals(cannedAccessControlList, initRequest.getCannedACL());
if (serverSideEncryption) {
assertEquals(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION, initRequest.getObjectMetadata().getSSEAlgorithm());
}
final Tuple<Long, Long> numberOfParts = S3BlobContainer.numberOfMultiparts(blobSize, bufferSize);
final List<UploadPartRequest> uploadRequests = uploadArgCaptor.getAllValues();
assertEquals(numberOfParts.v1().intValue(), uploadRequests.size());
for (int i = 0; i < uploadRequests.size(); i++) {
final UploadPartRequest uploadRequest = uploadRequests.get(i);
assertEquals(bucketName, uploadRequest.getBucketName());
assertEquals(blobPath.buildAsString() + blobName, uploadRequest.getKey());
assertEquals(initResult.getUploadId(), uploadRequest.getUploadId());
assertEquals(i + 1, uploadRequest.getPartNumber());
assertEquals(inputStream, uploadRequest.getInputStream());
if (i == (uploadRequests.size() - 1)) {
assertTrue(uploadRequest.isLastPart());
assertEquals(numberOfParts.v2().longValue(), uploadRequest.getPartSize());
} else {
assertFalse(uploadRequest.isLastPart());
assertEquals(bufferSize, uploadRequest.getPartSize());
}
}
final CompleteMultipartUploadRequest compRequest = compArgCaptor.getValue();
assertEquals(bucketName, compRequest.getBucketName());
assertEquals(blobPath.buildAsString() + blobName, compRequest.getKey());
assertEquals(initResult.getUploadId(), compRequest.getUploadId());
final List<String> actualETags = compRequest.getPartETags().stream().map(PartETag::getETag).collect(Collectors.toList());
assertEquals(expectedEtags, actualETags);
}
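The assertions above treat numberOfMultiparts as returning the part count in v1() and the size of the final part in v2(), with every other part equal to bufferSize. A sketch of arithmetic consistent with those assertions (not necessarily the exact OpenSearch implementation):
// v1 = number of parts, v2 = size of the last part; all other parts are partSize.
static Tuple<Long, Long> numberOfMultiparts(final long totalSize, final long partSize) {
    final long fullParts = totalSize / partSize;
    final long remaining = totalSize % partSize;
    // An exact multiple ends with a full-sized last part; otherwise the
    // remainder becomes one extra, smaller last part.
    return remaining == 0 ? new Tuple<>(fullParts, partSize) : new Tuple<>(fullParts + 1, remaining);
}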
Use of com.amazonaws.services.s3.model.UploadPartResult in project OpenSearch by opensearch-project.
From the class S3BlobStoreContainerTests, method testExecuteMultipartUploadAborted:
public void testExecuteMultipartUploadAborted() {
final String bucketName = randomAlphaOfLengthBetween(1, 10);
final String blobName = randomAlphaOfLengthBetween(1, 10);
final BlobPath blobPath = new BlobPath();
final long blobSize = ByteSizeUnit.MB.toBytes(765);
final long bufferSize = ByteSizeUnit.MB.toBytes(150);
final S3BlobStore blobStore = mock(S3BlobStore.class);
when(blobStore.bucket()).thenReturn(bucketName);
when(blobStore.bufferSizeInBytes()).thenReturn(bufferSize);
when(blobStore.getStorageClass()).thenReturn(randomFrom(StorageClass.values()));
final AmazonS3 client = mock(AmazonS3.class);
final AmazonS3Reference clientReference = new AmazonS3Reference(client);
doAnswer(invocation -> {
clientReference.incRef();
return clientReference;
}).when(blobStore).clientReference();
final String uploadId = randomAlphaOfLength(25);
final int stage = randomInt(2);
final List<AmazonClientException> exceptions = Arrays.asList(
    new AmazonClientException("Expected initialization request to fail"),
    new AmazonClientException("Expected upload part request to fail"),
    new AmazonClientException("Expected completion request to fail"));
if (stage == 0) {
// Fail the initialization request
when(client.initiateMultipartUpload(any(InitiateMultipartUploadRequest.class))).thenThrow(exceptions.get(stage));
} else if (stage == 1) {
final InitiateMultipartUploadResult initResult = new InitiateMultipartUploadResult();
initResult.setUploadId(uploadId);
when(client.initiateMultipartUpload(any(InitiateMultipartUploadRequest.class))).thenReturn(initResult);
// Fail the upload part request
when(client.uploadPart(any(UploadPartRequest.class))).thenThrow(exceptions.get(stage));
} else {
final InitiateMultipartUploadResult initResult = new InitiateMultipartUploadResult();
initResult.setUploadId(uploadId);
when(client.initiateMultipartUpload(any(InitiateMultipartUploadRequest.class))).thenReturn(initResult);
when(client.uploadPart(any(UploadPartRequest.class))).thenAnswer(invocationOnMock -> {
final UploadPartRequest request = (UploadPartRequest) invocationOnMock.getArguments()[0];
final UploadPartResult response = new UploadPartResult();
response.setPartNumber(request.getPartNumber());
response.setETag(randomAlphaOfLength(20));
return response;
});
// Fail the completion request
when(client.completeMultipartUpload(any(CompleteMultipartUploadRequest.class))).thenThrow(exceptions.get(stage));
}
final ArgumentCaptor<AbortMultipartUploadRequest> argumentCaptor = ArgumentCaptor.forClass(AbortMultipartUploadRequest.class);
doNothing().when(client).abortMultipartUpload(argumentCaptor.capture());
final IOException e = expectThrows(IOException.class, () -> {
final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore);
blobContainer.executeMultipartUpload(blobStore, blobName, new ByteArrayInputStream(new byte[0]), blobSize);
});
assertEquals("Unable to upload object [" + blobName + "] using multipart upload", e.getMessage());
assertThat(e.getCause(), instanceOf(AmazonClientException.class));
assertEquals(exceptions.get(stage).getMessage(), e.getCause().getMessage());
if (stage == 0) {
verify(client, times(1)).initiateMultipartUpload(any(InitiateMultipartUploadRequest.class));
verify(client, times(0)).uploadPart(any(UploadPartRequest.class));
verify(client, times(0)).completeMultipartUpload(any(CompleteMultipartUploadRequest.class));
verify(client, times(0)).abortMultipartUpload(any(AbortMultipartUploadRequest.class));
} else {
verify(client, times(1)).initiateMultipartUpload(any(InitiateMultipartUploadRequest.class));
if (stage == 1) {
verify(client, times(1)).uploadPart(any(UploadPartRequest.class));
verify(client, times(0)).completeMultipartUpload(any(CompleteMultipartUploadRequest.class));
} else {
verify(client, times(6)).uploadPart(any(UploadPartRequest.class));
verify(client, times(1)).completeMultipartUpload(any(CompleteMultipartUploadRequest.class));
}
verify(client, times(1)).abortMultipartUpload(any(AbortMultipartUploadRequest.class));
final AbortMultipartUploadRequest abortRequest = argumentCaptor.getValue();
assertEquals(bucketName, abortRequest.getBucketName());
assertEquals(blobName, abortRequest.getKey());
assertEquals(uploadId, abortRequest.getUploadId());
}
}
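The three stages correspond to a failure in the initiate, upload-part, and complete calls respectively; in the latter two the test expects the upload to be aborted and the cause wrapped in an IOException. A simplified sketch of the abort-on-failure pattern the test exercises (the real S3BlobContainer also slices the input stream into parts and tracks their ETags):
String uploadId = null;
try {
    uploadId = client.initiateMultipartUpload(new InitiateMultipartUploadRequest(bucketName, blobName)).getUploadId();
    // ... uploadPart(...) for every part, then completeMultipartUpload(...)
} catch (final AmazonClientException e) {
    // Nothing to abort if initiation itself failed (stage 0 above).
    if (uploadId != null) {
        client.abortMultipartUpload(new AbortMultipartUploadRequest(bucketName, blobName, uploadId));
    }
    throw new IOException("Unable to upload object [" + blobName + "] using multipart upload", e);
}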
Use of com.amazonaws.services.s3.model.UploadPartResult in project ozone by apache.
From the class S3KeyGenerator, method createKey:
private void createKey(long counter) throws Exception {
timer.time(() -> {
if (multiPart) {
final String keyName = generateObjectName(counter);
final InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest(bucketName, keyName);
final InitiateMultipartUploadResult initiateMultipartUploadResult = s3.initiateMultipartUpload(initiateRequest);
final String uploadId = initiateMultipartUploadResult.getUploadId();
List<PartETag> parts = new ArrayList<>();
for (int i = 1; i <= numberOfParts; i++) {
final UploadPartRequest uploadPartRequest = new UploadPartRequest()
    .withBucketName(bucketName)
    .withKey(keyName)
    .withPartNumber(i)
    .withLastPart(i == numberOfParts)
    .withUploadId(uploadId)
    .withPartSize(fileSize)
    .withInputStream(new ByteArrayInputStream(content.getBytes(StandardCharsets.UTF_8)));
final UploadPartResult uploadPartResult = s3.uploadPart(uploadPartRequest);
parts.add(uploadPartResult.getPartETag());
}
s3.completeMultipartUpload(new CompleteMultipartUploadRequest(bucketName, keyName, uploadId, parts));
} else {
s3.putObject(bucketName, generateObjectName(counter), content);
}
return null;
});
}
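Every part in this generator is fileSize bytes of the same in-memory content. Against real S3, each part other than the last must be at least 5 MiB, or completeMultipartUpload fails with EntityTooSmall; a guard like the following (hypothetical, not part of the generator) makes that precondition explicit:
// Hypothetical guard; 5 MiB is S3's documented lower bound for non-final
// parts (test or gateway endpoints may not enforce it).
private static final long MIN_PART_SIZE = 5L * 1024 * 1024;

if (multiPart && numberOfParts > 1 && fileSize < MIN_PART_SIZE) {
    throw new IllegalArgumentException(
        "Part size " + fileSize + " bytes is below the 5 MiB minimum for non-final multipart parts");
}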
Use of com.amazonaws.services.s3.model.UploadPartResult in project aws-sdk-android by aws-amplify.
From the class AmazonS3Client, method uploadPart:
/*
* (non-Javadoc)
* @see
* com.amazonaws.services.s3.AmazonS3#uploadPart(com.amazonaws.services.
* s3.model.UploadPartRequest)
*/
@Override
public UploadPartResult uploadPart(UploadPartRequest uploadPartRequest) throws AmazonClientException, AmazonServiceException {
assertParameterNotNull(uploadPartRequest, "The request parameter must be specified when uploading a part");
final String bucketName = uploadPartRequest.getBucketName();
final String key = uploadPartRequest.getKey();
final String uploadId = uploadPartRequest.getUploadId();
final int partNumber = uploadPartRequest.getPartNumber();
final long partSize = uploadPartRequest.getPartSize();
assertParameterNotNull(bucketName, "The bucket name parameter must be specified when uploading a part");
assertParameterNotNull(key, "The key parameter must be specified when uploading a part");
assertParameterNotNull(uploadId, "The upload ID parameter must be specified when uploading a part");
assertParameterNotNull(partNumber, "The part number parameter must be specified when uploading a part");
assertParameterNotNull(partSize, "The part size parameter must be specified when uploading a part");
final Request<UploadPartRequest> request = createRequest(bucketName, key, uploadPartRequest, HttpMethodName.PUT);
request.addParameter("uploadId", uploadId);
request.addParameter("partNumber", Integer.toString(partNumber));
final ObjectMetadata objectMetadata = uploadPartRequest.getObjectMetadata();
if (objectMetadata != null) {
populateRequestMetadata(request, objectMetadata);
}
request.addHeader(Headers.CONTENT_LENGTH, Long.toString(partSize));
/*
* HttpUrlConnection seems to be buggy in terms of implementation of
* expect continue.
*/
// request.addHeader("Expect", "100-continue");
populateRequesterPaysHeader(request, uploadPartRequest.isRequesterPays());
// Populate the SSE-CPK parameters to the request header
populateSSE_C(request, uploadPartRequest.getSSECustomerKey());
InputStream inputStream = null;
if (uploadPartRequest.getInputStream() != null) {
inputStream = uploadPartRequest.getInputStream();
} else if (uploadPartRequest.getFile() != null) {
try {
inputStream = new InputSubstream(new RepeatableFileInputStream(uploadPartRequest.getFile()), uploadPartRequest.getFileOffset(), partSize, true);
} catch (final FileNotFoundException e) {
throw new IllegalArgumentException("The specified file doesn't exist", e);
}
} else {
throw new IllegalArgumentException("A File or InputStream must be specified when uploading part");
}
// Pre-compute the Content-MD5 header from the (resettable) stream before the request is invoked.
if (uploadPartRequest.getMd5Digest() == null && !ServiceUtils.skipMd5CheckPerRequest(uploadPartRequest, clientOptions) && inputStream.markSupported()) {
try {
final String contentMd5_b64 = Md5Utils.md5AsBase64(inputStream);
addHeaderIfNotNull(request, Headers.CONTENT_MD5, contentMd5_b64);
inputStream.reset();
} catch (final Exception e) {
throw new AmazonClientException("Unable to calculate MD5 hash: " + e.getMessage(), e);
}
}
/*
* This is compatible with progress listener set by either the legacy
* method UploadPartRequest#setProgressListener or the new method
* UploadPartRequest#setGeneralProgressListener.
*/
final ProgressListener progressListener = uploadPartRequest.getGeneralProgressListener();
final ProgressListenerCallbackExecutor progressListenerCallbackExecutor = ProgressListenerCallbackExecutor.wrapListener(progressListener);
if (progressListenerCallbackExecutor != null) {
inputStream = new ProgressReportingInputStream(inputStream, progressListenerCallbackExecutor);
((ProgressReportingInputStream) inputStream).setNotificationThreshold(this.notificationThreshold);
fireProgressEvent(progressListenerCallbackExecutor, ProgressEvent.PART_STARTED_EVENT_CODE);
}
try {
request.setContent(inputStream);
final ObjectMetadata metadata = invoke(request, new S3MetadataResponseHandler(), bucketName, key);
fireProgressEvent(progressListenerCallbackExecutor, ProgressEvent.PART_COMPLETED_EVENT_CODE);
final UploadPartResult result = new UploadPartResult();
result.setETag(metadata.getETag());
result.setPartNumber(partNumber);
result.setSSEAlgorithm(metadata.getSSEAlgorithm());
result.setSSECustomerAlgorithm(metadata.getSSECustomerAlgorithm());
result.setSSECustomerKeyMd5(metadata.getSSECustomerKeyMd5());
result.setRequesterCharged(metadata.isRequesterCharged());
return result;
} catch (final AmazonClientException ace) {
fireProgressEvent(progressListenerCallbackExecutor, ProgressEvent.PART_FAILED_EVENT_CODE);
throw ace;
} finally {
if (inputStream != null) {
try {
inputStream.close();
} catch (final Exception e) {
}
}
}
}
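A caller-side sketch of driving this method over a local file: slice the file by offset into parts, upload each part, and collect the PartETags needed to complete the upload. The withFile/withFileOffset builder methods are assumed to mirror the getters used above, and bucket, key, uploadId, and file are placeholders:
// Sketch: multipart upload of a local file via uploadPart (assumed names in placeholders).
final long partSize = 5L * 1024 * 1024;   // >= 5 MiB for every part except the last
final long fileLength = file.length();
final int totalParts = (int) ((fileLength + partSize - 1) / partSize);
final List<PartETag> partETags = new ArrayList<>();
for (int partNumber = 1; partNumber <= totalParts; partNumber++) {
    final long offset = (long) (partNumber - 1) * partSize;
    final UploadPartResult result = s3.uploadPart(new UploadPartRequest()
        .withBucketName(bucket)
        .withKey(key)
        .withUploadId(uploadId)
        .withPartNumber(partNumber)
        .withFile(file)
        .withFileOffset(offset)
        .withPartSize(Math.min(partSize, fileLength - offset))
        .withLastPart(partNumber == totalParts));
    partETags.add(result.getPartETag());
}
s3.completeMultipartUpload(new CompleteMultipartUploadRequest(bucket, key, uploadId, partETags));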