Use of com.amazonaws.services.s3.model.UploadPartRequest (AWS SDK for Java v1) in project alluxio by Alluxio:
the close() method of the S3ALowLevelOutputStream class.
@Override
public void close() throws IOException {
    if (mClosed) {
        return;
    }
    // Set the closed flag, we never retry close() even if exception occurs
    mClosed = true;
    if (mUploadId == null) {
        // The multi-part upload was never initialized, so there is nothing to finish.
        LOG.debug("S3A Streaming upload output stream closed without uploading any data.");
        return;
    }
    try {
        if (mFile != null) {
            // Flush the locally buffered bytes, then ship the buffer file as the final part.
            mLocalOutputStream.close();
            final int partNumber = mPartNumber.getAndIncrement();
            final UploadPartRequest lastPartRequest = new UploadPartRequest()
                .withBucketName(mBucketName)
                .withKey(mKey)
                .withUploadId(mUploadId)
                .withPartNumber(partNumber)
                .withFile(mFile)
                .withPartSize(mFile.length());
            lastPartRequest.setLastPart(true);
            execUpload(lastPartRequest);
        }
        // Block until every in-flight part finishes, then seal the multi-part upload.
        waitForAllPartsUpload();
        completeMultiPartUpload();
    } catch (Exception e) {
        LOG.error("Failed to upload {}", mKey, e);
        throw new IOException(e);
    }
}
Use of com.amazonaws.services.s3.model.UploadPartRequest (AWS SDK for Java v1) in project alluxio by Alluxio:
the uploadPart() method of the S3ALowLevelOutputStream class.
/**
 * Uploads part async.
 */
protected void uploadPart() throws IOException {
    // No local buffer file means there is nothing to upload for this part.
    if (mFile == null) {
        return;
    }
    mLocalOutputStream.close();
    final int partNumber = mPartNumber.getAndIncrement();
    // Detach the buffer file from the stream state before handing it to the uploader,
    // so a fresh buffer can be started while this part is uploaded asynchronously.
    final File partFile = new File(mFile.getPath());
    mFile = null;
    mLocalOutputStream = null;
    final UploadPartRequest uploadRequest = new UploadPartRequest()
        .withBucketName(mBucketName)
        .withKey(mKey)
        .withUploadId(mUploadId)
        .withPartNumber(partNumber)
        .withFile(partFile)
        .withPartSize(partFile.length());
    execUpload(uploadRequest);
}
Use of com.amazonaws.services.s3.model.UploadPartRequest (AWS SDK for Java v1) in project crate by Crate:
the testExecuteMultipartUploadAborted() method of the S3BlobStoreContainerTests class.
@Test
public void testExecuteMultipartUploadAborted() {
// Randomized fixture: bucket/blob names plus a blob large enough to need several parts.
final String bucketName = randomAlphaOfLengthBetween(1, 10);
final String blobName = randomAlphaOfLengthBetween(1, 10);
final BlobPath blobPath = new BlobPath();
// 765 MB blob with a 150 MB buffer -> ceil(765/150) = 6 parts (asserted in stage 2 below).
final long blobSize = ByteSizeUnit.MB.toBytes(765);
final long bufferSize = ByteSizeUnit.MB.toBytes(150);
final S3BlobStore blobStore = mock(S3BlobStore.class);
when(blobStore.bucket()).thenReturn(bucketName);
when(blobStore.bufferSizeInBytes()).thenReturn(bufferSize);
when(blobStore.getStorageClass()).thenReturn(randomFrom(StorageClass.values()));
final AmazonS3 client = mock(AmazonS3.class);
final AmazonS3Reference clientReference = new AmazonS3Reference(client);
// Each clientReference() call increments the ref count, mirroring real acquire semantics.
doAnswer(invocation -> {
clientReference.incRef();
return clientReference;
}).when(blobStore).clientReference();
final String uploadId = randomAlphaOfLength(25);
// Pick which phase fails: 0 = initiate, 1 = upload part, 2 = complete.
final int stage = randomInt(2);
final List<AmazonClientException> exceptions = Arrays.asList(new AmazonClientException("Expected initialization request to fail"), new AmazonClientException("Expected upload part request to fail"), new AmazonClientException("Expected completion request to fail"));
if (stage == 0) {
// Fail the initialization request
when(client.initiateMultipartUpload(any(InitiateMultipartUploadRequest.class))).thenThrow(exceptions.get(stage));
} else if (stage == 1) {
final InitiateMultipartUploadResult initResult = new InitiateMultipartUploadResult();
initResult.setUploadId(uploadId);
when(client.initiateMultipartUpload(any(InitiateMultipartUploadRequest.class))).thenReturn(initResult);
// Fail the upload part request
when(client.uploadPart(any(UploadPartRequest.class))).thenThrow(exceptions.get(stage));
} else {
// Initiation and all part uploads succeed; only the final completion call fails.
final InitiateMultipartUploadResult initResult = new InitiateMultipartUploadResult();
initResult.setUploadId(uploadId);
when(client.initiateMultipartUpload(any(InitiateMultipartUploadRequest.class))).thenReturn(initResult);
when(client.uploadPart(any(UploadPartRequest.class))).thenAnswer(invocationOnMock -> {
// Echo the requested part number back with a random ETag, as the real service would.
final UploadPartRequest request = (UploadPartRequest) invocationOnMock.getArguments()[0];
final UploadPartResult response = new UploadPartResult();
response.setPartNumber(request.getPartNumber());
response.setETag(randomAlphaOfLength(20));
return response;
});
// Fail the completion request
when(client.completeMultipartUpload(any(CompleteMultipartUploadRequest.class))).thenThrow(exceptions.get(stage));
}
// Capture the abort request so its bucket/key/uploadId can be asserted afterwards.
final ArgumentCaptor<AbortMultipartUploadRequest> argumentCaptor = ArgumentCaptor.forClass(AbortMultipartUploadRequest.class);
doNothing().when(client).abortMultipartUpload(argumentCaptor.capture());
final IOException e = expectThrows(IOException.class, () -> {
final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore);
blobContainer.executeMultipartUpload(blobStore, blobName, new ByteArrayInputStream(new byte[0]), blobSize);
});
// The thrown IOException must wrap the exact exception stubbed for the failing stage.
assertEquals("Unable to upload object [" + blobName + "] using multipart upload", e.getMessage());
assertThat(e.getCause(), instanceOf(AmazonClientException.class));
assertEquals(exceptions.get(stage).getMessage(), e.getCause().getMessage());
if (stage == 0) {
// Initiation failed before an upload id existed: nothing to upload, complete, or abort.
verify(client, times(1)).initiateMultipartUpload(any(InitiateMultipartUploadRequest.class));
verify(client, times(0)).uploadPart(any(UploadPartRequest.class));
verify(client, times(0)).completeMultipartUpload(any(CompleteMultipartUploadRequest.class));
verify(client, times(0)).abortMultipartUpload(any(AbortMultipartUploadRequest.class));
} else {
verify(client, times(1)).initiateMultipartUpload(any(InitiateMultipartUploadRequest.class));
if (stage == 1) {
// The first part upload failed, so completion is never attempted.
verify(client, times(1)).uploadPart(any(UploadPartRequest.class));
verify(client, times(0)).completeMultipartUpload(any(CompleteMultipartUploadRequest.class));
} else {
// All 6 parts (765 MB / 150 MB buffer) were uploaded before completion failed.
verify(client, times(6)).uploadPart(any(UploadPartRequest.class));
verify(client, times(1)).completeMultipartUpload(any(CompleteMultipartUploadRequest.class));
}
// Any failure after successful initiation must abort the upload exactly once,
// using the same bucket, key, and upload id as the initiation.
verify(client, times(1)).abortMultipartUpload(any(AbortMultipartUploadRequest.class));
final AbortMultipartUploadRequest abortRequest = argumentCaptor.getValue();
assertEquals(bucketName, abortRequest.getBucketName());
assertEquals(blobName, abortRequest.getKey());
assertEquals(uploadId, abortRequest.getUploadId());
}
}
Use of com.amazonaws.services.s3.model.UploadPartRequest (AWS SDK for Java v1) in project crate by Crate:
the testExecuteMultipartUpload() method of the S3BlobStoreContainerTests class.
@Test
public void testExecuteMultipartUpload() throws IOException {
// Randomized fixture: names, an optional nested blob path, and blob/buffer sizes.
final String bucketName = randomAlphaOfLengthBetween(1, 10);
final String blobName = randomAlphaOfLengthBetween(1, 10);
final BlobPath blobPath = new BlobPath();
if (randomBoolean()) {
IntStream.of(randomIntBetween(1, 5)).forEach(value -> blobPath.add("path_" + value));
}
final long blobSize = ByteSizeUnit.GB.toBytes(randomIntBetween(1, 128));
final long bufferSize = ByteSizeUnit.MB.toBytes(randomIntBetween(5, 1024));
final S3BlobStore blobStore = mock(S3BlobStore.class);
when(blobStore.bucket()).thenReturn(bucketName);
when(blobStore.bufferSizeInBytes()).thenReturn(bufferSize);
// Randomize encryption, storage class, and canned ACL so the assertions below cover
// whichever combination this run produces.
final boolean serverSideEncryption = randomBoolean();
when(blobStore.serverSideEncryption()).thenReturn(serverSideEncryption);
final StorageClass storageClass = randomFrom(StorageClass.values());
when(blobStore.getStorageClass()).thenReturn(storageClass);
final CannedAccessControlList cannedAccessControlList = randomBoolean() ? randomFrom(CannedAccessControlList.values()) : null;
if (cannedAccessControlList != null) {
when(blobStore.getCannedACL()).thenReturn(cannedAccessControlList);
}
final AmazonS3 client = mock(AmazonS3.class);
final AmazonS3Reference clientReference = new AmazonS3Reference(client);
when(blobStore.clientReference()).thenReturn(clientReference);
// Capture the initiation request and stub it to return a known upload id.
final ArgumentCaptor<InitiateMultipartUploadRequest> initArgCaptor = ArgumentCaptor.forClass(InitiateMultipartUploadRequest.class);
final InitiateMultipartUploadResult initResult = new InitiateMultipartUploadResult();
initResult.setUploadId(randomAlphaOfLength(10));
when(client.initiateMultipartUpload(initArgCaptor.capture())).thenReturn(initResult);
final ArgumentCaptor<UploadPartRequest> uploadArgCaptor = ArgumentCaptor.forClass(UploadPartRequest.class);
// Pre-generate one ETag per expected part (one part per full-or-partial buffer of data).
final List<String> expectedEtags = new ArrayList<>();
final long partSize = Math.min(bufferSize, blobSize);
long totalBytes = 0;
do {
expectedEtags.add(randomAlphaOfLength(50));
totalBytes += partSize;
} while (totalBytes < blobSize);
// Each uploadPart call is answered with the pre-generated ETag for its part number.
when(client.uploadPart(uploadArgCaptor.capture())).thenAnswer(invocationOnMock -> {
final UploadPartRequest request = (UploadPartRequest) invocationOnMock.getArguments()[0];
final UploadPartResult response = new UploadPartResult();
response.setPartNumber(request.getPartNumber());
response.setETag(expectedEtags.get(request.getPartNumber() - 1));
return response;
});
final ArgumentCaptor<CompleteMultipartUploadRequest> compArgCaptor = ArgumentCaptor.forClass(CompleteMultipartUploadRequest.class);
when(client.completeMultipartUpload(compArgCaptor.capture())).thenReturn(new CompleteMultipartUploadResult());
final ByteArrayInputStream inputStream = new ByteArrayInputStream(new byte[0]);
final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore);
blobContainer.executeMultipartUpload(blobStore, blobName, inputStream, blobSize);
// The initiation request must carry the configured bucket, key, and upload options.
final InitiateMultipartUploadRequest initRequest = initArgCaptor.getValue();
assertEquals(bucketName, initRequest.getBucketName());
assertEquals(blobPath.buildAsString() + blobName, initRequest.getKey());
assertEquals(storageClass, initRequest.getStorageClass());
assertEquals(cannedAccessControlList, initRequest.getCannedACL());
if (serverSideEncryption) {
assertEquals(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION, initRequest.getObjectMetadata().getSSEAlgorithm());
}
// numberOfMultiparts returns (part count, size of the final part).
final Tuple<Long, Long> numberOfParts = S3BlobContainer.numberOfMultiparts(blobSize, bufferSize);
final List<UploadPartRequest> uploadRequests = uploadArgCaptor.getAllValues();
assertEquals(numberOfParts.v1().intValue(), uploadRequests.size());
for (int i = 0; i < uploadRequests.size(); i++) {
final UploadPartRequest uploadRequest = uploadRequests.get(i);
assertEquals(bucketName, uploadRequest.getBucketName());
assertEquals(blobPath.buildAsString() + blobName, uploadRequest.getKey());
assertEquals(initResult.getUploadId(), uploadRequest.getUploadId());
// Part numbers are 1-based and issued in order.
assertEquals(i + 1, uploadRequest.getPartNumber());
assertEquals(inputStream, uploadRequest.getInputStream());
if (i == (uploadRequests.size() - 1)) {
// Only the last part is flagged as last, and it carries the remainder size.
assertTrue(uploadRequest.isLastPart());
assertEquals(numberOfParts.v2().longValue(), uploadRequest.getPartSize());
} else {
assertFalse(uploadRequest.isLastPart());
assertEquals(bufferSize, uploadRequest.getPartSize());
}
}
// The completion request must reference the same upload and list the ETags of all
// parts in upload order.
final CompleteMultipartUploadRequest compRequest = compArgCaptor.getValue();
assertEquals(bucketName, compRequest.getBucketName());
assertEquals(blobPath.buildAsString() + blobName, compRequest.getKey());
assertEquals(initResult.getUploadId(), compRequest.getUploadId());
final List<String> actualETags = compRequest.getPartETags().stream().map(PartETag::getETag).collect(Collectors.toList());
assertEquals(expectedEtags, actualETags);
}
Use of software.amazon.awssdk.services.s3.model.UploadPartRequest (AWS SDK for Java v2) in project beam by Apache:
the write() helper method of the S3WritableByteChannelTest class.
// Shared driver for the channel tests: stubs the mocked S3 client for a full multipart
// upload (create -> uploadPart -> complete), writes ~32.5 MiB through the channel built
// by channelSupplier, and verifies the expected sequence of SDK calls.
private void write(S3Client mockS3Client, Supplier channelSupplier, S3ResourceId path, String sseAlgorithmStr, String sseCustomerKeyMd5, String ssekmsKeyId, long s3UploadBufferSizeBytes, boolean bucketKeyEnabled, boolean writeReadOnlyBuffer) throws IOException {
// Build the stubbed CreateMultipartUpload response, echoing whichever SSE options apply.
CreateMultipartUploadResponse.Builder builder = CreateMultipartUploadResponse.builder().uploadId("upload-id");
ServerSideEncryption sseAlgorithm = ServerSideEncryption.fromValue(sseAlgorithmStr);
if (sseAlgorithm != null) {
builder.serverSideEncryption(sseAlgorithm);
}
if (sseCustomerKeyMd5 != null) {
builder.sseCustomerKeyMD5(sseCustomerKeyMd5);
}
if (ssekmsKeyId != null) {
// A KMS key id implies the AWS_KMS algorithm regardless of sseAlgorithmStr.
sseAlgorithm = ServerSideEncryption.AWS_KMS;
builder.serverSideEncryption(sseAlgorithm);
}
builder.bucketKeyEnabled(bucketKeyEnabled);
CreateMultipartUploadResponse createMultipartUploadResponse = builder.build();
doReturn(createMultipartUploadResponse).when(mockS3Client).createMultipartUpload(any(CreateMultipartUploadRequest.class));
// Sanity-check the stub directly before exercising the channel; note this call also
// counts toward the times(2) verification of createMultipartUpload below.
CreateMultipartUploadRequest createMultipartUploadRequest = CreateMultipartUploadRequest.builder().bucket(path.getBucket()).key(path.getKey()).build();
CreateMultipartUploadResponse mockCreateMultipartUploadResponse1 = mockS3Client.createMultipartUpload(createMultipartUploadRequest);
assertEquals(sseAlgorithm, mockCreateMultipartUploadResponse1.serverSideEncryption());
assertEquals(sseCustomerKeyMd5, mockCreateMultipartUploadResponse1.sseCustomerKeyMD5());
assertEquals(bucketKeyEnabled, mockCreateMultipartUploadResponse1.bucketKeyEnabled());
// Stub uploadPart with a fixed ETag; again sanity-check the stub once directly.
UploadPartResponse.Builder uploadPartResponseBuilder = UploadPartResponse.builder().eTag("etag");
if (sseCustomerKeyMd5 != null) {
uploadPartResponseBuilder.sseCustomerKeyMD5(sseCustomerKeyMd5);
}
UploadPartResponse response = uploadPartResponseBuilder.build();
doReturn(response).when(mockS3Client).uploadPart(any(UploadPartRequest.class), any(RequestBody.class));
UploadPartResponse mockUploadPartResult = mockS3Client.uploadPart(UploadPartRequest.builder().build(), RequestBody.empty());
assertEquals(sseCustomerKeyMd5, mockUploadPartResult.sseCustomerKeyMD5());
S3WritableByteChannel channel = channelSupplier.get();
// Fill a buffer with contentSize 0xff bytes (capacity is oversized on purpose) and
// write it through the channel, optionally as a read-only view.
int contentSize = 34_078_720;
ByteBuffer uploadContent = ByteBuffer.allocate((int) (contentSize * 2.5));
for (int i = 0; i < contentSize; i++) {
uploadContent.put((byte) 0xff);
}
uploadContent.flip();
int uploadedSize = channel.write(writeReadOnlyBuffer ? uploadContent.asReadOnlyBuffer() : uploadContent);
assertEquals(contentSize, uploadedSize);
CompleteMultipartUploadResponse completeMultipartUploadResponse = CompleteMultipartUploadResponse.builder().build();
doReturn(completeMultipartUploadResponse).when(mockS3Client).completeMultipartUpload(any(CompleteMultipartUploadRequest.class));
channel.close();
// Expected parts: one per full buffer of content, plus one extra — presumably the
// final flush issued on close(); TODO confirm against S3WritableByteChannel.
int partQuantity = (int) Math.ceil((double) contentSize / s3UploadBufferSizeBytes) + 1;
// times(2): once from the direct sanity-check call above, once from the channel itself.
verify(mockS3Client, times(2)).createMultipartUpload((CreateMultipartUploadRequest) isNotNull());
verify(mockS3Client, times(partQuantity)).uploadPart((UploadPartRequest) isNotNull(), any(RequestBody.class));
verify(mockS3Client, times(1)).completeMultipartUpload((CompleteMultipartUploadRequest) notNull());
verifyNoMoreInteractions(mockS3Client);
}
Aggregations