Use of com.amazonaws.services.s3.model.UploadPartRequest in project bender by Nextdoor.
In class S3TransporterTest, the method testCompressedBuffer:
@Test
public void testCompressedBuffer() throws TransportException, IllegalStateException, IOException {
    /*
     * Create mock client, requests, and replies
     */
    AmazonS3Client mockClient = getMockClient();
    /*
     * Capture the InputStream into a ByteArrayOutputStream before the Transport thread closes the
     * InputStream and makes it unavailable for reading.
     */
    ByteArrayOutputStream captured = new ByteArrayOutputStream();
    Answer answer = new Answer() {
        @Override
        public Object answer(InvocationOnMock invocation) throws Throwable {
            UploadPartRequest req = invocation.getArgumentAt(0, UploadPartRequest.class);
            // Read the part body fully; ByteArrayOutputStream has no write(InputStream) overload.
            captured.write(com.amazonaws.util.IOUtils.toByteArray(req.getInputStream()));
            return new UploadPartResult();
        }
    };
    Mockito.doAnswer(answer).when(mockClient).uploadPart(any(UploadPartRequest.class));
    /*
     * Fill buffer with mock data
     */
    S3TransportBuffer buffer = new S3TransportBuffer(1000, true, new S3TransportSerializer());
    InternalEvent mockIevent = mock(InternalEvent.class);
    doReturn("foo").when(mockIevent).getSerialized();
    /*
     * Create transport
     */
    Map<String, MultiPartUpload> multiPartUploads = new HashMap<String, MultiPartUpload>(0);
    S3Transport transport =
        new S3Transport(mockClient, "bucket", "basepath", true, multiPartUploads);
    /*
     * Do actual test
     */
    buffer.add(mockIevent);
    LinkedHashMap<String, String> partitions = new LinkedHashMap<String, String>();
    partitions.put(S3Transport.FILENAME_KEY, "a_filename");
    ArgumentCaptor<UploadPartRequest> argument = ArgumentCaptor.forClass(UploadPartRequest.class);
    buffer.close();
    transport.sendBatch(buffer, partitions, new TestContext());
    verify(mockClient).uploadPart(argument.capture());
    /*
     * Check results
     */
    assertEquals("bucket", argument.getValue().getBucketName());
    assertEquals("basepath/a_filename.bz2", argument.getValue().getKey());
    assertEquals(1, argument.getValue().getPartNumber());
    assertEquals(40, argument.getValue().getPartSize());
    assertEquals("123", argument.getValue().getUploadId());
    /*
     * Convert the actual InputStream from the client into a ByteArrayOutputStream which can be read
     * and verified.
     */
    byte[] actualBytes = captured.toByteArray();
    byte[] expectedBytes = { 66, 90, 104, 57, 49, 65, 89, 38, 83, 89, 118, -10, -77, -27, 0, 0, 0,
        -63, 0, 0, 16, 1, 0, -96, 0, 48, -52, 12, -62, 12, 46, -28, -118, 112, -95, 32, -19, -19,
        103, -54 };
    assertArrayEquals(expectedBytes, actualBytes);
}
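The expected bytes above begin with 66, 90, 104 ("BZh"), the bzip2 magic number, so the captured part body is the bzip2-compressed serialization of the single buffered event. As a hedged illustration (assuming the serializer emits "foo" followed by a newline and that the same commons-compress version is in use), the array could be regenerated with a small helper such as:
// Hypothetical helper using org.apache.commons.compress.compressors.bzip2.BZip2CompressorOutputStream;
// e.g. bzip2("foo\n") for the test above.
static byte[] bzip2(String payload) throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    try (BZip2CompressorOutputStream bzip = new BZip2CompressorOutputStream(bos)) {
        bzip.write(payload.getBytes(StandardCharsets.UTF_8));
    }
    return bos.toByteArray();
}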
Use of com.amazonaws.services.s3.model.UploadPartRequest in project bender by Nextdoor.
In class S3TransporterTest, the method testCompressedPartitoned:
@Test
public void testCompressedPartitoned() throws TransportException, IllegalStateException, IOException {
    /*
     * Create mock client, requests, and replies
     */
    AmazonS3Client mockClient = getMockClient();
    /*
     * Fill buffer with mock data
     */
    S3TransportBuffer buffer = new S3TransportBuffer(1000, true, new S3TransportSerializer());
    InternalEvent mockIevent = mock(InternalEvent.class);
    doReturn("foo").when(mockIevent).getSerialized();
    /*
     * Create transport
     */
    Map<String, MultiPartUpload> multiPartUploads = new HashMap<String, MultiPartUpload>(0);
    S3Transport transport =
        new S3Transport(mockClient, "bucket", "basepath", true, multiPartUploads);
    /*
     * Do actual test
     */
    buffer.add(mockIevent);
    LinkedHashMap<String, String> partitions = new LinkedHashMap<String, String>();
    partitions.put(S3Transport.FILENAME_KEY, "a_filename");
    partitions.put("day", "01");
    partitions.put("hour", "23");
    ArgumentCaptor<UploadPartRequest> argument = ArgumentCaptor.forClass(UploadPartRequest.class);
    transport.sendBatch(buffer, partitions, new TestContext());
    verify(mockClient).uploadPart(argument.capture());
    /*
     * Check results
     */
    assertEquals("bucket", argument.getValue().getBucketName());
    assertEquals("basepath/day=01/hour=23/a_filename.bz2", argument.getValue().getKey());
    assertEquals(1, argument.getValue().getPartNumber());
    assertEquals(3, argument.getValue().getPartSize());
    assertEquals("123", argument.getValue().getUploadId());
}
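Both of the tests above assert that the captured part carries the upload id "123", which implies the getMockClient() helper stubs the initiate-multipart-upload call on the v1 client. The actual helper in bender is not shown here; a minimal Mockito sketch that would satisfy these assertions could look like this:
// Hypothetical mock factory: stub initiateMultipartUpload so every upload gets the id "123".
private AmazonS3Client getMockClient() {
    AmazonS3Client mockClient = mock(AmazonS3Client.class);
    InitiateMultipartUploadResult initResult = new InitiateMultipartUploadResult();
    initResult.setUploadId("123");
    doReturn(initResult).when(mockClient)
        .initiateMultipartUpload(any(InitiateMultipartUploadRequest.class));
    return mockClient;
}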
Use of com.amazonaws.services.s3.model.UploadPartRequest in project bender by Nextdoor.
In class S3TransporterTest, the method testContextBasedFilename:
@Test
public void testContextBasedFilename() throws TransportException, IllegalStateException, IOException {
    /*
     * Create mock client, requests, and replies
     */
    AmazonS3Client mockClient = getMockClient();
    /*
     * Fill buffer with mock data
     */
    S3TransportBuffer buffer = new S3TransportBuffer(1000, true, new S3TransportSerializer());
    InternalEvent mockIevent = mock(InternalEvent.class);
    doReturn("foo").when(mockIevent).getSerialized();
    /*
     * Create transport
     */
    Map<String, MultiPartUpload> multiPartUploads = new HashMap<String, MultiPartUpload>(0);
    S3Transport transport =
        new S3Transport(mockClient, "bucket", "basepath/", true, multiPartUploads);
    /*
     * Do actual test
     */
    buffer.add(mockIevent);
    LinkedHashMap<String, String> partitions = new LinkedHashMap<String, String>();
    ArgumentCaptor<UploadPartRequest> argument = ArgumentCaptor.forClass(UploadPartRequest.class);
    TestContext context = new TestContext();
    context.setAwsRequestId("request_id");
    transport.sendBatch(buffer, partitions, context);
    verify(mockClient).uploadPart(argument.capture());
    /*
     * Check results
     */
    assertEquals("basepath/request_id.bz2", argument.getValue().getKey());
}
Use of software.amazon.awssdk.services.s3.model.UploadPartRequest in project beam by apache.
In class S3WritableByteChannel, the method flush:
private void flush() throws IOException {
    uploadBuffer.flip();
    ByteArrayInputStream inputStream =
        new ByteArrayInputStream(uploadBuffer.array(), 0, uploadBuffer.limit());
    UploadPartRequest request =
        UploadPartRequest.builder()
            .bucket(path.getBucket())
            .key(path.getKey())
            .uploadId(uploadId)
            .partNumber(partNumber++)
            .contentLength((long) uploadBuffer.limit())
            .sseCustomerKey(config.getSSECustomerKey().getKey())
            .sseCustomerAlgorithm(config.getSSECustomerKey().getAlgorithm())
            .sseCustomerKeyMD5(config.getSSECustomerKey().getMD5())
            .contentMD5(Base64.getEncoder().encodeToString(md5.digest()))
            .build();
    UploadPartResponse response;
    try {
        response = s3Client.uploadPart(request, RequestBody.fromInputStream(inputStream, request.contentLength()));
    } catch (SdkClientException e) {
        throw new IOException(e);
    }
    CompletedPart part =
        CompletedPart.builder().partNumber(request.partNumber()).eTag(response.eTag()).build();
    uploadBuffer.clear();
    md5.reset();
    completedParts.add(part);
}
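The CompletedPart values accumulated here are what eventually finish the upload. A sketch of how a finalize step could consume them with the v2 SDK (it reuses s3Client, path, uploadId and completedParts from the snippet above; this is illustrative, not Beam's actual close()):
// Combine the collected parts and complete the multipart upload.
CompletedMultipartUpload completedUpload =
    CompletedMultipartUpload.builder().parts(completedParts).build();
CompleteMultipartUploadRequest completeRequest =
    CompleteMultipartUploadRequest.builder()
        .bucket(path.getBucket())
        .key(path.getKey())
        .uploadId(uploadId)
        .multipartUpload(completedUpload)
        .build();
s3Client.completeMultipartUpload(completeRequest);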
Use of com.amazonaws.services.s3.model.UploadPartRequest in project crate by crate.
In class S3BlobContainer, the method executeMultipartUpload:
/**
 * Uploads a blob using multipart upload requests.
 */
void executeMultipartUpload(final S3BlobStore blobStore, final String blobName, final InputStream input, final long blobSize) throws IOException {
    if (blobSize > MAX_FILE_SIZE_USING_MULTIPART.getBytes()) {
        throw new IllegalArgumentException("Multipart upload request size [" + blobSize + "] can't be larger than " + MAX_FILE_SIZE_USING_MULTIPART);
    }
    if (blobSize < MIN_PART_SIZE_USING_MULTIPART.getBytes()) {
        throw new IllegalArgumentException("Multipart upload request size [" + blobSize + "] can't be smaller than " + MIN_PART_SIZE_USING_MULTIPART);
    }
    final long partSize = blobStore.bufferSizeInBytes();
    final Tuple<Long, Long> multiparts = numberOfMultiparts(blobSize, partSize);
    if (multiparts.v1() > Integer.MAX_VALUE) {
        throw new IllegalArgumentException("Too many multipart upload requests, maybe try a larger buffer size?");
    }
    final int nbParts = multiparts.v1().intValue();
    final long lastPartSize = multiparts.v2();
    assert blobSize == (((nbParts - 1) * partSize) + lastPartSize) : "blobSize does not match multipart sizes";
    final SetOnce<String> uploadId = new SetOnce<>();
    final String bucketName = blobStore.bucket();
    boolean success = false;
    final InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, blobName);
    initRequest.setStorageClass(blobStore.getStorageClass());
    initRequest.setCannedACL(blobStore.getCannedACL());
    if (blobStore.serverSideEncryption()) {
        final ObjectMetadata md = new ObjectMetadata();
        md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        initRequest.setObjectMetadata(md);
    }
    try (AmazonS3Reference clientReference = blobStore.clientReference()) {
        uploadId.set(clientReference.client().initiateMultipartUpload(initRequest).getUploadId());
        if (Strings.isEmpty(uploadId.get())) {
            throw new IOException("Failed to initialize multipart upload " + blobName);
        }
        final List<PartETag> parts = new ArrayList<>();
        long bytesCount = 0;
        for (int i = 1; i <= nbParts; i++) {
            final UploadPartRequest uploadRequest = new UploadPartRequest();
            uploadRequest.setBucketName(bucketName);
            uploadRequest.setKey(blobName);
            uploadRequest.setUploadId(uploadId.get());
            uploadRequest.setPartNumber(i);
            uploadRequest.setInputStream(input);
            if (i < nbParts) {
                uploadRequest.setPartSize(partSize);
                uploadRequest.setLastPart(false);
            } else {
                uploadRequest.setPartSize(lastPartSize);
                uploadRequest.setLastPart(true);
            }
            bytesCount += uploadRequest.getPartSize();
            final UploadPartResult uploadResponse = clientReference.client().uploadPart(uploadRequest);
            parts.add(uploadResponse.getPartETag());
        }
        if (bytesCount != blobSize) {
            throw new IOException("Failed to execute multipart upload for [" + blobName + "], expected " + blobSize + " bytes sent but got " + bytesCount);
        }
        final CompleteMultipartUploadRequest complRequest = new CompleteMultipartUploadRequest(bucketName, blobName, uploadId.get(), parts);
        clientReference.client().completeMultipartUpload(complRequest);
        success = true;
    } catch (final AmazonClientException e) {
        throw new IOException("Unable to upload object [" + blobName + "] using multipart upload", e);
    } finally {
        if ((success == false) && Strings.hasLength(uploadId.get())) {
            final AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(bucketName, blobName, uploadId.get());
            try (AmazonS3Reference clientReference = blobStore.clientReference()) {
                clientReference.client().abortMultipartUpload(abortRequest);
            }
        }
    }
}
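The part loop depends on numberOfMultiparts(blobSize, partSize) returning the part count and the size of the final part. The helper itself is not shown here; a sketch of one way to write it so the assertion blobSize == ((nbParts - 1) * partSize) + lastPartSize holds (the project's actual implementation may differ):
// Returns (number of parts, size of the last part) when splitting totalSize into chunks of partSize.
static Tuple<Long, Long> numberOfMultiparts(final long totalSize, final long partSize) {
    if (partSize <= 0) {
        throw new IllegalArgumentException("Part size must be greater than zero");
    }
    if (totalSize == 0L || totalSize <= partSize) {
        // Everything fits in a single part.
        return new Tuple<>(1L, totalSize);
    }
    final long fullParts = totalSize / partSize;
    final long remaining = totalSize % partSize;
    // An exact multiple needs no extra part; otherwise a shorter final part carries the remainder.
    return remaining == 0 ? new Tuple<>(fullParts, partSize) : new Tuple<>(fullParts + 1, remaining);
}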