use of com.amazonaws.services.s3.model.UploadPartResult in project bender by Nextdoor.
the class S3TransporterTest method testCompressedBuffer.
@Test
public void testCompressedBuffer() throws TransportException, IllegalStateException, IOException {
  /*
   * Create mock client, requests, and replies
   */
  AmazonS3Client mockClient = getMockClient();
  /*
   * Capture the InputStream into a ByteArrayOutputStream before the Transport thread closes the
   * InputStream and makes it unavailable for reading.
   */
  final ByteArrayOutputStream captured = new ByteArrayOutputStream();
  Answer<UploadPartResult> answer = new Answer<UploadPartResult>() {
    @Override
    public UploadPartResult answer(InvocationOnMock invocation) throws Throwable {
      UploadPartRequest req = invocation.getArgumentAt(0, UploadPartRequest.class);
      // ByteArrayOutputStream has no write(InputStream) overload, so copy the
      // part's stream into the capture buffer manually.
      InputStream in = req.getInputStream();
      byte[] buf = new byte[4096];
      int n;
      while ((n = in.read(buf)) != -1) {
        captured.write(buf, 0, n);
      }
      return new UploadPartResult();
    }
  };
  Mockito.doAnswer(answer).when(mockClient).uploadPart(any(UploadPartRequest.class));
  /*
   * Fill buffer with mock data
   */
  S3TransportBuffer buffer = new S3TransportBuffer(1000, true, new S3TransportSerializer());
  InternalEvent mockIevent = mock(InternalEvent.class);
  doReturn("foo").when(mockIevent).getSerialized();
  /*
   * Create transport
   */
  Map<String, MultiPartUpload> multiPartUploads = new HashMap<String, MultiPartUpload>(0);
  S3Transport transport =
      new S3Transport(mockClient, "bucket", "basepath", true, multiPartUploads);
  /*
   * Do actual test
   */
  buffer.add(mockIevent);
  LinkedHashMap<String, String> partitions = new LinkedHashMap<String, String>();
  partitions.put(S3Transport.FILENAME_KEY, "a_filename");
  ArgumentCaptor<UploadPartRequest> argument = ArgumentCaptor.forClass(UploadPartRequest.class);
  buffer.close();
  transport.sendBatch(buffer, partitions, new TestContext());
  verify(mockClient).uploadPart(argument.capture());
  /*
   * Check results
   */
  assertEquals("bucket", argument.getValue().getBucketName());
  assertEquals("basepath/a_filename.bz2", argument.getValue().getKey());
  assertEquals(1, argument.getValue().getPartNumber());
  assertEquals(40, argument.getValue().getPartSize());
  assertEquals("123", argument.getValue().getUploadId());
  /*
   * Convert the actual InputStream from the client into a ByteArrayOutputStream which can be read
   * and verified.
   */
  byte[] actualBytes = captured.toByteArray();
  byte[] expectedBytes = {66, 90, 104, 57, 49, 65, 89, 38, 83, 89, 118, -10, -77, -27, 0, 0, 0,
      -63, 0, 0, 16, 1, 0, -96, 0, 48, -52, 12, -62, 12, 46, -28, -118, 112, -95, 32, -19, -19,
      103, -54};
  assertArrayEquals(expectedBytes, actualBytes);
}
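The expectedBytes fixture is just the bzip2-compressed form of the buffered payload. If the serializer output ever changes, the array can be regenerated with a short helper like the sketch below; it assumes commons-compress is on the classpath and that the buffer contents compress from "foo" plus a trailing newline (both assumptions, not facts taken from the test).

// Hypothetical fixture generator; commons-compress and the "foo\n" payload are assumptions.
import java.io.ByteArrayOutputStream;
import java.util.Arrays;
import org.apache.commons.compress.compressors.bzip2.BZip2CompressorOutputStream;

public class RegenerateBzip2Fixture {
  public static void main(String[] args) throws Exception {
    ByteArrayOutputStream raw = new ByteArrayOutputStream();
    try (BZip2CompressorOutputStream bz2 = new BZip2CompressorOutputStream(raw)) {
      bz2.write("foo\n".getBytes("UTF-8"));
    }
    // Prints the byte values in a form that can be pasted into expectedBytes.
    System.out.println(Arrays.toString(raw.toByteArray()));
  }
}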
use of com.amazonaws.services.s3.model.UploadPartResult in project aws-doc-sdk-examples by awsdocs.
the class LowLevelMultipartUpload method main.
public static void main(String[] args) throws IOException {
  Regions clientRegion = Regions.DEFAULT_REGION;
  String bucketName = "*** Bucket name ***";
  String keyName = "*** Key name ***";
  String filePath = "*** Path to file to upload ***";
  File file = new File(filePath);
  long contentLength = file.length();
  // Set part size to 5 MB.
  long partSize = 5 * 1024 * 1024;
  try {
    AmazonS3 s3Client = AmazonS3ClientBuilder.standard()
        .withRegion(clientRegion)
        .withCredentials(new ProfileCredentialsProvider())
        .build();
    // Create a list of ETag objects. The ETag for each uploaded part is collected
    // here, and after all parts have been uploaded the list is passed to the
    // request that completes the upload.
    List<PartETag> partETags = new ArrayList<PartETag>();
    // Initiate the multipart upload.
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, keyName);
    InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
    // Upload the file parts.
    long filePosition = 0;
    for (int i = 1; filePosition < contentLength; i++) {
      // Because the last part could be less than 5 MB, adjust the part size as needed.
      partSize = Math.min(partSize, (contentLength - filePosition));
      // Create the request to upload a part.
      UploadPartRequest uploadRequest = new UploadPartRequest()
          .withBucketName(bucketName)
          .withKey(keyName)
          .withUploadId(initResponse.getUploadId())
          .withPartNumber(i)
          .withFileOffset(filePosition)
          .withFile(file)
          .withPartSize(partSize);
      // Upload the part and add the response's ETag to our list.
      UploadPartResult uploadResult = s3Client.uploadPart(uploadRequest);
      partETags.add(uploadResult.getPartETag());
      filePosition += partSize;
    }
    // Complete the multipart upload.
    CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(
        bucketName, keyName, initResponse.getUploadId(), partETags);
    s3Client.completeMultipartUpload(compRequest);
  } catch (AmazonServiceException e) {
    // The call was transmitted successfully, but Amazon S3 couldn't process
    // it, so it returned an error response.
    e.printStackTrace();
  } catch (SdkClientException e) {
    // Amazon S3 couldn't be contacted for a response, or the client
    // couldn't parse the response from Amazon S3.
    e.printStackTrace();
  }
}
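One thing this sample omits: if an exception interrupts the loop after initiateMultipartUpload succeeds, the already-uploaded parts remain in the bucket (and are billed) until the upload is aborted or a lifecycle rule cleans it up. A minimal cleanup sketch, reusing the names from main above; note that initResponse is declared inside the try block in the sample as written, so it would need to be hoisted to be visible at the catch site:

// Sketch: abort the upload so orphaned parts don't accumulate.
// Assumes s3Client, bucketName, keyName, and a hoisted initResponse from main.
if (initResponse != null) {
  s3Client.abortMultipartUpload(
      new AbortMultipartUploadRequest(bucketName, keyName, initResponse.getUploadId()));
}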
use of com.amazonaws.services.s3.model.UploadPartResult in project exhibitor by soabase.
the class S3ClientImpl method uploadPart.
@Override
public UploadPartResult uploadPart(UploadPartRequest request) throws Exception {
  RefCountedClient holder = client.get();
  AmazonS3Client amazonS3Client = holder.useClient();
  try {
    return amazonS3Client.uploadPart(request);
  } finally {
    holder.release();
  }
}
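The interesting part here is the holder protocol: useClient() marks the shared client as in use, and release() in the finally block unmarks it, so the client can be swapped or shut down without being pulled out from under an in-flight request. A minimal reference-counting sketch of that idea follows; the class name and details are illustrative, not Exhibitor's actual RefCountedClient source.

// Hypothetical sketch of a reference-counted client holder.
import java.util.concurrent.atomic.AtomicInteger;
import com.amazonaws.services.s3.AmazonS3Client;

class RefCountedClientSketch {
  private final AmazonS3Client client;
  private final AtomicInteger useCount = new AtomicInteger(1); // owner's reference

  RefCountedClientSketch(AmazonS3Client client) {
    this.client = client;
  }

  AmazonS3Client useClient() {
    useCount.incrementAndGet();
    return client;
  }

  void release() {
    // Shut the client down only when the last user releases it.
    if (useCount.decrementAndGet() == 0) {
      client.shutdown();
    }
  }
}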
use of com.amazonaws.services.s3.model.UploadPartResult in project crate by crate.
the class S3BlobContainer method executeMultipartUpload.
/**
 * Uploads a blob using multipart upload requests.
 */
void executeMultipartUpload(final S3BlobStore blobStore, final String blobName, final InputStream input, final long blobSize) throws IOException {
  if (blobSize > MAX_FILE_SIZE_USING_MULTIPART.getBytes()) {
    throw new IllegalArgumentException("Multipart upload request size [" + blobSize + "] can't be larger than " + MAX_FILE_SIZE_USING_MULTIPART);
  }
  if (blobSize < MIN_PART_SIZE_USING_MULTIPART.getBytes()) {
    throw new IllegalArgumentException("Multipart upload request size [" + blobSize + "] can't be smaller than " + MIN_PART_SIZE_USING_MULTIPART);
  }
  final long partSize = blobStore.bufferSizeInBytes();
  final Tuple<Long, Long> multiparts = numberOfMultiparts(blobSize, partSize);
  if (multiparts.v1() > Integer.MAX_VALUE) {
    throw new IllegalArgumentException("Too many multipart upload requests, maybe try a larger buffer size?");
  }
  final int nbParts = multiparts.v1().intValue();
  final long lastPartSize = multiparts.v2();
  assert blobSize == (((nbParts - 1) * partSize) + lastPartSize) : "blobSize does not match multipart sizes";
  final SetOnce<String> uploadId = new SetOnce<>();
  final String bucketName = blobStore.bucket();
  boolean success = false;
  final InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, blobName);
  initRequest.setStorageClass(blobStore.getStorageClass());
  initRequest.setCannedACL(blobStore.getCannedACL());
  if (blobStore.serverSideEncryption()) {
    final ObjectMetadata md = new ObjectMetadata();
    md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    initRequest.setObjectMetadata(md);
  }
  try (AmazonS3Reference clientReference = blobStore.clientReference()) {
    uploadId.set(clientReference.client().initiateMultipartUpload(initRequest).getUploadId());
    if (Strings.isEmpty(uploadId.get())) {
      throw new IOException("Failed to initialize multipart upload " + blobName);
    }
    final List<PartETag> parts = new ArrayList<>();
    long bytesCount = 0;
    for (int i = 1; i <= nbParts; i++) {
      final UploadPartRequest uploadRequest = new UploadPartRequest();
      uploadRequest.setBucketName(bucketName);
      uploadRequest.setKey(blobName);
      uploadRequest.setUploadId(uploadId.get());
      uploadRequest.setPartNumber(i);
      uploadRequest.setInputStream(input);
      if (i < nbParts) {
        uploadRequest.setPartSize(partSize);
        uploadRequest.setLastPart(false);
      } else {
        uploadRequest.setPartSize(lastPartSize);
        uploadRequest.setLastPart(true);
      }
      bytesCount += uploadRequest.getPartSize();
      final UploadPartResult uploadResponse = clientReference.client().uploadPart(uploadRequest);
      parts.add(uploadResponse.getPartETag());
    }
    if (bytesCount != blobSize) {
      throw new IOException("Failed to execute multipart upload for [" + blobName + "], expected " + blobSize + " bytes sent but got " + bytesCount);
    }
    final CompleteMultipartUploadRequest complRequest = new CompleteMultipartUploadRequest(bucketName, blobName, uploadId.get(), parts);
    clientReference.client().completeMultipartUpload(complRequest);
    success = true;
  } catch (final AmazonClientException e) {
    throw new IOException("Unable to upload object [" + blobName + "] using multipart upload", e);
  } finally {
    if ((success == false) && Strings.hasLength(uploadId.get())) {
      final AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(bucketName, blobName, uploadId.get());
      try (AmazonS3Reference clientReference = blobStore.clientReference()) {
        clientReference.client().abortMultipartUpload(abortRequest);
      }
    }
  }
}
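The numberOfMultiparts helper used above is not shown; from the assertion blobSize == ((nbParts - 1) * partSize) + lastPartSize it must return the part count together with the size of the final, possibly shorter, part. A hedged reconstruction consistent with that contract:

// Assumed semantics of numberOfMultiparts, reconstructed from the assertion
// above; not necessarily Crate's exact implementation.
static Tuple<Long, Long> numberOfMultiparts(final long totalSize, final long partSize) {
  final long parts = totalSize / partSize;
  final long remaining = totalSize % partSize;
  if (remaining == 0) {
    // The blob divides evenly, so the last part is a full-size part.
    return new Tuple<>(parts, partSize);
  }
  // One extra, shorter part carries the remainder.
  return new Tuple<>(parts + 1, remaining);
}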
use of com.amazonaws.services.s3.model.UploadPartResult in project alluxio by Alluxio.
the class S3ALowLevelOutputStreamTest method mockS3ClientAndExecutor.
/**
 * Mocks the S3 client and executor.
 */
private void mockS3ClientAndExecutor() throws Exception {
  mMockS3Client = PowerMockito.mock(AmazonS3.class);
  InitiateMultipartUploadResult initResult = new InitiateMultipartUploadResult();
  when(mMockS3Client.initiateMultipartUpload(any(InitiateMultipartUploadRequest.class))).thenReturn(initResult);
  initResult.setUploadId(UPLOAD_ID);
  when(mMockS3Client.uploadPart(any(UploadPartRequest.class))).thenAnswer((InvocationOnMock invocation) -> {
    Object[] args = invocation.getArguments();
    // Echo the request's part number back so callers can verify ordering.
    UploadPartResult uploadResult = new UploadPartResult();
    uploadResult.setPartNumber(((UploadPartRequest) args[0]).getPartNumber());
    return uploadResult;
  });
  when(mMockS3Client.completeMultipartUpload(any(CompleteMultipartUploadRequest.class))).thenReturn(new CompleteMultipartUploadResult());
  mMockTag = (ListenableFuture<PartETag>) PowerMockito.mock(ListenableFuture.class);
  when(mMockTag.get()).thenReturn(new PartETag(1, "someTag"));
  mMockExecutor = Mockito.mock(ListeningExecutorService.class);
  when(mMockExecutor.submit(any(Callable.class))).thenReturn(mMockTag);
}
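Because the uploadPart answer echoes the request's part number back, a test can verify part ordering and upload-ID plumbing without any network calls. A short usage sketch against the mocks configured above, using the test class's field names:

// Exercises only behavior stubbed in mockS3ClientAndExecutor().
UploadPartResult result = mMockS3Client.uploadPart(
    new UploadPartRequest().withUploadId(UPLOAD_ID).withPartNumber(3));
Assert.assertEquals(3, result.getPartNumber());
Assert.assertEquals(UPLOAD_ID,
    mMockS3Client.initiateMultipartUpload(
        new InitiateMultipartUploadRequest("bucket", "key")).getUploadId());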