Use of com.amazonaws.services.s3.model.CompleteMultipartUploadRequest in project aws-doc-sdk-examples by awsdocs.
The class LowLevelMultipartUpload, method main:
public static void main(String[] args) throws IOException {
    Regions clientRegion = Regions.DEFAULT_REGION;
    String bucketName = "*** Bucket name ***";
    String keyName = "*** Key name ***";
    String filePath = "*** Path to file to upload ***";
    File file = new File(filePath);
    long contentLength = file.length();
    // Set part size to 5 MB.
    long partSize = 5 * 1024 * 1024;
    try {
        AmazonS3 s3Client = AmazonS3ClientBuilder.standard()
                .withRegion(clientRegion)
                .withCredentials(new ProfileCredentialsProvider())
                .build();
        // Create a list of ETag objects. As each part is uploaded, you retrieve its
        // ETag and add it to this list; the full list is then passed to the request
        // that completes the upload.
        List<PartETag> partETags = new ArrayList<PartETag>();
        // Initiate the multipart upload.
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, keyName);
        InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
        // Upload the file parts.
        long filePosition = 0;
        for (int i = 1; filePosition < contentLength; i++) {
            // Because the last part could be smaller than 5 MB, adjust the part size as needed.
            partSize = Math.min(partSize, (contentLength - filePosition));
            // Create the request to upload a part.
            UploadPartRequest uploadRequest = new UploadPartRequest()
                    .withBucketName(bucketName)
                    .withKey(keyName)
                    .withUploadId(initResponse.getUploadId())
                    .withPartNumber(i)
                    .withFileOffset(filePosition)
                    .withFile(file)
                    .withPartSize(partSize);
            // Upload the part and add the response's ETag to our list.
            UploadPartResult uploadResult = s3Client.uploadPart(uploadRequest);
            partETags.add(uploadResult.getPartETag());
            filePosition += partSize;
        }
        // Complete the multipart upload.
        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(
                bucketName, keyName, initResponse.getUploadId(), partETags);
        s3Client.completeMultipartUpload(compRequest);
    } catch (AmazonServiceException e) {
        // The call was transmitted successfully, but Amazon S3 couldn't process
        // it, so it returned an error response.
        e.printStackTrace();
    } catch (SdkClientException e) {
        // Amazon S3 couldn't be contacted for a response, or the client
        // couldn't parse the response from Amazon S3.
        e.printStackTrace();
    }
}
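Note that if uploadPart or completeMultipartUpload throws here, the catch blocks only print the error: the initiated upload stays open in S3 and its stored parts keep accruing charges until aborted or expired by a lifecycle rule. A minimal cleanup sketch; abortQuietly is a hypothetical helper name, and AbortMultipartUploadRequest comes from the same com.amazonaws.services.s3.model package as the classes above:

// Hypothetical helper: abort an in-flight multipart upload so its parts
// stop accruing storage costs after a failure.
static void abortQuietly(AmazonS3 s3Client, String bucketName, String keyName, String uploadId) {
    try {
        s3Client.abortMultipartUpload(new AbortMultipartUploadRequest(bucketName, keyName, uploadId));
    } catch (SdkClientException e) {
        // Best-effort cleanup; preserve the original failure for the caller.
        e.printStackTrace();
    }
}

The catch blocks above could call this with the upload ID once initResponse.getUploadId() is hoisted into a variable declared outside the try block.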
Use of com.amazonaws.services.s3.model.CompleteMultipartUploadRequest in project crate by crate.
The class S3BlobContainer, method executeMultipartUpload:
/**
 * Uploads a blob using multipart upload requests.
 */
void executeMultipartUpload(final S3BlobStore blobStore,
                            final String blobName,
                            final InputStream input,
                            final long blobSize) throws IOException {
    if (blobSize > MAX_FILE_SIZE_USING_MULTIPART.getBytes()) {
        throw new IllegalArgumentException("Multipart upload request size [" + blobSize
            + "] can't be larger than " + MAX_FILE_SIZE_USING_MULTIPART);
    }
    if (blobSize < MIN_PART_SIZE_USING_MULTIPART.getBytes()) {
        throw new IllegalArgumentException("Multipart upload request size [" + blobSize
            + "] can't be smaller than " + MIN_PART_SIZE_USING_MULTIPART);
    }
    final long partSize = blobStore.bufferSizeInBytes();
    final Tuple<Long, Long> multiparts = numberOfMultiparts(blobSize, partSize);
    if (multiparts.v1() > Integer.MAX_VALUE) {
        throw new IllegalArgumentException("Too many multipart upload requests, maybe try a larger buffer size?");
    }
    final int nbParts = multiparts.v1().intValue();
    final long lastPartSize = multiparts.v2();
    assert blobSize == (((nbParts - 1) * partSize) + lastPartSize) : "blobSize does not match multipart sizes";
    final SetOnce<String> uploadId = new SetOnce<>();
    final String bucketName = blobStore.bucket();
    boolean success = false;
    final InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, blobName);
    initRequest.setStorageClass(blobStore.getStorageClass());
    initRequest.setCannedACL(blobStore.getCannedACL());
    if (blobStore.serverSideEncryption()) {
        final ObjectMetadata md = new ObjectMetadata();
        md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        initRequest.setObjectMetadata(md);
    }
    try (AmazonS3Reference clientReference = blobStore.clientReference()) {
        uploadId.set(clientReference.client().initiateMultipartUpload(initRequest).getUploadId());
        if (Strings.isEmpty(uploadId.get())) {
            throw new IOException("Failed to initialize multipart upload " + blobName);
        }
        final List<PartETag> parts = new ArrayList<>();
        long bytesCount = 0;
        for (int i = 1; i <= nbParts; i++) {
            final UploadPartRequest uploadRequest = new UploadPartRequest();
            uploadRequest.setBucketName(bucketName);
            uploadRequest.setKey(blobName);
            uploadRequest.setUploadId(uploadId.get());
            uploadRequest.setPartNumber(i);
            uploadRequest.setInputStream(input);
            if (i < nbParts) {
                uploadRequest.setPartSize(partSize);
                uploadRequest.setLastPart(false);
            } else {
                uploadRequest.setPartSize(lastPartSize);
                uploadRequest.setLastPart(true);
            }
            bytesCount += uploadRequest.getPartSize();
            final UploadPartResult uploadResponse = clientReference.client().uploadPart(uploadRequest);
            parts.add(uploadResponse.getPartETag());
        }
        if (bytesCount != blobSize) {
            throw new IOException("Failed to execute multipart upload for [" + blobName
                + "], expected " + blobSize + " bytes sent but got " + bytesCount);
        }
        final CompleteMultipartUploadRequest complRequest =
            new CompleteMultipartUploadRequest(bucketName, blobName, uploadId.get(), parts);
        clientReference.client().completeMultipartUpload(complRequest);
        success = true;
    } catch (final AmazonClientException e) {
        throw new IOException("Unable to upload object [" + blobName + "] using multipart upload", e);
    } finally {
        if ((success == false) && Strings.hasLength(uploadId.get())) {
            final AbortMultipartUploadRequest abortRequest =
                new AbortMultipartUploadRequest(bucketName, blobName, uploadId.get());
            try (AmazonS3Reference clientReference = blobStore.clientReference()) {
                clientReference.client().abortMultipartUpload(abortRequest);
            }
        }
    }
}
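The numberOfMultiparts helper that feeds the assertion above is not shown in this snippet. A minimal sketch consistent with that assertion, blobSize == ((nbParts - 1) * partSize) + lastPartSize; the Tuple.tuple factory and exact edge-case handling are assumptions, not the project's verified implementation:

// Hypothetical sketch: split totalSize into (number of parts, size of last part)
// such that totalSize == ((nbParts - 1) * partSize) + lastPartSize.
static Tuple<Long, Long> numberOfMultiparts(final long totalSize, final long partSize) {
    if (partSize <= 0) {
        throw new IllegalArgumentException("Part size must be greater than zero");
    }
    if (totalSize <= partSize) {
        // Everything fits into a single (possibly short) part.
        return Tuple.tuple(1L, totalSize);
    }
    final long fullParts = totalSize / partSize;
    final long remaining = totalSize % partSize;
    if (remaining == 0) {
        // Evenly divisible: the last part is a full-size part.
        return Tuple.tuple(fullParts, partSize);
    }
    // One extra, shorter part carries the remainder.
    return Tuple.tuple(fullParts + 1, remaining);
}

For example, a 10 MB blob with a 3 MB part size yields (4, 1 MB): three full parts plus a 1 MB final part, which satisfies the assertion.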
Use of com.amazonaws.services.s3.model.CompleteMultipartUploadRequest in project alluxio by Alluxio.
The class S3ALowLevelOutputStream, method completeMultiPartUpload:
/**
 * Completes the multipart upload.
 */
protected void completeMultiPartUpload(AmazonS3 s3Client, String uploadId) throws IOException {
    AmazonClientException lastException;
    CompleteMultipartUploadRequest completeRequest =
        new CompleteMultipartUploadRequest(mBucketName, mKey, uploadId, mTags);
    do {
        try {
            s3Client.completeMultipartUpload(completeRequest);
            LOG.debug("Completed multipart upload for key {} and id '{}' with {} partitions.",
                mKey, uploadId, mTags.size());
            return;
        } catch (AmazonClientException e) {
            lastException = e;
        }
    } while (mRetryPolicy.attempt());
    // This point is reached only if the complete request failed more times
    // than the allowed retry count.
    throw new IOException("Unable to complete multipart upload with id '"
        + uploadId + "' to " + mKey, lastException);
}
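Here mRetryPolicy is a retry policy whose attempt() returns true while retry budget remains, so the do/while executes the request once and then retries while attempt() allows. A hypothetical counting policy honoring that contract; the class and field names are illustrative, not Alluxio's actual implementation:

// Hypothetical counting retry policy: attempt() returns true exactly
// maxRetries times, so the do/while above runs 1 + maxRetries attempts.
final class CountingRetry {
    private final int mMaxRetries;
    private int mRetriesMade = 0;

    CountingRetry(int maxRetries) {
        mMaxRetries = maxRetries;
    }

    boolean attempt() {
        if (mRetriesMade < mMaxRetries) {
            mRetriesMade++;
            return true;
        }
        return false;
    }
}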
Use of com.amazonaws.services.s3.model.CompleteMultipartUploadRequest in project gradle by gradle.
The class S3Client, method putMultiPartObject:
private void putMultiPartObject(InputStream inputStream, Long contentLength, URI destination) {
    try {
        S3RegionalResource s3RegionalResource = new S3RegionalResource(destination);
        String bucketName = s3RegionalResource.getBucketName();
        String s3BucketKey = s3RegionalResource.getKey();
        configureClient(s3RegionalResource);
        List<PartETag> partETags = new ArrayList<>();
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, s3BucketKey)
            .withCannedACL(CannedAccessControlList.BucketOwnerFullControl);
        InitiateMultipartUploadResult initResponse = amazonS3Client.initiateMultipartUpload(initRequest);
        try {
            long filePosition = 0;
            long partSize = s3ConnectionProperties.getPartSize();
            LOGGER.debug("Attempting to put resource:[{}] into s3 bucket [{}]", s3BucketKey, bucketName);
            for (int partNumber = 1; filePosition < contentLength; partNumber++) {
                partSize = Math.min(partSize, contentLength - filePosition);
                UploadPartRequest uploadPartRequest = new UploadPartRequest()
                    .withBucketName(bucketName)
                    .withKey(s3BucketKey)
                    .withUploadId(initResponse.getUploadId())
                    .withPartNumber(partNumber)
                    .withPartSize(partSize)
                    .withInputStream(inputStream);
                partETags.add(amazonS3Client.uploadPart(uploadPartRequest).getPartETag());
                filePosition += partSize;
            }
            CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest(
                bucketName, s3BucketKey, initResponse.getUploadId(), partETags);
            amazonS3Client.completeMultipartUpload(completeRequest);
        } catch (AmazonClientException e) {
            amazonS3Client.abortMultipartUpload(
                new AbortMultipartUploadRequest(bucketName, s3BucketKey, initResponse.getUploadId()));
            throw e;
        }
    } catch (AmazonClientException e) {
        throw ResourceExceptions.putFailed(destination, e);
    }
}
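All four snippets so far hand-roll the initiate/uploadPart/complete bookkeeping. When you control the client, the SDK's high-level TransferManager (com.amazonaws.services.s3.transfer) does the part splitting, parallel part uploads, and ETag tracking for you. A minimal sketch, with s3Client, bucketName, keyName, and filePath as placeholders and the 5 MB threshold an assumed choice:

TransferManager tm = TransferManagerBuilder.standard()
        .withS3Client(s3Client)                         // any already-configured AmazonS3 client
        .withMultipartUploadThreshold(5L * 1024 * 1024) // assumed: use multipart above 5 MB
        .build();
try {
    Upload upload = tm.upload(bucketName, keyName, new File(filePath));
    upload.waitForCompletion(); // blocks until done; throws on failure
} catch (InterruptedException e) {
    Thread.currentThread().interrupt();
} finally {
    tm.shutdownNow(false); // false: leave the wrapped S3 client running for reuse
}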
Use of com.amazonaws.services.s3.model.CompleteMultipartUploadRequest in project hippo by NHS-digital-website.
The class S3SdkConnectorTest, method uploadsFileAsS3Resource_usingMultipartRequest:
@Test
public void uploadsFileAsS3Resource_usingMultipartRequest() throws Exception {
    // given
    final String contentType = newRandomString();
    final String uploadId = newRandomString();
    final String s3ObjectUrl = "https://" + bucketName + "/" + objectKey;
    final byte[] fullChunk = newRandomByteArray(BUFFER_SIZE);
    final byte[] partialChunk = newRandomByteArray(1024);
    final byte[][] expectedChunks = { fullChunk, partialChunk };
    final long contentLength = fullChunk.length + partialChunk.length;
    given(s3ObjectKeyGenerator.generateObjectKey(fileName)).willReturn(objectKey);
    final InitiateMultipartUploadResult result = mock(InitiateMultipartUploadResult.class);
    given(result.getUploadId()).willReturn(uploadId);
    given(s3.initiateMultipartUpload(any())).willReturn(result);
    final PartETag firstPartETag = mock(PartETag.class);
    final PartETag finalPartETag = mock(PartETag.class);
    final UploadPartResult uploadPartResult = mock(UploadPartResult.class);
    given(uploadPartResult.getPartETag()).willReturn(firstPartETag, finalPartETag);
    given(s3.uploadPart(any(UploadPartRequest.class))).willReturn(uploadPartResult);
    final ObjectMetadata resultMetadata = mock(ObjectMetadata.class);
    given(resultMetadata.getContentType()).willReturn(contentType);
    given(resultMetadata.getContentLength()).willReturn(contentLength);
    given(s3.getObjectMetadata(bucketName, objectKey)).willReturn(resultMetadata);
    final InputStream uploadedFileInputStream = new InputStreamStub(expectedChunks);

    // when
    final S3ObjectMetadata actualMetadata = s3Connector.uploadFile(uploadedFileInputStream, fileName, contentType);

    // then
    // assert upload request - initiation
    final ArgumentCaptor<InitiateMultipartUploadRequest> initiateRequestArgCaptor =
        ArgumentCaptor.forClass(InitiateMultipartUploadRequest.class);
    then(s3).should().initiateMultipartUpload(initiateRequestArgCaptor.capture());
    final InitiateMultipartUploadRequest initRequest = initiateRequestArgCaptor.getValue();
    assertThat("Upload to expected S3 bucket is initiated.", initRequest.getBucketName(), is(bucketName));
    assertThat("Upload is initiated with expected object key.", initRequest.getKey(), is(objectKey));

    // assert upload request - both chunks (full and partial) of the upload itself
    final ArgumentCaptor<UploadPartRequest> uploadPartRequestArgCaptor =
        ArgumentCaptor.forClass(UploadPartRequest.class);
    then(s3).should(times(2)).uploadPart(uploadPartRequestArgCaptor.capture());
    final List<UploadPartRequest> actualUploadRequests = uploadPartRequestArgCaptor.getAllValues();
    for (int i = 0; i < expectedChunks.length; i++) {
        final byte[] expectedChunk = expectedChunks[i];
        final UploadPartRequest uploadPartRequest = actualUploadRequests.get(i);
        assertThat("Chunk " + i + " uploaded to correct bucket",
            uploadPartRequest.getBucketName(), is(bucketName));
        assertThat("Chunk " + i + " uploaded with correct object key",
            uploadPartRequest.getKey(), is(objectKey));
        assertThat("Chunk " + i + " uploaded with correct upload id",
            uploadPartRequest.getUploadId(), is(uploadId));
        assertThat("Chunk " + i + " uploaded with correct part number",
            uploadPartRequest.getPartNumber(), is(i + 1));
        assertThat("Chunk " + i + " uploaded with correct part size",
            uploadPartRequest.getPartSize(), is(Long.valueOf(expectedChunk.length)));
        final byte[] actualChunk = new byte[expectedChunk.length];
        try (final InputStream uploadPartRequestInputStream = uploadPartRequest.getInputStream()) {
            IOUtils.read(uploadPartRequestInputStream, actualChunk, 0, actualChunk.length);
        }
        assertThat("Chunk " + i + " uploaded with correct content", actualChunk, is(expectedChunk));
    }

    // assert upload request - finalisation
    final ArgumentCaptor<CompleteMultipartUploadRequest> completeRequestArgCaptor =
        ArgumentCaptor.forClass(CompleteMultipartUploadRequest.class);
    then(s3).should().completeMultipartUpload(completeRequestArgCaptor.capture());
    final CompleteMultipartUploadRequest completeMultipartUploadRequest = completeRequestArgCaptor.getValue();
    assertThat("Complete upload request with correct bucket name",
        completeMultipartUploadRequest.getBucketName(), is(bucketName));
    assertThat("Complete upload request with correct object key",
        completeMultipartUploadRequest.getKey(), is(objectKey));
    assertThat("Complete upload request with correct upload id",
        completeMultipartUploadRequest.getUploadId(), is(uploadId));
    assertThat("Complete upload request with correct part ETags",
        completeMultipartUploadRequest.getPartETags(), is(asList(firstPartETag, finalPartETag)));

    // assert upload response metadata
    then(s3).should().getObjectMetadata(bucketName, objectKey);
    assertNewResourceMetadata(contentType, s3ObjectUrl, contentLength, objectKey, fileName, actualMetadata);
}
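The InputStreamStub used in the "given" section is a test helper that is not shown here. A hypothetical stand-in that serves the expected chunks back to back, so the connector under test reads one full buffer followed by one partial one:

// Hypothetical test helper: an InputStream backed by the concatenation of
// the given chunks, exhausted once all bytes have been served.
final class InputStreamStub extends java.io.InputStream {
    private final byte[] data;
    private int position = 0;

    InputStreamStub(final byte[][] chunks) {
        int total = 0;
        for (final byte[] chunk : chunks) {
            total += chunk.length;
        }
        data = new byte[total];
        int offset = 0;
        for (final byte[] chunk : chunks) {
            System.arraycopy(chunk, 0, data, offset, chunk.length);
            offset += chunk.length;
        }
    }

    @Override
    public int read() {
        // Serve one byte at a time; return -1 at end of stream per the contract.
        return position < data.length ? (data[position++] & 0xFF) : -1;
    }
}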