Use of com.amazonaws.services.s3.model.CompleteMultipartUploadRequest (AWS SDK for Java v1) in project hippo by NHS-digital-website.
The class S3ConnectorImpl, method uploadFile.
public S3ObjectMetadata uploadFile(InputStream fileStream, String fileName, String contentType) {
    String objectKey = s3ObjectKeyGenerator.generateObjectKey(fileName);
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentType(contentType);
    // initialise multipart upload
    InitiateMultipartUploadResult initResult = s3.initiateMultipartUpload(
        new InitiateMultipartUploadRequest(bucketName, objectKey, metadata));
    // upload the parts, aborting the whole upload if any part fails
    List<PartETag> partETags;
    try {
        partETags = uploadParts(fileStream, bucketName, objectKey, initResult.getUploadId());
    } catch (Exception ex) {
        final String errorMessage = "Failed to upload file " + objectKey;
        log.error(errorMessage, ex);
        s3.abortMultipartUpload(
            new AbortMultipartUploadRequest(bucketName, objectKey, initResult.getUploadId()));
        throw new RuntimeException(errorMessage, ex);
    }
    // finalise multipart upload
    s3.completeMultipartUpload(
        new CompleteMultipartUploadRequest(bucketName, objectKey, initResult.getUploadId(), partETags));
    // The complete request above returns a metadata object, but it is empty,
    // hence the need for a separate call to fetch the actual metadata.
    ObjectMetadata resultMetadata = s3.getObjectMetadata(bucketName, objectKey);
    return new S3ObjectMetadataImpl(resultMetadata, bucketName, objectKey);
}
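The uploadParts helper called above is not shown in this snippet. A minimal sketch of what such a helper could look like with the v1 SDK, assuming the same s3 client field, a fixed 5 MB buffer, and illustrative names (this is not the project's actual code):

import com.amazonaws.services.s3.model.PartETag;
import com.amazonaws.services.s3.model.UploadPartRequest;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;

// Hypothetical sketch, not the project's actual helper.
private List<PartETag> uploadParts(InputStream fileStream, String bucketName,
                                   String objectKey, String uploadId) throws IOException {
    final byte[] buffer = new byte[5 * 1024 * 1024]; // 5 MB: S3's minimum size for non-final parts
    final List<PartETag> partETags = new ArrayList<>();
    int partNumber = 1;
    while (true) {
        // Fill the buffer completely unless the stream ends first, so every
        // part except the last meets the minimum part size.
        int offset = 0;
        int read;
        while (offset < buffer.length
                && (read = fileStream.read(buffer, offset, buffer.length - offset)) != -1) {
            offset += read;
        }
        if (offset == 0) {
            break; // stream ended exactly on a part boundary
        }
        UploadPartRequest uploadRequest = new UploadPartRequest()
                .withBucketName(bucketName)
                .withKey(objectKey)
                .withUploadId(uploadId)
                .withPartNumber(partNumber++)
                .withInputStream(new ByteArrayInputStream(buffer, 0, offset))
                .withPartSize(offset);
        partETags.add(s3.uploadPart(uploadRequest).getPartETag());
        if (offset < buffer.length) {
            break; // short final part: the stream is exhausted
        }
    }
    return partETags;
}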
Use of com.amazonaws.services.s3.model.CompleteMultipartUploadRequest (AWS SDK for Java v1) in project bender by Nextdoor.
The class S3TransportFactory, method close.
@Override
public void close() {
    Exception e = null;
    for (MultiPartUpload upload : this.pendingMultiPartUploads.values()) {
        if (e == null) {
            CompleteMultipartUploadRequest req = upload.getCompleteMultipartUploadRequest();
            try {
                this.client.completeMultipartUpload(req);
            } catch (AmazonS3Exception e1) {
                logger.error("failed to complete multi-part upload for " + upload.getKey()
                        + " parts " + upload.getPartCount(), e1);
                e = e1;
            }
        } else {
            // Once one completion has failed, abort the remaining uploads instead of completing them.
            logger.warn("aborting upload for: " + upload.getKey());
            AbortMultipartUploadRequest req = upload.getAbortMultipartUploadRequest();
            try {
                this.client.abortMultipartUpload(req);
            } catch (AmazonS3Exception e1) {
                logger.error("failed to abort multi-part upload", e1);
            }
        }
    }
    this.pendingMultiPartUploads.clear();
    if (e != null) {
        throw new RuntimeException("failed while closing transport", e);
    }
}
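Bender's MultiPartUpload class is not shown here; each instance is assumed to carry everything needed to either complete or abort its upload. A hypothetical sketch of such a holder (field and method names are assumptions, not Bender's actual implementation):

import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
import com.amazonaws.services.s3.model.PartETag;
import java.util.List;

// Hypothetical holder; Bender's real MultiPartUpload class may differ.
class MultiPartUpload {
    private final String bucketName;
    private final String key;
    private final String uploadId;
    private final List<PartETag> partETags;

    MultiPartUpload(String bucketName, String key, String uploadId, List<PartETag> partETags) {
        this.bucketName = bucketName;
        this.key = key;
        this.uploadId = uploadId;
        this.partETags = partETags;
    }

    String getKey() {
        return key;
    }

    int getPartCount() {
        return partETags.size();
    }

    CompleteMultipartUploadRequest getCompleteMultipartUploadRequest() {
        return new CompleteMultipartUploadRequest(bucketName, key, uploadId, partETags);
    }

    AbortMultipartUploadRequest getAbortMultipartUploadRequest() {
        return new AbortMultipartUploadRequest(bucketName, key, uploadId);
    }
}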
Use of com.amazonaws.services.s3.model.CompleteMultipartUploadRequest (AWS SDK for Java v1) in project beam by apache.
The class S3WritableByteChannel, method close.
@Override
public void close() throws IOException {
    open = false;
    if (uploadBuffer.remaining() > 0) {
        flush();
    }
    CompleteMultipartUploadRequest request = new CompleteMultipartUploadRequest()
        .withBucketName(path.getBucket())
        .withKey(path.getKey())
        .withUploadId(uploadId)
        .withPartETags(eTags);
    try {
        amazonS3.completeMultipartUpload(request);
    } catch (AmazonClientException e) {
        throw new IOException(e);
    }
}
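The flush() call relies on channel state not shown in this snippet (uploadBuffer, partNumber, eTags). A plausible, simplified sketch of what it does, uploading the buffered bytes as the next part and recording the ETag (simplified; the real implementation also handles details such as checksums):

// Plausible sketch of flush(); assumes uploadBuffer is a heap (array-backed) ByteBuffer.
private void flush() throws IOException {
    uploadBuffer.flip(); // switch the buffer from writing to reading
    ByteArrayInputStream inputStream =
        new ByteArrayInputStream(uploadBuffer.array(), 0, uploadBuffer.limit());
    UploadPartRequest request = new UploadPartRequest()
        .withBucketName(path.getBucket())
        .withKey(path.getKey())
        .withUploadId(uploadId)
        .withPartNumber(partNumber++)
        .withPartSize(uploadBuffer.limit())
        .withInputStream(inputStream);
    try {
        eTags.add(amazonS3.uploadPart(request).getPartETag());
    } catch (AmazonClientException e) {
        throw new IOException(e);
    }
    uploadBuffer.clear(); // ready to accept the next part's bytes
}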
Use of software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest (AWS SDK for Java v2) in project beam by apache.
The class S3FileSystem, method multipartCopy.
@VisibleForTesting
CompleteMultipartUploadResponse multipartCopy(S3ResourceId sourcePath, S3ResourceId destinationPath,
        HeadObjectResponse sourceObjectHead) throws SdkServiceException {
    CreateMultipartUploadRequest initiateUploadRequest = CreateMultipartUploadRequest.builder()
            .bucket(destinationPath.getBucket()).key(destinationPath.getKey())
            .storageClass(config.getS3StorageClass()).metadata(sourceObjectHead.metadata())
            .serverSideEncryption(config.getSSEAlgorithm()).ssekmsKeyId(config.getSSEKMSKeyId())
            .sseCustomerKey(config.getSSECustomerKey().getKey())
            .sseCustomerAlgorithm(config.getSSECustomerKey().getAlgorithm())
            .build();
    CreateMultipartUploadResponse createMultipartUploadResponse =
            s3Client.get().createMultipartUpload(initiateUploadRequest);
    final String uploadId = createMultipartUploadResponse.uploadId();
    List<CompletedPart> completedParts = new ArrayList<>();
    final long objectSize = sourceObjectHead.contentLength();
    CopyPartResult copyPartResult;
    CompletedPart completedPart;
    // extra validation in case a caller calls S3FileSystem.multipartCopy directly
    // without using S3FileSystem.copy in the future
    if (objectSize == 0) {
        final UploadPartCopyRequest uploadPartCopyRequest = UploadPartCopyRequest.builder()
                .bucket(destinationPath.getBucket()).key(destinationPath.getKey())
                .copySource(sourcePath.getBucket() + "/" + sourcePath.getKey())
                .uploadId(uploadId).partNumber(1)
                .sseCustomerKey(config.getSSECustomerKey().getKey())
                .sseCustomerAlgorithm(config.getSSECustomerKey().getAlgorithm())
                .copySourceSSECustomerKey(config.getSSECustomerKey().getKey())
                .copySourceSSECustomerAlgorithm(config.getSSECustomerKey().getAlgorithm())
                .build();
        copyPartResult = s3Client.get().uploadPartCopy(uploadPartCopyRequest).copyPartResult();
        completedPart = CompletedPart.builder().partNumber(1).eTag(copyPartResult.eTag()).build();
        completedParts.add(completedPart);
    } else {
        long bytePosition = 0;
        Integer uploadBufferSizeBytes = config.getS3UploadBufferSizeBytes();
        // Amazon parts are 1-indexed, not zero-indexed.
        for (int partNumber = 1; bytePosition < objectSize; partNumber++) {
            final UploadPartCopyRequest uploadPartCopyRequest = UploadPartCopyRequest.builder()
                    .bucket(destinationPath.getBucket()).key(destinationPath.getKey())
                    .copySource(sourcePath.getBucket() + "/" + sourcePath.getKey())
                    .uploadId(uploadId).partNumber(partNumber)
                    .copySourceRange(String.format("bytes=%s-%s", bytePosition,
                            Math.min(objectSize - 1, bytePosition + uploadBufferSizeBytes - 1)))
                    .sseCustomerKey(config.getSSECustomerKey().getKey())
                    .sseCustomerAlgorithm(config.getSSECustomerKey().getAlgorithm())
                    .copySourceSSECustomerKey(config.getSSECustomerKey().getKey())
                    .copySourceSSECustomerAlgorithm(config.getSSECustomerKey().getAlgorithm())
                    .build();
            copyPartResult = s3Client.get().uploadPartCopy(uploadPartCopyRequest).copyPartResult();
            completedPart = CompletedPart.builder().partNumber(partNumber).eTag(copyPartResult.eTag()).build();
            completedParts.add(completedPart);
            bytePosition += uploadBufferSizeBytes;
        }
    }
    CompletedMultipartUpload completedMultipartUpload =
            CompletedMultipartUpload.builder().parts(completedParts).build();
    CompleteMultipartUploadRequest completeUploadRequest = CompleteMultipartUploadRequest.builder()
            .bucket(destinationPath.getBucket()).key(destinationPath.getKey())
            .uploadId(uploadId).multipartUpload(completedMultipartUpload)
            .build();
    return s3Client.get().completeMultipartUpload(completeUploadRequest);
}
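The copySourceRange arithmetic above is easy to get wrong. A small standalone sketch with illustrative sizes shows the 1-indexed parts and inclusive byte ranges it produces:

// Standalone illustration of the part/range math used above; sizes are illustrative.
public class RangeDemo {
    public static void main(String[] args) {
        long objectSize = 12L * 1024 * 1024;           // 12 MiB source object
        long uploadBufferSizeBytes = 5L * 1024 * 1024; // 5 MiB parts
        long bytePosition = 0;
        for (int partNumber = 1; bytePosition < objectSize; partNumber++) {
            long rangeEnd = Math.min(objectSize - 1, bytePosition + uploadBufferSizeBytes - 1);
            System.out.printf("part %d: bytes=%d-%d%n", partNumber, bytePosition, rangeEnd);
            bytePosition += uploadBufferSizeBytes;
        }
        // Prints three parts: bytes=0-5242879, bytes=5242880-10485759, and a short
        // final part bytes=10485760-12582911, matching S3's inclusive byte ranges.
    }
}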
Use of com.amazonaws.services.s3.model.CompleteMultipartUploadRequest (AWS SDK for Java v1) in project crate by crate.
The class S3BlobContainer, method executeMultipartUpload.
/**
* Uploads a blob using multipart upload requests.
*/
void executeMultipartUpload(final S3BlobStore blobStore, final String blobName,
        final InputStream input, final long blobSize) throws IOException {
    if (blobSize > MAX_FILE_SIZE_USING_MULTIPART.getBytes()) {
        throw new IllegalArgumentException("Multipart upload request size [" + blobSize
            + "] can't be larger than " + MAX_FILE_SIZE_USING_MULTIPART);
    }
    if (blobSize < MIN_PART_SIZE_USING_MULTIPART.getBytes()) {
        throw new IllegalArgumentException("Multipart upload request size [" + blobSize
            + "] can't be smaller than " + MIN_PART_SIZE_USING_MULTIPART);
    }
    final long partSize = blobStore.bufferSizeInBytes();
    final Tuple<Long, Long> multiparts = numberOfMultiparts(blobSize, partSize);
    if (multiparts.v1() > Integer.MAX_VALUE) {
        throw new IllegalArgumentException("Too many multipart upload requests, maybe try a larger buffer size?");
    }
    final int nbParts = multiparts.v1().intValue();
    final long lastPartSize = multiparts.v2();
    assert blobSize == (((nbParts - 1) * partSize) + lastPartSize) : "blobSize does not match multipart sizes";

    final SetOnce<String> uploadId = new SetOnce<>();
    final String bucketName = blobStore.bucket();
    boolean success = false;

    final InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, blobName);
    initRequest.setStorageClass(blobStore.getStorageClass());
    initRequest.setCannedACL(blobStore.getCannedACL());
    if (blobStore.serverSideEncryption()) {
        final ObjectMetadata md = new ObjectMetadata();
        md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        initRequest.setObjectMetadata(md);
    }

    try (AmazonS3Reference clientReference = blobStore.clientReference()) {
        uploadId.set(clientReference.client().initiateMultipartUpload(initRequest).getUploadId());
        if (Strings.isEmpty(uploadId.get())) {
            throw new IOException("Failed to initialize multipart upload " + blobName);
        }
        final List<PartETag> parts = new ArrayList<>();
        long bytesCount = 0;
        for (int i = 1; i <= nbParts; i++) {
            final UploadPartRequest uploadRequest = new UploadPartRequest();
            uploadRequest.setBucketName(bucketName);
            uploadRequest.setKey(blobName);
            uploadRequest.setUploadId(uploadId.get());
            uploadRequest.setPartNumber(i);
            uploadRequest.setInputStream(input);
            if (i < nbParts) {
                uploadRequest.setPartSize(partSize);
                uploadRequest.setLastPart(false);
            } else {
                uploadRequest.setPartSize(lastPartSize);
                uploadRequest.setLastPart(true);
            }
            bytesCount += uploadRequest.getPartSize();
            final UploadPartResult uploadResponse = clientReference.client().uploadPart(uploadRequest);
            parts.add(uploadResponse.getPartETag());
        }
        if (bytesCount != blobSize) {
            throw new IOException("Failed to execute multipart upload for [" + blobName
                + "], expected " + blobSize + " bytes sent but got " + bytesCount);
        }
        final CompleteMultipartUploadRequest complRequest =
            new CompleteMultipartUploadRequest(bucketName, blobName, uploadId.get(), parts);
        clientReference.client().completeMultipartUpload(complRequest);
        success = true;
    } catch (final AmazonClientException e) {
        throw new IOException("Unable to upload object [" + blobName + "] using multipart upload", e);
    } finally {
        if ((success == false) && Strings.hasLength(uploadId.get())) {
            final AbortMultipartUploadRequest abortRequest =
                new AbortMultipartUploadRequest(bucketName, blobName, uploadId.get());
            try (AmazonS3Reference clientReference = blobStore.clientReference()) {
                clientReference.client().abortMultipartUpload(abortRequest);
            }
        }
    }
}
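The numberOfMultiparts helper is referenced but not shown. A plausible implementation consistent with the assertion blobSize == ((nbParts - 1) * partSize) + lastPartSize (a sketch, not necessarily Crate's exact code):

// Sketch: split a blob of totalSize bytes into full parts of partSize bytes plus a
// (possibly shorter) last part. Returns (number of parts, size of the last part),
// using the same v1()/v2() Tuple type as the caller above.
static Tuple<Long, Long> numberOfMultiparts(final long totalSize, final long partSize) {
    if (partSize <= 0) {
        throw new IllegalArgumentException("Part size must be greater than zero");
    }
    if (totalSize == 0L || totalSize <= partSize) {
        return Tuple.tuple(1L, totalSize); // everything fits in a single part
    }
    final long parts = totalSize / partSize;
    final long remaining = totalSize % partSize;
    if (remaining == 0) {
        return Tuple.tuple(parts, partSize); // the last part is a full part
    }
    return Tuple.tuple(parts + 1, remaining); // one extra, shorter part for the remainder
}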