use of com.amazonaws.services.s3.model.AbortMultipartUploadRequest in project nifi by apache.
the class PutS3Object method abortS3MultipartUpload.
protected void abortS3MultipartUpload(final AmazonS3Client s3, final String bucket, final MultipartUpload upload) {
    final String uploadKey = upload.getKey();
    final String uploadId = upload.getUploadId();
    final AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(bucket, uploadKey, uploadId);
    try {
        s3.abortMultipartUpload(abortRequest);
        getLogger().info("Aborting out of date multipart upload, bucket {} key {} ID {}, initiated {}",
                new Object[] { bucket, uploadKey, uploadId, logFormat.format(upload.getInitiated()) });
    } catch (AmazonClientException ace) {
        getLogger().info("Error trying to abort multipart upload from bucket {} with key {} and ID {}: {}",
                new Object[] { bucket, uploadKey, uploadId, ace.getMessage() });
    }
}
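The helper above takes an already-discovered MultipartUpload. A minimal sketch of a driver that finds and aborts stale uploads with the v1 SDK follows; the method name and age cutoff are assumptions for illustration, not NiFi's actual age-off logic, and only the first page of results is handled.

import java.util.Date;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.model.ListMultipartUploadsRequest;
import com.amazonaws.services.s3.model.MultipartUpload;
import com.amazonaws.services.s3.model.MultipartUploadListing;

// Hypothetical driver: abort every multipart upload initiated before the cutoff.
protected void abortStaleS3MultipartUploads(final AmazonS3Client s3, final String bucket, final long maxAgeMillis) {
    final Date cutoff = new Date(System.currentTimeMillis() - maxAgeMillis);
    final MultipartUploadListing listing = s3.listMultipartUploads(new ListMultipartUploadsRequest(bucket));
    for (MultipartUpload upload : listing.getMultipartUploads()) {
        if (upload.getInitiated().before(cutoff)) {
            abortS3MultipartUpload(s3, bucket, upload); // the method shown above
        }
    }
}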
use of com.amazonaws.services.s3.model.AbortMultipartUploadRequest in project hippo by NHS-digital-website.
the class S3ConnectorImpl method uploadFile.
public S3ObjectMetadata uploadFile(InputStream fileStream, String fileName, String contentType) {
    String objectKey = s3ObjectKeyGenerator.generateObjectKey(fileName);
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentType(contentType);
    // initialise multipart upload
    InitiateMultipartUploadResult initResult = s3.initiateMultipartUpload(
            new InitiateMultipartUploadRequest(bucketName, objectKey, metadata));
    // upload the parts, aborting the whole upload if any part fails
    List<PartETag> partETags;
    try {
        partETags = uploadParts(fileStream, bucketName, objectKey, initResult.getUploadId());
    } catch (Exception ex) {
        final String errorMessage = "Failed to upload file " + objectKey;
        log.error(errorMessage, ex);
        s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucketName, objectKey, initResult.getUploadId()));
        throw new RuntimeException(errorMessage, ex);
    }
    // finalise multipart upload
    s3.completeMultipartUpload(new CompleteMultipartUploadRequest(bucketName, objectKey, initResult.getUploadId(), partETags));
    // The above put request returns a metadata object, but it's empty,
    // hence the need for a separate call to fetch the actual metadata.
    ObjectMetadata resultMetadata = s3.getObjectMetadata(bucketName, objectKey);
    return new S3ObjectMetadataImpl(resultMetadata, bucketName, objectKey);
}
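uploadParts is referenced but not shown on this page. Below is a minimal sketch of what such a helper could look like with the v1 SDK, buffering the stream into fixed-size parts; the 5 MB part size, the readFully helper, and the use of the class's s3 client field are assumptions, not the project's actual code.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import com.amazonaws.services.s3.model.PartETag;
import com.amazonaws.services.s3.model.UploadPartRequest;

// Hypothetical part-upload loop: read the stream in 5 MB chunks and upload
// each chunk as one numbered part, collecting the ETags S3 returns.
private List<PartETag> uploadParts(InputStream in, String bucket, String key, String uploadId) throws IOException {
    final int partSize = 5 * 1024 * 1024; // S3's minimum size for every part except the last
    final List<PartETag> partETags = new ArrayList<>();
    final byte[] buffer = new byte[partSize];
    int partNumber = 1;
    int read;
    while ((read = readFully(in, buffer)) > 0) {
        UploadPartRequest request = new UploadPartRequest()
                .withBucketName(bucket)
                .withKey(key)
                .withUploadId(uploadId)
                .withPartNumber(partNumber++)
                .withInputStream(new ByteArrayInputStream(buffer, 0, read))
                .withPartSize(read);
        partETags.add(s3.uploadPart(request).getPartETag());
    }
    return partETags;
}

// Reads up to buffer.length bytes, returning the count actually read (0 at end of stream).
private static int readFully(InputStream in, byte[] buffer) throws IOException {
    int total = 0;
    while (total < buffer.length) {
        int n = in.read(buffer, total, buffer.length - total);
        if (n < 0) {
            break;
        }
        total += n;
    }
    return total;
}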
use of software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest in project aws-doc-sdk-examples by awsdocs.
the class AbortMultipartUpload method abortUploads.
// snippet-start:[s3.java2.abort_upload.main]
public static void abortUploads(S3Client s3, String bucketName, String accountId) {
    try {
        ListMultipartUploadsRequest listMultipartUploadsRequest = ListMultipartUploadsRequest.builder()
                .bucket(bucketName)
                .build();
        ListMultipartUploadsResponse response = s3.listMultipartUploads(listMultipartUploadsRequest);
        List<MultipartUpload> uploads = response.uploads();
        // Abort each in-progress multipart upload found in the bucket.
        for (MultipartUpload upload : uploads) {
            AbortMultipartUploadRequest abortMultipartUploadRequest = AbortMultipartUploadRequest.builder()
                    .bucket(bucketName)
                    .key(upload.key())
                    .expectedBucketOwner(accountId)
                    .uploadId(upload.uploadId())
                    .build();
            s3.abortMultipartUpload(abortMultipartUploadRequest);
        }
    } catch (S3Exception e) {
        System.err.println(e.getMessage());
        System.exit(1);
    }
}
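A possible caller for this example; the region, bucket name, and account ID below are placeholder assumptions, not values from the original.

import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;

public static void main(String[] args) {
    S3Client s3 = S3Client.builder()
            .region(Region.US_EAST_1) // assumed region
            .build();
    abortUploads(s3, "amzn-s3-demo-bucket", "111122223333");
    s3.close();
}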
use of com.amazonaws.services.s3.model.AbortMultipartUploadRequest in project crate by crate.
the class S3BlobContainer method executeMultipartUpload.
/**
 * Uploads a blob using multipart upload requests.
 */
void executeMultipartUpload(final S3BlobStore blobStore, final String blobName, final InputStream input, final long blobSize) throws IOException {
    if (blobSize > MAX_FILE_SIZE_USING_MULTIPART.getBytes()) {
        throw new IllegalArgumentException("Multipart upload request size [" + blobSize + "] can't be larger than " + MAX_FILE_SIZE_USING_MULTIPART);
    }
    if (blobSize < MIN_PART_SIZE_USING_MULTIPART.getBytes()) {
        throw new IllegalArgumentException("Multipart upload request size [" + blobSize + "] can't be smaller than " + MIN_PART_SIZE_USING_MULTIPART);
    }
    final long partSize = blobStore.bufferSizeInBytes();
    final Tuple<Long, Long> multiparts = numberOfMultiparts(blobSize, partSize);
    if (multiparts.v1() > Integer.MAX_VALUE) {
        throw new IllegalArgumentException("Too many multipart upload requests, maybe try a larger buffer size?");
    }
    final int nbParts = multiparts.v1().intValue();
    final long lastPartSize = multiparts.v2();
    assert blobSize == (((nbParts - 1) * partSize) + lastPartSize) : "blobSize does not match multipart sizes";
    final SetOnce<String> uploadId = new SetOnce<>();
    final String bucketName = blobStore.bucket();
    boolean success = false;
    final InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, blobName);
    initRequest.setStorageClass(blobStore.getStorageClass());
    initRequest.setCannedACL(blobStore.getCannedACL());
    if (blobStore.serverSideEncryption()) {
        final ObjectMetadata md = new ObjectMetadata();
        md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        initRequest.setObjectMetadata(md);
    }
    try (AmazonS3Reference clientReference = blobStore.clientReference()) {
        uploadId.set(clientReference.client().initiateMultipartUpload(initRequest).getUploadId());
        if (Strings.isEmpty(uploadId.get())) {
            throw new IOException("Failed to initialize multipart upload " + blobName);
        }
        final List<PartETag> parts = new ArrayList<>();
        long bytesCount = 0;
        for (int i = 1; i <= nbParts; i++) {
            final UploadPartRequest uploadRequest = new UploadPartRequest();
            uploadRequest.setBucketName(bucketName);
            uploadRequest.setKey(blobName);
            uploadRequest.setUploadId(uploadId.get());
            uploadRequest.setPartNumber(i);
            uploadRequest.setInputStream(input);
            if (i < nbParts) {
                uploadRequest.setPartSize(partSize);
                uploadRequest.setLastPart(false);
            } else {
                uploadRequest.setPartSize(lastPartSize);
                uploadRequest.setLastPart(true);
            }
            bytesCount += uploadRequest.getPartSize();
            final UploadPartResult uploadResponse = clientReference.client().uploadPart(uploadRequest);
            parts.add(uploadResponse.getPartETag());
        }
        if (bytesCount != blobSize) {
            throw new IOException("Failed to execute multipart upload for [" + blobName + "], expected " + blobSize + " bytes sent but got " + bytesCount);
        }
        final CompleteMultipartUploadRequest complRequest = new CompleteMultipartUploadRequest(bucketName, blobName, uploadId.get(), parts);
        clientReference.client().completeMultipartUpload(complRequest);
        success = true;
    } catch (final AmazonClientException e) {
        throw new IOException("Unable to upload object [" + blobName + "] using multipart upload", e);
    } finally {
        // If anything failed after the upload was initiated, abort it so S3 doesn't keep orphaned parts.
        if ((success == false) && Strings.hasLength(uploadId.get())) {
            final AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(bucketName, blobName, uploadId.get());
            try (AmazonS3Reference clientReference = blobStore.clientReference()) {
                clientReference.client().abortMultipartUpload(abortRequest);
            }
        }
    }
}
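numberOfMultiparts is referenced but not shown. In the upstream Elasticsearch code this class derives from, it splits the blob size into full-size parts plus a possibly smaller final part. A minimal sketch of that arithmetic, returning a plain long[] pair instead of the project's Tuple type:

// Sketch: compute {numberOfParts, lastPartSize} for a blob of totalSize
// bytes split into partSize chunks.
static long[] numberOfMultiparts(final long totalSize, final long partSize) {
    if (partSize <= 0) {
        throw new IllegalArgumentException("Part size must be greater than zero");
    }
    if (totalSize == 0L || totalSize <= partSize) {
        return new long[] { 1L, totalSize };       // everything fits in a single part
    }
    final long parts = totalSize / partSize;
    final long remaining = totalSize % partSize;
    return remaining == 0
            ? new long[] { parts, partSize }       // last part is a full part
            : new long[] { parts + 1, remaining }; // trailing partial part
}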
use of com.amazonaws.services.s3.model.AbortMultipartUploadRequest in project alluxio by Alluxio.
the class S3ALowLevelOutputStream method abortMultiPartUpload.
/**
 * Aborts the multipart upload, retrying until the retry policy is exhausted.
 */
protected void abortMultiPartUpload(AmazonS3 s3Client, String uploadId) {
    AmazonClientException lastException;
    do {
        try {
            s3Client.abortMultipartUpload(new AbortMultipartUploadRequest(mBucketName, mKey, uploadId));
            LOG.warn("Aborted multipart upload for key {} and id '{}' to bucket {}", mKey, uploadId, mBucketName);
            return;
        } catch (AmazonClientException e) {
            lastException = e;
        }
    } while (mRetryPolicy.attempt());
    // This point is only reached if the operation failed more
    // than the allowed retry count.
    LOG.warn("Unable to abort multipart upload for key '{}' and id '{}' to bucket {}. "
            + "You may need to enable the periodical cleanup by setting property {} "
            + "to be true.", mKey, uploadId, mBucketName, PropertyKey.UNDERFS_CLEANUP_ENABLED.getName(), lastException);
}
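mRetryPolicy is Alluxio's RetryPolicy, whose attempt() returns true while more attempts are allowed. A hypothetical stand-in with a fixed attempt budget, matching the do/while usage above, for illustration only:

// Hypothetical stand-in for the retry policy: permits a fixed number of
// retries, returning false once the budget is spent.
final class CountingRetryPolicy {
    private final int mMaxRetries;
    private int mRetries;

    CountingRetryPolicy(int maxRetries) {
        mMaxRetries = maxRetries;
    }

    /** Returns true and consumes one retry if any remain, false otherwise. */
    boolean attempt() {
        if (mRetries >= mMaxRetries) {
            return false;
        }
        mRetries++;
        return true;
    }
}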