Use of com.amazonaws.services.s3.model.ListMultipartUploadsRequest in project nifi by apache.
Class PutS3Object, method localUploadExistsInS3.
protected boolean localUploadExistsInS3(final AmazonS3Client s3, final String bucket, final MultipartState localState) {
    ListMultipartUploadsRequest listRequest = new ListMultipartUploadsRequest(bucket);
    MultipartUploadListing listing = s3.listMultipartUploads(listRequest);
    for (MultipartUpload upload : listing.getMultipartUploads()) {
        if (upload.getUploadId().equals(localState.getUploadId())) {
            return true;
        }
    }
    return false;
}
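A brief usage sketch, not taken from PutS3Object itself: a check like this is useful before resuming a multipart upload that local state still remembers. The s3Client, bucket, key, and localState names below are assumed to be in scope; initiateMultipartUpload is the standard SDK call for starting a new upload.

// Hypothetical caller fragment: resume the remembered upload only if S3 still lists it.
String uploadId;
if (localUploadExistsInS3(s3Client, bucket, localState)) {
    // The upload is still in progress on S3; keep using the recorded upload ID.
    uploadId = localState.getUploadId();
} else {
    // The upload was completed or aborted elsewhere; start a fresh multipart upload.
    uploadId = s3Client.initiateMultipartUpload(new InitiateMultipartUploadRequest(bucket, key)).getUploadId();
    // ... record the new upload ID in local state before uploading any parts
}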
Use of com.amazonaws.services.s3.model.ListMultipartUploadsRequest in project nifi by apache.
Class PutS3Object, method getS3AgeoffListAndAgeoffLocalState.
protected MultipartUploadListing getS3AgeoffListAndAgeoffLocalState(final ProcessContext context, final AmazonS3Client s3, final long now) {
    final long ageoff_interval = context.getProperty(MULTIPART_S3_AGEOFF_INTERVAL).asTimePeriod(TimeUnit.MILLISECONDS);
    final String bucket = context.getProperty(BUCKET).evaluateAttributeExpressions().getValue();
    final Long maxAge = context.getProperty(MULTIPART_S3_MAX_AGE).asTimePeriod(TimeUnit.MILLISECONDS);
    final long ageCutoff = now - maxAge;
    final List<MultipartUpload> ageoffList = new ArrayList<>();
    if ((lastS3AgeOff.get() < now - ageoff_interval) && s3BucketLock.tryLock()) {
        try {
            ListMultipartUploadsRequest listRequest = new ListMultipartUploadsRequest(bucket);
            MultipartUploadListing listing = s3.listMultipartUploads(listRequest);
            for (MultipartUpload upload : listing.getMultipartUploads()) {
                long uploadTime = upload.getInitiated().getTime();
                if (uploadTime < ageCutoff) {
                    ageoffList.add(upload);
                }
            }
            // ageoff any local state
            ageoffLocalState(ageCutoff);
            lastS3AgeOff.set(System.currentTimeMillis());
        } catch (AmazonClientException e) {
            if (e instanceof AmazonS3Exception && ((AmazonS3Exception) e).getStatusCode() == 403 && ((AmazonS3Exception) e).getErrorCode().equals("AccessDenied")) {
                getLogger().warn("AccessDenied checking S3 Multipart Upload list for {}: {} " + "** The configured user does not have the s3:ListBucketMultipartUploads permission " + "for this bucket, S3 ageoff cannot occur without this permission. Next ageoff check " + "time is being advanced by interval to prevent checking on every upload **", new Object[] { bucket, e.getMessage() });
                lastS3AgeOff.set(System.currentTimeMillis());
            } else {
                getLogger().error("Error checking S3 Multipart Upload list for {}: {}", new Object[] { bucket, e.getMessage() });
            }
        } finally {
            s3BucketLock.unlock();
        }
    }
    MultipartUploadListing result = new MultipartUploadListing();
    result.setBucketName(bucket);
    result.setMultipartUploads(ageoffList);
    return result;
}
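The listing returned here holds the uploads that have exceeded the configured maximum age; the processor subsequently aborts them. A minimal sketch of that follow-up step, assuming the same AmazonS3Client, could look like the helper below. The method name abortStaleUploads is illustrative and this is not the actual PutS3Object code.

// Illustrative helper (hypothetical name): abort every upload that
// getS3AgeoffListAndAgeoffLocalState() reported as older than the age cutoff.
protected void abortStaleUploads(final AmazonS3Client s3, final MultipartUploadListing ageoffListing) {
    for (final MultipartUpload upload : ageoffListing.getMultipartUploads()) {
        final AbortMultipartUploadRequest abortRequest =
                new AbortMultipartUploadRequest(ageoffListing.getBucketName(), upload.getKey(), upload.getUploadId());
        s3.abortMultipartUpload(abortRequest);
        getLogger().info("Aborted aged-off multipart upload for key {} with upload ID {}",
                new Object[] { upload.getKey(), upload.getUploadId() });
    }
}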
Use of com.amazonaws.services.s3.model.ListMultipartUploadsRequest in project herd by FINRAOS.
Class S3DaoTest, method testAbortMultipartUploadsAssertAbortOnlyBeforeThreshold.
@Test
public void testAbortMultipartUploadsAssertAbortOnlyBeforeThreshold() {
    S3Operations originalS3Operations = (S3Operations) ReflectionTestUtils.getField(s3Dao, "s3Operations");
    S3Operations mockS3Operations = mock(S3Operations.class);
    ReflectionTestUtils.setField(s3Dao, "s3Operations", mockS3Operations);
    try {
        String s3BucketName = "s3BucketName";
        String uploadKey = "uploadKey1";
        String uploadId = "uploadId1";
        Date uploadInitiated = new Date(0);
        S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = new S3FileTransferRequestParamsDto();
        s3FileTransferRequestParamsDto.setS3BucketName(s3BucketName);
        Date thresholdDate = new Date(1);
        when(mockS3Operations.listMultipartUploads(any(), any())).then(new Answer<MultipartUploadListing>() {

            @Override
            public MultipartUploadListing answer(InvocationOnMock invocation) throws Throwable {
                ListMultipartUploadsRequest listMultipartUploadsRequest = invocation.getArgument(0);
                assertEquals(s3BucketName, listMultipartUploadsRequest.getBucketName());
                MultipartUploadListing multipartUploadListing = new MultipartUploadListing();
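                // This upload is aborted since its initiated date is before the threshold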
                {
                    MultipartUpload multipartUpload = new MultipartUpload();
                    multipartUpload.setUploadId(uploadId);
                    multipartUpload.setKey(uploadKey);
                    multipartUpload.setInitiated(uploadInitiated);
                    multipartUploadListing.getMultipartUploads().add(multipartUpload);
                }
                // This upload is not aborted since the initiated date is greater than the threshold
                {
                    MultipartUpload multipartUpload = new MultipartUpload();
                    multipartUpload.setUploadId("uploadId2");
                    multipartUpload.setKey("uploadKey2");
                    multipartUpload.setInitiated(new Date(2));
                    multipartUploadListing.getMultipartUploads().add(multipartUpload);
                }
                return multipartUploadListing;
            }
        });
        assertEquals(1, s3Dao.abortMultipartUploads(s3FileTransferRequestParamsDto, thresholdDate));
        verify(mockS3Operations).listMultipartUploads(any(), any());
        /*
         * Assert that S3Operations.abortMultipartUpload is called exactly ONCE with arguments matching the given ArgumentMatcher
         */
        verify(mockS3Operations).abortMultipartUpload(argThat(argument -> Objects.equal(s3BucketName, argument.getBucketName()) && Objects.equal(uploadKey, argument.getKey()) && Objects.equal(uploadId, argument.getUploadId())), any());
        // Assert that no other interactions occur with the mock
        verifyNoMoreInteractions(mockS3Operations);
    } finally {
        ReflectionTestUtils.setField(s3Dao, "s3Operations", originalS3Operations);
    }
}
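As a small readability variation on the verification above (not part of the herd test), the lambda passed to argThat could be pulled out into a named matcher. This assumes org.mockito.ArgumentMatcher is imported and that abortMultipartUpload takes an AbortMultipartUploadRequest, as S3DaoImpl below constructs.

// Optional refactor sketch: name the matcher instead of inlining the lambda.
ArgumentMatcher<AbortMultipartUploadRequest> expectedAbortRequest = argument ->
        Objects.equal(s3BucketName, argument.getBucketName())
                && Objects.equal(uploadKey, argument.getKey())
                && Objects.equal(uploadId, argument.getUploadId());
verify(mockS3Operations).abortMultipartUpload(argThat(expectedAbortRequest), any());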
Use of com.amazonaws.services.s3.model.ListMultipartUploadsRequest in project herd by FINRAOS.
Class S3DaoTest, method testAbortMultipartUploadsAssertTruncatedResult.
@Test
public void testAbortMultipartUploadsAssertTruncatedResult() {
    S3Operations originalS3Operations = (S3Operations) ReflectionTestUtils.getField(s3Dao, "s3Operations");
    S3Operations mockS3Operations = mock(S3Operations.class);
    ReflectionTestUtils.setField(s3Dao, "s3Operations", mockS3Operations);
    try {
        String s3BucketName = "s3BucketName";
        String uploadKey = "uploadKey";
        String uploadId = "uploadId";
        Date uploadInitiated = new Date(0);
        S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = new S3FileTransferRequestParamsDto();
        s3FileTransferRequestParamsDto.setS3BucketName(s3BucketName);
        Date thresholdDate = new Date(1);
        when(mockS3Operations.listMultipartUploads(any(), any())).then(new Answer<MultipartUploadListing>() {

            @Override
            public MultipartUploadListing answer(InvocationOnMock invocation) throws Throwable {
                ListMultipartUploadsRequest listMultipartUploadsRequest = invocation.getArgument(0);
                String keyMarker = listMultipartUploadsRequest.getKeyMarker();
                String uploadIdMarker = listMultipartUploadsRequest.getUploadIdMarker();
                MultipartUploadListing multipartUploadListing = new MultipartUploadListing();
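                // First call: no markers are set, so return an empty, truncated listing that
                // advertises the next key/upload ID markers. Second call: assert that those
                // markers were passed back and return the single upload to be aborted.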
                if (keyMarker == null || uploadIdMarker == null) {
                    multipartUploadListing.setNextKeyMarker("nextKeyMarker");
                    multipartUploadListing.setNextUploadIdMarker("nextUploadIdMarker");
                    multipartUploadListing.setTruncated(true);
                } else {
                    assertEquals("nextKeyMarker", keyMarker);
                    assertEquals("nextUploadIdMarker", uploadIdMarker);
                    MultipartUpload multipartUpload = new MultipartUpload();
                    multipartUpload.setUploadId(uploadId);
                    multipartUpload.setKey(uploadKey);
                    multipartUpload.setInitiated(uploadInitiated);
                    multipartUploadListing.getMultipartUploads().add(multipartUpload);
                }
                return multipartUploadListing;
            }
        });
        assertEquals(1, s3Dao.abortMultipartUploads(s3FileTransferRequestParamsDto, thresholdDate));
        // Assert listMultipartUploads() is called twice due to truncation
        verify(mockS3Operations, times(2)).listMultipartUploads(any(), any());
        /*
         * Assert that S3Operations.abortMultipartUpload is called exactly ONCE with arguments matching the given ArgumentMatcher
         */
        verify(mockS3Operations).abortMultipartUpload(argThat(argument -> Objects.equal(s3BucketName, argument.getBucketName()) && Objects.equal(uploadKey, argument.getKey()) && Objects.equal(uploadId, argument.getUploadId())), any());
        // Assert that no other interactions occur with the mock
        verifyNoMoreInteractions(mockS3Operations);
    } finally {
        ReflectionTestUtils.setField(s3Dao, "s3Operations", originalS3Operations);
    }
}
Use of com.amazonaws.services.s3.model.ListMultipartUploadsRequest in project herd by FINRAOS.
Class S3DaoImpl, method abortMultipartUploads.
@Override
public int abortMultipartUploads(S3FileTransferRequestParamsDto params, Date thresholdDate) {
    // Create an Amazon S3 client.
    AmazonS3Client s3Client = getAmazonS3(params);
    int abortedMultipartUploadsCount = 0;
    try {
        // List upload markers. Null implies initial list request.
        String uploadIdMarker = null;
        String keyMarker = null;
        boolean truncated;
        do {
            // Create the list multipart request, optionally using the last markers.
            ListMultipartUploadsRequest request = new ListMultipartUploadsRequest(params.getS3BucketName());
            request.setUploadIdMarker(uploadIdMarker);
            request.setKeyMarker(keyMarker);
            // Request the multipart upload listing.
            MultipartUploadListing uploadListing = s3Operations.listMultipartUploads(TransferManager.appendSingleObjectUserAgent(request), s3Client);
            for (MultipartUpload upload : uploadListing.getMultipartUploads()) {
                if (upload.getInitiated().compareTo(thresholdDate) < 0) {
                    // Abort the upload.
                    s3Operations.abortMultipartUpload(TransferManager.appendSingleObjectUserAgent(new AbortMultipartUploadRequest(params.getS3BucketName(), upload.getKey(), upload.getUploadId())), s3Client);
                    // Log the information about the aborted multipart upload.
                    LOGGER.info("Aborted S3 multipart upload. s3Key=\"{}\" s3BucketName=\"{}\" s3MultipartUploadInitiatedDate=\"{}\"", upload.getKey(), params.getS3BucketName(), upload.getInitiated());
                    // Increment the counter.
                    abortedMultipartUploadsCount++;
                }
            }
            // Determine whether there are more uploads to list.
            truncated = uploadListing.isTruncated();
            if (truncated) {
                // Record the list markers.
                uploadIdMarker = uploadListing.getNextUploadIdMarker();
                keyMarker = uploadListing.getNextKeyMarker();
            }
        } while (truncated);
    } finally {
        // Shutdown the Amazon S3 client instance to release resources.
        s3Client.shutdown();
    }
    return abortedMultipartUploadsCount;
}
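A brief usage sketch, not taken from the herd code base: aborting every multipart upload in a bucket that has been in progress for more than seven days. The s3Dao reference, bucket name, and LOGGER are placeholders assumed to be available in the calling code.

// Hypothetical caller: abort multipart uploads that were initiated more than 7 days ago.
S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
params.setS3BucketName("my-example-bucket");
Date thresholdDate = new Date(System.currentTimeMillis() - TimeUnit.DAYS.toMillis(7));
int abortedCount = s3Dao.abortMultipartUploads(params, thresholdDate);
LOGGER.info("Aborted {} stale multipart upload(s).", abortedCount);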