use of com.amazonaws.services.s3.transfer.Transfer in project herd by FINRAOS.
the class S3DaoImplTest method testDeleteDirectoryMultiObjectDeleteException.
@Test
public void testDeleteDirectoryMultiObjectDeleteException() {
    // Create an S3 file transfer request parameters DTO to access S3 objects.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = new S3FileTransferRequestParamsDto();
    s3FileTransferRequestParamsDto.setS3BucketName(S3_BUCKET_NAME);
    s3FileTransferRequestParamsDto.setS3KeyPrefix(S3_KEY_PREFIX);

    // Create a retry policy.
    RetryPolicy retryPolicy = new RetryPolicy(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION, PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY, INTEGER_VALUE, true);

    // Create an S3 version summary.
    S3VersionSummary s3VersionSummary = new S3VersionSummary();
    s3VersionSummary.setKey(S3_KEY);
    s3VersionSummary.setVersionId(S3_VERSION_ID);

    // Create a version listing.
    VersionListing versionListing = new VersionListing();
    versionListing.setVersionSummaries(Arrays.asList(s3VersionSummary));

    // Create a delete error.
    MultiObjectDeleteException.DeleteError deleteError = new MultiObjectDeleteException.DeleteError();
    deleteError.setKey(S3_KEY);
    deleteError.setVersionId(S3_VERSION_ID);
    deleteError.setCode(ERROR_CODE);
    deleteError.setMessage(ERROR_MESSAGE);

    // Create a multi object delete exception.
    MultiObjectDeleteException multiObjectDeleteException = new MultiObjectDeleteException(Arrays.asList(deleteError), new ArrayList<>());

    // Mock the external calls.
    when(retryPolicyFactory.getRetryPolicy()).thenReturn(retryPolicy);
    when(s3Operations.listVersions(any(ListVersionsRequest.class), any(AmazonS3Client.class))).thenReturn(versionListing);
    when(s3Operations.deleteObjects(any(DeleteObjectsRequest.class), any(AmazonS3Client.class))).thenThrow(multiObjectDeleteException);

    // Try to call the method under test.
    try {
        s3DaoImpl.deleteDirectory(s3FileTransferRequestParamsDto);
        fail();
    } catch (IllegalStateException e) {
        assertEquals(String.format("Failed to delete keys/key versions with prefix \"%s\" from bucket \"%s\". " +
            "Reason: One or more objects could not be deleted (Service: null; Status Code: 0; Error Code: null; Request ID: null; S3 Extended Request ID: null)",
            S3_KEY_PREFIX, S3_BUCKET_NAME), e.getMessage());
    }

    // Verify the external calls.
    verify(retryPolicyFactory, times(2)).getRetryPolicy();
    verify(s3Operations).listVersions(any(ListVersionsRequest.class), any(AmazonS3Client.class));
    verify(s3Operations).deleteObjects(any(DeleteObjectsRequest.class), any(AmazonS3Client.class));
    verifyNoMoreInteractionsHelper();
}
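
The two stubs above mirror the list-then-delete flow that deleteDirectory drives through S3Operations: list every key version under the prefix, then issue one batched delete for those versions. The sketch below shows that flow directly against an AmazonS3 client; it is a minimal illustration of the SDK calls being mocked, not the herd implementation, and the helper name, bucketName, and keyPrefix parameters are assumptions. A production version would also page through truncated version listings.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.DeleteObjectsRequest;
import com.amazonaws.services.s3.model.ListVersionsRequest;
import com.amazonaws.services.s3.model.S3VersionSummary;
import com.amazonaws.services.s3.model.VersionListing;
import java.util.ArrayList;
import java.util.List;

// Hypothetical helper, not part of herd: deletes all key versions under a prefix in one batch.
static void deleteKeyVersionsByPrefix(AmazonS3 amazonS3, String bucketName, String keyPrefix) {
    // List the key versions under the prefix (a single, non-paged listing for brevity).
    VersionListing versionListing = amazonS3.listVersions(new ListVersionsRequest().withBucketName(bucketName).withPrefix(keyPrefix));

    // Collect every key version returned under the prefix.
    List<DeleteObjectsRequest.KeyVersion> keyVersions = new ArrayList<>();
    for (S3VersionSummary versionSummary : versionListing.getVersionSummaries()) {
        keyVersions.add(new DeleteObjectsRequest.KeyVersion(versionSummary.getKey(), versionSummary.getVersionId()));
    }

    // A partial failure of the batched delete surfaces as MultiObjectDeleteException,
    // which is exactly what the mocked deleteObjects call in the test simulates.
    if (!keyVersions.isEmpty()) {
        amazonS3.deleteObjects(new DeleteObjectsRequest(bucketName).withKeys(keyVersions));
    }
}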
use of com.amazonaws.services.s3.transfer.Transfer in project herd by FINRAOS.
the class S3DaoImpl method performTransfer.
/**
* Performs a file/directory transfer.
*
* @param params the S3 file transfer request parameters.
* @param transferer a transferer that knows how to perform the transfer.
*
* @return the file transfer results.
* @throws InterruptedException if the sleep between transfer progress polls is interrupted.
*/
private S3FileTransferResultsDto performTransfer(final S3FileTransferRequestParamsDto params, Transferer transferer) throws InterruptedException {
    // Create a transfer manager.
    TransferManager transferManager = getTransferManager(params);
    try {
        // Start a stop watch to keep track of how long the transfer takes.
        StopWatch stopWatch = new StopWatch();
        stopWatch.start();

        // Perform the transfer.
        Transfer transfer = transferer.performTransfer(transferManager);
        TransferProgress transferProgress = transfer.getProgress();
        logTransferProgress(transferProgress);
        long stepCount = 0;

        // Loop until the transfer is complete.
        do {
            Thread.sleep(sleepIntervalsMillis);
            stepCount++;

            // Log progress status every 30 seconds and when the transfer is complete.
            if (transfer.isDone() || stepCount % 300 == 0) {
                logTransferProgress(transferProgress);
            }
        } while (!transfer.isDone());

        // Stop the stop watch and create a results object.
        stopWatch.stop();

        // If the transfer failed, throw the underlying AWS exception if we can determine one. Otherwise, throw our own exception.
        TransferState transferState = transfer.getState();
        if (transferState == TransferState.Failed) {
            // The waitForException method should return the underlying AWS exception since the state is "Failed". It should not block since the
            // transfer is already "done" per the previous "isDone" check.
            AmazonClientException amazonClientException = transfer.waitForException();

            // This is unlikely since the transfer failed, but it's better to handle the possibility just in case.
            if (amazonClientException == null) {
                throw new IllegalStateException("The transfer operation \"" + transfer.getDescription() + "\" failed for an unknown reason.");
            }

            // Throw the underlying Amazon exception.
            throw amazonClientException;
        }
        // Ensure the transfer completed. If not, throw an exception.
        else if (transferState != TransferState.Completed) {
            throw new IllegalStateException("The transfer operation \"" + transfer.getDescription() + "\" did not complete successfully. Current state: \"" + transferState + "\".");
        }

        // TransferProgress.getBytesTransferred() is not populated for S3 Copy objects.
        if (!(transfer instanceof Copy)) {
            // Sanity check for the number of bytes transferred.
            Assert.isTrue(transferProgress.getBytesTransferred() >= transferProgress.getTotalBytesToTransfer(),
                String.format("Actual number of bytes transferred is less than expected (actual: %d bytes; expected: %d bytes).",
                    transferProgress.getBytesTransferred(), transferProgress.getTotalBytesToTransfer()));
        }

        // Create the results object and populate it with the standard data.
        S3FileTransferResultsDto results = new S3FileTransferResultsDto();
        results.setDurationMillis(stopWatch.getTime());
        results.setTotalBytesTransferred(transfer.getProgress().getBytesTransferred());
        results.setTotalFilesTransferred(1L);
        if (transfer instanceof MultipleFileUpload) {
            // For a directory upload, the total number of files transferred is the number of sub-transfers.
            results.setTotalFilesTransferred((long) ((MultipleFileUpload) transfer).getSubTransfers().size());
        } else if (transfer instanceof MultipleFileDownload) {
            // For a directory download, count the transferred files by listing the downloaded key prefix.
            results.setTotalFilesTransferred((long) listDirectory(params).size());
        }

        // Return the results.
        return results;
    } finally {
        // Shutdown the transfer manager to release resources. If this isn't done, the JVM may delay upon exiting.
        transferManager.shutdownNow();
    }
}
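
performTransfer itself only polls the transfer and validates its final state; the actual work is supplied through the Transferer callback, as copyFile does below. As a rough illustration, a single-file upload could be routed through the same method as sketched here; the file variable, the key construction, and the getter names on the params DTO are assumptions for the sketch, not herd's actual upload code.

// Hypothetical caller inside S3DaoImpl, assuming a local File and DTO getters matching the setters used in the tests.
S3FileTransferResultsDto results = performTransfer(params, new Transferer() {
    @Override
    public Transfer performTransfer(TransferManager transferManager) {
        // TransferManager.upload() returns an Upload, which is a Transfer that the polling loop above can drive.
        return transferManager.upload(params.getS3BucketName(), params.getS3KeyPrefix() + "/" + file.getName(), file);
    }
});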
use of com.amazonaws.services.s3.transfer.Transfer in project herd by FINRAOS.
the class S3DaoImpl method copyFile.
@Override
public S3FileTransferResultsDto copyFile(final S3FileCopyRequestParamsDto params) throws InterruptedException {
    LOGGER.info("Copying S3 object... sourceS3Key=\"{}\" sourceS3BucketName=\"{}\" targetS3Key=\"{}\" targetS3BucketName=\"{}\"", params.getSourceObjectKey(), params.getSourceBucketName(), params.getTargetObjectKey(), params.getTargetBucketName());

    // Perform the copy.
    S3FileTransferResultsDto results = performTransfer(params, new Transferer() {

        @Override
        public Transfer performTransfer(TransferManager transferManager) {
            // Create a copy request.
            CopyObjectRequest copyObjectRequest = new CopyObjectRequest(params.getSourceBucketName(), params.getSourceObjectKey(), params.getTargetBucketName(), params.getTargetObjectKey());

            // If KMS Key ID is specified, set the AWS Key Management System parameters to be used to encrypt the object.
            if (StringUtils.isNotBlank(params.getKmsKeyId())) {
                copyObjectRequest.withSSEAwsKeyManagementParams(new SSEAwsKeyManagementParams(params.getKmsKeyId()));
            }
            // Otherwise, specify the server-side encryption algorithm for encrypting the object using AWS-managed keys.
            else {
                ObjectMetadata metadata = new ObjectMetadata();
                metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
                copyObjectRequest.setNewObjectMetadata(metadata);
            }

            return s3Operations.copyFile(copyObjectRequest, transferManager);
        }
    });

    LOGGER.info("Copied S3 object. sourceS3Key=\"{}\" sourceS3BucketName=\"{}\" targetS3Key=\"{}\" targetS3BucketName=\"{}\" totalBytesTransferred={} transferDuration=\"{}\"", params.getSourceObjectKey(), params.getSourceBucketName(), params.getTargetObjectKey(), params.getTargetBucketName(), results.getTotalBytesTransferred(), HerdDateUtils.formatDuration(results.getDurationMillis()));
    logOverallTransferRate(results);
    return results;
}
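
For reference, a typical invocation builds the request DTO with the same setters exercised by the tests below and then delegates to copyFile. This is a minimal usage sketch; the bucket names, keys, and KMS key ARN are placeholders.

// Illustrative only; all values are placeholders.
S3FileCopyRequestParamsDto params = new S3FileCopyRequestParamsDto();
params.setSourceBucketName("source-bucket");
params.setSourceObjectKey("path/to/source.txt");
params.setTargetBucketName("target-bucket");
params.setTargetObjectKey("path/to/target.txt");
params.setKmsKeyId("arn:aws:kms:us-east-1:123456789012:key/example");

// Blocks until the copy completes (copyFile declares InterruptedException), then reports bytes transferred and duration.
S3FileTransferResultsDto results = s3Dao.copyFile(params);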
use of com.amazonaws.services.s3.transfer.Transfer in project herd by FINRAOS.
the class S3DaoTest method testCopyFileInvalidKmsIdIllegalStateException.
/**
* Test S3 file copy with an invalid KMS Id that throws an IllegalStateException because no AmazonServiceException was found.
*/
@Test
public void testCopyFileInvalidKmsIdIllegalStateException() throws InterruptedException {
    // Put a 1 byte file in S3.
    s3Operations.putObject(new PutObjectRequest(storageDaoTestHelper.getS3LoadingDockBucketName(), TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), null), null);
    try {
        S3FileCopyRequestParamsDto transferDto = new S3FileCopyRequestParamsDto();
        transferDto.setSourceBucketName(storageDaoTestHelper.getS3LoadingDockBucketName());
        transferDto.setTargetBucketName(storageDaoTestHelper.getS3ExternalBucketName());
        transferDto.setSourceObjectKey(TARGET_S3_KEY);
        transferDto.setTargetObjectKey(TARGET_S3_KEY);
        transferDto.setKmsKeyId(MockS3OperationsImpl.MOCK_KMS_ID_FAILED_TRANSFER_NO_EXCEPTION);
        s3Dao.copyFile(transferDto);
        fail("An IllegalStateException was expected but not thrown.");
    } catch (IllegalStateException ex) {
        assertEquals("Invalid IllegalStateException message returned.",
            "The transfer operation \"" + MockS3OperationsImpl.MOCK_TRANSFER_DESCRIPTION + "\" failed for an unknown reason.", ex.getMessage());
    }
}
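
MOCK_KMS_ID_FAILED_TRANSFER_NO_EXCEPTION presumably makes the mocked transfer report a Failed state while waitForException() returns null, which is the branch in performTransfer that produces this message. Independent of herd's MockS3OperationsImpl, the same condition can be sketched with plain Mockito; the description string is a placeholder.

// A Transfer that is done and Failed but exposes no underlying AmazonClientException.
// Note: waitForException() declares InterruptedException, so the surrounding test method
// must declare it as well (as the tests here already do).
Transfer failedTransfer = mock(Transfer.class);
when(failedTransfer.isDone()).thenReturn(true);
when(failedTransfer.getState()).thenReturn(Transfer.TransferState.Failed);
when(failedTransfer.waitForException()).thenReturn(null);
when(failedTransfer.getDescription()).thenReturn("copy transfer (placeholder description)");
// Running performTransfer against such a transfer falls into the "failed for an unknown reason" branch.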
use of com.amazonaws.services.s3.transfer.Transfer in project herd by FINRAOS.
the class S3DaoTest method testCopyFileInvalidKmsIdCancelled.
/**
* Test S3 file copy with an invalid KMS Id that will result in a cancelled transfer.
*/
@Test
public void testCopyFileInvalidKmsIdCancelled() throws InterruptedException {
    // Put a 1 byte file in S3.
    s3Operations.putObject(new PutObjectRequest(storageDaoTestHelper.getS3LoadingDockBucketName(), TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), null), null);
    try {
        S3FileCopyRequestParamsDto transferDto = new S3FileCopyRequestParamsDto();
        transferDto.setSourceBucketName(storageDaoTestHelper.getS3LoadingDockBucketName());
        transferDto.setTargetBucketName(storageDaoTestHelper.getS3ExternalBucketName());
        transferDto.setSourceObjectKey(TARGET_S3_KEY);
        transferDto.setTargetObjectKey(TARGET_S3_KEY);
        transferDto.setKmsKeyId(MockS3OperationsImpl.MOCK_KMS_ID_CANCELED_TRANSFER);
        s3Dao.copyFile(transferDto);
        fail("An IllegalStateException was expected but not thrown.");
    } catch (IllegalStateException ex) {
        assertEquals("Invalid IllegalStateException message returned.",
            "The transfer operation \"" + MockS3OperationsImpl.MOCK_TRANSFER_DESCRIPTION + "\" did not complete successfully. " +
                "Current state: \"" + Transfer.TransferState.Canceled + "\".", ex.getMessage());
    }
}