Usage of org.finra.herd.model.dto.S3FileTransferResultsDto in the herd project by FINRAOS: the downloadDirectory method of the S3DaoImpl class.
@Override
public S3FileTransferResultsDto downloadDirectory(final S3FileTransferRequestParamsDto params) throws InterruptedException {
    LOGGER.info("Downloading S3 directory to the local system... s3KeyPrefix=\"{}\" s3BucketName=\"{}\" localDirectory=\"{}\"",
        params.getS3KeyPrefix(), params.getS3BucketName(), params.getLocalPath());

    // Directory downloads always copy sub-directories recursively. A non-recursive variant would require listing
    // the objects on S3 (AmazonS3Client.html#listObjects) and copying them one at a time.

    // Delegate the actual work to the common transfer harness.
    S3FileTransferResultsDto transferResults = performTransfer(params, new Transferer() {
        @Override
        public Transfer performTransfer(TransferManager transferManager) {
            File localDirectory = new File(params.getLocalPath());
            return s3Operations.downloadDirectory(params.getS3BucketName(), params.getS3KeyPrefix(), localDirectory, transferManager);
        }
    });

    LOGGER.info("Downloaded S3 directory to the local system. " +
        "s3KeyPrefix=\"{}\" s3BucketName=\"{}\" localDirectory=\"{}\" s3KeyCount={} totalBytesTransferred={} transferDuration=\"{}\"",
        params.getS3KeyPrefix(), params.getS3BucketName(), params.getLocalPath(), transferResults.getTotalFilesTransferred(),
        transferResults.getTotalBytesTransferred(), HerdDateUtils.formatDuration(transferResults.getDurationMillis()));
    logOverallTransferRate(transferResults);

    return transferResults;
}
Usage of org.finra.herd.model.dto.S3FileTransferResultsDto in the herd project by FINRAOS: the performTransfer method of the S3DaoImpl class.
/**
 * Performs a file/directory transfer and blocks until it completes.
 *
 * @param params the transfer request parameters, used to create the transfer manager and, for directory
 *     downloads, to count the transferred files
 * @param transferer a transferer that knows how to start the specific transfer
 *
 * @return the results of the completed transfer (duration, bytes, and file count)
 * @throws InterruptedException if the polling sleep is interrupted
 */
private S3FileTransferResultsDto performTransfer(final S3FileTransferRequestParamsDto params, Transferer transferer) throws InterruptedException {
    // Create a transfer manager.
    TransferManager transferManager = getTransferManager(params);

    try {
        // Start a stop watch to keep track of how long the transfer takes.
        StopWatch stopWatch = new StopWatch();
        stopWatch.start();

        // Start the transfer and log its initial progress.
        Transfer transfer = transferer.performTransfer(transferManager);
        TransferProgress transferProgress = transfer.getProgress();
        logTransferProgress(transferProgress);

        // Poll until the transfer is complete. Using a while loop (rather than the previous do-while) avoids an
        // unnecessary sleepIntervalsMillis delay when the transfer is already done on the first check, which
        // matters for small transfers and for mocked S3 operations in tests.
        long stepCount = 0;
        while (!transfer.isDone()) {
            Thread.sleep(sleepIntervalsMillis);
            stepCount++;

            // Log progress status periodically — every 300 polling intervals (about 30 seconds, assuming the
            // sleep interval is configured to 100 ms — TODO confirm against configuration).
            if (stepCount % 300 == 0) {
                logTransferProgress(transferProgress);
            }
        }

        // Log the final progress now that the transfer is done.
        logTransferProgress(transferProgress);

        // Stop the stop watch now that the transfer is complete.
        stopWatch.stop();

        // If the transfer failed, throw the underlying AWS exception if we can determine one. Otherwise, throw our own exception.
        TransferState transferState = transfer.getState();
        if (transferState == TransferState.Failed) {
            // The waitForException method should return the underlying AWS exception since the state is "Failed". It should not
            // block since the transfer is already "done" per the loop above.
            AmazonClientException amazonClientException = transfer.waitForException();

            // This is unlikely since the transfer failed, but it's better to handle the possibility just in case.
            if (amazonClientException == null) {
                throw new IllegalStateException("The transfer operation \"" + transfer.getDescription() + "\" failed for an unknown reason.");
            }

            // Throw the underlying Amazon exception.
            throw amazonClientException;
        }
        // Ensure the transfer completed. If not, throw an exception.
        else if (transferState != TransferState.Completed) {
            throw new IllegalStateException("The transfer operation \"" + transfer.getDescription() +
                "\" did not complete successfully. Current state: \"" + transferState + "\".");
        }

        // TransferProgress.getBytesTransferred() is not populated for S3 Copy objects, so skip the sanity check for copies.
        if (!(transfer instanceof Copy)) {
            // Sanity check for the number of bytes transferred.
            Assert.isTrue(transferProgress.getBytesTransferred() >= transferProgress.getTotalBytesToTransfer(), String.format(
                "Actual number of bytes transferred is less than expected (actual: %d bytes; expected: %d bytes).",
                transferProgress.getBytesTransferred(), transferProgress.getTotalBytesToTransfer()));
        }

        // Create the results object and populate it with the standard data. Reuse the transferProgress handle
        // instead of re-fetching it from the transfer (previously fetched twice, inconsistently).
        S3FileTransferResultsDto results = new S3FileTransferResultsDto();
        results.setDurationMillis(stopWatch.getTime());
        results.setTotalBytesTransferred(transferProgress.getBytesTransferred());
        results.setTotalFilesTransferred(1L);

        if (transfer instanceof MultipleFileUpload) {
            // For a directory upload, count the files from the sub-transfers.
            results.setTotalFilesTransferred((long) ((MultipleFileUpload) transfer).getSubTransfers().size());
        } else if (transfer instanceof MultipleFileDownload) {
            // For a directory download, count the files by listing the S3 key prefix.
            results.setTotalFilesTransferred((long) listDirectory(params).size());
        }

        // Return the results.
        return results;
    } finally {
        // Shutdown the transfer manager to release resources. If this isn't done, the JVM may delay upon exiting.
        transferManager.shutdownNow();
    }
}
Usage of org.finra.herd.model.dto.S3FileTransferResultsDto in the herd project by FINRAOS: the copyFile method of the S3DaoImpl class.
@Override
public S3FileTransferResultsDto copyFile(final S3FileCopyRequestParamsDto params) throws InterruptedException {
    LOGGER.info("Copying S3 object... sourceS3Key=\"{}\" sourceS3BucketName=\"{}\" targetS3Key=\"{}\" targetS3BucketName=\"{}\"",
        params.getSourceObjectKey(), params.getSourceBucketName(), params.getTargetObjectKey(), params.getTargetBucketName());

    // Delegate to the common transfer harness, supplying a transferer that starts the S3 copy.
    S3FileTransferResultsDto copyResults = performTransfer(params, new Transferer() {
        @Override
        public Transfer performTransfer(TransferManager transferManager) {
            // Build the copy request from the source and target coordinates.
            CopyObjectRequest copyObjectRequest = new CopyObjectRequest(
                params.getSourceBucketName(), params.getSourceObjectKey(), params.getTargetBucketName(), params.getTargetObjectKey());

            String kmsKeyId = params.getKmsKeyId();
            if (StringUtils.isNotBlank(kmsKeyId)) {
                // A KMS key id is specified, so encrypt the target object with that key via the
                // AWS Key Management System parameters.
                copyObjectRequest.withSSEAwsKeyManagementParams(new SSEAwsKeyManagementParams(kmsKeyId));
            } else {
                // No KMS key id, so fall back to server-side encryption with AWS-managed keys.
                ObjectMetadata metadata = new ObjectMetadata();
                metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
                copyObjectRequest.setNewObjectMetadata(metadata);
            }

            return s3Operations.copyFile(copyObjectRequest, transferManager);
        }
    });

    LOGGER.info("Copied S3 object. sourceS3Key=\"{}\" sourceS3BucketName=\"{}\" targetS3Key=\"{}\" targetS3BucketName=\"{}\" " +
        "totalBytesTransferred={} transferDuration=\"{}\"", params.getSourceObjectKey(), params.getSourceBucketName(),
        params.getTargetObjectKey(), params.getTargetBucketName(), copyResults.getTotalBytesTransferred(),
        HerdDateUtils.formatDuration(copyResults.getDurationMillis()));
    logOverallTransferRate(copyResults);

    return copyResults;
}
Usage of org.finra.herd.model.dto.S3FileTransferResultsDto in the herd project by FINRAOS: the testCopyFile method of the S3DaoTest class.
/**
 * Test S3 file copy without any errors.
 */
@Test
public void testCopyFile() throws InterruptedException {
    // Stage a 1 byte source object in the loading dock S3 bucket.
    s3Operations.putObject(new PutObjectRequest(storageDaoTestHelper.getS3LoadingDockBucketName(), TARGET_S3_KEY,
        new ByteArrayInputStream(new byte[1]), null), null);

    // Build the copy request from the loading dock bucket to the external bucket, using the mock KMS key.
    S3FileCopyRequestParamsDto copyParams = new S3FileCopyRequestParamsDto();
    copyParams.setSourceBucketName(storageDaoTestHelper.getS3LoadingDockBucketName());
    copyParams.setSourceObjectKey(TARGET_S3_KEY);
    copyParams.setTargetBucketName(storageDaoTestHelper.getS3ExternalBucketName());
    copyParams.setTargetObjectKey(TARGET_S3_KEY);
    copyParams.setKmsKeyId(MockS3OperationsImpl.MOCK_KMS_ID);

    // Copy the object and verify that exactly one file was transferred.
    S3FileTransferResultsDto copyResults = s3Dao.copyFile(copyParams);
    assertEquals(Long.valueOf(1L), copyResults.getTotalFilesTransferred());
}
Usage of org.finra.herd.model.dto.S3FileTransferResultsDto in the herd project by FINRAOS: the testUploadFileWithMaxThreadsSet method of the S3DaoTest class.
/**
 * Test that we are able to perform the uploadFile S3Dao operation with the specified maximum number of threads set to some value.
 */
@Test
public void testUploadFileWithMaxThreadsSet() throws IOException, InterruptedException {
    // Create local test file.
    File targetFile = createLocalFile(localTempPath.toString(), LOCAL_FILE, FILE_SIZE_1_KB);
    Assert.assertTrue(targetFile.isFile());
    // Use assertEquals instead of assertTrue on "==" so a failure reports the actual file size.
    Assert.assertEquals(FILE_SIZE_1_KB, targetFile.length());

    // Upload the test file through s3Dao with the maximum number of threads set.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = s3DaoTestHelper.getTestS3FileTransferRequestParamsDto();
    s3FileTransferRequestParamsDto.setS3KeyPrefix(TARGET_S3_KEY);
    s3FileTransferRequestParamsDto.setLocalPath(targetFile.getPath());
    s3FileTransferRequestParamsDto.setMaxThreads(3);
    S3FileTransferResultsDto results = s3Dao.uploadFile(s3FileTransferRequestParamsDto);

    // Validate results: exactly one file should have been transferred. assertEquals avoids the silent Long
    // unboxing of "assertTrue(x == 1L)" and produces a diagnostic message on failure.
    Assert.assertEquals(Long.valueOf(1L), results.getTotalFilesTransferred());

    // Validate the file upload.
    s3DaoTestHelper.validateS3FileUpload(s3FileTransferRequestParamsDto, Arrays.asList(TARGET_S3_KEY));
}
Aggregations