Use of com.amazonaws.services.s3.transfer.TransferManager in project herd by FINRAOS.
In the class MockS3OperationsImpl, the method downloadDirectory:
/**
 * {@inheritDoc}
 * <p/>
 * This implementation creates any directory that does not exist in the path to the destination directory.
 */
@Override
public MultipleFileDownload downloadDirectory(String bucketName, String keyPrefix, File destinationDirectory, TransferManager transferManager) {
    LOGGER.debug("downloadDirectory(): bucketName = " + bucketName + ", keyPrefix = " + keyPrefix + ", destinationDirectory = " + destinationDirectory);
    MockS3Bucket mockS3Bucket = mockS3Buckets.get(bucketName);
    List<Download> downloads = new ArrayList<>();
    long totalBytes = 0;
    if (mockS3Bucket != null) {
        for (MockS3Object mockS3Object : mockS3Bucket.getObjects().values()) {
            if (mockS3Object.getKey().startsWith(keyPrefix)) {
                String filePath = destinationDirectory.getAbsolutePath() + "/" + mockS3Object.getKey();
                File file = new File(filePath);
                // Create any directory in the path that does not exist.
                file.getParentFile().mkdirs();
                try (FileOutputStream fileOutputStream = new FileOutputStream(file)) {
                    LOGGER.debug("downloadDirectory(): Writing file " + file);
                    fileOutputStream.write(mockS3Object.getData());
                    totalBytes += mockS3Object.getData().length;
                    downloads.add(new DownloadImpl(null, null, null, null, null, new GetObjectRequest(bucketName, mockS3Object.getKey()), file,
                        mockS3Object.getObjectMetadata(), false));
                } catch (IOException e) {
                    throw new RuntimeException("Error writing to file " + file, e);
                }
            }
        }
    }
    TransferProgress progress = new TransferProgress();
    progress.setTotalBytesToTransfer(totalBytes);
    progress.updateProgress(totalBytes);
    MultipleFileDownloadImpl multipleFileDownload = new MultipleFileDownloadImpl(null, progress, null, keyPrefix, bucketName, downloads);
    multipleFileDownload.setState(TransferState.Completed);
    return multipleFileDownload;
}
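A hedged usage sketch follows. The mockS3Operations variable, the bucket name, and the key prefix are assumptions made for the example; the bucket is presumed to have been seeded earlier via the mock's putObject (as used by uploadFileList below), and null is passed for the TransferManager because this mock implementation does not use it.

    // Test-style fragment; assumed context is a unit test holding a MockS3OperationsImpl instance named mockS3Operations.
    File destination = new File(System.getProperty("java.io.tmpdir"), "mock-download");
    MultipleFileDownload download = mockS3Operations.downloadDirectory("test-bucket", "prefix", destination, null);
    // The mock writes the matching objects synchronously and returns an already-completed transfer.
    System.out.println("State: " + download.getState());
    System.out.println("Bytes written locally: " + download.getProgress().getBytesTransferred());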
Use of com.amazonaws.services.s3.transfer.TransferManager in project herd by FINRAOS.
In the class MockS3OperationsImpl, the method uploadFileList:
@Override
public MultipleFileUpload uploadFileList(String bucketName, String virtualDirectoryKeyPrefix, File directory, List<File> files,
    ObjectMetadataProvider metadataProvider, TransferManager transferManager) {
    LOGGER.debug("uploadFileList(): bucketName = " + bucketName + ", virtualDirectoryKeyPrefix = " + virtualDirectoryKeyPrefix + ", directory = " + directory +
        ", files = " + files);
    String directoryPath = directory.getAbsolutePath();
    long totalFileLength = 0;
    List<Upload> subTransfers = new ArrayList<>();
    for (File file : files) {
        // Get path to file relative to the specified directory.
        String relativeFilePath = file.getAbsolutePath().substring(directoryPath.length());
        // Replace any backslashes (i.e. Windows separator) with a forward slash.
        relativeFilePath = relativeFilePath.replace("\\", "/");
        // Remove any leading slashes.
        relativeFilePath = relativeFilePath.replaceAll("^/+", "");
        long fileLength = file.length();
        // Remove any trailing slashes.
        virtualDirectoryKeyPrefix = virtualDirectoryKeyPrefix.replaceAll("/+$", "");
        String s3ObjectKey = virtualDirectoryKeyPrefix + "/" + relativeFilePath;
        totalFileLength += fileLength;
        PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, s3ObjectKey, file);
        ObjectMetadata objectMetadata = new ObjectMetadata();
        metadataProvider.provideObjectMetadata(null, objectMetadata);
        putObjectRequest.setMetadata(objectMetadata);
        putObject(putObjectRequest, transferManager.getAmazonS3Client());
        subTransfers.add(new UploadImpl(null, null, null, null));
    }
    TransferProgress progress = new TransferProgress();
    progress.setTotalBytesToTransfer(totalFileLength);
    progress.updateProgress(totalFileLength);
    MultipleFileUploadImpl multipleFileUpload = new MultipleFileUploadImpl(null, progress, null, virtualDirectoryKeyPrefix, bucketName, subTransfers);
    multipleFileUpload.setState(TransferState.Completed);
    return multipleFileUpload;
}
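The interesting part of this mock is how a local file path is normalized into an S3 object key. Below is a small, self-contained illustration of that same normalization; the class name, paths, and prefix are made up for the example, and the printed result assumes POSIX-style paths.

    import java.io.File;

    // Standalone sketch of the key construction performed in the loop above (illustrative only).
    public class S3KeyConstructionSketch {
        static String toS3Key(File directory, File file, String virtualDirectoryKeyPrefix) {
            // Path of the file relative to the specified directory.
            String relativeFilePath = file.getAbsolutePath().substring(directory.getAbsolutePath().length());
            // Normalize Windows separators and strip any leading slashes.
            relativeFilePath = relativeFilePath.replace("\\", "/").replaceAll("^/+", "");
            // Strip trailing slashes from the prefix and join with a single slash.
            return virtualDirectoryKeyPrefix.replaceAll("/+$", "") + "/" + relativeFilePath;
        }

        public static void main(String[] args) {
            File directory = new File("/tmp/staging");
            File file = new File("/tmp/staging/sub/dir/file.txt");
            // Prints "app/v1/sub/dir/file.txt" on a POSIX file system.
            System.out.println(toS3Key(directory, file, "app/v1/"));
        }
    }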
Use of com.amazonaws.services.s3.transfer.TransferManager in project herd by FINRAOS.
In the class S3DaoImpl, the method performTransfer:
/**
 * Performs a file/directory transfer.
 *
 * @param params the parameters.
 * @param transferer a transferer that knows how to perform the transfer.
 *
 * @return the results.
 * @throws InterruptedException if the thread is interrupted while waiting for the transfer to complete.
 */
private S3FileTransferResultsDto performTransfer(final S3FileTransferRequestParamsDto params, Transferer transferer) throws InterruptedException {
    // Create a transfer manager.
    TransferManager transferManager = getTransferManager(params);
    try {
        // Start a stop watch to keep track of how long the transfer takes.
        StopWatch stopWatch = new StopWatch();
        stopWatch.start();
        // Perform the transfer.
        Transfer transfer = transferer.performTransfer(transferManager);
        TransferProgress transferProgress = transfer.getProgress();
        logTransferProgress(transferProgress);
        long stepCount = 0;
        // Loop until the transfer is complete.
        do {
            Thread.sleep(sleepIntervalsMillis);
            stepCount++;
            // Log progress status every 30 seconds and when the transfer is complete.
            if (transfer.isDone() || stepCount % 300 == 0) {
                logTransferProgress(transferProgress);
            }
        } while (!transfer.isDone());
        // Stop the stop watch and create a results object.
        stopWatch.stop();
        // If the transfer failed, throw the underlying AWS exception if we can determine one. Otherwise, throw our own exception.
        TransferState transferState = transfer.getState();
        if (transferState == TransferState.Failed) {
            // The waitForException method should return the underlying AWS exception since the state is "Failed". It should not block since the
            // transfer is already "done" per the previous "isDone" check.
            AmazonClientException amazonClientException = transfer.waitForException();
            // This is unlikely since the transfer failed, but it's better to handle the possibility just in case.
            if (amazonClientException == null) {
                throw new IllegalStateException("The transfer operation \"" + transfer.getDescription() + "\" failed for an unknown reason.");
            }
            // Throw the underlying Amazon exception.
            throw amazonClientException;
        } else if (transferState != TransferState.Completed) {
            // Ensure the transfer completed. If not, throw an exception.
            throw new IllegalStateException("The transfer operation \"" + transfer.getDescription() + "\" did not complete successfully. Current state: \"" + transferState + "\".");
        }
        // TransferProgress.getBytesTransferred() is not populated for S3 Copy objects.
        if (!(transfer instanceof Copy)) {
            // Sanity check for the number of bytes transferred.
            Assert.isTrue(transferProgress.getBytesTransferred() >= transferProgress.getTotalBytesToTransfer(),
                String.format("Actual number of bytes transferred is less than expected (actual: %d bytes; expected: %d bytes).",
                    transferProgress.getBytesTransferred(), transferProgress.getTotalBytesToTransfer()));
        }
        // Create the results object and populate it with the standard data.
        S3FileTransferResultsDto results = new S3FileTransferResultsDto();
        results.setDurationMillis(stopWatch.getTime());
        results.setTotalBytesTransferred(transfer.getProgress().getBytesTransferred());
        results.setTotalFilesTransferred(1L);
        if (transfer instanceof MultipleFileUpload) {
            // For an upload directory operation, the total number of files transferred is the number of sub-transfers.
            results.setTotalFilesTransferred((long) ((MultipleFileUpload) transfer).getSubTransfers().size());
        } else if (transfer instanceof MultipleFileDownload) {
            // For a download directory operation, the total number of files is the number of keys listed for the same parameters.
            results.setTotalFilesTransferred((long) listDirectory(params).size());
        }
        // Return the results.
        return results;
    } finally {
        // Shutdown the transfer manager to release resources. If this isn't done, the JVM may delay upon exiting.
        transferManager.shutdownNow();
    }
}
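Other S3DaoImpl operations hook into this polling loop through the same Transferer callback that copyFile uses below. As a hedged illustration only, an upload-style caller might pass something like the fragment here; the uploadDirectory call is the stock AWS SDK v1 TransferManager method, while the bucket, prefix, and local path are made-up placeholders rather than code taken from herd.

    // Illustrative fragment inside S3DaoImpl, where the Transferer interface is defined; not the actual herd implementation.
    S3FileTransferResultsDto results = performTransfer(params, new Transferer() {
        @Override
        public Transfer performTransfer(TransferManager transferManager) {
            // Upload a local directory under the given S3 key prefix, including subdirectories (placeholder values).
            return transferManager.uploadDirectory("example-bucket", "example/key/prefix", new File("/local/staging/dir"), true);
        }
    });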
Use of com.amazonaws.services.s3.transfer.TransferManager in project herd by FINRAOS.
In the class S3DaoImpl, the method copyFile:
@Override
public S3FileTransferResultsDto copyFile(final S3FileCopyRequestParamsDto params) throws InterruptedException {
    LOGGER.info("Copying S3 object... sourceS3Key=\"{}\" sourceS3BucketName=\"{}\" targetS3Key=\"{}\" targetS3BucketName=\"{}\"",
        params.getSourceObjectKey(), params.getSourceBucketName(), params.getTargetObjectKey(), params.getTargetBucketName());
    // Perform the copy.
    S3FileTransferResultsDto results = performTransfer(params, new Transferer() {
        @Override
        public Transfer performTransfer(TransferManager transferManager) {
            // Create a copy request.
            CopyObjectRequest copyObjectRequest =
                new CopyObjectRequest(params.getSourceBucketName(), params.getSourceObjectKey(), params.getTargetBucketName(), params.getTargetObjectKey());
            // If KMS Key ID is specified, set the AWS Key Management System parameters to be used to encrypt the object.
            if (StringUtils.isNotBlank(params.getKmsKeyId())) {
                copyObjectRequest.withSSEAwsKeyManagementParams(new SSEAwsKeyManagementParams(params.getKmsKeyId()));
            } else {
                // Otherwise, specify the server-side encryption algorithm for encrypting the object using AWS-managed keys.
                ObjectMetadata metadata = new ObjectMetadata();
                metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
                copyObjectRequest.setNewObjectMetadata(metadata);
            }
            return s3Operations.copyFile(copyObjectRequest, transferManager);
        }
    });
    LOGGER.info("Copied S3 object. sourceS3Key=\"{}\" sourceS3BucketName=\"{}\" targetS3Key=\"{}\" targetS3BucketName=\"{}\" " +
        "totalBytesTransferred={} transferDuration=\"{}\"", params.getSourceObjectKey(), params.getSourceBucketName(), params.getTargetObjectKey(),
        params.getTargetBucketName(), results.getTotalBytesTransferred(), HerdDateUtils.formatDuration(results.getDurationMillis()));
    logOverallTransferRate(results);
    return results;
}
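A caller-side sketch of invoking this method. The setter names on S3FileCopyRequestParamsDto are inferred from the getters used above, the s3Dao variable stands in for an injected instance of this class, and the bucket names, keys, and KMS key ARN are placeholders.

    // Hedged fragment: setter names are assumed to mirror the getters used in copyFile above.
    S3FileCopyRequestParamsDto params = new S3FileCopyRequestParamsDto();
    params.setSourceBucketName("source-bucket");
    params.setSourceObjectKey("path/to/source.dat");
    params.setTargetBucketName("target-bucket");
    params.setTargetObjectKey("path/to/copy.dat");
    // Optional: encrypt the copy with a customer-managed KMS key instead of the default AES-256 SSE (placeholder ARN).
    params.setKmsKeyId("arn:aws:kms:us-east-1:111122223333:key/placeholder-key-id");

    S3FileTransferResultsDto results = s3Dao.copyFile(params);
    LOGGER.info("Copy of {} bytes took {} ms", results.getTotalBytesTransferred(), results.getDurationMillis());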
Use of com.amazonaws.services.s3.transfer.TransferManager in project photon-model by vmware.
In the class AWSUtils, the method getS3TransferManager:
/**
 * Method to get an S3 transfer manager client.
 *
 * Note: ARN-based credentials will not work unless they have already been exchanged to
 * AWS for session credentials. If session credentials have not been set, this method will
 * fail. To enable ARN-based credentials, migrate to
 * {@link #getS3TransferManagerAsync(AuthCredentialsServiceState, String, ExecutorService)}.
 *
 * @param credentials An {@link AuthCredentialsServiceState} object.
 * @param region The region to get the AWS client in.
 * @param executorService The executor service to run async services in.
 */
public static TransferManager getS3TransferManager(AuthCredentialsServiceState credentials, String region, ExecutorService executorService) {
    AmazonS3ClientBuilder amazonS3ClientBuilder = AmazonS3ClientBuilder.standard()
        .withCredentials(getAwsStaticCredentialsProvider(credentials))
        .withForceGlobalBucketAccessEnabled(true);
    if (region == null) {
        region = Regions.DEFAULT_REGION.getName();
    }
    if (isAwsS3Proxy()) {
        AwsClientBuilder.EndpointConfiguration endpointConfiguration = new AwsClientBuilder.EndpointConfiguration(getAwsS3ProxyHost(), region);
        amazonS3ClientBuilder.setEndpointConfiguration(endpointConfiguration);
    } else {
        amazonS3ClientBuilder.setRegion(region);
    }
    TransferManagerBuilder transferManagerBuilder = TransferManagerBuilder.standard()
        .withS3Client(amazonS3ClientBuilder.build())
        .withExecutorFactory(() -> executorService)
        .withShutDownThreadPools(false);
    return transferManagerBuilder.build();
}
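A hedged usage sketch, shown as a fragment. It assumes the xenon AuthCredentialsServiceState carries the AWS access key id in privateKeyId and the secret key in privateKey (the convention getAwsStaticCredentialsProvider relies on); the credential values, bucket, key, and file path are placeholders.

    // Fragment assumed to run inside a method that declares "throws Exception".
    AuthCredentialsServiceState credentials = new AuthCredentialsServiceState();
    credentials.privateKeyId = "AKIA_PLACEHOLDER";   // access key id (placeholder)
    credentials.privateKey = "SECRET_PLACEHOLDER";   // secret access key (placeholder)

    ExecutorService executorService = Executors.newFixedThreadPool(4);
    TransferManager transferManager = AWSUtils.getS3TransferManager(credentials, Regions.US_EAST_1.getName(), executorService);
    try {
        Upload upload = transferManager.upload("example-bucket", "example/key.bin", new File("/tmp/example.bin"));
        upload.waitForCompletion();
    } finally {
        // withShutDownThreadPools(false) leaves the executor under the caller's control, so shut both down explicitly.
        transferManager.shutdownNow();
        executorService.shutdown();
    }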