Example usage of com.amazonaws.services.s3.AmazonS3Client in project Singularity by HubSpot:
class SingularityMainModule, method provideS3Services.
@Provides
@Singleton
public SingularityS3Services provideS3Services(Optional<S3Configuration> config) {
  // Without S3 configuration there is nothing to wire up; return a no-op holder.
  if (!config.isPresent()) {
    return new SingularityS3Services();
  }

  final S3Configuration s3Configuration = config.get();
  final ImmutableList.Builder<SingularityS3Service> s3ServiceBuilder = ImmutableList.builder();

  // Both the group overrides and the per-group search configs map a group name to
  // its own bucket + credentials; build one service per entry via the shared helper.
  for (Map.Entry<String, S3GroupConfiguration> entry : s3Configuration.getGroupOverrides().entrySet()) {
    s3ServiceBuilder.add(buildS3Service(entry.getKey(), entry.getValue()));
  }
  for (Map.Entry<String, S3GroupConfiguration> entry : s3Configuration.getGroupS3SearchConfigs().entrySet()) {
    s3ServiceBuilder.add(buildS3Service(entry.getKey(), entry.getValue()));
  }

  // The default service uses the top-level bucket and credentials from the configuration.
  SingularityS3Service defaultService = new SingularityS3Service(
      SingularityS3FormatHelper.DEFAULT_GROUP_NAME,
      s3Configuration.getS3Bucket(),
      new AmazonS3Client(new BasicAWSCredentials(s3Configuration.getS3AccessKey(), s3Configuration.getS3SecretKey())));

  return new SingularityS3Services(s3ServiceBuilder.build(), defaultService);
}

/**
 * Builds a {@link SingularityS3Service} for a single group, creating an S3 client
 * from that group's access/secret key pair.
 *
 * @param group the group name used as the service key
 * @param groupConfiguration the group's bucket and credential configuration
 * @return a service bound to the group's bucket with its own S3 client
 */
private static SingularityS3Service buildS3Service(String group, S3GroupConfiguration groupConfiguration) {
  return new SingularityS3Service(
      group,
      groupConfiguration.getS3Bucket(),
      new AmazonS3Client(new BasicAWSCredentials(groupConfiguration.getS3AccessKey(), groupConfiguration.getS3SecretKey())));
}
Example usage of com.amazonaws.services.s3.AmazonS3Client in project carina by qaprosoft:
class AmazonS3Manager, method getInstance.
/**
 * Returns the lazily created singleton, initializing the shared S3 client on first use.
 *
 * <p>Bug fix: the original assigned {@code instance} before {@code s3client} was set.
 * A second thread passing the unsynchronized outer null check could therefore observe
 * a published manager whose {@code s3client} was still null. The instance is now
 * published only after the client is fully initialized.
 *
 * @return the singleton AmazonS3Manager
 */
public static AmazonS3Manager getInstance() {
  if (instance == null) {
    synchronized (AmazonS3Manager.class) {
      if (instance == null) {
        // Export the configured credentials so the SDK's system-properties
        // credentials provider can pick them up.
        String accessKey = Configuration.get(Parameter.ACCESS_KEY_ID);
        String secretKey = Configuration.get(Parameter.SECRET_KEY);
        System.setProperty("aws.accessKeyId", accessKey);
        System.setProperty("aws.secretKey", secretKey);
        s3client = new AmazonS3Client(new SystemPropertiesCredentialsProvider());
        // Publish last so no caller ever sees a half-initialized manager.
        // NOTE(review): double-checked locking is only fully safe if the
        // `instance` field is declared volatile — confirm at the declaration site.
        instance = new AmazonS3Manager();
      }
    }
  }
  return instance;
}
Example usage of com.amazonaws.services.s3.AmazonS3Client in project herd by FINRAOS:
class S3DaoImpl, method abortMultipartUploads.
@Override
public int abortMultipartUploads(S3FileTransferRequestParamsDto params, Date thresholdDate) {
  // Obtain an S3 client for the supplied transfer parameters.
  AmazonS3Client s3Client = getAmazonS3(params);
  int abortedCount = 0;
  try {
    // Pagination markers; null values signal the initial listing request.
    String nextUploadIdMarker = null;
    String nextKeyMarker = null;
    while (true) {
      // Build the listing request, carrying forward markers from the previous page.
      ListMultipartUploadsRequest listRequest = new ListMultipartUploadsRequest(params.getS3BucketName());
      listRequest.setUploadIdMarker(nextUploadIdMarker);
      listRequest.setKeyMarker(nextKeyMarker);
      // Fetch one page of in-progress multipart uploads.
      MultipartUploadListing listing = s3Operations.listMultipartUploads(TransferManager.appendSingleObjectUserAgent(listRequest), s3Client);
      for (MultipartUpload multipartUpload : listing.getMultipartUploads()) {
        // Abort only uploads initiated strictly before the threshold date.
        if (multipartUpload.getInitiated().compareTo(thresholdDate) < 0) {
          s3Operations.abortMultipartUpload(TransferManager.appendSingleObjectUserAgent(new AbortMultipartUploadRequest(params.getS3BucketName(), multipartUpload.getKey(), multipartUpload.getUploadId())), s3Client);
          // Log the information about the aborted multipart upload.
          LOGGER.info("Aborted S3 multipart upload. s3Key=\"{}\" s3BucketName=\"{}\" s3MultipartUploadInitiatedDate=\"{}\"", multipartUpload.getKey(), params.getS3BucketName(), multipartUpload.getInitiated());
          abortedCount++;
        }
      }
      // Stop when the listing is complete; otherwise advance to the next page.
      if (!listing.isTruncated()) {
        break;
      }
      nextUploadIdMarker = listing.getNextUploadIdMarker();
      nextKeyMarker = listing.getNextKeyMarker();
    }
  } finally {
    // Release the client's resources regardless of the outcome.
    s3Client.shutdown();
  }
  return abortedCount;
}
Example usage of com.amazonaws.services.s3.AmazonS3Client in project herd by FINRAOS:
class S3DaoImpl, method deleteFileList.
@Override
public void deleteFileList(final S3FileTransferRequestParamsDto params) {
  LOGGER.info("Deleting a list of objects from S3... s3BucketName=\"{}\" s3KeyCount={}", params.getS3BucketName(), params.getFiles().size());
  try {
    // AWS rejects an empty delete request with a MalformedXML error, so only
    // issue the call when there is at least one key to delete.
    if (!params.getFiles().isEmpty()) {
      AmazonS3Client amazonS3Client = getAmazonS3(params);
      try {
        // Translate each local file path into an S3 key (forward slashes).
        List<DeleteObjectsRequest.KeyVersion> keyVersions = new ArrayList<>();
        for (File fileToDelete : params.getFiles()) {
          keyVersions.add(new DeleteObjectsRequest.KeyVersion(fileToDelete.getPath().replaceAll("\\\\", "/")));
        }
        // Issue the batched delete for all collected keys.
        deleteKeyVersions(amazonS3Client, params.getS3BucketName(), keyVersions);
      } finally {
        // Always release the client's resources.
        amazonS3Client.shutdown();
      }
    }
  } catch (Exception e) {
    throw new IllegalStateException(String.format("Failed to delete a list of keys from bucket \"%s\". Reason: %s", params.getS3BucketName(), e.getMessage()), e);
  }
}
Example usage of com.amazonaws.services.s3.AmazonS3Client in project herd by FINRAOS:
class S3DaoImpl, method validateGlacierS3FilesRestored.
@Override
public void validateGlacierS3FilesRestored(S3FileTransferRequestParamsDto params) throws RuntimeException {
  LOGGER.info("Checking for already restored Glacier storage class objects... s3KeyPrefix=\"{}\" s3BucketName=\"{}\" s3KeyCount={}", params.getS3KeyPrefix(), params.getS3BucketName(), params.getFiles().size());
  if (!CollectionUtils.isEmpty(params.getFiles())) {
    // Track the current key so the catch block can report which object failed.
    String key = params.getFiles().get(0).getPath().replaceAll("\\\\", "/");
    try {
      AmazonS3Client amazonS3Client = getAmazonS3(params);
      try {
        for (File candidateFile : params.getFiles()) {
          key = candidateFile.getPath().replaceAll("\\\\", "/");
          ObjectMetadata objectMetadata = s3Operations.getObjectMetadata(params.getS3BucketName(), key, amazonS3Client);
          // A null or true ongoing-restore flag means the object is not yet restored.
          if (BooleanUtils.isNotFalse(objectMetadata.getOngoingRestore())) {
            throw new IllegalArgumentException(String.format("Archived Glacier S3 file \"%s\" is not restored. StorageClass {%s}, OngoingRestore flag {%s}, S3 bucket name {%s}", key, objectMetadata.getStorageClass(), objectMetadata.getOngoingRestore(), params.getS3BucketName()));
          }
        }
      } finally {
        // Always release the client's resources.
        amazonS3Client.shutdown();
      }
    } catch (AmazonServiceException e) {
      throw new IllegalStateException(String.format("Fail to check restore status for \"%s\" key in \"%s\" bucket. Reason: %s", key, params.getS3BucketName(), e.getMessage()), e);
    }
  }
}
Aggregations