use of com.talend.shaded.com.amazonaws.services.s3.model.Bucket in project Synapse-Stack-Builder by Sage-Bionetworks.
In the class StackDefaultsTest, the method testLoadStackDefaultsFromS3:
@Test(expected = IllegalArgumentException.class)
public void testLoadStackDefaultsFromS3() throws IOException {
    String bucketString = config.getDefaultS3BucketName();
    // Stub bucket creation to return a bucket with the expected name.
    // (The previous version built a local Bucket that was never used and
    // stubbed the client with an unrelated anonymous Bucket instead.)
    Bucket bucket = new Bucket(bucketString);
    when(mockClient.createBucket(bucketString)).thenReturn(bucket);
    // This should fail since the expected properties are missing.
    defaults.loadStackDefaultsFromS3();
}
use of com.talend.shaded.com.amazonaws.services.s3.model.Bucket in project Synapse-Stack-Builder by Sage-Bionetworks.
In the class StackConfigurationSetupTest, the method testSetupMainFileBucket:
@Test
public void testSetupMainFileBucket() throws IOException {
    // Stub bucket creation so the client hands back our mock bucket.
    Bucket expected = mock(Bucket.class);
    when(mockClient.createBucket(config.getMainFileS3BucketName())).thenReturn(expected);

    setup.setupMainFileBucket();

    // The bucket must be created exactly once...
    verify(mockClient, times(1)).createBucket(config.getMainFileS3BucketName());
    // ...and captured in the stack resources.
    assertNotNull(resources.getMainFileS3Bucket());
    assertEquals(expected, resources.getMainFileS3Bucket());
}
use of com.talend.shaded.com.amazonaws.services.s3.model.Bucket in project Synapse-Stack-Builder by Sage-Bionetworks.
In the class StackDefaults, the method loadStackDefaultsFromS3:
/**
 * Connect to S3 and download the default properties for this stack.
 *
 * Ensures both the stack-config bucket and the default-properties bucket
 * exist, downloads the default properties file to a temp file, loads it as a
 * {@link Properties} object and validates that the expected keys are present.
 *
 * @return the validated default properties downloaded from S3
 * @throws IOException if the downloaded file cannot be read as a Properties file
 */
public Properties loadStackDefaultsFromS3() throws IOException {
    // Create the config bucket.
    String bucketName = config.getStackConfigS3BucketName();
    log.info("Creating S3 Bucket: " + bucketName);
    // This call is idempotent and will only actually create the bucket if it does not already exist.
    // The returned Bucket object is not needed here.
    s3Client.createBucket(bucketName);
    // This is the bucket where we expect to find the properties.
    bucketName = config.getDefaultS3BucketName();
    log.info("Creating S3 Bucket: " + bucketName);
    // This call is idempotent and will only actually create the bucket if it does not already exist.
    s3Client.createBucket(bucketName);
    String fileName = config.getDefaultPropertiesFileName();
    File temp = File.createTempFile("DefaultProps", ".properties");
    try {
        // Download the file to the temp file FIRST. The previous version opened a
        // FileInputStream on the (still empty) temp file before this download, so
        // the stream could read stale/empty content instead of the downloaded data.
        s3Client.getObject(new GetObjectRequest(bucketName, fileName), temp);
        Properties props = new Properties();
        // Open the stream only after the download has completed.
        FileInputStream in = new FileInputStream(temp);
        try {
            props.load(in);
        } finally {
            in.close();
        }
        // Did we get the expected properties?
        validateProperties(bucketName, fileName, props);
        // Done
        return props;
    } catch (IOException e) {
        log.error("Failed to read the '" + fileName + "' downloaded from S3 bucket: '" + bucketName + "'. Expected the file to be a java.util.Properties file");
        throw e;
    } catch (AmazonClientException e) {
        log.error("Failed to dowload the '" + fileName + "' from S3 bucket: '" + bucketName + "' make sure the file exists and try again.");
        throw e;
    } finally {
        // Delete the temp file
        temp.delete();
    }
}
use of com.talend.shaded.com.amazonaws.services.s3.model.Bucket in project photon-model by vmware.
In the class AWSS3StorageEnumerationAdapterService, the method enumerateTags:
/**
 * Calls getBucketTaggingConfiguration() on every bucket to enumerate bucket tags.
 * getBucketTaggingConfiguration() method is region aware and only returns valid results when we
 * call it with the client region same as the S3 bucket region. Since S3 bucket region is not
 * available through API, when a new bucket is discovered, we try to get tags for it by calling
 * getBucketTaggingConfiguration() with client in every AWS region. We then store the client region
 * for which we received successful response as region in DiskState for that S3 bucket. This region
 * in DiskState is then used for any subsequent calls for enumerating tags.
 */
private void enumerateTags(S3StorageEnumerationContext aws, S3StorageEnumerationSubStage next) {
    // Capture the caller's operation context so it can be restored on the worker
    // thread and inside each async completion handler.
    OperationContext operationContext = OperationContext.getOperationContext();
    this.executorService.submit(() -> {
        OperationContext.restoreOperationContext(operationContext);
        List<DeferredResult<S3ClientHandler>> s3ClientsEnumeratePreviousBuckets = new ArrayList<>();
        // For buckets already seen in a previous run: get a client for that region
        // and make a call to enumerate tags.
        for (Map.Entry<String, DiskState> entry : aws.diskStatesToBeUpdatedByBucketName.entrySet()) {
            // Reuse the previously discovered region so tag calls can be made directly
            // with valid region in subsequent enumeration runs.
            if (entry.getValue().regionId != null) {
                // NOTE(review): the key here is the DiskState id while the map is named
                // regionsByBucketName and is keyed by bucket.getName() below — confirm
                // that DiskState.id equals the bucket name for S3 disk states.
                aws.regionsByBucketName.put(entry.getValue().id, entry.getValue().regionId);
            } else {
                // A DiskState with no region is unusable; delete it so the bucket is
                // treated as new (and its region rediscovered) on the next run.
                logWarning("Null region found in S3 diskState");
                Operation.createDelete(aws.service.getHost(), entry.getValue().documentSelfLink).setReferer(aws.service.getUri()).setBody(getDeletionState(Utils.getNowMicrosUtc())).setCompletion((o, e) -> {
                    if (e != null) {
                        logWarning("Exception deleting diskState with null " + "region [ex=%s]", e.getMessage());
                        return;
                    }
                    logWarning("Deleted diskState with null region [diskState=%s]", Utils.toJsonHtml(entry.getValue()));
                }).sendWith(aws.service);
                continue;
            }
            // Asynchronously obtain a region-specific client and pair it with the
            // bucket name and DiskState for the tag-enumeration pass below.
            s3ClientsEnumeratePreviousBuckets.add(aws.clientManager.getOrCreateS3ClientAsync(aws.endpointAuth, entry.getValue().regionId, aws.service).thenApply(s3Client -> {
                S3ClientHandler s3ClientHandler = new S3ClientHandler();
                s3ClientHandler.s3Client = s3Client;
                s3ClientHandler.bucketName = entry.getKey();
                s3ClientHandler.diskState = entry.getValue();
                return s3ClientHandler;
            }));
        }
        // Handler to enumerate new buckets not previously enumerated: try every
        // region's client in turn until one answers for the bucket.
        BiConsumer<List<S3ClientHandler>, Throwable> enumerateNewBuckets = (s3ClientHandlers, throwable) -> {
            OperationContext.restoreOperationContext(operationContext);
            for (Bucket bucket : aws.bucketsToBeCreated) {
                for (S3ClientHandler s3ClientHandler : s3ClientHandlers) {
                    try {
                        BucketTaggingConfiguration bucketTaggingConfiguration = s3ClientHandler.s3Client.getBucketTaggingConfiguration(bucket.getName());
                        // Success: remember which region answered for this bucket.
                        aws.regionsByBucketName.put(bucket.getName(), s3ClientHandler.region.getName());
                        if (bucketTaggingConfiguration != null) {
                            aws.tagsByBucketName.put(bucket.getName(), new ConcurrentHashMap<>());
                            bucketTaggingConfiguration.getAllTagSets().forEach(tagSet -> aws.tagsByBucketName.get(bucket.getName()).putAll(tagSet.getAllTags()));
                        }
                        // Region found for this bucket; stop probing other regions.
                        break;
                    } catch (Exception e) {
                        // 301/403/400 indicate a region mismatch for this client; try the
                        // next region's client with getBucketTaggingConfiguration().
                        if (e instanceof AmazonS3Exception && (((AmazonS3Exception) e).getStatusCode() == Operation.STATUS_CODE_MOVED_PERM || ((AmazonS3Exception) e).getStatusCode() == Operation.STATUS_CODE_FORBIDDEN || ((AmazonS3Exception) e).getStatusCode() == Operation.STATUS_CODE_BAD_REQUEST)) {
                            continue;
                        } else {
                            // Unexpected failure: log and still move on to the next region.
                            logSevere("Exception enumerating tags for S3 bucket with unknown region " + "[endpoint=%s] [region=%s] [ex=%s]", aws.request.original.endpointLink, s3ClientHandler.region.getName(), e.getMessage());
                            continue;
                        }
                    }
                }
            }
            // Once completed, move to next stage.
            aws.subStage = next;
            handleReceivedEnumerationData(aws);
        };
        // Handler to enumerate tags for buckets already previously enumerated.
        BiConsumer<List<S3ClientHandler>, Throwable> enumeratePreviousBuckets = (s3ClientHandlers, ignored) -> {
            OperationContext.restoreOperationContext(operationContext);
            for (S3ClientHandler s3ClientHandler : s3ClientHandlers) {
                try {
                    BucketTaggingConfiguration bucketTaggingConfiguration = s3ClientHandler.s3Client.getBucketTaggingConfiguration(s3ClientHandler.bucketName);
                    if (bucketTaggingConfiguration != null) {
                        aws.tagsByBucketName.put(s3ClientHandler.bucketName, new ConcurrentHashMap<>());
                        bucketTaggingConfiguration.getAllTagSets().forEach(tagSet -> aws.tagsByBucketName.get(s3ClientHandler.bucketName).putAll(tagSet.getAllTags()));
                    }
                } catch (Exception e) {
                    // Best effort: a failure for one bucket does not abort the others.
                    logSevere("Exception enumerating tags for S3 bucket with known region " + "[endpoint=%s] [bucketName=%s - %s] [region=%s] [ex=%s]", aws.request.original.endpointLink, s3ClientHandler.bucketName, s3ClientHandler.diskState.id, s3ClientHandler.diskState.regionId, e.getMessage());
                }
            }
            // For remaining buckets, they have yet to be enumerated. Brute force and try to
            // enumerate tags for these buckets over every region until we find the correct
            // region and then store it in DiskState for future reference.
            List<DeferredResult<S3ClientHandler>> s3ClientBucketsToEnumerate = new ArrayList<>();
            for (Regions region : Regions.values()) {
                // Get an s3 client in the region asynchronously. Once all completed, these
                // clients will be used to test each bucket.
                s3ClientBucketsToEnumerate.add(aws.clientManager.getOrCreateS3ClientAsync(aws.endpointAuth, region.getName(), aws.service).thenApply(s3Client -> {
                    S3ClientHandler s3ClientHandler = new S3ClientHandler();
                    s3ClientHandler.s3Client = s3Client;
                    s3ClientHandler.region = region;
                    return s3ClientHandler;
                }));
            }
            // Once finished, attempt to enumerate each of the "new" buckets.
            DeferredResult.allOf(s3ClientBucketsToEnumerate).whenComplete(enumerateNewBuckets);
        };
        // Enumerate tags of previously enumerated buckets first; the new-bucket pass
        // is chained from inside that handler.
        DeferredResult.allOf(s3ClientsEnumeratePreviousBuckets).whenComplete(enumeratePreviousBuckets);
    });
}
use of com.talend.shaded.com.amazonaws.services.s3.model.Bucket in project photon-model by vmware.
In the class AWSS3StorageEnumerationAdapterService, the method enumerateS3Buckets:
/**
 * Enumerate S3 buckets with a synchronous listBuckets() call.
 * The AWS SDK offers no async variant for listing buckets, so the call runs on
 * this service's fixed thread pool. If listBuckets() fails with a FORBIDDEN
 * status (unsupported region), the S3 client is marked invalid and the
 * enumeration flow stops; any other failure moves the flow to the ERROR stage.
 */
private void enumerateS3Buckets(S3StorageEnumerationContext aws) {
    logInfo(() -> String.format("Running creation enumeration in refresh mode for %s", aws.request.original.resourceReference));
    // Capture the caller's context for restoration on the pool thread.
    OperationContext operationContext = OperationContext.getOperationContext();
    this.executorService.submit(() -> {
        try {
            // Index every remote bucket by name for the later sub-stages.
            for (Bucket remoteBucket : aws.amazonS3Client.listBuckets()) {
                aws.remoteBucketsByBucketName.put(remoteBucket.getName(), remoteBucket);
            }
            OperationContext.restoreOperationContext(operationContext);
            // Nothing remote at all: jump straight to deleting stale disk states.
            if (aws.remoteBucketsByBucketName.isEmpty()) {
                aws.subStage = S3StorageEnumerationSubStage.DELETE_DISKS;
            }
            handleReceivedEnumerationData(aws);
        } catch (Exception e) {
            if (e instanceof AmazonS3Exception && ((AmazonS3Exception) e).getStatusCode() == Operation.STATUS_CODE_FORBIDDEN) {
                // Region does not support listBuckets(): invalidate the client.
                markClientInvalid(aws);
            } else {
                logSevere("Exception enumerating S3 buckets for [ex=%s]", e.getMessage());
                aws.error = e;
                aws.stage = S3StorageEnumerationStages.ERROR;
                handleEnumerationRequest(aws);
            }
        }
    });
}
Aggregations