use of com.amazonaws.services.s3.model.Region in project photon-model by vmware.
the class AWSClientManager method getOrCreateEC2Client.
/**
* Accesses the client cache to get the EC2 client for the given auth credentials and regionId.
* If a client is not found to exist, creates a new one and adds an entry in the cache for it.
*
* Note: ARN-based credentials will not be accepted unless they have already been exchanged
* with AWS for session credentials. If they have not been exchanged, this method throws an
* {@link UnsupportedOperationException}. To support ARN-based credentials directly, migrate
* to {@link #getOrCreateEC2ClientAsync(AuthCredentialsServiceState, String,
* StatelessService)}.
*
* @param credentials The auth credentials to be used for the client creation
* @param regionId The region of the AWS client
* @param service The stateless service making the request, for which the executor pool is allocated
* @param failConsumer Consumer invoked with the failure if client creation throws
* @return The EC2 async client, or null if client creation failed
*/
public AmazonEC2AsyncClient getOrCreateEC2Client(AuthCredentialsServiceState credentials,
        String regionId, StatelessService service, Consumer<Throwable> failConsumer) {
    if (this.awsClientType != AwsClientType.EC2) {
        throw new UnsupportedOperationException(
                "This client manager supports only AWS " + this.awsClientType + " clients.");
    }
    if (isArnCredentials(credentials) && !isSetCredentials(credentials)) {
        throw new UnsupportedOperationException(
                "For ARN-based credentials, exchange for session-based access key/secret key"
                        + " first before retrieving the client.");
    }
    AmazonEC2AsyncClient amazonEC2Client = null;
    String cacheKey = createCredentialRegionCacheKey(credentials, regionId);
    try {
        amazonEC2Client = this.ec2ClientCache.computeIfAbsent(cacheKey,
                key -> AWSUtils.getAsyncClient(credentials, regionId, getExecutor()));
    } catch (Throwable e) {
        service.logSevere(e);
        failConsumer.accept(e);
    }
    return amazonEC2Client;
}
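To show how a caller might use this method, here is a minimal sketch from inside a StatelessService; the credential values, region and logging are illustrative, and clientManager is assumed to have been created for AwsClientType.EC2:

import com.amazonaws.handlers.AsyncHandler;
import com.amazonaws.services.ec2.AmazonEC2AsyncClient;
import com.amazonaws.services.ec2.model.DescribeInstancesRequest;
import com.amazonaws.services.ec2.model.DescribeInstancesResult;
import com.vmware.xenon.services.common.AuthCredentialsService.AuthCredentialsServiceState;

// Illustrative caller; key values are placeholders, not real credentials.
AuthCredentialsServiceState credentials = new AuthCredentialsServiceState();
credentials.privateKeyId = "SESSION_ACCESS_KEY_ID"; // session keys, not an ARN
credentials.privateKey = "SESSION_SECRET_KEY";

AmazonEC2AsyncClient client = clientManager.getOrCreateEC2Client(credentials, "us-east-1",
        this, e -> logSevere("EC2 client creation failed: %s", e.getMessage()));
if (client != null) {
    // Repeated calls with the same credentials/region return the cached client.
    client.describeInstancesAsync(new DescribeInstancesRequest(),
            new AsyncHandler<DescribeInstancesRequest, DescribeInstancesResult>() {
                @Override
                public void onError(Exception e) {
                    logSevere("describeInstances failed: %s", e.getMessage());
                }

                @Override
                public void onSuccess(DescribeInstancesRequest request,
                        DescribeInstancesResult result) {
                    logInfo("Found %d reservations", result.getReservations().size());
                }
            });
}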
use of com.amazonaws.services.s3.model.Region in project photon-model by vmware.
the class AWSS3StorageEnumerationAdapterService method enumerateTags.
/**
 * Calls getBucketTaggingConfiguration() on every bucket to enumerate bucket tags.
 * getBucketTaggingConfiguration() is region aware and only returns valid results when it is
 * called with a client whose region matches the S3 bucket's region. Since the bucket region
 * is not available through the API, when a new bucket is discovered we try to get its tags
 * by calling getBucketTaggingConfiguration() with a client in every AWS region. We then
 * store the client region that received a successful response as the region in the DiskState
 * for that S3 bucket. The region in the DiskState is used for any subsequent
 * tag-enumeration calls.
 */
private void enumerateTags(S3StorageEnumerationContext aws, S3StorageEnumerationSubStage next) {
    OperationContext operationContext = OperationContext.getOperationContext();
    this.executorService.submit(() -> {
        OperationContext.restoreOperationContext(operationContext);
        List<DeferredResult<S3ClientHandler>> s3ClientsEnumeratePreviousBuckets = new ArrayList<>();
        // For buckets enumerated in a previous run, reuse the stored region: get a client
        // for that region and make a call to enumerate tags.
        for (Map.Entry<String, DiskState> entry : aws.diskStatesToBeUpdatedByBucketName.entrySet()) {
            if (entry.getValue().regionId != null) {
                aws.regionsByBucketName.put(entry.getValue().id, entry.getValue().regionId);
            } else {
                // A diskState without a region cannot be enumerated; delete it so the
                // bucket is re-created with a valid region in subsequent enumeration runs.
                logWarning("Null region found in S3 diskState");
                Operation.createDelete(aws.service.getHost(), entry.getValue().documentSelfLink)
                        .setReferer(aws.service.getUri())
                        .setBody(getDeletionState(Utils.getNowMicrosUtc()))
                        .setCompletion((o, e) -> {
                            if (e != null) {
                                logWarning("Exception deleting diskState with null region [ex=%s]",
                                        e.getMessage());
                                return;
                            }
                            logWarning("Deleted diskState with null region [diskState=%s]",
                                    Utils.toJsonHtml(entry.getValue()));
                        })
                        .sendWith(aws.service);
                continue;
            }
            s3ClientsEnumeratePreviousBuckets.add(aws.clientManager
                    .getOrCreateS3ClientAsync(aws.endpointAuth, entry.getValue().regionId, aws.service)
                    .thenApply(s3Client -> {
                        S3ClientHandler s3ClientHandler = new S3ClientHandler();
                        s3ClientHandler.s3Client = s3Client;
                        s3ClientHandler.bucketName = entry.getKey();
                        s3ClientHandler.diskState = entry.getValue();
                        return s3ClientHandler;
                    }));
        }
        // Handler to enumerate new buckets not previously enumerated.
        BiConsumer<List<S3ClientHandler>, Throwable> enumerateNewBuckets = (s3ClientHandlers, throwable) -> {
            OperationContext.restoreOperationContext(operationContext);
            for (Bucket bucket : aws.bucketsToBeCreated) {
                for (S3ClientHandler s3ClientHandler : s3ClientHandlers) {
                    try {
                        BucketTaggingConfiguration bucketTaggingConfiguration = s3ClientHandler.s3Client
                                .getBucketTaggingConfiguration(bucket.getName());
                        aws.regionsByBucketName.put(bucket.getName(),
                                s3ClientHandler.region.getName());
                        if (bucketTaggingConfiguration != null) {
                            aws.tagsByBucketName.put(bucket.getName(), new ConcurrentHashMap<>());
                            bucketTaggingConfiguration.getAllTagSets()
                                    .forEach(tagSet -> aws.tagsByBucketName.get(bucket.getName())
                                            .putAll(tagSet.getAllTags()));
                        }
                        break;
                    } catch (Exception e) {
                        // A 301/400/403 means the client region does not match the bucket
                        // region; try the next region's client for
                        // getBucketTaggingConfiguration().
                        if (e instanceof AmazonS3Exception
                                && (((AmazonS3Exception) e).getStatusCode() == Operation.STATUS_CODE_MOVED_PERM
                                        || ((AmazonS3Exception) e).getStatusCode() == Operation.STATUS_CODE_FORBIDDEN
                                        || ((AmazonS3Exception) e).getStatusCode() == Operation.STATUS_CODE_BAD_REQUEST)) {
                            continue;
                        } else {
                            logSevere("Exception enumerating tags for S3 bucket with unknown region "
                                    + "[endpoint=%s] [region=%s] [ex=%s]",
                                    aws.request.original.endpointLink,
                                    s3ClientHandler.region.getName(), e.getMessage());
                            continue;
                        }
                    }
                }
            }
            // Once completed, move to next stage.
            aws.subStage = next;
            handleReceivedEnumerationData(aws);
        };
        // Handler to enumerate tags for buckets already previously enumerated.
        BiConsumer<List<S3ClientHandler>, Throwable> enumeratePreviousBuckets = (s3ClientHandlers, ignored) -> {
            OperationContext.restoreOperationContext(operationContext);
            for (S3ClientHandler s3ClientHandler : s3ClientHandlers) {
                try {
                    BucketTaggingConfiguration bucketTaggingConfiguration = s3ClientHandler.s3Client
                            .getBucketTaggingConfiguration(s3ClientHandler.bucketName);
                    if (bucketTaggingConfiguration != null) {
                        aws.tagsByBucketName.put(s3ClientHandler.bucketName, new ConcurrentHashMap<>());
                        bucketTaggingConfiguration.getAllTagSets()
                                .forEach(tagSet -> aws.tagsByBucketName.get(s3ClientHandler.bucketName)
                                        .putAll(tagSet.getAllTags()));
                    }
                } catch (Exception e) {
                    logSevere("Exception enumerating tags for S3 bucket with known region "
                            + "[endpoint=%s] [bucketName=%s - %s] [region=%s] [ex=%s]",
                            aws.request.original.endpointLink, s3ClientHandler.bucketName,
                            s3ClientHandler.diskState.id, s3ClientHandler.diskState.regionId,
                            e.getMessage());
                }
            }
            // The remaining buckets have yet to be enumerated. Brute force: try to
            // enumerate tags for these buckets in every region until we find the correct
            // one, then store it in the DiskState for future reference.
            List<DeferredResult<S3ClientHandler>> s3ClientBucketsToEnumerate = new ArrayList<>();
            for (Regions region : Regions.values()) {
                // Get an S3 client for the region asynchronously. Once all have completed,
                // these clients are used to test each bucket.
                s3ClientBucketsToEnumerate.add(aws.clientManager
                        .getOrCreateS3ClientAsync(aws.endpointAuth, region.getName(), aws.service)
                        .thenApply(s3Client -> {
                            S3ClientHandler s3ClientHandler = new S3ClientHandler();
                            s3ClientHandler.s3Client = s3Client;
                            s3ClientHandler.region = region;
                            return s3ClientHandler;
                        }));
            }
            // Once finished, attempt to enumerate each of the "new" buckets.
            DeferredResult.allOf(s3ClientBucketsToEnumerate).whenComplete(enumerateNewBuckets);
        };
        // Enumerate tags of previously enumerated buckets first.
        DeferredResult.allOf(s3ClientsEnumeratePreviousBuckets).whenComplete(enumeratePreviousBuckets);
    });
}
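The region-discovery trick above can be shown in isolation. A minimal sketch, assuming plain AmazonS3ClientBuilder clients rather than the service's pooled async clients; probeBucketRegion is an illustrative helper name, not part of the service:

import com.amazonaws.regions.Regions;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;

static String probeBucketRegion(String bucketName) {
    for (Regions region : Regions.values()) {
        AmazonS3 s3 = AmazonS3ClientBuilder.standard().withRegion(region).build();
        try {
            // Only succeeds (possibly returning null tags) when the client
            // region matches the bucket region.
            s3.getBucketTaggingConfiguration(bucketName);
            return region.getName();
        } catch (Exception e) {
            // Region mismatch (301/400/403) or unreachable partition; try the next region.
        } finally {
            s3.shutdown();
        }
    }
    return null; // region could not be determined
}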
use of com.amazonaws.services.s3.model.Region in project photon-model by vmware.
the class AWSS3StorageEnumerationAdapterService method enumerateS3Buckets.
/**
 * Calls the listBuckets() method to enumerate S3 buckets.
 * The AWS SDK does not have an async method for listing buckets, so we run the synchronous
 * method in a fixed thread pool dedicated to the S3 enumeration service.
 * If the listBuckets() call fails due to an unsupported region, we mark the S3 client
 * invalid, stop the enumeration flow and patch back to the parent.
 */
private void enumerateS3Buckets(S3StorageEnumerationContext aws) {
    logInfo(() -> String.format("Running creation enumeration in refresh mode for %s",
            aws.request.original.resourceReference));
    OperationContext operationContext = OperationContext.getOperationContext();
    this.executorService.submit(() -> {
        try {
            List<Bucket> bucketList = aws.amazonS3Client.listBuckets();
            for (Bucket bucket : bucketList) {
                aws.remoteBucketsByBucketName.put(bucket.getName(), bucket);
            }
            OperationContext.restoreOperationContext(operationContext);
            if (aws.remoteBucketsByBucketName.isEmpty()) {
                aws.subStage = S3StorageEnumerationSubStage.DELETE_DISKS;
            }
            handleReceivedEnumerationData(aws);
        } catch (Exception e) {
            if (e instanceof AmazonS3Exception
                    && ((AmazonS3Exception) e).getStatusCode() == Operation.STATUS_CODE_FORBIDDEN) {
                markClientInvalid(aws);
            } else {
                logSevere("Exception enumerating S3 buckets [ex=%s]", e.getMessage());
                aws.error = e;
                aws.stage = S3StorageEnumerationStages.ERROR;
                handleEnumerationRequest(aws);
            }
        }
    });
}
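The same wrap-a-blocking-call-in-an-executor pattern, reduced to its essentials; a sketch assuming a plain ExecutorService and CompletableFuture instead of the service's DeferredResult plumbing, with an illustrative pool size:

import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.Bucket;

ExecutorService executor = Executors.newFixedThreadPool(4); // pool size is an assumption

CompletableFuture<List<Bucket>> listBucketsAsync(AmazonS3 s3) {
    // listBuckets() is blocking; run it off the calling thread.
    return CompletableFuture.supplyAsync(s3::listBuckets, executor);
}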
use of com.amazonaws.services.s3.model.Region in project photon-model by vmware.
the class AWSS3StorageEnumerationAdapterService method createDiskStates.
/**
 * Creates the disk states that represent the buckets received from AWS during enumeration.
 * The fields currently being enumerated for S3 are all immutable on the AWS side, so we only
 * create disks and don't patch them in subsequent runs, except for changes in tagLinks.
 */
private void createDiskStates(S3StorageEnumerationContext aws, S3StorageEnumerationSubStage next) {
    // For all the disks to be created, filter based on whether we were able to find the
    // correct region for the disk using the getBucketTaggingConfiguration() call, then map
    // them and create operations. Filtering avoids creating disk states with a null region
    // (since we don't PATCH the region field after creating the disk, the disk state must
    // be created with the correct region initially). Kick off the operations using a JOIN.
    List<DiskState> diskStatesToBeCreated = new ArrayList<>();
    aws.bucketsToBeCreated.stream()
            .filter(bucket -> aws.regionsByBucketName.containsKey(bucket.getName()))
            .forEach(bucket -> diskStatesToBeCreated.add(mapBucketToDiskState(bucket, aws)));
    diskStatesToBeCreated.forEach(diskState -> aws.enumerationOperations
            .add(createPostOperation(this, diskState, DiskService.FACTORY_LINK)));
    this.logFine(() -> String.format("Creating %d S3 disks", aws.bucketsToBeCreated.size()));
    // For those disk states which do not have the tagLink, add it by PATCHing those states.
    if (aws.internalTypeTagSelfLink != null) {
        aws.diskStatesToBeUpdatedByBucketName.entrySet().stream()
                .filter(diskMap -> diskMap.getValue().tagLinks == null
                        || !diskMap.getValue().tagLinks.contains(aws.internalTypeTagSelfLink))
                .forEach(diskMap -> {
                    Map<String, Collection<Object>> collectionsToAddMap = Collections.singletonMap(
                            DiskState.FIELD_NAME_TAG_LINKS,
                            Collections.singletonList(aws.internalTypeTagSelfLink));
                    Map<String, Collection<Object>> collectionsToRemoveMap = Collections.singletonMap(
                            DiskState.FIELD_NAME_TAG_LINKS, Collections.emptyList());
                    ServiceStateCollectionUpdateRequest updateTagLinksRequest =
                            ServiceStateCollectionUpdateRequest.create(collectionsToAddMap,
                                    collectionsToRemoveMap);
                    aws.enumerationOperations.add(Operation
                            .createPatch(this.getHost(), diskMap.getValue().documentSelfLink)
                            .setReferer(aws.service.getUri())
                            .setBody(updateTagLinksRequest));
                });
    }
    // Update endpointLinks.
    aws.diskStatesToBeUpdatedByBucketName.entrySet().stream()
            .filter(diskMap -> diskMap.getValue().endpointLinks == null
                    || !diskMap.getValue().endpointLinks.contains(aws.request.original.endpointLink))
            .forEach(diskMap -> {
                Map<String, Collection<Object>> collectionsToAddMap = Collections.singletonMap(
                        DiskState.FIELD_NAME_ENDPOINT_LINKS,
                        Collections.singletonList(aws.request.original.endpointLink));
                Map<String, Collection<Object>> collectionsToRemoveMap = Collections.singletonMap(
                        DiskState.FIELD_NAME_ENDPOINT_LINKS, Collections.emptyList());
                ServiceStateCollectionUpdateRequest updateEndpointLinksRequest =
                        ServiceStateCollectionUpdateRequest.create(collectionsToAddMap,
                                collectionsToRemoveMap);
                aws.enumerationOperations.add(Operation
                        .createPatch(this.getHost(), diskMap.getValue().documentSelfLink)
                        .setReferer(aws.service.getUri())
                        .setBody(updateEndpointLinksRequest));
            });
    OperationJoin.JoinedCompletionHandler joinCompletion = (ox, exc) -> {
        if (exc != null) {
            this.logSevere(() -> String.format("Error creating/updating disk %s",
                    Utils.toString(exc)));
            aws.subStage = S3StorageEnumerationSubStage.DELETE_DISKS;
            handleReceivedEnumerationData(aws);
            return;
        }
        ox.entrySet().stream().forEach(operationEntry -> aws.diskStatesEnumerated
                .add(operationEntry.getValue().getBody(DiskState.class)));
        this.logFine(() -> "Successfully created and updated all the disk states.");
        aws.subStage = next;
        handleReceivedEnumerationData(aws);
    };
    if (aws.enumerationOperations.isEmpty()) {
        aws.subStage = next;
        handleReceivedEnumerationData(aws);
        return;
    }
    OperationJoin joinOp = OperationJoin.create(aws.enumerationOperations);
    joinOp.setCompletion(joinCompletion);
    joinOp.sendWith(this.getHost());
}
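The tagLinks and endpointLinks updates above rely on xenon's collection-update PATCH, which modifies only the named collection rather than replacing the whole document. A condensed sketch of that building block; tagSelfLink, diskLink and service are illustrative variables:

Map<String, Collection<Object>> itemsToAdd = Collections.singletonMap(
        DiskState.FIELD_NAME_TAG_LINKS, Collections.singletonList(tagSelfLink));
Map<String, Collection<Object>> itemsToRemove = Collections.singletonMap(
        DiskState.FIELD_NAME_TAG_LINKS, Collections.emptyList());
// PATCHes only the tagLinks collection; other DiskState fields are untouched.
Operation.createPatch(service.getHost(), diskLink)
        .setReferer(service.getUri())
        .setBody(ServiceStateCollectionUpdateRequest.create(itemsToAdd, itemsToRemove))
        .sendWith(service);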
use of com.amazonaws.services.s3.model.Region in project opencast by opencast.
the class AwsS3DistributionServiceImpl method activate.
@Override
public void activate(ComponentContext cc) {
    // Get the configuration
    if (cc != null) {
        if (!Boolean.valueOf(getAWSConfigKey(cc, AWS_S3_DISTRIBUTION_ENABLE))) {
            logger.info("AWS S3 distribution disabled");
            return;
        }
        // AWS S3 bucket name
        bucketName = getAWSConfigKey(cc, AWS_S3_BUCKET_CONFIG);
        logger.info("AWS S3 bucket name is {}", bucketName);
        // AWS region
        String regionStr = getAWSConfigKey(cc, AWS_S3_REGION_CONFIG);
        logger.info("AWS region is {}", regionStr);
        opencastDistributionUrl = getAWSConfigKey(cc, AWS_S3_DISTRIBUTION_BASE_CONFIG);
        if (!opencastDistributionUrl.endsWith("/")) {
            opencastDistributionUrl = opencastDistributionUrl + "/";
        }
        logger.info("AWS distribution url is {}", opencastDistributionUrl);
        // Explicit credentials are optional.
        AWSCredentialsProvider provider = null;
        Option<String> accessKeyIdOpt = OsgiUtil.getOptCfg(cc.getProperties(), AWS_S3_ACCESS_KEY_ID_CONFIG);
        Option<String> accessKeySecretOpt = OsgiUtil.getOptCfg(cc.getProperties(), AWS_S3_SECRET_ACCESS_KEY_CONFIG);
        // If no explicit credentials are configured, fall back to the default provider
        // chain (environment, system properties, profile credentials, instance role).
        if (accessKeyIdOpt.isNone() && accessKeySecretOpt.isNone()) {
            provider = new DefaultAWSCredentialsProviderChain();
        } else {
            provider = new AWSStaticCredentialsProvider(
                    new BasicAWSCredentials(accessKeyIdOpt.get(), accessKeySecretOpt.get()));
        }
        // Create AWS client.
        s3 = AmazonS3ClientBuilder.standard().withRegion(regionStr).withCredentials(provider).build();
        s3TransferManager = new TransferManager(s3);
        // Create the AWS S3 bucket if it does not exist yet.
        createAWSBucket();
        distributionChannel = OsgiUtil.getComponentContextProperty(cc, CONFIG_KEY_STORE_TYPE);
        logger.info("AwsS3DistributionService activated!");
    }
}
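The credential fallback used above, reduced to its essentials; the system-property keys and region string here are illustrative stand-ins, not Opencast's actual configuration keys:

import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;

String accessKeyId = System.getProperty("aws.access.key.id");   // illustrative key
String secretKey = System.getProperty("aws.secret.access.key"); // illustrative key
AWSCredentialsProvider provider = (accessKeyId == null || secretKey == null)
        // No explicit keys: environment, system properties, profile, instance role.
        ? new DefaultAWSCredentialsProviderChain()
        : new AWSStaticCredentialsProvider(new BasicAWSCredentials(accessKeyId, secretKey));
AmazonS3 s3 = AmazonS3ClientBuilder.standard()
        .withRegion("us-east-1") // the region string comes from configuration in the service above
        .withCredentials(provider)
        .build();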