Use of com.vmware.xenon.common.OperationContext in the photon-model project by VMware.
From class VSphereIOThreadPool, method executeCallback.
/**
 * Connects the given vSphere connection on the I/O thread pool and hands it to
 * the callback, propagating the submitting thread's {@link OperationContext}.
 *
 * <p>On connection failure the callback is invoked with a {@code null} connection
 * and the {@link ConnectionException}; otherwise the connection is passed to the
 * callback and closed quietly afterwards.
 */
private void executeCallback(BasicConnection connection, ConnectionCallback callback) {
    // Capture the caller's context at submission time so the pooled task runs
    // with the same authorization/tracing state.
    OperationContext opContext = OperationContext.getOperationContext();
    this.executorService.submit(() -> {
        // Save the pool thread's current context and restore it when done, so
        // the caller's context does not leak into unrelated tasks later run on
        // this thread (same pattern as VsphereEnumerationHelper.submitWorkToVSpherePool).
        OperationContext previous = OperationContext.getOperationContext();
        OperationContext.restoreOperationContext(opContext);
        try {
            try {
                // login and session creation
                connection.connect();
            } catch (ConnectionException e) {
                // Report the failure through the callback; no connection to close.
                callback.doInConnection(null, e);
                return;
            }
            try {
                callback.doInConnection(connection, null);
            } catch (Exception e) {
                logger.log(Level.SEVERE, "Uncaught exception in vSphere IO Pool: " + Utils.toString(e));
            } finally {
                closeQuietly(connection);
            }
        } finally {
            // Always put the pool thread's original context back.
            OperationContext.restoreOperationContext(previous);
        }
    });
}
Use of com.vmware.xenon.common.OperationContext in the photon-model project by VMware.
From class VsphereEnumerationHelper, method submitWorkToVSpherePool.
/**
 * Runs {@code work} on the vSphere I/O thread pool associated with the given
 * service, carrying the submitting thread's {@link OperationContext} into the
 * pooled task and restoring the pool thread's own context afterwards.
 */
static void submitWorkToVSpherePool(VSphereIncrementalEnumerationService service, Runnable work) {
    // Snapshot the caller's context at the moment of submission.
    OperationContext captured = OperationContext.getOperationContext();
    VSphereIOThreadPool pool = VSphereIOThreadPoolAllocator.getPool(service);
    pool.submit(() -> {
        // Preserve whatever context this pool thread already carries and put
        // it back once the work completes, even if the work throws.
        OperationContext previous = OperationContext.getOperationContext();
        OperationContext.setFrom(captured);
        try {
            work.run();
        } finally {
            OperationContext.restoreOperationContext(previous);
        }
    });
}
Use of com.vmware.xenon.common.OperationContext in the photon-model project by VMware.
From class AWSEnumerationAndDeletionAdapterService, method validateClient.
/**
 * Validates the EC2 client credentials (via a lightweight describe-availability-zones
 * style probe) and advances the enumeration-deletion state machine based on the result.
 */
private void validateClient(EnumerationDeletionContext aws, AWSEnumerationDeletionStages next) {
    // Capture the caller's context; validation completes on an SDK thread.
    OperationContext ctx = OperationContext.getOperationContext();
    AWSUtils.validateCredentials(aws.amazonEC2Client, this.clientManager, aws.endpointAuth,
            aws.request, this,
            (describeAvailabilityZonesResult) -> {
                // Credentials are valid: resume in the caller's context and proceed.
                OperationContext.restoreOperationContext(ctx);
                aws.stage = next;
                handleEnumerationRequestForDeletion(aws);
            },
            t -> {
                // Validation failed: record the error and route to the ERROR stage.
                OperationContext.restoreOperationContext(ctx);
                aws.stage = AWSEnumerationDeletionStages.ERROR;
                aws.error = t;
                handleEnumerationRequestForDeletion(aws);
            },
            // onUnaccessible: the region is not accessible so complete the enum Op
            () -> {
                OperationContext.restoreOperationContext(ctx);
                aws.operation.complete();
            });
}
Use of com.vmware.xenon.common.OperationContext in the photon-model project by VMware.
From class AWSPowerService, method powerOff.
/**
 * Issues an asynchronous EC2 stop-instances request for the child compute and,
 * once the instance reaches the "stopped" state, patches the compute state.
 * Failures are reported to the task manager. The caller's {@link OperationContext}
 * is restored inside every SDK callback, which runs on SDK-owned threads.
 */
private void powerOff(AmazonEC2AsyncClient client, ComputePowerRequest pr, DefaultAdapterContext c) {
    OperationContext ctx = OperationContext.getOperationContext();
    StopInstancesRequest stopRequest = new StopInstancesRequest().withInstanceIds(c.child.id);
    client.stopInstancesAsync(stopRequest, new AsyncHandler<StopInstancesRequest, StopInstancesResult>() {
        @Override
        public void onSuccess(StopInstancesRequest req, StopInstancesResult result) {
            // The stop is asynchronous on the AWS side; wait until the instance
            // transitions to "stopped" before updating the compute state.
            AWSUtils.waitForTransitionCompletion(getHost(), result.getStoppingInstances(),
                    "stopped", client, (is, e) -> {
                        OperationContext.restoreOperationContext(ctx);
                        if (e == null) {
                            updateComputeState(pr, c);
                        } else {
                            onError(e);
                        }
                    });
        }

        @Override
        public void onError(Exception e) {
            OperationContext.restoreOperationContext(ctx);
            c.taskManager.patchTaskToFailure(e);
        }
    });
}
Use of com.vmware.xenon.common.OperationContext in the photon-model project by VMware.
From class AWSS3StorageEnumerationAdapterService, method enumerateTags.
/**
* Calls getBucketTaggingConfiguration() on every bucket to enumerate bucket tags.
* getBucketTaggingConfiguration() method is region aware and only returns valid results when we
* call it with the client region same as the S3 bucket region. Since S3 bucket region is not
* available through API, when a new bucket is discovered, we try to get tags for it by calling
* getBucketTaggingConfiguration() with client in every AWS region. We then store the client region
* for which we received successful response as region in DiskState for that S3 bucket. This region
* in DiskState is then used for any subsequent calls for enumerating tags.
*/
private void enumerateTags(S3StorageEnumerationContext aws, S3StorageEnumerationSubStage next) {
    // Snapshot the caller's operation context; it is re-established on the
    // executor thread and again inside each DeferredResult completion handler
    // below, since those run on arbitrary threads.
    OperationContext operationContext = OperationContext.getOperationContext();
    this.executorService.submit(() -> {
        OperationContext.restoreOperationContext(operationContext);
        List<DeferredResult<S3ClientHandler>> s3ClientsEnumeratePreviousBuckets = new ArrayList<>();
        // Phase 1: buckets seen in previous runs. Their DiskState should already
        // record the bucket's region, so one region-correct client per bucket
        // suffices to fetch tags directly.
        for (Map.Entry<String, DiskState> entry : aws.diskStatesToBeUpdatedByBucketName.entrySet()) {
            // Cache the stored region so tag calls can reuse it
            // with valid region in subsequent enumeration runs.
            if (entry.getValue().regionId != null) {
                // NOTE(review): keyed by DiskState.id here, but enumerateNewBuckets
                // keys regionsByBucketName by bucket name — presumably the S3
                // DiskState id equals the bucket name; confirm against the
                // DiskState creation code.
                aws.regionsByBucketName.put(entry.getValue().id, entry.getValue().regionId);
            } else {
                // A DiskState without a region cannot be used for region-aware
                // tag calls; delete it so a later enumeration pass re-creates
                // it with a discovered region.
                logWarning("Null region found in S3 diskState");
                Operation.createDelete(aws.service.getHost(), entry.getValue().documentSelfLink).setReferer(aws.service.getUri()).setBody(getDeletionState(Utils.getNowMicrosUtc())).setCompletion((o, e) -> {
                    if (e != null) {
                        logWarning("Exception deleting diskState with null " + "region [ex=%s]", e.getMessage());
                        return;
                    }
                    logWarning("Deleted diskState with null region [diskState=%s]", Utils.toJsonHtml(entry.getValue()));
                }).sendWith(aws.service);
                continue;
            }
            // Asynchronously obtain an S3 client for the bucket's stored region.
            s3ClientsEnumeratePreviousBuckets.add(aws.clientManager.getOrCreateS3ClientAsync(aws.endpointAuth, entry.getValue().regionId, aws.service).thenApply(s3Client -> {
                S3ClientHandler s3ClientHandler = new S3ClientHandler();
                s3ClientHandler.s3Client = s3Client;
                s3ClientHandler.bucketName = entry.getKey();
                s3ClientHandler.diskState = entry.getValue();
                return s3ClientHandler;
            }));
        }
        // Handler to enumerate new buckets not previously enumerated. S3 does not
        // expose a bucket's region through this API, so for each new bucket we try
        // a client from every region until one call succeeds, then record that
        // region as the bucket's region.
        BiConsumer<List<S3ClientHandler>, Throwable> enumerateNewBuckets = (s3ClientHandlers, throwable) -> {
            OperationContext.restoreOperationContext(operationContext);
            for (Bucket bucket : aws.bucketsToBeCreated) {
                for (S3ClientHandler s3ClientHandler : s3ClientHandlers) {
                    try {
                        BucketTaggingConfiguration bucketTaggingConfiguration = s3ClientHandler.s3Client.getBucketTaggingConfiguration(bucket.getName());
                        // Call succeeded: this client's region is taken as the
                        // bucket's region.
                        aws.regionsByBucketName.put(bucket.getName(), s3ClientHandler.region.getName());
                        if (bucketTaggingConfiguration != null) {
                            aws.tagsByBucketName.put(bucket.getName(), new ConcurrentHashMap<>());
                            bucketTaggingConfiguration.getAllTagSets().forEach(tagSet -> aws.tagsByBucketName.get(bucket.getName()).putAll(tagSet.getAllTags()));
                        }
                        break;
                    } catch (Exception e) {
                        // 301/403/400 are treated as a region mismatch from
                        // getbucketTaggingConfiguration(): try the next region's client.
                        if (e instanceof AmazonS3Exception && (((AmazonS3Exception) e).getStatusCode() == Operation.STATUS_CODE_MOVED_PERM || ((AmazonS3Exception) e).getStatusCode() == Operation.STATUS_CODE_FORBIDDEN || ((AmazonS3Exception) e).getStatusCode() == Operation.STATUS_CODE_BAD_REQUEST)) {
                            continue;
                        } else {
                            // Unexpected failure: log and still continue with the
                            // remaining regions (best effort).
                            logSevere("Exception enumerating tags for S3 bucket with unknown region " + "[endpoint=%s] [region=%s] [ex=%s]", aws.request.original.endpointLink, s3ClientHandler.region.getName(), e.getMessage());
                            continue;
                        }
                    }
                }
            }
            // Once completed, move to next stage.
            aws.subStage = next;
            handleReceivedEnumerationData(aws);
        };
        // Handler to enumerate tags for buckets already previously enumerated.
        BiConsumer<List<S3ClientHandler>, Throwable> enumeratePreviousBuckets = (s3ClientHandlers, ignored) -> {
            OperationContext.restoreOperationContext(operationContext);
            for (S3ClientHandler s3ClientHandler : s3ClientHandlers) {
                try {
                    BucketTaggingConfiguration bucketTaggingConfiguration = s3ClientHandler.s3Client.getBucketTaggingConfiguration(s3ClientHandler.bucketName);
                    if (bucketTaggingConfiguration != null) {
                        aws.tagsByBucketName.put(s3ClientHandler.bucketName, new ConcurrentHashMap<>());
                        bucketTaggingConfiguration.getAllTagSets().forEach(tagSet -> aws.tagsByBucketName.get(s3ClientHandler.bucketName).putAll(tagSet.getAllTags()));
                    }
                } catch (Exception e) {
                    // Best effort: one bucket's failure must not abort the rest.
                    logSevere("Exception enumerating tags for S3 bucket with known region " + "[endpoint=%s] [bucketName=%s - %s] [region=%s] [ex=%s]", aws.request.original.endpointLink, s3ClientHandler.bucketName, s3ClientHandler.diskState.id, s3ClientHandler.diskState.regionId, e.getMessage());
                }
            }
            // For remaining buckets, they have yet to be enumerated. Brute force and try to
            // enumerate tags for these buckets over every region until we find the correct
            // region and then store it in DiskState for future reference.
            List<DeferredResult<S3ClientHandler>> s3ClientBucketsToEnumerate = new ArrayList<>();
            for (Regions region : Regions.values()) {
                // Get an s3 client in the region asynchronously. Once all completed, these
                // clients will be used to test each bucket.
                s3ClientBucketsToEnumerate.add(aws.clientManager.getOrCreateS3ClientAsync(aws.endpointAuth, region.getName(), aws.service).thenApply(s3Client -> {
                    S3ClientHandler s3ClientHandler = new S3ClientHandler();
                    s3ClientHandler.s3Client = s3Client;
                    s3ClientHandler.region = region;
                    return s3ClientHandler;
                }));
            }
            // Once finished, attempt to enumerate each of the "new" buckets.
            DeferredResult.allOf(s3ClientBucketsToEnumerate).whenComplete(enumerateNewBuckets);
        };
        // Enumerate tags of previously enumerated buckets first
        DeferredResult.allOf(s3ClientsEnumeratePreviousBuckets).whenComplete(enumeratePreviousBuckets);
    });
}
Aggregations