Use of com.vmware.vim25.ObjectUpdate in project photon-model by VMware.
The class VSphereIncrementalEnumerationService, method refreshResourcesOnDatacenter.
private void refreshResourcesOnDatacenter(EnumerationClient client, EnumerationProgress ctx, TaskManager mgr) {
    Set<String> sharedDatastores = new HashSet<>();
    List<StoragePolicyOverlay> storagePolicies = new ArrayList<>();
    // put results in different buckets by type
    PropertyFilterSpec spec = client.createResourcesFilterSpec();
    CollectorDetails collectorDetails = new CollectorDetails();
    EnumerationClient.ObjectUpdateIterator resourcesIterator;
    SegregatedOverlays segregatedOverlays;
    logInfo("Processing resources on datacenter: %s", ctx.getDcLink());
    try {
        ManagedObjectReference resourcesPropertyCollector = client.createPropertyCollectorWithFilter(spec);
        // remove getObjectIterator API
        resourcesIterator = new ObjectUpdateIterator(resourcesPropertyCollector, this.connection.getVimPort(), "");
        List<ObjectUpdate> updates = new ArrayList<>();
        while (resourcesIterator.hasNext()) {
            UpdateSet page = resourcesIterator.next();
            if (null != page) {
                for (PropertyFilterUpdate propertyFilterUpdate : page.getFilterSet()) {
                    updates.addAll(propertyFilterUpdate.getObjectSet());
                }
            }
        }
        segregatedOverlays = segregateObjectUpdates(ctx, updates);
    } catch (Exception e) {
        String msg = "Error processing PropertyCollector results";
        logWarning(() -> msg + ": " + e.toString());
        mgr.patchTaskToFailure(msg, e);
        return;
    }
    // This will split the folders into two lists. Default folders (vm, host, network, datastore) have the
    // datacenter as their parent and are not visible in vCenter, so they need not be persisted. Any other
    // folder has another folder as its parent and will be persisted with the appropriate parent.
    // partitioningBy always returns a map with two entries, one where the predicate is true and one where
    // it is false. Even if a bucket is empty, both entries are present in the map, i.e. the map size is
    // always two (see the standalone illustration after this method).
    Map<Boolean, List<FolderOverlay>> folderMap = new HashMap<>(segregatedOverlays.folders.stream()
            .collect(Collectors.partitioningBy(s -> s.getParent().getType().equals(VimNames.TYPE_DATACENTER))));
    // Process the real (persisted) folders and the root (default) folder list
    List<FolderOverlay> trueFolders = folderMap.get(Boolean.FALSE);
    List<FolderOverlay> rootFolders = folderMap.get(Boolean.TRUE);
    ctx.expectFolderCount(trueFolders.size());
    logInfo("Processing folders on datacenter: %s", ctx.getDcLink());
    for (FolderOverlay folder : trueFolders) {
        try {
            // The root folder list is passed along to achieve the following:
            // Folder A is a root folder and has the datacenter as its parent.
            // Folder Ac has A as its parent. Since 'A' is not persisted anymore, 'Ac'
            // should have the datacenter as its parent.
            VsphereFolderEnumerationHelper.processFoundFolder(this, ctx, folder, rootFolders, client);
        } catch (Exception e) {
            logWarning(() -> "Error processing folder information : " + e.toString());
        }
    }
    // Process the HostSystem list to determine each datastore's access level (local vs. shared)
    try {
        for (HostSystemOverlay hs : segregatedOverlays.hosts) {
            sharedDatastores.addAll(client.getDatastoresHostMountInfo(hs));
        }
    } catch (Exception e) {
        // We can continue; we just won't know whether a datastore is local or shared,
        // which is acceptable.
        logWarning(() -> "Error processing datastore host mount information : " + e.toString());
    }
    storagePolicies = VsphereStoragePolicyEnumerationHelper.createStorageProfileOverlays(this, client);
    // process results in topological order
    ctx.expectNetworkCount(segregatedOverlays.networks.size());
    logInfo("Processing network on datacenter: %s", ctx.getDcLink());
    for (NetworkOverlay net : segregatedOverlays.networks.values()) {
        VSphereNetworkEnumerationHelper.processFoundNetwork(this, ctx, net, segregatedOverlays.networks);
    }
    ctx.expectDatastoreCount(segregatedOverlays.datastores.size());
    logInfo("Processing datastore on datacenter: %s", ctx.getDcLink());
    for (DatastoreOverlay ds : segregatedOverlays.datastores) {
        ds.setMultipleHostAccess(sharedDatastores.contains(ds.getName()));
        VsphereDatastoreEnumerationHelper.processFoundDatastore(this, ctx, ds);
    }
    // checkpoint net & storage, they are not related currently
    try {
        ctx.getDatastoreTracker().await();
        ctx.getNetworkTracker().await();
        ctx.getFolderTracker().await();
    } catch (InterruptedException e) {
        threadInterrupted(mgr, e);
        return;
    }
    // datastore processing is complete.
    if (storagePolicies.size() > 0) {
        ctx.expectStoragePolicyCount(storagePolicies.size());
        for (StoragePolicyOverlay sp : storagePolicies) {
            VsphereStoragePolicyEnumerationHelper.processFoundStoragePolicy(this, ctx, sp);
        }
        // checkpoint for storage policy
        try {
            ctx.getStoragePolicyTracker().await();
        } catch (InterruptedException e) {
            threadInterrupted(mgr, e);
            return;
        }
    }
    ctx.expectComputeResourceCount(segregatedOverlays.clusters.size());
    for (ComputeResourceOverlay cluster : segregatedOverlays.clusters) {
        ctx.track(cluster);
        cluster.markHostAsClustered(segregatedOverlays.hosts);
        VsphereComputeResourceEnumerationHelper.processFoundComputeResource(this, ctx, cluster, client);
    }
    // checkpoint compute
    try {
        ctx.getComputeResourceTracker().await();
    } catch (InterruptedException e) {
        threadInterrupted(mgr, e);
        return;
    }
    // process clustered as well as non-clustered hosts
    ctx.expectHostSystemCount(segregatedOverlays.hosts.size());
    logInfo("Processing hosts on datacenter: %s", ctx.getDcLink());
    for (HostSystemOverlay hs : segregatedOverlays.hosts) {
        ctx.track(hs);
        VSphereHostSystemEnumerationHelper.processFoundHostSystem(this, ctx, hs, client);
    }
    // exclude all root resource pools
    // no need to collect the root resource pool
    segregatedOverlays.resourcePools.removeIf(rp -> !VimNames.TYPE_RESOURCE_POOL.equals(rp.getParent().getType()));
    MoRefKeyedMap<String> computeResourceNamesByMoref = collectComputeNames(segregatedOverlays.hosts, segregatedOverlays.clusters);
    ctx.expectResourcePoolCount(segregatedOverlays.resourcePools.size());
    logInfo("Processing resource pools on datacenter: %s", ctx.getDcLink());
    for (ResourcePoolOverlay rp : segregatedOverlays.resourcePools) {
        String ownerName = computeResourceNamesByMoref.get(rp.getOwner());
        VSphereResourcePoolEnumerationHelper.processFoundResourcePool(this, ctx, rp, ownerName, client);
    }
    // checkpoint host systems and resource pools
    try {
        ctx.getHostSystemTracker().await();
        ctx.getResourcePoolTracker().await();
    } catch (InterruptedException e) {
        threadInterrupted(mgr, e);
        return;
    }
    logInfo("Updating server disks on datacenter: %s", ctx.getDcLink());
    // update server disks with selfLinks of HostSystem
    for (HostSystemOverlay hostSystemOverlay : segregatedOverlays.hosts) {
        updateServerDisks(ctx, hostSystemOverlay);
    }
logInfo("Processing VMs on datacenter: %s", ctx.getDcLink());
spec = client.createVmFilterSpec(client.getDatacenter());
List<VmOverlay> vmOverlayList = new ArrayList<>();
EnumerationClient.ObjectUpdateIterator vmIterator;
try {
ManagedObjectReference vmPropertyCollector = client.createPropertyCollectorWithFilter(spec);
vmIterator = new ObjectUpdateIterator(vmPropertyCollector, this.connection.getVimPort(), "");
while (vmIterator.hasNext()) {
UpdateSet page = vmIterator.next();
if (null != page) {
for (PropertyFilterUpdate propertyFilterUpdate : page.getFilterSet()) {
ctx.resetVmTracker();
for (ObjectUpdate cont : propertyFilterUpdate.getObjectSet()) {
if (!VimUtils.isVirtualMachine(cont.getObj())) {
continue;
}
VmOverlay vm = new VmOverlay(cont);
if (vm.isTemplate()) {
// templates are skipped, enumerated as "images" instead
continue;
}
if (vm.getInstanceUuid() == null) {
logWarning(() -> String.format("Cannot process a VM without" + " instanceUuid: %s", VimUtils.convertMoRefToString(vm.getId())));
} else {
ctx.getVmTracker().register();
vmOverlayList.add(vm);
VSphereVirtualMachineEnumerationHelper.processFoundVm(this, ctx, vm);
}
}
ctx.getVmTracker().arriveAndAwaitAdvance();
VSphereVMSnapshotEnumerationHelper.enumerateSnapshots(this, ctx, vmOverlayList);
}
}
}
} catch (Exception e) {
String msg = "Error processing PropertyCollector results";
logWarning(() -> msg + ": " + e.toString());
mgr.patchTaskToFailure(msg, e);
return;
}
    // Sync disks deleted in vSphere
    deleteIndependentDisksUnavailableInVSphere(ctx, client);
    try {
        ctx.getDeleteDiskTracker().await();
    } catch (InterruptedException e) {
        threadInterrupted(mgr, e);
        return;
    }
    // if enumeration action is start and this is the initial enumeration, then store the property collectors and versions.
    if (EnumerationAction.START == ctx.getRequest().enumerationAction) {
        collectorDetails.vmCollectorVersion = vmIterator.getVersion();
        collectorDetails.vmPropertyCollector = vmIterator.getPropertyCollector();
        collectorDetails.resourcesCollectorVersion = resourcesIterator.getVersion();
        collectorDetails.resourcesPropertyCollector = resourcesIterator.getPropertyCollector();
        collectorDetails.datacenter = ctx.getRegionId();
        this.collectors.add(collectorDetails);
    }
}
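The folder-splitting comment in refreshResourcesOnDatacenter relies on a JDK guarantee: Collectors.partitioningBy always produces a map with exactly two entries, keyed by Boolean.TRUE and Boolean.FALSE, even when one bucket is empty. A minimal, standalone illustration of that guarantee (plain JDK, unrelated to the vSphere types):

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class PartitioningByDemo {
    public static void main(String[] args) {
        // Every element matches the predicate, yet the FALSE bucket is still present (empty).
        Map<Boolean, List<Integer>> byParity = Stream.of(2, 4, 6)
                .collect(Collectors.partitioningBy(n -> n % 2 == 0));
        System.out.println(byParity.size());             // 2
        System.out.println(byParity.get(Boolean.TRUE));  // [2, 4, 6]
        System.out.println(byParity.get(Boolean.FALSE)); // []
    }
}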
Use of com.vmware.vim25.ObjectUpdate in project photon-model by VMware.
The class VSphereIncrementalEnumerationService, method segregateObjectUpdates.
private SegregatedOverlays segregateObjectUpdates(EnumerationProgress ctx, List<ObjectUpdate> updates) {
    SegregatedOverlays segregatedOverlays = new SegregatedOverlays();
    for (ObjectUpdate cont : updates) {
        if (VimUtils.isNetwork(cont.getObj())) {
            NetworkOverlay net = new NetworkOverlay(cont);
            ctx.track(net);
            String nameOrNull = net.getNameOrNull();
            /* Add the overlay if the name is null or the name doesn't contain "dvuplinks".
               When a DV port group is removed we do not get its name, but we still have to
               process it (i.e. remove the subnet document from photon-model), so it must be
               added to segregatedOverlays. */
            if ((null == nameOrNull) || (!nameOrNull.toLowerCase().contains("dvuplinks"))) {
                // TODO starting with 6.5 query the property config.uplink instead
                segregatedOverlays.networks.put(net.getId(), net);
            }
        } else if (VimUtils.isHost(cont.getObj())) {
            // this includes all standalone and clustered hosts
            HostSystemOverlay hs = new HostSystemOverlay(cont);
            segregatedOverlays.hosts.add(hs);
        } else if (VimUtils.isComputeResource(cont.getObj())) {
            ComputeResourceOverlay cr = new ComputeResourceOverlay(cont);
            if ((ObjectUpdateKind.ENTER.equals(cr.getObjectUpdateKind()) && cr.isDrsEnabled())
                    || (!ObjectUpdateKind.ENTER.equals(cr.getObjectUpdateKind()))) {
                // when DRS is enabled, add the cluster itself and skip the hosts;
                // if a cluster is modified or deleted, add it to the overlays
                segregatedOverlays.clusters.add(cr);
            } else {
                // ignore non-clusters and non-DRS clusters: they are handled as hosts
                continue;
            }
        } else if (VimUtils.isDatastore(cont.getObj())) {
            DatastoreOverlay ds = new DatastoreOverlay(cont);
            segregatedOverlays.datastores.add(ds);
        } else if (VimUtils.isResourcePool(cont.getObj())) {
            ResourcePoolOverlay rp = new ResourcePoolOverlay(cont);
            segregatedOverlays.resourcePools.add(rp);
        } else if (VimUtils.isFolder(cont.getObj())) {
            FolderOverlay folder = new FolderOverlay(cont);
            segregatedOverlays.folders.add(folder);
        }
    }
    return segregatedOverlays;
}
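The VimUtils.isNetwork/isHost/isDatastore helpers used above are not part of this excerpt. They presumably dispatch on the type string carried by the ManagedObjectReference inside each ObjectUpdate; the sketch below shows that kind of check in isolation, using well-known vSphere type names as string literals rather than the VimNames constants, and is only an assumption about how those helpers behave.

import com.vmware.vim25.ManagedObjectReference;
import com.vmware.vim25.ObjectUpdate;

final class MoRefTypes {
    private MoRefTypes() {
    }

    // Sketch only: the real VimUtils helpers may perform additional checks.
    static boolean isOfType(ObjectUpdate update, String vimType) {
        ManagedObjectReference obj = update.getObj();
        return obj != null && vimType.equals(obj.getType());
    }

    static boolean isVirtualMachine(ObjectUpdate update) {
        return isOfType(update, "VirtualMachine");
    }

    static boolean isHostSystem(ObjectUpdate update) {
        return isOfType(update, "HostSystem");
    }
}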
Use of com.vmware.vim25.ObjectUpdate in project photon-model by VMware.
The class VSphereIncrementalEnumerationService, method handlePatch.
@Override
public void handlePatch(Operation patch) {
    // complete the patch immediately.
    patch.complete();
    logInfo("Received PATCH for incremental enumeration!");
    VSphereIncrementalEnumerationRequest enumerationRequest = patch.getBody(VSphereIncrementalEnumerationRequest.class);
    ComputeEnumerateResourceRequest request = enumerationRequest.request;
    URI parentUri = ComputeService.ComputeStateWithDescription.buildUri(
            PhotonModelUriUtils.createInventoryUri(getHost(), request.resourceReference));
    logInfo("Creating task manager!");
    TaskManager mgr = new TaskManager(this, request.taskReference, request.resourceLink());
    logInfo("Requesting GET on compute state with description.");
    Operation.createGet(parentUri).setCompletion(o -> {
        logInfo("Submitting job to threadpool!");
        VsphereEnumerationHelper.submitWorkToVSpherePool(this, () -> {
            logInfo("Incremental enumeration job started for endpoint %s", enumerationRequest.request.endpointLink);
            ComputeStateWithDescription computeStateWithDesc = o.getBody(ComputeStateWithDescription.class);
            VapiConnection vapiConnection = VapiConnection.createFromVimConnection(this.connection);
            logInfo("Establishing VAPI connection for endpoint %s", enumerationRequest.request.endpointLink);
            try {
                vapiConnection.login();
            } catch (IOException | RpcException rpce) {
                logWarning(() -> String.format("Cannot login into vAPI endpoint: %s", Utils.toString(rpce)));
                mgr.patchTaskToFailure(rpce);
                // self-delete the service so that full enumeration kicks in on the next invocation.
                selfDeleteService();
                return;
            }
            try {
                // Get the instanceUuid of the vCenter
                AboutInfo vCenter = this.connection.getServiceContent().getAbout();
                for (CollectorDetails collectorDetails : this.collectors) {
                    logInfo("Retrieving resources incremental data for data center: %s", collectorDetails.datacenter);
                    EnumerationProgress enumerationProgress = new EnumerationProgress(new HashSet<>(), request,
                            computeStateWithDesc, vapiConnection, collectorDetails.datacenter, vCenter.getInstanceUuid());
                    EnumerationClient client = new EnumerationClient(this.connection, computeStateWithDesc,
                            VimUtils.convertStringToMoRef(collectorDetails.datacenter));
                    List<ObjectUpdate> resourcesUpdates = collectResourcesData(collectorDetails);
                    List<ObjectUpdate> vmUpdates = collectVMData(collectorDetails);
                    logInfo("Received resources updates for datacenter: %s : %s", collectorDetails.datacenter, resourcesUpdates.size());
                    logInfo("Received vm updates for datacenter: %s : %s", collectorDetails.datacenter, vmUpdates.size());
                    logInfo("Resources Updates: %s", Utils.toJson(resourcesUpdates));
                    logInfo("VM Updates: %s", Utils.toJson(vmUpdates));
                    if (!resourcesUpdates.isEmpty()) {
                        SegregatedOverlays segregatedOverlays = segregateObjectUpdates(enumerationProgress, resourcesUpdates);
                        this.logInfo("Processing incremental changes for folders for datacenter [%s]!", collectorDetails.datacenter);
                        VsphereFolderEnumerationHelper.handleFolderChanges(this, segregatedOverlays.folders, enumerationProgress, client);
                        logInfo("Processing incremental changes for networks for datacenter [%s]!", collectorDetails.datacenter);
                        VSphereNetworkEnumerationHelper.handleNetworkChanges(this, segregatedOverlays.networks, enumerationProgress, client);
                        logInfo("Processing incremental changes for Datastores for datacenter [%s]!", collectorDetails.datacenter);
                        VsphereDatastoreEnumerationHelper.handleDatastoreChanges(this, segregatedOverlays.datastores, enumerationProgress);
                        logInfo("Processing incremental changes for compute resource for datacenter [%s]!", collectorDetails.datacenter);
                        VsphereComputeResourceEnumerationHelper.handleComputeResourceChanges(this, segregatedOverlays.clusters, enumerationProgress, client);
                        logInfo("Processing incremental changes for host system for datacenter [%s]!", collectorDetails.datacenter);
                        VSphereHostSystemEnumerationHelper.handleHostSystemChanges(this, segregatedOverlays.hosts, enumerationProgress, client);
                        logInfo("Processing incremental changes for resource pool for datacenter [%s]!", collectorDetails.datacenter);
                        VSphereResourcePoolEnumerationHelper.handleResourcePoolChanges(this, segregatedOverlays.resourcePools, enumerationProgress, client);
                    }
                    if (!vmUpdates.isEmpty()) {
                        logInfo("Processing incremental changes for virtual machines for datacenter [%s]!", collectorDetails.datacenter);
                        VSphereVirtualMachineEnumerationHelper.handleVMChanges(this, vmUpdates, enumerationProgress, client);
                    }
                    // sync storage profiles
                    logInfo("Syncing storage profiles for datacenter [%s]!", collectorDetails.datacenter);
                    VsphereStoragePolicyEnumerationHelper.syncStorageProfiles(this, client, enumerationProgress);
                }
                mgr.patchTask(TaskStage.FINISHED);
            } catch (Exception exception) {
                String msg = "Error processing PropertyCollector results during incremental retrieval";
                logWarning(() -> msg + ": " + exception.toString());
                mgr.patchTaskToFailure(exception);
                // self-delete the service so that full enumeration kicks in on the next invocation.
                // TODO: This is not complete. We need to enable owner selection on this service.
                selfDeleteService();
                return;
            } finally {
                vapiConnection.close();
            }
        });
    }, mgr).sendWith(this);
}
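For context, the sketch below shows how a caller might trigger this handler from a Xenon host. Only the body type (a VSphereIncrementalEnumerationRequest wrapping a ComputeEnumerateResourceRequest) is taken from the handler above; the service path and the prepared request are assumptions, not part of the photon-model sources.

// Hedged sketch (not from the photon-model sources). Assumes: "servicePath" points at a
// running VSphereIncrementalEnumerationService instance and "enumerateRequest" was
// populated elsewhere.
static void triggerIncrementalEnumeration(ServiceHost host, String servicePath,
        ComputeEnumerateResourceRequest enumerateRequest) {
    VSphereIncrementalEnumerationRequest body = new VSphereIncrementalEnumerationRequest();
    body.request = enumerateRequest;
    Operation.createPatch(host, servicePath)
            .setBody(body)
            .setCompletion((op, ex) -> {
                if (ex != null) {
                    host.log(java.util.logging.Level.WARNING,
                            "Incremental enumeration PATCH failed: %s", ex);
                }
            })
            .sendWith(host);
}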
Use of com.vmware.vim25.ObjectUpdate in project photon-model by VMware.
The class VSphereIncrementalEnumerationService, method collectResourcesData.
private List<ObjectUpdate> collectResourcesData(CollectorDetails collectorDetails) {
    EnumerationClient.ObjectUpdateIterator resourcesIterator = new EnumerationClient.ObjectUpdateIterator(
            collectorDetails.resourcesPropertyCollector, this.connection.getVimPort(), collectorDetails.resourcesCollectorVersion);
    List<ObjectUpdate> updates = collectUpdates(resourcesIterator);
    // update the version number soon after iterating
    collectorDetails.resourcesCollectorVersion = resourcesIterator.getVersion();
    return updates;
}
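collectUpdates is referenced here but not shown in this excerpt. Assuming ObjectUpdateIterator pages UpdateSet results the same way the loop in refreshResourcesOnDatacenter does, a plausible sketch (same imports as the surrounding code) is:

// Hedged sketch: drain an ObjectUpdateIterator into a flat list of ObjectUpdate entries.
// Mirrors the paging loop in refreshResourcesOnDatacenter; the real collectUpdates may differ.
private List<ObjectUpdate> collectUpdates(EnumerationClient.ObjectUpdateIterator iterator) {
    List<ObjectUpdate> updates = new ArrayList<>();
    while (iterator.hasNext()) {
        UpdateSet page = iterator.next();
        if (page == null) {
            continue;
        }
        for (PropertyFilterUpdate filterUpdate : page.getFilterSet()) {
            updates.addAll(filterUpdate.getObjectSet());
        }
    }
    return updates;
}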
Use of com.vmware.vim25.ObjectUpdate in project photon-model by VMware.
The class VSphereVirtualMachineEnumerationHelper, method handleVMChanges.
public static void handleVMChanges(VSphereIncrementalEnumerationService service, List<ObjectUpdate> resourcesUpdates,
        EnumerationProgress enumerationProgress, EnumerationClient client) {
    List<VmOverlay> vmOverlays = new ArrayList<>();
    for (ObjectUpdate objectUpdate : resourcesUpdates) {
        if (VimUtils.isVirtualMachine(objectUpdate.getObj())) {
            VmOverlay vm = new VmOverlay(objectUpdate);
            if (vm.getInstanceUuid() != null || !objectUpdate.getKind().equals(ObjectUpdateKind.ENTER)) {
                vmOverlays.add(vm);
            }
        }
    }
    for (VmOverlay vmOverlay : vmOverlays) {
        if (ObjectUpdateKind.ENTER == vmOverlay.getObjectUpdateKind()) {
            createNewVm(service, enumerationProgress, vmOverlay);
        } else {
            ComputeEnumerateResourceRequest request = enumerationProgress.getRequest();
            QueryTask task = queryForVm(enumerationProgress, request.resourceLink(), null, vmOverlay.getId());
            VsphereEnumerationHelper.withTaskResults(service, task, result -> {
                if (!result.documentLinks.isEmpty()) {
                    ComputeState oldDocument = VsphereEnumerationHelper.convertOnlyResultToDocument(result, ComputeState.class);
                    if (ObjectUpdateKind.MODIFY == vmOverlay.getObjectUpdateKind()) {
                        updateVm(service, oldDocument, enumerationProgress, vmOverlay, false);
                    } else {
                        deleteVM(enumerationProgress, vmOverlay, service, oldDocument);
                    }
                } else {
                    enumerationProgress.getVmTracker().arrive();
                }
            });
        }
    }
}
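The branching above maps the ObjectUpdateKind values onto create, update, and delete of compute documents. The sketch below shows that mapping in isolation; the handler parameters are placeholders standing in for createNewVm, updateVm, and deleteVM, not photon-model methods.

import com.vmware.vim25.ObjectUpdate;
import com.vmware.vim25.ObjectUpdateKind;

final class UpdateKindDispatch {
    private UpdateKindDispatch() {
    }

    // Placeholder handlers; in handleVMChanges these roles are played by
    // createNewVm, updateVm and deleteVM respectively.
    static void dispatch(ObjectUpdate update, Runnable onEnter, Runnable onModify, Runnable onLeave) {
        ObjectUpdateKind kind = update.getKind();
        if (ObjectUpdateKind.ENTER == kind) {
            onEnter.run();   // object appeared since the stored collector version
        } else if (ObjectUpdateKind.MODIFY == kind) {
            onModify.run();  // object changed -> patch the existing document
        } else {
            onLeave.run();   // ObjectUpdateKind.LEAVE -> object removed -> delete the document
        }
    }
}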