use of org.ovirt.engine.core.bll.scheduling.pending.PendingVM in project ovirt-engine by oVirt.
the class VmAffinityPolicyUnit method getAcceptableHostsWithPriorities.
/**
 * Get hosts which satisfy the affinity groups for a VM.
 * Also returns the number of non-migratable VMs running on each host.
 *
 * @param onlyEnforcing - check only enforcing affinity groups
 * @return map of host ids to the number of non-migratable VMs running on that host
 */
protected Map<Guid, Integer> getAcceptableHostsWithPriorities(boolean onlyEnforcing, List<VDS> hosts, VM vm, PerHostMessages messages) {
    List<AffinityGroup> affinityGroups = affinityGroupDao.getAllAffinityGroupsByVmId(vm.getId());
    // No affinity groups found for the VM, return all hosts
    if (affinityGroups.isEmpty()) {
        return hosts.stream().collect(Collectors.toMap(VDS::getId, h -> 0));
    }
    Set<Guid> allVmIdsPositive = new HashSet<>();
    Set<Guid> allVmIdsNegative = new HashSet<>();
    // Collect all VMs from the affinity groups, split into positive and negative sets
    for (AffinityGroup affinityGroup : affinityGroups) {
        if (affinityGroup.isVmAffinityEnabled() && (!onlyEnforcing || affinityGroup.isVmEnforcing())) {
            for (Guid entityId : affinityGroup.getVmIds()) {
                // Skip the current VM
                if (entityId.equals(vm.getId())) {
                    continue;
                }
                if (affinityGroup.isVmPositive()) {
                    allVmIdsPositive.add(entityId);
                } else if (affinityGroup.isVmNegative()) {
                    allVmIdsNegative.add(entityId);
                }
            }
        }
    }
    // No entities, all hosts are valid
    if (allVmIdsPositive.isEmpty() && allVmIdsNegative.isEmpty()) {
        return hosts.stream().collect(Collectors.toMap(VDS::getId, h -> 0));
    }
    // Get all running VMs in the cluster
    Map<Guid, VM> runningVMsMap = new HashMap<>();
    for (VM iter : vmDao.getAllRunningByCluster(vm.getClusterId())) {
        runningVMsMap.put(iter.getId(), iter);
    }
    // Update the VM list with pending VMs
    for (PendingVM resource : pendingResourceManager.pendingResources(PendingVM.class)) {
        VM pendingVm = new VM();
        pendingVm.setId(resource.getVm());
        pendingVm.setRunOnVds(resource.getHost());
        runningVMsMap.put(pendingVm.getId(), pendingVm);
    }
    Map<Guid, Integer> acceptableHosts = new HashMap<>();
    // Group all hosts for VMs with positive affinity
    for (Guid id : allVmIdsPositive) {
        VM runVm = runningVMsMap.get(id);
        if (runVm != null && runVm.getRunOnVds() != null) {
            acceptableHosts.merge(runVm.getRunOnVds(), isVmMigratable(runVm) ? 0 : 1, (a, b) -> a + b);
        }
    }
    Set<Guid> unacceptableHosts = new HashSet<>();
    // Group all hosts for VMs with negative affinity
    for (Guid id : allVmIdsNegative) {
        VM runVm = runningVMsMap.get(id);
        if (runVm != null && runVm.getRunOnVds() != null) {
            unacceptableHosts.add(runVm.getRunOnVds());
        }
    }
    Map<Guid, VDS> hostMap = new HashMap<>();
    for (VDS host : hosts) {
        hostMap.put(host.getId(), host);
    }
    // Compute the intersection of hosts with positive and negative affinity and report the
    // contradicting rules to the log
    unacceptableHosts.retainAll(acceptableHosts.keySet());
    for (Guid id : unacceptableHosts) {
        log.warn("Host '{}' ({}) belongs to both positive and negative affinity list" + " while scheduling VM '{}' ({})", hostMap.get(id).getName(), id, vm.getName(), vm.getId());
    }
    // No hosts associated with positive affinity, all hosts are applicable.
    if (acceptableHosts.isEmpty()) {
        acceptableHosts = hosts.stream().collect(Collectors.toMap(h -> h.getId(), h -> 0));
    } else if (acceptableHosts.size() > 1) {
        log.warn("Invalid affinity situation was detected while scheduling VM '{}' ({})." + " VMs belonging to the same affinity groups are running on more than one host.", vm.getName(), vm.getId());
    }
    // Report hosts that were removed because they violate the positive affinity rules
    for (VDS host : hosts) {
        if (!acceptableHosts.containsKey(host.getId())) {
            messages.addMessage(host.getId(), // TODO compute the affinity rule names
                    String.format("$affinityRules %1$s", ""));
            messages.addMessage(host.getId(), EngineMessage.VAR__DETAIL__AFFINITY_FAILED_POSITIVE.toString());
        }
    }
    // Remove hosts that run VMs with negative affinity to the currently scheduled VM
    for (Guid id : allVmIdsNegative) {
        VM runVm = runningVMsMap.get(id);
        if (runVm != null && runVm.getRunOnVds() != null && acceptableHosts.containsKey(runVm.getRunOnVds())) {
            acceptableHosts.remove(runVm.getRunOnVds());
            messages.addMessage(runVm.getRunOnVds(), // TODO compute the affinity rule names
                    String.format("$affinityRules %1$s", ""));
            messages.addMessage(runVm.getRunOnVds(), EngineMessage.VAR__DETAIL__AFFINITY_FAILED_NEGATIVE.toString());
        }
    }
    return acceptableHosts;
}
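For orientation, a minimal sketch of how a caller might consume the returned map (this caller is hypothetical and not part of the listing above): the key set gives the hosts that do not break VM affinity, and the values count non-migratable VMs pinned to each host, which a weighting step could prefer since those VMs cannot be moved away later.

    // Hypothetical caller, for illustration only.
    Map<Guid, Integer> acceptable = getAcceptableHostsWithPriorities(true, hosts, vm, messages);
    // Keep only hosts that satisfy the (enforcing) affinity groups
    List<VDS> filteredHosts = hosts.stream()
            .filter(h -> acceptable.containsKey(h.getId()))
            .collect(Collectors.toList());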
use of org.ovirt.engine.core.bll.scheduling.pending.PendingVM in project ovirt-engine by oVirt.
the class SchedulingManager method schedule.
public Optional<Guid> schedule(Cluster cluster, VM vm, List<Guid> hostBlackList, List<Guid> hostWhiteList, List<Guid> destHostIdList, List<String> messages, RunVmDelayer runVmDelayer, String correlationId) {
    prepareClusterLock(cluster.getId());
    try {
        log.debug("Scheduling started, correlation Id: {}", correlationId);
        checkAllowOverbooking(cluster);
        lockCluster(cluster.getId());
        List<VDS> vdsList = vdsDao.getAllForClusterWithStatus(cluster.getId(), VDSStatus.Up);
        vdsList = removeBlacklistedHosts(vdsList, hostBlackList);
        vdsList = keepOnlyWhitelistedHosts(vdsList, hostWhiteList);
        refreshCachedPendingValues(vdsList);
        ClusterPolicy policy = policyMap.get(cluster.getClusterPolicyId());
        Map<String, String> parameters = createClusterPolicyParameters(cluster);
        vdsList = runFilters(policy.getFilters(), cluster, vdsList, vm, parameters, policy.getFilterPositionMap(), messages, runVmDelayer, true, correlationId);
        if (vdsList.isEmpty()) {
            return Optional.empty();
        }
        Optional<Guid> bestHost = selectBestHost(cluster, vm, destHostIdList, vdsList, policy, parameters);
        if (bestHost.isPresent() && !bestHost.get().equals(vm.getRunOnVds())) {
            Guid bestHostId = bestHost.get();
            getPendingResourceManager().addPending(new PendingCpuCores(bestHostId, vm, vm.getNumOfCpus()));
            getPendingResourceManager().addPending(new PendingMemory(bestHostId, vm, vmOverheadCalculator.getStaticOverheadInMb(vm)));
            getPendingResourceManager().addPending(new PendingOvercommitMemory(bestHostId, vm, vmOverheadCalculator.getTotalRequiredMemoryInMb(vm)));
            getPendingResourceManager().addPending(new PendingVM(bestHostId, vm));
            // Add pending records for all specified hugepage sizes
            for (Map.Entry<Integer, Integer> hugepage : HugePageUtils.getHugePages(vm.getStaticData()).entrySet()) {
                getPendingResourceManager().addPending(new PendingHugePages(bestHostId, vm, hugepage.getKey(), hugepage.getValue()));
            }
            getPendingResourceManager().notifyHostManagers(bestHostId);
            markVfsAsUsedByVm(vm, bestHostId);
        }
        return bestHost;
    } catch (InterruptedException e) {
        log.error("scheduling interrupted, correlation Id: {}: {}", correlationId, e.getMessage());
        log.debug("Exception: ", e);
        return Optional.empty();
    } finally {
        releaseCluster(cluster.getId());
        log.debug("Scheduling ended, correlation Id: {}", correlationId);
    }
}
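The PendingVM record added here is what the first snippet later reads back through pendingResourceManager.pendingResources(PendingVM.class). As a rough sketch (the variable names are illustrative, not from the listing), the same accessors can be used to count VMs that have been scheduled but not yet started, per host:

    // Sketch only: count pending (scheduled but not yet running) VMs per host,
    // using the PendingVM accessors seen in getAcceptableHostsWithPriorities().
    Map<Guid, Integer> pendingVmsPerHost = new HashMap<>();
    for (PendingVM resource : pendingResourceManager.pendingResources(PendingVM.class)) {
        pendingVmsPerHost.merge(resource.getHost(), 1, Integer::sum);
    }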