Usage of org.ovirt.engine.core.common.scheduling.PerHostMessages in the ovirt-engine project (oVirt) — class VmAffinityPolicyUnit, method getAcceptableHostsWithPriorities:
/**
 * Get hosts which satisfy affinity groups for a VM.
 * Returns also the number of nonmigratable VMs running on each host.
 *
 * @param onlyEnforcing - check only enforcing affinity groups
 * @param hosts candidate hosts to evaluate
 * @param vm the VM being scheduled
 * @param messages per-host scheduling messages; a rejection reason is added for each host
 *                 that violates an affinity rule
 * @return Map containing host ids and the number of nonmigratable VMs running on the host
 */
protected Map<Guid, Integer> getAcceptableHostsWithPriorities(boolean onlyEnforcing, List<VDS> hosts, VM vm, PerHostMessages messages) {
    List<AffinityGroup> affinityGroups = affinityGroupDao.getAllAffinityGroupsByVmId(vm.getId());
    // No affinity groups found for the VM: every candidate host is acceptable.
    if (affinityGroups.isEmpty()) {
        return hosts.stream().collect(Collectors.toMap(VDS::getId, h -> 0));
    }
    Set<Guid> allVmIdsPositive = new HashSet<>();
    Set<Guid> allVmIdsNegative = new HashSet<>();
    // Collect all VMs from the relevant affinity groups, split by positive/negative polarity.
    for (AffinityGroup affinityGroup : affinityGroups) {
        if (affinityGroup.isVmAffinityEnabled() && (!onlyEnforcing || affinityGroup.isVmEnforcing())) {
            for (Guid entityId : affinityGroup.getVmIds()) {
                // Skip the VM currently being scheduled - it cannot constrain itself.
                if (entityId.equals(vm.getId())) {
                    continue;
                }
                if (affinityGroup.isVmPositive()) {
                    allVmIdsPositive.add(entityId);
                } else if (affinityGroup.isVmNegative()) {
                    allVmIdsNegative.add(entityId);
                }
            }
        }
    }
    // No other entities in any group: no constraints, all hosts are valid.
    if (allVmIdsPositive.isEmpty() && allVmIdsNegative.isEmpty()) {
        return hosts.stream().collect(Collectors.toMap(VDS::getId, h -> 0));
    }
    // Index all running VMs in the cluster by their id.
    Map<Guid, VM> runningVMsMap = new HashMap<>();
    for (VM iter : vmDao.getAllRunningByCluster(vm.getClusterId())) {
        runningVMsMap.put(iter.getId(), iter);
    }
    // Overlay pending VMs (scheduled but not yet started) so they count as running
    // on their target host; only id and target host are known for them.
    for (PendingVM resource : pendingResourceManager.pendingResources(PendingVM.class)) {
        VM pendingVm = new VM();
        pendingVm.setId(resource.getVm());
        pendingVm.setRunOnVds(resource.getHost());
        runningVMsMap.put(pendingVm.getId(), pendingVm);
    }
    Map<Guid, Integer> acceptableHosts = new HashMap<>();
    // Hosts running VMs with positive affinity; the value accumulates the count of
    // nonmigratable VMs pinned to that host.
    for (Guid id : allVmIdsPositive) {
        VM runVm = runningVMsMap.get(id);
        if (runVm != null && runVm.getRunOnVds() != null) {
            acceptableHosts.merge(runVm.getRunOnVds(), isVmMigratable(runVm) ? 0 : 1, (a, b) -> a + b);
        }
    }
    Set<Guid> unacceptableHosts = new HashSet<>();
    // Hosts running VMs with negative affinity.
    for (Guid id : allVmIdsNegative) {
        VM runVm = runningVMsMap.get(id);
        if (runVm != null && runVm.getRunOnVds() != null) {
            unacceptableHosts.add(runVm.getRunOnVds());
        }
    }
    Map<Guid, VDS> hostMap = new HashMap<>();
    for (VDS host : hosts) {
        hostMap.put(host.getId(), host);
    }
    // Compute the intersection of hosts with positive and negative affinity and report
    // the contradicting rules to the log.
    unacceptableHosts.retainAll(acceptableHosts.keySet());
    for (Guid id : unacceptableHosts) {
        log.warn("Host '{}' ({}) belongs to both positive and negative affinity list" + " while scheduling VM '{}' ({})", hostMap.get(id).getName(), id, vm.getName(), vm.getId());
    }
    // No hosts associated with positive affinity, all hosts are applicable.
    if (acceptableHosts.isEmpty()) {
        acceptableHosts = hosts.stream().collect(Collectors.toMap(h -> h.getId(), h -> 0));
    } else if (acceptableHosts.size() > 1) {
        // Positive-affinity VMs are spread over several hosts - the groups are already violated.
        log.warn("Invalid affinity situation was detected while scheduling VM '{}' ({})." + " VMs belonging to the same affinity groups are running on more than one host.", vm.getName(), vm.getId());
    }
    // Report hosts that were removed because of violating the positive affinity rules.
    for (VDS host : hosts) {
        if (!acceptableHosts.containsKey(host.getId())) {
            messages.addMessage(host.getId(), // TODO compute the affinity rule names
            String.format("$affinityRules %1$s", ""));
            messages.addMessage(host.getId(), EngineMessage.VAR__DETAIL__AFFINITY_FAILED_POSITIVE.toString());
        }
    }
    // Remove hosts that contain VMs with negative affinity to the currently scheduled VM.
    for (Guid id : allVmIdsNegative) {
        VM runVm = runningVMsMap.get(id);
        if (runVm != null && runVm.getRunOnVds() != null && acceptableHosts.containsKey(runVm.getRunOnVds())) {
            acceptableHosts.remove(runVm.getRunOnVds());
            messages.addMessage(runVm.getRunOnVds(), // TODO compute the affinity rule names
            String.format("$affinityRules %1$s", ""));
            messages.addMessage(runVm.getRunOnVds(), EngineMessage.VAR__DETAIL__AFFINITY_FAILED_NEGATIVE.toString());
        }
    }
    return acceptableHosts;
}
Usage of org.ovirt.engine.core.common.scheduling.PerHostMessages in the ovirt-engine project (oVirt) — class VmAffinityWeightPolicyUnit, method score:
/**
 * Weighs every candidate host for the given VM based on VM-to-VM affinity:
 * hosts that satisfy the (soft) affinity groups get DEFAULT_SCORE plus a penalty
 * proportional to how many fewer nonmigratable VMs they hold than the best host,
 * while hosts violating affinity receive MaxSchedulerWeight.
 */
@Override
public List<Pair<Guid, Integer>> score(Cluster cluster, List<VDS> hosts, VM vm, Map<String, String> parameters) {
    // Soft evaluation (onlyEnforcing = false); per-host rejection messages are discarded.
    Map<Guid, Integer> priorities = getAcceptableHostsWithPriorities(false, hosts, vm, new PerHostMessages());
    // Highest nonmigratable-VM count across all acceptable hosts.
    int maxPinned = 0;
    for (int pinnedCount : priorities.values()) {
        maxPinned = Math.max(maxPinned, pinnedCount);
    }
    List<Pair<Guid, Integer>> weights = new ArrayList<>(hosts.size());
    for (VDS candidate : hosts) {
        Guid hostId = candidate.getId();
        Integer pinned = priorities.get(hostId);
        // Hosts absent from the map violate affinity and get the maximal weight.
        int weight = (pinned == null) ? MaxSchedulerWeight : DEFAULT_SCORE + maxPinned - pinned;
        weights.add(new Pair<>(hostId, weight));
    }
    return weights;
}
Usage of org.ovirt.engine.core.common.scheduling.PerHostMessages in the ovirt-engine project (oVirt) — class VmToHostAffinityPolicyUnit, method getHostViolationCount:
/**
 * Calculate and return the number of affinity group violations
 * for each host that would happen if the vm was started on it.
 *
 * @param enforcing true - hard constraint / false - soft constraint
 * @param hosts list of available hosts in the cluster
 * @param vm current vm targeted for migration
 * @param messages log messages
 * @return map of hosts with affinity group violations count per host id.
 */
public Map<Guid, Integer> getHostViolationCount(boolean enforcing, List<VDS> hosts, VM vm, PerHostMessages messages) {
    Map<Guid, Integer> violations = new HashMap<>();
    List<AffinityGroup> groups = affinityGroupDao.getAllAffinityGroupsByVmId(vm.getId());
    // The VM belongs to no affinity groups - nothing can be violated.
    if (groups.isEmpty()) {
        return violations;
    }
    Map<Guid, VDS> hostsById = hosts.stream().collect(Collectors.toMap(VDS::getId, h -> h));
    for (AffinityGroup group : groups) {
        // Only consider groups matching the requested constraint hardness.
        if (group.isVdsEnforcing() != enforcing) {
            continue;
        }
        List<Guid> groupHostIds = group.getVdsIds();
        boolean positive = group.isVdsPositive();
        for (Guid hostId : hostsById.keySet()) {
            // Positive rule: violated by hosts OUTSIDE the group's host list.
            // Negative rule: violated by hosts INSIDE the group's host list.
            boolean violated = positive != groupHostIds.contains(hostId);
            if (violated) {
                // TODO compute the affinity rule names
                messages.addMessage(hostId, String.format("$affinityRules %1$s", ""));
                messages.addMessage(hostId, positive
                        ? EngineMessage.VAR__DETAIL__AFFINITY_FAILED_POSITIVE.toString()
                        : EngineMessage.VAR__DETAIL__AFFINITY_FAILED_NEGATIVE.toString());
                violations.put(hostId, 1 + violations.getOrDefault(hostId, INITIAL_HOST_SCORE));
            }
        }
    }
    return violations;
}
Usage of org.ovirt.engine.core.common.scheduling.PerHostMessages in the ovirt-engine project (oVirt) — class VmToHostAffinityWeightPolicyUnit, method score:
/**
 * Weighs every candidate host by its number of soft VM-to-host affinity
 * violations; hosts with no recorded violations receive DEFAULT_SCORE.
 */
@Override
public List<Pair<Guid, Integer>> score(Cluster cluster, List<VDS> hosts, VM vm, Map<String, String> parameters) {
    // Soft constraints only (enforcing = false); violation messages are not surfaced here.
    Map<Guid, Integer> violationsByHost = getHostViolationCount(false, hosts, vm, new PerHostMessages());
    List<Pair<Guid, Integer>> weights = new ArrayList<>(hosts.size());
    for (VDS host : hosts) {
        Integer hostScore = violationsByHost.getOrDefault(host.getId(), DEFAULT_SCORE);
        weights.add(new Pair<>(host.getId(), hostScore));
    }
    return weights;
}
Usage of org.ovirt.engine.core.common.scheduling.PerHostMessages in the ovirt-engine project (oVirt) — class VmAffinityFilterPolicyUnitTest, method testPositiveAffinity:
@Test
public void testPositiveAffinity() {
    List<VDS> candidateHosts = Arrays.asList(host1, host2);
    // Two VMs already running on host2 share an enforcing positive affinity
    // group with newVm, so the filter must keep host2 and only host2.
    VM runningVm1 = createVmRunning(host2);
    VM runningVm2 = createVmRunning(host2);
    affinityGroups.add(createAffinityGroup(cluster, EntityAffinityRule.POSITIVE, true, runningVm1, runningVm2, newVm));
    assertThat(policyUnit.filter(cluster, candidateHosts, newVm, null, new PerHostMessages())).containsOnly(host2);
}
Aggregations