
Example 1 with PerHostMessages

use of org.ovirt.engine.core.common.scheduling.PerHostMessages in project ovirt-engine by oVirt.

the class VmAffinityPolicyUnit method getAcceptableHostsWithPriorities.

/**
 * Get the hosts that satisfy the affinity groups of a VM.
 * Also returns the number of non-migratable VMs running on each host.
 *
 * @param onlyEnforcing - check only enforcing (hard) affinity groups
 * @param hosts - candidate hosts in the cluster
 * @param vm - the VM being scheduled
 * @param messages - per-host accumulator for scheduling log messages
 * @return Map containing host ids and the number of non-migratable VMs running on the host
 */
protected Map<Guid, Integer> getAcceptableHostsWithPriorities(boolean onlyEnforcing, List<VDS> hosts, VM vm, PerHostMessages messages) {
    List<AffinityGroup> affinityGroups = affinityGroupDao.getAllAffinityGroupsByVmId(vm.getId());
    // no affinity groups found for the VM; return all hosts
    if (affinityGroups.isEmpty()) {
        return hosts.stream().collect(Collectors.toMap(VDS::getId, h -> 0));
    }
    Set<Guid> allVmIdsPositive = new HashSet<>();
    Set<Guid> allVmIdsNegative = new HashSet<>();
    // Group by all vms in affinity groups per positive or negative
    for (AffinityGroup affinityGroup : affinityGroups) {
        if (affinityGroup.isVmAffinityEnabled() && (!onlyEnforcing || affinityGroup.isVmEnforcing())) {
            for (Guid entityId : affinityGroup.getVmIds()) {
                // Skip current VM
                if (entityId.equals(vm.getId())) {
                    continue;
                }
                if (affinityGroup.isVmPositive()) {
                    allVmIdsPositive.add(entityId);
                } else if (affinityGroup.isVmNegative()) {
                    allVmIdsNegative.add(entityId);
                }
            }
        }
    }
    // No entities, all hosts are valid
    if (allVmIdsPositive.isEmpty() && allVmIdsNegative.isEmpty()) {
        return hosts.stream().collect(Collectors.toMap(VDS::getId, h -> 0));
    }
    // Get all running VMs in cluster
    Map<Guid, VM> runningVMsMap = new HashMap<>();
    for (VM iter : vmDao.getAllRunningByCluster(vm.getClusterId())) {
        runningVMsMap.put(iter.getId(), iter);
    }
    // Update the VM list with pending VMs
    for (PendingVM resource : pendingResourceManager.pendingResources(PendingVM.class)) {
        VM pendingVm = new VM();
        pendingVm.setId(resource.getVm());
        pendingVm.setRunOnVds(resource.getHost());
        runningVMsMap.put(pendingVm.getId(), pendingVm);
    }
    Map<Guid, Integer> acceptableHosts = new HashMap<>();
    // Group all hosts for VMs with positive affinity
    for (Guid id : allVmIdsPositive) {
        VM runVm = runningVMsMap.get(id);
        if (runVm != null && runVm.getRunOnVds() != null) {
            acceptableHosts.merge(runVm.getRunOnVds(), isVmMigratable(runVm) ? 0 : 1, (a, b) -> a + b);
        }
    }
    Set<Guid> unacceptableHosts = new HashSet<>();
    // Group all hosts for VMs with negative affinity
    for (Guid id : allVmIdsNegative) {
        VM runVm = runningVMsMap.get(id);
        if (runVm != null && runVm.getRunOnVds() != null) {
            unacceptableHosts.add(runVm.getRunOnVds());
        }
    }
    Map<Guid, VDS> hostMap = new HashMap<>();
    for (VDS host : hosts) {
        hostMap.put(host.getId(), host);
    }
    // Compute the intersection of hosts with positive and negative affinity and report that
    // contradicting rules to the log
    unacceptableHosts.retainAll(acceptableHosts.keySet());
    for (Guid id : unacceptableHosts) {
        log.warn("Host '{}' ({}) belongs to both positive and negative affinity list" + " while scheduling VM '{}' ({})", hostMap.get(id).getName(), id, vm.getName(), vm.getId());
    }
    // No hosts associated with positive affinity, all hosts are applicable.
    if (acceptableHosts.isEmpty()) {
        acceptableHosts = hosts.stream().collect(Collectors.toMap(VDS::getId, h -> 0));
    } else if (acceptableHosts.size() > 1) {
        log.warn("Invalid affinity situation was detected while scheduling VM '{}' ({})." + " VMs belonging to the same affinity groups are running on more than one host.", vm.getName(), vm.getId());
    }
    // Report hosts that were removed for violating the positive affinity rules
    for (VDS host : hosts) {
        if (!acceptableHosts.containsKey(host.getId())) {
            // TODO compute the affinity rule names
            messages.addMessage(host.getId(), String.format("$affinityRules %1$s", ""));
            messages.addMessage(host.getId(), EngineMessage.VAR__DETAIL__AFFINITY_FAILED_POSITIVE.toString());
        }
    }
    // Remove hosts running VMs with negative affinity to the VM being scheduled
    for (Guid id : allVmIdsNegative) {
        VM runVm = runningVMsMap.get(id);
        if (runVm != null && runVm.getRunOnVds() != null && acceptableHosts.containsKey(runVm.getRunOnVds())) {
            acceptableHosts.remove(runVm.getRunOnVds());
            // TODO compute the affinity rule names
            messages.addMessage(runVm.getRunOnVds(), String.format("$affinityRules %1$s", ""));
            messages.addMessage(runVm.getRunOnVds(), EngineMessage.VAR__DETAIL__AFFINITY_FAILED_NEGATIVE.toString());
        }
    }
    return acceptableHosts;
}
Also used : Logger(org.slf4j.Logger) PolicyUnitImpl(org.ovirt.engine.core.bll.scheduling.PolicyUnitImpl) PerHostMessages(org.ovirt.engine.core.common.scheduling.PerHostMessages) Guid(org.ovirt.engine.core.compat.Guid) EngineMessage(org.ovirt.engine.core.common.errors.EngineMessage) LoggerFactory(org.slf4j.LoggerFactory) Set(java.util.Set) HashMap(java.util.HashMap) Collectors(java.util.stream.Collectors) VmDao(org.ovirt.engine.core.dao.VmDao) AffinityGroup(org.ovirt.engine.core.common.scheduling.AffinityGroup) HashSet(java.util.HashSet) Inject(javax.inject.Inject) List(java.util.List) VM(org.ovirt.engine.core.common.businessentities.VM) PendingVM(org.ovirt.engine.core.bll.scheduling.pending.PendingVM) PolicyUnit(org.ovirt.engine.core.common.scheduling.PolicyUnit) PendingResourceManager(org.ovirt.engine.core.bll.scheduling.pending.PendingResourceManager) Map(java.util.Map) AffinityGroupDao(org.ovirt.engine.core.dao.scheduling.AffinityGroupDao) MigrationSupport(org.ovirt.engine.core.common.businessentities.MigrationSupport) VDS(org.ovirt.engine.core.common.businessentities.VDS)
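
A hard filter built on this method only needs the key set of the returned map; the per-host counts matter to the weight unit in Example 2. The sketch below shows how such a caller could look, with onlyEnforcing set to true so that only hard affinity groups reject hosts. It is an assumption about the caller's shape, not the project's actual VmAffinityFilterPolicyUnit source:

public List<VDS> filter(Cluster cluster, List<VDS> hosts, VM vm, Map<String, String> parameters, PerHostMessages messages) {
    // Hosts absent from the map violate at least one enforcing affinity rule;
    // the helper has already attached the per-host failure messages.
    Map<Guid, Integer> acceptableHosts = getAcceptableHostsWithPriorities(true, hosts, vm, messages);
    return hosts.stream()
            .filter(host -> acceptableHosts.containsKey(host.getId()))
            .collect(Collectors.toList());
}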

Example 2 with PerHostMessages

use of org.ovirt.engine.core.common.scheduling.PerHostMessages in project ovirt-engine by oVirt.

the class VmAffinityWeightPolicyUnit method score.

@Override
public List<Pair<Guid, Integer>> score(Cluster cluster, List<VDS> hosts, VM vm, Map<String, String> parameters) {
    Map<Guid, Integer> acceptableHosts = getAcceptableHostsWithPriorities(false, hosts, vm, new PerHostMessages());
    int maxNonmigratableVms = acceptableHosts.values().stream().reduce(0, Integer::max);
    List<Pair<Guid, Integer>> retList = new ArrayList<>();
    for (VDS host : hosts) {
        int score = acceptableHosts.containsKey(host.getId()) ? DEFAULT_SCORE + maxNonmigratableVms - acceptableHosts.get(host.getId()) : MaxSchedulerWeight;
        retList.add(new Pair<>(host.getId(), score));
    }
    return retList;
}
Also used : VDS(org.ovirt.engine.core.common.businessentities.VDS) ArrayList(java.util.ArrayList) Guid(org.ovirt.engine.core.compat.Guid) PerHostMessages(org.ovirt.engine.core.common.scheduling.PerHostMessages) Pair(org.ovirt.engine.core.common.utils.Pair)
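
To make the scoring concrete, here is a small worked example. DEFAULT_SCORE and MaxSchedulerWeight come from the surrounding class; the values 1 and 1000 below, like the hostA/hostB ids, are illustrative assumptions:

// Assumed constants for illustration: DEFAULT_SCORE = 1, MaxSchedulerWeight = 1000.
Guid hostA = Guid.newGuid();
Guid hostB = Guid.newGuid();
Map<Guid, Integer> acceptableHosts = new HashMap<>();
acceptableHosts.put(hostA, 2); // hostA runs two non-migratable VMs from a positive group
acceptableHosts.put(hostB, 0); // hostB runs only migratable VMs from the group
int maxNonmigratableVms = 2;
// hostA: 1 + 2 - 2 = 1  -> lowest (best) weight; the VM is effectively pinned here
// hostB: 1 + 2 - 0 = 3  -> still acceptable, but ranked behind hostA
// any host not in the map -> MaxSchedulerWeight (1000), i.e. strongly penalized

Since lower weights rank better in the scheduler, hosts that already hold non-migratable affinity peers win ties, avoiding placements that migration could never repair.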

Example 3 with PerHostMessages

use of org.ovirt.engine.core.common.scheduling.PerHostMessages in project ovirt-engine by oVirt.

the class VmToHostAffinityPolicyUnit method getHostViolationCount.

/**
 * Calculate and return the number of affinity group violations
 * for each host that would happen if the vm was started on it.
 *
 * @param enforcing true - hard constraint / false - soft constraint
 * @param hosts     list of available hosts in the cluster
 * @param vm        current vm targeted for migration
 * @param messages  log messages
 * @return map of hosts with affinity group violations count per host id.
 */
public Map<Guid, Integer> getHostViolationCount(boolean enforcing, List<VDS> hosts, VM vm, PerHostMessages messages) {
    Map<Guid, Integer> hostViolations = new HashMap<>();
    List<AffinityGroup> affinityGroups = affinityGroupDao.getAllAffinityGroupsByVmId(vm.getId());
    // no affinity groups found for the VM; return all hosts with no violations
    if (affinityGroups.isEmpty()) {
        return hostViolations;
    }
    Map<Guid, VDS> hostMap = hosts.stream().collect(Collectors.toMap(VDS::getId, h -> h));
    for (AffinityGroup affinityGroup : affinityGroups) {
        if (affinityGroup.isVdsEnforcing() == enforcing) {
            List<Guid> vdsIds = affinityGroup.getVdsIds();
            if (affinityGroup.isVdsPositive()) {
                // log and score hosts that violate the positive affinity rules
                hostMap.keySet().stream().filter(host_id -> !vdsIds.contains(host_id)).forEach(id -> {
                    // TODO compute the affinity rule names
                    messages.addMessage(id, String.format("$affinityRules %1$s", ""));
                    messages.addMessage(id, EngineMessage.VAR__DETAIL__AFFINITY_FAILED_POSITIVE.toString());
                    hostViolations.put(id, 1 + hostViolations.getOrDefault(id, INITIAL_HOST_SCORE));
                });
            } else {
                // log and score hosts that violate the negative affinity rules
                hostMap.keySet().stream().filter(host_id -> vdsIds.contains(host_id)).forEach(id -> {
                    // TODO compute the affinity rule names
                    messages.addMessage(id, String.format("$affinityRules %1$s", ""));
                    messages.addMessage(id, EngineMessage.VAR__DETAIL__AFFINITY_FAILED_NEGATIVE.toString());
                    hostViolations.put(id, 1 + hostViolations.getOrDefault(id, INITIAL_HOST_SCORE));
                });
            }
        }
    }
    return hostViolations;
}
Also used : Logger(org.slf4j.Logger) PolicyUnitImpl(org.ovirt.engine.core.bll.scheduling.PolicyUnitImpl) PerHostMessages(org.ovirt.engine.core.common.scheduling.PerHostMessages) Guid(org.ovirt.engine.core.compat.Guid) EngineMessage(org.ovirt.engine.core.common.errors.EngineMessage) LoggerFactory(org.slf4j.LoggerFactory) HashMap(java.util.HashMap) Collectors(java.util.stream.Collectors) AffinityGroup(org.ovirt.engine.core.common.scheduling.AffinityGroup) Inject(javax.inject.Inject) List(java.util.List) VM(org.ovirt.engine.core.common.businessentities.VM) PolicyUnit(org.ovirt.engine.core.common.scheduling.PolicyUnit) PendingResourceManager(org.ovirt.engine.core.bll.scheduling.pending.PendingResourceManager) Map(java.util.Map) AffinityGroupDao(org.ovirt.engine.core.dao.scheduling.AffinityGroupDao) VDS(org.ovirt.engine.core.common.businessentities.VDS)
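
The counting rule reduces to: a positive group penalizes every host not on its vdsIds list, a negative group penalizes every host on it, and a host accumulates one violation per offending group. The following self-contained snippet simulates that rule with plain strings instead of the oVirt types (the class and its data are invented for the demo):

import java.util.*;

public class ViolationCountDemo {
    public static void main(String[] args) {
        List<String> hosts = Arrays.asList("host1", "host2");
        // One entry per affinity group: the hosts it lists, and whether it is positive.
        Map<List<String>, Boolean> groups = new LinkedHashMap<>();
        groups.put(Collections.singletonList("host1"), true);   // positive group
        groups.put(Collections.singletonList("host2"), false);  // negative group

        Map<String, Integer> violations = new HashMap<>();
        groups.forEach((vdsIds, positive) -> {
            for (String host : hosts) {
                boolean listed = vdsIds.contains(host);
                // positive groups penalize unlisted hosts, negative groups penalize listed ones
                if (positive != listed) {
                    violations.merge(host, 1, Integer::sum);
                }
            }
        });
        System.out.println(violations); // prints {host2=2}; host1 has no violations
    }
}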

Example 4 with PerHostMessages

use of org.ovirt.engine.core.common.scheduling.PerHostMessages in project ovirt-engine by oVirt.

the class VmToHostAffinityWeightPolicyUnit method score.

@Override
public List<Pair<Guid, Integer>> score(Cluster cluster, List<VDS> hosts, VM vm, Map<String, String> parameters) {
    Map<Guid, Integer> hostViolations = getHostViolationCount(false, hosts, vm, new PerHostMessages());
    List<Pair<Guid, Integer>> retList = new ArrayList<>();
    int score;
    for (VDS host : hosts) {
        score = hostViolations.containsKey(host.getId()) ? hostViolations.get(host.getId()) : DEFAULT_SCORE;
        retList.add(new Pair<>(host.getId(), score));
    }
    return retList;
}
Also used : VDS(org.ovirt.engine.core.common.businessentities.VDS) ArrayList(java.util.ArrayList) Guid(org.ovirt.engine.core.compat.Guid) PerHostMessages(org.ovirt.engine.core.common.scheduling.PerHostMessages) Pair(org.ovirt.engine.core.common.utils.Pair)
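
Note the division of labor this enables: calling getHostViolationCount with enforcing == false, as above, turns soft-group violations into weights, while a filter unit can pass enforcing == true and reject hosts outright. A hypothetical sketch of that hard-filter counterpart (not the project's actual VmToHostAffinityFilterPolicyUnit source):

public List<VDS> filter(Cluster cluster, List<VDS> hosts, VM vm, Map<String, String> parameters, PerHostMessages messages) {
    // Any host with at least one hard violation is excluded; the counter has
    // already recorded the per-host failure details in messages.
    Map<Guid, Integer> violations = getHostViolationCount(true, hosts, vm, messages);
    return hosts.stream()
            .filter(host -> !violations.containsKey(host.getId()))
            .collect(Collectors.toList());
}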

Example 5 with PerHostMessages

use of org.ovirt.engine.core.common.scheduling.PerHostMessages in project ovirt-engine by oVirt.

the class VmAffinityFilterPolicyUnitTest method testPositiveAffinity.

@Test
public void testPositiveAffinity() {
    List<VDS> hosts = Arrays.asList(host1, host2);
    VM vm1 = createVmRunning(host2);
    VM vm2 = createVmRunning(host2);
    affinityGroups.add(createAffinityGroup(cluster, EntityAffinityRule.POSITIVE, true, vm1, vm2, newVm));
    assertThat(policyUnit.filter(cluster, hosts, newVm, null, new PerHostMessages())).containsOnly(host2);
}
Also used : VDS(org.ovirt.engine.core.common.businessentities.VDS) VM(org.ovirt.engine.core.common.businessentities.VM) PerHostMessages(org.ovirt.engine.core.common.scheduling.PerHostMessages) Test(org.junit.Test)
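
The same fixture supports the mirror case. A sketch of a negative-affinity test, assuming the helpers shown above (createVmRunning, createAffinityGroup) and an EntityAffinityRule.NEGATIVE constant alongside POSITIVE:

@Test
public void testNegativeAffinity() {
    List<VDS> hosts = Arrays.asList(host1, host2);
    VM vm1 = createVmRunning(host2);
    // newVm must not share a host with vm1, so only host1 should survive the filter.
    affinityGroups.add(createAffinityGroup(cluster, EntityAffinityRule.NEGATIVE, true, vm1, newVm));
    assertThat(policyUnit.filter(cluster, hosts, newVm, null, new PerHostMessages())).containsOnly(host1);
}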

Aggregations

PerHostMessages (org.ovirt.engine.core.common.scheduling.PerHostMessages): 31 usages
Test (org.junit.Test): 24 usages
VDS (org.ovirt.engine.core.common.businessentities.VDS): 24 usages
VM (org.ovirt.engine.core.common.businessentities.VM): 9 usages
HugePage (org.ovirt.engine.core.common.businessentities.HugePage): 8 usages
HashMap (java.util.HashMap): 5 usages
AffinityGroup (org.ovirt.engine.core.common.scheduling.AffinityGroup): 5 usages
ArrayList (java.util.ArrayList): 4 usages
PendingHugePages (org.ovirt.engine.core.bll.scheduling.pending.PendingHugePages): 4 usages
Guid (org.ovirt.engine.core.compat.Guid): 4 usages
List (java.util.List): 3 usages
Map (java.util.Map): 3 usages
PolicyUnitImpl (org.ovirt.engine.core.bll.scheduling.PolicyUnitImpl): 3 usages
PendingResourceManager (org.ovirt.engine.core.bll.scheduling.pending.PendingResourceManager): 3 usages
Cluster (org.ovirt.engine.core.common.businessentities.Cluster): 3 usages
Label (org.ovirt.engine.core.common.businessentities.Label): 3 usages
LabelBuilder (org.ovirt.engine.core.common.businessentities.LabelBuilder): 3 usages
EngineMessage (org.ovirt.engine.core.common.errors.EngineMessage): 3 usages
PolicyUnit (org.ovirt.engine.core.common.scheduling.PolicyUnit): 3 usages
Logger (org.slf4j.Logger): 3 usages