Use of org.ovirt.engine.core.bll.scheduling.external.BalanceResult in project ovirt-engine by oVirt.
The class HaReservationBalancePolicyUnit, method balance:
@Override
public Optional<BalanceResult> balance(Cluster cluster, List<VDS> hosts, Map<String, String> parameters, ArrayList<String> messages) {
    Objects.requireNonNull(hosts);
    Objects.requireNonNull(cluster);
    log.debug("Started HA reservation balancing method for cluster '{}'", cluster.getName());
    if (!cluster.supportsHaReservation()) {
        return Optional.empty();
    }
    if (hosts.size() < 2) {
        log.debug("No balancing for cluster '{}', contains only {} host(s)", cluster.getName(), hosts.size());
        return Optional.empty();
    }
    Map<Guid, List<VM>> hostId2HaVmMapping = HaReservationHandling.mapHaVmToHostByCluster(cluster.getId());
    int haVmsInCluster = countHaVmsInCluster(hostId2HaVmMapping);
    int optimalHaDistribution = (int) Math.ceil((double) haVmsInCluster / hosts.size());
    int overUtilizationParam = DEFAULT_OVER_UTILIZATION_VALUE;
    if (parameters.get("OverUtilization") != null) {
        overUtilizationParam = NumberUtils.toInt(parameters.get("OverUtilization"));
    } else {
        overUtilizationParam = Config.<Integer>getValue(ConfigValues.OverUtilizationForHaReservation);
    }
    log.debug("optimalHaDistribution value: {}", optimalHaDistribution);
    int overUtilizationThreshold = (int) Math.ceil(optimalHaDistribution * (overUtilizationParam / 100.0));
    log.debug("overUtilizationThreshold value: {}", overUtilizationThreshold);
    List<VDS> overUtilizedHosts = getHostUtilizedByCondition(hosts, hostId2HaVmMapping, overUtilizationThreshold, Condition.MORE_THAN);
    if (overUtilizedHosts.isEmpty()) {
        log.debug("No over utilized hosts for cluster '{}'", cluster.getName());
        return Optional.empty();
    }
    List<VDS> underUtilizedHosts = getHostUtilizedByCondition(hosts, hostId2HaVmMapping, overUtilizationParam, Condition.LESS_THAN);
    if (underUtilizedHosts.isEmpty()) {
        log.debug("No under utilized hosts for cluster '{}'", cluster.getName());
        return Optional.empty();
    }
    // Get a random host from the over utilized hosts
    VDS randomHost = overUtilizedHosts.get(new Random().nextInt(overUtilizedHosts.size()));
    List<VM> migrableVmsOnRandomHost = getMigrableVmsRunningOnVds(randomHost.getId(), hostId2HaVmMapping);
    if (migrableVmsOnRandomHost.isEmpty()) {
        log.debug("No migratable VMs were found for cluster '{}'", cluster.getName());
        return Optional.empty();
    }
    // Get a random VM to migrate
    VM vm = migrableVmsOnRandomHost.get(new Random().nextInt(migrableVmsOnRandomHost.size()));
    log.info("VM to be migrated '{}'", vm.getName());
    List<Guid> underUtilizedHostsKeys = new ArrayList<>();
    for (VDS vds : underUtilizedHosts) {
        underUtilizedHostsKeys.add(vds.getId());
    }
    return Optional.of(new BalanceResult(vm.getId(), underUtilizedHostsKeys));
}
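The helpers countHaVmsInCluster and getHostUtilizedByCondition are called above but are not part of this excerpt. A minimal sketch of how they could be implemented, assuming the logic only needs to count the HA VMs mapped to each host and compare that count against the threshold (an illustrative reconstruction, not the actual oVirt code):

// Illustrative sketch only; the real oVirt helpers may differ.
private int countHaVmsInCluster(Map<Guid, List<VM>> hostId2HaVmMapping) {
    // Total number of HA VMs across all hosts of the cluster.
    int count = 0;
    for (List<VM> vms : hostId2HaVmMapping.values()) {
        count += vms.size();
    }
    return count;
}

private List<VDS> getHostUtilizedByCondition(List<VDS> hosts, Map<Guid, List<VM>> hostId2HaVmMapping, int threshold, Condition condition) {
    List<VDS> matchingHosts = new ArrayList<>();
    for (VDS host : hosts) {
        List<VM> haVms = hostId2HaVmMapping.get(host.getId());
        int haVmCount = haVms == null ? 0 : haVms.size();
        // MORE_THAN selects over-utilized hosts, LESS_THAN selects under-utilized ones.
        if ((condition == Condition.MORE_THAN && haVmCount > threshold)
                || (condition == Condition.LESS_THAN && haVmCount < threshold)) {
            matchingHosts.add(host);
        }
    }
    return matchingHosts;
}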
Use of org.ovirt.engine.core.bll.scheduling.external.BalanceResult in project ovirt-engine by oVirt.
The class SchedulingManager, method externalRunBalance:
private Optional<BalanceResult> externalRunBalance(PolicyUnitImpl policyUnit, Cluster cluster, List<VDS> hosts) {
    List<Guid> hostIDs = new ArrayList<>();
    for (VDS vds : hosts) {
        hostIDs.add(vds.getId());
    }
    Optional<BalanceResult> balanceResult = externalBroker.runBalance(policyUnit.getPolicyUnit().getName(), hostIDs, cluster.getClusterPolicyProperties());
    if (balanceResult.isPresent()) {
        return balanceResult;
    }
    log.warn("All external schedulers returned empty balancing result.");
    return Optional.empty();
}
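As an aside, the manual loop that collects the host IDs here (and in the HA reservation snippet above) could equally be written with the streams API; a small equivalent sketch, assuming java.util.stream.Collectors is imported:

// Stream-based construction of the host ID list, behaviorally identical to the loop above.
List<Guid> hostIDs = hosts.stream()
        .map(VDS::getId)
        .collect(Collectors.toList());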
Use of org.ovirt.engine.core.bll.scheduling.external.BalanceResult in project ovirt-engine by oVirt.
The class SchedulingManager, method performLoadBalancingImpl:
private void performLoadBalancingImpl() {
    log.debug("Load Balancer timer entered.");
    List<Cluster> clusters = clusterDao.getAll();
    for (Cluster cluster : clusters) {
        ClusterPolicy policy = policyMap.get(cluster.getClusterPolicyId());
        PolicyUnitImpl policyUnit = policyUnits.get(policy.getBalance());
        Optional<BalanceResult> balanceResult = Optional.empty();
        if (policyUnit.getPolicyUnit().isEnabled()) {
            List<VDS> hosts = vdsDao.getAllForClusterWithoutMigrating(cluster.getId());
            if (policyUnit.getPolicyUnit().isInternal()) {
                balanceResult = internalRunBalance(policyUnit, cluster, hosts);
            } else if (Config.<Boolean>getValue(ConfigValues.ExternalSchedulerEnabled)) {
                balanceResult = externalRunBalance(policyUnit, cluster, hosts);
            }
        }
        if (balanceResult.isPresent() && balanceResult.get().isValid()) {
            migrationHandler.migrateVM(balanceResult.get().getCandidateHosts(),
                    balanceResult.get().getVmToMigrate(),
                    MessageBundler.getMessage(AuditLogType.MIGRATION_REASON_LOAD_BALANCING));
        }
    }
}
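The internal counterpart internalRunBalance is referenced above but not shown on this page. Assuming it simply delegates to the policy unit's balance method, whose signature appears in the HaReservationBalancePolicyUnit and CpuAndMemoryBalancingPolicyUnit snippets, a hedged sketch could look like this (not necessarily the real SchedulingManager implementation):

private Optional<BalanceResult> internalRunBalance(PolicyUnitImpl policyUnit, Cluster cluster, List<VDS> hosts) {
    // Delegate to the unit's balance(...) with the cluster policy properties as parameters
    // and a fresh message list; both choices are assumptions made for this sketch.
    return policyUnit.balance(cluster, hosts, cluster.getClusterPolicyProperties(), new ArrayList<>());
}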
Use of org.ovirt.engine.core.bll.scheduling.external.BalanceResult in project ovirt-engine by oVirt.
The class CpuAndMemoryBalancingPolicyUnit, method balance:
@Override
public Optional<BalanceResult> balance(final Cluster cluster, List<VDS> hosts, Map<String, String> parameters, ArrayList<String> messages) {
    Objects.requireNonNull(hosts);
    Objects.requireNonNull(cluster);
    if (hosts.size() < 2) {
        log.debug("No balancing for cluster '{}', contains only {} host(s)", cluster.getName(), hosts.size());
        return Optional.empty();
    }
    final List<VDS> overUtilizedPrimaryHosts = getPrimarySources(cluster, hosts, parameters);
    final List<VDS> overUtilizedSecondaryHosts = getSecondarySources(cluster, hosts, parameters);
    // if there aren't any overutilized hosts, then there is nothing to balance...
    if ((overUtilizedPrimaryHosts == null || overUtilizedPrimaryHosts.isEmpty())
            && (overUtilizedSecondaryHosts == null || overUtilizedSecondaryHosts.isEmpty())) {
        log.debug("There is no over-utilized host in cluster '{}'", cluster.getName());
        return Optional.empty();
    }
    FindVmAndDestinations findVmAndDestinations = getFindVmAndDestinations(cluster, parameters);
    Optional<BalanceResult> result = Optional.empty();
    // try balancing based on CPU first
    if (overUtilizedPrimaryHosts != null && !overUtilizedPrimaryHosts.isEmpty()) {
        // returns hosts with utilization lower than the specified threshold
        List<VDS> underUtilizedHosts = getPrimaryDestinations(cluster, hosts, parameters);
        /* if no host has spare power, then there is nothing we can do to balance it here,
           try the secondary approach */
        if (underUtilizedHosts == null || underUtilizedHosts.isEmpty()) {
            log.warn("All candidate hosts have been filtered, can't balance the cluster '{}' based on the CPU usage, will try memory based approach", cluster.getName());
        } else {
            result = getBalance(findVmAndDestinations, overUtilizedPrimaryHosts, underUtilizedHosts);
        }
    }
    // if it is not possible (or necessary) to balance based on CPU, try with memory
    if (!result.isPresent() && (overUtilizedSecondaryHosts != null && !overUtilizedSecondaryHosts.isEmpty())) {
        // returns hosts with more free memory than the specified threshold
        List<VDS> underUtilizedHosts = getSecondaryDestinations(cluster, hosts, parameters);
        // if no host has memory to spare, then there is nothing we can do to balance it
        if (underUtilizedHosts == null || underUtilizedHosts.isEmpty()) {
            log.warn("All candidate hosts have been filtered, can't balance the cluster '{}' using memory based approach", cluster.getName());
            return Optional.empty();
        }
        result = getBalance(findVmAndDestinations, overUtilizedSecondaryHosts, underUtilizedHosts);
    }
    // add the current host, it is possible it is the best host after all,
    // because the balancer does not know about affinity for example
    Optional<BalanceResult> finalResult = result;
    result.map(BalanceResult::getCurrentHost)
            .filter(Objects::nonNull)
            .ifPresent(h -> finalResult.ifPresent(res -> res.getCandidateHosts().add(h)));
    return result;
}
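BalanceResult itself is not shown anywhere on this page. Judging only by the call sites above (the two-argument constructor, isValid, getVmToMigrate, getCandidateHosts and getCurrentHost), a simplified stand-in might look as follows; this is an approximation for orientation, not the class as it exists in ovirt-engine:

// Approximation of BalanceResult, reconstructed from the call sites above.
public class BalanceResult {

    private final Guid vmToMigrate;
    private final List<Guid> candidateHosts;
    // The current host of the VM; how it is populated is not visible in these snippets.
    private Guid currentHost;

    public BalanceResult(Guid vmToMigrate, List<Guid> candidateHosts) {
        this.vmToMigrate = vmToMigrate;
        this.candidateHosts = candidateHosts;
    }

    public Guid getVmToMigrate() {
        return vmToMigrate;
    }

    public List<Guid> getCandidateHosts() {
        return candidateHosts;
    }

    public Guid getCurrentHost() {
        return currentHost;
    }

    // Assumed: a result is actionable only when there is a VM to move and at least one candidate host.
    public boolean isValid() {
        return vmToMigrate != null && candidateHosts != null && !candidateHosts.isEmpty();
    }
}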