Use of org.apache.cloudstack.affinity.AffinityGroupProcessor in project cloudstack by apache.
The class ManagementServerImpl, method listHostsForMigrationOfVM.
@Override
public Ternary<Pair<List<? extends Host>, Integer>, List<? extends Host>, Map<Host, Boolean>> listHostsForMigrationOfVM(final Long vmId, final Long startIndex, final Long pageSize, final String keyword) {
final Account caller = getCaller();
if (!_accountMgr.isRootAdmin(caller.getId())) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Caller is not a root admin, permission denied to migrate the VM");
}
throw new PermissionDeniedException("No permission to migrate VM; only a root admin can migrate a VM");
}
final VMInstanceVO vm = _vmInstanceDao.findById(vmId);
if (vm == null) {
final InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find the VM with given id");
throw ex;
}
if (vm.getState() != State.Running) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("VM is not running, cannot migrate the vm" + vm);
}
final InvalidParameterValueException ex = new InvalidParameterValueException("VM is not Running, cannot " + "migrate the vm with specified id");
ex.addProxyObject(vm.getUuid(), "vmId");
throw ex;
}
if (_serviceOfferingDetailsDao.findDetail(vm.getServiceOfferingId(), GPU.Keys.pciDevice.toString()) != null) {
s_logger.info(" Live Migration of GPU enabled VM : " + vm.getInstanceName() + " is not supported");
// Return empty list.
return new Ternary<Pair<List<? extends Host>, Integer>, List<? extends Host>, Map<Host, Boolean>>(new Pair<List<? extends Host>, Integer>(new ArrayList<HostVO>(), 0), new ArrayList<Host>(), new HashMap<Host, Boolean>());
}
if (!vm.getHypervisorType().equals(HypervisorType.XenServer) && !vm.getHypervisorType().equals(HypervisorType.VMware) && !vm.getHypervisorType().equals(HypervisorType.KVM) && !vm.getHypervisorType().equals(HypervisorType.Ovm) && !vm.getHypervisorType().equals(HypervisorType.Hyperv) && !vm.getHypervisorType().equals(HypervisorType.LXC) && !vm.getHypervisorType().equals(HypervisorType.Simulator) && !vm.getHypervisorType().equals(HypervisorType.Ovm3)) {
if (s_logger.isDebugEnabled()) {
s_logger.debug(vm + " is not XenServer/VMware/KVM/Ovm/Hyperv/Ovm3, cannot migrate this VM.");
}
throw new InvalidParameterValueException("Unsupported Hypervisor Type for VM migration, we support " + "XenServer/VMware/KVM/Ovm/Hyperv/Ovm3 only");
}
if (vm.getType().equals(VirtualMachine.Type.User) && vm.getHypervisorType().equals(HypervisorType.LXC)) {
throw new InvalidParameterValueException("Unsupported Hypervisor Type for User VM migration, we support XenServer/VMware/KVM/Ovm/Hyperv/Ovm3 only");
}
final long srcHostId = vm.getHostId();
final Host srcHost = _hostDao.findById(srcHostId);
if (srcHost == null) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Unable to find the host with id: " + srcHostId + " of this VM:" + vm);
}
final InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find the host (with specified id) of VM with specified id");
ex.addProxyObject(String.valueOf(srcHostId), "hostId");
ex.addProxyObject(vm.getUuid(), "vmId");
throw ex;
}
// Check if the vm can be migrated with storage.
boolean canMigrateWithStorage = false;
if (vm.getType() == VirtualMachine.Type.User) {
final HypervisorCapabilitiesVO capabilities = _hypervisorCapabilitiesDao.findByHypervisorTypeAndVersion(srcHost.getHypervisorType(), srcHost.getHypervisorVersion());
if (capabilities != null) {
canMigrateWithStorage = capabilities.isStorageMotionSupported();
}
}
// Check if the vm is using any disks on local storage.
final VirtualMachineProfile vmProfile = new VirtualMachineProfileImpl(vm, null, _offeringDao.findById(vm.getId(), vm.getServiceOfferingId()), null, null);
final List<VolumeVO> volumes = _volumeDao.findCreatedByInstance(vmProfile.getId());
boolean usesLocal = false;
for (final VolumeVO volume : volumes) {
final DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId());
final DiskProfile diskProfile = new DiskProfile(volume, diskOffering, vmProfile.getHypervisorType());
if (diskProfile.useLocalStorage()) {
usesLocal = true;
break;
}
}
if (!canMigrateWithStorage && usesLocal) {
throw new InvalidParameterValueException("Unsupported operation, VM uses Local storage, cannot migrate");
}
final Type hostType = srcHost.getType();
Pair<List<HostVO>, Integer> allHostsPair = null;
List<HostVO> allHosts = null;
final Map<Host, Boolean> requiresStorageMotion = new HashMap<Host, Boolean>();
DataCenterDeployment plan = null;
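// With storage motion support, search all hosts in the zone running the same hypervisor type and version; otherwise only hosts in the source host's cluster are candidates.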
if (canMigrateWithStorage) {
allHostsPair = searchForServers(startIndex, pageSize, null, hostType, null, srcHost.getDataCenterId(), null, null, null, keyword, null, null, srcHost.getHypervisorType(), srcHost.getHypervisorVersion());
allHosts = allHostsPair.first();
allHosts.remove(srcHost);
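// For each volume, drop candidate hosts outside the volume's cluster that have no suitable pool for it; hosts that would need the volume moved or reattached are marked as requiring storage motion.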
for (final VolumeVO volume : volumes) {
final StoragePool storagePool = _poolDao.findById(volume.getPoolId());
final Long volClusterId = storagePool.getClusterId();
for (final Iterator<HostVO> iterator = allHosts.iterator(); iterator.hasNext(); ) {
final Host host = iterator.next();
if (volClusterId != null) {
if (!host.getClusterId().equals(volClusterId) || usesLocal) {
if (hasSuitablePoolsForVolume(volume, host, vmProfile)) {
requiresStorageMotion.put(host, true);
} else {
iterator.remove();
}
}
} else {
if (storagePool.isManaged()) {
if (!srcHost.getClusterId().equals(host.getClusterId())) {
requiresStorageMotion.put(host, true);
}
}
}
}
}
plan = new DataCenterDeployment(srcHost.getDataCenterId(), null, null, null, null, null);
} else {
final Long cluster = srcHost.getClusterId();
if (s_logger.isDebugEnabled()) {
s_logger.debug("Searching for all hosts in cluster " + cluster + " for migrating VM " + vm);
}
allHostsPair = searchForServers(startIndex, pageSize, null, hostType, null, null, null, cluster, null, keyword, null, null, null, null);
// Filter out the current host.
allHosts = allHostsPair.first();
allHosts.remove(srcHost);
plan = new DataCenterDeployment(srcHost.getDataCenterId(), srcHost.getPodId(), srcHost.getClusterId(), null, null, null);
}
final Pair<List<? extends Host>, Integer> otherHosts = new Pair<List<? extends Host>, Integer>(allHosts, allHosts.size());
List<Host> suitableHosts = new ArrayList<Host>();
final ExcludeList excludes = new ExcludeList();
excludes.addHost(srcHostId);
// Run the affinity group processor chain to fold the VM's group constraints into the exclude list.
final long vmGroupCount = _affinityGroupVMMapDao.countAffinityGroupsForVm(vm.getId());
if (vmGroupCount > 0) {
for (final AffinityGroupProcessor processor : _affinityProcessors) {
processor.process(vmProfile, plan, excludes);
}
}
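// Ask each host allocator in turn; the first allocator that returns a non-empty list of suitable hosts wins.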
for (final HostAllocator allocator : hostAllocators) {
if (canMigrateWithStorage) {
suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, excludes, allHosts, HostAllocator.RETURN_UPTO_ALL, false);
} else {
suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, excludes, HostAllocator.RETURN_UPTO_ALL, false);
}
if (suitableHosts != null && !suitableHosts.isEmpty()) {
break;
}
}
if (s_logger.isDebugEnabled()) {
if (suitableHosts.isEmpty()) {
s_logger.debug("No suitable hosts found");
} else {
s_logger.debug("Hosts having capacity and suitable for migration: " + suitableHosts);
}
}
return new Ternary<Pair<List<? extends Host>, Integer>, List<? extends Host>, Map<Host, Boolean>>(otherHosts, suitableHosts, requiresStorageMotion);
}
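The chain above calls a single entry point on each processor before allocation: process(vmProfile, plan, excludes), which is expected to fold the VM's affinity constraints into the exclude list. As an illustration only (not the actual CloudStack HostAntiAffinityProcessor), a minimal host anti-affinity processor of that shape could look like the sketch below; listVmIdsByAffinityGroup is an assumed helper, the AdapterBase base class and @Inject wiring are assumptions, and the remaining members of the AffinityGroupProcessor interface are omitted.
public class SketchHostAntiAffinityProcessor extends AdapterBase implements AffinityGroupProcessor {

    // Assumed injections; only DAO calls visible in the surrounding snippets are relied on.
    @Inject
    protected AffinityGroupVMMapDao _affinityGroupVMMapDao;
    @Inject
    protected VMInstanceDao _vmInstanceDao;

    @Override
    public void process(final VirtualMachineProfile vmProfile, final DeploymentPlan plan, final ExcludeList avoid) {
        final VirtualMachine vm = vmProfile.getVirtualMachine();
        for (final Long groupId : _affinityGroupVMMapDao.listAffinityGroupIdsByVmId(vm.getId())) {
            // Exclude the current host of every other member of this anti-affinity group.
            for (final Long memberVmId : _affinityGroupVMMapDao.listVmIdsByAffinityGroup(groupId)) { // assumed helper
                if (memberVmId.longValue() == vm.getId()) {
                    continue;
                }
                final VMInstanceVO member = _vmInstanceDao.findById(memberVmId);
                if (member != null && member.getHostId() != null) {
                    avoid.addHost(member.getHostId());
                }
            }
        }
    }

    // Other interface members (check, getType, ...) omitted; check(...) is sketched after finalizeReservation below.
}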
Use of org.apache.cloudstack.affinity.AffinityGroupProcessor in project cloudstack by apache.
The class DeploymentPlanningManagerImpl, method finalizeReservation.
@DB
@Override
public String finalizeReservation(final DeployDestination plannedDestination, final VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoids, final DeploymentPlanner planner) throws InsufficientServerCapacityException, AffinityConflictException {
final VirtualMachine vm = vmProfile.getVirtualMachine();
final long vmGroupCount = _affinityGroupVMMapDao.countAffinityGroupsForVm(vm.getId());
return Transaction.execute(new TransactionCallback<String>() {
@Override
public String doInTransaction(TransactionStatus status) {
boolean saveReservation = true;
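// Re-check affinity under row locks on the VM's groups, so a concurrent deployment cannot place a conflicting VM between planning and reservation.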
if (vmGroupCount > 0) {
List<Long> groupIds = _affinityGroupVMMapDao.listAffinityGroupIdsByVmId(vm.getId());
SearchCriteria<AffinityGroupVO> criteria = _affinityGroupDao.createSearchCriteria();
criteria.addAnd("id", SearchCriteria.Op.IN, groupIds.toArray(new Object[groupIds.size()]));
_affinityGroupDao.lockRows(criteria, null, true);
for (AffinityGroupProcessor processor : _affinityProcessors) {
if (!processor.check(vmProfile, plannedDestination)) {
saveReservation = false;
break;
}
}
}
if (saveReservation) {
VMReservationVO vmReservation = new VMReservationVO(vm.getId(), plannedDestination.getDataCenter().getId(), plannedDestination.getPod().getId(), plannedDestination.getCluster().getId(), plannedDestination.getHost().getId());
if (planner != null) {
vmReservation.setDeploymentPlanner(planner.getName());
}
Map<Long, Long> volumeReservationMap = new HashMap<Long, Long>();
if (vm.getHypervisorType() != HypervisorType.BareMetal) {
for (Volume vo : plannedDestination.getStorageForDisks().keySet()) {
volumeReservationMap.put(vo.getId(), plannedDestination.getStorageForDisks().get(vo).getId());
}
vmReservation.setVolumeReservation(volumeReservationMap);
}
_reservationDao.persist(vmReservation);
return vmReservation.getUuid();
}
return null;
}
});
}
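finalizeReservation locks the rows of the VM's affinity groups and then re-runs every processor's check(vmProfile, plannedDestination), so the reservation is persisted only if the planned destination still satisfies all constraints at commit time. Continuing the sketch started above (same assumed listVmIdsByAffinityGroup helper), an anti-affinity check could look like:
    @Override
    public boolean check(final VirtualMachineProfile vmProfile, final DeployDestination plannedDestination) {
        if (plannedDestination.getHost() == null) {
            // No host picked yet, nothing host-level to validate.
            return true;
        }
        final long plannedHostId = plannedDestination.getHost().getId();
        final VirtualMachine vm = vmProfile.getVirtualMachine();
        for (final Long groupId : _affinityGroupVMMapDao.listAffinityGroupIdsByVmId(vm.getId())) {
            for (final Long memberVmId : _affinityGroupVMMapDao.listVmIdsByAffinityGroup(groupId)) { // assumed helper
                final VMInstanceVO member = _vmInstanceDao.findById(memberVmId);
                // Reject the destination if any other group member already runs on the planned host.
                if (member != null && member.getId() != vm.getId() && member.getHostId() != null && member.getHostId().longValue() == plannedHostId) {
                    return false;
                }
            }
        }
        return true;
    }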
Use of org.apache.cloudstack.affinity.AffinityGroupProcessor in project cloudstack by apache.
The class DeploymentPlanningManagerImpl, method planDeployment.
@Override
public DeployDestination planDeployment(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoids, DeploymentPlanner planner) throws InsufficientServerCapacityException, AffinityConflictException {
ServiceOffering offering = vmProfile.getServiceOffering();
int cpu_requested = offering.getCpu() * offering.getSpeed();
long ram_requested = offering.getRamSize() * 1024L * 1024L;
VirtualMachine vm = vmProfile.getVirtualMachine();
DataCenter dc = _dcDao.findById(vm.getDataCenterId());
if (vm.getType() == VirtualMachine.Type.User || vm.getType() == VirtualMachine.Type.DomainRouter) {
checkForNonDedicatedResources(vmProfile, dc, avoids);
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("DeploymentPlanner allocation algorithm: " + planner);
s_logger.debug("Trying to allocate a host and storage pools from dc:" + plan.getDataCenterId() + ", pod:" + plan.getPodId() + ",cluster:" + plan.getClusterId() + ", requested cpu: " + cpu_requested + ", requested ram: " + ram_requested);
s_logger.debug("Is ROOT volume READY (pool already allocated)?: " + (plan.getPoolId() != null ? "Yes" : "No"));
}
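// A host pinned in the deployment plan (outside of HA restarts) is honored without capacity checks; only existence, the avoid set, and storage suitability are verified.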
String haVmTag = (String) vmProfile.getParameter(VirtualMachineProfile.Param.HaTag);
if (plan.getHostId() != null && haVmTag == null) {
Long hostIdSpecified = plan.getHostId();
if (s_logger.isDebugEnabled()) {
s_logger.debug("DeploymentPlan has host_id specified, choosing this host and making no checks on this host: " + hostIdSpecified);
}
HostVO host = _hostDao.findById(hostIdSpecified);
if (host == null) {
s_logger.debug("The specified host cannot be found");
} else if (avoids.shouldAvoid(host)) {
s_logger.debug("The specified host is in avoid set");
} else {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Looking for suitable pools for this host under zone: " + host.getDataCenterId() + ", pod: " + host.getPodId() + ", cluster: " + host.getClusterId());
}
Pod pod = _podDao.findById(host.getPodId());
Cluster cluster = _clusterDao.findById(host.getClusterId());
if (vm.getHypervisorType() == HypervisorType.BareMetal) {
DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap<Volume, StoragePool>());
s_logger.debug("Returning Deployment Destination: " + dest);
return dest;
}
// search for storage under the zone, pod, cluster of the host.
DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), hostIdSpecified, plan.getPoolId(), null, plan.getReservationContext());
Pair<Map<Volume, List<StoragePool>>, List<Volume>> result = findSuitablePoolsForVolumes(vmProfile, lastPlan, avoids, HostAllocator.RETURN_UPTO_ALL);
Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
List<Volume> readyAndReusedVolumes = result.second();
// choose the potential pool for this VM for this host
if (!suitableVolumeStoragePools.isEmpty()) {
List<Host> suitableHosts = new ArrayList<Host>();
suitableHosts.add(host);
Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(suitableHosts, suitableVolumeStoragePools, avoids, getPlannerUsage(planner, vmProfile, plan, avoids), readyAndReusedVolumes);
if (potentialResources != null) {
pod = _podDao.findById(host.getPodId());
cluster = _clusterDao.findById(host.getClusterId());
Map<Volume, StoragePool> storageVolMap = potentialResources.second();
// Remove the ready/reused volumes from the destination map; they don't have to be prepared.
for (Volume vol : readyAndReusedVolumes) {
storageVolMap.remove(vol);
}
DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap);
s_logger.debug("Returning Deployment Destination: " + dest);
return dest;
}
}
}
s_logger.debug("Cannot deploy to specified host, returning.");
return null;
}
// Run the affinity group processor chain to fold the VM's group constraints into the avoid set.
long vmGroupCount = _affinityGroupVMMapDao.countAffinityGroupsForVm(vm.getId());
if (vmGroupCount > 0) {
for (AffinityGroupProcessor processor : _affinityProcessors) {
processor.process(vmProfile, plan, avoids);
}
}
if (vm.getType() == VirtualMachine.Type.User) {
checkForNonDedicatedResources(vmProfile, dc, avoids);
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("Deploy avoids pods: " + avoids.getPodsToAvoid() + ", clusters: " + avoids.getClustersToAvoid() + ", hosts: " + avoids.getHostsToAvoid());
}
// check if datacenter is in avoid set
if (avoids.shouldAvoid(dc)) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("DataCenter id = '" + dc.getId() + "' provided is in avoid set, DeploymentPlanner cannot allocate the VM, returning.");
}
return null;
}
if (planner == null) {
String plannerName = offering.getDeploymentPlanner();
if (plannerName == null) {
if (vm.getHypervisorType() == HypervisorType.BareMetal) {
plannerName = "BareMetalPlanner";
} else {
plannerName = _configDao.getValue(Config.VmDeploymentPlanner.key());
}
}
planner = getDeploymentPlannerByName(plannerName);
}
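// Prefer the VM's last host when it is Up and Enabled, outside the avoid set, in the planned cluster, matching the offering's host tag, with any required GPU devices and enough capacity.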
if (vm.getLastHostId() != null && haVmTag == null) {
s_logger.debug("This VM has last host_id specified, trying to choose the same host: " + vm.getLastHostId());
HostVO host = _hostDao.findById(vm.getLastHostId());
ServiceOfferingDetailsVO offeringDetails = null;
if (host == null) {
s_logger.debug("The last host of this VM cannot be found");
} else if (avoids.shouldAvoid(host)) {
s_logger.debug("The last host of this VM is in avoid set");
} else if (plan.getClusterId() != null && host.getClusterId() != null && !plan.getClusterId().equals(host.getClusterId())) {
s_logger.debug("The last host of this VM cannot be picked as the plan specifies different clusterId: " + plan.getClusterId());
} else if (_capacityMgr.checkIfHostReachMaxGuestLimit(host)) {
s_logger.debug("The last Host, hostId: " + host.getId() + " already has max Running VMs(count includes system VMs), skipping this and trying other available hosts");
} else if ((offeringDetails = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.vgpuType.toString())) != null) {
ServiceOfferingDetailsVO groupName = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.pciDevice.toString());
if (!_resourceMgr.isGPUDeviceAvailable(host.getId(), groupName.getValue(), offeringDetails.getValue())) {
s_logger.debug("The last host of this VM does not have required GPU devices available");
}
} else {
if (host.getStatus() == Status.Up && host.getResourceState() == ResourceState.Enabled) {
boolean hostTagsMatch = true;
if (offering.getHostTag() != null) {
_hostDao.loadHostTags(host);
if (!(host.getHostTags() != null && host.getHostTags().contains(offering.getHostTag()))) {
hostTagsMatch = false;
}
}
if (hostTagsMatch) {
long cluster_id = host.getClusterId();
ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id, "cpuOvercommitRatio");
ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id, "memoryOvercommitRatio");
Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue());
Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue());
boolean hostHasCapacity = false;
boolean hostHasCpuCapability = _capacityMgr.checkIfHostHasCpuCapability(host.getId(), offering.getCpu(), offering.getSpeed());
if (hostHasCpuCapability) {
// first check from reserved capacity
hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, true, cpuOvercommitRatio, memoryOvercommitRatio, true);
// if not reserved, check the free capacity
if (!hostHasCapacity) {
hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, false, cpuOvercommitRatio, memoryOvercommitRatio, true);
}
}
if (hostHasCapacity && hostHasCpuCapability) {
s_logger.debug("The last host of this VM is UP and has enough capacity");
s_logger.debug("Now checking for suitable pools under zone: " + host.getDataCenterId() + ", pod: " + host.getPodId() + ", cluster: " + host.getClusterId());
Pod pod = _podDao.findById(host.getPodId());
Cluster cluster = _clusterDao.findById(host.getClusterId());
if (vm.getHypervisorType() == HypervisorType.BareMetal) {
DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap<Volume, StoragePool>());
s_logger.debug("Returning Deployment Destination: " + dest);
return dest;
}
// search for storage under the zone, pod, cluster of the last host.
DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), host.getId(), plan.getPoolId(), null);
Pair<Map<Volume, List<StoragePool>>, List<Volume>> result = findSuitablePoolsForVolumes(vmProfile, lastPlan, avoids, HostAllocator.RETURN_UPTO_ALL);
Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
List<Volume> readyAndReusedVolumes = result.second();
// choose the potential pool for this VM for this host
if (!suitableVolumeStoragePools.isEmpty()) {
List<Host> suitableHosts = new ArrayList<Host>();
suitableHosts.add(host);
Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(suitableHosts, suitableVolumeStoragePools, avoids, getPlannerUsage(planner, vmProfile, plan, avoids), readyAndReusedVolumes);
if (potentialResources != null) {
Map<Volume, StoragePool> storageVolMap = potentialResources.second();
// Remove the ready/reused volumes from the destination map; they don't have to be prepared.
for (Volume vol : readyAndReusedVolumes) {
storageVolMap.remove(vol);
}
DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap);
s_logger.debug("Returning Deployment Destination: " + dest);
return dest;
}
}
} else {
s_logger.debug("The last host of this VM does not have enough capacity");
}
} else {
s_logger.debug("Service Offering host tag does not match the last host of this VM");
}
} else {
s_logger.debug("The last host of this VM is not UP or is not enabled, host status is: " + host.getStatus().name() + ", host resource state is: " + host.getResourceState());
}
}
s_logger.debug("Cannot choose the last host to deploy this VM ");
}
DeployDestination dest = null;
List<Long> clusterList = null;
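// Planner loop: cluster-aware planners order candidate clusters and hand them to the allocators, while other planners return a destination directly; the ExcludeList snapshots below separate planner additions from allocator additions between rounds.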
if (planner != null && planner.canHandle(vmProfile, plan, avoids)) {
while (true) {
if (planner instanceof DeploymentClusterPlanner) {
ExcludeList plannerAvoidInput = new ExcludeList(avoids.getDataCentersToAvoid(), avoids.getPodsToAvoid(), avoids.getClustersToAvoid(), avoids.getHostsToAvoid(), avoids.getPoolsToAvoid());
clusterList = ((DeploymentClusterPlanner) planner).orderClusters(vmProfile, plan, avoids);
if (clusterList != null && !clusterList.isEmpty()) {
// planner refactoring. call allocators to list hosts
ExcludeList plannerAvoidOutput = new ExcludeList(avoids.getDataCentersToAvoid(), avoids.getPodsToAvoid(), avoids.getClustersToAvoid(), avoids.getHostsToAvoid(), avoids.getPoolsToAvoid());
resetAvoidSet(plannerAvoidOutput, plannerAvoidInput);
dest = checkClustersforDestination(clusterList, vmProfile, plan, avoids, dc, getPlannerUsage(planner, vmProfile, plan, avoids), plannerAvoidOutput);
if (dest != null) {
return dest;
}
// reset the avoid input to the planners
resetAvoidSet(avoids, plannerAvoidOutput);
} else {
return null;
}
} else {
dest = planner.plan(vmProfile, plan, avoids);
if (dest != null) {
long hostId = dest.getHost().getId();
avoids.addHost(dest.getHost().getId());
if (checkIfHostFitsPlannerUsage(hostId, DeploymentPlanner.PlannerResourceUsage.Shared)) {
// found destination
return dest;
} else {
// find another host - seems some concurrent deployment picked it up for dedicated access
continue;
}
} else {
return null;
}
}
}
}
return dest;
}
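The plannerAvoidInput / plannerAvoidOutput bookkeeping in the loop above isolates each round's additions: a snapshot is taken before orderClusters runs, everything already present on entry is subtracted from the post-planner snapshot (leaving only what the planner added), and after a failed round those additions are removed again so the planner restarts from its original input plus whatever the allocators excluded. A plausible sketch of resetAvoidSet under that reading (the getters are the ones used above; that they return mutable, possibly null Sets is an assumption):
    private void resetAvoidSet(final ExcludeList avoidSet, final ExcludeList removeSet) {
        // Drop from avoidSet every entry that also appears in removeSet, category by category.
        if (avoidSet.getDataCentersToAvoid() != null && removeSet.getDataCentersToAvoid() != null) {
            avoidSet.getDataCentersToAvoid().removeAll(removeSet.getDataCentersToAvoid());
        }
        if (avoidSet.getPodsToAvoid() != null && removeSet.getPodsToAvoid() != null) {
            avoidSet.getPodsToAvoid().removeAll(removeSet.getPodsToAvoid());
        }
        if (avoidSet.getClustersToAvoid() != null && removeSet.getClustersToAvoid() != null) {
            avoidSet.getClustersToAvoid().removeAll(removeSet.getClustersToAvoid());
        }
        if (avoidSet.getHostsToAvoid() != null && removeSet.getHostsToAvoid() != null) {
            avoidSet.getHostsToAvoid().removeAll(removeSet.getHostsToAvoid());
        }
        if (avoidSet.getPoolsToAvoid() != null && removeSet.getPoolsToAvoid() != null) {
            avoidSet.getPoolsToAvoid().removeAll(removeSet.getPoolsToAvoid());
        }
    }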