Use of com.cloud.affinity.AffinityGroupProcessor in project cosmic by MissionCriticalCloud: the class DeploymentPlanningManagerImpl, method finalizeReservation.
@DB
@Override
public String finalizeReservation(final DeployDestination plannedDestination, final VirtualMachineProfile vmProfile, final DeploymentPlan plan, final ExcludeList avoids, final DeploymentPlanner planner) throws InsufficientServerCapacityException, AffinityConflictException {
    final VirtualMachine vm = vmProfile.getVirtualMachine();
    final long vmGroupCount = _affinityGroupVMMapDao.countAffinityGroupsForVm(vm.getId());
    return Transaction.execute(new TransactionCallback<String>() {
        @Override
        public String doInTransaction(final TransactionStatus status) {
            boolean saveReservation = true;
            if (vmGroupCount > 0) {
                final List<Long> groupIds = _affinityGroupVMMapDao.listAffinityGroupIdsByVmId(vm.getId());
                final SearchCriteria<AffinityGroupVO> criteria = _affinityGroupDao.createSearchCriteria();
                criteria.addAnd("id", SearchCriteria.Op.IN, groupIds.toArray(new Object[groupIds.size()]));
                _affinityGroupDao.lockRows(criteria, null, true);
                for (final AffinityGroupProcessor processor : _affinityProcessors) {
                    if (!processor.check(vmProfile, plannedDestination)) {
                        saveReservation = false;
                        break;
                    }
                }
            }
            if (saveReservation) {
                final VMReservationVO vmReservation = new VMReservationVO(vm.getId(), plannedDestination.getZone().getId(), plannedDestination.getPod().getId(), plannedDestination.getCluster().getId(), plannedDestination.getHost().getId());
                if (planner != null) {
                    vmReservation.setDeploymentPlanner(planner.getName());
                }
                final Map<Long, Long> volumeReservationMap = new HashMap<>();
                for (final Volume vo : plannedDestination.getStorageForDisks().keySet()) {
                    volumeReservationMap.put(vo.getId(), plannedDestination.getStorageForDisks().get(vo).getId());
                }
                vmReservation.setVolumeReservation(volumeReservationMap);
                _reservationDao.persist(vmReservation);
                return vmReservation.getUuid();
            }
            return null;
        }
    });
}
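For context, a minimal sketch of what a check(...) implementation vetoing a reservation might look like for host anti-affinity. Only the check contract (return false to make finalizeReservation drop the reservation) comes from the code above; the AffinityMembers and Destination types below are hypothetical stand-ins for cosmic's DAOs and DeployDestination, not the project's actual classes.

import java.util.Set;

public class HostAntiAffinityCheckSketch {

    /** Stand-in for the data the real processor would read from the affinity-group DAOs. */
    public interface AffinityMembers {
        Set<Long> hostsUsedByGroupMembers(long vmId);
    }

    /** Stand-in for DeployDestination: only the planned host id matters here. */
    public static final class Destination {
        final long hostId;
        public Destination(final long hostId) { this.hostId = hostId; }
    }

    /**
     * Returns false when the planned host already runs another VM of the same
     * anti-affinity group, i.e. the condition that would veto the reservation above.
     */
    public static boolean check(final long vmId, final Destination planned, final AffinityMembers members) {
        return !members.hostsUsedByGroupMembers(vmId).contains(planned.hostId);
    }

    public static void main(final String[] args) {
        final AffinityMembers members = vmId -> Set.of(101L, 102L); // hosts already used by the group
        System.out.println(check(7L, new Destination(103L), members)); // true  -> reservation kept
        System.out.println(check(7L, new Destination(101L), members)); // false -> reservation dropped
    }
}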
Use of com.cloud.affinity.AffinityGroupProcessor in project cosmic by MissionCriticalCloud: the class ManagementServerImpl, method listHostsForMigrationOfVM.
@Override
public Ternary<Pair<List<? extends Host>, Integer>, List<? extends Host>, Map<Host, Boolean>> listHostsForMigrationOfVM(final Long vmId, final Long startIndex, final Long pageSize, final String keyword) {
    final Account caller = getCaller();
    if (!_accountMgr.isRootAdmin(caller.getId())) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Caller is not a root admin, permission denied to migrate the VM");
        }
        throw new PermissionDeniedException("No permission to migrate VM, Only Root Admin can migrate a VM!");
    }
    final VMInstanceVO vm = _vmInstanceDao.findById(vmId);
    if (vm == null) {
        final InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find the VM with given id");
        throw ex;
    }
    if (vm.getState() != State.Running) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("VM is not running, cannot migrate the vm " + vm);
        }
        final InvalidParameterValueException ex = new InvalidParameterValueException("VM is not Running, cannot " + "migrate the vm with specified id");
        ex.addProxyObject(vm.getUuid(), "vmId");
        throw ex;
    }
    if (_serviceOfferingDetailsDao.findDetail(vm.getServiceOfferingId(), GPU.Keys.pciDevice.toString()) != null) {
        s_logger.info("Live migration of GPU enabled VM: " + vm.getInstanceName() + " is not supported");
        // Return empty list.
        return new Ternary<>(new Pair<>(new ArrayList<HostVO>(), new Integer(0)), new ArrayList<>(), new HashMap<>());
    }
    if (!vm.getHypervisorType().equals(HypervisorType.XenServer) && !vm.getHypervisorType().equals(HypervisorType.KVM)) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug(vm + " is not XenServer/KVM, cannot migrate this VM.");
        }
        throw new InvalidParameterValueException("Unsupported Hypervisor Type for VM migration, we support " + "XenServer/KVM only");
    }
    final long srcHostId = vm.getHostId();
    final Host srcHost = _hostDao.findById(srcHostId);
    if (srcHost == null) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Unable to find the host with id: " + srcHostId + " of this VM: " + vm);
        }
        final InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find the host (with specified id) of VM with specified id");
        ex.addProxyObject(String.valueOf(srcHostId), "hostId");
        ex.addProxyObject(vm.getUuid(), "vmId");
        throw ex;
    }
    // Check if the vm can be migrated with storage.
    boolean canMigrateWithStorage = false;
    if (vm.getType() == VirtualMachine.Type.User) {
        final HypervisorCapabilitiesVO capabilities = _hypervisorCapabilitiesDao.findByHypervisorTypeAndVersion(srcHost.getHypervisorType(), srcHost.getHypervisorVersion());
        if (capabilities != null) {
            canMigrateWithStorage = capabilities.isStorageMotionSupported();
        }
    }
    // Check if the vm is using any disks on local storage.
    final VirtualMachineProfile vmProfile = new VirtualMachineProfileImpl(vm, null, _offeringDao.findById(vm.getId(), vm.getServiceOfferingId()), null, null);
    final List<VolumeVO> volumes = _volumeDao.findCreatedByInstance(vmProfile.getId());
    boolean usesLocal = false;
    for (final VolumeVO volume : volumes) {
        final DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId());
        final DiskProfile diskProfile = new DiskProfile(volume, diskOffering, vmProfile.getHypervisorType());
        if (diskProfile.useLocalStorage()) {
            usesLocal = true;
            break;
        }
    }
    if (!canMigrateWithStorage && usesLocal) {
        throw new InvalidParameterValueException("Unsupported operation, VM uses Local storage, cannot migrate");
    }
    final Type hostType = srcHost.getType();
    final Pair<List<HostVO>, Integer> allHostsPair;
    final List<HostVO> allHosts;
    final Map<Host, Boolean> requiresStorageMotion = new HashMap<>();
    final DataCenterDeployment plan;
    if (canMigrateWithStorage) {
        allHostsPair = searchForServers(startIndex, pageSize, null, hostType, null, srcHost.getDataCenterId(), null, null, null, keyword, null, null, srcHost.getHypervisorType(), srcHost.getHypervisorVersion());
        allHosts = allHostsPair.first();
        allHosts.remove(srcHost);
        for (final VolumeVO volume : volumes) {
            final Long volClusterId = _poolDao.findById(volume.getPoolId()).getClusterId();
            // only check volumes which are not in a zone-wide primary store, as only those may require storage motion
            if (volClusterId != null) {
                for (final Iterator<HostVO> iterator = allHosts.iterator(); iterator.hasNext(); ) {
                    final Host host = iterator.next();
                    if (!host.getClusterId().equals(volClusterId) || usesLocal) {
                        if (hasSuitablePoolsForVolume(volume, host, vmProfile)) {
                            requiresStorageMotion.put(host, true);
                        } else {
                            iterator.remove();
                        }
                    }
                }
            }
        }
        plan = new DataCenterDeployment(srcHost.getDataCenterId(), null, null, null, null, null);
    } else {
        final Long cluster = srcHost.getClusterId();
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Searching for all hosts in cluster " + cluster + " for migrating VM " + vm);
        }
        allHostsPair = searchForServers(startIndex, pageSize, null, hostType, null, null, null, cluster, null, keyword, null, null, null, null);
        // Filter out the current host.
        allHosts = allHostsPair.first();
        allHosts.remove(srcHost);
        plan = new DataCenterDeployment(srcHost.getDataCenterId(), srcHost.getPodId(), srcHost.getClusterId(), null, null, null);
    }
    final Pair<List<? extends Host>, Integer> otherHosts = new Pair<>(allHosts, new Integer(allHosts.size()));
    List<Host> suitableHosts = new ArrayList<>();
    final ExcludeList excludes = new ExcludeList();
    excludes.addHost(srcHostId);
    // call affinitygroup chain
    final long vmGroupCount = _affinityGroupVMMapDao.countAffinityGroupsForVm(vm.getId());
    if (vmGroupCount > 0) {
        for (final AffinityGroupProcessor processor : _affinityProcessors) {
            processor.process(vmProfile, plan, excludes);
        }
    }
    final Account account = vmProfile.getOwner();
    for (final HostVO host : allHosts) {
        final DedicatedResourceVO dedicatedResourceVO = dedicatedResourceDao.findByHostId(host.getId());
        if (dedicatedResourceVO != null && dedicatedResourceVO.getDomainId() != account.getDomainId()) {
            final Domain domain = _domainDao.findById(dedicatedResourceVO.getDomainId());
            if (domain != null) {
                s_logger.debug("Host " + host.getName() + " is dedicated to domain " + domain.getName() + " so not suitable for migration for VM " + vmProfile.getInstanceName());
            }
            excludes.addHost(host.getId());
        }
    }
    for (final HostAllocator allocator : hostAllocators) {
        if (canMigrateWithStorage) {
            suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, excludes, allHosts, HostAllocator.RETURN_UPTO_ALL, false);
        } else {
            suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, excludes, HostAllocator.RETURN_UPTO_ALL, false);
        }
        if (suitableHosts != null && !suitableHosts.isEmpty()) {
            break;
        }
    }
    if (s_logger.isDebugEnabled()) {
        if (suitableHosts.isEmpty()) {
            s_logger.debug("No suitable hosts found");
        } else {
            s_logger.debug("Hosts having capacity and suitable for migration: " + suitableHosts);
        }
    }
    return new Ternary<>(otherHosts, suitableHosts, requiresStorageMotion);
}
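To illustrate the interplay between processor.process(...) and the host allocators above, here is a simplified sketch of an avoid set being populated by a processor chain and then applied when filtering candidate hosts. The AvoidSet and Processor types and the host ids are hypothetical stand-ins, not cosmic's ExcludeList, AffinityGroupProcessor, or HostAllocator.

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class AffinityChainSketch {

    /** Stand-in for ExcludeList: only host exclusion is modelled. */
    public static final class AvoidSet {
        private final Set<Long> hosts = new HashSet<>();
        public void addHost(final long hostId) { hosts.add(hostId); }
        public boolean shouldAvoid(final long hostId) { return hosts.contains(hostId); }
    }

    /** Stand-in for AffinityGroupProcessor.process(profile, plan, avoids). */
    public interface Processor {
        void process(long vmId, AvoidSet avoids);
    }

    /** Mirrors the pattern above: every processor may add hosts to the avoid set,
     *  then an allocator-style filter keeps only hosts that are not excluded. */
    public static List<Long> candidateHosts(final long vmId, final List<Long> allHosts, final List<Processor> chain) {
        final AvoidSet avoids = new AvoidSet();
        for (final Processor p : chain) {
            p.process(vmId, avoids);
        }
        final List<Long> suitable = new ArrayList<>();
        for (final Long host : allHosts) {
            if (!avoids.shouldAvoid(host)) {
                suitable.add(host);
            }
        }
        return suitable;
    }

    public static void main(final String[] args) {
        // One hypothetical anti-affinity processor that excludes hosts 2 and 5.
        final Processor antiAffinity = (vmId, avoids) -> { avoids.addHost(2L); avoids.addHost(5L); };
        System.out.println(candidateHosts(7L, List.of(1L, 2L, 3L, 5L), List.of(antiAffinity))); // [1, 3]
    }
}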
Use of com.cloud.affinity.AffinityGroupProcessor in project cosmic by MissionCriticalCloud: the class DeploymentPlanningManagerImpl, method planDeployment.
@Override
public DeployDestination planDeployment(final VirtualMachineProfile vmProfile, final DeploymentPlan plan, final ExcludeList avoids, DeploymentPlanner planner) throws InsufficientServerCapacityException, AffinityConflictException {
    // call affinitygroup chain
    final VirtualMachine vm = vmProfile.getVirtualMachine();
    final long vmGroupCount = _affinityGroupVMMapDao.countAffinityGroupsForVm(vm.getId());
    final Zone zone = zoneRepository.findOne(vm.getDataCenterId());
    if (vmGroupCount > 0) {
        for (final AffinityGroupProcessor processor : _affinityProcessors) {
            processor.process(vmProfile, plan, avoids);
        }
    }
    if (vm.getType() == VirtualMachine.Type.User || vm.getType() == VirtualMachine.Type.DomainRouter) {
        checkForNonDedicatedResources(vmProfile, zone, avoids);
    }
    s_logger.debug("Deployment will {}", avoids.toString());
    // check if datacenter is in avoid set
    if (avoids.shouldAvoid(zone)) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("DataCenter id = '" + zone.getId() + "' provided is in avoid set, DeploymentPlanner cannot allocate the VM, returning.");
        }
        return null;
    }
    final ServiceOffering offering = vmProfile.getServiceOffering();
    if (planner == null) {
        String plannerName = offering.getDeploymentPlanner();
        if (plannerName == null) {
            plannerName = _configDao.getValue(Config.VmDeploymentPlanner.key());
        }
        planner = getDeploymentPlannerByName(plannerName);
    }
    final int cpu_requested = offering.getCpu();
    final long ram_requested = offering.getRamSize() * 1024L * 1024L;
    if (s_logger.isDebugEnabled()) {
        s_logger.debug("DeploymentPlanner allocation algorithm: " + planner);
        s_logger.debug("Trying to allocate a host and storage pools from zone:" + plan.getDataCenterId() + ", pod:" + plan.getPodId() + ",cluster:" + plan.getClusterId() + ", requested cpu: " + cpu_requested + ", requested ram: " + ram_requested);
        s_logger.debug("Is ROOT volume READY (pool already allocated)?: " + (plan.getPoolId() != null ? "Yes" : "No"));
    }
    final String haVmTag = (String) vmProfile.getParameter(VirtualMachineProfile.Param.HaTag);
    if (plan.getHostId() != null && haVmTag == null) {
        final Long hostIdSpecified = plan.getHostId();
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("DeploymentPlan has host_id specified, choosing this host and making no checks on this host: " + hostIdSpecified);
        }
        final HostVO host = _hostDao.findById(hostIdSpecified);
        if (host == null) {
            s_logger.debug("The specified host cannot be found");
        } else if (avoids.shouldAvoid(host)) {
            s_logger.debug("The specified host is in avoid set");
        } else {
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("Looking for suitable pools for this host under zone: " + host.getDataCenterId() + ", pod: " + host.getPodId() + ", cluster: " + host.getClusterId());
            }
            Pod pod = _podDao.findById(host.getPodId());
            Cluster cluster = _clusterDao.findById(host.getClusterId());
            // search for storage under the zone, pod, cluster of the host.
            final DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), hostIdSpecified, plan.getPoolId(), null, plan.getReservationContext());
            final Pair<Map<Volume, List<StoragePool>>, List<Volume>> result = findSuitablePoolsForVolumes(vmProfile, lastPlan, avoids, HostAllocator.RETURN_UPTO_ALL);
            final Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
            final List<Volume> readyAndReusedVolumes = result.second();
            // choose the potential pool for this VM for this host
            if (!suitableVolumeStoragePools.isEmpty()) {
                final List<Host> suitableHosts = new ArrayList<>();
                suitableHosts.add(host);
                final Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(suitableHosts, suitableVolumeStoragePools, avoids, getPlannerUsage(planner, vmProfile, plan, avoids), readyAndReusedVolumes);
                if (potentialResources != null) {
                    pod = _podDao.findById(host.getPodId());
                    cluster = _clusterDao.findById(host.getClusterId());
                    final Map<Volume, StoragePool> storageVolMap = potentialResources.second();
                    // we don't have to prepare this volume.
                    for (final Volume vol : readyAndReusedVolumes) {
                        storageVolMap.remove(vol);
                    }
                    final DeployDestination dest = new DeployDestination(zone, pod, cluster, host, storageVolMap);
                    s_logger.debug("Returning Deployment Destination: " + dest);
                    return dest;
                }
            }
        }
        s_logger.debug("Cannot deploy to specified host, returning.");
        return null;
    }
    DeployDestination dest = null;
    List<Long> clusterList = null;
    if (planner != null && planner.canHandle(vmProfile, plan, avoids)) {
        while (true) {
            if (planner instanceof DeploymentClusterPlanner) {
                final ExcludeList plannerAvoidInput = new ExcludeList(avoids.getZonesToAvoid(), avoids.getPodsToAvoid(), avoids.getClustersToAvoid(), avoids.getHostsToAvoid(), avoids.getPoolsToAvoid());
                clusterList = ((DeploymentClusterPlanner) planner).orderClusters(vmProfile, plan, avoids);
                if (clusterList != null && !clusterList.isEmpty()) {
                    // planner refactoring. call allocators to list hosts
                    final ExcludeList plannerAvoidOutput = new ExcludeList(avoids.getZonesToAvoid(), avoids.getPodsToAvoid(), avoids.getClustersToAvoid(), avoids.getHostsToAvoid(), avoids.getPoolsToAvoid());
                    resetAvoidSet(plannerAvoidOutput, plannerAvoidInput);
                    dest = checkClustersforDestination(clusterList, vmProfile, plan, avoids, zone, getPlannerUsage(planner, vmProfile, plan, avoids), plannerAvoidOutput);
                    if (dest != null) {
                        return dest;
                    }
                    // reset the avoid input to the planners
                    resetAvoidSet(avoids, plannerAvoidOutput);
                } else {
                    return null;
                }
            } else {
                dest = planner.plan(vmProfile, plan, avoids);
                if (dest != null) {
                    final long hostId = dest.getHost().getId();
                    avoids.addHost(dest.getHost().getId());
                    if (checkIfHostFitsPlannerUsage(hostId, DeploymentPlanner.PlannerResourceUsage.Shared)) {
                        // found destination
                        return dest;
                    } else {
                        // deployment picked it up for dedicated access
                        continue;
                    }
                } else {
                    return null;
                }
            }
        }
    }
    return dest;
}
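A compact sketch of the retry pattern in the non-cluster-planner branch above: plan, add the chosen host to the avoid set, and retry until a host passes the usage check or the planner gives up. The Planner interface, the usage predicate, and the host ids are hypothetical stand-ins for the real DeploymentPlanner and capacity checks.

import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.function.LongPredicate;

public class PlannerRetrySketch {

    /** Stand-in for DeploymentPlanner.plan(...): returns a host id or null when exhausted. */
    public interface Planner {
        Long plan(Set<Long> avoids);
    }

    /** Mirrors the while(true) loop above: each rejected host is added to the avoid
     *  set so the next planner call cannot pick it again. */
    public static Long planWithRetry(final Planner planner, final LongPredicate fitsPlannerUsage) {
        final Set<Long> avoids = new HashSet<>();
        while (true) {
            final Long hostId = planner.plan(avoids);
            if (hostId == null) {
                return null; // planner has no further candidates
            }
            avoids.add(hostId);
            if (fitsPlannerUsage.test(hostId)) {
                return hostId; // found destination
            }
            // otherwise the host is reserved for dedicated use; loop and try the next candidate
        }
    }

    public static void main(final String[] args) {
        final List<Long> ordered = List.of(10L, 11L, 12L);
        final Planner planner = avoids -> ordered.stream().filter(h -> !avoids.contains(h)).findFirst().orElse(null);
        System.out.println(planWithRetry(planner, hostId -> hostId != 10L)); // 11
    }
}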