Use of com.cloud.deploy.DeploymentPlanner.ExcludeList in project cloudstack by apache.
The class FirstFitPlannerTest, method checkClusterReorderingBasedOnImplicitHostTags:
@Test
public void checkClusterReorderingBasedOnImplicitHostTags() throws InsufficientServerCapacityException {
    VirtualMachineProfileImpl vmProfile = mock(VirtualMachineProfileImpl.class);
    DataCenterDeployment plan = mock(DataCenterDeployment.class);
    ExcludeList avoids = mock(ExcludeList.class);
    initializeForTest(vmProfile, plan, avoids);
    List<Long> clusterList = planner.orderClusters(vmProfile, plan, avoids);
    List<Long> reorderedClusterList = new ArrayList<Long>();
    reorderedClusterList.add(4L);
    reorderedClusterList.add(3L);
    reorderedClusterList.add(1L);
    reorderedClusterList.add(5L);
    reorderedClusterList.add(6L);
    reorderedClusterList.add(2L);
    assertTrue("Reordered cluster list is not honoring the implicit host tags", (clusterList.equals(reorderedClusterList)));
}
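The test above hands orderClusters a mocked ExcludeList, so nothing is actually excluded; the expected ordering 4, 3, 1, 5, 6, 2 comes purely from the implicit host tags set up in initializeForTest. As a hedged variation (not from the source), the sketch below seeds a real avoid set instead, assuming ExcludeList.addCluster exists alongside the addHost and addPool calls shown later on this page and that the planner drops avoided clusters from its result:

// Hypothetical variation of the test: explicitly avoid cluster 2 and expect
// the planner to leave it out of the ordered list.
ExcludeList avoids = new ExcludeList();
avoids.addCluster(2L); // assumption: addCluster mirrors the addHost/addPool calls shown below
List<Long> clusterList = planner.orderClusters(vmProfile, plan, avoids);
assertFalse("Avoided cluster 2 should not be returned", clusterList.contains(2L));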
Use of com.cloud.deploy.DeploymentPlanner.ExcludeList in project cloudstack by apache.
The class ImplicitPlannerTest, method checkStrictModeWithCurrentAccountVmsPresent:
@Test
public void checkStrictModeWithCurrentAccountVmsPresent() throws InsufficientServerCapacityException {
    VirtualMachineProfileImpl vmProfile = mock(VirtualMachineProfileImpl.class);
    DataCenterDeployment plan = mock(DataCenterDeployment.class);
    ExcludeList avoids = new ExcludeList();
    initializeForTest(vmProfile, plan);
    initializeForImplicitPlannerTest(false);
    List<Long> clusterList = planner.orderClusters(vmProfile, plan, avoids);
    // Validations.
    // Check that clusters 2 and 3 are not in the cluster list.
    // Hosts 6 and 7 should also be in the avoid list.
    assertFalse("Cluster list should not be null/empty", (clusterList == null || clusterList.isEmpty()));
    boolean foundNeededCluster = false;
    for (Long cluster : clusterList) {
        if (cluster != 1) {
            fail("Found a cluster that shouldn't have been present, cluster id : " + cluster);
        } else {
            foundNeededCluster = true;
        }
    }
    assertTrue("Didn't find cluster 1 in the list. It should have been present", foundNeededCluster);
    Set<Long> hostsInAvoidList = avoids.getHostsToAvoid();
    assertFalse("Host 5 shouldn't have been in the avoid list, but it is present", hostsInAvoidList.contains(5L));
    Set<Long> hostsThatShouldBeInAvoidList = new HashSet<Long>();
    hostsThatShouldBeInAvoidList.add(6L);
    hostsThatShouldBeInAvoidList.add(7L);
    assertTrue("Hosts 6 and 7 that should have been present were not found in avoid list", hostsInAvoidList.containsAll(hostsThatShouldBeInAvoidList));
}
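The assertions above read the avoid set back through getHostsToAvoid(). For reference, a minimal standalone sketch of the ExcludeList calls exercised on this page (addHost, addPool, getHostsToAvoid), with arbitrary ids:

// Minimal sketch: an ExcludeList accumulates ids per resource type, and
// planners/allocators read them back, as the test does for hosts.
ExcludeList avoids = new ExcludeList();
avoids.addHost(6L);
avoids.addHost(7L);
avoids.addPool(12L);
Set<Long> hostsToAvoid = avoids.getHostsToAvoid(); // contains 6 and 7, not the pool id
assertTrue(hostsToAvoid.contains(6L) && hostsToAvoid.contains(7L));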
Use of com.cloud.deploy.DeploymentPlanner.ExcludeList in project cloudstack by apache.
The class ManagementServerImpl, method listStoragePoolsForMigrationOfVolume:
@Override
public Pair<List<? extends StoragePool>, List<? extends StoragePool>> listStoragePoolsForMigrationOfVolume(final Long volumeId) {
    final Account caller = getCaller();
    if (!_accountMgr.isRootAdmin(caller.getId())) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Caller is not a root admin, permission denied to migrate the volume");
        }
        throw new PermissionDeniedException("No permission to migrate volume, only root admin can migrate a volume");
    }
    final VolumeVO volume = _volumeDao.findById(volumeId);
    if (volume == null) {
        final InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find volume with" + " specified id.");
        ex.addProxyObject(volumeId.toString(), "volumeId");
        throw ex;
    }
    final List<StoragePool> allPools = new ArrayList<StoragePool>();
    final List<StoragePool> suitablePools = new ArrayList<StoragePool>();
    // Volume must be in Ready state to be migrated.
    if (!Volume.State.Ready.equals(volume.getState())) {
        s_logger.info("Volume " + volume + " must be in ready state for migration.");
        return new Pair<List<? extends StoragePool>, List<? extends StoragePool>>(allPools, suitablePools);
    }
    if (!_volumeMgr.volumeOnSharedStoragePool(volume)) {
        s_logger.info("Volume " + volume + " is on local storage. It cannot be migrated to another pool.");
        return new Pair<List<? extends StoragePool>, List<? extends StoragePool>>(allPools, suitablePools);
    }
    // Volume must be attached to a running instance for live migration.
    final Long instanceId = volume.getInstanceId();
    VMInstanceVO vm = null;
    if (instanceId != null) {
        vm = _vmInstanceDao.findById(instanceId);
    }
    if (vm == null) {
        s_logger.info("Volume " + volume + " isn't attached to any vm. Looking for storage pools in the " + "zone to which this volume can be migrated.");
    } else if (vm.getState() != State.Running) {
        s_logger.info("Volume " + volume + " isn't attached to any running vm. Looking for storage pools in the " + "cluster to which this volume can be migrated.");
    } else {
        s_logger.info("Volume " + volume + " is attached to a running vm. Looking for storage pools in the " + "cluster to which this volume can be migrated.");
        boolean storageMotionSupported = false;
        // Check if the underlying hypervisor supports storage motion.
        final Long hostId = vm.getHostId();
        if (hostId != null) {
            final HostVO host = _hostDao.findById(hostId);
            HypervisorCapabilitiesVO capabilities = null;
            if (host != null) {
                capabilities = _hypervisorCapabilitiesDao.findByHypervisorTypeAndVersion(host.getHypervisorType(), host.getHypervisorVersion());
            } else {
                s_logger.error("Details of the host on which the vm " + vm + ", to which volume " + volume + " is " + "attached, couldn't be retrieved.");
            }
            if (capabilities != null) {
                storageMotionSupported = capabilities.isStorageMotionSupported();
            } else {
                s_logger.error("Capabilities for host " + host + " couldn't be retrieved.");
            }
        }
        if (!storageMotionSupported) {
            s_logger.info("Volume " + volume + " is attached to a running vm and the hypervisor doesn't support" + " storage motion.");
            return new Pair<List<? extends StoragePool>, List<? extends StoragePool>>(allPools, suitablePools);
        }
    }
    // Source pool of the volume.
    final StoragePoolVO srcVolumePool = _poolDao.findById(volume.getPoolId());
    // Get all the pools available. Only shared pools are considered because only a volume on a shared pool
    // can be live migrated while the virtual machine stays on the same host.
    List<StoragePoolVO> storagePools = null;
    if (srcVolumePool.getClusterId() == null) {
        storagePools = _poolDao.findZoneWideStoragePoolsByTags(volume.getDataCenterId(), null);
    } else {
        storagePools = _poolDao.findPoolsByTags(volume.getDataCenterId(), srcVolumePool.getPodId(), srcVolumePool.getClusterId(), null);
    }
    storagePools.remove(srcVolumePool);
    for (final StoragePoolVO pool : storagePools) {
        if (pool.isShared()) {
            allPools.add((StoragePool) dataStoreMgr.getPrimaryDataStore(pool.getId()));
        }
    }
    // Get all the suitable pools.
    // Exclude the current pool from the list of pools to which the volume can be migrated.
    final ExcludeList avoid = new ExcludeList();
    avoid.addPool(srcVolumePool.getId());
    // Volume stays in the same cluster after migration.
    final DataCenterDeployment plan = new DataCenterDeployment(volume.getDataCenterId(), srcVolumePool.getPodId(), srcVolumePool.getClusterId(), null, null, null);
    final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm);
    final DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId());
    final DiskProfile diskProfile = new DiskProfile(volume, diskOffering, profile.getHypervisorType());
    // Call the storage pool allocators to find the list of suitable storage pools.
    for (final StoragePoolAllocator allocator : _storagePoolAllocators) {
        final List<StoragePool> pools = allocator.allocateToPool(diskProfile, profile, plan, avoid, StoragePoolAllocator.RETURN_UPTO_ALL);
        if (pools != null && !pools.isEmpty()) {
            suitablePools.addAll(pools);
            break;
        }
    }
    return new Pair<List<? extends StoragePool>, List<? extends StoragePool>>(allPools, suitablePools);
}
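A hedged usage sketch of the result above: the first list holds every shared pool in scope and the second only the pools the allocators accepted, so a caller typically diffs the two to flag each pool as suitable or not. The names ms and volumeId are placeholders, not taken from the source:

// Hypothetical caller of listStoragePoolsForMigrationOfVolume.
Pair<List<? extends StoragePool>, List<? extends StoragePool>> pools = ms.listStoragePoolsForMigrationOfVolume(volumeId);
List<? extends StoragePool> allPools = pools.first();
List<? extends StoragePool> suitablePools = pools.second();
for (StoragePool pool : allPools) {
    // e.g. expose a per-pool "suitable for migration" flag in an API response
    boolean suitable = suitablePools.contains(pool);
    s_logger.debug("Pool " + pool.getName() + " suitable for migration: " + suitable);
}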
Use of com.cloud.deploy.DeploymentPlanner.ExcludeList in project cloudstack by apache.
The class ManagementServerImpl, method listHostsForMigrationOfVM:
@Override
public Ternary<Pair<List<? extends Host>, Integer>, List<? extends Host>, Map<Host, Boolean>> listHostsForMigrationOfVM(final Long vmId, final Long startIndex, final Long pageSize, final String keyword) {
    final Account caller = getCaller();
    if (!_accountMgr.isRootAdmin(caller.getId())) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Caller is not a root admin, permission denied to migrate the VM");
        }
        throw new PermissionDeniedException("No permission to migrate VM, only root admin can migrate a VM");
    }
    final VMInstanceVO vm = _vmInstanceDao.findById(vmId);
    if (vm == null) {
        final InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find the VM with given id");
        throw ex;
    }
    if (vm.getState() != State.Running) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("VM is not running, cannot migrate the vm " + vm);
        }
        final InvalidParameterValueException ex = new InvalidParameterValueException("VM is not Running, cannot " + "migrate the vm with specified id");
        ex.addProxyObject(vm.getUuid(), "vmId");
        throw ex;
    }
    if (_serviceOfferingDetailsDao.findDetail(vm.getServiceOfferingId(), GPU.Keys.pciDevice.toString()) != null) {
        s_logger.info("Live migration of GPU enabled VM : " + vm.getInstanceName() + " is not supported");
        // Return empty list.
        return new Ternary<Pair<List<? extends Host>, Integer>, List<? extends Host>, Map<Host, Boolean>>(new Pair<List<? extends Host>, Integer>(new ArrayList<HostVO>(), new Integer(0)), new ArrayList<Host>(), new HashMap<Host, Boolean>());
    }
    if (!vm.getHypervisorType().equals(HypervisorType.XenServer) && !vm.getHypervisorType().equals(HypervisorType.VMware) && !vm.getHypervisorType().equals(HypervisorType.KVM) && !vm.getHypervisorType().equals(HypervisorType.Ovm) && !vm.getHypervisorType().equals(HypervisorType.Hyperv) && !vm.getHypervisorType().equals(HypervisorType.LXC) && !vm.getHypervisorType().equals(HypervisorType.Simulator) && !vm.getHypervisorType().equals(HypervisorType.Ovm3)) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug(vm + " is not XenServer/VMware/KVM/Ovm/Hyperv/Ovm3, cannot migrate this VM.");
        }
        throw new InvalidParameterValueException("Unsupported Hypervisor Type for VM migration, we support " + "XenServer/VMware/KVM/Ovm/Hyperv/Ovm3 only");
    }
    if (vm.getType().equals(VirtualMachine.Type.User) && vm.getHypervisorType().equals(HypervisorType.LXC)) {
        throw new InvalidParameterValueException("Unsupported Hypervisor Type for User VM migration, we support XenServer/VMware/KVM/Ovm/Hyperv/Ovm3 only");
    }
    final long srcHostId = vm.getHostId();
    final Host srcHost = _hostDao.findById(srcHostId);
    if (srcHost == null) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Unable to find the host with id: " + srcHostId + " of this VM:" + vm);
        }
        final InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find the host (with specified id) of VM with specified id");
        ex.addProxyObject(String.valueOf(srcHostId), "hostId");
        ex.addProxyObject(vm.getUuid(), "vmId");
        throw ex;
    }
    // Check if the vm can be migrated with storage.
    boolean canMigrateWithStorage = false;
    if (vm.getType() == VirtualMachine.Type.User) {
        final HypervisorCapabilitiesVO capabilities = _hypervisorCapabilitiesDao.findByHypervisorTypeAndVersion(srcHost.getHypervisorType(), srcHost.getHypervisorVersion());
        if (capabilities != null) {
            canMigrateWithStorage = capabilities.isStorageMotionSupported();
        }
    }
    // Check if the vm is using any disks on local storage.
    final VirtualMachineProfile vmProfile = new VirtualMachineProfileImpl(vm, null, _offeringDao.findById(vm.getId(), vm.getServiceOfferingId()), null, null);
    final List<VolumeVO> volumes = _volumeDao.findCreatedByInstance(vmProfile.getId());
    boolean usesLocal = false;
    for (final VolumeVO volume : volumes) {
        final DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId());
        final DiskProfile diskProfile = new DiskProfile(volume, diskOffering, vmProfile.getHypervisorType());
        if (diskProfile.useLocalStorage()) {
            usesLocal = true;
            break;
        }
    }
    if (!canMigrateWithStorage && usesLocal) {
        throw new InvalidParameterValueException("Unsupported operation, VM uses local storage, cannot migrate");
    }
    final Type hostType = srcHost.getType();
    Pair<List<HostVO>, Integer> allHostsPair = null;
    List<HostVO> allHosts = null;
    final Map<Host, Boolean> requiresStorageMotion = new HashMap<Host, Boolean>();
    DataCenterDeployment plan = null;
    if (canMigrateWithStorage) {
        allHostsPair = searchForServers(startIndex, pageSize, null, hostType, null, srcHost.getDataCenterId(), null, null, null, keyword, null, null, srcHost.getHypervisorType(), srcHost.getHypervisorVersion());
        allHosts = allHostsPair.first();
        allHosts.remove(srcHost);
        for (final VolumeVO volume : volumes) {
            final StoragePool storagePool = _poolDao.findById(volume.getPoolId());
            final Long volClusterId = storagePool.getClusterId();
            for (final Iterator<HostVO> iterator = allHosts.iterator(); iterator.hasNext(); ) {
                final Host host = iterator.next();
                if (volClusterId != null) {
                    if (!host.getClusterId().equals(volClusterId) || usesLocal) {
                        if (hasSuitablePoolsForVolume(volume, host, vmProfile)) {
                            requiresStorageMotion.put(host, true);
                        } else {
                            iterator.remove();
                        }
                    }
                } else {
                    if (storagePool.isManaged()) {
                        if (!srcHost.getClusterId().equals(host.getClusterId())) {
                            requiresStorageMotion.put(host, true);
                        }
                    }
                }
            }
        }
        plan = new DataCenterDeployment(srcHost.getDataCenterId(), null, null, null, null, null);
    } else {
        final Long cluster = srcHost.getClusterId();
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Searching for all hosts in cluster " + cluster + " for migrating VM " + vm);
        }
        allHostsPair = searchForServers(startIndex, pageSize, null, hostType, null, null, null, cluster, null, keyword, null, null, null, null);
        // Filter out the current host.
        allHosts = allHostsPair.first();
        allHosts.remove(srcHost);
        plan = new DataCenterDeployment(srcHost.getDataCenterId(), srcHost.getPodId(), srcHost.getClusterId(), null, null, null);
    }
    final Pair<List<? extends Host>, Integer> otherHosts = new Pair<List<? extends Host>, Integer>(allHosts, new Integer(allHosts.size()));
    List<Host> suitableHosts = new ArrayList<Host>();
    final ExcludeList excludes = new ExcludeList();
    excludes.addHost(srcHostId);
    // Call the affinity group processor chain.
    final long vmGroupCount = _affinityGroupVMMapDao.countAffinityGroupsForVm(vm.getId());
    if (vmGroupCount > 0) {
        for (final AffinityGroupProcessor processor : _affinityProcessors) {
            processor.process(vmProfile, plan, excludes);
        }
    }
    for (final HostAllocator allocator : hostAllocators) {
        if (canMigrateWithStorage) {
            suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, excludes, allHosts, HostAllocator.RETURN_UPTO_ALL, false);
        } else {
            suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, excludes, HostAllocator.RETURN_UPTO_ALL, false);
        }
        if (suitableHosts != null && !suitableHosts.isEmpty()) {
            break;
        }
    }
    if (s_logger.isDebugEnabled()) {
        if (suitableHosts.isEmpty()) {
            s_logger.debug("No suitable hosts found");
        } else {
            s_logger.debug("Hosts having capacity and suitable for migration: " + suitableHosts);
        }
    }
    return new Ternary<Pair<List<? extends Host>, Integer>, List<? extends Host>, Map<Host, Boolean>>(otherHosts, suitableHosts, requiresStorageMotion);
}
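A similar hedged sketch for the Ternary returned above, with ms, vmId, startIndex, pageSize, and keyword as placeholder names: the first element pages all candidate hosts, the second holds the hosts the allocators accepted, and the map flags which of those need storage motion:

// Hypothetical caller of listHostsForMigrationOfVM.
Ternary<Pair<List<? extends Host>, Integer>, List<? extends Host>, Map<Host, Boolean>> result = ms.listHostsForMigrationOfVM(vmId, startIndex, pageSize, keyword);
List<? extends Host> suitableHosts = result.second();
Map<Host, Boolean> storageMotionMap = result.third();
for (Host host : suitableHosts) {
    boolean needsStorageMotion = Boolean.TRUE.equals(storageMotionMap.get(host));
    s_logger.debug("Host " + host.getName() + " requires storage motion: " + needsStorageMotion);
}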
Use of com.cloud.deploy.DeploymentPlanner.ExcludeList in project cloudstack by apache.
The class VolumeOrchestrator, method findStoragePool:
@Override
public StoragePool findStoragePool(DiskProfile dskCh, DataCenter dc, Pod pod, Long clusterId, Long hostId, VirtualMachine vm, final Set<StoragePool> avoid) {
    Long podId = null;
    if (pod != null) {
        podId = pod.getId();
    } else if (clusterId != null) {
        Cluster cluster = _entityMgr.findById(Cluster.class, clusterId);
        if (cluster != null) {
            podId = cluster.getPodId();
        }
    }
    VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm);
    for (StoragePoolAllocator allocator : _storagePoolAllocators) {
        ExcludeList avoidList = new ExcludeList();
        for (StoragePool pool : avoid) {
            avoidList.addPool(pool.getId());
        }
        DataCenterDeployment plan = new DataCenterDeployment(dc.getId(), podId, clusterId, hostId, null, null);
        final List<StoragePool> poolList = allocator.allocateToPool(dskCh, profile, plan, avoidList, 1);
        if (poolList != null && !poolList.isEmpty()) {
            return (StoragePool) dataStoreMgr.getDataStore(poolList.get(0).getId(), DataStoreRole.Primary);
        }
    }
    return null;
}
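To round out the pattern, a minimal sketch of how a caller could drive findStoragePool with the avoid set it accepts, retrying after a pool fails; volumeOrchestrator and tryCreateVolumeOn are illustrative names, not from the source:

// Hypothetical retry loop: every failed pool is added to the avoid set, which
// findStoragePool converts into an ExcludeList of pool ids on the next pass.
Set<StoragePool> avoid = new HashSet<StoragePool>();
StoragePool pool;
while ((pool = volumeOrchestrator.findStoragePool(dskCh, dc, pod, clusterId, hostId, vm, avoid)) != null) {
    if (tryCreateVolumeOn(pool)) { // hypothetical helper
        break;
    }
    avoid.add(pool);
}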