use of com.cloud.deploy.DeploymentPlanner.ExcludeList in project cloudstack by apache.
the class ManagementServerImpl method listStoragePoolsForMigrationOfVolumeInternal.
public Pair<List<? extends StoragePool>, List<? extends StoragePool>> listStoragePoolsForMigrationOfVolumeInternal(final Long volumeId, Long newDiskOfferingId, Long newSize, Long newMinIops, Long newMaxIops, boolean keepSourceStoragePool) {
final Account caller = getCaller();
if (!_accountMgr.isRootAdmin(caller.getId())) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Caller is not a root admin, permission denied to migrate the volume");
}
throw new PermissionDeniedException("No permission to migrate volume, only root admin can migrate a volume");
}
final VolumeVO volume = _volumeDao.findById(volumeId);
if (volume == null) {
final InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find a volume with the specified id.");
ex.addProxyObject(volumeId.toString(), "volumeId");
throw ex;
}
Long diskOfferingId = volume.getDiskOfferingId();
if (newDiskOfferingId != null) {
diskOfferingId = newDiskOfferingId;
}
// Volume must be attached to an instance for live migration.
List<? extends StoragePool> allPools = new ArrayList<StoragePool>();
List<? extends StoragePool> suitablePools = new ArrayList<StoragePool>();
// Volume must be in Ready state to be migrated.
if (!Volume.State.Ready.equals(volume.getState())) {
s_logger.info("Volume " + volume + " must be in ready state for migration.");
return new Pair<List<? extends StoragePool>, List<? extends StoragePool>>(allPools, suitablePools);
}
final Long instanceId = volume.getInstanceId();
VMInstanceVO vm = null;
if (instanceId != null) {
vm = _vmInstanceDao.findById(instanceId);
}
if (vm == null) {
s_logger.info("Volume " + volume + " isn't attached to any vm. Looking for storage pools in the " + "zone to which this volumes can be migrated.");
} else if (vm.getState() != State.Running) {
s_logger.info("Volume " + volume + " isn't attached to any running vm. Looking for storage pools in the " + "cluster to which this volumes can be migrated.");
} else {
s_logger.info("Volume " + volume + " is attached to any running vm. Looking for storage pools in the " + "cluster to which this volumes can be migrated.");
boolean storageMotionSupported = false;
// Check if the underlying hypervisor supports storage motion.
final Long hostId = vm.getHostId();
if (hostId != null) {
final HostVO host = _hostDao.findById(hostId);
HypervisorCapabilitiesVO capabilities = null;
if (host != null) {
capabilities = _hypervisorCapabilitiesDao.findByHypervisorTypeAndVersion(host.getHypervisorType(), host.getHypervisorVersion());
} else {
s_logger.error("Details of the host on which the vm " + vm + ", to which volume " + volume + " is " + "attached, couldn't be retrieved.");
}
if (capabilities != null) {
storageMotionSupported = capabilities.isStorageMotionSupported();
} else {
s_logger.error("Capabilities for host " + host + " couldn't be retrieved.");
}
}
if (!storageMotionSupported) {
s_logger.info("Volume " + volume + " is attached to a running vm and the hypervisor doesn't support" + " storage motion.");
return new Pair<List<? extends StoragePool>, List<? extends StoragePool>>(allPools, suitablePools);
}
}
StoragePool srcVolumePool = _poolDao.findById(volume.getPoolId());
HypervisorType hypervisorType = getHypervisorType(vm, srcVolumePool);
Pair<Host, List<Cluster>> hostClusterPair = getVolumeVmHostClusters(srcVolumePool, vm, hypervisorType);
Host vmHost = hostClusterPair.first();
List<Cluster> clusters = hostClusterPair.second();
allPools = getAllStoragePoolCompatibleWithVolumeSourceStoragePool(srcVolumePool, hypervisorType, clusters);
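// Build the ExcludeList consulted by the storage pool allocators; the source pool is added to it below when it should not remain a migration candidate.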
ExcludeList avoid = new ExcludeList();
if (!keepSourceStoragePool) {
allPools.remove(srcVolumePool);
avoid.addPool(srcVolumePool.getId());
}
if (vm != null) {
suitablePools = findAllSuitableStoragePoolsForVm(volume, diskOfferingId, newSize, newMinIops, newMaxIops, vm, vmHost, avoid, CollectionUtils.isNotEmpty(clusters) ? clusters.get(0) : null, hypervisorType);
} else {
suitablePools = findAllSuitableStoragePoolsForDetachedVolume(volume, diskOfferingId, allPools);
}
removeDataStoreClusterParents((List<StoragePool>) allPools);
removeDataStoreClusterParents((List<StoragePool>) suitablePools);
return new Pair<List<? extends StoragePool>, List<? extends StoragePool>>(allPools, suitablePools);
}
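For orientation, the ExcludeList handling above reduces to a short sketch. This is an illustrative fragment only: srcPoolId and faultyHostId are placeholder values, and the addHost and getPoolsToAvoid accessors are assumed to mirror the addPool and getHostsToAvoid calls that appear in the snippets on this page.
// Negative filter that planners and allocators consult when picking resources.
ExcludeList avoid = new ExcludeList();
// Skip the pool the volume currently resides on (as done above when keepSourceStoragePool is false).
avoid.addPool(srcPoolId);
// Hosts (and clusters, pods, zones) can be excluded the same way; addHost is an assumed accessor here.
avoid.addHost(faultyHostId);
// Consumers read back the accumulated avoid sets.
Set<Long> poolsToSkip = avoid.getPoolsToAvoid();
Set<Long> hostsToSkip = avoid.getHostsToAvoid();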
use of com.cloud.deploy.DeploymentPlanner.ExcludeList in project cloudstack by apache.
the class StorageAllocatorTest method testCLOUDSTACK3481.
@Test
public void testCLOUDSTACK3481() {
try {
createDb();
StoragePoolVO pool = storagePoolDao.findById(storagePoolId);
pool.setHypervisor(HypervisorType.KVM);
pool.setScope(ScopeType.ZONE);
pool.setClusterId(null);
pool.setPodId(null);
storagePoolDao.update(pool.getId(), pool);
DiskProfile profile = new DiskProfile(volume, diskOffering, HypervisorType.KVM);
VirtualMachineProfile vmProfile = Mockito.mock(VirtualMachineProfile.class);
Account account = Mockito.mock(Account.class);
Mockito.when(account.getAccountId()).thenReturn(1L);
Mockito.when(vmProfile.getHypervisorType()).thenReturn(HypervisorType.KVM);
Mockito.when(vmProfile.getOwner()).thenReturn(account);
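// Capacity and IOPS checks are mocked to pass, so the allocation decision depends only on pool scope and hypervisor type.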
Mockito.when(storageMgr.storagePoolHasEnoughSpace(Matchers.anyListOf(Volume.class), Matchers.any(StoragePool.class))).thenReturn(true);
Mockito.when(storageMgr.storagePoolHasEnoughIops(Matchers.anyListOf(Volume.class), Matchers.any(StoragePool.class))).thenReturn(true);
DeploymentPlan plan = new DataCenterDeployment(dcId, podId, clusterId, null, null, null);
int foundAcct = 0;
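// Run every allocator with an empty ExcludeList; exactly one is expected to return the zone-wide KVM pool (CLOUDSTACK-3481).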
for (StoragePoolAllocator allocator : allocators) {
List<StoragePool> pools = allocator.allocateToPool(profile, vmProfile, plan, new ExcludeList(), 1);
if (!pools.isEmpty()) {
Assert.assertEquals(pools.get(0).getId(), storage.getId());
foundAcct++;
}
}
if (foundAcct > 1 || foundAcct == 0) {
Assert.fail();
}
} catch (Exception e) {
cleanDb();
Assert.fail();
}
}
use of com.cloud.deploy.DeploymentPlanner.ExcludeList in project cloudstack by apache.
the class StorageAllocatorTest method testClusterAllocatorWithWrongTag.
@Test
public void testClusterAllocatorWithWrongTag() {
try {
createDb();
StoragePoolDetailVO detailVO = new StoragePoolDetailVO(this.storagePoolId, "high", "true", true);
poolDetailsDao.persist(detailVO);
DiskOfferingVO diskOff = this.diskOfferingDao.findById(diskOffering.getId());
List<String> tags = new ArrayList<String>();
tags.add("low");
diskOff.setTagsArray(tags);
diskOfferingDao.update(diskOff.getId(), diskOff);
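// The pool is tagged "high" while the disk offering now requires "low", so no allocator should return it.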
DiskProfile profile = new DiskProfile(volume, diskOff, HypervisorType.XenServer);
VirtualMachineProfile vmProfile = Mockito.mock(VirtualMachineProfile.class);
Mockito.when(storageMgr.storagePoolHasEnoughSpace(Matchers.anyListOf(Volume.class), Matchers.any(StoragePool.class))).thenReturn(true);
DeploymentPlan plan = new DataCenterDeployment(dcId, podId, clusterId, null, null, null);
int foundAcct = 0;
for (StoragePoolAllocator allocator : allocators) {
List<StoragePool> pools = allocator.allocateToPool(profile, vmProfile, plan, new ExcludeList(), 1);
if (!pools.isEmpty()) {
foundAcct++;
}
}
if (foundAcct != 0) {
Assert.fail();
}
} catch (Exception e) {
cleanDb();
Assert.fail();
}
}
use of com.cloud.deploy.DeploymentPlanner.ExcludeList in project cloudstack by apache.
the class StorageAllocatorTest method testPoolStateIsNotUp.
@Test
public void testPoolStateIsNotUp() {
try {
createDb();
StoragePoolVO pool = storagePoolDao.findById(storagePoolId);
pool.setScope(ScopeType.ZONE);
pool.setStatus(StoragePoolStatus.Maintenance);
storagePoolDao.update(pool.getId(), pool);
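// The zone-wide pool is put into Maintenance, so no allocator should return it even though the capacity check is mocked to pass.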
DiskProfile profile = new DiskProfile(volume, diskOffering, HypervisorType.XenServer);
VirtualMachineProfile vmProfile = Mockito.mock(VirtualMachineProfile.class);
Mockito.when(storageMgr.storagePoolHasEnoughSpace(Matchers.anyListOf(Volume.class), Matchers.any(StoragePool.class))).thenReturn(true);
DeploymentPlan plan = new DataCenterDeployment(dcId, podId, clusterId, null, null, null);
int foundAcct = 0;
for (StoragePoolAllocator allocator : allocators) {
List<StoragePool> pools = allocator.allocateToPool(profile, vmProfile, plan, new ExcludeList(), 1);
if (!pools.isEmpty()) {
Assert.assertEquals(pools.get(0).getId(), storage.getId());
foundAcct++;
}
}
if (foundAcct == 1) {
Assert.fail();
}
} catch (Exception e) {
cleanDb();
Assert.fail();
}
}
use of com.cloud.deploy.DeploymentPlanner.ExcludeList in project cloudstack by apache.
the class ImplicitPlannerTest method checkStrictModeWithCurrentAccountVmsPresent.
@Test
public void checkStrictModeWithCurrentAccountVmsPresent() throws InsufficientServerCapacityException {
VirtualMachineProfileImpl vmProfile = mock(VirtualMachineProfileImpl.class);
DataCenterDeployment plan = mock(DataCenterDeployment.class);
ExcludeList avoids = new ExcludeList();
initializeForTest(vmProfile, plan);
initializeForImplicitPlannerTest(false);
List<Long> clusterList = planner.orderClusters(vmProfile, plan, avoids);
// Validations.
// Check that clusters 2 and 3 are not in the cluster list.
// Hosts 6 and 7 should be in the avoid list.
assertFalse("Cluster list should not be null/empty", (clusterList == null || clusterList.isEmpty()));
boolean foundNeededCluster = false;
for (Long cluster : clusterList) {
if (cluster != 1) {
fail("Found a cluster that shouldn't have been present, cluster id : " + cluster);
} else {
foundNeededCluster = true;
}
}
assertTrue("Didn't find cluster 1 in the list. It should have been present", foundNeededCluster);
Set<Long> hostsInAvoidList = avoids.getHostsToAvoid();
assertFalse("Host 5 shouldn't have be in the avoid list, but it is present", hostsInAvoidList.contains(5L));
Set<Long> hostsThatShouldBeInAvoidList = new HashSet<Long>();
hostsThatShouldBeInAvoidList.add(6L);
hostsThatShouldBeInAvoidList.add(7L);
assertTrue("Hosts 6 and 7 that should have been present were not found in avoid list", hostsInAvoidList.containsAll(hostsThatShouldBeInAvoidList));
}
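Taken together, these examples show both sides of the ExcludeList contract: callers seed it (for example addPool on the source storage pool before asking for migration targets), while planners and allocators extend and consult it, which is why the tests can hand a fresh ExcludeList to allocateToPool or inspect getHostsToAvoid after orderClusters returns.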