use of com.cloud.vm.VirtualMachineProfileImpl in project cloudstack by apache.
the class ImplicitPlannerTest method checkPreferredModeNoHostsAvailable.
@Test
public void checkPreferredModeNoHostsAvailable() throws InsufficientServerCapacityException {
    @SuppressWarnings("unchecked") VirtualMachineProfileImpl vmProfile = mock(VirtualMachineProfileImpl.class);
    DataCenterDeployment plan = mock(DataCenterDeployment.class);
    ExcludeList avoids = new ExcludeList();
    initializeForTest(vmProfile, plan);
    initializeForImplicitPlannerTest(false);
    // Mark hosts 5, 6 and 7 to be in the avoid list.
    avoids.addHost(5L);
    avoids.addHost(6L);
    avoids.addHost(7L);
    List<Long> clusterList = planner.orderClusters(vmProfile, plan, avoids);
    // Validations.
    // Check that the cluster list is empty.
    assertTrue("Cluster list should be null/empty", (clusterList == null || clusterList.isEmpty()));
}
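For reference, a minimal, self-contained JUnit 4 / Mockito sketch of the same mock-and-assert pattern. The Planner interface and class name below are stand-ins for illustration, not the CloudStack DeploymentPlanner API:

import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.util.Collections;
import java.util.List;
import org.junit.Test;

public class EmptyOrderingPatternTest {

    // Stand-in for the planner API exercised above; not the CloudStack interface.
    interface Planner {
        List<Long> orderClusters(Object profile, Object plan, Object avoids);
    }

    @Test
    public void returnsEmptyWhenEverythingIsAvoided() {
        Planner planner = mock(Planner.class);
        // Simulate the "all hosts avoided" case: the planner yields no clusters.
        when(planner.orderClusters(null, null, null)).thenReturn(Collections.<Long>emptyList());
        List<Long> clusterList = planner.orderClusters(null, null, null);
        assertTrue("Cluster list should be empty when no hosts are available",
                clusterList == null || clusterList.isEmpty());
    }
}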
use of com.cloud.vm.VirtualMachineProfileImpl in project cloudstack by apache.
the class ImplicitPlannerTest method checkPreferredModePreferredHostAvailable.
@Test
public void checkPreferredModePreferredHostAvailable() throws InsufficientServerCapacityException {
    @SuppressWarnings("unchecked") VirtualMachineProfileImpl vmProfile = mock(VirtualMachineProfileImpl.class);
    DataCenterDeployment plan = mock(DataCenterDeployment.class);
    ExcludeList avoids = new ExcludeList();
    initializeForTest(vmProfile, plan);
    initializeForImplicitPlannerTest(true);
    // Mark hosts 5 and 6 to be in the avoid list.
    avoids.addHost(5L);
    avoids.addHost(6L);
    List<Long> clusterList = planner.orderClusters(vmProfile, plan, avoids);
    // Validations.
    // Check that clusters 1 and 2 are not in the cluster list.
    // Hosts 5 and 6 should also be in the avoid list.
    assertFalse("Cluster list should not be null/empty", (clusterList == null || clusterList.isEmpty()));
    boolean foundNeededCluster = false;
    for (Long cluster : clusterList) {
        if (cluster != 3) {
            fail("Found a cluster that shouldn't have been present, cluster id : " + cluster);
        } else {
            foundNeededCluster = true;
        }
    }
    assertTrue("Didn't find cluster 3 in the list. It should have been present", foundNeededCluster);
    Set<Long> hostsInAvoidList = avoids.getHostsToAvoid();
    assertFalse("Host 7 shouldn't be in the avoid list, but it is present", hostsInAvoidList.contains(7L));
    Set<Long> hostsThatShouldBeInAvoidList = new HashSet<Long>();
    hostsThatShouldBeInAvoidList.add(5L);
    hostsThatShouldBeInAvoidList.add(6L);
    assertTrue("Hosts 5 and 6 that should have been present were not found in the avoid list", hostsInAvoidList.containsAll(hostsThatShouldBeInAvoidList));
}
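The loop above effectively asserts that the ordering contains cluster 3 and nothing else. A compact, self-contained helper expressing the same check, illustrative only and not part of the CloudStack test:

import java.util.List;

public class SingleClusterCheckSketch {
    // Returns true only if the ordering is non-empty and contains nothing but expectedClusterId.
    static boolean containsOnly(List<Long> clusterList, long expectedClusterId) {
        if (clusterList == null || clusterList.isEmpty()) {
            return false;
        }
        for (Long cluster : clusterList) {
            if (cluster == null || cluster.longValue() != expectedClusterId) {
                return false;
            }
        }
        return true;
    }
}

With such a helper, the loop and flag collapse to a single assertion like assertTrue(containsOnly(clusterList, 3L)).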
use of com.cloud.vm.VirtualMachineProfileImpl in project cloudstack by apache.
the class VMEntityManagerImpl method reserveVirtualMachine.
@Override
public String reserveVirtualMachine(VMEntityVO vmEntityVO, DeploymentPlanner plannerToUse, DeploymentPlan planToDeploy, ExcludeList exclude) throws InsufficientCapacityException, ResourceUnavailableException {
    // Call the planner and get the deploy destination.
    // Load the vm instance and offerings and call virtualMachineManagerImpl.
    // FIXME: profile should work on VirtualMachineEntity
    VMInstanceVO vm = _vmDao.findByUuid(vmEntityVO.getUuid());
    VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vm);
    vmProfile.setServiceOffering(_serviceOfferingDao.findByIdIncludingRemoved(vm.getId(), vm.getServiceOfferingId()));
    DataCenterDeployment plan = new DataCenterDeployment(vm.getDataCenterId(), vm.getPodIdToDeployIn(), null, null, null, null);
    if (planToDeploy != null && planToDeploy.getDataCenterId() != 0) {
        plan = new DataCenterDeployment(planToDeploy.getDataCenterId(), planToDeploy.getPodId(), planToDeploy.getClusterId(), planToDeploy.getHostId(), planToDeploy.getPoolId(), planToDeploy.getPhysicalNetworkId());
    }
    boolean planChangedByReadyVolume = false;
    List<VolumeVO> vols = _volsDao.findReadyRootVolumesByInstance(vm.getId());
    if (!vols.isEmpty()) {
        VolumeVO vol = vols.get(0);
        StoragePool pool = (StoragePool) dataStoreMgr.getPrimaryDataStore(vol.getPoolId());
        if (!pool.isInMaintenance()) {
            long rootVolDcId = pool.getDataCenterId();
            Long rootVolPodId = pool.getPodId();
            Long rootVolClusterId = pool.getClusterId();
            if (planToDeploy != null && planToDeploy.getDataCenterId() != 0) {
                Long clusterIdSpecified = planToDeploy.getClusterId();
                if (clusterIdSpecified != null && rootVolClusterId != null) {
                    if (rootVolClusterId.longValue() != clusterIdSpecified.longValue()) {
                        // The ready root volume lives in a different cluster than the one the plan specifies, so the plan cannot be satisfied.
                        throw new ResourceUnavailableException("Root volume is ready in a different cluster, the deployment plan provided cannot be satisfied, unable to create a deployment for " + vm, Cluster.class, clusterIdSpecified);
                    }
                }
                plan = new DataCenterDeployment(planToDeploy.getDataCenterId(), planToDeploy.getPodId(), planToDeploy.getClusterId(), planToDeploy.getHostId(), vol.getPoolId(), null, null);
            } else {
                plan = new DataCenterDeployment(rootVolDcId, rootVolPodId, rootVolClusterId, null, vol.getPoolId(), null, null);
                planChangedByReadyVolume = true;
            }
        }
    }
    while (true) {
        DeployDestination dest = null;
        try {
            dest = _dpMgr.planDeployment(vmProfile, plan, exclude, plannerToUse);
        } catch (AffinityConflictException e) {
            throw new CloudRuntimeException("Unable to create deployment, affinity rules associated with the VM conflict");
        }
        if (dest != null) {
            String reservationId = _dpMgr.finalizeReservation(dest, vmProfile, plan, exclude, plannerToUse);
            if (reservationId != null) {
                return reservationId;
            } else {
                if (s_logger.isDebugEnabled()) {
                    s_logger.debug("Cannot finalize the VM reservation for this destination, retrying");
                }
                exclude.addHost(dest.getHost().getId());
                continue;
            }
        } else if (planChangedByReadyVolume) {
            // The plan was narrowed to the ready root volume's cluster and no destination was found; return a placeholder reservation id and let the deploy call retry.
            return UUID.randomUUID().toString();
        } else {
            throw new InsufficientServerCapacityException("Unable to create a deployment for " + vmProfile, DataCenter.class, plan.getDataCenterId(), areAffinityGroupsAssociated(vmProfile));
        }
    }
}
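The control flow of this method is essentially a plan-then-finalize retry loop. A generic, self-contained sketch of just that loop, with stand-in Planner and Destination types; the names and signatures below are illustrative, not the CloudStack DeploymentPlanningManager API:

import java.util.HashSet;
import java.util.Set;

public class ReservationRetrySketch {

    interface Destination {
        long hostId();
    }

    interface Planner {
        Destination plan(Set<Long> excludedHosts);      // may return null when no capacity is found
        String finalizeReservation(Destination dest);   // may return null if finalization fails
    }

    static String reserve(Planner planner) {
        Set<Long> excludedHosts = new HashSet<Long>();
        while (true) {
            Destination dest = planner.plan(excludedHosts);
            if (dest == null) {
                // No destination: the real method either returns a fresh id (ready-volume case)
                // or throws InsufficientServerCapacityException.
                return null;
            }
            String reservationId = planner.finalizeReservation(dest);
            if (reservationId != null) {
                return reservationId;
            }
            // Finalization failed (e.g. the capacity was taken in the meantime): avoid this host and retry.
            excludedHosts.add(dest.hostId());
        }
    }
}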
use of com.cloud.vm.VirtualMachineProfileImpl in project cloudstack by apache.
the class VolumeOrchestrator method findStoragePool.
@Override
public StoragePool findStoragePool(DiskProfile dskCh, DataCenter dc, Pod pod, Long clusterId, Long hostId, VirtualMachine vm, final Set<StoragePool> avoid) {
    Long podId = null;
    if (pod != null) {
        podId = pod.getId();
    } else if (clusterId != null) {
        Cluster cluster = _entityMgr.findById(Cluster.class, clusterId);
        if (cluster != null) {
            podId = cluster.getPodId();
        }
    }
    VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm);
    for (StoragePoolAllocator allocator : _storagePoolAllocators) {
        ExcludeList avoidList = new ExcludeList();
        for (StoragePool pool : avoid) {
            avoidList.addPool(pool.getId());
        }
        DataCenterDeployment plan = new DataCenterDeployment(dc.getId(), podId, clusterId, hostId, null, null);
        final List<StoragePool> poolList = allocator.allocateToPool(dskCh, profile, plan, avoidList, 1);
        if (poolList != null && !poolList.isEmpty()) {
            return (StoragePool) dataStoreMgr.getDataStore(poolList.get(0).getId(), DataStoreRole.Primary);
        }
    }
    return null;
}
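The allocator loop above is a first-match chain: each StoragePoolAllocator is asked in turn and the first non-empty result wins. A small generic sketch of that pattern, with a stand-in Allocator type rather than the CloudStack StoragePoolAllocator interface:

import java.util.List;

public class AllocatorChainSketch {

    interface Allocator<T> {
        // Returns up to returnUpTo candidates, or null/empty when nothing suitable is found.
        List<T> allocate(int returnUpTo);
    }

    static <T> T firstMatch(Iterable<Allocator<T>> allocators) {
        for (Allocator<T> allocator : allocators) {
            List<T> candidates = allocator.allocate(1);
            if (candidates != null && !candidates.isEmpty()) {
                return candidates.get(0);
            }
        }
        // No allocator produced a suitable candidate.
        return null;
    }
}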
use of com.cloud.vm.VirtualMachineProfileImpl in project cloudstack by apache.
the class ManagementServerImpl method listStoragePoolsForMigrationOfVolume.
@Override
public Pair<List<? extends StoragePool>, List<? extends StoragePool>> listStoragePoolsForMigrationOfVolume(final Long volumeId) {
    final Account caller = getCaller();
    if (!_accountMgr.isRootAdmin(caller.getId())) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Caller is not a root admin, permission denied to migrate the volume");
        }
        throw new PermissionDeniedException("No permission to migrate volume, only root admin can migrate a volume");
    }
    final VolumeVO volume = _volumeDao.findById(volumeId);
    if (volume == null) {
        final InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find volume with specified id.");
        ex.addProxyObject(volumeId.toString(), "volumeId");
        throw ex;
    }
    // Volume must be attached to an instance for live migration.
    final List<StoragePool> allPools = new ArrayList<StoragePool>();
    final List<StoragePool> suitablePools = new ArrayList<StoragePool>();
    // Volume must be in Ready state to be migrated.
    if (!Volume.State.Ready.equals(volume.getState())) {
        s_logger.info("Volume " + volume + " must be in ready state for migration.");
        return new Pair<List<? extends StoragePool>, List<? extends StoragePool>>(allPools, suitablePools);
    }
    if (!_volumeMgr.volumeOnSharedStoragePool(volume)) {
        s_logger.info("Volume " + volume + " is on local storage. It cannot be migrated to another pool.");
        return new Pair<List<? extends StoragePool>, List<? extends StoragePool>>(allPools, suitablePools);
    }
    final Long instanceId = volume.getInstanceId();
    VMInstanceVO vm = null;
    if (instanceId != null) {
        vm = _vmInstanceDao.findById(instanceId);
    }
    if (vm == null) {
        s_logger.info("Volume " + volume + " isn't attached to any vm. Looking for storage pools in the zone to which this volume can be migrated.");
    } else if (vm.getState() != State.Running) {
        s_logger.info("Volume " + volume + " isn't attached to any running vm. Looking for storage pools in the cluster to which this volume can be migrated.");
    } else {
        s_logger.info("Volume " + volume + " is attached to a running vm. Looking for storage pools in the cluster to which this volume can be migrated.");
        boolean storageMotionSupported = false;
        // Check if the underlying hypervisor supports storage motion.
        final Long hostId = vm.getHostId();
        if (hostId != null) {
            final HostVO host = _hostDao.findById(hostId);
            HypervisorCapabilitiesVO capabilities = null;
            if (host != null) {
                capabilities = _hypervisorCapabilitiesDao.findByHypervisorTypeAndVersion(host.getHypervisorType(), host.getHypervisorVersion());
            } else {
                s_logger.error("Details of the host on which the vm " + vm + ", to which volume " + volume + " is attached, couldn't be retrieved.");
            }
            if (capabilities != null) {
                storageMotionSupported = capabilities.isStorageMotionSupported();
            } else {
                s_logger.error("Capabilities for host " + host + " couldn't be retrieved.");
            }
        }
        if (!storageMotionSupported) {
            s_logger.info("Volume " + volume + " is attached to a running vm and the hypervisor doesn't support storage motion.");
            return new Pair<List<? extends StoragePool>, List<? extends StoragePool>>(allPools, suitablePools);
        }
    }
    // Source pool of the volume.
    final StoragePoolVO srcVolumePool = _poolDao.findById(volume.getPoolId());
    // Get all the pools available. Only shared pools are considered because only a volume on a shared pool
    // can be live migrated while the virtual machine stays on the same host.
    List<StoragePoolVO> storagePools = null;
    if (srcVolumePool.getClusterId() == null) {
        storagePools = _poolDao.findZoneWideStoragePoolsByTags(volume.getDataCenterId(), null);
    } else {
        storagePools = _poolDao.findPoolsByTags(volume.getDataCenterId(), srcVolumePool.getPodId(), srcVolumePool.getClusterId(), null);
    }
    storagePools.remove(srcVolumePool);
    for (final StoragePoolVO pool : storagePools) {
        if (pool.isShared()) {
            allPools.add((StoragePool) dataStoreMgr.getPrimaryDataStore(pool.getId()));
        }
    }
    // Get all the suitable pools.
    // Exclude the current pool from the list of pools to which the volume can be migrated.
    final ExcludeList avoid = new ExcludeList();
    avoid.addPool(srcVolumePool.getId());
    // Volume stays in the same cluster after migration.
    final DataCenterDeployment plan = new DataCenterDeployment(volume.getDataCenterId(), srcVolumePool.getPodId(), srcVolumePool.getClusterId(), null, null, null);
    final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm);
    final DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId());
    final DiskProfile diskProfile = new DiskProfile(volume, diskOffering, profile.getHypervisorType());
    // Ask the storage pool allocators for the list of suitable storage pools.
    for (final StoragePoolAllocator allocator : _storagePoolAllocators) {
        final List<StoragePool> pools = allocator.allocateToPool(diskProfile, profile, plan, avoid, StoragePoolAllocator.RETURN_UPTO_ALL);
        if (pools != null && !pools.isEmpty()) {
            suitablePools.addAll(pools);
            break;
        }
    }
    return new Pair<List<? extends StoragePool>, List<? extends StoragePool>>(allPools, suitablePools);
}
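A hypothetical caller-side sketch, not from the CloudStack source, of how the returned pair can be consumed: the first list holds every shared pool in scope, the second only those a storage pool allocator judged suitable, so a presentation layer can flag each pool accordingly. The MigrationTargetSketch class and printSuitability helper are illustrative only:

import java.util.List;

public class MigrationTargetSketch {
    // Prints each pool with a flag indicating whether an allocator found it suitable.
    static <T> void printSuitability(List<? extends T> allPools, List<? extends T> suitablePools) {
        for (T pool : allPools) {
            System.out.println(pool + " suitable=" + suitablePools.contains(pool));
        }
    }
}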