use of com.cloud.deploy.DataCenterDeployment in project cloudstack by apache.
the class RouterDeploymentDefinition method generateDeploymentPlan.
protected void generateDeploymentPlan() {
final long dcId = dest.getDataCenter().getId();
Long podId = null;
if (isBasic()) {
if (dest.getPod() == null) {
throw new CloudRuntimeException("Pod id is expected in deployment destination");
}
podId = dest.getPod().getId();
}
plan = new DataCenterDeployment(dcId, podId, null, null, null, null);
}
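For reference, the six-argument constructor used across these snippets takes, in order: zone id, pod id, cluster id, host id, pool id and physical network id; a null leaves that level of the hierarchy for the planner to decide. A minimal, hedged sketch (the ids are illustrative, not taken from the snippet):
final long dcId = 1L; // zone id, always required
final Long podId = 2L; // pod id, mandatory in basic zones as enforced above
// Scope the plan to the zone and pod only; cluster, host, storage pool and
// physical network stay null so the planner may choose them freely.
final DataCenterDeployment plan = new DataCenterDeployment(dcId, podId, null, null, null, null);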
use of com.cloud.deploy.DataCenterDeployment in project cloudstack by apache.
the class UnmanagedVMsManagerImpl method migrateImportedVM.
private UserVm migrateImportedVM(HostVO sourceHost, VirtualMachineTemplate template, ServiceOfferingVO serviceOffering, UserVm userVm, final Account owner, List<Pair<DiskProfile, StoragePool>> diskProfileStoragePoolList) {
UserVm vm = userVm;
if (vm == null) {
LOGGER.error("Failed to check migration needs during VM import");
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to check migration needs during VM import");
}
if (sourceHost == null || serviceOffering == null || diskProfileStoragePoolList == null) {
LOGGER.error(String.format("Failed to check migration needs during import, VM: %s", userVm.getInstanceName()));
cleanupFailedImportVM(vm);
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to check migration needs during import, VM: %s", userVm.getInstanceName()));
}
if (!hostSupportsServiceOffering(sourceHost, serviceOffering)) {
LOGGER.debug(String.format("VM %s needs to be migrated", vm.getUuid()));
final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm, template, serviceOffering, owner, null);
DeploymentPlanner.ExcludeList excludeList = new DeploymentPlanner.ExcludeList();
excludeList.addHost(sourceHost.getId());
final DataCenterDeployment plan = new DataCenterDeployment(sourceHost.getDataCenterId(), sourceHost.getPodId(), sourceHost.getClusterId(), null, null, null);
DeployDestination dest = null;
try {
dest = deploymentPlanningManager.planDeployment(profile, plan, excludeList, null);
} catch (Exception e) {
LOGGER.warn(String.format("VM import failed for unmanaged VM: %s while finding a deployment destination for migration", vm.getInstanceName()), e);
cleanupFailedImportVM(vm);
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM import failed for unmanaged VM: %s while finding a deployment destination for migration", vm.getInstanceName()));
}
if (dest != null && LOGGER.isDebugEnabled()) {
LOGGER.debug("Found destination " + dest + " for migrating the VM to");
}
if (dest == null) {
cleanupFailedImportVM(vm);
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM import failed for unmanaged vm: %s during vm migration, no deployment destination found", vm.getInstanceName()));
}
try {
if (vm.getState().equals(VirtualMachine.State.Stopped)) {
VMInstanceVO vmInstanceVO = vmDao.findById(userVm.getId());
vmInstanceVO.setHostId(dest.getHost().getId());
vmInstanceVO.setLastHostId(dest.getHost().getId());
vmDao.update(vmInstanceVO.getId(), vmInstanceVO);
} else {
virtualMachineManager.migrate(vm.getUuid(), sourceHost.getId(), dest);
}
vm = userVmManager.getUserVm(vm.getId());
} catch (Exception e) {
LOGGER.error(String.format("VM import failed for unmanaged vm: %s during vm migration", vm.getInstanceName()), e);
cleanupFailedImportVM(vm);
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM import failed for unmanaged vm: %s during vm migration. %s", userVm.getInstanceName(), e.getMessage()));
}
}
for (Pair<DiskProfile, StoragePool> diskProfileStoragePool : diskProfileStoragePoolList) {
if (diskProfileStoragePool == null || diskProfileStoragePool.first() == null || diskProfileStoragePool.second() == null) {
continue;
}
DiskProfile profile = diskProfileStoragePool.first();
DiskOffering dOffering = diskOfferingDao.findById(profile.getDiskOfferingId());
if (dOffering == null) {
continue;
}
VolumeVO volumeVO = volumeDao.findById(profile.getVolumeId());
if (volumeVO == null) {
continue;
}
boolean poolSupportsOfferings = storagePoolSupportsDiskOffering(diskProfileStoragePool.second(), dOffering);
if (poolSupportsOfferings) {
continue;
}
LOGGER.debug(String.format("Volume %s needs to be migrated", volumeVO.getUuid()));
Pair<List<? extends StoragePool>, List<? extends StoragePool>> poolsPair = managementService.listStoragePoolsForMigrationOfVolumeInternal(profile.getVolumeId(), null, null, null, null, false);
if (CollectionUtils.isEmpty(poolsPair.first()) && CollectionUtils.isEmpty(poolsPair.second())) {
cleanupFailedImportVM(vm);
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM import failed for unmanaged vm: %s during volume ID: %s migration as no suitable pool(s) found", userVm.getInstanceName(), volumeVO.getUuid()));
}
List<? extends StoragePool> storagePools = poolsPair.second();
StoragePool storagePool = null;
if (CollectionUtils.isNotEmpty(storagePools)) {
for (StoragePool pool : storagePools) {
if (diskProfileStoragePool.second().getId() != pool.getId() && storagePoolSupportsDiskOffering(pool, dOffering)) {
storagePool = pool;
break;
}
}
}
// For zone-wide pools, suitable storage pools are sometimes not returned; therefore consider all pools.
if (storagePool == null && CollectionUtils.isNotEmpty(poolsPair.first())) {
storagePools = poolsPair.first();
for (StoragePool pool : storagePools) {
if (diskProfileStoragePool.second().getId() != pool.getId() && storagePoolSupportsDiskOffering(pool, dOffering)) {
storagePool = pool;
break;
}
}
}
if (storagePool == null) {
cleanupFailedImportVM(vm);
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM import failed for unmanaged vm: %s during volume ID: %s migration as no suitable pool found", userVm.getInstanceName(), volumeVO.getUuid()));
} else {
LOGGER.debug(String.format("Found storage pool %s(%s) for migrating the volume %s to", storagePool.getName(), storagePool.getUuid(), volumeVO.getUuid()));
}
try {
Volume volume = null;
if (vm.getState().equals(VirtualMachine.State.Running)) {
volume = volumeManager.liveMigrateVolume(volumeVO, storagePool);
} else {
volume = volumeManager.migrateVolume(volumeVO, storagePool);
}
if (volume == null) {
String msg = "";
if (vm.getState().equals(VirtualMachine.State.Running)) {
msg = String.format("Live migration for volume ID: %s to destination pool ID: %s failed", volumeVO.getUuid(), storagePool.getUuid());
} else {
msg = String.format("Migration for volume ID: %s to destination pool ID: %s failed", volumeVO.getUuid(), storagePool.getUuid());
}
LOGGER.error(msg);
throw new CloudRuntimeException(msg);
}
} catch (Exception e) {
LOGGER.error(String.format("VM import failed for unmanaged vm: %s during volume migration", vm.getInstanceName()), e);
cleanupFailedImportVM(vm);
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM import failed for unmanaged vm: %s during volume migration. %s", userVm.getInstanceName(), StringUtils.defaultString(e.getMessage())));
}
}
return userVm;
}
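The pool selection above is two-phase: the suitable pools (poolsPair.second()) are tried first, and only for zone-wide cases does the code fall back to all candidate pools (poolsPair.first()). A hedged sketch of that selection, factored into a hypothetical helper on the same class (it assumes java.util.Arrays and java.util.List are imported, and reuses the CollectionUtils and storagePoolSupportsDiskOffering already seen above):
// Hypothetical helper: prefer the suitable pools, then fall back to the wider
// list, skipping the current pool and pools that reject the disk offering.
private StoragePool chooseTargetPool(final StoragePool currentPool, final DiskOffering offering,
        final List<? extends StoragePool> suitablePools, final List<? extends StoragePool> allPools) {
    for (final List<? extends StoragePool> candidates : Arrays.asList(suitablePools, allPools)) {
        if (CollectionUtils.isEmpty(candidates)) {
            continue;
        }
        for (final StoragePool pool : candidates) {
            if (currentPool.getId() != pool.getId() && storagePoolSupportsDiskOffering(pool, offering)) {
                return pool;
            }
        }
    }
    return null; // the caller treats null as "no suitable pool found"
}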
use of com.cloud.deploy.DataCenterDeployment in project cloudstack by apache.
the class UserVmManagerImpl method chooseVmMigrationDestination.
private DeployDestination chooseVmMigrationDestination(VMInstanceVO vm, Host srcHost) {
// The last host should not get higher priority during VM migration, so clear it
vm.setLastHostId(null);
final ServiceOfferingVO offering = _offeringDao.findById(vm.getId(), vm.getServiceOfferingId());
final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm, null, offering, null, null);
final Long srcHostId = srcHost.getId();
final Host host = _hostDao.findById(srcHostId);
ExcludeList excludes = new ExcludeList();
excludes.addHost(srcHostId);
final DataCenterDeployment plan = _itMgr.getMigrationDeployment(vm, host, null, excludes);
try {
return _planningMgr.planDeployment(profile, plan, excludes, null);
} catch (final AffinityConflictException e2) {
s_logger.warn("Unable to create deployment, affinity rules associted to the VM conflict", e2);
throw new CloudRuntimeException("Unable to create deployment, affinity rules associted to the VM conflict");
} catch (final InsufficientServerCapacityException e3) {
throw new CloudRuntimeException("Unable to find a server to migrate the vm to");
}
}
use of com.cloud.deploy.DataCenterDeployment in project cosmic by MissionCriticalCloud.
the class VirtualMachineManagerImpl method orchestrateStart.
@Override
public void orchestrateStart(final String vmUuid, final Map<VirtualMachineProfile.Param, Object> params, final DeploymentPlan planToDeploy, final DeploymentPlanner planner) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException {
final CallContext cctxt = CallContext.current();
final Account account = cctxt.getCallingAccount();
final User caller = cctxt.getCallingUser();
VMInstanceVO vm = _vmDao.findByUuid(vmUuid);
final Ternary<VMInstanceVO, ReservationContext, ItWorkVO> start = changeToStartState(vm, caller, account);
if (start == null) {
return;
}
vm = start.first();
final ReservationContext ctx = start.second();
ItWorkVO work = start.third();
VMInstanceVO startedVm = null;
final ServiceOfferingVO offering = _offeringDao.findById(vm.getId(), vm.getServiceOfferingId());
final VirtualMachineTemplate template = _entityMgr.findByIdIncludingRemoved(VirtualMachineTemplate.class, vm.getTemplateId());
s_logger.debug("Trying to deploy VM, vm has dcId: " + vm.getDataCenterId() + " and podId: " + vm.getPodIdToDeployIn());
DataCenterDeployment plan = new DataCenterDeployment(vm.getDataCenterId(), vm.getPodIdToDeployIn(), null, null, null, null, ctx);
if (planToDeploy != null && planToDeploy.getDataCenterId() != 0) {
s_logger.debug("advanceStart: DeploymentPlan is provided, using dcId:" + planToDeploy.getDataCenterId() + ", podId: " + planToDeploy.getPodId() + ", clusterId: " + planToDeploy.getClusterId() + ", hostId: " + planToDeploy.getHostId() + ", poolId: " + planToDeploy.getPoolId());
plan = new DataCenterDeployment(planToDeploy.getDataCenterId(), planToDeploy.getPodId(), planToDeploy.getClusterId(), planToDeploy.getHostId(), planToDeploy.getPoolId(), planToDeploy.getPhysicalNetworkId(), ctx);
}
final HypervisorGuru hvGuru = _hvGuruMgr.getGuru(vm.getHypervisorType());
final VirtualMachineGuru vmGuru = getVmGuru(vm);
boolean canRetry = true;
ExcludeList avoids = null;
try {
final Journal journal = start.second().getJournal();
if (planToDeploy != null) {
avoids = planToDeploy.getAvoids();
}
if (avoids == null) {
avoids = new ExcludeList();
}
s_logger.debug("VM start orchestration will {}", avoids.toString());
boolean planChangedByVolume = false;
boolean reuseVolume = true;
final DataCenterDeployment originalPlan = plan;
int retry = StartRetry.value();
while (retry-- != 0) {
if (reuseVolume) {
// edit plan if this vm's ROOT volume is in READY state already
final List<VolumeVO> vols = _volsDao.findReadyRootVolumesByInstance(vm.getId());
for (final VolumeVO vol : vols) {
// make sure the templateId is unchanged; if it has changed, let the
// planner reassign a pool for the volume even if it is ready.
final Long volTemplateId = vol.getTemplateId();
if (volTemplateId != null && volTemplateId != template.getId()) {
s_logger.debug(vol + " of " + vm + " is READY, but template ids don't match, let the planner reassign a new pool");
continue;
}
final StoragePool pool = (StoragePool) dataStoreMgr.getPrimaryDataStore(vol.getPoolId());
if (!pool.isInMaintenance()) {
s_logger.debug("Root volume is ready, need to place VM in volume's cluster");
final long rootVolDcId = pool.getDataCenterId();
final Long rootVolPodId = pool.getPodId();
final Long rootVolClusterId = pool.getClusterId();
if (planToDeploy != null && planToDeploy.getDataCenterId() != 0) {
final Long clusterIdSpecified = planToDeploy.getClusterId();
if (clusterIdSpecified != null && rootVolClusterId != null) {
checkIfPlanIsDeployable(vm, rootVolClusterId, clusterIdSpecified);
}
plan = new DataCenterDeployment(planToDeploy.getDataCenterId(), planToDeploy.getPodId(), planToDeploy.getClusterId(), planToDeploy.getHostId(), vol.getPoolId(), null, ctx);
} else {
plan = new DataCenterDeployment(rootVolDcId, rootVolPodId, rootVolClusterId, null, vol.getPoolId(), null, ctx);
s_logger.debug(vol + " is READY, changing deployment plan to use this pool's dcId: " + rootVolDcId + " , podId: " + rootVolPodId + " , and clusterId: " + rootVolClusterId);
planChangedByVolume = true;
}
}
}
}
final Account owner = _entityMgr.findById(Account.class, vm.getAccountId());
final VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vm, template, offering, owner, params);
DeployDestination dest;
try {
dest = _dpMgr.planDeployment(vmProfile, plan, avoids, planner);
} catch (final AffinityConflictException e2) {
s_logger.warn("Unable to create deployment, affinity rules associted to the VM conflict", e2);
throw new CloudRuntimeException("Unable to create deployment, affinity rules associted to the VM conflict");
}
if (dest == null) {
if (planChangedByVolume) {
plan = originalPlan;
planChangedByVolume = false;
// do not enter volume reuse for next retry, since we want to look for resources outside the volume's cluster
reuseVolume = false;
continue;
}
throw new InsufficientServerCapacityException("Unable to create a deployment for " + vmProfile, DataCenter.class, plan.getDataCenterId(), areAffinityGroupsAssociated(vmProfile));
}
// dest is guaranteed non-null here; record it and avoid this host on retry
avoids.addHost(dest.getHost().getId());
journal.record("Deployment found ", vmProfile, dest);
long destHostId = dest.getHost().getId();
vm.setPodIdToDeployIn(dest.getPod().getId());
final Long cluster_id = dest.getCluster().getId();
final ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id, "cpuOvercommitRatio");
final ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id, "memoryOvercommitRatio");
// storing the value of overcommit in the vm_details table for doing a capacity check in case the cluster overcommit ratio is changed.
if (_uservmDetailsDao.findDetail(vm.getId(), "cpuOvercommitRatio") == null && (Float.parseFloat(cluster_detail_cpu.getValue()) > 1f || Float.parseFloat(cluster_detail_ram.getValue()) > 1f)) {
_uservmDetailsDao.addDetail(vm.getId(), "cpuOvercommitRatio", cluster_detail_cpu.getValue(), true);
_uservmDetailsDao.addDetail(vm.getId(), "memoryOvercommitRatio", cluster_detail_ram.getValue(), true);
} else if (_uservmDetailsDao.findDetail(vm.getId(), "cpuOvercommitRatio") != null) {
_uservmDetailsDao.addDetail(vm.getId(), "cpuOvercommitRatio", cluster_detail_cpu.getValue(), true);
_uservmDetailsDao.addDetail(vm.getId(), "memoryOvercommitRatio", cluster_detail_ram.getValue(), true);
}
vmProfile.setCpuOvercommitRatio(Float.parseFloat(cluster_detail_cpu.getValue()));
vmProfile.setMemoryOvercommitRatio(Float.parseFloat(cluster_detail_ram.getValue()));
StartAnswer startAnswer = null;
try {
if (!changeState(vm, Event.OperationRetry, destHostId, work, Step.Prepare)) {
throw new ConcurrentOperationException("Unable to update the state of the Virtual Machine " + vm.getUuid() + " oldstate: " + vm.getState() + "Event :" + Event.OperationRetry);
}
} catch (final NoTransitionException e1) {
throw new ConcurrentOperationException(e1.getMessage());
}
try {
s_logger.debug("VM is being created in podId: " + vm.getPodIdToDeployIn());
_networkMgr.prepare(vmProfile, dest, ctx);
volumeMgr.prepare(vmProfile, dest);
// since StorageMgr succeeded in volume creation, reuse Volume for further tries until current cluster has capacity
reuseVolume = true;
vmGuru.finalizeVirtualMachineProfile(vmProfile, dest, ctx);
final VirtualMachineTO vmTO = hvGuru.implement(vmProfile);
handlePath(vmTO.getDisks(), vm.getHypervisorType());
final Commands cmds = new Commands(Command.OnError.Stop);
cmds.addCommand(new StartCommand(vmTO, dest.getHost(), getExecuteInSequence(vm.getHypervisorType())));
vmGuru.finalizeDeployment(cmds, vmProfile, dest, ctx);
work = _workDao.findById(work.getId());
if (work == null || work.getStep() != Step.Prepare) {
throw new ConcurrentOperationException("Work steps have been changed: " + work);
}
_workDao.updateStep(work, Step.Starting);
_agentMgr.send(destHostId, cmds);
_workDao.updateStep(work, Step.Started);
startAnswer = cmds.getAnswer(StartAnswer.class);
if (startAnswer != null && startAnswer.getResult()) {
handlePath(vmTO.getDisks(), startAnswer.getIqnToPath());
final String host_guid = startAnswer.getHost_guid();
if (host_guid != null) {
final HostVO finalHost = _resourceMgr.findHostByGuid(host_guid);
if (finalHost == null) {
throw new CloudRuntimeException("Host Guid " + host_guid + " doesn't exist in DB, something went wrong while processing start answer: " + startAnswer);
}
destHostId = finalHost.getId();
}
if (vmGuru.finalizeStart(vmProfile, destHostId, cmds, ctx)) {
syncDiskChainChange(startAnswer);
if (!changeState(vm, Event.OperationSucceeded, destHostId, work, Step.Done)) {
s_logger.error("Unable to transition to a new state. VM uuid: " + vm.getUuid() + "VM oldstate:" + vm.getState() + "Event:" + Event.OperationSucceeded);
throw new ConcurrentOperationException("Failed to deploy VM" + vm.getUuid());
}
// Update GPU device capacity
final GPUDeviceTO gpuDevice = startAnswer.getVirtualMachine().getGpuDevice();
if (gpuDevice != null) {
_resourceMgr.updateGPUDetails(destHostId, gpuDevice.getGroupDetails());
}
startedVm = vm;
s_logger.debug("Start completed for VM " + vm);
return;
} else {
s_logger.info("The guru did not like the answers so stopping " + vm);
final StopCommand cmd = new StopCommand(vm, getExecuteInSequence(vm.getHypervisorType()), false);
final Answer answer = _agentMgr.easySend(destHostId, cmd);
if (answer instanceof StopAnswer) {
final StopAnswer stopAns = (StopAnswer) answer;
if (vm.getType() == VirtualMachine.Type.User) {
final String platform = stopAns.getPlatform();
if (platform != null) {
final Map<String, String> vmmetadata = new HashMap<>();
vmmetadata.put(vm.getInstanceName(), platform);
syncVMMetaData(vmmetadata);
}
}
}
if (answer == null || !answer.getResult()) {
s_logger.warn("Unable to stop " + vm + " due to " + (answer != null ? answer.getDetails() : "no answers"));
_haMgr.scheduleStop(vm, destHostId, WorkType.ForceStop);
throw new ExecutionException("Unable to stop this VM, " + vm.getUuid() + " so we are unable to retry the start operation");
}
throw new ExecutionException("Unable to start VM:" + vm.getUuid() + " due to error in finalizeStart, not retrying");
}
}
s_logger.info("Unable to start VM on " + dest.getHost() + " due to " + (startAnswer == null ? " no start answer" : startAnswer.getDetails()));
if (startAnswer != null && startAnswer.getContextParam("stopRetry") != null) {
break;
}
} catch (final OperationTimedoutException e) {
s_logger.debug("Unable to send the start command to host " + dest.getHost() + " failed to start VM: " + vm.getUuid());
if (e.isActive()) {
_haMgr.scheduleStop(vm, destHostId, WorkType.CheckStop);
}
canRetry = false;
throw new AgentUnavailableException("Unable to start " + vm.getHostName(), destHostId, e);
} catch (final ResourceUnavailableException e) {
s_logger.info("Unable to contact resource.", e);
if (!avoids.add(e)) {
if (e.getScope() == Volume.class || e.getScope() == Nic.class) {
throw e;
} else {
s_logger.warn("unexpected ResourceUnavailableException : " + e.getScope().getName(), e);
throw e;
}
}
} catch (final InsufficientCapacityException e) {
s_logger.info("Insufficient capacity ", e);
if (!avoids.add(e)) {
if (e.getScope() == Volume.class || e.getScope() == Nic.class) {
throw e;
} else {
s_logger.warn("unexpected InsufficientCapacityException : " + e.getScope().getName(), e);
}
}
} catch (final ExecutionException | NoTransitionException e) {
s_logger.error("Failed to start instance " + vm, e);
throw new AgentUnavailableException("Unable to start instance due to " + e.getMessage(), destHostId, e);
} finally {
if (startedVm == null && canRetry) {
final Step prevStep = work.getStep();
_workDao.updateStep(work, Step.Release);
// If the previous step was Started or Starting and we got a valid answer
if ((prevStep == Step.Started || prevStep == Step.Starting) && startAnswer != null && startAnswer.getResult()) {
// TODO check the response of cleanup
// and record it in DB for retry
cleanup(vmGuru, vmProfile, work, Event.OperationFailed, false);
} else {
// if step is not starting/started, send cleanup command with force=true
cleanup(vmGuru, vmProfile, work, Event.OperationFailed, true);
}
}
}
}
} finally {
if (startedVm == null) {
if (canRetry) {
try {
changeState(vm, Event.OperationFailed, null, work, Step.Done);
} catch (final NoTransitionException e) {
throw new ConcurrentOperationException(e.getMessage());
}
}
}
if (planToDeploy != null) {
planToDeploy.setAvoids(avoids);
}
}
if (startedVm == null) {
throw new CloudRuntimeException("Unable to start instance '" + vm.getHostName() + "' (" + vm.getUuid() + "), see management server log for details");
}
}
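Stripped of the orchestration detail, the method is a bounded retry loop: the plan may be narrowed to a ready ROOT volume's pool, and when that placement fails the original wider plan is restored and volume reuse is disabled for the next attempt. A self-contained sketch of just that control flow (all types here are simplified stand-ins, not the CloudStack classes):
public class RetryPlanSketch {

    // stand-in for DataCenterDeployment: just a label for the plan's scope
    record Plan(String scope) {}

    // stand-in for planDeployment: pretend the volume-pinned plan never fits
    static String planDeployment(final Plan plan) {
        return "volume-pinned".equals(plan.scope()) ? null : "host-42";
    }

    public static void main(final String[] args) {
        final Plan originalPlan = new Plan("zone-wide");
        Plan plan = new Plan("volume-pinned"); // narrowed to the ROOT volume's pool
        boolean planChangedByVolume = true;
        boolean reuseVolume = true;
        int retry = 2; // StartRetry.value() in the original
        while (retry-- != 0) {
            final String dest = planDeployment(plan);
            if (dest == null) {
                if (planChangedByVolume) {
                    plan = originalPlan; // restore the wider plan
                    planChangedByVolume = false;
                    reuseVolume = false; // search outside the volume's cluster
                    continue;
                }
                throw new IllegalStateException("Unable to create a deployment");
            }
            System.out.println("deploying to " + dest + ", reuseVolume=" + reuseVolume);
            return;
        }
    }
}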
use of com.cloud.deploy.DataCenterDeployment in project cosmic by MissionCriticalCloud.
the class VirtualMachineManagerImpl method getPoolListForVolumesForMigration.
private Map<Volume, StoragePool> getPoolListForVolumesForMigration(final VirtualMachineProfile profile, final Host host, final Map<Long, Long> volumeToPool) {
final List<VolumeVO> allVolumes = _volsDao.findUsableVolumesForInstance(profile.getId());
final Map<Volume, StoragePool> volumeToPoolObjectMap = new HashMap<>();
for (final VolumeVO volume : allVolumes) {
final Long poolId = volumeToPool.get(Long.valueOf(volume.getId()));
final StoragePoolVO pool = _storagePoolDao.findById(poolId);
final StoragePoolVO currentPool = _storagePoolDao.findById(volume.getPoolId());
final DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId());
if (pool != null) {
// Check that the pool is accessible from the host and that the disk offering
// with which the volume was created is compliant with the pool type.
if (_poolHostDao.findByPoolHost(pool.getId(), host.getId()) == null || pool.isLocal() != diskOffering.getUseLocalStorage()) {
// Cannot find a pool for the volume. Throw an exception.
throw new CloudRuntimeException("Cannot migrate volume " + volume + " to storage pool " + pool + " while migrating vm to host " + host + ". Either the pool is not accessible from the host or, because of the offering with which the volume was created, it cannot be placed on the given pool.");
} else if (pool.getId() == currentPool.getId()) {
// If the pool to migrate to is the same as the current pool, the volume doesn't need to be migrated.
} else {
volumeToPoolObjectMap.put(volume, pool);
}
} else {
// Find a suitable pool for the volume. Call the storage pool allocator to find the list of pools.
final DiskProfile diskProfile = new DiskProfile(volume, diskOffering, profile.getHypervisorType());
final DataCenterDeployment plan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), host.getId(), null, null);
final ExcludeList avoid = new ExcludeList();
boolean currentPoolAvailable = false;
final List<StoragePool> poolList = new ArrayList<>();
for (final StoragePoolAllocator allocator : _storagePoolAllocators) {
final List<StoragePool> poolListFromAllocator = allocator.allocateToPool(diskProfile, profile, plan, avoid, StoragePoolAllocator.RETURN_UPTO_ALL);
if (poolListFromAllocator != null && !poolListFromAllocator.isEmpty()) {
poolList.addAll(poolListFromAllocator);
}
}
if (!poolList.isEmpty()) {
// Volume needs to be migrated. Pick the first pool from the list. Add a mapping to migrate the
// volume to a pool only if it is required; that is the current pool on which the volume resides
// is not available on the destination host.
final Iterator<StoragePool> iter = poolList.iterator();
while (iter.hasNext()) {
if (currentPool.getId() == iter.next().getId()) {
currentPoolAvailable = true;
break;
}
}
if (!currentPoolAvailable) {
volumeToPoolObjectMap.put(volume, _storagePoolDao.findByUuid(poolList.get(0).getUuid()));
}
}
if (!currentPoolAvailable && !volumeToPoolObjectMap.containsKey(volume)) {
// Cannot find a pool for the volume. Throw an exception.
throw new CloudRuntimeException("Cannot find a storage pool which is available for volume " + volume + " while migrating virtual machine " + profile.getVirtualMachine() + " to host " + host);
}
}
}
return volumeToPoolObjectMap;
}
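The allocator loop above simply concatenates the candidates returned by every StoragePoolAllocator; a volume then needs migrating only when its current pool is absent from that combined list. A simplified, self-contained sketch of the fan-out (the interface is a stand-in, not the CloudStack type):
import java.util.ArrayList;
import java.util.List;

public class AllocatorFanOutSketch {

    // stand-in for StoragePoolAllocator.allocateToPool(...), returning pool ids
    interface Allocator {
        List<Long> allocate();
    }

    // Collect candidate pool ids from every allocator; the volume only needs
    // migrating if its current pool is not among the candidates.
    static boolean needsMigration(final List<Allocator> allocators, final long currentPoolId) {
        final List<Long> candidates = new ArrayList<>();
        for (final Allocator allocator : allocators) {
            final List<Long> fromAllocator = allocator.allocate();
            if (fromAllocator != null && !fromAllocator.isEmpty()) {
                candidates.addAll(fromAllocator);
            }
        }
        return !candidates.contains(currentPoolId);
    }

    public static void main(final String[] args) {
        final List<Allocator> allocators = List.of(() -> List.of(10L, 11L), () -> List.of(12L));
        System.out.println(needsMigration(allocators, 11L)); // false: current pool is reachable
        System.out.println(needsMigration(allocators, 99L)); // true: a new pool must be picked
    }
}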