Use of com.cloud.agent.manager.Commands in project cloudstack by apache.
The class NicPlugInOutRules, method accept.
@Override
public boolean accept(final NetworkTopologyVisitor visitor, final VirtualRouter router) throws ResourceUnavailableException {
_router = router;
Pair<Map<String, PublicIpAddress>, Map<String, PublicIpAddress>> nicsToChange = getNicsToChangeOnRouter(visitor);
Map<String, PublicIpAddress> nicsToPlug = nicsToChange.first();
Map<String, PublicIpAddress> nicsToUnplug = nicsToChange.second();
NetworkModel networkModel = visitor.getVirtualNetworkApplianceFactory().getNetworkModel();
VirtualMachineManager itMgr = visitor.getVirtualNetworkApplianceFactory().getItMgr();
// 1) Unplug the nics
for (Entry<String, PublicIpAddress> entry : nicsToUnplug.entrySet()) {
Network publicNtwk = null;
try {
publicNtwk = networkModel.getNetwork(entry.getValue().getNetworkId());
URI broadcastUri = BroadcastDomainType.Vlan.toUri(entry.getKey());
itMgr.removeVmFromNetwork(_router, publicNtwk, broadcastUri);
} catch (ConcurrentOperationException e) {
s_logger.warn("Failed to remove router " + _router + " from vlan " + entry.getKey() + " in public network " + publicNtwk + " due to ", e);
return false;
}
}
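// OnError.Continue: keep executing the remaining commands in the batch even if one fails (inferred from the enum name).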
_netUsageCommands = new Commands(Command.OnError.Continue);
VpcDao vpcDao = visitor.getVirtualNetworkApplianceFactory().getVpcDao();
VpcVO vpc = vpcDao.findById(_router.getVpcId());
// 2) Plug the nics
for (Entry<String, PublicIpAddress> entry : nicsToPlug.entrySet()) {
String vlanTag = entry.getKey();
PublicIpAddress ip = entry.getValue();
// have to plug the nic(s)
NicProfile defaultNic = new NicProfile();
if (ip.isSourceNat()) {
defaultNic.setDefaultNic(true);
}
defaultNic.setIPv4Address(ip.getAddress().addr());
defaultNic.setIPv4Gateway(ip.getGateway());
defaultNic.setIPv4Netmask(ip.getNetmask());
defaultNic.setMacAddress(ip.getMacAddress());
defaultNic.setBroadcastType(BroadcastDomainType.Vlan);
defaultNic.setBroadcastUri(BroadcastDomainType.Vlan.toUri(ip.getVlanTag()));
defaultNic.setIsolationUri(IsolationType.Vlan.toUri(ip.getVlanTag()));
NicProfile publicNic = null;
Network publicNtwk = null;
try {
publicNtwk = networkModel.getNetwork(ip.getNetworkId());
publicNic = itMgr.addVmToNetwork(_router, publicNtwk, defaultNic);
} catch (ConcurrentOperationException e) {
s_logger.warn("Failed to add router " + _router + " to vlan " + vlanTag + " in public network " + publicNtwk + " due to ", e);
} catch (InsufficientCapacityException e) {
s_logger.warn("Failed to add router " + _router + " to vlan " + vlanTag + " in public network " + publicNtwk + " due to ", e);
} finally {
if (publicNic == null) {
s_logger.warn("Failed to add router " + _router + " to vlan " + vlanTag + " in public network " + publicNtwk);
return false;
}
}
// Create network usage commands; send them to the router after IPAssoc completes.
NetworkUsageCommand netUsageCmd = new NetworkUsageCommand(_router.getPrivateIpAddress(), _router.getInstanceName(), true, defaultNic.getIPv4Address(), vpc.getCidr());
_netUsageCommands.addCommand(netUsageCmd);
UserStatisticsDao userStatsDao = visitor.getVirtualNetworkApplianceFactory().getUserStatsDao();
UserStatisticsVO stats = userStatsDao.findBy(_router.getAccountId(), _router.getDataCenterId(), publicNtwk.getId(), publicNic.getIPv4Address(), _router.getId(), _router.getType().toString());
if (stats == null) {
stats = new UserStatisticsVO(_router.getAccountId(), _router.getDataCenterId(), publicNic.getIPv4Address(), _router.getId(), _router.getType().toString(), publicNtwk.getId());
userStatsDao.persist(stats);
}
}
// VpcIpAssociation is done.
return true;
}
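The method above batches NetworkUsageCommand instances with OnError.Continue and defers sending them until after IPAssoc. A minimal sketch of that batch-and-defer pattern; the helper name buildUsageBatch and its parameters are hypothetical stand-ins for the router and VPC fields used above:

import com.cloud.agent.api.Command;
import com.cloud.agent.api.NetworkUsageCommand;
import com.cloud.agent.manager.Commands;

Commands buildUsageBatch(String privateIp, String instanceName, String publicIp, String vpcCidr) {
    // OnError.Continue: one failed command should not abort the rest of the batch.
    Commands batch = new Commands(Command.OnError.Continue);
    // Same constructor arguments as the NetworkUsageCommand built in accept() above.
    batch.addCommand(new NetworkUsageCommand(privateIp, instanceName, true, publicIp, vpcCidr));
    // The caller sends the batch only after IP association has completed,
    // e.g. agentMgr.send(routerHostId, batch); (hypothetical names).
    return batch;
}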
Use of com.cloud.agent.manager.Commands in project cloudstack by apache.
The class VirtualMachineManagerImpl, method orchestrateStart.
@Override
public void orchestrateStart(final String vmUuid, final Map<VirtualMachineProfile.Param, Object> params, final DeploymentPlan planToDeploy, final DeploymentPlanner planner) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException {
final CallContext cctxt = CallContext.current();
final Account account = cctxt.getCallingAccount();
final User caller = cctxt.getCallingUser();
VMInstanceVO vm = _vmDao.findByUuid(vmUuid);
final VirtualMachineGuru vmGuru = getVmGuru(vm);
final Ternary<VMInstanceVO, ReservationContext, ItWorkVO> start = changeToStartState(vmGuru, vm, caller, account);
if (start == null) {
return;
}
vm = start.first();
final ReservationContext ctx = start.second();
ItWorkVO work = start.third();
VMInstanceVO startedVm = null;
final ServiceOfferingVO offering = _offeringDao.findById(vm.getId(), vm.getServiceOfferingId());
final VirtualMachineTemplate template = _entityMgr.findByIdIncludingRemoved(VirtualMachineTemplate.class, vm.getTemplateId());
if (s_logger.isDebugEnabled()) {
s_logger.debug("Trying to deploy VM, vm has dcId: " + vm.getDataCenterId() + " and podId: " + vm.getPodIdToDeployIn());
}
DataCenterDeployment plan = new DataCenterDeployment(vm.getDataCenterId(), vm.getPodIdToDeployIn(), null, null, null, null, ctx);
if (planToDeploy != null && planToDeploy.getDataCenterId() != 0) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("advanceStart: DeploymentPlan is provided, using dcId:" + planToDeploy.getDataCenterId() + ", podId: " + planToDeploy.getPodId() + ", clusterId: " + planToDeploy.getClusterId() + ", hostId: " + planToDeploy.getHostId() + ", poolId: " + planToDeploy.getPoolId());
}
plan = new DataCenterDeployment(planToDeploy.getDataCenterId(), planToDeploy.getPodId(), planToDeploy.getClusterId(), planToDeploy.getHostId(), planToDeploy.getPoolId(), planToDeploy.getPhysicalNetworkId(), ctx);
}
final HypervisorGuru hvGuru = _hvGuruMgr.getGuru(vm.getHypervisorType());
boolean canRetry = true;
ExcludeList avoids = null;
try {
final Journal journal = start.second().getJournal();
if (planToDeploy != null) {
avoids = planToDeploy.getAvoids();
}
if (avoids == null) {
avoids = new ExcludeList();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("Deploy avoids pods: " + avoids.getPodsToAvoid() + ", clusters: " + avoids.getClustersToAvoid() + ", hosts: " + avoids.getHostsToAvoid());
}
boolean planChangedByVolume = false;
boolean reuseVolume = true;
final DataCenterDeployment originalPlan = plan;
int retry = StartRetry.value();
while (retry-- != 0) {
if (reuseVolume) {
// edit plan if this vm's ROOT volume is in READY state already
final List<VolumeVO> vols = _volsDao.findReadyRootVolumesByInstance(vm.getId());
for (final VolumeVO vol : vols) {
// Make sure the templateId is unchanged; if it has changed, let the planner reassign a pool for the volume even if it is ready.
final Long volTemplateId = vol.getTemplateId();
if (volTemplateId != null && volTemplateId.longValue() != template.getId()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug(vol + " of " + vm + " is READY, but template ids don't match, let the planner reassign a new pool");
}
continue;
}
final StoragePool pool = (StoragePool) dataStoreMgr.getPrimaryDataStore(vol.getPoolId());
if (!pool.isInMaintenance()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Root volume is ready, need to place VM in volume's cluster");
}
final long rootVolDcId = pool.getDataCenterId();
final Long rootVolPodId = pool.getPodId();
final Long rootVolClusterId = pool.getClusterId();
if (planToDeploy != null && planToDeploy.getDataCenterId() != 0) {
final Long clusterIdSpecified = planToDeploy.getClusterId();
if (clusterIdSpecified != null && rootVolClusterId != null) {
if (rootVolClusterId.longValue() != clusterIdSpecified.longValue()) {
// The ready root volume is in a different cluster, so the deployment plan passed in cannot be satisfied.
if (s_logger.isDebugEnabled()) {
s_logger.debug("Cannot satisfy the deployment plan passed in since the ready root volume is in a different cluster. Volume's cluster: " + rootVolClusterId + ", cluster specified: " + clusterIdSpecified);
}
throw new ResourceUnavailableException("Root volume is ready in different cluster, Deployment plan provided cannot be satisfied, unable to create a deployment for " + vm, Cluster.class, clusterIdSpecified);
}
}
plan = new DataCenterDeployment(planToDeploy.getDataCenterId(), planToDeploy.getPodId(), planToDeploy.getClusterId(), planToDeploy.getHostId(), vol.getPoolId(), null, ctx);
} else {
plan = new DataCenterDeployment(rootVolDcId, rootVolPodId, rootVolClusterId, null, vol.getPoolId(), null, ctx);
if (s_logger.isDebugEnabled()) {
s_logger.debug(vol + " is READY, changing deployment plan to use this pool's dcId: " + rootVolDcId + " , podId: " + rootVolPodId + " , and clusterId: " + rootVolClusterId);
}
planChangedByVolume = true;
}
}
}
}
final Account owner = _entityMgr.findById(Account.class, vm.getAccountId());
final VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vm, template, offering, owner, params);
DeployDestination dest = null;
try {
dest = _dpMgr.planDeployment(vmProfile, plan, avoids, planner);
} catch (final AffinityConflictException e2) {
s_logger.warn("Unable to create deployment, affinity rules associted to the VM conflict", e2);
throw new CloudRuntimeException("Unable to create deployment, affinity rules associted to the VM conflict");
}
if (dest == null) {
if (planChangedByVolume) {
plan = originalPlan;
planChangedByVolume = false;
//do not enter volume reuse for next retry, since we want to look for resources outside the volume's cluster
reuseVolume = false;
continue;
}
throw new InsufficientServerCapacityException("Unable to create a deployment for " + vmProfile, DataCenter.class, plan.getDataCenterId(), areAffinityGroupsAssociated(vmProfile));
}
avoids.addHost(dest.getHost().getId());
journal.record("Deployment found ", vmProfile, dest);
long destHostId = dest.getHost().getId();
vm.setPodIdToDeployIn(dest.getPod().getId());
final Long cluster_id = dest.getCluster().getId();
final ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id, "cpuOvercommitRatio");
final ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id, "memoryOvercommitRatio");
//storing the value of overcommit in the vm_details table for doing a capacity check in case the cluster overcommit ratio is changed.
if (_uservmDetailsDao.findDetail(vm.getId(), "cpuOvercommitRatio") == null && (Float.parseFloat(cluster_detail_cpu.getValue()) > 1f || Float.parseFloat(cluster_detail_ram.getValue()) > 1f)) {
_uservmDetailsDao.addDetail(vm.getId(), "cpuOvercommitRatio", cluster_detail_cpu.getValue(), true);
_uservmDetailsDao.addDetail(vm.getId(), "memoryOvercommitRatio", cluster_detail_ram.getValue(), true);
} else if (_uservmDetailsDao.findDetail(vm.getId(), "cpuOvercommitRatio") != null) {
_uservmDetailsDao.addDetail(vm.getId(), "cpuOvercommitRatio", cluster_detail_cpu.getValue(), true);
_uservmDetailsDao.addDetail(vm.getId(), "memoryOvercommitRatio", cluster_detail_ram.getValue(), true);
}
vmProfile.setCpuOvercommitRatio(Float.parseFloat(cluster_detail_cpu.getValue()));
vmProfile.setMemoryOvercommitRatio(Float.parseFloat(cluster_detail_ram.getValue()));
StartAnswer startAnswer = null;
try {
if (!changeState(vm, Event.OperationRetry, destHostId, work, Step.Prepare)) {
throw new ConcurrentOperationException("Unable to update the state of the Virtual Machine " + vm.getUuid() + " oldstate: " + vm.getState() + "Event :" + Event.OperationRetry);
}
} catch (final NoTransitionException e1) {
throw new ConcurrentOperationException(e1.getMessage());
}
try {
if (s_logger.isDebugEnabled()) {
s_logger.debug("VM is being created in podId: " + vm.getPodIdToDeployIn());
}
_networkMgr.prepare(vmProfile, new DeployDestination(dest.getDataCenter(), dest.getPod(), null, null), ctx);
if (vm.getHypervisorType() != HypervisorType.BareMetal) {
volumeMgr.prepare(vmProfile, dest);
}
//since StorageMgr succeeded in volume creation, reuse Volume for further tries until current cluster has capacity
if (!reuseVolume) {
reuseVolume = true;
}
Commands cmds = null;
vmGuru.finalizeVirtualMachineProfile(vmProfile, dest, ctx);
final VirtualMachineTO vmTO = hvGuru.implement(vmProfile);
handlePath(vmTO.getDisks(), vm.getHypervisorType());
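// OnError.Stop: stop executing the remaining commands in the batch after the first failure (inferred from the enum name).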
cmds = new Commands(Command.OnError.Stop);
cmds.addCommand(new StartCommand(vmTO, dest.getHost(), getExecuteInSequence(vm.getHypervisorType())));
vmGuru.finalizeDeployment(cmds, vmProfile, dest, ctx);
work = _workDao.findById(work.getId());
if (work == null || work.getStep() != Step.Prepare) {
throw new ConcurrentOperationException("Work steps have been changed: " + work);
}
_workDao.updateStep(work, Step.Starting);
_agentMgr.send(destHostId, cmds);
_workDao.updateStep(work, Step.Started);
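// Retrieve the typed answer for the StartCommand from the completed batch.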
startAnswer = cmds.getAnswer(StartAnswer.class);
if (startAnswer != null && startAnswer.getResult()) {
handlePath(vmTO.getDisks(), startAnswer.getIqnToPath());
final String host_guid = startAnswer.getHost_guid();
if (host_guid != null) {
final HostVO finalHost = _resourceMgr.findHostByGuid(host_guid);
if (finalHost == null) {
throw new CloudRuntimeException("Host Guid " + host_guid + " doesn't exist in DB, something went wrong while processing start answer: " + startAnswer);
}
destHostId = finalHost.getId();
}
if (vmGuru.finalizeStart(vmProfile, destHostId, cmds, ctx)) {
syncDiskChainChange(startAnswer);
if (!changeState(vm, Event.OperationSucceeded, destHostId, work, Step.Done)) {
s_logger.error("Unable to transition to a new state. VM uuid: " + vm.getUuid() + "VM oldstate:" + vm.getState() + "Event:" + Event.OperationSucceeded);
throw new ConcurrentOperationException("Failed to deploy VM" + vm.getUuid());
}
// Update GPU device capacity
final GPUDeviceTO gpuDevice = startAnswer.getVirtualMachine().getGpuDevice();
if (gpuDevice != null) {
_resourceMgr.updateGPUDetails(destHostId, gpuDevice.getGroupDetails());
}
// Remove the deployvm detail; it is only set when the VM is first deployed, not when it is started from a stopped state.
if (_uservmDetailsDao.findDetail(vm.getId(), "deployvm") != null) {
_uservmDetailsDao.removeDetail(vm.getId(), "deployvm");
}
startedVm = vm;
if (s_logger.isDebugEnabled()) {
s_logger.debug("Start completed for VM " + vm);
}
return;
} else {
s_logger.info("The guru did not like the answers, so stopping " + vm);
StopCommand stopCmd = new StopCommand(vm, getExecuteInSequence(vm.getHypervisorType()), false);
stopCmd.setControlIp(getControlNicIpForVM(vm));
final StopCommand cmd = stopCmd;
final Answer answer = _agentMgr.easySend(destHostId, cmd);
if (answer != null && answer instanceof StopAnswer) {
final StopAnswer stopAns = (StopAnswer) answer;
if (vm.getType() == VirtualMachine.Type.User) {
final String platform = stopAns.getPlatform();
if (platform != null) {
final Map<String, String> vmmetadata = new HashMap<String, String>();
vmmetadata.put(vm.getInstanceName(), platform);
syncVMMetaData(vmmetadata);
}
}
}
if (answer == null || !answer.getResult()) {
s_logger.warn("Unable to stop " + vm + " due to " + (answer != null ? answer.getDetails() : "no answers"));
_haMgr.scheduleStop(vm, destHostId, WorkType.ForceStop);
throw new ExecutionException("Unable to stop this VM, " + vm.getUuid() + " so we are unable to retry the start operation");
}
throw new ExecutionException("Unable to start VM:" + vm.getUuid() + " due to error in finalizeStart, not retrying");
}
}
s_logger.info("Unable to start VM on " + dest.getHost() + " due to " + (startAnswer == null ? " no start answer" : startAnswer.getDetails()));
if (startAnswer != null && startAnswer.getContextParam("stopRetry") != null) {
break;
}
} catch (OperationTimedoutException e) {
s_logger.debug("Unable to send the start command to host " + dest.getHost() + " failed to start VM: " + vm.getUuid());
if (e.isActive()) {
_haMgr.scheduleStop(vm, destHostId, WorkType.CheckStop);
}
canRetry = false;
throw new AgentUnavailableException("Unable to start " + vm.getHostName(), destHostId, e);
} catch (final ResourceUnavailableException e) {
s_logger.info("Unable to contact resource.", e);
if (!avoids.add(e)) {
if (e.getScope() == Volume.class || e.getScope() == Nic.class) {
throw e;
} else {
s_logger.warn("unexpected ResourceUnavailableException : " + e.getScope().getName(), e);
throw e;
}
}
} catch (final InsufficientCapacityException e) {
s_logger.info("Insufficient capacity ", e);
if (!avoids.add(e)) {
if (e.getScope() == Volume.class || e.getScope() == Nic.class) {
throw e;
} else {
s_logger.warn("unexpected InsufficientCapacityException : " + e.getScope().getName(), e);
}
}
} catch (final ExecutionException e) {
s_logger.error("Failed to start instance " + vm, e);
throw new AgentUnavailableException("Unable to start instance due to " + e.getMessage(), destHostId, e);
} catch (final NoTransitionException e) {
s_logger.error("Failed to start instance " + vm, e);
throw new AgentUnavailableException("Unable to start instance due to " + e.getMessage(), destHostId, e);
} finally {
if (startedVm == null && canRetry) {
final Step prevStep = work.getStep();
_workDao.updateStep(work, Step.Release);
// If the previous step was Started or Starting and we got a valid answer
if ((prevStep == Step.Started || prevStep == Step.Starting) && startAnswer != null && startAnswer.getResult()) {
//TODO check the response of cleanup and record it in DB for retry
cleanup(vmGuru, vmProfile, work, Event.OperationFailed, false);
} else {
//if step is not starting/started, send cleanup command with force=true
cleanup(vmGuru, vmProfile, work, Event.OperationFailed, true);
}
}
}
}
} finally {
if (startedVm == null) {
if (canRetry) {
try {
changeState(vm, Event.OperationFailed, null, work, Step.Done);
} catch (final NoTransitionException e) {
throw new ConcurrentOperationException(e.getMessage());
}
}
}
if (planToDeploy != null) {
planToDeploy.setAvoids(avoids);
}
}
if (startedVm == null) {
throw new CloudRuntimeException("Unable to start instance '" + vm.getHostName() + "' (" + vm.getUuid() + "), see management server log for details");
}
}
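Reduced to its Commands usage, orchestrateStart builds a batch with OnError.Stop, adds a StartCommand, sends it synchronously to the planned host, and reads back the typed StartAnswer. A minimal sketch of that round-trip; the helper sendStart and its parameters are hypothetical, and the import paths are the usual CloudStack locations for these types:

import com.cloud.agent.AgentManager;
import com.cloud.agent.api.Command;
import com.cloud.agent.api.StartAnswer;
import com.cloud.agent.api.StartCommand;
import com.cloud.agent.api.to.VirtualMachineTO;
import com.cloud.agent.manager.Commands;
import com.cloud.exception.AgentUnavailableException;
import com.cloud.exception.OperationTimedoutException;
import com.cloud.host.Host;

StartAnswer sendStart(AgentManager agentMgr, long hostId, VirtualMachineTO vmTO, Host host)
        throws AgentUnavailableException, OperationTimedoutException {
    // OnError.Stop: abort the batch at the first failed command.
    Commands cmds = new Commands(Command.OnError.Stop);
    cmds.addCommand(new StartCommand(vmTO, host, true)); // true: execute in sequence
    agentMgr.send(hostId, cmds);                         // synchronous dispatch to the agent
    return cmds.getAnswer(StartAnswer.class);            // typed answer for the StartCommand
}

A null or failed answer is then handled the way the method above does: log the details and either retry on another host or clean up.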
Use of com.cloud.agent.manager.Commands in project cloudstack by apache.
The class VirtualMachineManagerImpl, method plugNic.
public boolean plugNic(final Network network, final NicTO nic, final VirtualMachineTO vm, final ReservationContext context, final DeployDestination dest) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException {
boolean result = true;
final VMInstanceVO router = _vmDao.findById(vm.getId());
if (router.getState() == State.Running) {
try {
final PlugNicCommand plugNicCmd = new PlugNicCommand(nic, vm.getName(), vm.getType(), vm.getDetails());
final Commands cmds = new Commands(Command.OnError.Stop);
cmds.addCommand("plugnic", plugNicCmd);
_agentMgr.send(dest.getHost().getId(), cmds);
final PlugNicAnswer plugNicAnswer = cmds.getAnswer(PlugNicAnswer.class);
if (plugNicAnswer == null || !plugNicAnswer.getResult()) {
s_logger.warn("Unable to plug nic for vm " + vm.getName());
result = false;
}
} catch (final OperationTimedoutException e) {
throw new AgentUnavailableException("Unable to plug nic for router " + vm.getName() + " in network " + network, dest.getHost().getId(), e);
}
} else {
s_logger.warn("Unable to apply PlugNic, vm " + router + " is not in the right state " + router.getState());
throw new ResourceUnavailableException("Unable to apply PlugNic on the backend," + " vm " + vm + " is not in the right state", DataCenter.class, router.getDataCenterId());
}
return result;
}
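plugNic shows the keyed form of addCommand: the PlugNicCommand is registered under the name "plugnic" and its answer is fetched back by type. A condensed sketch of that request/answer pairing, with hypothetical agentMgr, hostId, nic, and VM fields standing in for the injected dependencies above:

import com.cloud.agent.api.Command;
import com.cloud.agent.api.PlugNicAnswer;
import com.cloud.agent.api.PlugNicCommand;
import com.cloud.agent.manager.Commands;

Commands cmds = new Commands(Command.OnError.Stop);
// The string key names the command within the batch; the answer is still looked up by type.
cmds.addCommand("plugnic", new PlugNicCommand(nic, vmName, vmType, vmDetails));
agentMgr.send(hostId, cmds);
PlugNicAnswer answer = cmds.getAnswer(PlugNicAnswer.class);
boolean plugged = answer != null && answer.getResult();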
Use of com.cloud.agent.manager.Commands in project cloudstack by apache.
The class VirtualMachineManagerImpl, method advanceExpunge.
protected void advanceExpunge(VMInstanceVO vm) throws ResourceUnavailableException, OperationTimedoutException, ConcurrentOperationException {
if (vm == null || vm.getRemoved() != null) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Unable to find vm or vm is destroyed: " + vm);
}
return;
}
advanceStop(vm.getUuid(), false);
vm = _vmDao.findByUuid(vm.getUuid());
try {
if (!stateTransitTo(vm, VirtualMachine.Event.ExpungeOperation, vm.getHostId())) {
s_logger.debug("Unable to destroy the vm because it is not in the correct state: " + vm);
throw new CloudRuntimeException("Unable to destroy " + vm);
}
} catch (final NoTransitionException e) {
s_logger.debug("Unable to destroy the vm because it is not in the correct state: " + vm);
throw new CloudRuntimeException("Unable to destroy " + vm, e);
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("Destroying vm " + vm);
}
final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm);
final HypervisorGuru hvGuru = _hvGuruMgr.getGuru(vm.getHypervisorType());
s_logger.debug("Cleaning up NICS");
final List<Command> nicExpungeCommands = hvGuru.finalizeExpungeNics(vm, profile.getNics());
_networkMgr.cleanupNics(profile);
s_logger.debug("Cleaning up hypervisor data structures (ex. SRs in XenServer) for managed storage");
final List<Command> volumeExpungeCommands = hvGuru.finalizeExpungeVolumes(vm);
final Long hostId = vm.getHostId() != null ? vm.getHostId() : vm.getLastHostId();
if (volumeExpungeCommands != null && volumeExpungeCommands.size() > 0 && hostId != null) {
final Commands cmds = new Commands(Command.OnError.Stop);
for (final Command volumeExpungeCommand : volumeExpungeCommands) {
cmds.addCommand(volumeExpungeCommand);
}
_agentMgr.send(hostId, cmds);
if (!cmds.isSuccessful()) {
for (final Answer answer : cmds.getAnswers()) {
if (!answer.getResult()) {
s_logger.warn("Failed to expunge vm due to: " + answer.getDetails());
throw new CloudRuntimeException("Unable to expunge " + vm + " due to " + answer.getDetails());
}
}
}
}
if (hostId != null) {
volumeMgr.revokeAccess(vm.getId(), hostId);
}
// Clean up volumes based on the vm's instance id
volumeMgr.cleanupVolumes(vm.getId());
final VirtualMachineGuru guru = getVmGuru(vm);
guru.finalizeExpunge(vm);
// Remove the overcommit details from the user VM details.
_uservmDetailsDao.removeDetails(vm.getId());
// Send hypervisor-dependent commands before removing the VM.
final List<Command> finalizeExpungeCommands = hvGuru.finalizeExpunge(vm);
if (finalizeExpungeCommands != null && finalizeExpungeCommands.size() > 0) {
if (hostId != null) {
final Commands cmds = new Commands(Command.OnError.Stop);
for (final Command command : finalizeExpungeCommands) {
cmds.addCommand(command);
}
if (nicExpungeCommands != null) {
for (final Command command : nicExpungeCommands) {
cmds.addCommand(command);
}
}
_agentMgr.send(hostId, cmds);
if (!cmds.isSuccessful()) {
for (final Answer answer : cmds.getAnswers()) {
if (!answer.getResult()) {
s_logger.warn("Failed to expunge vm due to: " + answer.getDetails());
throw new CloudRuntimeException("Unable to expunge " + vm + " due to " + answer.getDetails());
}
}
}
}
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("Expunged " + vm);
}
}
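Both batches in advanceExpunge follow the same batch-and-verify pattern: collect the hypervisor-specific commands into one Commands object, send it, and walk the individual answers when isSuccessful() reports a failure. A minimal sketch; sendExpungeBatch and its parameters are hypothetical:

import java.util.List;

import com.cloud.agent.AgentManager;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.Command;
import com.cloud.agent.manager.Commands;
import com.cloud.exception.AgentUnavailableException;
import com.cloud.exception.OperationTimedoutException;
import com.cloud.utils.exception.CloudRuntimeException;

void sendExpungeBatch(AgentManager agentMgr, long hostId, List<Command> cmdsToSend)
        throws AgentUnavailableException, OperationTimedoutException {
    Commands batch = new Commands(Command.OnError.Stop);
    for (final Command c : cmdsToSend) {
        batch.addCommand(c);
    }
    agentMgr.send(hostId, batch);
    if (!batch.isSuccessful()) { // at least one answer reported failure
        for (final Answer a : batch.getAnswers()) {
            if (!a.getResult()) {
                throw new CloudRuntimeException("Expunge command failed: " + a.getDetails());
            }
        }
    }
}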
Use of com.cloud.agent.manager.Commands in project cloudstack by apache.
The class VirtualMachineManagerImpl, method orchestrateMigrateWithStorage.
private void orchestrateMigrateWithStorage(final String vmUuid, final long srcHostId, final long destHostId, final Map<Long, Long> volumeToPool) throws ResourceUnavailableException, ConcurrentOperationException {
final VMInstanceVO vm = _vmDao.findByUuid(vmUuid);
final HostVO srcHost = _hostDao.findById(srcHostId);
final HostVO destHost = _hostDao.findById(destHostId);
final VirtualMachineGuru vmGuru = getVmGuru(vm);
final DataCenterVO dc = _dcDao.findById(destHost.getDataCenterId());
final HostPodVO pod = _podDao.findById(destHost.getPodId());
final Cluster cluster = _clusterDao.findById(destHost.getClusterId());
final DeployDestination destination = new DeployDestination(dc, pod, cluster, destHost);
// Create a map of which volume should go in which storage pool.
final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm);
final Map<Volume, StoragePool> volumeToPoolMap = getPoolListForVolumesForMigration(profile, destHost, volumeToPool);
// If none of the volumes has to be migrated, fail the call: the administrator should request a plain vm migration, not a migration with storage.
if (volumeToPoolMap == null || volumeToPoolMap.isEmpty()) {
throw new InvalidParameterValueException("Migration of the vm " + vm + " from host " + srcHost + " to destination host " + destHost + " doesn't involve migrating the volumes.");
}
AlertManager.AlertType alertType = AlertManager.AlertType.ALERT_TYPE_USERVM_MIGRATE;
if (VirtualMachine.Type.DomainRouter.equals(vm.getType())) {
alertType = AlertManager.AlertType.ALERT_TYPE_DOMAIN_ROUTER_MIGRATE;
} else if (VirtualMachine.Type.ConsoleProxy.equals(vm.getType())) {
alertType = AlertManager.AlertType.ALERT_TYPE_CONSOLE_PROXY_MIGRATE;
}
_networkMgr.prepareNicForMigration(profile, destination);
volumeMgr.prepareForMigration(profile, destination);
final HypervisorGuru hvGuru = _hvGuruMgr.getGuru(vm.getHypervisorType());
final VirtualMachineTO to = hvGuru.implement(profile);
ItWorkVO work = new ItWorkVO(UUID.randomUUID().toString(), _nodeId, State.Migrating, vm.getType(), vm.getId());
work.setStep(Step.Prepare);
work.setResourceType(ItWorkVO.ResourceType.Host);
work.setResourceId(destHostId);
work = _workDao.persist(work);
// Put the vm in migrating state.
vm.setLastHostId(srcHostId);
vm.setPodIdToDeployIn(destHost.getPodId());
moveVmToMigratingState(vm, destHostId, work);
boolean migrated = false;
try {
// Config drive: detach the config drive at the source host.
// After a successful migration, attach the config drive on the destination host.
// On migration failure the VM will be stopped, so the config ISO will be deleted.
Nic defaultNic = _networkModel.getDefaultNic(vm.getId());
List<String[]> vmData = null;
if (defaultNic != null) {
UserVmVO userVm = _userVmDao.findById(vm.getId());
Map<String, String> details = _vmDetailsDao.listDetailsKeyPairs(vm.getId());
vm.setDetails(details);
Network network = _networkModel.getNetwork(defaultNic.getNetworkId());
if (_networkModel.isSharedNetworkWithoutServices(network.getId())) {
final String serviceOffering = _serviceOfferingDao.findByIdIncludingRemoved(vm.getId(), vm.getServiceOfferingId()).getDisplayText();
final String zoneName = _dcDao.findById(vm.getDataCenterId()).getName();
boolean isWindows = _guestOSCategoryDao.findById(_guestOSDao.findById(vm.getGuestOSId()).getCategoryId()).getName().equalsIgnoreCase("Windows");
vmData = _networkModel.generateVmData(userVm.getUserData(), serviceOffering, zoneName, vm.getInstanceName(), vm.getId(), (String) profile.getParameter(VirtualMachineProfile.Param.VmSshPubKey), (String) profile.getParameter(VirtualMachineProfile.Param.VmPassword), isWindows);
String vmName = vm.getInstanceName();
String configDriveIsoRootFolder = "/tmp";
String isoFile = configDriveIsoRootFolder + "/" + vmName + "/configDrive/" + vmName + ".iso";
profile.setVmData(vmData);
profile.setConfigDriveLabel(VmConfigDriveLabel.value());
profile.setConfigDriveIsoRootFolder(configDriveIsoRootFolder);
profile.setConfigDriveIsoFile(isoFile);
// At source host detach the config drive iso.
AttachOrDettachConfigDriveCommand dettachCommand = new AttachOrDettachConfigDriveCommand(vm.getInstanceName(), vmData, VmConfigDriveLabel.value(), false);
try {
_agentMgr.send(srcHost.getId(), dettachCommand);
s_logger.debug("Deleted config drive ISO for vm " + vm.getInstanceName() + " In host " + srcHost);
} catch (OperationTimedoutException e) {
s_logger.debug("TIme out occured while exeuting command AttachOrDettachConfigDrive " + e.getMessage());
}
}
}
// Migrate the vm and its volume.
volumeMgr.migrateVolumes(vm, to, srcHost, destHost, volumeToPoolMap);
// Put the vm back to running state.
moveVmOutofMigratingStateOnSuccess(vm, destHost.getId(), work);
try {
if (!checkVmOnHost(vm, destHostId)) {
s_logger.error("Vm not found on destination host. Unable to complete migration for " + vm);
try {
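// Fire-and-forget: wrap the single cleanup command and send with a null answer listener.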
_agentMgr.send(srcHostId, new Commands(cleanup(vm.getInstanceName())), null);
} catch (final AgentUnavailableException e) {
s_logger.error("AgentUnavailableException while cleanup on source host: " + srcHostId);
}
cleanup(vmGuru, new VirtualMachineProfileImpl(vm), work, Event.AgentReportStopped, true);
throw new CloudRuntimeException("VM not found on desintation host. Unable to complete migration for " + vm);
}
} catch (final OperationTimedoutException e) {
s_logger.warn("Error while checking the vm " + vm + " is on host " + destHost, e);
}
migrated = true;
} finally {
if (!migrated) {
s_logger.info("Migration was unsuccessful. Cleaning up: " + vm);
_alertMgr.sendAlert(alertType, srcHost.getDataCenterId(), srcHost.getPodId(), "Unable to migrate vm " + vm.getInstanceName() + " from host " + srcHost.getName() + " in zone " + dc.getName() + " and pod " + pod.getName(), "Migrate Command failed. Please check logs.");
try {
_agentMgr.send(destHostId, new Commands(cleanup(vm.getInstanceName())), null);
vm.setPodIdToDeployIn(srcHost.getPodId());
stateTransitTo(vm, Event.OperationFailed, srcHostId);
} catch (final AgentUnavailableException e) {
s_logger.warn("Looks like the destination Host is unavailable for cleanup.", e);
} catch (final NoTransitionException e) {
s_logger.error("Error while transitioning vm from migrating to running state.", e);
}
}
work.setStep(Step.Done);
_workDao.update(work.getId(), work);
}
}
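Both cleanup sends above use the single-command Commands constructor together with the three-argument send, whose last parameter is an answer listener; passing null makes the call effectively fire-and-forget. A minimal sketch, with sendCleanup, agentMgr, hostId, and cleanupCmd as hypothetical stand-ins:

import com.cloud.agent.AgentManager;
import com.cloud.agent.api.Command;
import com.cloud.agent.manager.Commands;
import com.cloud.exception.AgentUnavailableException;

void sendCleanup(AgentManager agentMgr, long hostId, Command cleanupCmd) {
    try {
        // Single-command constructor; no explicit OnError policy is chosen.
        agentMgr.send(hostId, new Commands(cleanupCmd), null); // null listener: don't wait for the answer
    } catch (final AgentUnavailableException e) {
        // Host unreachable; log and continue, as the migration cleanup above does.
    }
}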