Use of com.cloud.hypervisor.HypervisorGuru in project cloudstack by apache.
The class VpcVirtualNetworkApplianceManagerImpl, method finalizeCommandsOnStart.
@Override
public boolean finalizeCommandsOnStart(final Commands cmds, final VirtualMachineProfile profile) {
final DomainRouterVO domainRouterVO = _routerDao.findById(profile.getId());
Map<String, String> details = new HashMap<String, String>();
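// for VMware, fetch hypervisor-specific VM details from the guru; they are attached to the PlugNicCommands built below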
if (profile.getHypervisorType() == Hypervisor.HypervisorType.VMware) {
HypervisorGuru hvGuru = _hvGuruMgr.getGuru(profile.getHypervisorType());
VirtualMachineTO vmTO = hvGuru.implement(profile);
if (vmTO.getDetails() != null) {
details = vmTO.getDetails();
}
}
final boolean isVpc = domainRouterVO.getVpcId() != null;
if (!isVpc) {
return super.finalizeCommandsOnStart(cmds, profile);
}
if (domainRouterVO.getState() == State.Starting || domainRouterVO.getState() == State.Running) {
// 1) FORM SSH CHECK COMMAND
final NicProfile controlNic = getControlNic(profile);
if (controlNic == null) {
s_logger.error("Control network doesn't exist for the router " + domainRouterVO);
return false;
}
finalizeSshAndVersionAndNetworkUsageOnStart(cmds, profile, domainRouterVO, controlNic);
// 2) FORM PLUG NIC COMMANDS
final List<Pair<Nic, Network>> guestNics = new ArrayList<Pair<Nic, Network>>();
final List<Pair<Nic, Network>> publicNics = new ArrayList<Pair<Nic, Network>>();
final Map<String, String> vlanMacAddress = new HashMap<String, String>();
final List<? extends Nic> routerNics = _nicDao.listByVmId(profile.getId());
for (final Nic routerNic : routerNics) {
final Network network = _networkModel.getNetwork(routerNic.getNetworkId());
if (network.getTrafficType() == TrafficType.Guest) {
final Pair<Nic, Network> guestNic = new Pair<Nic, Network>(routerNic, network);
guestNics.add(guestNic);
} else if (network.getTrafficType() == TrafficType.Public) {
final Pair<Nic, Network> publicNic = new Pair<Nic, Network>(routerNic, network);
publicNics.add(publicNic);
final String vlanTag = BroadcastDomainType.getValue(routerNic.getBroadcastUri());
vlanMacAddress.put(vlanTag, routerNic.getMacAddress());
}
}
final List<Command> usageCmds = new ArrayList<Command>();
// 3) PREPARE PLUG NIC COMMANDS
try {
// add VPC router to public networks
final List<PublicIp> sourceNat = new ArrayList<PublicIp>(1);
for (final Pair<Nic, Network> nicNtwk : publicNics) {
final Nic publicNic = nicNtwk.first();
final Network publicNtwk = nicNtwk.second();
final IPAddressVO userIp = _ipAddressDao.findByIpAndSourceNetworkId(publicNtwk.getId(), publicNic.getIPv4Address());
if (userIp.isSourceNat()) {
final PublicIp publicIp = PublicIp.createFromAddrAndVlan(userIp, _vlanDao.findById(userIp.getVlanId()));
sourceNat.add(publicIp);
if (domainRouterVO.getPublicIpAddress() == null) {
final DomainRouterVO routerVO = _routerDao.findById(domainRouterVO.getId());
routerVO.setPublicIpAddress(publicNic.getIPv4Address());
routerVO.setPublicNetmask(publicNic.getIPv4Netmask());
routerVO.setPublicMacAddress(publicNic.getMacAddress());
_routerDao.update(routerVO.getId(), routerVO);
}
}
final PlugNicCommand plugNicCmd = new PlugNicCommand(_nwHelper.getNicTO(domainRouterVO, publicNic.getNetworkId(), publicNic.getBroadcastUri().toString()), domainRouterVO.getInstanceName(), domainRouterVO.getType(), details);
cmds.addCommand(plugNicCmd);
final VpcVO vpc = _vpcDao.findById(domainRouterVO.getVpcId());
final NetworkUsageCommand netUsageCmd = new NetworkUsageCommand(domainRouterVO.getPrivateIpAddress(), domainRouterVO.getInstanceName(), true, publicNic.getIPv4Address(), vpc.getCidr());
usageCmds.add(netUsageCmd);
UserStatisticsVO stats = _userStatsDao.findBy(domainRouterVO.getAccountId(), domainRouterVO.getDataCenterId(), publicNtwk.getId(), publicNic.getIPv4Address(), domainRouterVO.getId(), domainRouterVO.getType().toString());
if (stats == null) {
stats = new UserStatisticsVO(domainRouterVO.getAccountId(), domainRouterVO.getDataCenterId(), publicNic.getIPv4Address(), domainRouterVO.getId(), domainRouterVO.getType().toString(), publicNtwk.getId());
_userStatsDao.persist(stats);
}
}
// create ip assoc for source nat
if (!sourceNat.isEmpty()) {
_commandSetupHelper.createVpcAssociatePublicIPCommands(domainRouterVO, sourceNat, cmds, vlanMacAddress);
}
// add VPC router to guest networks
for (final Pair<Nic, Network> nicNtwk : guestNics) {
final Nic guestNic = nicNtwk.first();
// plug guest nic
final PlugNicCommand plugNicCmd = new PlugNicCommand(_nwHelper.getNicTO(domainRouterVO, guestNic.getNetworkId(), null), domainRouterVO.getInstanceName(), domainRouterVO.getType(), details);
cmds.addCommand(plugNicCmd);
if (!_networkModel.isPrivateGateway(guestNic.getNetworkId())) {
// set guest network
final VirtualMachine vm = _vmDao.findById(domainRouterVO.getId());
final NicProfile nicProfile = _networkModel.getNicProfile(vm, guestNic.getNetworkId(), null);
final SetupGuestNetworkCommand setupCmd = _commandSetupHelper.createSetupGuestNetworkCommand(domainRouterVO, true, nicProfile);
cmds.addCommand(setupCmd);
} else {
// set private network
final PrivateIpVO ipVO = _privateIpDao.findByIpAndSourceNetworkId(guestNic.getNetworkId(), guestNic.getIPv4Address());
final Network network = _networkDao.findById(guestNic.getNetworkId());
BroadcastDomainType.getValue(network.getBroadcastUri());
final String netmask = NetUtils.getCidrNetmask(network.getCidr());
final PrivateIpAddress ip = new PrivateIpAddress(ipVO, network.getBroadcastUri().toString(), network.getGateway(), netmask, guestNic.getMacAddress());
final List<PrivateIpAddress> privateIps = new ArrayList<PrivateIpAddress>(1);
privateIps.add(ip);
_commandSetupHelper.createVpcAssociatePrivateIPCommands(domainRouterVO, privateIps, cmds, true);
final Long privateGwAclId = _vpcGatewayDao.getNetworkAclIdForPrivateIp(ipVO.getVpcId(), ipVO.getNetworkId(), ipVO.getIpAddress());
if (privateGwAclId != null) {
// set network acl on private gateway
final List<NetworkACLItemVO> networkACLs = _networkACLItemDao.listByACL(privateGwAclId);
s_logger.debug("Found " + networkACLs.size() + " network ACLs to apply as a part of VPC VR " + domainRouterVO + " start for private gateway ip = " + ipVO.getIpAddress());
_commandSetupHelper.createNetworkACLsCommands(networkACLs, domainRouterVO, cmds, ipVO.getNetworkId(), true);
}
}
}
} catch (final Exception ex) {
s_logger.warn("Failed to add router " + domainRouterVO + " to network due to exception ", ex);
return false;
}
// 4) RE-APPLY ALL STATIC ROUTE RULES
final List<? extends StaticRoute> routes = _staticRouteDao.listByVpcId(domainRouterVO.getVpcId());
final List<StaticRouteProfile> staticRouteProfiles = new ArrayList<StaticRouteProfile>(routes.size());
final Map<Long, VpcGateway> gatewayMap = new HashMap<Long, VpcGateway>();
for (final StaticRoute route : routes) {
VpcGateway gateway = gatewayMap.get(route.getVpcGatewayId());
if (gateway == null) {
gateway = _entityMgr.findById(VpcGateway.class, route.getVpcGatewayId());
gatewayMap.put(gateway.getId(), gateway);
}
staticRouteProfiles.add(new StaticRouteProfile(route, gateway));
}
s_logger.debug("Found " + staticRouteProfiles.size() + " static routes to apply as a part of vpc route " + domainRouterVO + " start");
if (!staticRouteProfiles.isEmpty()) {
_commandSetupHelper.createStaticRouteCommands(staticRouteProfiles, domainRouterVO, cmds);
}
// 5) RE-APPLY ALL REMOTE ACCESS VPNs
final RemoteAccessVpnVO vpn = _vpnDao.findByAccountAndVpc(domainRouterVO.getAccountId(), domainRouterVO.getVpcId());
if (vpn != null) {
_commandSetupHelper.createApplyVpnCommands(true, vpn, domainRouterVO, cmds);
}
// 6) REPROGRAM GUEST NETWORK
boolean reprogramGuestNtwks = true;
if (Boolean.FALSE.equals(profile.getParameter(Param.ReProgramGuestNetworks))) {
reprogramGuestNtwks = false;
}
final VirtualRouterProvider vrProvider = _vrProviderDao.findById(domainRouterVO.getElementId());
if (vrProvider == null) {
throw new CloudRuntimeException("Cannot find related virtual router provider of router: " + domainRouterVO.getHostName());
}
final Provider provider = Network.Provider.getProvider(vrProvider.getType().toString());
if (provider == null) {
throw new CloudRuntimeException("Cannot find related provider of virtual router provider: " + vrProvider.getType().toString());
}
for (final Pair<Nic, Network> nicNtwk : guestNics) {
final Nic guestNic = nicNtwk.first();
final AggregationControlCommand startCmd = new AggregationControlCommand(Action.Start, domainRouterVO.getInstanceName(), controlNic.getIPv4Address(), _routerControlHelper.getRouterIpInNetwork(guestNic.getNetworkId(), domainRouterVO.getId()));
cmds.addCommand(startCmd);
if (reprogramGuestNtwks) {
finalizeIpAssocForNetwork(cmds, domainRouterVO, provider, guestNic.getNetworkId(), vlanMacAddress);
finalizeNetworkRulesForNetwork(cmds, domainRouterVO, provider, guestNic.getNetworkId());
}
finalizeUserDataAndDhcpOnStart(cmds, domainRouterVO, provider, guestNic.getNetworkId());
final AggregationControlCommand finishCmd = new AggregationControlCommand(Action.Finish, domainRouterVO.getInstanceName(), controlNic.getIPv4Address(), _routerControlHelper.getRouterIpInNetwork(guestNic.getNetworkId(), domainRouterVO.getId()));
cmds.addCommand(finishCmd);
}
// Add network usage commands
cmds.addCommands(usageCmds);
}
return true;
}
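The VMware-only branch at the top of this method is the recurring pattern for pulling hypervisor-specific VM details out of the guru. A minimal sketch of that pattern as a standalone helper (the helper name is hypothetical; the calls mirror the snippet above):

    // Hypothetical helper extracting the detail-lookup pattern used above.
    // Per the snippet, only the VMware guru populates TO details here.
    private Map<String, String> hypervisorDetails(final VirtualMachineProfile profile) {
        Map<String, String> details = new HashMap<String, String>();
        if (profile.getHypervisorType() == Hypervisor.HypervisorType.VMware) {
            final HypervisorGuru hvGuru = _hvGuruMgr.getGuru(profile.getHypervisorType());
            final VirtualMachineTO vmTO = hvGuru.implement(profile);
            if (vmTO.getDetails() != null) {
                details = vmTO.getDetails();
            }
        }
        return details;
    }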
Use of com.cloud.hypervisor.HypervisorGuru in project cloudstack by apache.
The class VirtualMachineManagerImpl, method orchestrateStart.
@Override
public void orchestrateStart(final String vmUuid, final Map<VirtualMachineProfile.Param, Object> params, final DeploymentPlan planToDeploy, final DeploymentPlanner planner) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException {
final CallContext cctxt = CallContext.current();
final Account account = cctxt.getCallingAccount();
final User caller = cctxt.getCallingUser();
VMInstanceVO vm = _vmDao.findByUuid(vmUuid);
final VirtualMachineGuru vmGuru = getVmGuru(vm);
final Ternary<VMInstanceVO, ReservationContext, ItWorkVO> start = changeToStartState(vmGuru, vm, caller, account);
if (start == null) {
return;
}
vm = start.first();
final ReservationContext ctx = start.second();
ItWorkVO work = start.third();
VMInstanceVO startedVm = null;
final ServiceOfferingVO offering = _offeringDao.findById(vm.getId(), vm.getServiceOfferingId());
final VirtualMachineTemplate template = _entityMgr.findByIdIncludingRemoved(VirtualMachineTemplate.class, vm.getTemplateId());
if (s_logger.isDebugEnabled()) {
s_logger.debug("Trying to deploy VM, vm has dcId: " + vm.getDataCenterId() + " and podId: " + vm.getPodIdToDeployIn());
}
DataCenterDeployment plan = new DataCenterDeployment(vm.getDataCenterId(), vm.getPodIdToDeployIn(), null, null, null, null, ctx);
if (planToDeploy != null && planToDeploy.getDataCenterId() != 0) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("advanceStart: DeploymentPlan is provided, using dcId:" + planToDeploy.getDataCenterId() + ", podId: " + planToDeploy.getPodId() + ", clusterId: " + planToDeploy.getClusterId() + ", hostId: " + planToDeploy.getHostId() + ", poolId: " + planToDeploy.getPoolId());
}
plan = new DataCenterDeployment(planToDeploy.getDataCenterId(), planToDeploy.getPodId(), planToDeploy.getClusterId(), planToDeploy.getHostId(), planToDeploy.getPoolId(), planToDeploy.getPhysicalNetworkId(), ctx);
}
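// the guru for this hypervisor type later turns the profile into the VirtualMachineTO sent in the StartCommand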
final HypervisorGuru hvGuru = _hvGuruMgr.getGuru(vm.getHypervisorType());
boolean canRetry = true;
ExcludeList avoids = null;
try {
final Journal journal = start.second().getJournal();
if (planToDeploy != null) {
avoids = planToDeploy.getAvoids();
}
if (avoids == null) {
avoids = new ExcludeList();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("Deploy avoids pods: " + avoids.getPodsToAvoid() + ", clusters: " + avoids.getClustersToAvoid() + ", hosts: " + avoids.getHostsToAvoid());
}
boolean planChangedByVolume = false;
boolean reuseVolume = true;
final DataCenterDeployment originalPlan = plan;
int retry = StartRetry.value();
while (retry-- != 0) {
if (reuseVolume) {
// edit plan if this vm's ROOT volume is in READY state already
final List<VolumeVO> vols = _volsDao.findReadyRootVolumesByInstance(vm.getId());
for (final VolumeVO vol : vols) {
// make sure the templateId is unchanged; if it changed, let the planner
// reassign a pool for the volume even if it is READY
final Long volTemplateId = vol.getTemplateId();
if (volTemplateId != null && volTemplateId.longValue() != template.getId()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug(vol + " of " + vm + " is READY, but template ids don't match, let the planner reassign a new pool");
}
continue;
}
final StoragePool pool = (StoragePool) dataStoreMgr.getPrimaryDataStore(vol.getPoolId());
if (!pool.isInMaintenance()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Root volume is ready, need to place VM in volume's cluster");
}
final long rootVolDcId = pool.getDataCenterId();
final Long rootVolPodId = pool.getPodId();
final Long rootVolClusterId = pool.getClusterId();
if (planToDeploy != null && planToDeploy.getDataCenterId() != 0) {
final Long clusterIdSpecified = planToDeploy.getClusterId();
if (clusterIdSpecified != null && rootVolClusterId != null) {
if (rootVolClusterId.longValue() != clusterIdSpecified.longValue()) {
// cannot satisfy the plan passed in to the planner
if (s_logger.isDebugEnabled()) {
s_logger.debug("Cannot satisfy the deployment plan passed in since the ready Root volume is in different cluster. volume's cluster: " + rootVolClusterId + ", cluster specified: " + clusterIdSpecified);
}
throw new ResourceUnavailableException("Root volume is ready in different cluster, Deployment plan provided cannot be satisfied, unable to create a deployment for " + vm, Cluster.class, clusterIdSpecified);
}
}
plan = new DataCenterDeployment(planToDeploy.getDataCenterId(), planToDeploy.getPodId(), planToDeploy.getClusterId(), planToDeploy.getHostId(), vol.getPoolId(), null, ctx);
} else {
plan = new DataCenterDeployment(rootVolDcId, rootVolPodId, rootVolClusterId, null, vol.getPoolId(), null, ctx);
if (s_logger.isDebugEnabled()) {
s_logger.debug(vol + " is READY, changing deployment plan to use this pool's dcId: " + rootVolDcId + " , podId: " + rootVolPodId + " , and clusterId: " + rootVolClusterId);
}
planChangedByVolume = true;
}
}
}
}
final Account owner = _entityMgr.findById(Account.class, vm.getAccountId());
final VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vm, template, offering, owner, params);
DeployDestination dest = null;
try {
dest = _dpMgr.planDeployment(vmProfile, plan, avoids, planner);
} catch (final AffinityConflictException e2) {
s_logger.warn("Unable to create deployment, affinity rules associted to the VM conflict", e2);
throw new CloudRuntimeException("Unable to create deployment, affinity rules associted to the VM conflict");
}
if (dest == null) {
if (planChangedByVolume) {
plan = originalPlan;
planChangedByVolume = false;
//do not enter volume reuse for next retry, since we want to look for resources outside the volume's cluster
reuseVolume = false;
continue;
}
throw new InsufficientServerCapacityException("Unable to create a deployment for " + vmProfile, DataCenter.class, plan.getDataCenterId(), areAffinityGroupsAssociated(vmProfile));
}
avoids.addHost(dest.getHost().getId());
journal.record("Deployment found ", vmProfile, dest);
long destHostId = dest.getHost().getId();
vm.setPodIdToDeployIn(dest.getPod().getId());
final Long cluster_id = dest.getCluster().getId();
final ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id, "cpuOvercommitRatio");
final ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id, "memoryOvercommitRatio");
//storing the value of overcommit in the vm_details table for doing a capacity check in case the cluster overcommit ratio is changed.
if (_uservmDetailsDao.findDetail(vm.getId(), "cpuOvercommitRatio") == null && (Float.parseFloat(cluster_detail_cpu.getValue()) > 1f || Float.parseFloat(cluster_detail_ram.getValue()) > 1f)) {
_uservmDetailsDao.addDetail(vm.getId(), "cpuOvercommitRatio", cluster_detail_cpu.getValue(), true);
_uservmDetailsDao.addDetail(vm.getId(), "memoryOvercommitRatio", cluster_detail_ram.getValue(), true);
} else if (_uservmDetailsDao.findDetail(vm.getId(), "cpuOvercommitRatio") != null) {
_uservmDetailsDao.addDetail(vm.getId(), "cpuOvercommitRatio", cluster_detail_cpu.getValue(), true);
_uservmDetailsDao.addDetail(vm.getId(), "memoryOvercommitRatio", cluster_detail_ram.getValue(), true);
}
vmProfile.setCpuOvercommitRatio(Float.parseFloat(cluster_detail_cpu.getValue()));
vmProfile.setMemoryOvercommitRatio(Float.parseFloat(cluster_detail_ram.getValue()));
StartAnswer startAnswer = null;
try {
if (!changeState(vm, Event.OperationRetry, destHostId, work, Step.Prepare)) {
throw new ConcurrentOperationException("Unable to update the state of the Virtual Machine " + vm.getUuid() + " oldstate: " + vm.getState() + "Event :" + Event.OperationRetry);
}
} catch (final NoTransitionException e1) {
throw new ConcurrentOperationException(e1.getMessage());
}
try {
if (s_logger.isDebugEnabled()) {
s_logger.debug("VM is being created in podId: " + vm.getPodIdToDeployIn());
}
_networkMgr.prepare(vmProfile, new DeployDestination(dest.getDataCenter(), dest.getPod(), null, null), ctx);
if (vm.getHypervisorType() != HypervisorType.BareMetal) {
volumeMgr.prepare(vmProfile, dest);
}
//since StorageMgr succeeded in volume creation, reuse Volume for further tries until current cluster has capacity
if (!reuseVolume) {
reuseVolume = true;
}
Commands cmds = null;
vmGuru.finalizeVirtualMachineProfile(vmProfile, dest, ctx);
final VirtualMachineTO vmTO = hvGuru.implement(vmProfile);
handlePath(vmTO.getDisks(), vm.getHypervisorType());
cmds = new Commands(Command.OnError.Stop);
cmds.addCommand(new StartCommand(vmTO, dest.getHost(), getExecuteInSequence(vm.getHypervisorType())));
vmGuru.finalizeDeployment(cmds, vmProfile, dest, ctx);
work = _workDao.findById(work.getId());
if (work == null || work.getStep() != Step.Prepare) {
throw new ConcurrentOperationException("Work steps have been changed: " + work);
}
_workDao.updateStep(work, Step.Starting);
_agentMgr.send(destHostId, cmds);
_workDao.updateStep(work, Step.Started);
startAnswer = cmds.getAnswer(StartAnswer.class);
if (startAnswer != null && startAnswer.getResult()) {
handlePath(vmTO.getDisks(), startAnswer.getIqnToPath());
final String host_guid = startAnswer.getHost_guid();
if (host_guid != null) {
final HostVO finalHost = _resourceMgr.findHostByGuid(host_guid);
if (finalHost == null) {
throw new CloudRuntimeException("Host Guid " + host_guid + " doesn't exist in DB, something went wrong while processing start answer: " + startAnswer);
}
destHostId = finalHost.getId();
}
if (vmGuru.finalizeStart(vmProfile, destHostId, cmds, ctx)) {
syncDiskChainChange(startAnswer);
if (!changeState(vm, Event.OperationSucceeded, destHostId, work, Step.Done)) {
s_logger.error("Unable to transition to a new state. VM uuid: " + vm.getUuid() + "VM oldstate:" + vm.getState() + "Event:" + Event.OperationSucceeded);
throw new ConcurrentOperationException("Failed to deploy VM" + vm.getUuid());
}
// Update GPU device capacity
final GPUDeviceTO gpuDevice = startAnswer.getVirtualMachine().getGpuDevice();
if (gpuDevice != null) {
_resourceMgr.updateGPUDetails(destHostId, gpuDevice.getGroupDetails());
}
// remove the "deployvm" detail once the VM has started, so subsequent starts are not treated as fresh deployments
if (_uservmDetailsDao.findDetail(vm.getId(), "deployvm") != null) {
_uservmDetailsDao.removeDetail(vm.getId(), "deployvm");
}
startedVm = vm;
if (s_logger.isDebugEnabled()) {
s_logger.debug("Start completed for VM " + vm);
}
return;
} else {
if (s_logger.isInfoEnabled()) {
s_logger.info("The guru did not like the answers so stopping " + vm);
}
StopCommand stopCmd = new StopCommand(vm, getExecuteInSequence(vm.getHypervisorType()), false);
stopCmd.setControlIp(getControlNicIpForVM(vm));
final StopCommand cmd = stopCmd;
final Answer answer = _agentMgr.easySend(destHostId, cmd);
if (answer != null && answer instanceof StopAnswer) {
final StopAnswer stopAns = (StopAnswer) answer;
if (vm.getType() == VirtualMachine.Type.User) {
final String platform = stopAns.getPlatform();
if (platform != null) {
final Map<String, String> vmmetadata = new HashMap<String, String>();
vmmetadata.put(vm.getInstanceName(), platform);
syncVMMetaData(vmmetadata);
}
}
}
if (answer == null || !answer.getResult()) {
s_logger.warn("Unable to stop " + vm + " due to " + (answer != null ? answer.getDetails() : "no answers"));
_haMgr.scheduleStop(vm, destHostId, WorkType.ForceStop);
throw new ExecutionException("Unable to stop this VM, " + vm.getUuid() + " so we are unable to retry the start operation");
}
throw new ExecutionException("Unable to start VM:" + vm.getUuid() + " due to error in finalizeStart, not retrying");
}
}
s_logger.info("Unable to start VM on " + dest.getHost() + " due to " + (startAnswer == null ? " no start answer" : startAnswer.getDetails()));
if (startAnswer != null && startAnswer.getContextParam("stopRetry") != null) {
break;
}
} catch (OperationTimedoutException e) {
s_logger.debug("Unable to send the start command to host " + dest.getHost() + " failed to start VM: " + vm.getUuid());
if (e.isActive()) {
_haMgr.scheduleStop(vm, destHostId, WorkType.CheckStop);
}
canRetry = false;
throw new AgentUnavailableException("Unable to start " + vm.getHostName(), destHostId, e);
} catch (final ResourceUnavailableException e) {
s_logger.info("Unable to contact resource.", e);
if (!avoids.add(e)) {
if (e.getScope() == Volume.class || e.getScope() == Nic.class) {
throw e;
} else {
s_logger.warn("unexpected ResourceUnavailableException : " + e.getScope().getName(), e);
throw e;
}
}
} catch (final InsufficientCapacityException e) {
s_logger.info("Insufficient capacity ", e);
if (!avoids.add(e)) {
if (e.getScope() == Volume.class || e.getScope() == Nic.class) {
throw e;
} else {
s_logger.warn("unexpected InsufficientCapacityException : " + e.getScope().getName(), e);
}
}
} catch (final ExecutionException e) {
s_logger.error("Failed to start instance " + vm, e);
throw new AgentUnavailableException("Unable to start instance due to " + e.getMessage(), destHostId, e);
} catch (final NoTransitionException e) {
s_logger.error("Failed to start instance " + vm, e);
throw new AgentUnavailableException("Unable to start instance due to " + e.getMessage(), destHostId, e);
} finally {
if (startedVm == null && canRetry) {
final Step prevStep = work.getStep();
_workDao.updateStep(work, Step.Release);
// If previous step was started/ing && we got a valid answer
if ((prevStep == Step.Started || prevStep == Step.Starting) && startAnswer != null && startAnswer.getResult()) {
//TODO check the response of cleanup and record it in DB for retry
cleanup(vmGuru, vmProfile, work, Event.OperationFailed, false);
} else {
//if step is not starting/started, send cleanup command with force=true
cleanup(vmGuru, vmProfile, work, Event.OperationFailed, true);
}
}
}
}
} finally {
if (startedVm == null) {
if (canRetry) {
try {
changeState(vm, Event.OperationFailed, null, work, Step.Done);
} catch (final NoTransitionException e) {
throw new ConcurrentOperationException(e.getMessage());
}
}
}
if (planToDeploy != null) {
planToDeploy.setAvoids(avoids);
}
}
if (startedVm == null) {
throw new CloudRuntimeException("Unable to start instance '" + vm.getHostName() + "' (" + vm.getUuid() + "), see management server log for details");
}
}
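Stripped of retries and error handling, the guru-to-agent handoff at the heart of this method is short: the guru translates the profile into a VirtualMachineTO, the TO is wrapped in a StartCommand, and the answer is read back from the same Commands envelope. A condensed sketch using the identifiers from the method above (exception handling omitted; _agentMgr.send can throw OperationTimedoutException):

    // Condensed start handoff, as exercised by orchestrateStart above.
    final HypervisorGuru hvGuru = _hvGuruMgr.getGuru(vm.getHypervisorType());
    final VirtualMachineTO vmTO = hvGuru.implement(vmProfile);
    final Commands cmds = new Commands(Command.OnError.Stop);
    cmds.addCommand(new StartCommand(vmTO, dest.getHost(), getExecuteInSequence(vm.getHypervisorType())));
    _agentMgr.send(destHostId, cmds);
    final StartAnswer startAnswer = cmds.getAnswer(StartAnswer.class);
    if (startAnswer == null || !startAnswer.getResult()) {
        // on failure the full method retries with a widened ExcludeList, as shown above
    }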
Use of com.cloud.hypervisor.HypervisorGuru in project cloudstack by apache.
The class VirtualMachineManagerImpl, method advanceExpunge.
protected void advanceExpunge(VMInstanceVO vm) throws ResourceUnavailableException, OperationTimedoutException, ConcurrentOperationException {
if (vm == null || vm.getRemoved() != null) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Unable to find vm or vm is destroyed: " + vm);
}
return;
}
advanceStop(vm.getUuid(), false);
vm = _vmDao.findByUuid(vm.getUuid());
try {
if (!stateTransitTo(vm, VirtualMachine.Event.ExpungeOperation, vm.getHostId())) {
s_logger.debug("Unable to destroy the vm because it is not in the correct state: " + vm);
throw new CloudRuntimeException("Unable to destroy " + vm);
}
} catch (final NoTransitionException e) {
s_logger.debug("Unable to destroy the vm because it is not in the correct state: " + vm);
throw new CloudRuntimeException("Unable to destroy " + vm, e);
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("Destroying vm " + vm);
}
final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm);
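// the guru supplies hypervisor-dependent expunge commands for nics and managed-storage volumes before the generic cleanup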
final HypervisorGuru hvGuru = _hvGuruMgr.getGuru(vm.getHypervisorType());
s_logger.debug("Cleaning up NICS");
final List<Command> nicExpungeCommands = hvGuru.finalizeExpungeNics(vm, profile.getNics());
_networkMgr.cleanupNics(profile);
s_logger.debug("Cleaning up hypervisor data structures (ex. SRs in XenServer) for managed storage");
final List<Command> volumeExpungeCommands = hvGuru.finalizeExpungeVolumes(vm);
final Long hostId = vm.getHostId() != null ? vm.getHostId() : vm.getLastHostId();
if (volumeExpungeCommands != null && volumeExpungeCommands.size() > 0 && hostId != null) {
final Commands cmds = new Commands(Command.OnError.Stop);
for (final Command volumeExpungeCommand : volumeExpungeCommands) {
cmds.addCommand(volumeExpungeCommand);
}
_agentMgr.send(hostId, cmds);
if (!cmds.isSuccessful()) {
for (final Answer answer : cmds.getAnswers()) {
if (!answer.getResult()) {
s_logger.warn("Failed to expunge vm due to: " + answer.getDetails());
throw new CloudRuntimeException("Unable to expunge " + vm + " due to " + answer.getDetails());
}
}
}
}
if (hostId != null) {
volumeMgr.revokeAccess(vm.getId(), hostId);
}
// Clean up volumes based on the vm's instance id
volumeMgr.cleanupVolumes(vm.getId());
final VirtualMachineGuru guru = getVmGuru(vm);
guru.finalizeExpunge(vm);
// remove the overcommit details from the uservm details
_uservmDetailsDao.removeDetails(vm.getId());
// send hypervisor-dependent commands before removing
final List<Command> finalizeExpungeCommands = hvGuru.finalizeExpunge(vm);
if (finalizeExpungeCommands != null && finalizeExpungeCommands.size() > 0) {
if (hostId != null) {
final Commands cmds = new Commands(Command.OnError.Stop);
for (final Command command : finalizeExpungeCommands) {
cmds.addCommand(command);
}
if (nicExpungeCommands != null) {
for (final Command command : nicExpungeCommands) {
cmds.addCommand(command);
}
}
_agentMgr.send(hostId, cmds);
if (!cmds.isSuccessful()) {
for (final Answer answer : cmds.getAnswers()) {
if (!answer.getResult()) {
s_logger.warn("Failed to expunge vm due to: " + answer.getDetails());
throw new CloudRuntimeException("Unable to expunge " + vm + " due to " + answer.getDetails());
}
}
}
}
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("Expunged " + vm);
}
}
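The send-and-verify block appears twice in this method, once for the managed-storage volume commands and once for the final expunge batch (plus nic commands). It could be factored into a helper; a sketch under that assumption (hypothetical method name, same calls as above):

    // Hypothetical helper for the duplicated send-and-verify logic above.
    private void sendAndCheck(final long hostId, final List<Command> commands, final VMInstanceVO vm)
            throws AgentUnavailableException, OperationTimedoutException {
        final Commands cmds = new Commands(Command.OnError.Stop);
        for (final Command command : commands) {
            cmds.addCommand(command);
        }
        _agentMgr.send(hostId, cmds);
        // OnError.Stop halts the batch at the first failure; surface it as an exception
        if (!cmds.isSuccessful()) {
            for (final Answer answer : cmds.getAnswers()) {
                if (!answer.getResult()) {
                    s_logger.warn("Failed to expunge vm due to: " + answer.getDetails());
                    throw new CloudRuntimeException("Unable to expunge " + vm + " due to " + answer.getDetails());
                }
            }
        }
    }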
Use of com.cloud.hypervisor.HypervisorGuru in project cloudstack by apache.
The class VirtualMachineManagerImpl, method orchestrateRemoveNicFromVm.
private boolean orchestrateRemoveNicFromVm(final VirtualMachine vm, final Nic nic) throws ConcurrentOperationException, ResourceUnavailableException {
final CallContext cctx = CallContext.current();
final VMInstanceVO vmVO = _vmDao.findById(vm.getId());
final NetworkVO network = _networkDao.findById(nic.getNetworkId());
final ReservationContext context = new ReservationContextImpl(null, null, cctx.getCallingUser(), cctx.getCallingAccount());
final VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vmVO, null, null, null, null);
final DataCenter dc = _entityMgr.findById(DataCenter.class, network.getDataCenterId());
final Host host = _hostDao.findById(vm.getHostId());
final DeployDestination dest = new DeployDestination(dc, null, null, host);
final HypervisorGuru hvGuru = _hvGuruMgr.getGuru(vmProfile.getVirtualMachine().getHypervisorType());
final VirtualMachineTO vmTO = hvGuru.implement(vmProfile);
final NicProfile nicProfile = new NicProfile(nic, network, nic.getBroadcastUri(), nic.getIsolationUri(), _networkModel.getNetworkRate(network.getId(), vm.getId()), _networkModel.isSecurityGroupSupportedInNetwork(network), _networkModel.getNetworkTag(vmProfile.getVirtualMachine().getHypervisorType(), network));
//1) Unplug the nic
if (vm.getState() == State.Running) {
final NicTO nicTO = toNicTO(nicProfile, vmProfile.getVirtualMachine().getHypervisorType());
s_logger.debug("Un-plugging nic " + nic + " for vm " + vm + " from network " + network);
final boolean result = unplugNic(network, nicTO, vmTO, context, dest);
if (result) {
s_logger.debug("Nic is unplugged successfully for vm " + vm + " in network " + network);
final long isDefault = nic.isDefaultNic() ? 1 : 0;
UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NETWORK_OFFERING_REMOVE, vm.getAccountId(), vm.getDataCenterId(), vm.getId(), Long.toString(nic.getId()), network.getNetworkOfferingId(), null, isDefault, VirtualMachine.class.getName(), vm.getUuid(), vm.isDisplay());
} else {
s_logger.warn("Failed to unplug nic for the vm " + vm + " from network " + network);
return false;
}
} else if (vm.getState() != State.Stopped) {
s_logger.warn("Unable to remove vm " + vm + " from network " + network);
throw new ResourceUnavailableException("Unable to remove vm " + vm + " from network, is not in the right state", DataCenter.class, vm.getDataCenterId());
}
//2) Release the nic
_networkMgr.releaseNic(vmProfile, nic);
s_logger.debug("Successfully released nic " + nic + "for vm " + vm);
//3) Remove the nic
_networkMgr.removeNic(vmProfile, nic);
_nicsDao.expunge(nic.getId());
return true;
}
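The toNicTO(...) call in the running-VM branch is itself a thin delegation to the guru. Its likely shape, inferred from the calls visible in this listing (treat the exact signature as an assumption):

    // Inferred shape of toNicTO(...): let the hypervisor guru build the NicTO
    // so each hypervisor can apply its own nic conventions.
    public NicTO toNicTO(final NicProfile nic, final HypervisorType hypervisorType) {
        final HypervisorGuru hvGuru = _hvGuruMgr.getGuru(hypervisorType);
        return hvGuru.toNicTO(nic);
    }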
Use of com.cloud.hypervisor.HypervisorGuru in project cloudstack by apache.
The class VirtualMachineManagerImpl, method orchestrateMigrateWithStorage.
private void orchestrateMigrateWithStorage(final String vmUuid, final long srcHostId, final long destHostId, final Map<Long, Long> volumeToPool) throws ResourceUnavailableException, ConcurrentOperationException {
final VMInstanceVO vm = _vmDao.findByUuid(vmUuid);
final HostVO srcHost = _hostDao.findById(srcHostId);
final HostVO destHost = _hostDao.findById(destHostId);
final VirtualMachineGuru vmGuru = getVmGuru(vm);
final DataCenterVO dc = _dcDao.findById(destHost.getDataCenterId());
final HostPodVO pod = _podDao.findById(destHost.getPodId());
final Cluster cluster = _clusterDao.findById(destHost.getClusterId());
final DeployDestination destination = new DeployDestination(dc, pod, cluster, destHost);
// Create a map of which volume should go in which storage pool.
final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm);
final Map<Volume, StoragePool> volumeToPoolMap = getPoolListForVolumesForMigration(profile, destHost, volumeToPool);
// if no volumes actually need to move, this should have been a plain vm migration, not a migration with storage
if (volumeToPoolMap == null || volumeToPoolMap.isEmpty()) {
throw new InvalidParameterValueException("Migration of the vm " + vm + "from host " + srcHost + " to destination host " + destHost + " doesn't involve migrating the volumes.");
}
AlertManager.AlertType alertType = AlertManager.AlertType.ALERT_TYPE_USERVM_MIGRATE;
if (VirtualMachine.Type.DomainRouter.equals(vm.getType())) {
alertType = AlertManager.AlertType.ALERT_TYPE_DOMAIN_ROUTER_MIGRATE;
} else if (VirtualMachine.Type.ConsoleProxy.equals(vm.getType())) {
alertType = AlertManager.AlertType.ALERT_TYPE_CONSOLE_PROXY_MIGRATE;
}
_networkMgr.prepareNicForMigration(profile, destination);
volumeMgr.prepareForMigration(profile, destination);
final HypervisorGuru hvGuru = _hvGuruMgr.getGuru(vm.getHypervisorType());
final VirtualMachineTO to = hvGuru.implement(profile);
ItWorkVO work = new ItWorkVO(UUID.randomUUID().toString(), _nodeId, State.Migrating, vm.getType(), vm.getId());
work.setStep(Step.Prepare);
work.setResourceType(ItWorkVO.ResourceType.Host);
work.setResourceId(destHostId);
work = _workDao.persist(work);
// Put the vm in migrating state.
vm.setLastHostId(srcHostId);
vm.setPodIdToDeployIn(destHost.getPodId());
moveVmToMigratingState(vm, destHostId, work);
boolean migrated = false;
try {
// config drive: detach the config drive at the source host; after a successful
// migration it is re-attached on the destination host, and on failure the VM
// is stopped, so the config ISO is deleted
Nic defaultNic = _networkModel.getDefaultNic(vm.getId());
List<String[]> vmData = null;
if (defaultNic != null) {
UserVmVO userVm = _userVmDao.findById(vm.getId());
Map<String, String> details = _vmDetailsDao.listDetailsKeyPairs(vm.getId());
vm.setDetails(details);
Network network = _networkModel.getNetwork(defaultNic.getNetworkId());
if (_networkModel.isSharedNetworkWithoutServices(network.getId())) {
final String serviceOffering = _serviceOfferingDao.findByIdIncludingRemoved(vm.getId(), vm.getServiceOfferingId()).getDisplayText();
final String zoneName = _dcDao.findById(vm.getDataCenterId()).getName();
boolean isWindows = _guestOSCategoryDao.findById(_guestOSDao.findById(vm.getGuestOSId()).getCategoryId()).getName().equalsIgnoreCase("Windows");
vmData = _networkModel.generateVmData(userVm.getUserData(), serviceOffering, zoneName, vm.getInstanceName(), vm.getId(), (String) profile.getParameter(VirtualMachineProfile.Param.VmSshPubKey), (String) profile.getParameter(VirtualMachineProfile.Param.VmPassword), isWindows);
String vmName = vm.getInstanceName();
String configDriveIsoRootFolder = "/tmp";
String isoFile = configDriveIsoRootFolder + "/" + vmName + "/configDrive/" + vmName + ".iso";
profile.setVmData(vmData);
profile.setConfigDriveLabel(VmConfigDriveLabel.value());
profile.setConfigDriveIsoRootFolder(configDriveIsoRootFolder);
profile.setConfigDriveIsoFile(isoFile);
// At source host detach the config drive iso.
AttachOrDettachConfigDriveCommand dettachCommand = new AttachOrDettachConfigDriveCommand(vm.getInstanceName(), vmData, VmConfigDriveLabel.value(), false);
try {
_agentMgr.send(srcHost.getId(), dettachCommand);
s_logger.debug("Deleted config drive ISO for vm " + vm.getInstanceName() + " In host " + srcHost);
} catch (OperationTimedoutException e) {
s_logger.debug("TIme out occured while exeuting command AttachOrDettachConfigDrive " + e.getMessage());
}
}
}
// Migrate the vm and its volume.
volumeMgr.migrateVolumes(vm, to, srcHost, destHost, volumeToPoolMap);
// Put the vm back to running state.
moveVmOutofMigratingStateOnSuccess(vm, destHost.getId(), work);
try {
if (!checkVmOnHost(vm, destHostId)) {
s_logger.error("Vm not found on destination host. Unable to complete migration for " + vm);
try {
_agentMgr.send(srcHostId, new Commands(cleanup(vm.getInstanceName())), null);
} catch (final AgentUnavailableException e) {
s_logger.error("AgentUnavailableException while cleanup on source host: " + srcHostId);
}
cleanup(vmGuru, new VirtualMachineProfileImpl(vm), work, Event.AgentReportStopped, true);
throw new CloudRuntimeException("VM not found on desintation host. Unable to complete migration for " + vm);
}
} catch (final OperationTimedoutException e) {
s_logger.warn("Error while checking the vm " + vm + " is on host " + destHost, e);
}
migrated = true;
} finally {
if (!migrated) {
s_logger.info("Migration was unsuccessful. Cleaning up: " + vm);
_alertMgr.sendAlert(alertType, srcHost.getDataCenterId(), srcHost.getPodId(), "Unable to migrate vm " + vm.getInstanceName() + " from host " + srcHost.getName() + " in zone " + dc.getName() + " and pod " + pod.getName(), "Migrate Command failed. Please check logs.");
try {
_agentMgr.send(destHostId, new Commands(cleanup(vm.getInstanceName())), null);
vm.setPodIdToDeployIn(srcHost.getPodId());
stateTransitTo(vm, Event.OperationFailed, srcHostId);
} catch (final AgentUnavailableException e) {
s_logger.warn("Looks like the destination Host is unavailable for cleanup.", e);
} catch (final NoTransitionException e) {
s_logger.error("Error while transitioning vm from migrating to running state.", e);
}
}
work.setStep(Step.Done);
_workDao.update(work.getId(), work);
}
}
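Taken together, the five call sites above exercise only a small slice of HypervisorGuru: resolve the guru for a hypervisor type via _hvGuruMgr.getGuru(...), call implement(...) to turn a profile into a VirtualMachineTO, and, during expunge, collect hypervisor-dependent cleanup commands. That slice, written out as an illustrative interface (inferred from usage here; the real interface is larger and this name is invented):

    // Illustrative subset of HypervisorGuru, inferred from the call sites above.
    public interface HypervisorGuruSlice {
        VirtualMachineTO implement(VirtualMachineProfile profile);
        NicTO toNicTO(NicProfile profile);
        List<Command> finalizeExpunge(VirtualMachine vm);
        List<Command> finalizeExpungeNics(VirtualMachine vm, List<NicProfile> nics);
        List<Command> finalizeExpungeVolumes(VirtualMachine vm);
    }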