Use of com.cloud.agent.api.StartCommand in project cloudstack by apache.
The class LibvirtComputingResourceTest, method testStartCommandInternalError.
@Test
public void testStartCommandInternalError() {
final VirtualMachineTO vmSpec = Mockito.mock(VirtualMachineTO.class);
final com.cloud.host.Host host = Mockito.mock(com.cloud.host.Host.class);
final boolean executeInSequence = false;
final StartCommand command = new StartCommand(vmSpec, host, executeInSequence);
final KVMStoragePoolManager storagePoolMgr = Mockito.mock(KVMStoragePoolManager.class);
final LibvirtUtilitiesHelper libvirtUtilitiesHelper = Mockito.mock(LibvirtUtilitiesHelper.class);
final Connect conn = Mockito.mock(Connect.class);
final LibvirtVMDef vmDef = Mockito.mock(LibvirtVMDef.class);
final NicTO nic = Mockito.mock(NicTO.class);
final NicTO[] nics = new NicTO[] { nic };
final String vmName = "Test";
when(libvirtComputingResource.getStoragePoolMgr()).thenReturn(storagePoolMgr);
when(vmSpec.getNics()).thenReturn(nics);
when(vmSpec.getType()).thenReturn(VirtualMachine.Type.DomainRouter);
when(vmSpec.getName()).thenReturn(vmName);
when(libvirtComputingResource.createVMFromSpec(vmSpec)).thenReturn(vmDef);
when(libvirtComputingResource.getLibvirtUtilitiesHelper()).thenReturn(libvirtUtilitiesHelper);
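// Stub the libvirt connection lookup and force createVbd to throw an InternalErrorException, so the start request dispatched below is expected to fail.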
try {
when(libvirtUtilitiesHelper.getConnectionByType(vmDef.getHvsType())).thenReturn(conn);
doThrow(InternalErrorException.class).when(libvirtComputingResource).createVbd(conn, vmSpec, vmName, vmDef);
} catch (final LibvirtException e) {
fail(e.getMessage());
} catch (final InternalErrorException e) {
fail(e.getMessage());
} catch (final URISyntaxException e) {
fail(e.getMessage());
}
final LibvirtRequestWrapper wrapper = LibvirtRequestWrapper.getInstance();
assertNotNull(wrapper);
final Answer answer = wrapper.execute(command, libvirtComputingResource);
assertFalse(answer.getResult());
verify(libvirtComputingResource, times(1)).getStoragePoolMgr();
verify(libvirtComputingResource, times(1)).getLibvirtUtilitiesHelper();
try {
verify(libvirtUtilitiesHelper, times(1)).getConnectionByType(vmDef.getHvsType());
} catch (final LibvirtException e) {
fail(e.getMessage());
}
}
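For reference, the three-argument constructor exercised above is the same one used by the orchestration and Hyper-V code later on this page. A minimal, self-contained sketch of building a StartCommand outside the mocking harness follows; it relies only on accessors that appear elsewhere on this page (getVirtualMachine() in the Hyper-V example), and the mocked values are illustrative assumptions, not taken from the test.

import org.mockito.Mockito;

import com.cloud.agent.api.StartCommand;
import com.cloud.agent.api.to.VirtualMachineTO;
import com.cloud.host.Host;

// Illustrative sketch only; not part of LibvirtComputingResourceTest.
public class StartCommandConstructionSketch {
    static StartCommand buildSampleCommand() {
        final VirtualMachineTO vmSpec = Mockito.mock(VirtualMachineTO.class);
        final Host host = Mockito.mock(Host.class);
        // Arguments: the VM specification, the destination host, and whether the agent
        // must execute this command in sequence with others (false allows concurrency).
        final StartCommand cmd = new StartCommand(vmSpec, host, false);
        // getVirtualMachine() returns the spec that was passed in; the Hyper-V resource
        // below uses it to decide whether a system VM needs secondary storage attached.
        assert cmd.getVirtualMachine() == vmSpec;
        return cmd;
    }
}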
Use of com.cloud.agent.api.StartCommand in project cloudstack by apache.
The class HypervDirectConnectResource, method executeRequest.
// TODO: Is it valid to return NULL, or should we throw on error?
@Override
public final Answer executeRequest(final Command cmd) {
// Set HTTP POST destination URI
// Using java.net.URI, see
// http://docs.oracle.com/javase/1.5.0/docs/api/java/net/URI.html
URI agentUri = null;
final Class<? extends Command> clazz = cmd.getClass();
Answer answer = null;
try {
final String cmdName = cmd.getClass().getName();
agentUri = new URI("https", null, _agentIp, _port, "/api/HypervResource/" + cmdName, null, null);
} catch (final URISyntaxException e) {
// TODO add proper logging
final String errMsg = "Could not generate URI for Hyper-V agent";
s_logger.error(errMsg, e);
return null;
}
if (cmd instanceof NetworkElementCommand) {
return _vrResource.executeRequest((NetworkElementCommand) cmd);
}
if (clazz == CheckSshCommand.class) {
answer = execute((CheckSshCommand) cmd);
} else if (clazz == GetDomRVersionCmd.class) {
answer = execute((GetDomRVersionCmd) cmd);
} else if (cmd instanceof NetworkUsageCommand) {
answer = execute((NetworkUsageCommand) cmd);
} else if (clazz == IpAssocCommand.class) {
answer = execute((IpAssocCommand) cmd);
} else if (clazz == DnsMasqConfigCommand.class) {
return execute((DnsMasqConfigCommand) cmd);
} else if (clazz == CreateIpAliasCommand.class) {
return execute((CreateIpAliasCommand) cmd);
} else if (clazz == DhcpEntryCommand.class) {
answer = execute((DhcpEntryCommand) cmd);
} else if (clazz == VmDataCommand.class) {
answer = execute((VmDataCommand) cmd);
} else if (clazz == SavePasswordCommand.class) {
answer = execute((SavePasswordCommand) cmd);
} else if (clazz == SetFirewallRulesCommand.class) {
answer = execute((SetFirewallRulesCommand) cmd);
} else if (clazz == LoadBalancerConfigCommand.class) {
answer = execute((LoadBalancerConfigCommand) cmd);
} else if (clazz == DeleteIpAliasCommand.class) {
return execute((DeleteIpAliasCommand) cmd);
} else if (clazz == PingTestCommand.class) {
answer = execute((PingTestCommand) cmd);
} else if (clazz == SetStaticNatRulesCommand.class) {
answer = execute((SetStaticNatRulesCommand) cmd);
} else if (clazz == CheckRouterCommand.class) {
answer = execute((CheckRouterCommand) cmd);
} else if (clazz == SetPortForwardingRulesCommand.class) {
answer = execute((SetPortForwardingRulesCommand) cmd);
} else if (clazz == SetSourceNatCommand.class) {
answer = execute((SetSourceNatCommand) cmd);
} else if (clazz == Site2SiteVpnCfgCommand.class) {
answer = execute((Site2SiteVpnCfgCommand) cmd);
} else if (clazz == CheckS2SVpnConnectionsCommand.class) {
answer = execute((CheckS2SVpnConnectionsCommand) cmd);
} else if (clazz == RemoteAccessVpnCfgCommand.class) {
answer = execute((RemoteAccessVpnCfgCommand) cmd);
} else if (clazz == VpnUsersCfgCommand.class) {
answer = execute((VpnUsersCfgCommand) cmd);
} else if (clazz == SetStaticRouteCommand.class) {
answer = execute((SetStaticRouteCommand) cmd);
} else if (clazz == SetMonitorServiceCommand.class) {
answer = execute((SetMonitorServiceCommand) cmd);
} else if (clazz == PlugNicCommand.class) {
answer = execute((PlugNicCommand) cmd);
} else if (clazz == UnPlugNicCommand.class) {
answer = execute((UnPlugNicCommand) cmd);
} else if (clazz == CopyCommand.class) {
answer = execute((CopyCommand) cmd);
} else {
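// StartCommand has no local execute() overload here: for non-user (system) VMs the secondary storage location is attached first so the systemvm ISO can be copied, and the command is then forwarded to the Hyper-V agent over HTTPS like any other unmatched command.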
if (clazz == StartCommand.class) {
final VirtualMachineTO vmSpec = ((StartCommand) cmd).getVirtualMachine();
if (vmSpec.getType() != VirtualMachine.Type.User) {
if (s_hypervMgr != null) {
final String secondary = s_hypervMgr.prepareSecondaryStorageStore(Long.parseLong(_zoneId));
if (secondary != null) {
((StartCommand) cmd).setSecondaryStorage(secondary);
}
} else {
s_logger.error("Hyperv manager isn't available. Couldn't check and copy the systemvm iso.");
}
}
}
// Send the cmd to hyperv agent.
final String ansStr = postHttpRequest(s_gson.toJson(cmd), agentUri);
if (ansStr == null) {
return Answer.createUnsupportedCommandAnswer(cmd);
}
// Only Answer instances are returned by remote agents.
// E.g. see Response.getAnswers()
final Answer[] result = s_gson.fromJson(ansStr, Answer[].class);
final String logResult = cleanPassword(s_gson.toJson(result));
s_logger.debug("executeRequest received response " + logResult);
if (result.length > 0) {
return result[0];
}
}
return answer;
}
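Everything the Hyper-V path does with StartCommand (and most other commands) hinges on a JSON round trip: the command is serialized with Gson, POSTed to /api/HypervResource/ plus the command class name, and the agent replies with a JSON array of Answer objects. The sketch below restates that exchange in isolation; postToAgent() is a hypothetical stand-in for the resource's postHttpRequest(), and a plain Gson is used only to show the shape of the exchange (the real s_gson is configured with CloudStack's type adapters).

import java.net.URI;
import java.net.URISyntaxException;

import com.cloud.agent.api.Answer;
import com.cloud.agent.api.Command;
import com.google.gson.Gson;

// Illustrative sketch of the serialize/POST/deserialize cycle used by executeRequest().
public final class HypervForwardingSketch {
    private static final Gson GSON = new Gson();

    static Answer forward(final Command cmd, final String agentIp, final int port) throws URISyntaxException {
        // The request path encodes the command's class name, mirroring the code above.
        final URI agentUri = new URI("https", null, agentIp, port,
                "/api/HypervResource/" + cmd.getClass().getName(), null, null);
        final String response = postToAgent(GSON.toJson(cmd), agentUri);
        if (response == null) {
            return Answer.createUnsupportedCommandAnswer(cmd);
        }
        // Agents always reply with an array of Answer instances; the first one is used.
        final Answer[] answers = GSON.fromJson(response, Answer[].class);
        return answers.length > 0 ? answers[0] : null;
    }

    // Hypothetical placeholder: a real implementation would issue the HTTPS POST.
    private static String postToAgent(final String json, final URI uri) {
        return null;
    }
}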
Use of com.cloud.agent.api.StartCommand in project cloudstack by apache.
The class VirtualMachineManagerImpl, method orchestrateStart.
@Override
public void orchestrateStart(final String vmUuid, final Map<VirtualMachineProfile.Param, Object> params, final DeploymentPlan planToDeploy, final DeploymentPlanner planner) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException {
final CallContext cctxt = CallContext.current();
final Account account = cctxt.getCallingAccount();
final User caller = cctxt.getCallingUser();
VMInstanceVO vm = _vmDao.findByUuid(vmUuid);
final VirtualMachineGuru vmGuru = getVmGuru(vm);
final Ternary<VMInstanceVO, ReservationContext, ItWorkVO> start = changeToStartState(vmGuru, vm, caller, account);
if (start == null) {
return;
}
vm = start.first();
final ReservationContext ctx = start.second();
ItWorkVO work = start.third();
VMInstanceVO startedVm = null;
final ServiceOfferingVO offering = _offeringDao.findById(vm.getId(), vm.getServiceOfferingId());
final VirtualMachineTemplate template = _entityMgr.findByIdIncludingRemoved(VirtualMachineTemplate.class, vm.getTemplateId());
if (s_logger.isDebugEnabled()) {
s_logger.debug("Trying to deploy VM, vm has dcId: " + vm.getDataCenterId() + " and podId: " + vm.getPodIdToDeployIn());
}
DataCenterDeployment plan = new DataCenterDeployment(vm.getDataCenterId(), vm.getPodIdToDeployIn(), null, null, null, null, ctx);
if (planToDeploy != null && planToDeploy.getDataCenterId() != 0) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("advanceStart: DeploymentPlan is provided, using dcId:" + planToDeploy.getDataCenterId() + ", podId: " + planToDeploy.getPodId() + ", clusterId: " + planToDeploy.getClusterId() + ", hostId: " + planToDeploy.getHostId() + ", poolId: " + planToDeploy.getPoolId());
}
plan = new DataCenterDeployment(planToDeploy.getDataCenterId(), planToDeploy.getPodId(), planToDeploy.getClusterId(), planToDeploy.getHostId(), planToDeploy.getPoolId(), planToDeploy.getPhysicalNetworkId(), ctx);
}
final HypervisorGuru hvGuru = _hvGuruMgr.getGuru(vm.getHypervisorType());
boolean canRetry = true;
ExcludeList avoids = null;
try {
final Journal journal = start.second().getJournal();
if (planToDeploy != null) {
avoids = planToDeploy.getAvoids();
}
if (avoids == null) {
avoids = new ExcludeList();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("Deploy avoids pods: " + avoids.getPodsToAvoid() + ", clusters: " + avoids.getClustersToAvoid() + ", hosts: " + avoids.getHostsToAvoid());
}
boolean planChangedByVolume = false;
boolean reuseVolume = true;
final DataCenterDeployment originalPlan = plan;
int retry = StartRetry.value();
while (retry-- != 0) {
if (reuseVolume) {
// edit plan if this vm's ROOT volume is in READY state already
final List<VolumeVO> vols = _volsDao.findReadyRootVolumesByInstance(vm.getId());
for (final VolumeVO vol : vols) {
// Make sure the templateId is unchanged. If it has changed, let the planner
// reassign a pool for the volume even if it is ready.
final Long volTemplateId = vol.getTemplateId();
if (volTemplateId != null && volTemplateId.longValue() != template.getId()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug(vol + " of " + vm + " is READY, but template ids don't match, let the planner reassign a new pool");
}
continue;
}
final StoragePool pool = (StoragePool) dataStoreMgr.getPrimaryDataStore(vol.getPoolId());
if (!pool.isInMaintenance()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Root volume is ready, need to place VM in volume's cluster");
}
final long rootVolDcId = pool.getDataCenterId();
final Long rootVolPodId = pool.getPodId();
final Long rootVolClusterId = pool.getClusterId();
if (planToDeploy != null && planToDeploy.getDataCenterId() != 0) {
final Long clusterIdSpecified = planToDeploy.getClusterId();
if (clusterIdSpecified != null && rootVolClusterId != null) {
if (rootVolClusterId.longValue() != clusterIdSpecified.longValue()) {
// The passed-in plan targets a different cluster than the ready root volume, so it cannot be satisfied.
if (s_logger.isDebugEnabled()) {
s_logger.debug("Cannot satisfy the deployment plan passed in since the ready Root volume is in different cluster. volume's cluster: " + rootVolClusterId + ", cluster specified: " + clusterIdSpecified);
}
throw new ResourceUnavailableException("Root volume is ready in different cluster, Deployment plan provided cannot be satisfied, unable to create a deployment for " + vm, Cluster.class, clusterIdSpecified);
}
}
plan = new DataCenterDeployment(planToDeploy.getDataCenterId(), planToDeploy.getPodId(), planToDeploy.getClusterId(), planToDeploy.getHostId(), vol.getPoolId(), null, ctx);
} else {
plan = new DataCenterDeployment(rootVolDcId, rootVolPodId, rootVolClusterId, null, vol.getPoolId(), null, ctx);
if (s_logger.isDebugEnabled()) {
s_logger.debug(vol + " is READY, changing deployment plan to use this pool's dcId: " + rootVolDcId + " , podId: " + rootVolPodId + " , and clusterId: " + rootVolClusterId);
}
planChangedByVolume = true;
}
}
}
}
final Account owner = _entityMgr.findById(Account.class, vm.getAccountId());
final VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vm, template, offering, owner, params);
DeployDestination dest = null;
try {
dest = _dpMgr.planDeployment(vmProfile, plan, avoids, planner);
} catch (final AffinityConflictException e2) {
s_logger.warn("Unable to create deployment, affinity rules associted to the VM conflict", e2);
throw new CloudRuntimeException("Unable to create deployment, affinity rules associted to the VM conflict");
}
if (dest == null) {
if (planChangedByVolume) {
plan = originalPlan;
planChangedByVolume = false;
//do not enter volume reuse for next retry, since we want to look for resources outside the volume's cluster
reuseVolume = false;
continue;
}
throw new InsufficientServerCapacityException("Unable to create a deployment for " + vmProfile, DataCenter.class, plan.getDataCenterId(), areAffinityGroupsAssociated(vmProfile));
}
if (dest != null) {
avoids.addHost(dest.getHost().getId());
journal.record("Deployment found ", vmProfile, dest);
}
long destHostId = dest.getHost().getId();
vm.setPodIdToDeployIn(dest.getPod().getId());
final Long cluster_id = dest.getCluster().getId();
final ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id, "cpuOvercommitRatio");
final ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id, "memoryOvercommitRatio");
//storing the value of overcommit in the vm_details table for doing a capacity check in case the cluster overcommit ratio is changed.
if (_uservmDetailsDao.findDetail(vm.getId(), "cpuOvercommitRatio") == null && (Float.parseFloat(cluster_detail_cpu.getValue()) > 1f || Float.parseFloat(cluster_detail_ram.getValue()) > 1f)) {
_uservmDetailsDao.addDetail(vm.getId(), "cpuOvercommitRatio", cluster_detail_cpu.getValue(), true);
_uservmDetailsDao.addDetail(vm.getId(), "memoryOvercommitRatio", cluster_detail_ram.getValue(), true);
} else if (_uservmDetailsDao.findDetail(vm.getId(), "cpuOvercommitRatio") != null) {
_uservmDetailsDao.addDetail(vm.getId(), "cpuOvercommitRatio", cluster_detail_cpu.getValue(), true);
_uservmDetailsDao.addDetail(vm.getId(), "memoryOvercommitRatio", cluster_detail_ram.getValue(), true);
}
vmProfile.setCpuOvercommitRatio(Float.parseFloat(cluster_detail_cpu.getValue()));
vmProfile.setMemoryOvercommitRatio(Float.parseFloat(cluster_detail_ram.getValue()));
StartAnswer startAnswer = null;
try {
if (!changeState(vm, Event.OperationRetry, destHostId, work, Step.Prepare)) {
throw new ConcurrentOperationException("Unable to update the state of the Virtual Machine " + vm.getUuid() + " oldstate: " + vm.getState() + "Event :" + Event.OperationRetry);
}
} catch (final NoTransitionException e1) {
throw new ConcurrentOperationException(e1.getMessage());
}
try {
if (s_logger.isDebugEnabled()) {
s_logger.debug("VM is being created in podId: " + vm.getPodIdToDeployIn());
}
_networkMgr.prepare(vmProfile, new DeployDestination(dest.getDataCenter(), dest.getPod(), null, null), ctx);
if (vm.getHypervisorType() != HypervisorType.BareMetal) {
volumeMgr.prepare(vmProfile, dest);
}
//since StorageMgr succeeded in volume creation, reuse Volume for further tries until current cluster has capacity
if (!reuseVolume) {
reuseVolume = true;
}
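// Build the command bundle: finalize the VM profile, translate it into a hypervisor-specific VirtualMachineTO, and wrap it in a StartCommand inside a Commands batch that stops on the first error.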
Commands cmds = null;
vmGuru.finalizeVirtualMachineProfile(vmProfile, dest, ctx);
final VirtualMachineTO vmTO = hvGuru.implement(vmProfile);
handlePath(vmTO.getDisks(), vm.getHypervisorType());
cmds = new Commands(Command.OnError.Stop);
cmds.addCommand(new StartCommand(vmTO, dest.getHost(), getExecuteInSequence(vm.getHypervisorType())));
vmGuru.finalizeDeployment(cmds, vmProfile, dest, ctx);
work = _workDao.findById(work.getId());
if (work == null || work.getStep() != Step.Prepare) {
throw new ConcurrentOperationException("Work steps have been changed: " + work);
}
_workDao.updateStep(work, Step.Starting);
_agentMgr.send(destHostId, cmds);
_workDao.updateStep(work, Step.Started);
startAnswer = cmds.getAnswer(StartAnswer.class);
if (startAnswer != null && startAnswer.getResult()) {
handlePath(vmTO.getDisks(), startAnswer.getIqnToPath());
final String host_guid = startAnswer.getHost_guid();
if (host_guid != null) {
final HostVO finalHost = _resourceMgr.findHostByGuid(host_guid);
if (finalHost == null) {
throw new CloudRuntimeException("Host Guid " + host_guid + " doesn't exist in DB, something went wrong while processing start answer: " + startAnswer);
}
destHostId = finalHost.getId();
}
if (vmGuru.finalizeStart(vmProfile, destHostId, cmds, ctx)) {
syncDiskChainChange(startAnswer);
if (!changeState(vm, Event.OperationSucceeded, destHostId, work, Step.Done)) {
s_logger.error("Unable to transition to a new state. VM uuid: " + vm.getUuid() + "VM oldstate:" + vm.getState() + "Event:" + Event.OperationSucceeded);
throw new ConcurrentOperationException("Failed to deploy VM" + vm.getUuid());
}
// Update GPU device capacity
final GPUDeviceTO gpuDevice = startAnswer.getVirtualMachine().getGpuDevice();
if (gpuDevice != null) {
_resourceMgr.updateGPUDetails(destHostId, gpuDevice.getGroupDetails());
}
// Remove the "deployvm" detail once the VM has started; it is only relevant while the VM is being deployed.
if (_uservmDetailsDao.findDetail(vm.getId(), "deployvm") != null) {
_uservmDetailsDao.removeDetail(vm.getId(), "deployvm");
}
startedVm = vm;
if (s_logger.isDebugEnabled()) {
s_logger.debug("Start completed for VM " + vm);
}
return;
} else {
if (s_logger.isDebugEnabled()) {
s_logger.info("The guru did not like the answers so stopping " + vm);
}
StopCommand stopCmd = new StopCommand(vm, getExecuteInSequence(vm.getHypervisorType()), false);
stopCmd.setControlIp(getControlNicIpForVM(vm));
final StopCommand cmd = stopCmd;
final Answer answer = _agentMgr.easySend(destHostId, cmd);
if (answer instanceof StopAnswer) {
final StopAnswer stopAns = (StopAnswer) answer;
if (vm.getType() == VirtualMachine.Type.User) {
final String platform = stopAns.getPlatform();
if (platform != null) {
final Map<String, String> vmmetadata = new HashMap<String, String>();
vmmetadata.put(vm.getInstanceName(), platform);
syncVMMetaData(vmmetadata);
}
}
}
if (answer == null || !answer.getResult()) {
s_logger.warn("Unable to stop " + vm + " due to " + (answer != null ? answer.getDetails() : "no answers"));
_haMgr.scheduleStop(vm, destHostId, WorkType.ForceStop);
throw new ExecutionException("Unable to stop this VM, " + vm.getUuid() + " so we are unable to retry the start operation");
}
throw new ExecutionException("Unable to start VM:" + vm.getUuid() + " due to error in finalizeStart, not retrying");
}
}
s_logger.info("Unable to start VM on " + dest.getHost() + " due to " + (startAnswer == null ? " no start answer" : startAnswer.getDetails()));
if (startAnswer != null && startAnswer.getContextParam("stopRetry") != null) {
break;
}
} catch (OperationTimedoutException e) {
s_logger.debug("Unable to send the start command to host " + dest.getHost() + " failed to start VM: " + vm.getUuid());
if (e.isActive()) {
_haMgr.scheduleStop(vm, destHostId, WorkType.CheckStop);
}
canRetry = false;
throw new AgentUnavailableException("Unable to start " + vm.getHostName(), destHostId, e);
} catch (final ResourceUnavailableException e) {
s_logger.info("Unable to contact resource.", e);
if (!avoids.add(e)) {
if (e.getScope() == Volume.class || e.getScope() == Nic.class) {
throw e;
} else {
s_logger.warn("unexpected ResourceUnavailableException : " + e.getScope().getName(), e);
throw e;
}
}
} catch (final InsufficientCapacityException e) {
s_logger.info("Insufficient capacity ", e);
if (!avoids.add(e)) {
if (e.getScope() == Volume.class || e.getScope() == Nic.class) {
throw e;
} else {
s_logger.warn("unexpected InsufficientCapacityException : " + e.getScope().getName(), e);
}
}
} catch (final ExecutionException e) {
s_logger.error("Failed to start instance " + vm, e);
throw new AgentUnavailableException("Unable to start instance due to " + e.getMessage(), destHostId, e);
} catch (final NoTransitionException e) {
s_logger.error("Failed to start instance " + vm, e);
throw new AgentUnavailableException("Unable to start instance due to " + e.getMessage(), destHostId, e);
} finally {
if (startedVm == null && canRetry) {
final Step prevStep = work.getStep();
_workDao.updateStep(work, Step.Release);
// If the previous step was Started or Starting and we got a valid answer
if ((prevStep == Step.Started || prevStep == Step.Starting) && startAnswer != null && startAnswer.getResult()) {
//TODO check the response of cleanup and record it in DB for retry
cleanup(vmGuru, vmProfile, work, Event.OperationFailed, false);
} else {
//if step is not starting/started, send cleanup command with force=true
cleanup(vmGuru, vmProfile, work, Event.OperationFailed, true);
}
}
}
}
} finally {
if (startedVm == null) {
if (canRetry) {
try {
changeState(vm, Event.OperationFailed, null, work, Step.Done);
} catch (final NoTransitionException e) {
throw new ConcurrentOperationException(e.getMessage());
}
}
}
if (planToDeploy != null) {
planToDeploy.setAvoids(avoids);
}
}
if (startedVm == null) {
throw new CloudRuntimeException("Unable to start instance '" + vm.getHostName() + "' (" + vm.getUuid() + "), see management server log for details");
}
}
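Stripped of the retry loop, state transitions and cleanup, the agent interaction at the heart of orchestrateStart() is a Commands batch carrying a single StartCommand, whose reply is read back as a StartAnswer. The sketch below restates only that exchange; the AgentManager, host and VirtualMachineTO parameters stand in for values the real method computes, and exception handling is reduced to a throws clause.

import com.cloud.agent.AgentManager;
import com.cloud.agent.api.Command;
import com.cloud.agent.api.StartAnswer;
import com.cloud.agent.api.StartCommand;
import com.cloud.agent.api.to.VirtualMachineTO;
import com.cloud.agent.manager.Commands;
import com.cloud.host.Host;

// Distilled sketch of the send/receive pattern in orchestrateStart(); not the full method.
final class StartDispatchSketch {
    static boolean sendStart(final AgentManager agentMgr, final long destHostId, final VirtualMachineTO vmTO,
            final Host host, final boolean executeInSequence) throws Exception {
        final Commands cmds = new Commands(Command.OnError.Stop);
        cmds.addCommand(new StartCommand(vmTO, host, executeInSequence));
        // May throw AgentUnavailableException or OperationTimedoutException, as handled above.
        agentMgr.send(destHostId, cmds);
        final StartAnswer startAnswer = cmds.getAnswer(StartAnswer.class);
        // A null or negative answer makes orchestrateStart() stop the VM and retry elsewhere.
        return startAnswer != null && startAnswer.getResult();
    }
}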
Use of com.cloud.agent.api.StartCommand in project cloudstack by apache.
The class VmwareResource, method executeRequest.
@Override
public Answer executeRequest(Command cmd) {
if (s_logger.isTraceEnabled())
s_logger.trace("Begin executeRequest(), cmd: " + cmd.getClass().getSimpleName());
Answer answer = null;
NDC.push(getCommandLogTitle(cmd));
try {
long cmdSequence = _cmdSequence++;
Date startTime = DateUtil.currentGMTTime();
PropertyMapDynamicBean mbean = new PropertyMapDynamicBean();
mbean.addProp("StartTime", DateUtil.getDateDisplayString(TimeZone.getDefault(), startTime));
mbean.addProp("Command", _gson.toJson(cmd));
mbean.addProp("Sequence", String.valueOf(cmdSequence));
mbean.addProp("Name", cmd.getClass().getSimpleName());
Class<? extends Command> clz = cmd.getClass();
if (cmd instanceof NetworkElementCommand) {
return _vrResource.executeRequest((NetworkElementCommand) cmd);
} else if (clz == ReadyCommand.class) {
answer = execute((ReadyCommand) cmd);
} else if (clz == GetHostStatsCommand.class) {
answer = execute((GetHostStatsCommand) cmd);
} else if (clz == GetVmStatsCommand.class) {
answer = execute((GetVmStatsCommand) cmd);
} else if (clz == GetVmDiskStatsCommand.class) {
answer = execute((GetVmDiskStatsCommand) cmd);
} else if (clz == CheckHealthCommand.class) {
answer = execute((CheckHealthCommand) cmd);
} else if (clz == StopCommand.class) {
answer = execute((StopCommand) cmd);
} else if (clz == RebootRouterCommand.class) {
answer = execute((RebootRouterCommand) cmd);
} else if (clz == RebootCommand.class) {
answer = execute((RebootCommand) cmd);
} else if (clz == CheckVirtualMachineCommand.class) {
answer = execute((CheckVirtualMachineCommand) cmd);
} else if (clz == PrepareForMigrationCommand.class) {
answer = execute((PrepareForMigrationCommand) cmd);
} else if (clz == MigrateCommand.class) {
answer = execute((MigrateCommand) cmd);
} else if (clz == MigrateWithStorageCommand.class) {
answer = execute((MigrateWithStorageCommand) cmd);
} else if (clz == MigrateVolumeCommand.class) {
answer = execute((MigrateVolumeCommand) cmd);
} else if (clz == DestroyCommand.class) {
answer = execute((DestroyCommand) cmd);
} else if (clz == CreateStoragePoolCommand.class) {
return execute((CreateStoragePoolCommand) cmd);
} else if (clz == ModifyTargetsCommand.class) {
answer = execute((ModifyTargetsCommand) cmd);
} else if (clz == ModifyStoragePoolCommand.class) {
answer = execute((ModifyStoragePoolCommand) cmd);
} else if (clz == DeleteStoragePoolCommand.class) {
answer = execute((DeleteStoragePoolCommand) cmd);
} else if (clz == CopyVolumeCommand.class) {
answer = execute((CopyVolumeCommand) cmd);
} else if (clz == AttachIsoCommand.class) {
answer = execute((AttachIsoCommand) cmd);
} else if (clz == ValidateSnapshotCommand.class) {
answer = execute((ValidateSnapshotCommand) cmd);
} else if (clz == ManageSnapshotCommand.class) {
answer = execute((ManageSnapshotCommand) cmd);
} else if (clz == BackupSnapshotCommand.class) {
answer = execute((BackupSnapshotCommand) cmd);
} else if (clz == CreateVolumeFromSnapshotCommand.class) {
answer = execute((CreateVolumeFromSnapshotCommand) cmd);
} else if (clz == CreatePrivateTemplateFromVolumeCommand.class) {
answer = execute((CreatePrivateTemplateFromVolumeCommand) cmd);
} else if (clz == CreatePrivateTemplateFromSnapshotCommand.class) {
answer = execute((CreatePrivateTemplateFromSnapshotCommand) cmd);
} else if (clz == UpgradeSnapshotCommand.class) {
answer = execute((UpgradeSnapshotCommand) cmd);
} else if (clz == GetStorageStatsCommand.class) {
answer = execute((GetStorageStatsCommand) cmd);
} else if (clz == PrimaryStorageDownloadCommand.class) {
answer = execute((PrimaryStorageDownloadCommand) cmd);
} else if (clz == GetVncPortCommand.class) {
answer = execute((GetVncPortCommand) cmd);
} else if (clz == SetupCommand.class) {
answer = execute((SetupCommand) cmd);
} else if (clz == MaintainCommand.class) {
answer = execute((MaintainCommand) cmd);
} else if (clz == PingTestCommand.class) {
answer = execute((PingTestCommand) cmd);
} else if (clz == CheckOnHostCommand.class) {
answer = execute((CheckOnHostCommand) cmd);
} else if (clz == ModifySshKeysCommand.class) {
answer = execute((ModifySshKeysCommand) cmd);
} else if (clz == NetworkUsageCommand.class) {
answer = execute((NetworkUsageCommand) cmd);
} else if (clz == StartCommand.class) {
answer = execute((StartCommand) cmd);
} else if (clz == CheckSshCommand.class) {
answer = execute((CheckSshCommand) cmd);
} else if (clz == CheckNetworkCommand.class) {
answer = execute((CheckNetworkCommand) cmd);
} else if (clz == PlugNicCommand.class) {
answer = execute((PlugNicCommand) cmd);
} else if (clz == UnPlugNicCommand.class) {
answer = execute((UnPlugNicCommand) cmd);
} else if (cmd instanceof CreateVMSnapshotCommand) {
return execute((CreateVMSnapshotCommand) cmd);
} else if (cmd instanceof DeleteVMSnapshotCommand) {
return execute((DeleteVMSnapshotCommand) cmd);
} else if (cmd instanceof RevertToVMSnapshotCommand) {
return execute((RevertToVMSnapshotCommand) cmd);
} else if (clz == ResizeVolumeCommand.class) {
return execute((ResizeVolumeCommand) cmd);
} else if (clz == UnregisterVMCommand.class) {
return execute((UnregisterVMCommand) cmd);
} else if (cmd instanceof StorageSubSystemCommand) {
checkStorageProcessorAndHandlerNfsVersionAttribute((StorageSubSystemCommand) cmd);
return storageHandler.handleStorageCommands((StorageSubSystemCommand) cmd);
} else if (clz == ScaleVmCommand.class) {
return execute((ScaleVmCommand) cmd);
} else if (clz == PvlanSetupCommand.class) {
return execute((PvlanSetupCommand) cmd);
} else if (clz == GetVmIpAddressCommand.class) {
return execute((GetVmIpAddressCommand) cmd);
} else if (clz == UnregisterNicCommand.class) {
answer = execute((UnregisterNicCommand) cmd);
} else {
answer = Answer.createUnsupportedCommandAnswer(cmd);
}
if (cmd.getContextParam("checkpoint") != null) {
answer.setContextParam("checkpoint", cmd.getContextParam("checkpoint"));
}
Date doneTime = DateUtil.currentGMTTime();
mbean.addProp("DoneTime", DateUtil.getDateDisplayString(TimeZone.getDefault(), doneTime));
mbean.addProp("Answer", _gson.toJson(answer));
synchronized (this) {
try {
JmxUtil.registerMBean("VMware " + _morHyperHost.getValue(), "Command " + cmdSequence + "-" + cmd.getClass().getSimpleName(), mbean);
_cmdMBeans.add(mbean);
if (_cmdMBeans.size() >= MazCmdMBean) {
PropertyMapDynamicBean mbeanToRemove = _cmdMBeans.get(0);
_cmdMBeans.remove(0);
JmxUtil.unregisterMBean("VMware " + _morHyperHost.getValue(), "Command " + mbeanToRemove.getProp("Sequence") + "-" + mbeanToRemove.getProp("Name"));
}
} catch (Exception e) {
if (s_logger.isTraceEnabled())
s_logger.trace("Unable to register JMX monitoring due to exception " + ExceptionUtil.toString(e));
}
}
} finally {
recycleServiceContext();
NDC.pop();
}
if (s_logger.isTraceEnabled())
s_logger.trace("End executeRequest(), cmd: " + cmd.getClass().getSimpleName());
return answer;
}
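Routing in VmwareResource is a class-equality chain, so StartCommand simply lands in its execute((StartCommand) cmd) branch near the end of the chain. Purely as a hypothetical illustration of the same routing idea, not code that exists in VmwareResource, a registry keyed by command class could replace the chain, as sketched below.

import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

import com.cloud.agent.api.Answer;
import com.cloud.agent.api.Command;

// Hypothetical map-based dispatch; the real VmwareResource uses the if/else chain shown above.
final class CommandDispatchSketch {
    private final Map<Class<? extends Command>, Function<Command, Answer>> handlers = new HashMap<>();

    <T extends Command> void register(final Class<T> clazz, final Function<T, Answer> handler) {
        handlers.put(clazz, cmd -> handler.apply(clazz.cast(cmd)));
    }

    Answer dispatch(final Command cmd) {
        final Function<Command, Answer> handler = handlers.get(cmd.getClass());
        // Unknown command classes get the same fallback the chain above uses.
        return handler != null ? handler.apply(cmd) : Answer.createUnsupportedCommandAnswer(cmd);
    }
}
// Example registration: dispatcher.register(StartCommand.class, cmd -> execute(cmd));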
Use of com.cloud.agent.api.StartCommand in project cloudstack by apache.
The class HypervDirectConnectResourceTest, method simpleVmStart.
private StartAnswer simpleVmStart(final String sample) {
StartCommand cmd = s_gson.fromJson(sample, StartCommand.class);
s_logger.info("StartCommand sample " + s_gson.toJson(cmd));
StartAnswer ans = (StartAnswer) s_hypervresource.executeRequest(cmd);
return ans;
}
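A caller feeds this helper raw JSON and gets the StartAnswer produced by the executeRequest() dispatch shown earlier. A hypothetical test using it could look like the sketch below; the JSON fragment is an invented, heavily abbreviated payload, not one of the real sample files the Hyper-V tests load.

// Hypothetical usage of simpleVmStart() inside HypervDirectConnectResourceTest; the JSON
// payload below is illustrative only and far smaller than the project's real samples.
@Test
public void startVmFromAbbreviatedJsonSample() {
    final String sample = "{\"vm\":{\"id\":17,\"name\":\"i-2-17-VM\",\"type\":\"User\"},"
            + "\"executeInSequence\":false}";
    final StartAnswer ans = simpleVmStart(sample);
    assertNotNull(ans);
}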