use of com.cloud.agent.api.to.VirtualMachineTO in project cosmic by MissionCriticalCloud.
the class XenServer610MigrateWithStorageSendCommandWrapper method execute.
@Override
public Answer execute(final MigrateWithStorageSendCommand command, final XenServer610Resource xenServer610Resource) {
    final Connection connection = xenServer610Resource.getConnection();
    final VirtualMachineTO vmSpec = command.getVirtualMachine();
    final List<Pair<VolumeTO, Object>> volumeToSr = command.getVolumeToSr();
    final List<Pair<NicTO, Object>> nicToNetwork = command.getNicToNetwork();
    final Map<String, String> token = command.getToken();
    final String vmName = vmSpec.getName();
    Task task = null;
    try {
        // In a cluster management server setup, the migrate with storage receive and send
        // commands and answers may have to be forwarded to another management server. This
        // happens when the host/resource on which the command has to be executed is owned
        // by the second management server. The serialization/deserialization of the command
        // and answers fails as the xapi SR and Network class types aren't understood by the
        // agent attache. Serialize the SR and Network objects here to a string and pass them
        // in the answer object. They'll be deserialized and the objects re-created in the
        // migrate with storage send command execution.
        final Gson gson = new Gson();
        final Map<String, String> other = new HashMap<>();
        other.put("live", "true");
        // Create the VDI map which tells which volumes of the VM need to go
        // on which SR on the destination.
        final Map<VDI, SR> vdiMap = new HashMap<>();
        for (final Pair<VolumeTO, Object> entry : volumeToSr) {
            if (entry.second() instanceof SR) {
                final SR sr = (SR) entry.second();
                final VDI vdi = xenServer610Resource.getVDIbyUuid(connection, entry.first().getPath());
                vdiMap.put(vdi, sr);
            } else {
                throw new CloudRuntimeException("The object " + entry.second() + " passed is not of type SR.");
            }
        }
        final Set<VM> vms = VM.getByNameLabel(connection, vmSpec.getName());
        VM vmToMigrate = null;
        if (vms != null) {
            vmToMigrate = vms.iterator().next();
        }
        // Create the VIF map.
        final Map<VIF, Network> vifMap = new HashMap<>();
        for (final Pair<NicTO, Object> entry : nicToNetwork) {
            if (entry.second() instanceof Network) {
                final Network network = (Network) entry.second();
                final VIF vif = xenServer610Resource.getVifByMac(connection, vmToMigrate, entry.first().getMac());
                vifMap.put(vif, network);
            } else {
                throw new CloudRuntimeException("The object " + entry.second() + " passed is not of type Network.");
            }
        }
        // Check that migration with storage is possible.
        task = vmToMigrate.assertCanMigrateAsync(connection, token, true, vdiMap, vifMap, other);
        try {
            // Poll every second.
            final long timeout = xenServer610Resource.getMigrateWait() * 1000L;
            xenServer610Resource.waitForTask(connection, task, 1000, timeout);
            xenServer610Resource.checkForSuccess(connection, task);
        } catch (final Types.HandleInvalid e) {
            s_logger.error("Error while checking if vm " + vmName + " can be migrated.", e);
            throw new CloudRuntimeException("Error while checking if vm " + vmName + " can be migrated.", e);
        }
        // Migrate now.
        task = vmToMigrate.migrateSendAsync(connection, token, true, vdiMap, vifMap, other);
        try {
            // Poll every second.
            final long timeout = xenServer610Resource.getMigrateWait() * 1000L;
            xenServer610Resource.waitForTask(connection, task, 1000, timeout);
            xenServer610Resource.checkForSuccess(connection, task);
        } catch (final Types.HandleInvalid e) {
            s_logger.error("Error while migrating vm " + vmName, e);
            throw new CloudRuntimeException("Error while migrating vm " + vmName, e);
        }
        final Set<VolumeTO> volumeToSet = null;
        return new MigrateWithStorageSendAnswer(command, volumeToSet);
    } catch (final CloudRuntimeException e) {
        s_logger.error("Migration of vm " + vmName + " with storage failed due to " + e.toString(), e);
        return new MigrateWithStorageSendAnswer(command, e);
    } catch (final Exception e) {
        s_logger.error("Migration of vm " + vmName + " with storage failed due to " + e.toString(), e);
        return new MigrateWithStorageSendAnswer(command, e);
    } finally {
        if (task != null) {
            try {
                task.destroy(connection);
            } catch (final Exception e) {
                s_logger.debug("Unable to destroy task " + task.toString() + " on host " + xenServer610Resource.getHost().getUuid() + " due to " + e.toString());
            }
        }
    }
}
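The assert-then-migrate flow above runs the same poll-then-verify sequence twice, once after assertCanMigrateAsync and once after migrateSendAsync. As a minimal sketch, assuming a private helper placed inside the same wrapper class (so the Connection, Task, Types and CloudRuntimeException imports already present in that file apply), the duplication could be factored out as below; the helper name and the errorContext parameter are hypothetical, the resource calls mirror the signatures used in the method above.
// Hypothetical helper (not part of cosmic): factors out the poll-then-verify
// sequence used above for both assertCanMigrateAsync and migrateSendAsync.
private static void waitAndCheck(final XenServer610Resource resource, final Connection connection,
                                 final Task task, final String errorContext) throws Exception {
    try {
        // Poll every second until the task completes or the migrate wait timeout expires.
        final long timeout = resource.getMigrateWait() * 1000L;
        resource.waitForTask(connection, task, 1000, timeout);
        resource.checkForSuccess(connection, task);
    } catch (final Types.HandleInvalid e) {
        s_logger.error("Error while " + errorContext, e);
        throw new CloudRuntimeException("Error while " + errorContext, e);
    }
}
Each of the two try/catch blocks in execute() would then collapse into a single call such as waitAndCheck(xenServer610Resource, connection, task, "checking if vm " + vmName + " can be migrated.").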
use of com.cloud.agent.api.to.VirtualMachineTO in project cosmic by MissionCriticalCloud.
the class NotAValidCommand method testMigrateCommand.
@Test
public void testMigrateCommand() {
    final VirtualMachineTO machineTO = Mockito.mock(VirtualMachineTO.class);
    final MigrateCommand migrateCommand = new MigrateCommand("Test", "127.0.0.1", false, machineTO, false);
    final CitrixRequestWrapper wrapper = CitrixRequestWrapper.getInstance();
    assertNotNull(wrapper);
    final Answer answer = wrapper.execute(migrateCommand, citrixResourceBase);
    verify(citrixResourceBase, times(1)).getConnection();
    assertFalse(answer.getResult());
}
use of com.cloud.agent.api.to.VirtualMachineTO in project cosmic by MissionCriticalCloud.
the class CitrixStartCommandWrapper method execute.
@Override
public Answer execute(final StartCommand command, final CitrixResourceBase citrixResourceBase) {
    final Connection conn = citrixResourceBase.getConnection();
    final VirtualMachineTO vmSpec = command.getVirtualMachine();
    final String vmName = vmSpec.getName();
    VmPowerState state = VmPowerState.HALTED;
    VM vm = null;
    // if a VDI is created, record its UUID to send back to the CS MS
    final Map<String, String> iqnToPath = new HashMap<>();
    try {
        final Set<VM> vms = VM.getByNameLabel(conn, vmName);
        if (vms != null) {
            for (final VM v : vms) {
                final VM.Record vRec = v.getRecord(conn);
                if (vRec.powerState == VmPowerState.HALTED) {
                    v.destroy(conn);
                } else if (vRec.powerState == VmPowerState.RUNNING) {
                    final String host = vRec.residentOn.getUuid(conn);
                    final String msg = "VM " + vmName + " is running on host " + host;
                    s_logger.debug(msg);
                    return new StartAnswer(command, msg, host);
                } else {
                    final String msg = "There is already a VM having the same name " + vmName + " vm record " + vRec.toString();
                    s_logger.warn(msg);
                    return new StartAnswer(command, msg);
                }
            }
        }
        s_logger.debug("1. The VM " + vmName + " is in Starting state.");
        final Host host = Host.getByUuid(conn, citrixResourceBase.getHost().getUuid());
        vm = citrixResourceBase.createVmFromTemplate(conn, vmSpec, host);
        final GPUDeviceTO gpuDevice = vmSpec.getGpuDevice();
        if (gpuDevice != null) {
            s_logger.debug("Creating VGPU of VGPU type: " + gpuDevice.getVgpuType() + " in GPU group " + gpuDevice.getGpuGroup() + " for VM " + vmName);
            citrixResourceBase.createVGPU(conn, command, vm, gpuDevice);
        }
        if (vmSpec.getType() != VirtualMachine.Type.User) {
            citrixResourceBase.createPatchVbd(conn, vmName, vm);
        }
        // Put the cdrom at the first place in the list.
        final List<DiskTO> disks = new ArrayList<>(vmSpec.getDisks().length);
        int index = 0;
        for (final DiskTO disk : vmSpec.getDisks()) {
            if (Volume.Type.ISO.equals(disk.getType())) {
                disks.add(0, disk);
            } else {
                disks.add(index, disk);
            }
            index++;
        }
        for (final DiskTO disk : disks) {
            final VDI newVdi = citrixResourceBase.prepareManagedDisk(conn, disk, vmName);
            if (newVdi != null) {
                final String path = newVdi.getUuid(conn);
                iqnToPath.put(disk.getDetails().get(DiskTO.IQN), path);
            }
            citrixResourceBase.createVbd(conn, disk, vmName, vm, vmSpec.getBootloader(), newVdi);
        }
        for (final NicTO nic : vmSpec.getNics()) {
            citrixResourceBase.createVif(conn, vmName, vm, vmSpec, nic);
        }
        citrixResourceBase.startVM(conn, host, vm, vmName);
        state = VmPowerState.RUNNING;
        final StartAnswer startAnswer = new StartAnswer(command);
        startAnswer.setIqnToPath(iqnToPath);
        return startAnswer;
    } catch (final Exception e) {
        s_logger.warn("Catch Exception: " + e.getClass().toString() + " due to " + e.toString(), e);
        final String msg = citrixResourceBase.handleVmStartFailure(conn, vmName, vm, "", e);
        final StartAnswer startAnswer = new StartAnswer(command, msg);
        startAnswer.setIqnToPath(iqnToPath);
        return startAnswer;
    } finally {
        if (state != VmPowerState.HALTED) {
            s_logger.debug("2. The VM " + vmName + " is in " + state + " state.");
        } else {
            s_logger.debug("The VM is in stopped state, detected problem during startup: " + vmName);
        }
    }
}
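The disk-ordering loop in the start wrapper above inserts any ISO disk at index 0 while the remaining disks keep their original relative order, so the cdrom is processed first by the VBD creation loop. The following standalone illustration of that reordering uses plain strings instead of DiskTO and is purely hypothetical, not cosmic code.
import java.util.ArrayList;
import java.util.List;

// Standalone illustration of the reordering done in CitrixStartCommandWrapper above:
// an "ISO" entry is inserted at index 0, everything else keeps its relative order.
public final class DiskOrderDemo {

    public static void main(final String[] args) {
        final String[] specDisks = {"ROOT", "DATADISK", "ISO"};

        final List<String> disks = new ArrayList<>(specDisks.length);
        int index = 0;
        for (final String disk : specDisks) {
            if ("ISO".equals(disk)) {
                disks.add(0, disk);   // cdrom goes first
            } else {
                disks.add(index, disk);
            }
            index++;
        }

        // Prints [ISO, ROOT, DATADISK]
        System.out.println(disks);
    }
}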
use of com.cloud.agent.api.to.VirtualMachineTO in project cosmic by MissionCriticalCloud.
the class CitrixPrepareForMigrationCommandWrapper method execute.
@Override
public Answer execute(final PrepareForMigrationCommand command, final CitrixResourceBase citrixResourceBase) {
    final Connection conn = citrixResourceBase.getConnection();
    final VirtualMachineTO vm = command.getVirtualMachine();
    final List<String[]> vmDataList = vm.getVmData();
    String configDriveLabel = vm.getConfigDriveLabel();
    if (configDriveLabel == null) {
        configDriveLabel = "config";
    }
    if (s_logger.isDebugEnabled()) {
        s_logger.debug("Preparing host for migrating " + vm);
    }
    final NicTO[] nics = vm.getNics();
    try {
        citrixResourceBase.prepareISO(conn, vm.getName(), vmDataList, configDriveLabel);
        for (final NicTO nic : nics) {
            citrixResourceBase.getNetwork(conn, nic);
        }
        s_logger.debug("4. The VM " + vm.getName() + " is in Migrating state");
        return new PrepareForMigrationAnswer(command);
    } catch (final Exception e) {
        s_logger.warn("Catch Exception " + e.getClass().getName() + " prepare for migration failed due to " + e.toString(), e);
        return new PrepareForMigrationAnswer(command, e);
    }
}
use of com.cloud.agent.api.to.VirtualMachineTO in project cosmic by MissionCriticalCloud.
the class HypervisorGuruBase method toVirtualMachineTO.
protected VirtualMachineTO toVirtualMachineTO(final VirtualMachineProfile vmProfile) {
    final ServiceOffering offering = _serviceOfferingDao.findById(vmProfile.getId(), vmProfile.getServiceOfferingId());
    final VirtualMachine vm = vmProfile.getVirtualMachine();
    final Long minMemory = (long) (offering.getRamSize() / vmProfile.getMemoryOvercommitRatio());
    final VirtualMachineTO to = new VirtualMachineTO(vm.getId(), vm.getInstanceName(), vm.getType(), offering.getCpu(), minMemory * 1024l * 1024l, offering.getRamSize() * 1024l * 1024l, null, null, vm.isHaEnabled(), vm.limitCpuUse(), vm.getVncPassword());
    to.setBootArgs(vmProfile.getBootArgs());
    final List<NicProfile> nicProfiles = vmProfile.getNics();
    final NicTO[] nics = new NicTO[nicProfiles.size()];
    int i = 0;
    for (final NicProfile nicProfile : nicProfiles) {
        nics[i++] = toNicTO(nicProfile);
    }
    to.setNics(nics);
    to.setDisks(vmProfile.getDisks().toArray(new DiskTO[vmProfile.getDisks().size()]));
    if (vmProfile.getTemplate().getBits() == 32) {
        to.setArch("i686");
    } else {
        to.setArch("x86_64");
    }
    final Map<String, String> detailsInVm = _userVmDetailsDao.listDetailsKeyPairs(vm.getId());
    if (detailsInVm != null) {
        to.setDetails(detailsInVm);
    }
    // Set GPU details
    ServiceOfferingDetailsVO offeringDetail;
    if ((offeringDetail = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.vgpuType.toString())) != null) {
        final ServiceOfferingDetailsVO groupName = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.pciDevice.toString());
        to.setGpuDevice(_resourceMgr.getGPUDevice(vm.getHostId(), groupName.getValue(), offeringDetail.getValue()));
    }
    // Workaround to make sure the TO has the UUID we need for Nicira integration
    final VMInstanceVO vmInstance = _virtualMachineDao.findById(to.getId());
    // Check if XS tools are present in the VM and the dynamic scaling feature is enabled (per zone/global)
    final Boolean isDynamicallyScalable = vmInstance.isDynamicallyScalable() && UserVmManager.EnableDynamicallyScaleVm.valueIn(vm.getDataCenterId());
    to.setEnableDynamicallyScaleVm(isDynamicallyScalable);
    to.setUuid(vmInstance.getUuid());
    to.setVmData(vmProfile.getVmData());
    to.setConfigDriveLabel(vmProfile.getConfigDriveLabel());
    to.setConfigDriveIsoRootFolder(vmProfile.getConfigDriveIsoRootFolder());
    to.setConfigDriveIsoFile(vmProfile.getConfigDriveIsoFile());
    final MetadataTO metadataTO = new MetadataTO();
    final DomainVO domain = _domainDao.findById(vm.getDomainId());
    metadataTO.setDomainUuid(domain.getUuid());
    to.setMetadata(metadataTO);
    return to;
}
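The memory values passed to the VirtualMachineTO constructor above are derived from the offering's RAM size (in MB) and the profile's memory overcommit ratio: the minimum RAM is ramSize divided by the overcommit ratio, and both the minimum and maximum are then converted to bytes by multiplying by 1024 * 1024. The following worked example of that arithmetic is a hypothetical standalone sketch; the 4096 MB offering and 2.0 overcommit ratio are illustrative values, not cosmic defaults.
// Hypothetical worked example of the memory sizing in toVirtualMachineTO() above.
public final class MemorySizingDemo {

    public static void main(final String[] args) {
        final int ramSizeMb = 4096;          // stands in for offering.getRamSize(), in MB
        final float overcommitRatio = 2.0f;  // stands in for vmProfile.getMemoryOvercommitRatio()

        final long minMemoryMb = (long) (ramSizeMb / overcommitRatio);
        final long minRamBytes = minMemoryMb * 1024L * 1024L;
        final long maxRamBytes = ramSizeMb * 1024L * 1024L;

        // 2048 MB minimum -> 2147483648 bytes; 4096 MB maximum -> 4294967296 bytes
        System.out.println("minRam=" + minRamBytes + " bytes, maxRam=" + maxRamBytes + " bytes");
    }
}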