use of com.cloud.hypervisor.kvm.resource.VifDriver in project cloudstack by apache.
the class LibvirtStopCommandWrapper method execute.
@Override
public Answer execute(final StopCommand command, final LibvirtComputingResource libvirtComputingResource) {
final String vmName = command.getVmName();
final LibvirtUtilitiesHelper libvirtUtilitiesHelper = libvirtComputingResource.getLibvirtUtilitiesHelper();
if (command.checkBeforeCleanup()) {
try {
final Connect conn = libvirtUtilitiesHelper.getConnectionByVmName(vmName);
final Domain vm = conn.domainLookupByName(command.getVmName());
if (vm != null && vm.getInfo().state == DomainState.VIR_DOMAIN_RUNNING) {
return new StopAnswer(command, "vm is still running on host", false);
}
} catch (final Exception e) {
s_logger.debug("Failed to get vm status in case of checkboforecleanup is true", e);
}
}
File pemFile = new File(LibvirtComputingResource.SSHPRVKEYPATH);
try {
if (vmName.startsWith("s-") || vmName.startsWith("v-")) {
//move the command line file to backup.
s_logger.debug("backing up the cmdline");
try {
Pair<Boolean, String> ret = SshHelper.sshExecute(command.getControlIp(), 3922, "root", pemFile, null, "mv -f " + CMDLINE_PATH + " " + CMDLINE_BACKUP_PATH);
if (!ret.first()) {
s_logger.debug("Failed to backup cmdline file due to " + ret.second());
}
} catch (Exception e) {
s_logger.debug("Failed to backup cmdline file due to " + e.getMessage());
}
}
final Connect conn = libvirtUtilitiesHelper.getConnectionByVmName(vmName);
final List<DiskDef> disks = libvirtComputingResource.getDisks(conn, vmName);
final List<InterfaceDef> ifaces = libvirtComputingResource.getInterfaces(conn, vmName);
libvirtComputingResource.destroyNetworkRulesForVM(conn, vmName);
final String result = libvirtComputingResource.stopVM(conn, vmName);
if (result == null) {
for (final DiskDef disk : disks) {
libvirtComputingResource.cleanupDisk(disk);
}
for (final InterfaceDef iface : ifaces) {
// We don't know which "traffic type" is associated with
// each interface at this point, so inform all vif drivers
for (final VifDriver vifDriver : libvirtComputingResource.getAllVifDrivers()) {
vifDriver.unplug(iface);
}
}
}
return new StopAnswer(command, result, true);
} catch (final LibvirtException e) {
s_logger.debug("unable to stop VM:" + vmName + " due to" + e.getMessage());
try {
if (vmName.startsWith("s-") || vmName.startsWith("v-"))
s_logger.debug("restoring cmdline file from backup");
Pair<Boolean, String> ret = SshHelper.sshExecute(command.getControlIp(), 3922, "root", pemFile, null, "mv " + CMDLINE_BACKUP_PATH + " " + CMDLINE_PATH);
if (!ret.first()) {
s_logger.debug("unable to restore cmdline due to " + ret.second());
}
} catch (final Exception ex) {
s_logger.debug("unable to restore cmdline due to:" + ex.getMessage());
}
return new StopAnswer(command, e.getMessage(), false);
}
}
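The stop path above shows the pattern this page is about: once the domain is stopped, the wrapper no longer knows which traffic type each interface belonged to, so it offers every interface to every registered VifDriver for unplugging. Below is a minimal sketch of that loop pulled out into a helper; the class name, method name and import locations are assumptions made for illustration, not part of the project.
import java.util.List;

import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.InterfaceDef;
import com.cloud.hypervisor.kvm.resource.VifDriver;

// Hypothetical helper mirroring the cleanup loop used by the wrappers on this page.
public final class VifCleanupSketch {

    private VifCleanupSketch() {
    }

    // The traffic type of each interface is unknown at teardown time, so every
    // registered VifDriver is informed and decides for itself whether the
    // interface is one it manages.
    public static void unplugFromAllDrivers(final LibvirtComputingResource resource, final List<InterfaceDef> ifaces) {
        for (final InterfaceDef iface : ifaces) {
            for (final VifDriver vifDriver : resource.getAllVifDrivers()) {
                vifDriver.unplug(iface);
            }
        }
    }
}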
use of com.cloud.hypervisor.kvm.resource.VifDriver in project cloudstack by apache.
the class LibvirtUnPlugNicCommandWrapper method execute.
@Override
public Answer execute(final UnPlugNicCommand command, final LibvirtComputingResource libvirtComputingResource) {
final NicTO nic = command.getNic();
final String vmName = command.getVmName();
Domain vm = null;
try {
final LibvirtUtilitiesHelper libvirtUtilitiesHelper = libvirtComputingResource.getLibvirtUtilitiesHelper();
final Connect conn = libvirtUtilitiesHelper.getConnectionByVmName(vmName);
vm = libvirtComputingResource.getDomain(conn, vmName);
final List<InterfaceDef> pluggedNics = libvirtComputingResource.getInterfaces(conn, vmName);
for (final InterfaceDef pluggedNic : pluggedNics) {
if (pluggedNic.getMacAddress().equalsIgnoreCase(nic.getMac())) {
vm.detachDevice(pluggedNic.toString());
// We don't know which "traffic type" is associated with
// each interface at this point, so inform all vif drivers
for (final VifDriver vifDriver : libvirtComputingResource.getAllVifDrivers()) {
vifDriver.unplug(pluggedNic);
}
return new UnPlugNicAnswer(command, true, "success");
}
}
return new UnPlugNicAnswer(command, true, "success");
} catch (final LibvirtException e) {
final String msg = " Unplug Nic failed due to " + e.toString();
s_logger.warn(msg, e);
return new UnPlugNicAnswer(command, false, msg);
} finally {
if (vm != null) {
try {
vm.free();
} catch (final LibvirtException l) {
s_logger.trace("Ignoring libvirt error.", l);
}
}
}
}
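The unplug wrapper first locates the guest interface whose MAC address matches the NIC in the command, detaches that interface's XML from the domain, and only then informs the VifDrivers. A small sketch of the lookup step is shown below; the helper name is hypothetical, while the case-insensitive MAC comparison comes straight from the wrapper. Note that the wrapper answers success even when no interface matches, so unplugging an absent NIC is effectively idempotent.
// Hypothetical lookup helper, not project code: find the plugged interface whose
// MAC matches the NIC that should be unplugged.
static InterfaceDef findByMac(final List<InterfaceDef> pluggedNics, final String mac) {
    for (final InterfaceDef pluggedNic : pluggedNics) {
        // MAC addresses are compared case-insensitively, as in the wrapper above.
        if (pluggedNic.getMacAddress().equalsIgnoreCase(mac)) {
            return pluggedNic;
        }
    }
    // No match: the wrapper still returns a successful UnPlugNicAnswer in this case.
    return null;
}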
use of com.cloud.hypervisor.kvm.resource.VifDriver in project cloudstack by apache.
the class LibvirtPlugNicCommandWrapper method execute.
@Override
public Answer execute(final PlugNicCommand command, final LibvirtComputingResource libvirtComputingResource) {
final NicTO nic = command.getNic();
final String vmName = command.getVmName();
Domain vm = null;
try {
final LibvirtUtilitiesHelper libvirtUtilitiesHelper = libvirtComputingResource.getLibvirtUtilitiesHelper();
final Connect conn = libvirtUtilitiesHelper.getConnectionByVmName(vmName);
vm = libvirtComputingResource.getDomain(conn, vmName);
final List<InterfaceDef> pluggedNics = libvirtComputingResource.getInterfaces(conn, vmName);
Integer nicnum = 0;
for (final InterfaceDef pluggedNic : pluggedNics) {
if (pluggedNic.getMacAddress().equalsIgnoreCase(nic.getMac())) {
s_logger.debug("found existing nic for mac " + pluggedNic.getMacAddress() + " at index " + nicnum);
return new PlugNicAnswer(command, true, "success");
}
nicnum++;
}
final VifDriver vifDriver = libvirtComputingResource.getVifDriver(nic.getType());
final InterfaceDef interfaceDef = vifDriver.plug(nic, "Other PV", "");
vm.attachDevice(interfaceDef.toString());
return new PlugNicAnswer(command, true, "success");
} catch (final LibvirtException e) {
final String msg = " Plug Nic failed due to " + e.toString();
s_logger.warn(msg, e);
return new PlugNicAnswer(command, false, msg);
} catch (final InternalErrorException e) {
final String msg = " Plug Nic failed due to " + e.toString();
s_logger.warn(msg, e);
return new PlugNicAnswer(command, false, msg);
} finally {
if (vm != null) {
try {
vm.free();
} catch (final LibvirtException l) {
s_logger.trace("Ignoring libvirt error.", l);
}
}
}
}
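Plugging is the inverse operation: if an interface with the requested MAC is already present the command succeeds immediately; otherwise the resource selects a VifDriver for the NIC's traffic type, lets it build an InterfaceDef, and attaches that definition's XML to the running domain. The sketch below shows one plausible shape for the driver selection step; the field names and fallback behaviour are assumptions, not the actual LibvirtComputingResource implementation.
// Illustrative sketch only: a traffic-type-to-driver lookup with a default fallback.
// The map and default-driver fields are hypothetical.
public VifDriver getVifDriver(final Networks.TrafficType trafficType) {
    VifDriver driver = trafficTypeVifDrivers.get(trafficType); // hypothetical Map<TrafficType, VifDriver>
    if (driver == null) {
        driver = defaultVifDriver; // hypothetical default driver
    }
    return driver;
}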
use of com.cloud.hypervisor.kvm.resource.VifDriver in project cloudstack by apache.
the class LibvirtMigrateCommandWrapper method execute.
@Override
public Answer execute(final MigrateCommand command, final LibvirtComputingResource libvirtComputingResource) {
final String vmName = command.getVmName();
String result = null;
List<InterfaceDef> ifaces = null;
List<DiskDef> disks = null;
Domain dm = null;
Connect dconn = null;
Domain destDomain = null;
Connect conn = null;
String xmlDesc = null;
List<Ternary<String, Boolean, String>> vmsnapshots = null;
try {
final LibvirtUtilitiesHelper libvirtUtilitiesHelper = libvirtComputingResource.getLibvirtUtilitiesHelper();
conn = libvirtUtilitiesHelper.getConnectionByVmName(vmName);
ifaces = libvirtComputingResource.getInterfaces(conn, vmName);
disks = libvirtComputingResource.getDisks(conn, vmName);
dm = conn.domainLookupByName(vmName);
/*
We replace the private IP address with the address of the destination host.
This is because the VNC listens on the private IP address of the hypervisor,
but that address is of course different on the target host.
MigrateCommand.getDestinationIp() returns the private IP address of the target
hypervisor. So it's safe to use.
The Domain.migrate method from libvirt supports passing a different XML
description for the instance to be used on the target host.
This is supported by libvirt-java from version 0.50.0
CVE-2015-3252: Get XML with sensitive information suitable for migration by using
VIR_DOMAIN_XML_MIGRATABLE flag (value = 8)
https://libvirt.org/html/libvirt-libvirt-domain.html#virDomainXMLFlags
Use VIR_DOMAIN_XML_SECURE (value = 1) prior to v1.0.0.
*/
// 1000000 equals v1.0.0
final int xmlFlag = conn.getLibVirVersion() >= 1000000 ? 8 : 1;
final String target = command.getDestinationIp();
xmlDesc = dm.getXMLDesc(xmlFlag);
xmlDesc = replaceIpForVNCInDescFile(xmlDesc, target);
// delete the metadata of vm snapshots before migration
vmsnapshots = libvirtComputingResource.cleanVMSnapshotMetadata(dm);
dconn = libvirtUtilitiesHelper.retrieveQemuConnection("qemu+tcp://" + command.getDestinationIp() + "/system");
//run migration in thread so we can monitor it
s_logger.info("Live migration of instance " + vmName + " initiated");
final ExecutorService executor = Executors.newFixedThreadPool(1);
final Callable<Domain> worker = new MigrateKVMAsync(libvirtComputingResource, dm, dconn, xmlDesc, vmName, command.getDestinationIp());
final Future<Domain> migrateThread = executor.submit(worker);
executor.shutdown();
long sleeptime = 0;
while (!executor.isTerminated()) {
Thread.sleep(100);
sleeptime += 100;
if (sleeptime == 1000) {
// wait 1s before attempting to set downtime on migration, since I don't know of a VIR_DOMAIN_MIGRATING state
final int migrateDowntime = libvirtComputingResource.getMigrateDowntime();
if (migrateDowntime > 0) {
try {
final int setDowntime = dm.migrateSetMaxDowntime(migrateDowntime);
if (setDowntime == 0) {
s_logger.debug("Set max downtime for migration of " + vmName + " to " + String.valueOf(migrateDowntime) + "ms");
}
} catch (final LibvirtException e) {
s_logger.debug("Failed to set max downtime for migration, perhaps migration completed? Error: " + e.getMessage());
}
}
}
if (sleeptime % 1000 == 0) {
s_logger.info("Waiting for migration of " + vmName + " to complete, waited " + sleeptime + "ms");
}
// pause vm if we meet the vm.migrate.pauseafter threshold and not already paused
final int migratePauseAfter = libvirtComputingResource.getMigratePauseAfter();
if (migratePauseAfter > 0 && sleeptime > migratePauseAfter && dm.getInfo().state == DomainState.VIR_DOMAIN_RUNNING) {
s_logger.info("Pausing VM " + vmName + " due to property vm.migrate.pauseafter setting to " + migratePauseAfter + "ms to complete migration");
try {
dm.suspend();
} catch (final LibvirtException e) {
// pause could be racy if it attempts to pause right when vm is finished, simply warn
s_logger.info("Failed to pause vm " + vmName + " : " + e.getMessage());
}
}
}
s_logger.info("Migration thread for " + vmName + " is done");
destDomain = migrateThread.get(10, TimeUnit.SECONDS);
if (destDomain != null) {
for (final DiskDef disk : disks) {
libvirtComputingResource.cleanupDisk(disk);
}
}
} catch (final LibvirtException e) {
s_logger.debug("Can't migrate domain: " + e.getMessage());
result = e.getMessage();
} catch (final InterruptedException e) {
s_logger.debug("Interrupted while migrating domain: " + e.getMessage());
result = e.getMessage();
} catch (final ExecutionException e) {
s_logger.debug("Failed to execute while migrating domain: " + e.getMessage());
result = e.getMessage();
} catch (final TimeoutException e) {
s_logger.debug("Timed out while migrating domain: " + e.getMessage());
result = e.getMessage();
} finally {
try {
if (dm != null && result != null) {
// restore vm snapshots in case of failed migration
if (vmsnapshots != null) {
libvirtComputingResource.restoreVMSnapshotMetadata(dm, vmName, vmsnapshots);
}
}
if (dm != null) {
if (dm.isPersistent() == 1) {
dm.undefine();
}
dm.free();
}
if (dconn != null) {
dconn.close();
}
if (destDomain != null) {
destDomain.free();
}
} catch (final LibvirtException e) {
s_logger.trace("Ignoring libvirt error.", e);
}
}
if (result == null) {
libvirtComputingResource.destroyNetworkRulesForVM(conn, vmName);
for (final InterfaceDef iface : ifaces) {
// We don't know which "traffic type" is associated with
// each interface at this point, so inform all vif drivers
final List<VifDriver> allVifDrivers = libvirtComputingResource.getAllVifDrivers();
for (final VifDriver vifDriver : allVifDrivers) {
vifDriver.unplug(iface);
}
}
}
return new MigrateAnswer(command, result == null, result, null);
}
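The long comment in this method explains why the domain XML is rewritten before migration: VNC listens on the source hypervisor's private address, so the description handed to the destination must carry the destination's address instead, and the XML is fetched with VIR_DOMAIN_XML_MIGRATABLE (or VIR_DOMAIN_XML_SECURE on libvirt older than 1.0.0) so the sensitive parts survive the transfer. The sketch below is only an approximation of what such a rewrite could look like; the actual replaceIpForVNCInDescFile in LibvirtMigrateCommandWrapper may differ, and the regular expressions are assumptions.
// Illustrative approximation, not the project's replaceIpForVNCInDescFile: rewrite the
// VNC listen address in the domain XML so the console is reachable on the target host.
static String replaceVncListenAddress(final String xmlDesc, final String destinationIp) {
    // listen='10.0.0.1' attribute on the <graphics> element (older libvirt XML)
    String result = xmlDesc.replaceAll("listen='[^']*'", "listen='" + destinationIp + "'");
    // nested <listen type='address' address='10.0.0.1'/> element (newer libvirt XML)
    result = result.replaceAll("<listen type='address' address='[^']*'",
            "<listen type='address' address='" + destinationIp + "'");
    return result;
}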