
Example 11 with MigrateAnswer

Use of com.cloud.agent.api.MigrateAnswer in project cloudstack by apache.

The class LibvirtMigrateCommandWrapper, method execute (live-migrates a KVM instance to the destination host, optionally migrating its storage, and reports the outcome as a MigrateAnswer):

@Override
public Answer execute(final MigrateCommand command, final LibvirtComputingResource libvirtComputingResource) {
    final String vmName = command.getVmName();
    final Map<String, Boolean> vlanToPersistenceMap = command.getVlanToPersistenceMap();
    final String destinationUri = createMigrationURI(command.getDestinationIp(), libvirtComputingResource);
    final List<MigrateDiskInfo> migrateDiskInfoList = command.getMigrateDiskInfoList();
    String result = null;
    List<InterfaceDef> ifaces = null;
    List<DiskDef> disks;
    Domain dm = null;
    Connect dconn = null;
    Domain destDomain = null;
    Connect conn = null;
    String xmlDesc = null;
    List<Ternary<String, Boolean, String>> vmsnapshots = null;
    try {
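        // Collect the source-side state needed for the migration: libvirt connection, NIC and disk definitions, and the running domain.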
        final LibvirtUtilitiesHelper libvirtUtilitiesHelper = libvirtComputingResource.getLibvirtUtilitiesHelper();
        conn = libvirtUtilitiesHelper.getConnectionByVmName(vmName);
        ifaces = libvirtComputingResource.getInterfaces(conn, vmName);
        disks = libvirtComputingResource.getDisks(conn, vmName);
        VirtualMachineTO to = command.getVirtualMachine();
        dm = conn.domainLookupByName(vmName);
        /*
                We replace the private IP address with the address of the destination host.
                This is because the VNC listens on the private IP address of the hypervisor,
                but that address is of course different on the target host.

                MigrateCommand.getDestinationIp() returns the private IP address of the target
                hypervisor. So it's safe to use.

                The Domain.migrate method from libvirt supports passing a different XML
                description for the instance to be used on the target host.

                This is supported by libvirt-java from version 0.50.0

                CVE-2015-3252: Get XML with sensitive information suitable for migration by using
                               VIR_DOMAIN_XML_MIGRATABLE flag (value = 8)
                               https://libvirt.org/html/libvirt-libvirt-domain.html#virDomainXMLFlags

                               Use VIR_DOMAIN_XML_SECURE (value = 1) prior to v1.0.0.
             */
        // 1000000 equals v1.0.0
        final int xmlFlag = conn.getLibVirVersion() >= 1000000 ? 8 : 1;
        final String target = command.getDestinationIp();
        xmlDesc = dm.getXMLDesc(xmlFlag);
        xmlDesc = replaceIpForVNCInDescFile(xmlDesc, target);
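        // If the attached ISO's datastore has changed, point the disk source in the XML at the new volume path.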
        String oldIsoVolumePath = getOldVolumePath(disks, vmName);
        String newIsoVolumePath = getNewVolumePathIfDatastoreHasChanged(libvirtComputingResource, conn, to);
        if (newIsoVolumePath != null && !newIsoVolumePath.equals(oldIsoVolumePath)) {
            s_logger.debug("Editing mount path");
            xmlDesc = replaceDiskSourceFile(xmlDesc, newIsoVolumePath, vmName);
        }
        // delete the metadata of vm snapshots before migration
        vmsnapshots = libvirtComputingResource.cleanVMSnapshotMetadata(dm);
        // Verify Format of backing file
        for (DiskDef disk : disks) {
            if (disk.getDeviceType() == DiskDef.DeviceType.DISK && disk.getDiskFormatType() == DiskDef.DiskFmtType.QCOW2) {
                libvirtComputingResource.setBackingFileFormat(disk.getDiskPath());
            }
        }
        Map<String, MigrateCommand.MigrateDiskInfo> mapMigrateStorage = command.getMigrateStorage();
        // migrateStorage is declared as final because the replaceStorage method may mutate mapMigrateStorage, but
        // migrateStorage's value should always only be associated with the initial state of mapMigrateStorage.
        final boolean migrateStorage = MapUtils.isNotEmpty(mapMigrateStorage);
        final boolean migrateStorageManaged = command.isMigrateStorageManaged();
        if (migrateStorage) {
            xmlDesc = replaceStorage(xmlDesc, mapMigrateStorage, migrateStorageManaged);
        }
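        // Rewrite DPDK interface definitions in the XML to match the destination's port mapping, if any.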
        Map<String, DpdkTO> dpdkPortsMapping = command.getDpdkInterfaceMapping();
        if (MapUtils.isNotEmpty(dpdkPortsMapping)) {
            xmlDesc = replaceDpdkInterfaces(xmlDesc, dpdkPortsMapping);
        }
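        // Open a libvirt connection to the destination host using the migration URI built above.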
        dconn = libvirtUtilitiesHelper.retrieveQemuConnection(destinationUri);
        if (to.getType() == VirtualMachine.Type.User) {
            libvirtComputingResource.detachAndAttachConfigDriveISO(conn, vmName);
        }
        // run migration in thread so we can monitor it
        s_logger.info("Live migration of instance " + vmName + " initiated to destination host: " + dconn.getURI());
        final ExecutorService executor = Executors.newFixedThreadPool(1);
        boolean migrateNonSharedInc = command.isMigrateNonSharedInc() && !migrateStorageManaged;
        final Callable<Domain> worker = new MigrateKVMAsync(libvirtComputingResource, dm, dconn, xmlDesc, migrateStorage, migrateNonSharedInc, command.isAutoConvergence(), vmName, command.getDestinationIp());
        final Future<Domain> migrateThread = executor.submit(worker);
        executor.shutdown();
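        // Poll the migration in 100 ms steps so the downtime, timeout and pause thresholds below can be applied while it runs.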
        long sleeptime = 0;
        while (!executor.isTerminated()) {
            Thread.sleep(100);
            sleeptime += 100;
            if (sleeptime == 1000) {
                // wait 1s before attempting to set downtime on migration, since I don't know of a VIR_DOMAIN_MIGRATING state
                final int migrateDowntime = libvirtComputingResource.getMigrateDowntime();
                if (migrateDowntime > 0) {
                    try {
                        final int setDowntime = dm.migrateSetMaxDowntime(migrateDowntime);
                        if (setDowntime == 0) {
                            s_logger.debug("Set max downtime for migration of " + vmName + " to " + String.valueOf(migrateDowntime) + "ms");
                        }
                    } catch (final LibvirtException e) {
                        s_logger.debug("Failed to set max downtime for migration, perhaps migration completed? Error: " + e.getMessage());
                    }
                }
            }
            if (sleeptime % 1000 == 0) {
                s_logger.info("Waiting for migration of " + vmName + " to complete, waited " + sleeptime + "ms");
            }
            // abort the vm migration if the job is executed more than vm.migrate.wait
            final int migrateWait = libvirtComputingResource.getMigrateWait();
            if (migrateWait > 0 && sleeptime > migrateWait * 1000) {
                DomainState state = null;
                try {
                    state = dm.getInfo().state;
                } catch (final LibvirtException e) {
                    s_logger.info("Couldn't get VM domain state after " + sleeptime + "ms: " + e.getMessage());
                }
                if (state != null && state == DomainState.VIR_DOMAIN_RUNNING) {
                    try {
                        DomainJobInfo job = dm.getJobInfo();
                        s_logger.info("Aborting " + vmName + " domain job: " + job);
                        dm.abortJob();
                        result = String.format("Migration of VM %s was cancelled by cloudstack due to time out after %d seconds", vmName, migrateWait);
                        s_logger.debug(result);
                        break;
                    } catch (final LibvirtException e) {
                        s_logger.info("Failed to abort the vm migration job of vm " + vmName + " : " + e.getMessage());
                    }
                }
            }
            // pause vm if we meet the vm.migrate.pauseafter threshold and not already paused
            final int migratePauseAfter = libvirtComputingResource.getMigratePauseAfter();
            if (migratePauseAfter > 0 && sleeptime > migratePauseAfter) {
                DomainState state = null;
                try {
                    state = dm.getInfo().state;
                } catch (final LibvirtException e) {
                    s_logger.info("Couldn't get VM domain state after " + sleeptime + "ms: " + e.getMessage());
                }
                if (state != null && state == DomainState.VIR_DOMAIN_RUNNING) {
                    try {
                        s_logger.info("Pausing VM " + vmName + " due to property vm.migrate.pauseafter setting to " + migratePauseAfter + "ms to complete migration");
                        dm.suspend();
                    } catch (final LibvirtException e) {
                        // pause could be racy if it attempts to pause right when vm is finished, simply warn
                        s_logger.info("Failed to pause vm " + vmName + " : " + e.getMessage());
                    }
                }
            }
        }
        s_logger.info("Migration thread for " + vmName + " is done");
        destDomain = migrateThread.get(AgentPropertiesFileHandler.getPropertyValue(AgentProperties.VM_MIGRATE_DOMAIN_RETRIEVE_TIMEOUT), TimeUnit.SECONDS);
        if (destDomain != null) {
            deleteOrDisconnectDisksOnSourcePool(libvirtComputingResource, migrateDiskInfoList, disks);
        }
    } catch (final LibvirtException e) {
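        // A refused connection to the destination libvirtd is rewritten below into a more actionable message.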
        s_logger.debug("Can't migrate domain: " + e.getMessage());
        result = e.getMessage();
        if (result.startsWith("unable to connect to server") && result.endsWith("refused")) {
            result = String.format("Migration was refused connection to destination: %s. Please check libvirt configuration compatibility and firewall rules on the source and destination hosts.", destinationUri);
        }
    } catch (final InterruptedException | ExecutionException | TimeoutException | IOException | ParserConfigurationException | SAXException | TransformerException | URISyntaxException e) {
        s_logger.debug(String.format("%s : %s", e.getClass().getSimpleName(), e.getMessage()));
        if (result == null) {
            result = "Exception during migrate: " + e.getMessage();
        }
    } finally {
        try {
            if (dm != null && result != null) {
                // restore vm snapshots in case of failed migration
                if (vmsnapshots != null) {
                    libvirtComputingResource.restoreVMSnapshotMetadata(dm, vmName, vmsnapshots);
                }
            }
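            // Clean up libvirt objects: undefine any persistent source definition, then free/close the domain handles and the destination connection.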
            if (dm != null) {
                if (dm.isPersistent() == 1) {
                    dm.undefine();
                }
                dm.free();
            }
            if (dconn != null) {
                dconn.close();
            }
            if (destDomain != null) {
                destDomain.free();
            }
        } catch (final LibvirtException e) {
            s_logger.trace("Ignoring libvirt error.", e);
        }
    }
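    // On success (result == null), tear down the source-side network plumbing: remove network rules and unplug every NIC from its vif driver.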
    if (result == null) {
        libvirtComputingResource.destroyNetworkRulesForVM(conn, vmName);
        for (final InterfaceDef iface : ifaces) {
            String vlanId = libvirtComputingResource.getVlanIdFromBridgeName(iface.getBrName());
            // We don't know which "traffic type" is associated with
            // each interface at this point, so inform all vif drivers
            final List<VifDriver> allVifDrivers = libvirtComputingResource.getAllVifDrivers();
            for (final VifDriver vifDriver : allVifDrivers) {
                vifDriver.unplug(iface, libvirtComputingResource.shouldDeleteBridge(vlanToPersistenceMap, vlanId));
            }
        }
    }
    return new MigrateAnswer(command, result == null, result, null);
}
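
For context, a minimal sketch of how the MigrateAnswer built on the last line above might be consumed by a caller. It is illustrative only: migrateCommand and libvirtComputingResource are assumed to have been prepared elsewhere, and in CloudStack this wrapper is normally reached through the resource's request-wrapper dispatch rather than instantiated directly; the success flag and details simply mirror the result string assembled in execute().

// Illustrative sketch only: "migrateCommand" and "libvirtComputingResource" are assumed
// to have been prepared elsewhere; real CloudStack code reaches this wrapper through the
// resource's request-wrapper dispatch rather than by instantiating it directly.
final LibvirtMigrateCommandWrapper wrapper = new LibvirtMigrateCommandWrapper();
final MigrateAnswer answer = (MigrateAnswer) wrapper.execute(migrateCommand, libvirtComputingResource);
if (!answer.getResult()) {
    // getDetails() carries the failure reason assembled in execute() (timeout, refused connection, ...)
    throw new CloudRuntimeException("Migration of " + migrateCommand.getVmName() + " failed: " + answer.getDetails());
}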
Also used :
LibvirtException (org.libvirt.LibvirtException)
URISyntaxException (java.net.URISyntaxException)
DpdkTO (com.cloud.agent.api.to.DpdkTO)
VirtualMachineTO (com.cloud.agent.api.to.VirtualMachineTO)
SAXException (org.xml.sax.SAXException)
MigrateAnswer (com.cloud.agent.api.MigrateAnswer)
DiskDef (com.cloud.hypervisor.kvm.resource.LibvirtVMDef.DiskDef)
MigrateKVMAsync (com.cloud.hypervisor.kvm.resource.MigrateKVMAsync)
ParserConfigurationException (javax.xml.parsers.ParserConfigurationException)
ExecutionException (java.util.concurrent.ExecutionException)
DomainJobInfo (org.libvirt.DomainJobInfo)
TransformerException (javax.xml.transform.TransformerException)
TimeoutException (java.util.concurrent.TimeoutException)
Ternary (com.cloud.utils.Ternary)
Connect (org.libvirt.Connect)
IOException (java.io.IOException)
MigrateDiskInfo (com.cloud.agent.api.MigrateCommand.MigrateDiskInfo)
VifDriver (com.cloud.hypervisor.kvm.resource.VifDriver)
InterfaceDef (com.cloud.hypervisor.kvm.resource.LibvirtVMDef.InterfaceDef)
DomainState (org.libvirt.DomainInfo.DomainState)
ExecutorService (java.util.concurrent.ExecutorService)
Domain (org.libvirt.Domain)

Aggregations

MigrateAnswer (com.cloud.agent.api.MigrateAnswer) 11
CloudRuntimeException (com.cloud.utils.exception.CloudRuntimeException) 5
IOException (java.io.IOException) 4
ConfigurationException (javax.naming.ConfigurationException) 4
URISyntaxException (java.net.URISyntaxException) 3
ExecutionException (java.util.concurrent.ExecutionException) 3
Connect (org.libvirt.Connect) 3
Domain (org.libvirt.Domain) 3
LibvirtException (org.libvirt.LibvirtException) 3
MigrateDiskInfo (com.cloud.agent.api.MigrateCommand.MigrateDiskInfo) 2
InternalErrorException (com.cloud.exception.InternalErrorException) 2
VifDriver (com.cloud.hypervisor.kvm.resource.VifDriver) 2
MockHost (com.cloud.simulator.MockHost) 2
MockVMVO (com.cloud.simulator.MockVMVO) 2
Ternary (com.cloud.utils.Ternary) 2
State (com.cloud.vm.VirtualMachine.State) 2
Connection (com.xensource.xenapi.Connection) 2
Host (com.xensource.xenapi.Host) 2
VBD (com.xensource.xenapi.VBD) 2
VM (com.xensource.xenapi.VM) 2