Search in sources :

Example 26 with HypervisorType

use of com.cloud.model.enumeration.HypervisorType in project cosmic by MissionCriticalCloud.

the class ApiDBUtils method getHypervisorTypeFromFormat.

public static HypervisorType getHypervisorTypeFromFormat(final long dcId, final ImageFormat format) {
    HypervisorType type = s_storageMgr.getHypervisorTypeFromFormat(format);
    if (format == ImageFormat.RAW) {
        // Currently, KVM only supports RBD images of type RAW.
        // This results in a weird collision with OVM volumes, which
        // can only be raw, thus making KVM RBD volumes show up as OVM
        // rather than RBD. This block of code can (hopefully) disambiguate by
        // checking whether the zone has a pool of type RBD, CLVM or LVM. However, it isn't
        // quite clear what to do if both storage types are used. If the image
        // format is RAW, it narrows the hypervisor choice down to OVM and KVM / RBD or KVM / CLVM or KVM / LVM.
        // This would be better implemented at the cluster level.
        final List<StoragePoolVO> pools = s_storagePoolDao.listByDataCenterId(dcId);
        final ListIterator<StoragePoolVO> itr = pools.listIterator();
        while (itr.hasNext()) {
            final StoragePoolVO pool = itr.next();
            if (pool.getPoolType() == StoragePoolType.RBD || pool.getPoolType() == StoragePoolType.CLVM || pool.getPoolType() == StoragePoolType.LVM) {
                // This case notes the presence of non-QCOW2 primary stores, suggesting KVM without NFS.
                // If this check does not pass, the hypervisor type remains OVM.
                type = HypervisorType.KVM;
                break;
            }
        }
    }
    return type;
}
Also used : HypervisorType(com.cloud.model.enumeration.HypervisorType) StoragePoolVO(com.cloud.storage.datastore.db.StoragePoolVO)
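
The same disambiguation logic can be shown as a small, self-contained sketch. The enums, pool list and method name below are hypothetical stand-ins for the project's types, kept only to illustrate the idea: for a RAW image, any RBD, CLVM or LVM primary pool in the zone tips the guess towards KVM, otherwise the initial guess stands.

import java.util.List;

public class HypervisorFormatGuess {

    enum PoolType { NetworkFilesystem, RBD, CLVM, LVM }

    enum Hypervisor { OVM, KVM }

    // Guess the hypervisor for a RAW image: a non-NFS, KVM-style pool type suggests KVM;
    // otherwise keep the initial guess (OVM in the example above).
    static Hypervisor guessForRawImage(final Hypervisor initialGuess, final List<PoolType> zonePools) {
        for (final PoolType pool : zonePools) {
            if (pool == PoolType.RBD || pool == PoolType.CLVM || pool == PoolType.LVM) {
                return Hypervisor.KVM;
            }
        }
        return initialGuess;
    }

    public static void main(final String[] args) {
        System.out.println(guessForRawImage(Hypervisor.OVM, List.of(PoolType.NetworkFilesystem, PoolType.RBD))); // KVM
        System.out.println(guessForRawImage(Hypervisor.OVM, List.of(PoolType.NetworkFilesystem))); // OVM
    }
}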

Example 27 with HypervisorType

use of com.cloud.model.enumeration.HypervisorType in project cosmic by MissionCriticalCloud.

the class LibvirtConnection method getConnectionByVmName.

public static Connect getConnectionByVmName(final String vmName) throws LibvirtException {
    final HypervisorType[] hypervisors = new HypervisorType[] { HypervisorType.KVM };
    for (final HypervisorType hypervisor : hypervisors) {
        try {
            final Connect conn = LibvirtConnection.getConnectionByType(hypervisor.toString());
            if (conn.domainLookupByName(vmName) != null) {
                return conn;
            }
        } catch (final Exception e) {
            LOGGER.debug("Can not find " + hypervisor.toString() + " connection for Instance: " + vmName + ", continuing.");
        }
    }
    LOGGER.warn("Can not find a connection for Instance " + vmName + ". Assuming the default connection.");
    // return the default connection
    return getConnection();
}
Also used : HypervisorType(com.cloud.model.enumeration.HypervisorType) Connect(org.libvirt.Connect) LibvirtException(org.libvirt.LibvirtException)
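
The lookup-then-fall-back pattern above can be sketched independently of libvirt. The Conn record and the lookup function below are hypothetical placeholders (the real code uses org.libvirt.Connect and LibvirtConnection.getConnectionByType); the point is only the control flow: try each candidate, swallow per-candidate failures, and fall back to a default.

import java.util.List;
import java.util.function.Function;

public class ConnectionFallback {

    // Hypothetical stand-in for a hypervisor connection handle.
    record Conn(String type) {}

    static Conn findConnectionForVm(final String vmName, final List<String> hypervisorTypes,
                                    final Function<String, Conn> lookup, final Conn defaultConn) {
        for (final String type : hypervisorTypes) {
            try {
                final Conn conn = lookup.apply(type);
                if (conn != null) {
                    return conn;
                }
            } catch (final Exception e) {
                System.out.println("No " + type + " connection for " + vmName + ", continuing.");
            }
        }
        System.out.println("No connection found for " + vmName + ", using the default connection.");
        return defaultConn;
    }

    public static void main(final String[] args) {
        final Conn result = findConnectionForVm("i-2-10-VM", List.of("KVM"),
                type -> { throw new RuntimeException("lookup failed"); },
                new Conn("default"));
        System.out.println(result); // Conn[type=default]
    }
}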

Example 28 with HypervisorType

use of com.cloud.model.enumeration.HypervisorType in project cosmic by MissionCriticalCloud.

the class CloudOrchestrator method createVirtualMachineFromScratch.

@Override
public void createVirtualMachineFromScratch(final String id, final String owner, final String isoId, final String hostName, final String displayName, final String hypervisor, final String os, final int cpu, final long memory, final Long diskSize, final List<String> computeTags, final List<String> rootDiskTags, final Map<String, NicProfile> networkNicMap, final DeploymentPlan plan, DiskControllerType diskControllerType) throws InsufficientCapacityException {
    final VirtualMachineEntityImpl vmEntity = ComponentContext.inject(VirtualMachineEntityImpl.class);
    vmEntity.init(id, owner, hostName, displayName, computeTags, rootDiskTags, new ArrayList<>(networkNicMap.keySet()));
    // load vm instance and offerings and call virtualMachineManagerImpl
    final VMInstanceVO vm = _vmDao.findByUuid(id);
    final ServiceOfferingVO computeOffering = _serviceOfferingDao.findById(vm.getId(), vm.getServiceOfferingId());
    final DiskOfferingInfo rootDiskOfferingInfo = new DiskOfferingInfo();
    rootDiskOfferingInfo.setDiskOffering(computeOffering);
    final Long diskOfferingId = vm.getDiskOfferingId();
    if (diskOfferingId == null) {
        throw new InvalidParameterValueException("Installing from ISO requires a disk offering to be specified for the root disk.");
    }
    final DiskOfferingVO diskOffering = _diskOfferingDao.findById(diskOfferingId);
    if (diskOffering == null) {
        throw new InvalidParameterValueException("Unable to find disk offering " + diskOfferingId);
    }
    Long size = null;
    if (diskOffering.getDiskSize() == 0) {
        size = diskSize;
        if (size == null) {
            throw new InvalidParameterValueException("Disk offering " + diskOffering + " requires size parameter.");
        }
        _volumeMgr.validateVolumeSizeRange(size * 1024 * 1024 * 1024);
    }
    rootDiskOfferingInfo.setDiskOffering(diskOffering);
    rootDiskOfferingInfo.setSize(size);
    if (diskOffering.isCustomizedIops() != null && diskOffering.isCustomizedIops()) {
        final Map<String, String> userVmDetails = _userVmDetailsDao.listDetailsKeyPairs(vm.getId());
        if (userVmDetails != null) {
            final String minIops = userVmDetails.get("minIopsDo");
            final String maxIops = userVmDetails.get("maxIopsDo");
            rootDiskOfferingInfo.setMinIops(minIops != null && minIops.trim().length() > 0 ? Long.parseLong(minIops) : null);
            rootDiskOfferingInfo.setMaxIops(maxIops != null && maxIops.trim().length() > 0 ? Long.parseLong(maxIops) : null);
        }
    }
    final LinkedHashMap<Network, List<? extends NicProfile>> networkIpMap = new LinkedHashMap<>();
    for (final String uuid : networkNicMap.keySet()) {
        final NetworkVO network = _networkDao.findByUuid(uuid);
        if (network != null) {
            networkIpMap.put(network, new ArrayList<>(Arrays.asList(networkNicMap.get(uuid))));
        }
    }
    final HypervisorType hypervisorType = HypervisorType.valueOf(hypervisor);
    _itMgr.allocate(vm.getInstanceName(), _templateDao.findById(Long.valueOf(isoId)), computeOffering, rootDiskOfferingInfo, new ArrayList<>(), networkIpMap, plan, hypervisorType, diskControllerType);
}
Also used : NetworkVO(com.cloud.network.dao.NetworkVO) VMInstanceVO(com.cloud.vm.VMInstanceVO) NicProfile(com.cloud.vm.NicProfile) ServiceOfferingVO(com.cloud.service.ServiceOfferingVO) VirtualMachineEntityImpl(com.cloud.engine.cloud.entity.api.VirtualMachineEntityImpl) LinkedHashMap(java.util.LinkedHashMap) HypervisorType(com.cloud.model.enumeration.HypervisorType) InvalidParameterValueException(com.cloud.legacymodel.exceptions.InvalidParameterValueException) DiskOfferingVO(com.cloud.storage.DiskOfferingVO) Network(com.cloud.legacymodel.network.Network) ArrayList(java.util.ArrayList) List(java.util.List) DiskOfferingInfo(com.cloud.offering.DiskOfferingInfo)
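
One detail worth isolating is the min/max IOPS handling: the "minIopsDo" and "maxIopsDo" VM details arrive as strings that may be missing or blank, and only non-blank values are parsed. A minimal sketch of that guard, with a hypothetical helper name, assuming the detail values are plain decimal numbers:

import java.util.Map;

public class IopsDetailParsing {

    // Missing or blank detail values become null instead of causing a NumberFormatException.
    static Long parseLongOrNull(final String value) {
        return value != null && value.trim().length() > 0 ? Long.parseLong(value.trim()) : null;
    }

    public static void main(final String[] args) {
        final Map<String, String> userVmDetails = Map.of("minIopsDo", "500", "maxIopsDo", " ");
        System.out.println(parseLongOrNull(userVmDetails.get("minIopsDo"))); // 500
        System.out.println(parseLongOrNull(userVmDetails.get("maxIopsDo"))); // null
        System.out.println(parseLongOrNull(userVmDetails.get("missing")));   // null
    }
}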

Example 29 with HypervisorType

use of com.cloud.model.enumeration.HypervisorType in project cosmic by MissionCriticalCloud.

the class VolumeOrchestrator method recreateVolume.

private Pair<VolumeVO, DataStore> recreateVolume(final VolumeVO vol, final VirtualMachineProfile vm, final DeployDestination dest) throws StorageUnavailableException {
    VolumeVO newVol;
    final boolean recreate = RecreatableSystemVmEnabled.value();
    DataStore destPool = null;
    if (recreate && (dest.getStorageForDisks() == null || dest.getStorageForDisks().get(vol) == null)) {
        destPool = this.dataStoreMgr.getDataStore(vol.getPoolId(), DataStoreRole.Primary);
        s_logger.debug("existing pool: " + destPool.getId());
    } else {
        final StoragePool pool = dest.getStorageForDisks().get(vol);
        destPool = this.dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
    }
    if (vol.getState() == Volume.State.Allocated || vol.getState() == Volume.State.Creating) {
        newVol = vol;
    } else {
        newVol = switchVolume(vol, vm);
        // changed
        if (dest.getStorageForDisks() != null && dest.getStorageForDisks().containsKey(vol)) {
            final StoragePool poolWithOldVol = dest.getStorageForDisks().get(vol);
            dest.getStorageForDisks().put(newVol, poolWithOldVol);
            dest.getStorageForDisks().remove(vol);
        }
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Created new volume " + newVol + " for old volume " + vol);
        }
    }
    VolumeInfo volume = this.volFactory.getVolume(newVol.getId(), destPool);
    final Long templateId = newVol.getTemplateId();
    for (int i = 0; i < 2; i++) {
        // retry one more time in case a template reload is required (VMware case)
        AsyncCallFuture<VolumeService.VolumeApiResult> future = null;
        if (templateId == null) {
            final DiskOffering diskOffering = this._entityMgr.findById(DiskOffering.class, volume.getDiskOfferingId());
            final HypervisorType hyperType = vm.getVirtualMachine().getHypervisorType();
            // update the volume's hv_ss_reserve (hypervisor snapshot reserve) from a disk offering (used for managed storage)
            this.volService.updateHypervisorSnapshotReserveForVolume(diskOffering, volume.getId(), hyperType);
            volume = this.volFactory.getVolume(newVol.getId(), destPool);
            future = this.volService.createVolumeAsync(volume, destPool);
        } else {
            final TemplateInfo templ = this.tmplFactory.getReadyTemplateOnImageStore(templateId, dest.getZone().getId());
            if (templ == null) {
                s_logger.debug("can't find ready template: " + templateId + " for data center " + dest.getZone().getId());
                throw new CloudRuntimeException("can't find ready template: " + templateId + " for data center " + dest.getZone().getId());
            }
            final PrimaryDataStore primaryDataStore = (PrimaryDataStore) destPool;
            if (primaryDataStore.isManaged()) {
                final DiskOffering diskOffering = this._entityMgr.findById(DiskOffering.class, volume.getDiskOfferingId());
                final HypervisorType hyperType = vm.getVirtualMachine().getHypervisorType();
                // update the volume's hv_ss_reserve (hypervisor snapshot reserve) from a disk offering (used for managed storage)
                this.volService.updateHypervisorSnapshotReserveForVolume(diskOffering, volume.getId(), hyperType);
                final long hostId = vm.getVirtualMachine().getHostId();
                future = this.volService.createManagedStorageAndVolumeFromTemplateAsync(volume, destPool.getId(), templ, hostId);
            } else {
                future = this.volService.createVolumeFromTemplateAsync(volume, destPool.getId(), templ);
            }
        }
        VolumeService.VolumeApiResult result = null;
        try {
            result = future.get();
            if (result.isFailed()) {
                if (result.getResult().contains("request template reload") && (i == 0)) {
                    s_logger.debug("Retry template re-deploy for vmware");
                    continue;
                } else {
                    s_logger.debug("Unable to create " + newVol + ":" + result.getResult());
                    throw new StorageUnavailableException("Unable to create " + newVol + ":" + result.getResult(), destPool.getId());
                }
            }
            final StoragePoolVO storagePool = this._storagePoolDao.findById(destPool.getId());
            if (storagePool.isManaged()) {
                final long hostId = vm.getVirtualMachine().getHostId();
                final Host host = this._hostDao.findById(hostId);
                this.volService.grantAccess(this.volFactory.getVolume(newVol.getId()), host, destPool);
            }
            newVol = this._volsDao.findById(newVol.getId());
            // break out of template-redeploy retry loop
            break;
        } catch (final InterruptedException e) {
            s_logger.error("Unable to create " + newVol, e);
            throw new StorageUnavailableException("Unable to create " + newVol + ":" + e.toString(), destPool.getId());
        } catch (final ExecutionException e) {
            s_logger.error("Unable to create " + newVol, e);
            throw new StorageUnavailableException("Unable to create " + newVol + ":" + e.toString(), destPool.getId());
        }
    }
    return new Pair<>(newVol, destPool);
}
Also used : StoragePool(com.cloud.legacymodel.storage.StoragePool) DiskOffering(com.cloud.legacymodel.storage.DiskOffering) VolumeInfo(com.cloud.engine.subsystem.api.storage.VolumeInfo) Host(com.cloud.legacymodel.dc.Host) HypervisorType(com.cloud.model.enumeration.HypervisorType) TemplateInfo(com.cloud.engine.subsystem.api.storage.TemplateInfo) VolumeVO(com.cloud.storage.VolumeVO) StorageUnavailableException(com.cloud.legacymodel.exceptions.StorageUnavailableException) CloudRuntimeException(com.cloud.legacymodel.exceptions.CloudRuntimeException) VolumeService(com.cloud.engine.subsystem.api.storage.VolumeService) DataStore(com.cloud.engine.subsystem.api.storage.DataStore) PrimaryDataStore(com.cloud.engine.subsystem.api.storage.PrimaryDataStore) StoragePoolVO(com.cloud.storage.datastore.db.StoragePoolVO) ExecutionException(java.util.concurrent.ExecutionException) PrimaryDataStore(com.cloud.engine.subsystem.api.storage.PrimaryDataStore) Pair(com.cloud.legacymodel.utils.Pair)
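
The retry loop in the middle of recreateVolume is easy to miss: at most two attempts, and the second attempt happens only when the first fails with a message containing "request template reload". A standalone sketch of that control flow, with a hypothetical Result type in place of VolumeApiResult:

import java.util.concurrent.Callable;

public class TemplateReloadRetry {

    // Hypothetical stand-in for VolumeApiResult.
    record Result(boolean failed, String message) {}

    // Attempt the operation at most twice, retrying only when the first attempt fails
    // with a message that asks for a template reload; any other failure is returned as-is.
    static Result createWithOneRetry(final Callable<Result> createVolume) throws Exception {
        Result result = null;
        for (int i = 0; i < 2; i++) {
            result = createVolume.call();
            if (result.failed() && result.message().contains("request template reload") && i == 0) {
                System.out.println("Retrying volume creation after template reload");
                continue;
            }
            break;
        }
        return result;
    }

    public static void main(final String[] args) throws Exception {
        final int[] attempt = {0};
        final Result result = createWithOneRetry(() -> ++attempt[0] == 1
                ? new Result(true, "request template reload")
                : new Result(false, "ok"));
        System.out.println(result); // Result[failed=false, message=ok]
    }
}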

Example 30 with HypervisorType

use of com.cloud.model.enumeration.HypervisorType in project cosmic by MissionCriticalCloud.

the class VolumeApiServiceImpl method resizeVolume.

@Override
@DB
@ActionEvent(eventType = EventTypes.EVENT_VOLUME_RESIZE, eventDescription = "resizing volume", async = true)
public VolumeVO resizeVolume(final ResizeVolumeCmd cmd) throws ResourceAllocationException {
    Long newSize;
    Long newMinIops;
    Long newMaxIops;
    final boolean shrinkOk = cmd.getShrinkOk();
    final VolumeVO volume = this._volsDao.findById(cmd.getEntityId());
    if (volume == null) {
        throw new InvalidParameterValueException("No such volume");
    }
    /* Does the caller have authority to act on this volume? */
    this._accountMgr.checkAccess(CallContext.current().getCallingAccount(), null, true, volume);
    if (volume.getInstanceId() != null) {
        // Check that Vm to which this volume is attached does not have VM Snapshots
        if (this._vmSnapshotDao.findByVm(volume.getInstanceId()).size() > 0) {
            throw new InvalidParameterValueException("Volume cannot be resized which is attached to VM with VM Snapshots");
        }
    }
    final DiskOfferingVO diskOffering = this._diskOfferingDao.findById(volume.getDiskOfferingId());
    DiskOfferingVO newDiskOffering = null;
    if (cmd.getNewDiskOfferingId() != null && volume.getDiskOfferingId() != cmd.getNewDiskOfferingId()) {
        newDiskOffering = this._diskOfferingDao.findById(cmd.getNewDiskOfferingId());
    }
    if (newDiskOffering != null && !newDiskOffering.isCustomized()) {
        throw new InvalidParameterValueException("The disk offering for volume " + volume.getName() + " can only be changed to an offering that supports a custom disk size.");
    }
    if (diskOffering.isCustomized() && newDiskOffering != null && !newDiskOffering.isCustomized()) {
        throw new InvalidParameterValueException("Volume " + volume.getName() + " has a custom size disk offering. Cannot change the disk offering." + " Please change the size instead");
    }
    final HypervisorType hypervisorType = this._volsDao.getHypervisorType(volume.getId());
    if (hypervisorType != HypervisorType.KVM && hypervisorType != HypervisorType.XenServer && hypervisorType != HypervisorType.Any && hypervisorType != HypervisorType.None) {
        throw new InvalidParameterValueException("CloudStack currently supports volume resize only on KVM, or XenServer.");
    }
    if (volume.getState() != Volume.State.Ready && volume.getState() != Volume.State.Allocated) {
        throw new InvalidParameterValueException("Volume should be in ready or allocated state before attempting a resize. Volume " + volume.getUuid() + " is in state " + volume.getState() + ".");
    }
    // if we are to use the existing disk offering
    if (newDiskOffering == null) {
        newSize = cmd.getSize();
        // if the caller is looking to change the size of the volume
        if (newSize != null) {
            if (!diskOffering.isCustomized() && !volume.getVolumeType().equals(VolumeType.ROOT)) {
                throw new InvalidParameterValueException("To change a volume's size without providing a new disk offering, its current disk offering must be " + "customizable or it must be a root volume (if providing a disk offering, make sure it is different from the current disk offering).");
            }
            // convert from GiB to bytes
            newSize = newSize << 30;
        } else {
            // no parameter provided; just use the original size of the volume
            newSize = volume.getSize();
        }
        newMinIops = cmd.getMinIops();
        if (newMinIops != null) {
            if (diskOffering.isCustomizedIops() == null || !diskOffering.isCustomizedIops()) {
                throw new InvalidParameterValueException("The current disk offering does not support customization of the 'Min IOPS' parameter.");
            }
        } else {
            // no parameter provided; just use the original min IOPS of the volume
            newMinIops = volume.getMinIops();
        }
        newMaxIops = cmd.getMaxIops();
        if (newMaxIops != null) {
            if (diskOffering.isCustomizedIops() == null || !diskOffering.isCustomizedIops()) {
                throw new InvalidParameterValueException("The current disk offering does not support customization of the 'Max IOPS' parameter.");
            }
        } else {
            // no parameter provided; just use the original max IOPS of the volume
            newMaxIops = volume.getMaxIops();
        }
        validateIops(newMinIops, newMaxIops);
    } else {
        if (newDiskOffering.getRemoved() != null) {
            throw new InvalidParameterValueException("Requested disk offering has been removed.");
        }
        if (!DiskOfferingVO.Type.Disk.equals(newDiskOffering.getType())) {
            throw new InvalidParameterValueException("Requested disk offering type is invalid.");
        }
        if (diskOffering.getTags() != null) {
            if (!StringUtils.areTagsEqual(diskOffering.getTags(), newDiskOffering.getTags())) {
                throw new InvalidParameterValueException("The tags on the new and old disk offerings must match.");
            }
        } else if (newDiskOffering.getTags() != null) {
            throw new InvalidParameterValueException("There are no tags on the current disk offering. The new disk offering needs to have no tags, as well.");
        }
        if (!areIntegersEqual(diskOffering.getHypervisorSnapshotReserve(), newDiskOffering.getHypervisorSnapshotReserve())) {
            throw new InvalidParameterValueException("The hypervisor snapshot reverse on the new and old disk offerings must be equal.");
        }
        if (newDiskOffering.getDomainId() != null) {
            // not a public offering; check access
            this._configMgr.checkDiskOfferingAccess(CallContext.current().getCallingAccount(), newDiskOffering);
        }
        if (newDiskOffering.isCustomized()) {
            newSize = cmd.getSize();
            if (newSize == null) {
                throw new InvalidParameterValueException("The new disk offering requires that a size be specified.");
            }
            // convert from GiB to bytes
            newSize = newSize << 30;
        } else {
            newSize = newDiskOffering.getDiskSize();
        }
        if (!volume.getSize().equals(newSize) && !volume.getVolumeType().equals(VolumeType.DATADISK)) {
            throw new InvalidParameterValueException("Only data volumes can be resized via a new disk offering.");
        }
        if (newDiskOffering.isCustomizedIops() != null && newDiskOffering.isCustomizedIops()) {
            newMinIops = cmd.getMinIops() != null ? cmd.getMinIops() : volume.getMinIops();
            newMaxIops = cmd.getMaxIops() != null ? cmd.getMaxIops() : volume.getMaxIops();
            validateIops(newMinIops, newMaxIops);
        } else {
            newMinIops = newDiskOffering.getMinIops();
            newMaxIops = newDiskOffering.getMaxIops();
        }
    }
    final long currentSize = volume.getSize();
    // if the caller is looking to change the size of the volume
    if (currentSize != newSize) {
        if (!validateVolumeSizeRange(newSize)) {
            throw new InvalidParameterValueException("Requested size out of range");
        }
        /*
         * Let's make certain they (think they) know what they're doing if they
         * want to shrink by forcing them to provide the shrinkok parameter.
         * This will be checked again at the hypervisor level where we can see
         * the actual disk size.
         */
        if (currentSize > newSize && !shrinkOk) {
            throw new InvalidParameterValueException("Going from existing size of " + currentSize + " to size of " + newSize + " would shrink the volume." + "Need to sign off by supplying the shrinkok parameter with value of true.");
        }
        if (newSize > currentSize) {
            /* Check resource limit for this account on primary storage resource */
            this._resourceLimitMgr.checkResourceLimit(this._accountMgr.getAccount(volume.getAccountId()), ResourceType.primary_storage, volume.isDisplayVolume(), newSize - currentSize);
        }
    }
    /* If this volume has never been beyond allocated state, short circuit everything and simply update the database. */
    if (volume.getState() == Volume.State.Allocated) {
        s_logger.debug("Volume is in the allocated state, but has never been created. Simply updating database with new size and IOPS.");
        volume.setSize(newSize);
        volume.setMinIops(newMinIops);
        volume.setMaxIops(newMaxIops);
        if (newDiskOffering != null) {
            volume.setDiskOfferingId(cmd.getNewDiskOfferingId());
        }
        this._volsDao.update(volume.getId(), volume);
        return volume;
    }
    final UserVmVO userVm = this._userVmDao.findById(volume.getInstanceId());
    if (userVm != null) {
        // serialize VM operation
        final AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
        if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
            // avoid re-entrance
            final VmWorkJobVO placeHolder;
            placeHolder = createPlaceHolderWork(userVm.getId());
            try {
                return orchestrateResizeVolume(volume.getId(), currentSize, newSize, newMinIops, newMaxIops, newDiskOffering != null ? cmd.getNewDiskOfferingId() : null, shrinkOk);
            } finally {
                this._workJobDao.expunge(placeHolder.getId());
            }
        } else {
            final Outcome<Volume> outcome = resizeVolumeThroughJobQueue(userVm.getId(), volume.getId(), currentSize, newSize, newMinIops, newMaxIops, newDiskOffering != null ? cmd.getNewDiskOfferingId() : null, shrinkOk);
            try {
                outcome.get();
            } catch (final InterruptedException e) {
                throw new RuntimeException("Operation was interrupted", e);
            } catch (final java.util.concurrent.ExecutionException e) {
                throw new RuntimeException("Execution exception", e);
            }
            final Object jobResult = this._jobMgr.unmarshallResultObject(outcome.getJob());
            if (jobResult != null) {
                if (jobResult instanceof ConcurrentOperationException) {
                    throw (ConcurrentOperationException) jobResult;
                } else if (jobResult instanceof ResourceAllocationException) {
                    throw (ResourceAllocationException) jobResult;
                } else if (jobResult instanceof RuntimeException) {
                    throw (RuntimeException) jobResult;
                } else if (jobResult instanceof Throwable) {
                    throw new RuntimeException("Unexpected exception", (Throwable) jobResult);
                } else if (jobResult instanceof Long) {
                    return this._volsDao.findById((Long) jobResult);
                }
            }
            return volume;
        }
    }
    return orchestrateResizeVolume(volume.getId(), currentSize, newSize, newMinIops, newMaxIops, newDiskOffering != null ? cmd.getNewDiskOfferingId() : null, shrinkOk);
}
Also used : UserVmVO(com.cloud.vm.UserVmVO) AsyncJobExecutionContext(com.cloud.framework.jobs.AsyncJobExecutionContext) ConcurrentOperationException(com.cloud.legacymodel.exceptions.ConcurrentOperationException) VmWorkJobVO(com.cloud.framework.jobs.impl.VmWorkJobVO) HypervisorType(com.cloud.model.enumeration.HypervisorType) ExecutionException(java.util.concurrent.ExecutionException) CloudRuntimeException(com.cloud.legacymodel.exceptions.CloudRuntimeException) InvalidParameterValueException(com.cloud.legacymodel.exceptions.InvalidParameterValueException) Volume(com.cloud.legacymodel.storage.Volume) VmWorkDetachVolume(com.cloud.vm.VmWorkDetachVolume) VmWorkMigrateVolume(com.cloud.vm.VmWorkMigrateVolume) VmWorkResizeVolume(com.cloud.vm.VmWorkResizeVolume) VmWorkAttachVolume(com.cloud.vm.VmWorkAttachVolume) VmWorkExtractVolume(com.cloud.vm.VmWorkExtractVolume) DataObject(com.cloud.engine.subsystem.api.storage.DataObject) ResourceAllocationException(com.cloud.legacymodel.exceptions.ResourceAllocationException) ActionEvent(com.cloud.event.ActionEvent) DB(com.cloud.utils.db.DB)
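
Two pieces of arithmetic in resizeVolume are worth spelling out: the requested size arrives in GiB and is converted to bytes with a left shift by 30 (1 GiB = 2^30 bytes), and shrinking is refused unless the caller passes shrinkok=true. A minimal sketch of both checks, with hypothetical method names:

public class ResizeSizeChecks {

    // The API takes sizes in GiB; internally they are stored in bytes (1 GiB = 2^30 bytes).
    static long gibToBytes(final long sizeGib) {
        return sizeGib << 30;
    }

    // Shrinking is only allowed when the caller explicitly signs off with shrinkOk.
    static void checkShrink(final long currentBytes, final long newBytes, final boolean shrinkOk) {
        if (currentBytes > newBytes && !shrinkOk) {
            throw new IllegalArgumentException("Resizing from " + currentBytes + " to " + newBytes
                    + " bytes would shrink the volume; pass shrinkok=true to confirm.");
        }
    }

    public static void main(final String[] args) {
        final long current = gibToBytes(20);
        System.out.println(gibToBytes(10));          // 10737418240
        checkShrink(current, gibToBytes(40), false); // growing, no sign-off needed
        checkShrink(current, gibToBytes(10), true);  // shrinking with sign-off
        // checkShrink(current, gibToBytes(10), false); would throw
    }
}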

Aggregations

HypervisorType (com.cloud.model.enumeration.HypervisorType): 40
CloudRuntimeException (com.cloud.legacymodel.exceptions.CloudRuntimeException): 18
ArrayList (java.util.ArrayList): 17
InvalidParameterValueException (com.cloud.legacymodel.exceptions.InvalidParameterValueException): 16
Account (com.cloud.legacymodel.user.Account): 12
VMTemplateVO (com.cloud.storage.VMTemplateVO): 8
AgentUnavailableException (com.cloud.legacymodel.exceptions.AgentUnavailableException): 7
ClusterVO (com.cloud.dc.ClusterVO): 6
DataStore (com.cloud.engine.subsystem.api.storage.DataStore): 6
DB (com.cloud.utils.db.DB): 6
UserVmVO (com.cloud.vm.UserVmVO): 6
HostVO (com.cloud.host.HostVO): 5
NoTransitionException (com.cloud.legacymodel.exceptions.NoTransitionException): 5
OperationTimedoutException (com.cloud.legacymodel.exceptions.OperationTimedoutException): 5
ResourceAllocationException (com.cloud.legacymodel.exceptions.ResourceAllocationException): 5
NetworkVO (com.cloud.network.dao.NetworkVO): 5
StoragePoolVO (com.cloud.storage.datastore.db.StoragePoolVO): 5
VMSnapshotVO (com.cloud.vm.snapshot.VMSnapshotVO): 5
EndPoint (com.cloud.engine.subsystem.api.storage.EndPoint): 4
ConcurrentOperationException (com.cloud.legacymodel.exceptions.ConcurrentOperationException): 4