
Example 21 with DiskProfile

Use of com.cloud.vm.DiskProfile in the Apache CloudStack project: class ManagementServerImpl, method listStoragePoolsForMigrationOfVolume:

@Override
public Pair<List<? extends StoragePool>, List<? extends StoragePool>> listStoragePoolsForMigrationOfVolume(final Long volumeId) {
    final Account caller = getCaller();
    if (!_accountMgr.isRootAdmin(caller.getId())) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Caller is not a root admin, permission denied to migrate the volume");
        }
        throw new PermissionDeniedException("No permission to migrate volume, only root admin can migrate a volume");
    }
    final VolumeVO volume = _volumeDao.findById(volumeId);
    if (volume == null) {
        final InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find volume with" + " specified id.");
        ex.addProxyObject(volumeId.toString(), "volumeId");
        throw ex;
    }
    // Volume must be attached to an instance for live migration.
    final List<StoragePool> allPools = new ArrayList<StoragePool>();
    final List<StoragePool> suitablePools = new ArrayList<StoragePool>();
    // Volume must be in Ready state to be migrated.
    if (!Volume.State.Ready.equals(volume.getState())) {
        s_logger.info("Volume " + volume + " must be in ready state for migration.");
        return new Pair<List<? extends StoragePool>, List<? extends StoragePool>>(allPools, suitablePools);
    }
    if (!_volumeMgr.volumeOnSharedStoragePool(volume)) {
        s_logger.info("Volume " + volume + " is on local storage. It cannot be migrated to another pool.");
        return new Pair<List<? extends StoragePool>, List<? extends StoragePool>>(allPools, suitablePools);
    }
    final Long instanceId = volume.getInstanceId();
    VMInstanceVO vm = null;
    if (instanceId != null) {
        vm = _vmInstanceDao.findById(instanceId);
    }
    if (vm == null) {
        s_logger.info("Volume " + volume + " isn't attached to any vm. Looking for storage pools in the " + "zone to which this volumes can be migrated.");
    } else if (vm.getState() != State.Running) {
        s_logger.info("Volume " + volume + " isn't attached to any running vm. Looking for storage pools in the " + "cluster to which this volumes can be migrated.");
    } else {
        s_logger.info("Volume " + volume + " is attached to any running vm. Looking for storage pools in the " + "cluster to which this volumes can be migrated.");
        boolean storageMotionSupported = false;
        // Check if the underlying hypervisor supports storage motion.
        final Long hostId = vm.getHostId();
        if (hostId != null) {
            final HostVO host = _hostDao.findById(hostId);
            HypervisorCapabilitiesVO capabilities = null;
            if (host != null) {
                capabilities = _hypervisorCapabilitiesDao.findByHypervisorTypeAndVersion(host.getHypervisorType(), host.getHypervisorVersion());
            } else {
                s_logger.error("Details of the host on which the vm " + vm + ", to which volume " + volume + " is " + "attached, couldn't be retrieved.");
            }
            if (capabilities != null) {
                storageMotionSupported = capabilities.isStorageMotionSupported();
            } else {
                s_logger.error("Capabilities for host " + host + " couldn't be retrieved.");
            }
        }
        if (!storageMotionSupported) {
            s_logger.info("Volume " + volume + " is attached to a running vm and the hypervisor doesn't support" + " storage motion.");
            return new Pair<List<? extends StoragePool>, List<? extends StoragePool>>(allPools, suitablePools);
        }
    }
    // Source pool of the volume.
    final StoragePoolVO srcVolumePool = _poolDao.findById(volume.getPoolId());
    // Get all the pools available. Only shared pools are considered because only a volume on a shared pools
    // can be live migrated while the virtual machine stays on the same host.
    List<StoragePoolVO> storagePools = null;
    if (srcVolumePool.getClusterId() == null) {
        storagePools = _poolDao.findZoneWideStoragePoolsByTags(volume.getDataCenterId(), null);
    } else {
        storagePools = _poolDao.findPoolsByTags(volume.getDataCenterId(), srcVolumePool.getPodId(), srcVolumePool.getClusterId(), null);
    }
    storagePools.remove(srcVolumePool);
    for (final StoragePoolVO pool : storagePools) {
        if (pool.isShared()) {
            allPools.add((StoragePool) dataStoreMgr.getPrimaryDataStore(pool.getId()));
        }
    }
    // Get all the suitable pools.
    // Exclude the current pool from the list of pools to which the volume can be migrated.
    final ExcludeList avoid = new ExcludeList();
    avoid.addPool(srcVolumePool.getId());
    // Volume stays in the same cluster after migration.
    final DataCenterDeployment plan = new DataCenterDeployment(volume.getDataCenterId(), srcVolumePool.getPodId(), srcVolumePool.getClusterId(), null, null, null);
    final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm);
    final DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId());
    final DiskProfile diskProfile = new DiskProfile(volume, diskOffering, profile.getHypervisorType());
    // Call the storage pool allocator to find the list of storage pools.
    for (final StoragePoolAllocator allocator : _storagePoolAllocators) {
        final List<StoragePool> pools = allocator.allocateToPool(diskProfile, profile, plan, avoid, StoragePoolAllocator.RETURN_UPTO_ALL);
        if (pools != null && !pools.isEmpty()) {
            suitablePools.addAll(pools);
            break;
        }
    }
    return new Pair<List<? extends StoragePool>, List<? extends StoragePool>>(allPools, suitablePools);
}
Also used: com.cloud.hypervisor.HypervisorCapabilitiesVO, com.cloud.deploy.DeploymentPlanner.ExcludeList, com.cloud.user.Account, com.cloud.storage.StoragePool, com.cloud.deploy.DataCenterDeployment, com.cloud.vm.VirtualMachineProfileImpl, java.util.ArrayList, com.cloud.vm.VMInstanceVO, com.cloud.vm.DiskProfile, com.cloud.host.HostVO, com.cloud.storage.VolumeVO, com.cloud.exception.InvalidParameterValueException, com.cloud.storage.DiskOfferingVO, org.apache.cloudstack.storage.datastore.db.StoragePoolVO, com.cloud.exception.PermissionDeniedException, java.util.List, com.cloud.vm.VirtualMachineProfile, org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator, com.cloud.utils.Pair, com.cloud.user.SSHKeyPair
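
Distilled from the method above, a minimal sketch of the core DiskProfile-plus-allocator pattern. The helper name findSuitablePoolsForVolume is hypothetical; the injected fields (_diskOfferingDao, _storagePoolAllocators) and the volume, vm and srcVolumePool arguments are assumed to be the same ones ManagementServerImpl already has.

private List<StoragePool> findSuitablePoolsForVolume(final VolumeVO volume, final VMInstanceVO vm, final StoragePoolVO srcVolumePool) {
    // The DiskProfile carries the volume's size, storage tags and hypervisor type for the allocators.
    final DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId());
    final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm);
    final DiskProfile diskProfile = new DiskProfile(volume, diskOffering, profile.getHypervisorType());
    // Keep the volume in its current pod/cluster and exclude its current pool.
    final DataCenterDeployment plan = new DataCenterDeployment(volume.getDataCenterId(), srcVolumePool.getPodId(), srcVolumePool.getClusterId(), null, null, null);
    final ExcludeList avoid = new ExcludeList();
    avoid.addPool(srcVolumePool.getId());
    final List<StoragePool> suitablePools = new ArrayList<>();
    for (final StoragePoolAllocator allocator : _storagePoolAllocators) {
        final List<StoragePool> pools = allocator.allocateToPool(diskProfile, profile, plan, avoid, StoragePoolAllocator.RETURN_UPTO_ALL);
        if (pools != null && !pools.isEmpty()) {
            // The first allocator that returns candidates wins, mirroring the method above.
            suitablePools.addAll(pools);
            break;
        }
    }
    return suitablePools;
}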

Example 22 with DiskProfile

Use of com.cloud.vm.DiskProfile in the Apache CloudStack project: class ManagementServerImpl, method listHostsForMigrationOfVM:

@Override
public Ternary<Pair<List<? extends Host>, Integer>, List<? extends Host>, Map<Host, Boolean>> listHostsForMigrationOfVM(final Long vmId, final Long startIndex, final Long pageSize, final String keyword) {
    final Account caller = getCaller();
    if (!_accountMgr.isRootAdmin(caller.getId())) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Caller is not a root admin, permission denied to migrate the VM");
        }
        throw new PermissionDeniedException("No permission to migrate VM, Only Root Admin can migrate a VM!");
    }
    final VMInstanceVO vm = _vmInstanceDao.findById(vmId);
    if (vm == null) {
        final InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find the VM with given id");
        throw ex;
    }
    if (vm.getState() != State.Running) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("VM is not running, cannot migrate the vm" + vm);
        }
        final InvalidParameterValueException ex = new InvalidParameterValueException("VM is not Running, cannot " + "migrate the vm with specified id");
        ex.addProxyObject(vm.getUuid(), "vmId");
        throw ex;
    }
    if (_serviceOfferingDetailsDao.findDetail(vm.getServiceOfferingId(), GPU.Keys.pciDevice.toString()) != null) {
        s_logger.info(" Live Migration of GPU enabled VM : " + vm.getInstanceName() + " is not supported");
        // Return empty list.
        return new Ternary<Pair<List<? extends Host>, Integer>, List<? extends Host>, Map<Host, Boolean>>(new Pair<List<? extends Host>, Integer>(new ArrayList<HostVO>(), new Integer(0)), new ArrayList<Host>(), new HashMap<Host, Boolean>());
    }
    if (!vm.getHypervisorType().equals(HypervisorType.XenServer) && !vm.getHypervisorType().equals(HypervisorType.VMware) && !vm.getHypervisorType().equals(HypervisorType.KVM) && !vm.getHypervisorType().equals(HypervisorType.Ovm) && !vm.getHypervisorType().equals(HypervisorType.Hyperv) && !vm.getHypervisorType().equals(HypervisorType.LXC) && !vm.getHypervisorType().equals(HypervisorType.Simulator) && !vm.getHypervisorType().equals(HypervisorType.Ovm3)) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug(vm + " is not XenServer/VMware/KVM/Ovm/Hyperv/Ovm3, cannot migrate this VM.");
        }
        throw new InvalidParameterValueException("Unsupported Hypervisor Type for VM migration, we support " + "XenServer/VMware/KVM/Ovm/Hyperv/Ovm3 only");
    }
    if (vm.getType().equals(VirtualMachine.Type.User) && vm.getHypervisorType().equals(HypervisorType.LXC)) {
        throw new InvalidParameterValueException("Unsupported Hypervisor Type for User VM migration, we support XenServer/VMware/KVM/Ovm/Hyperv/Ovm3 only");
    }
    final long srcHostId = vm.getHostId();
    final Host srcHost = _hostDao.findById(srcHostId);
    if (srcHost == null) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Unable to find the host with id: " + srcHostId + " of this VM:" + vm);
        }
        final InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find the host (with specified id) of VM with specified id");
        ex.addProxyObject(String.valueOf(srcHostId), "hostId");
        ex.addProxyObject(vm.getUuid(), "vmId");
        throw ex;
    }
    // Check if the vm can be migrated with storage.
    boolean canMigrateWithStorage = false;
    if (vm.getType() == VirtualMachine.Type.User) {
        final HypervisorCapabilitiesVO capabilities = _hypervisorCapabilitiesDao.findByHypervisorTypeAndVersion(srcHost.getHypervisorType(), srcHost.getHypervisorVersion());
        if (capabilities != null) {
            canMigrateWithStorage = capabilities.isStorageMotionSupported();
        }
    }
    // Check if the vm is using any disks on local storage.
    final VirtualMachineProfile vmProfile = new VirtualMachineProfileImpl(vm, null, _offeringDao.findById(vm.getId(), vm.getServiceOfferingId()), null, null);
    final List<VolumeVO> volumes = _volumeDao.findCreatedByInstance(vmProfile.getId());
    boolean usesLocal = false;
    for (final VolumeVO volume : volumes) {
        final DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId());
        final DiskProfile diskProfile = new DiskProfile(volume, diskOffering, vmProfile.getHypervisorType());
        if (diskProfile.useLocalStorage()) {
            usesLocal = true;
            break;
        }
    }
    if (!canMigrateWithStorage && usesLocal) {
        throw new InvalidParameterValueException("Unsupported operation, VM uses Local storage, cannot migrate");
    }
    final Type hostType = srcHost.getType();
    Pair<List<HostVO>, Integer> allHostsPair = null;
    List<HostVO> allHosts = null;
    final Map<Host, Boolean> requiresStorageMotion = new HashMap<Host, Boolean>();
    DataCenterDeployment plan = null;
    if (canMigrateWithStorage) {
        allHostsPair = searchForServers(startIndex, pageSize, null, hostType, null, srcHost.getDataCenterId(), null, null, null, keyword, null, null, srcHost.getHypervisorType(), srcHost.getHypervisorVersion());
        allHosts = allHostsPair.first();
        allHosts.remove(srcHost);
        for (final VolumeVO volume : volumes) {
            final StoragePool storagePool = _poolDao.findById(volume.getPoolId());
            final Long volClusterId = storagePool.getClusterId();
            for (final Iterator<HostVO> iterator = allHosts.iterator(); iterator.hasNext(); ) {
                final Host host = iterator.next();
                if (volClusterId != null) {
                    if (!host.getClusterId().equals(volClusterId) || usesLocal) {
                        if (hasSuitablePoolsForVolume(volume, host, vmProfile)) {
                            requiresStorageMotion.put(host, true);
                        } else {
                            iterator.remove();
                        }
                    }
                } else {
                    if (storagePool.isManaged()) {
                        if (!srcHost.getClusterId().equals(host.getClusterId())) { // compare cluster ids by value, not by reference
                            requiresStorageMotion.put(host, true);
                        }
                    }
                }
            }
        }
        plan = new DataCenterDeployment(srcHost.getDataCenterId(), null, null, null, null, null);
    } else {
        final Long cluster = srcHost.getClusterId();
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Searching for all hosts in cluster " + cluster + " for migrating VM " + vm);
        }
        allHostsPair = searchForServers(startIndex, pageSize, null, hostType, null, null, null, cluster, null, keyword, null, null, null, null);
        // Filter out the current host.
        allHosts = allHostsPair.first();
        allHosts.remove(srcHost);
        plan = new DataCenterDeployment(srcHost.getDataCenterId(), srcHost.getPodId(), srcHost.getClusterId(), null, null, null);
    }
    final Pair<List<? extends Host>, Integer> otherHosts = new Pair<List<? extends Host>, Integer>(allHosts, new Integer(allHosts.size()));
    List<Host> suitableHosts = new ArrayList<Host>();
    final ExcludeList excludes = new ExcludeList();
    excludes.addHost(srcHostId);
    // call affinitygroup chain
    final long vmGroupCount = _affinityGroupVMMapDao.countAffinityGroupsForVm(vm.getId());
    if (vmGroupCount > 0) {
        for (final AffinityGroupProcessor processor : _affinityProcessors) {
            processor.process(vmProfile, plan, excludes);
        }
    }
    for (final HostAllocator allocator : hostAllocators) {
        if (canMigrateWithStorage) {
            suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, excludes, allHosts, HostAllocator.RETURN_UPTO_ALL, false);
        } else {
            suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, excludes, HostAllocator.RETURN_UPTO_ALL, false);
        }
        if (suitableHosts != null && !suitableHosts.isEmpty()) {
            break;
        }
    }
    if (s_logger.isDebugEnabled()) {
        if (suitableHosts.isEmpty()) {
            s_logger.debug("No suitable hosts found");
        } else {
            s_logger.debug("Hosts having capacity and suitable for migration: " + suitableHosts);
        }
    }
    return new Ternary<Pair<List<? extends Host>, Integer>, List<? extends Host>, Map<Host, Boolean>>(otherHosts, suitableHosts, requiresStorageMotion);
}
Also used: com.cloud.hypervisor.HypervisorCapabilitiesVO, com.cloud.user.Account, com.cloud.storage.StoragePool, java.util.HashMap, java.util.ArrayList, com.cloud.agent.manager.allocator.HostAllocator, com.cloud.storage.VolumeVO, com.cloud.exception.InvalidParameterValueException, com.cloud.storage.DiskOfferingVO, com.cloud.deploy.DeploymentPlanner.ExcludeList, java.util.List, org.apache.cloudstack.affinity.AffinityGroupProcessor, com.cloud.utils.Pair, com.cloud.user.SSHKeyPair, com.cloud.deploy.DataCenterDeployment, com.cloud.utils.Ternary, com.cloud.vm.VirtualMachineProfileImpl, com.cloud.vm.VMInstanceVO, com.cloud.host.Host, com.cloud.vm.DiskProfile, com.cloud.host.HostVO, com.cloud.server.ResourceTag.ResourceObjectType, com.cloud.dc.Vlan.VlanType, com.cloud.utils.db.JoinBuilder.JoinType, com.cloud.hypervisor.Hypervisor.HypervisorType, com.cloud.host.Host.Type, com.cloud.exception.PermissionDeniedException, com.cloud.vm.VirtualMachineProfile
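
The local-storage check in the middle of the method reduces to a short predicate over the VM's volumes. A minimal sketch, reusing _diskOfferingDao and the vmProfile from above; the helper name is hypothetical.

private boolean anyVolumeUsesLocalStorage(final List<VolumeVO> volumes, final VirtualMachineProfile vmProfile) {
    for (final VolumeVO volume : volumes) {
        final DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId());
        final DiskProfile diskProfile = new DiskProfile(volume, diskOffering, vmProfile.getHypervisorType());
        // A disk offering bound to local storage blocks migration unless storage motion is supported.
        if (diskProfile.useLocalStorage()) {
            return true;
        }
    }
    return false;
}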

Example 23 with DiskProfile

Use of com.cloud.vm.DiskProfile in the Apache CloudStack project: class VolumeOrchestrator, method createVolumeFromSnapshot:

@DB
@Override
public VolumeInfo createVolumeFromSnapshot(Volume volume, Snapshot snapshot, UserVm vm) throws StorageUnavailableException {
    Account account = _entityMgr.findById(Account.class, volume.getAccountId());
    final HashSet<StoragePool> poolsToAvoid = new HashSet<StoragePool>();
    StoragePool pool = null;
    Set<Long> podsToAvoid = new HashSet<Long>();
    Pair<Pod, Long> pod = null;
    DiskOffering diskOffering = _entityMgr.findById(DiskOffering.class, volume.getDiskOfferingId());
    DataCenter dc = _entityMgr.findById(DataCenter.class, volume.getDataCenterId());
    DiskProfile dskCh = new DiskProfile(volume, diskOffering, snapshot.getHypervisorType());
    String msg = "There are no available storage pools to store the volume in";
    if (vm != null) {
        Pod podofVM = _entityMgr.findById(Pod.class, vm.getPodIdToDeployIn());
        if (podofVM != null) {
            pod = new Pair<Pod, Long>(podofVM, podofVM.getId());
        }
    }
    if (vm != null && pod != null) {
        // If the VM is running, use the hostId to find the clusterId. If it is stopped, use the cluster where the ROOT volume of the VM resides.
        Long hostId = null;
        Long clusterId = null;
        if (vm.getState() == State.Running) {
            hostId = vm.getHostId();
            if (hostId != null) {
                Host vmHost = _entityMgr.findById(Host.class, hostId);
                clusterId = vmHost.getClusterId();
            }
        } else {
            List<VolumeVO> rootVolumesOfVm = _volsDao.findByInstanceAndType(vm.getId(), Volume.Type.ROOT);
            if (rootVolumesOfVm.size() != 1) {
                throw new CloudRuntimeException("The VM " + vm.getHostName() + " has more than one ROOT volume and is in an invalid state. Please contact Cloud Support.");
            } else {
                VolumeVO rootVolumeOfVm = rootVolumesOfVm.get(0);
                StoragePoolVO rootDiskPool = _storagePoolDao.findById(rootVolumeOfVm.getPoolId());
                clusterId = (rootDiskPool == null ? null : rootDiskPool.getClusterId());
            }
        }
        // Determine what storage pool to store the volume in
        while ((pool = findStoragePool(dskCh, dc, pod.first(), clusterId, hostId, vm, poolsToAvoid)) != null) {
            break;
        }
        if (pool == null) {
            //pool could not be found in the VM's pod/cluster.
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("Could not find any storage pool to create Volume in the pod/cluster of the provided VM " + vm.getUuid());
            }
            StringBuilder addDetails = new StringBuilder(msg);
            addDetails.append(", Could not find any storage pool to create Volume in the pod/cluster of the VM ");
            addDetails.append(vm.getUuid());
            msg = addDetails.toString();
        }
    } else {
        // Determine what pod to store the volume in
        while ((pod = findPod(null, null, dc, account.getId(), podsToAvoid)) != null) {
            podsToAvoid.add(pod.first().getId());
            // Determine what storage pool to store the volume in
            while ((pool = findStoragePool(dskCh, dc, pod.first(), null, null, null, poolsToAvoid)) != null) {
                break;
            }
            if (pool != null) {
                if (s_logger.isDebugEnabled()) {
                    s_logger.debug("Found a suitable pool for create volume: " + pool.getId());
                }
                break;
            }
        }
    }
    if (pool == null) {
        s_logger.info(msg);
        throw new StorageUnavailableException(msg, -1);
    }
    VolumeInfo vol = volFactory.getVolume(volume.getId());
    DataStore store = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
    DataStoreRole dataStoreRole = getDataStoreRole(snapshot);
    SnapshotInfo snapInfo = snapshotFactory.getSnapshot(snapshot.getId(), dataStoreRole);
    if (snapInfo == null && dataStoreRole == DataStoreRole.Image) {
        // snapshot is not backed up to secondary, let's do that now.
        snapInfo = snapshotFactory.getSnapshot(snapshot.getId(), DataStoreRole.Primary);
        if (snapInfo == null) {
            throw new CloudRuntimeException("Cannot find snapshot " + snapshot.getId());
        }
        // We need to copy the snapshot onto secondary.
        SnapshotStrategy snapshotStrategy = _storageStrategyFactory.getSnapshotStrategy(snapshot, SnapshotOperation.BACKUP);
        snapshotStrategy.backupSnapshot(snapInfo);
        // Attempt to grab it again.
        snapInfo = snapshotFactory.getSnapshot(snapshot.getId(), dataStoreRole);
        if (snapInfo == null) {
            throw new CloudRuntimeException("Cannot find snapshot " + snapshot.getId() + " on secondary and could not create backup");
        }
    }
    // don't try to perform a sync if the DataStoreRole of the snapshot is equal to DataStoreRole.Primary
    if (!DataStoreRole.Primary.equals(dataStoreRole)) {
        try {
            // sync snapshot to region store if necessary
            DataStore snapStore = snapInfo.getDataStore();
            long snapVolId = snapInfo.getVolumeId();
            _snapshotSrv.syncVolumeSnapshotsToRegionStore(snapVolId, snapStore);
        } catch (Exception ex) {
            // log but ignore the sync error to avoid any potential S3 down issue, it should be sync next time
            s_logger.warn(ex.getMessage(), ex);
        }
    }
    // create volume on primary from snapshot
    AsyncCallFuture<VolumeApiResult> future = volService.createVolumeFromSnapshot(vol, store, snapInfo);
    try {
        VolumeApiResult result = future.get();
        if (result.isFailed()) {
            s_logger.debug("Failed to create volume from snapshot:" + result.getResult());
            throw new CloudRuntimeException("Failed to create volume from snapshot:" + result.getResult());
        }
        return result.getVolume();
    } catch (InterruptedException e) {
        s_logger.debug("Failed to create volume from snapshot", e);
        throw new CloudRuntimeException("Failed to create volume from snapshot", e);
    } catch (ExecutionException e) {
        s_logger.debug("Failed to create volume from snapshot", e);
        throw new CloudRuntimeException("Failed to create volume from snapshot", e);
    }
}
Also used: com.cloud.user.Account, com.cloud.storage.StoragePool, com.cloud.offering.DiskOffering, org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo, org.apache.cloudstack.engine.subsystem.api.storage.VolumeService.VolumeApiResult, com.cloud.storage.DataStoreRole, com.cloud.storage.VolumeVO, com.cloud.exception.StorageUnavailableException, com.cloud.utils.exception.CloudRuntimeException, org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore, org.apache.cloudstack.engine.subsystem.api.storage.DataStore, org.apache.cloudstack.storage.datastore.db.StoragePoolVO, java.util.concurrent.ExecutionException, org.apache.cloudstack.engine.subsystem.api.storage.SnapshotStrategy, java.util.HashSet, com.cloud.dc.Pod, com.cloud.host.Host, com.cloud.vm.DiskProfile, com.cloud.utils.fsm.NoTransitionException, com.cloud.exception.InsufficientStorageCapacityException, com.cloud.exception.InvalidParameterValueException, com.cloud.exception.ConcurrentOperationException, javax.naming.ConfigurationException, org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo, com.cloud.dc.DataCenter, com.cloud.utils.db.DB
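
Stripped of the pod/cluster fallback logic, the pool selection above comes down to building a DiskProfile from the volume, its disk offering and the snapshot's hypervisor type, then calling findStoragePool with an avoid set. A minimal sketch under the same assumptions as the method above; the pod, clusterId, hostId and vm variables are taken from that surrounding context.

DiskOffering diskOffering = _entityMgr.findById(DiskOffering.class, volume.getDiskOfferingId());
DataCenter dc = _entityMgr.findById(DataCenter.class, volume.getDataCenterId());
// The snapshot's hypervisor type drives the pool search, since the new volume is created from it.
DiskProfile dskCh = new DiskProfile(volume, diskOffering, snapshot.getHypervisorType());
HashSet<StoragePool> poolsToAvoid = new HashSet<>();
StoragePool pool = findStoragePool(dskCh, dc, pod.first(), clusterId, hostId, vm, poolsToAvoid);
if (pool == null) {
    throw new StorageUnavailableException("There are no available storage pools to store the volume in", -1);
}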

Example 24 with DiskProfile

Use of com.cloud.vm.DiskProfile in the Apache CloudStack project: class VolumeOrchestrator, method moveVolume:

@Override
public VolumeInfo moveVolume(VolumeInfo volume, long destPoolDcId, Long destPoolPodId, Long destPoolClusterId, HypervisorType dataDiskHyperType) throws ConcurrentOperationException, StorageUnavailableException {
    // Find a destination storage pool with the specified criteria
    DiskOffering diskOffering = _entityMgr.findById(DiskOffering.class, volume.getDiskOfferingId());
    DiskProfile dskCh = new DiskProfile(volume.getId(), volume.getVolumeType(), volume.getName(), diskOffering.getId(), diskOffering.getDiskSize(), diskOffering.getTagsArray(), diskOffering.getUseLocalStorage(), diskOffering.isRecreatable(), null);
    dskCh.setHyperType(dataDiskHyperType);
    storageMgr.setDiskProfileThrottling(dskCh, null, diskOffering);
    DataCenter destPoolDataCenter = _entityMgr.findById(DataCenter.class, destPoolDcId);
    Pod destPoolPod = _entityMgr.findById(Pod.class, destPoolPodId);
    StoragePool destPool = findStoragePool(dskCh, destPoolDataCenter, destPoolPod, destPoolClusterId, null, null, new HashSet<StoragePool>());
    if (destPool == null) {
        throw new CloudRuntimeException("Failed to find a storage pool with enough capacity to move the volume to.");
    }
    Volume newVol = migrateVolume(volume, destPool);
    return volFactory.getVolume(newVol.getId());
}
Also used: com.cloud.offering.DiskOffering, com.cloud.dc.DataCenter, com.cloud.storage.StoragePool, com.cloud.dc.Pod, com.cloud.vm.VmWorkMigrateVolume, com.cloud.vm.VmWorkAttachVolume, com.cloud.storage.Volume, com.cloud.utils.exception.CloudRuntimeException, com.cloud.vm.DiskProfile
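
For contrast with the (volume, offering, hypervisorType) constructor used in the other examples, here is the same field-by-field call annotated parameter by parameter. This is a sketch only; the comments reflect how the arguments are used in the method above rather than authoritative documentation of the constructor.

DiskProfile dskCh = new DiskProfile(
        volume.getId(),                     // id of the volume being moved
        volume.getVolumeType(),             // e.g. ROOT or DATADISK
        volume.getName(),
        diskOffering.getId(),
        diskOffering.getDiskSize(),         // requested size from the offering
        diskOffering.getTagsArray(),        // storage tags the destination pool must satisfy
        diskOffering.getUseLocalStorage(),  // whether the offering targets local storage
        diskOffering.isRecreatable(),
        null);                              // template id (none is passed here)
dskCh.setHyperType(dataDiskHyperType);      // pin the hypervisor type before the pool search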

Example 25 with DiskProfile

Use of com.cloud.vm.DiskProfile in the Apache CloudStack project: class StorageAllocatorTest, method testPoolStateIsNotUp:

@Test
public void testPoolStateIsNotUp() {
    try {
        createDb();
        StoragePoolVO pool = storagePoolDao.findById(storagePoolId);
        pool.setScope(ScopeType.ZONE);
        pool.setStatus(StoragePoolStatus.Maintenance);
        storagePoolDao.update(pool.getId(), pool);
        DiskProfile profile = new DiskProfile(volume, diskOffering, HypervisorType.XenServer);
        VirtualMachineProfile vmProfile = Mockito.mock(VirtualMachineProfile.class);
        Mockito.when(storageMgr.storagePoolHasEnoughSpace(Matchers.anyListOf(Volume.class), Matchers.any(StoragePool.class))).thenReturn(true);
        DeploymentPlan plan = new DataCenterDeployment(dcId, podId, clusterId, null, null, null);
        int foundAcct = 0;
        for (StoragePoolAllocator allocator : allocators) {
            List<StoragePool> pools = allocator.allocateToPool(profile, vmProfile, plan, new ExcludeList(), 1);
            if (!pools.isEmpty()) {
                Assert.assertEquals(pools.get(0).getId(), storage.getId());
                foundAcct++;
            }
        }
        if (foundAcct > 0) { // no allocator may return a pool that is in Maintenance
            Assert.fail();
        }
    } catch (Exception e) {
        cleanDb();
        Assert.fail();
    }
}
Also used: com.cloud.deploy.DeploymentPlanner.ExcludeList, com.cloud.storage.StoragePool, com.cloud.deploy.DataCenterDeployment, com.cloud.storage.Volume, org.apache.cloudstack.storage.datastore.db.StoragePoolVO, com.cloud.vm.VirtualMachineProfile, com.cloud.deploy.DeploymentPlan, com.cloud.vm.DiskProfile, org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator, org.junit.Test
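
A complementary positive-path check, not part of the class as shown here, would put the pool back into the Up state and expect it to be returned. A minimal hedged sketch reusing the same fixtures (pool, profile, vmProfile, plan, allocators, storage), which are assumed to be set up by createDb() as in the test above:

// Hypothetical mirror of the test above: with the pool Up, at least one allocator should return it.
pool.setStatus(StoragePoolStatus.Up);
storagePoolDao.update(pool.getId(), pool);
boolean found = false;
for (StoragePoolAllocator allocator : allocators) {
    List<StoragePool> pools = allocator.allocateToPool(profile, vmProfile, plan, new ExcludeList(), 1);
    if (!pools.isEmpty()) {
        // Expect the fixture pool (storage) to be among the candidates.
        Assert.assertEquals(pools.get(0).getId(), storage.getId());
        found = true;
    }
}
Assert.assertTrue("expected at least one allocator to return the Up pool", found);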

Aggregations

DiskProfile (com.cloud.vm.DiskProfile): 27
StoragePool (com.cloud.storage.StoragePool): 16
ExcludeList (com.cloud.deploy.DeploymentPlanner.ExcludeList): 12
Test (org.junit.Test): 12
DataCenterDeployment (com.cloud.deploy.DataCenterDeployment): 11
StoragePoolAllocator (org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator): 11
StorageFilerTO (com.cloud.agent.api.to.StorageFilerTO): 10
Volume (com.cloud.storage.Volume): 10
VirtualMachineProfile (com.cloud.vm.VirtualMachineProfile): 10
CloudRuntimeException (com.cloud.utils.exception.CloudRuntimeException): 9
CreateAnswer (com.cloud.agent.api.storage.CreateAnswer): 8
DeploymentPlan (com.cloud.deploy.DeploymentPlan): 8
StoragePoolVO (org.apache.cloudstack.storage.datastore.db.StoragePoolVO): 8
VolumeTO (com.cloud.agent.api.to.VolumeTO): 7
DiskOfferingVO (com.cloud.storage.DiskOfferingVO): 7
Answer (com.cloud.agent.api.Answer): 6
VolumeVO (com.cloud.storage.VolumeVO): 4
Account (com.cloud.user.Account): 4
ArrayList (java.util.ArrayList): 4
HashSet (java.util.HashSet): 4