
Example 66 with Volume

use of com.cloud.storage.Volume in project cloudstack by apache.

In class DeploymentPlanningManagerImpl, method findSuitablePoolsForVolumes.

protected Pair<Map<Volume, List<StoragePool>>, List<Volume>> findSuitablePoolsForVolumes(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
    List<VolumeVO> volumesTobeCreated = _volsDao.findUsableVolumesForInstance(vmProfile.getId());
    Map<Volume, List<StoragePool>> suitableVolumeStoragePools = new HashMap<Volume, List<StoragePool>>();
    List<Volume> readyAndReusedVolumes = new ArrayList<Volume>();
    // There should be at least the ROOT volume of the VM in a usable state
    if (volumesTobeCreated.isEmpty()) {
        throw new CloudRuntimeException("Unable to create deployment, no usable volumes found for the VM");
    }
    // don't allow starting a VM that doesn't have a ROOT volume
    if (_volsDao.findByInstanceAndType(vmProfile.getId(), Volume.Type.ROOT).isEmpty()) {
        throw new CloudRuntimeException("Unable to prepare volumes for vm as ROOT volume is missing");
    }
    // for each volume find list of suitable storage pools by calling the
    // allocators
    Set<Long> originalAvoidPoolSet = avoid.getPoolsToAvoid();
    if (originalAvoidPoolSet == null) {
        originalAvoidPoolSet = new HashSet<Long>();
    }
    Set<Long> poolsToAvoidOutput = new HashSet<Long>(originalAvoidPoolSet);
    for (VolumeVO toBeCreated : volumesTobeCreated) {
        s_logger.debug("Checking suitable pools for volume (Id, Type): (" + toBeCreated.getId() + "," + toBeCreated.getVolumeType().name() + ")");
        // If a pool is already allocated to the volume (or specified in the plan), check whether it can be reused.
        if (plan.getPoolId() != null || (toBeCreated.getVolumeType() == Volume.Type.DATADISK && toBeCreated.getPoolId() != null && toBeCreated.getState() == Volume.State.Ready)) {
            s_logger.debug("Volume has pool already allocated, checking if pool can be reused, poolId: " + toBeCreated.getPoolId());
            List<StoragePool> suitablePools = new ArrayList<StoragePool>();
            StoragePool pool = null;
            if (toBeCreated.getPoolId() != null) {
                pool = (StoragePool) dataStoreMgr.getPrimaryDataStore(toBeCreated.getPoolId());
            } else {
                pool = (StoragePool) dataStoreMgr.getPrimaryDataStore(plan.getPoolId());
            }
            if (!pool.isInMaintenance()) {
                if (!avoid.shouldAvoid(pool)) {
                    long exstPoolDcId = pool.getDataCenterId();
                    long exstPoolPodId = pool.getPodId() != null ? pool.getPodId() : -1;
                    long exstPoolClusterId = pool.getClusterId() != null ? pool.getClusterId() : -1;
                    boolean canReusePool = false;
                    if (plan.getDataCenterId() == exstPoolDcId && plan.getPodId() == exstPoolPodId && plan.getClusterId() == exstPoolClusterId) {
                        canReusePool = true;
                    } else if (plan.getDataCenterId() == exstPoolDcId) {
                        DataStore dataStore = dataStoreMgr.getPrimaryDataStore(pool.getId());
                        if (dataStore != null && dataStore.getScope() != null && dataStore.getScope().getScopeType() == ScopeType.ZONE) {
                            canReusePool = true;
                        }
                    } else {
                        s_logger.debug("Pool of the volume does not fit the specified plan, need to reallocate a pool for this volume");
                        canReusePool = false;
                    }
                    if (canReusePool) {
                        s_logger.debug("Planner need not allocate a pool for this volume since its READY");
                        suitablePools.add(pool);
                        suitableVolumeStoragePools.put(toBeCreated, suitablePools);
                        if (!(toBeCreated.getState() == Volume.State.Allocated || toBeCreated.getState() == Volume.State.Creating)) {
                            readyAndReusedVolumes.add(toBeCreated);
                        }
                        continue;
                    }
                } else {
                    s_logger.debug("Pool of the volume is in avoid set, need to reallocate a pool for this volume");
                }
            } else {
                s_logger.debug("Pool of the volume is in maintenance, need to reallocate a pool for this volume");
            }
        }
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("We need to allocate new storagepool for this volume");
        }
        if (!isRootAdmin(vmProfile)) {
            if (!isEnabledForAllocation(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId())) {
                if (s_logger.isDebugEnabled()) {
                    s_logger.debug("Cannot allocate new storagepool for this volume in this cluster, allocation state is disabled");
                    s_logger.debug("Cannot deploy to this specified plan, allocation state is disabled, returning.");
                }
                // Cannot find suitable storage pools under this cluster for
                // this volume since allocation_state is disabled.
                // Remove any suitable pools found for other volumes.
                // All volumes should get suitable pools under this cluster;
                // else we can't use this cluster.
                suitableVolumeStoragePools.clear();
                break;
            }
        }
        s_logger.debug("Calling StoragePoolAllocators to find suitable pools");
        DiskOfferingVO diskOffering = _diskOfferingDao.findById(toBeCreated.getDiskOfferingId());
        if (vmProfile.getTemplate().getFormat() == Storage.ImageFormat.ISO && vmProfile.getServiceOffering().getTagsArray().length != 0) {
            diskOffering.setTagsArray(Arrays.asList(vmProfile.getServiceOffering().getTagsArray()));
        }
        DiskProfile diskProfile = new DiskProfile(toBeCreated, diskOffering, vmProfile.getHypervisorType());
        boolean useLocalStorage = false;
        if (vmProfile.getType() != VirtualMachine.Type.User) {
            DataCenterVO zone = _dcDao.findById(plan.getDataCenterId());
            assert (zone != null) : "Invalid zone in deployment plan";
            Boolean useLocalStorageForSystemVM = ConfigurationManagerImpl.SystemVMUseLocalStorage.valueIn(zone.getId());
            if (useLocalStorageForSystemVM != null) {
                useLocalStorage = useLocalStorageForSystemVM.booleanValue();
                s_logger.debug("System VMs will use " + (useLocalStorage ? "local" : "shared") + " storage for zone id=" + plan.getDataCenterId());
            }
        } else {
            useLocalStorage = diskOffering.getUseLocalStorage();
            // also honor the local storage flag on the service offering when this is a ROOT disk
            if (!useLocalStorage && vmProfile.getServiceOffering().getUseLocalStorage()) {
                if (toBeCreated.getVolumeType() == Volume.Type.ROOT) {
                    useLocalStorage = true;
                }
            }
        }
        diskProfile.setUseLocalStorage(useLocalStorage);
        boolean foundPotentialPools = false;
        for (StoragePoolAllocator allocator : _storagePoolAllocators) {
            final List<StoragePool> suitablePools = allocator.allocateToPool(diskProfile, vmProfile, plan, avoid, returnUpTo);
            if (suitablePools != null && !suitablePools.isEmpty()) {
                suitableVolumeStoragePools.put(toBeCreated, suitablePools);
                foundPotentialPools = true;
                break;
            }
        }
        if (avoid.getPoolsToAvoid() != null) {
            poolsToAvoidOutput.addAll(avoid.getPoolsToAvoid());
            avoid.getPoolsToAvoid().retainAll(originalAvoidPoolSet);
        }
        if (!foundPotentialPools) {
            s_logger.debug("No suitable pools found for volume: " + toBeCreated + " under cluster: " + plan.getClusterId());
            // No suitable storage pools found under this cluster for this
            // volume. Remove any suitable pools found for other volumes.
            // All volumes should get suitable pools under this cluster;
            // else we can't use this cluster.
            suitableVolumeStoragePools.clear();
            break;
        }
    }
    HashSet<Long> toRemove = new HashSet<Long>();
    for (List<StoragePool> lsp : suitableVolumeStoragePools.values()) {
        for (StoragePool sp : lsp) {
            toRemove.add(sp.getId());
        }
    }
    poolsToAvoidOutput.removeAll(toRemove);
    if (avoid.getPoolsToAvoid() != null) {
        avoid.getPoolsToAvoid().addAll(poolsToAvoidOutput);
    }
    if (suitableVolumeStoragePools.isEmpty()) {
        s_logger.debug("No suitable pools found");
    }
    return new Pair<Map<Volume, List<StoragePool>>, List<Volume>>(suitableVolumeStoragePools, readyAndReusedVolumes);
}
Also used : DataCenterVO(com.cloud.dc.DataCenterVO) StoragePool(com.cloud.storage.StoragePool) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) DiskProfile(com.cloud.vm.DiskProfile) VolumeVO(com.cloud.storage.VolumeVO) Volume(com.cloud.storage.Volume) CloudRuntimeException(com.cloud.utils.exception.CloudRuntimeException) DiskOfferingVO(com.cloud.storage.DiskOfferingVO) DataStore(org.apache.cloudstack.engine.subsystem.api.storage.DataStore) ArrayList(java.util.ArrayList) ExcludeList(com.cloud.deploy.DeploymentPlanner.ExcludeList) List(java.util.List) StoragePoolAllocator(org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator) HashSet(java.util.HashSet) Pair(com.cloud.utils.Pair)
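
The avoid-set bookkeeping in this method (snapshot the caller's pool-avoid set, reset the allocators' input to that snapshot after each volume, then merge everything that was rejected, minus the pools that turned out suitable, back into the caller's set) can be shown in isolation. The sketch below is illustrative only: it uses plain collections and placeholder pool IDs instead of CloudStack's ExcludeList and StoragePool types.

import java.util.HashSet;
import java.util.Set;

public class AvoidSetBookkeepingSketch {
    public static void main(String[] args) {
        // the caller's original pool-avoid set (pool IDs are placeholders)
        Set<Long> originalAvoid = new HashSet<>(Set.of(1L, 2L));
        // merged output: everything any allocator rejected, across all volumes
        Set<Long> avoidOutput = new HashSet<>(originalAvoid);
        // working avoid set handed to the allocators, reset after each volume
        Set<Long> avoid = new HashSet<>(originalAvoid);

        long[][] rejectedPerVolume = { { 3L, 4L }, { 4L, 5L } }; // pretend allocator results
        Set<Long> suitablePoolIds = Set.of(5L);                  // pools that ended up suitable for some volume

        for (long[] rejected : rejectedPerVolume) {
            for (long poolId : rejected) {
                avoid.add(poolId);              // allocators add pools they had to skip
            }
            avoidOutput.addAll(avoid);          // remember the rejections in the merged output
            avoid.retainAll(originalAvoid);     // reset so the next volume starts from the caller's set
        }

        avoidOutput.removeAll(suitablePoolIds); // a pool suitable for some volume is not avoided overall
        avoid.addAll(avoidOutput);              // hand the merged result back to the caller
        System.out.println("final avoid set: " + avoid); // typically prints [1, 2, 3, 4]
    }
}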

Example 67 with Volume

use of com.cloud.storage.Volume in project cloudstack by apache.

In class DeploymentPlanningManagerImpl, method planDeployment.

@Override
public DeployDestination planDeployment(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoids, DeploymentPlanner planner) throws InsufficientServerCapacityException, AffinityConflictException {
    ServiceOffering offering = vmProfile.getServiceOffering();
    int cpu_requested = offering.getCpu() * offering.getSpeed();
    long ram_requested = offering.getRamSize() * 1024L * 1024L;
    VirtualMachine vm = vmProfile.getVirtualMachine();
    DataCenter dc = _dcDao.findById(vm.getDataCenterId());
    if (vm.getType() == VirtualMachine.Type.User || vm.getType() == VirtualMachine.Type.DomainRouter) {
        checkForNonDedicatedResources(vmProfile, dc, avoids);
    }
    if (s_logger.isDebugEnabled()) {
        s_logger.debug("DeploymentPlanner allocation algorithm: " + planner);
        s_logger.debug("Trying to allocate a host and storage pools from dc:" + plan.getDataCenterId() + ", pod:" + plan.getPodId() + ",cluster:" + plan.getClusterId() + ", requested cpu: " + cpu_requested + ", requested ram: " + ram_requested);
        s_logger.debug("Is ROOT volume READY (pool already allocated)?: " + (plan.getPoolId() != null ? "Yes" : "No"));
    }
    String haVmTag = (String) vmProfile.getParameter(VirtualMachineProfile.Param.HaTag);
    if (plan.getHostId() != null && haVmTag == null) {
        Long hostIdSpecified = plan.getHostId();
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("DeploymentPlan has host_id specified, choosing this host and making no checks on this host: " + hostIdSpecified);
        }
        HostVO host = _hostDao.findById(hostIdSpecified);
        if (host == null) {
            s_logger.debug("The specified host cannot be found");
        } else if (avoids.shouldAvoid(host)) {
            s_logger.debug("The specified host is in avoid set");
        } else {
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("Looking for suitable pools for this host under zone: " + host.getDataCenterId() + ", pod: " + host.getPodId() + ", cluster: " + host.getClusterId());
            }
            Pod pod = _podDao.findById(host.getPodId());
            Cluster cluster = _clusterDao.findById(host.getClusterId());
            if (vm.getHypervisorType() == HypervisorType.BareMetal) {
                DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap<Volume, StoragePool>());
                s_logger.debug("Returning Deployment Destination: " + dest);
                return dest;
            }
            // search for storage under the zone, pod, cluster of the host.
            DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), hostIdSpecified, plan.getPoolId(), null, plan.getReservationContext());
            Pair<Map<Volume, List<StoragePool>>, List<Volume>> result = findSuitablePoolsForVolumes(vmProfile, lastPlan, avoids, HostAllocator.RETURN_UPTO_ALL);
            Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
            List<Volume> readyAndReusedVolumes = result.second();
            // choose the potential pool for this VM for this host
            if (!suitableVolumeStoragePools.isEmpty()) {
                List<Host> suitableHosts = new ArrayList<Host>();
                suitableHosts.add(host);
                Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(suitableHosts, suitableVolumeStoragePools, avoids, getPlannerUsage(planner, vmProfile, plan, avoids), readyAndReusedVolumes);
                if (potentialResources != null) {
                    pod = _podDao.findById(host.getPodId());
                    cluster = _clusterDao.findById(host.getClusterId());
                    Map<Volume, StoragePool> storageVolMap = potentialResources.second();
                    // ready and reused volumes are already placed, so we don't have to prepare them
                    for (Volume vol : readyAndReusedVolumes) {
                        storageVolMap.remove(vol);
                    }
                    DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap);
                    s_logger.debug("Returning Deployment Destination: " + dest);
                    return dest;
                }
            }
        }
        s_logger.debug("Cannot deploy to specified host, returning.");
        return null;
    }
    // call affinitygroup chain
    long vmGroupCount = _affinityGroupVMMapDao.countAffinityGroupsForVm(vm.getId());
    if (vmGroupCount > 0) {
        for (AffinityGroupProcessor processor : _affinityProcessors) {
            processor.process(vmProfile, plan, avoids);
        }
    }
    if (vm.getType() == VirtualMachine.Type.User) {
        checkForNonDedicatedResources(vmProfile, dc, avoids);
    }
    if (s_logger.isDebugEnabled()) {
        s_logger.debug("Deploy avoids pods: " + avoids.getPodsToAvoid() + ", clusters: " + avoids.getClustersToAvoid() + ", hosts: " + avoids.getHostsToAvoid());
    }
    // check if datacenter is in avoid set
    if (avoids.shouldAvoid(dc)) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("DataCenter id = '" + dc.getId() + "' provided is in avoid set, DeploymentPlanner cannot allocate the VM, returning.");
        }
        return null;
    }
    if (planner == null) {
        String plannerName = offering.getDeploymentPlanner();
        if (plannerName == null) {
            if (vm.getHypervisorType() == HypervisorType.BareMetal) {
                plannerName = "BareMetalPlanner";
            } else {
                plannerName = _configDao.getValue(Config.VmDeploymentPlanner.key());
            }
        }
        planner = getDeploymentPlannerByName(plannerName);
    }
    if (vm.getLastHostId() != null && haVmTag == null) {
        s_logger.debug("This VM has last host_id specified, trying to choose the same host: " + vm.getLastHostId());
        HostVO host = _hostDao.findById(vm.getLastHostId());
        ServiceOfferingDetailsVO offeringDetails = null;
        if (host == null) {
            s_logger.debug("The last host of this VM cannot be found");
        } else if (avoids.shouldAvoid(host)) {
            s_logger.debug("The last host of this VM is in avoid set");
        } else if (plan.getClusterId() != null && host.getClusterId() != null && !plan.getClusterId().equals(host.getClusterId())) {
            s_logger.debug("The last host of this VM cannot be picked as the plan specifies different clusterId: " + plan.getClusterId());
        } else if (_capacityMgr.checkIfHostReachMaxGuestLimit(host)) {
            s_logger.debug("The last Host, hostId: " + host.getId() + " already has max Running VMs(count includes system VMs), skipping this and trying other available hosts");
        } else if ((offeringDetails = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.vgpuType.toString())) != null) {
            ServiceOfferingDetailsVO groupName = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.pciDevice.toString());
            if (!_resourceMgr.isGPUDeviceAvailable(host.getId(), groupName.getValue(), offeringDetails.getValue())) {
                s_logger.debug("The last host of this VM does not have required GPU devices available");
            }
        } else {
            if (host.getStatus() == Status.Up && host.getResourceState() == ResourceState.Enabled) {
                boolean hostTagsMatch = true;
                if (offering.getHostTag() != null) {
                    _hostDao.loadHostTags(host);
                    if (!(host.getHostTags() != null && host.getHostTags().contains(offering.getHostTag()))) {
                        hostTagsMatch = false;
                    }
                }
                if (hostTagsMatch) {
                    long cluster_id = host.getClusterId();
                    ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id, "cpuOvercommitRatio");
                    ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id, "memoryOvercommitRatio");
                    Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue());
                    Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue());
                    boolean hostHasCpuCapability, hostHasCapacity = false;
                    hostHasCpuCapability = _capacityMgr.checkIfHostHasCpuCapability(host.getId(), offering.getCpu(), offering.getSpeed());
                    if (hostHasCpuCapability) {
                        // first check from reserved capacity
                        hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, true, cpuOvercommitRatio, memoryOvercommitRatio, true);
                        // if not reserved, check the free capacity
                        if (!hostHasCapacity)
                            hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, false, cpuOvercommitRatio, memoryOvercommitRatio, true);
                    }
                    if (hostHasCapacity && hostHasCpuCapability) {
                        s_logger.debug("The last host of this VM is UP and has enough capacity");
                        s_logger.debug("Now checking for suitable pools under zone: " + host.getDataCenterId() + ", pod: " + host.getPodId() + ", cluster: " + host.getClusterId());
                        Pod pod = _podDao.findById(host.getPodId());
                        Cluster cluster = _clusterDao.findById(host.getClusterId());
                        if (vm.getHypervisorType() == HypervisorType.BareMetal) {
                            DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap<Volume, StoragePool>());
                            s_logger.debug("Returning Deployment Destination: " + dest);
                            return dest;
                        }
                        // search for storage under the zone, pod and cluster of the last host
                        DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), host.getId(), plan.getPoolId(), null);
                        Pair<Map<Volume, List<StoragePool>>, List<Volume>> result = findSuitablePoolsForVolumes(vmProfile, lastPlan, avoids, HostAllocator.RETURN_UPTO_ALL);
                        Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
                        List<Volume> readyAndReusedVolumes = result.second();
                        // choose the potential pool for this VM on this host
                        if (!suitableVolumeStoragePools.isEmpty()) {
                            List<Host> suitableHosts = new ArrayList<Host>();
                            suitableHosts.add(host);
                            Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(suitableHosts, suitableVolumeStoragePools, avoids, getPlannerUsage(planner, vmProfile, plan, avoids), readyAndReusedVolumes);
                            if (potentialResources != null) {
                                Map<Volume, StoragePool> storageVolMap = potentialResources.second();
                                // ready and reused volumes don't need to be prepared, drop them from the map
                                for (Volume vol : readyAndReusedVolumes) {
                                    storageVolMap.remove(vol);
                                }
                                DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap);
                                s_logger.debug("Returning Deployment Destination: " + dest);
                                return dest;
                            }
                        }
                    } else {
                        s_logger.debug("The last host of this VM does not have enough capacity");
                    }
                } else {
                    s_logger.debug("Service Offering host tag does not match the last host of this VM");
                }
            } else {
                s_logger.debug("The last host of this VM is not UP or is not enabled, host status is: " + host.getStatus().name() + ", host resource state is: " + host.getResourceState());
            }
        }
        s_logger.debug("Cannot choose the last host to deploy this VM ");
    }
    DeployDestination dest = null;
    List<Long> clusterList = null;
    if (planner != null && planner.canHandle(vmProfile, plan, avoids)) {
        while (true) {
            if (planner instanceof DeploymentClusterPlanner) {
                ExcludeList plannerAvoidInput = new ExcludeList(avoids.getDataCentersToAvoid(), avoids.getPodsToAvoid(), avoids.getClustersToAvoid(), avoids.getHostsToAvoid(), avoids.getPoolsToAvoid());
                clusterList = ((DeploymentClusterPlanner) planner).orderClusters(vmProfile, plan, avoids);
                if (clusterList != null && !clusterList.isEmpty()) {
                    // call the allocators to list hosts in the ordered clusters
                    ExcludeList plannerAvoidOutput = new ExcludeList(avoids.getDataCentersToAvoid(), avoids.getPodsToAvoid(), avoids.getClustersToAvoid(), avoids.getHostsToAvoid(), avoids.getPoolsToAvoid());
                    resetAvoidSet(plannerAvoidOutput, plannerAvoidInput);
                    dest = checkClustersforDestination(clusterList, vmProfile, plan, avoids, dc, getPlannerUsage(planner, vmProfile, plan, avoids), plannerAvoidOutput);
                    if (dest != null) {
                        return dest;
                    }
                    // reset the avoid input to the planners
                    resetAvoidSet(avoids, plannerAvoidOutput);
                } else {
                    return null;
                }
            } else {
                dest = planner.plan(vmProfile, plan, avoids);
                if (dest != null) {
                    long hostId = dest.getHost().getId();
                    avoids.addHost(dest.getHost().getId());
                    if (checkIfHostFitsPlannerUsage(hostId, DeploymentPlanner.PlannerResourceUsage.Shared)) {
                        // found destination
                        return dest;
                    } else {
                        // another deployment picked this host up for dedicated access, try the next iteration
                        continue;
                    }
                } else {
                    return null;
                }
            }
        }
    }
    return dest;
}
Also used : StoragePool(com.cloud.storage.StoragePool) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) ServiceOfferingDetailsVO(com.cloud.service.ServiceOfferingDetailsVO) ArrayList(java.util.ArrayList) ExcludeList(com.cloud.deploy.DeploymentPlanner.ExcludeList) List(java.util.List) AffinityGroupProcessor(org.apache.cloudstack.affinity.AffinityGroupProcessor) Pair(com.cloud.utils.Pair) ExcludeList(com.cloud.deploy.DeploymentPlanner.ExcludeList) Pod(com.cloud.dc.Pod) ServiceOffering(com.cloud.offering.ServiceOffering) Cluster(com.cloud.org.Cluster) Host(com.cloud.host.Host) StoragePoolHostVO(com.cloud.storage.StoragePoolHostVO) HostVO(com.cloud.host.HostVO) DataCenter(com.cloud.dc.DataCenter) Volume(com.cloud.storage.Volume) Map(java.util.Map) HashMap(java.util.HashMap) ClusterDetailsVO(com.cloud.dc.ClusterDetailsVO) VirtualMachine(com.cloud.vm.VirtualMachine)
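
A standalone illustration of the overcommit-aware capacity check the last-host path relies on: cpu_requested is the offering's CPU count times speed (MHz), ram_requested is the RAM size converted to bytes, and both must fit within the host's capacity scaled by the cluster's cpuOvercommitRatio / memoryOvercommitRatio. All numbers and field names below are made up for the sketch; CloudStack's CapacityManager additionally checks reserved capacity before free capacity, which is simplified away here.

public class LastHostCapacitySketch {

    // true if used + requested still fits into total scaled by the overcommit ratio
    static boolean hasCapacity(long used, long requested, long total, float overcommitRatio) {
        return used + requested <= total * (double) overcommitRatio;
    }

    public static void main(String[] args) {
        // service offering: 2 cores x 1000 MHz, 2048 MB RAM (placeholder values)
        long cpuRequested = 2 * 1000L;             // mirrors offering.getCpu() * offering.getSpeed()
        long ramRequested = 2048L * 1024L * 1024L; // mirrors offering.getRamSize() * 1024L * 1024L

        // hypothetical host state (MHz and bytes)
        long hostCpuTotal = 8 * 2400L, hostCpuUsed = 16_000L;
        long hostRamTotal = 32L << 30, hostRamUsed = 30L << 30;

        // hypothetical cluster settings for cpuOvercommitRatio / memoryOvercommitRatio
        float cpuOvercommit = 2.0f, memOvercommit = 1.0f;

        boolean fits = hasCapacity(hostCpuUsed, cpuRequested, hostCpuTotal, cpuOvercommit)
                && hasCapacity(hostRamUsed, ramRequested, hostRamTotal, memOvercommit);
        System.out.println("last host has capacity: " + fits); // prints true for these numbers
    }
}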

Example 68 with Volume

use of com.cloud.storage.Volume in project cloudstack by apache.

In class DeploymentPlanningManagerImpl, method findPotentialDeploymentResources.

protected Pair<Host, Map<Volume, StoragePool>> findPotentialDeploymentResources(List<Host> suitableHosts, Map<Volume, List<StoragePool>> suitableVolumeStoragePools, ExcludeList avoid, DeploymentPlanner.PlannerResourceUsage resourceUsageRequired, List<Volume> readyAndReusedVolumes) {
    s_logger.debug("Trying to find a potenial host and associated storage pools from the suitable host/pool lists for this VM");
    boolean hostCanAccessPool = false;
    boolean haveEnoughSpace = false;
    if (readyAndReusedVolumes == null) {
        readyAndReusedVolumes = new ArrayList<Volume>();
    }
    Map<Volume, StoragePool> storage = new HashMap<Volume, StoragePool>();
    TreeSet<Volume> volumesOrderBySizeDesc = new TreeSet<Volume>(new Comparator<Volume>() {

        @Override
        public int compare(Volume v1, Volume v2) {
            if (v1.getSize() < v2.getSize())
                return 1;
            else
                return -1;
        }
    });
    volumesOrderBySizeDesc.addAll(suitableVolumeStoragePools.keySet());
    boolean multipleVolume = volumesOrderBySizeDesc.size() > 1;
    for (Host potentialHost : suitableHosts) {
        Map<StoragePool, List<Volume>> volumeAllocationMap = new HashMap<StoragePool, List<Volume>>();
        for (Volume vol : volumesOrderBySizeDesc) {
            haveEnoughSpace = false;
            s_logger.debug("Checking if host: " + potentialHost.getId() + " can access any suitable storage pool for volume: " + vol.getVolumeType());
            List<StoragePool> volumePoolList = suitableVolumeStoragePools.get(vol);
            hostCanAccessPool = false;
            for (StoragePool potentialSPool : volumePoolList) {
                if (hostCanAccessSPool(potentialHost, potentialSPool)) {
                    hostCanAccessPool = true;
                    if (multipleVolume && !readyAndReusedVolumes.contains(vol)) {
                        List<Volume> requestVolumes = null;
                        if (volumeAllocationMap.containsKey(potentialSPool))
                            requestVolumes = volumeAllocationMap.get(potentialSPool);
                        else
                            requestVolumes = new ArrayList<Volume>();
                        requestVolumes.add(vol);
                        if (!_storageMgr.storagePoolHasEnoughIops(requestVolumes, potentialSPool) || !_storageMgr.storagePoolHasEnoughSpace(requestVolumes, potentialSPool, potentialHost.getClusterId()))
                            continue;
                        volumeAllocationMap.put(potentialSPool, requestVolumes);
                    }
                    storage.put(vol, potentialSPool);
                    haveEnoughSpace = true;
                    break;
                }
            }
            if (!hostCanAccessPool) {
                break;
            }
            if (!haveEnoughSpace) {
                s_logger.warn("insufficient capacity to allocate all volumes");
                break;
            }
        }
        if (hostCanAccessPool && haveEnoughSpace && checkIfHostFitsPlannerUsage(potentialHost.getId(), resourceUsageRequired)) {
            s_logger.debug("Found a potential host " + "id: " + potentialHost.getId() + " name: " + potentialHost.getName() + " and associated storage pools for this VM");
            return new Pair<Host, Map<Volume, StoragePool>>(potentialHost, storage);
        } else {
            avoid.addHost(potentialHost.getId());
        }
    }
    s_logger.debug("Could not find a potential host that has associated storage pools from the suitable host/pool lists for this VM");
    return null;
}
Also used : StoragePool(com.cloud.storage.StoragePool) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) Host(com.cloud.host.Host) Volume(com.cloud.storage.Volume) TreeSet(java.util.TreeSet) ArrayList(java.util.ArrayList) ExcludeList(com.cloud.deploy.DeploymentPlanner.ExcludeList) List(java.util.List) Pair(com.cloud.utils.Pair)
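
One detail worth noting in findPotentialDeploymentResources: the anonymous Comparator never returns 0 and returns -1 for both orderings of two equal-sized volumes, which violates the Comparator contract (sgn(compare(a, b)) must equal -sgn(compare(b, a))). Presumably this is done so the TreeSet, which uses the comparator rather than equals() to detect duplicates, keeps distinct volumes of the same size. A contract-safe way to get the same descending-size order is to break ties on the volume ID, as in the sketch below; the small Vol record is just a stand-in for com.cloud.storage.Volume.

import java.util.Comparator;
import java.util.TreeSet;

public class VolumeOrderingSketch {
    // minimal stand-in for Volume: only the two fields the ordering needs
    record Vol(long id, long size) { }

    public static void main(String[] args) {
        Comparator<Vol> bySizeDescThenId =
                Comparator.comparingLong(Vol::size).reversed() // biggest volume first
                          .thenComparingLong(Vol::id);         // tie-break: distinct volumes never compare equal

        TreeSet<Vol> ordered = new TreeSet<>(bySizeDescThenId);
        ordered.add(new Vol(1, 20L << 30));
        ordered.add(new Vol(2, 20L << 30)); // same size, still kept as a separate entry
        ordered.add(new Vol(3, 5L << 30));

        ordered.forEach(v -> System.out.println(v.id() + " -> " + v.size())); // 1, 2, then 3
    }
}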

Example 69 with Volume

use of com.cloud.storage.Volume in project cloudstack by apache.

In class LibvirtComputingResourceTest, method testDestroyCommand.

@Test
public void testDestroyCommand() {
    final StoragePool pool = Mockito.mock(StoragePool.class);
    final Volume volume = Mockito.mock(Volume.class);
    final String vmName = "Test";
    final DestroyCommand command = new DestroyCommand(pool, volume, vmName);
    final KVMStoragePoolManager poolManager = Mockito.mock(KVMStoragePoolManager.class);
    final KVMStoragePool primary = Mockito.mock(KVMStoragePool.class);
    final VolumeTO vol = command.getVolume();
    when(libvirtComputingResource.getStoragePoolMgr()).thenReturn(poolManager);
    when(poolManager.getStoragePool(vol.getPoolType(), vol.getPoolUuid())).thenReturn(primary);
    final LibvirtRequestWrapper wrapper = LibvirtRequestWrapper.getInstance();
    assertNotNull(wrapper);
    final Answer answer = wrapper.execute(command, libvirtComputingResource);
    assertTrue(answer.getResult());
    verify(libvirtComputingResource, times(1)).getStoragePoolMgr();
    verify(poolManager, times(1)).getStoragePool(vol.getPoolType(), vol.getPoolUuid());
}
Also used : AttachAnswer(org.apache.cloudstack.storage.command.AttachAnswer) Answer(com.cloud.agent.api.Answer) CheckRouterAnswer(com.cloud.agent.api.CheckRouterAnswer) KVMStoragePoolManager(com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager) VolumeTO(com.cloud.agent.api.to.VolumeTO) LibvirtRequestWrapper(com.cloud.hypervisor.kvm.resource.wrapper.LibvirtRequestWrapper) KVMStoragePool(com.cloud.hypervisor.kvm.storage.KVMStoragePool) StoragePool(com.cloud.storage.StoragePool) NfsStoragePool(com.cloud.hypervisor.kvm.resource.KVMHABase.NfsStoragePool) KVMStoragePool(com.cloud.hypervisor.kvm.storage.KVMStoragePool) Volume(com.cloud.storage.Volume) DestroyCommand(com.cloud.agent.api.storage.DestroyCommand) Test(org.junit.Test)

Example 70 with Volume

use of com.cloud.storage.Volume in project cloudstack by apache.

In class UserVmManagerImpl, method destroyVm.

@Override
public UserVm destroyVm(long vmId, boolean expunge) throws ResourceUnavailableException, ConcurrentOperationException {
    // Account caller = CallContext.current().getCallingAccount();
    // Long userId = CallContext.current().getCallingUserId();
    Long userId = 2L;
    // Verify input parameters
    UserVmVO vm = _vmDao.findById(vmId);
    if (vm == null || vm.getRemoved() != null) {
        InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find a virtual machine with specified vmId");
        throw ex;
    }
    if (vm.getState() == State.Destroyed || vm.getState() == State.Expunging) {
        s_logger.trace("Vm id=" + vmId + " is already destroyed");
        return vm;
    }
    boolean status;
    State vmState = vm.getState();
    try {
        VirtualMachineEntity vmEntity = _orchSrvc.getVirtualMachine(vm.getUuid());
        status = vmEntity.destroy(Long.toString(userId), expunge);
    } catch (CloudException e) {
        CloudRuntimeException ex = new CloudRuntimeException("Unable to destroy with specified vmId", e);
        ex.addProxyObject(vm.getUuid(), "vmId");
        throw ex;
    }
    if (status) {
        // Mark the account's volumes as destroyed
        List<VolumeVO> volumes = _volsDao.findByInstance(vmId);
        for (VolumeVO volume : volumes) {
            if (volume.getVolumeType().equals(Volume.Type.ROOT)) {
                UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_DELETE, volume.getAccountId(), volume.getDataCenterId(), volume.getId(), volume.getName(), Volume.class.getName(), volume.getUuid(), volume.isDisplayVolume());
            }
        }
        if (vmState != State.Error) {
            // Get serviceOffering for Virtual Machine
            ServiceOfferingVO offering = _serviceOfferingDao.findByIdIncludingRemoved(vm.getId(), vm.getServiceOfferingId());
            //Update Resource Count for the given account
            resourceCountDecrement(vm.getAccountId(), vm.isDisplayVm(), new Long(offering.getCpu()), new Long(offering.getRamSize()));
        }
        return _vmDao.findById(vmId);
    } else {
        CloudRuntimeException ex = new CloudRuntimeException("Failed to destroy vm with specified vmId");
        ex.addProxyObject(vm.getUuid(), "vmId");
        throw ex;
    }
}
Also used : VolumeVO(com.cloud.storage.VolumeVO) InvalidParameterValueException(com.cloud.exception.InvalidParameterValueException) Volume(com.cloud.storage.Volume) ResourceState(com.cloud.resource.ResourceState) State(com.cloud.vm.VirtualMachine.State) CloudRuntimeException(com.cloud.utils.exception.CloudRuntimeException) VirtualMachineEntity(org.apache.cloudstack.engine.cloud.entity.api.VirtualMachineEntity) CloudException(com.cloud.exception.CloudException) ServiceOfferingVO(com.cloud.service.ServiceOfferingVO)

Aggregations

Volume (com.cloud.storage.Volume): 70
StoragePool (com.cloud.storage.StoragePool): 21
ServerApiException (org.apache.cloudstack.api.ServerApiException): 16
InvalidParameterValueException (com.cloud.exception.InvalidParameterValueException): 15
CloudRuntimeException (com.cloud.utils.exception.CloudRuntimeException): 15
ExcludeList (com.cloud.deploy.DeploymentPlanner.ExcludeList): 14
VolumeResponse (org.apache.cloudstack.api.response.VolumeResponse): 14
ArrayList (java.util.ArrayList): 12
Account (com.cloud.user.Account): 11
VolumeVO (com.cloud.storage.VolumeVO): 10
DiskProfile (com.cloud.vm.DiskProfile): 10
StoragePoolAllocator (org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator): 10
Test (org.junit.Test): 10
DataCenterDeployment (com.cloud.deploy.DataCenterDeployment): 9
HashMap (java.util.HashMap): 9
DeploymentPlan (com.cloud.deploy.DeploymentPlan): 8
PermissionDeniedException (com.cloud.exception.PermissionDeniedException): 8
VirtualMachineProfile (com.cloud.vm.VirtualMachineProfile): 8
Project (com.cloud.projects.Project): 7
VmWorkAttachVolume (com.cloud.vm.VmWorkAttachVolume): 7