Example 21 with Cluster

Use of com.cloud.org.Cluster in the apache/cloudstack project.

From class MetricsServiceImpl, method listClusterMetrics.

@Override
public List<ClusterMetricsResponse> listClusterMetrics(List<ClusterResponse> clusterResponses) {
    final List<ClusterMetricsResponse> metricsResponses = new ArrayList<>();
    for (final ClusterResponse clusterResponse : clusterResponses) {
        ClusterMetricsResponse metricsResponse = new ClusterMetricsResponse();
        try {
            BeanUtils.copyProperties(metricsResponse, clusterResponse);
        } catch (IllegalAccessException | InvocationTargetException e) {
            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to generate cluster metrics response");
        }
        final Cluster cluster = clusterDao.findByUuid(clusterResponse.getId());
        if (cluster == null) {
            continue;
        }
        final Long clusterId = cluster.getId();
        // Thresholds
        final Double cpuThreshold = AlertManager.CPUCapacityThreshold.valueIn(clusterId);
        final Double memoryThreshold = AlertManager.MemoryCapacityThreshold.valueIn(clusterId);
        final Float cpuDisableThreshold = DeploymentClusterPlanner.ClusterCPUCapacityDisableThreshold.valueIn(clusterId);
        final Float memoryDisableThreshold = DeploymentClusterPlanner.ClusterMemoryCapacityDisableThreshold.valueIn(clusterId);
        final Double cpuOvercommitRatio = findRatioValue(ApiDBUtils.findClusterDetails(clusterId, "cpuOvercommitRatio"));
        final Double memoryOvercommitRatio = findRatioValue(ApiDBUtils.findClusterDetails(clusterId, "memoryOvercommitRatio"));
        // CPU and memory capacities
        final CapacityDaoImpl.SummedCapacity cpuCapacity = getCapacity((int) Capacity.CAPACITY_TYPE_CPU, null, clusterId);
        final CapacityDaoImpl.SummedCapacity memoryCapacity = getCapacity((int) Capacity.CAPACITY_TYPE_MEMORY, null, clusterId);
        final Metrics metrics = new Metrics(cpuCapacity, memoryCapacity);
        for (final HostJoinVO host : hostJoinDao.findByClusterId(clusterId, Host.Type.Routing)) {
            if (host.getStatus() == Status.Up) {
                metrics.incrUpResources();
            }
            metrics.incrTotalResources();
            updateHostMetrics(metrics, host);
        }
        metricsResponse.setState(clusterResponse.getAllocationState(), clusterResponse.getManagedState());
        metricsResponse.setResources(metrics.getUpResources(), metrics.getTotalResources());
        // CPU
        metricsResponse.setCpuTotal(metrics.getTotalCpu());
        metricsResponse.setCpuAllocated(metrics.getCpuAllocated(), metrics.getTotalCpu());
        if (metrics.getCpuUsedPercentage() > 0L) {
            metricsResponse.setCpuUsed(metrics.getCpuUsedPercentage(), metrics.getTotalHosts());
            metricsResponse.setCpuMaxDeviation(metrics.getMaximumCpuUsage(), metrics.getCpuUsedPercentage(), metrics.getTotalHosts());
        }
        // Memory
        metricsResponse.setMemTotal(metrics.getTotalMemory());
        metricsResponse.setMemAllocated(metrics.getMemoryAllocated(), metrics.getTotalMemory());
        if (metrics.getMemoryUsed() > 0L) {
            metricsResponse.setMemUsed(metrics.getMemoryUsed(), metrics.getTotalMemory());
            metricsResponse.setMemMaxDeviation(metrics.getMaximumMemoryUsage(), metrics.getMemoryUsed(), metrics.getTotalHosts());
        }
        // CPU thresholds
        metricsResponse.setCpuUsageThreshold(metrics.getCpuUsedPercentage(), metrics.getTotalHosts(), cpuThreshold);
        metricsResponse.setCpuUsageDisableThreshold(metrics.getCpuUsedPercentage(), metrics.getTotalHosts(), cpuDisableThreshold);
        metricsResponse.setCpuAllocatedThreshold(metrics.getCpuAllocated(), metrics.getTotalCpu(), cpuOvercommitRatio, cpuThreshold);
        metricsResponse.setCpuAllocatedDisableThreshold(metrics.getCpuAllocated(), metrics.getTotalCpu(), cpuOvercommitRatio, cpuDisableThreshold);
        // Memory thresholds
        metricsResponse.setMemoryUsageThreshold(metrics.getMemoryUsed(), metrics.getTotalMemory(), memoryThreshold);
        metricsResponse.setMemoryUsageDisableThreshold(metrics.getMemoryUsed(), metrics.getTotalMemory(), memoryDisableThreshold);
        metricsResponse.setMemoryAllocatedThreshold(metrics.getMemoryAllocated(), metrics.getTotalMemory(), memoryOvercommitRatio, memoryThreshold);
        metricsResponse.setMemoryAllocatedDisableThreshold(metrics.getMemoryAllocated(), metrics.getTotalMemory(), memoryOvercommitRatio, memoryDisableThreshold);
        metricsResponses.add(metricsResponse);
    }
    return metricsResponses;
}
Also used : ArrayList(java.util.ArrayList) ClusterResponse(org.apache.cloudstack.api.response.ClusterResponse) Cluster(com.cloud.org.Cluster) HostJoinVO(com.cloud.api.query.vo.HostJoinVO) InvocationTargetException(java.lang.reflect.InvocationTargetException) ClusterMetricsResponse(org.apache.cloudstack.response.ClusterMetricsResponse) ServerApiException(org.apache.cloudstack.api.ServerApiException) CapacityDaoImpl(com.cloud.capacity.dao.CapacityDaoImpl)
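
The method above builds each ClusterMetricsResponse by first copying the plain fields from the ClusterResponse with Apache Commons BeanUtils and then layering the computed capacity figures on top. Below is a minimal standalone sketch of that copy-then-enrich step; SimpleResponse and SimpleMetricsResponse are illustrative stand-ins, not CloudStack types.

import org.apache.commons.beanutils.BeanUtils;
import java.lang.reflect.InvocationTargetException;

public class CopyThenEnrichSketch {

    // Illustrative stand-ins for ClusterResponse / ClusterMetricsResponse.
    public static class SimpleResponse {
        private String name;
        public String getName() { return name; }
        public void setName(String name) { this.name = name; }
    }

    public static class SimpleMetricsResponse extends SimpleResponse {
        private Long cpuTotal;
        public Long getCpuTotal() { return cpuTotal; }
        public void setCpuTotal(Long cpuTotal) { this.cpuTotal = cpuTotal; }
    }

    public static void main(String[] args) throws IllegalAccessException, InvocationTargetException {
        SimpleResponse base = new SimpleResponse();
        base.setName("cluster-1");

        // Copy the matching bean properties from the base response ...
        SimpleMetricsResponse metrics = new SimpleMetricsResponse();
        BeanUtils.copyProperties(metrics, base);

        // ... then enrich the copy with computed values.
        metrics.setCpuTotal(16000L);
        System.out.println(metrics.getName() + " cpuTotal=" + metrics.getCpuTotal());
    }
}

The important detail is the argument order: commons-beanutils copyProperties takes the destination bean first, which is why the metrics response is passed before the cluster response.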

Example 22 with Cluster

Use of com.cloud.org.Cluster in the apache/cloudstack project.

From class MetricsServiceImpl, method listStoragePoolMetrics.

@Override
public List<StoragePoolMetricsResponse> listStoragePoolMetrics(List<StoragePoolResponse> poolResponses) {
    final List<StoragePoolMetricsResponse> metricsResponses = new ArrayList<>();
    for (final StoragePoolResponse poolResponse : poolResponses) {
        StoragePoolMetricsResponse metricsResponse = new StoragePoolMetricsResponse();
        try {
            BeanUtils.copyProperties(metricsResponse, poolResponse);
        } catch (IllegalAccessException | InvocationTargetException e) {
            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to generate storagepool metrics response");
        }
        Long poolClusterId = null;
        final Cluster cluster = clusterDao.findByUuid(poolResponse.getClusterId());
        if (cluster != null) {
            poolClusterId = cluster.getId();
        }
        final Double storageThreshold = AlertManager.StorageCapacityThreshold.valueIn(poolClusterId);
        final Double storageDisableThreshold = CapacityManager.StorageCapacityDisableThreshold.valueIn(poolClusterId);
        metricsResponse.setDiskSizeUsedGB(poolResponse.getDiskSizeUsed());
        metricsResponse.setDiskSizeTotalGB(poolResponse.getDiskSizeTotal(), poolResponse.getOverProvisionFactor());
        metricsResponse.setDiskSizeAllocatedGB(poolResponse.getDiskSizeAllocated());
        metricsResponse.setDiskSizeUnallocatedGB(poolResponse.getDiskSizeTotal(), poolResponse.getDiskSizeAllocated(), poolResponse.getOverProvisionFactor());
        metricsResponse.setStorageUsedThreshold(poolResponse.getDiskSizeTotal(), poolResponse.getDiskSizeUsed(), poolResponse.getOverProvisionFactor(), storageThreshold);
        metricsResponse.setStorageUsedDisableThreshold(poolResponse.getDiskSizeTotal(), poolResponse.getDiskSizeUsed(), poolResponse.getOverProvisionFactor(), storageDisableThreshold);
        metricsResponse.setStorageAllocatedThreshold(poolResponse.getDiskSizeTotal(), poolResponse.getDiskSizeAllocated(), poolResponse.getOverProvisionFactor(), storageThreshold);
        metricsResponse.setStorageAllocatedDisableThreshold(poolResponse.getDiskSizeTotal(), poolResponse.getDiskSizeUsed(), poolResponse.getOverProvisionFactor(), storageDisableThreshold);
        metricsResponses.add(metricsResponse);
    }
    return metricsResponses;
}
Also used : StoragePoolResponse(org.apache.cloudstack.api.response.StoragePoolResponse) ServerApiException(org.apache.cloudstack.api.ServerApiException) StoragePoolMetricsResponse(org.apache.cloudstack.response.StoragePoolMetricsResponse) ArrayList(java.util.ArrayList) Cluster(com.cloud.org.Cluster) InvocationTargetException(java.lang.reflect.InvocationTargetException)
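
Each threshold setter above receives the raw totals plus the pool's over-provisioning factor, so the comparison is effectively a usage ratio against an over-provisioned capacity. The exact math lives inside StoragePoolMetricsResponse; the sketch below only illustrates the kind of ratio-versus-threshold arithmetic involved, and the helper names are hypothetical rather than CloudStack API.

public class StorageThresholdSketch {

    // Hypothetical helper: effective capacity after over-provisioning.
    static double effectiveTotal(long totalBytes, double overProvisionFactor) {
        return totalBytes * overProvisionFactor;
    }

    // Hypothetical helper: true when usage crosses the configured threshold (0..1).
    static boolean crossesThreshold(long usedBytes, long totalBytes,
                                    double overProvisionFactor, Double threshold) {
        if (threshold == null || totalBytes <= 0) {
            return false;
        }
        return usedBytes / effectiveTotal(totalBytes, overProvisionFactor) > threshold;
    }

    public static void main(String[] args) {
        long total = 1_000L * 1024 * 1024 * 1024;   // 1000 GiB raw capacity
        long used  =  850L * 1024 * 1024 * 1024;    //  850 GiB used
        // With a 2.0 over-provision factor the effective total doubles,
        // so 850 GiB used is only 42.5% and stays under a 0.85 threshold.
        System.out.println(crossesThreshold(used, total, 2.0, 0.85));
    }
}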

Example 23 with Cluster

Use of com.cloud.org.Cluster in the apache/cloudstack project.

From class BareMetalPlanner, method plan.

@Override
public DeployDestination plan(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid) throws InsufficientServerCapacityException {
    VirtualMachine vm = vmProfile.getVirtualMachine();
    ServiceOffering offering = vmProfile.getServiceOffering();
    String hostTag = null;
    String haVmTag = (String) vmProfile.getParameter(VirtualMachineProfile.Param.HaTag);
    if (vm.getLastHostId() != null && haVmTag == null) {
        HostVO h = _hostDao.findById(vm.getLastHostId());
        DataCenter dc = _dcDao.findById(h.getDataCenterId());
        Pod pod = _podDao.findById(h.getPodId());
        Cluster c = _clusterDao.findById(h.getClusterId());
        s_logger.debug("Start baremetal vm " + vm.getId() + " on last stayed host " + h.getId());
        return new DeployDestination(dc, pod, c, h);
    }
    if (haVmTag != null) {
        hostTag = haVmTag;
    } else if (offering.getHostTag() != null) {
        String[] tags = offering.getHostTag().split(",");
        if (tags.length > 0) {
            hostTag = tags[0];
        }
    }
    List<ClusterVO> clusters = _clusterDao.listByDcHyType(vm.getDataCenterId(), HypervisorType.BareMetal.toString());
    int cpu_requested;
    long ram_requested;
    HostVO target = null;
    List<HostVO> hosts;
    for (ClusterVO cluster : clusters) {
        hosts = _resourceMgr.listAllUpAndEnabledHosts(Host.Type.Routing, cluster.getId(), cluster.getPodId(), cluster.getDataCenterId());
        if (hostTag != null) {
            for (HostVO h : hosts) {
                _hostDao.loadDetails(h);
                if (h.getDetail("hostTag") != null && h.getDetail("hostTag").equalsIgnoreCase(hostTag)) {
                    target = h;
                    break;
                }
            }
        }
    }
    if (target == null) {
        s_logger.warn("Cannot find host with tag " + hostTag + " use capacity from service offering");
        cpu_requested = offering.getCpu() * offering.getSpeed();
        ram_requested = offering.getRamSize() * 1024L * 1024L;
    } else {
        cpu_requested = target.getCpus() * target.getSpeed().intValue();
        ram_requested = target.getTotalMemory();
    }
    for (ClusterVO cluster : clusters) {
        if (haVmTag == null) {
            hosts = _resourceMgr.listAllUpAndEnabledNonHAHosts(Host.Type.Routing, cluster.getId(), cluster.getPodId(), cluster.getDataCenterId());
        } else {
            s_logger.warn("Cannot find HA host with tag " + haVmTag + " in cluster id=" + cluster.getId() + ", pod id=" + cluster.getPodId() + ", data center id=" + cluster.getDataCenterId());
            return null;
        }
        for (HostVO h : hosts) {
            long cluster_id = h.getClusterId();
            ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id, "cpuOvercommitRatio");
            ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id, "memoryOvercommitRatio");
            Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue());
            Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue());
            if (_capacityMgr.checkIfHostHasCapacity(h.getId(), cpu_requested, ram_requested, false, cpuOvercommitRatio, memoryOvercommitRatio, true)) {
                s_logger.debug("Find host " + h.getId() + " has enough capacity");
                DataCenter dc = _dcDao.findById(h.getDataCenterId());
                Pod pod = _podDao.findById(h.getPodId());
                return new DeployDestination(dc, pod, cluster, h);
            }
        }
    }
    s_logger.warn(String.format("Cannot find enough capacity(requested cpu=%1$s memory=%2$s)", cpu_requested, ram_requested));
    return null;
}
Also used : ClusterVO(com.cloud.dc.ClusterVO) Pod(com.cloud.dc.Pod) ServiceOffering(com.cloud.offering.ServiceOffering) Cluster(com.cloud.org.Cluster) HostVO(com.cloud.host.HostVO) DataCenter(com.cloud.dc.DataCenter) DeployDestination(com.cloud.deploy.DeployDestination) ClusterDetailsVO(com.cloud.dc.ClusterDetailsVO) VirtualMachine(com.cloud.vm.VirtualMachine)
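
Host selection in this planner hinges on two small string operations: the first tag is taken from the offering's comma-separated host-tag list, and it is matched case-insensitively against a host detail. The following self-contained sketch isolates just that tag handling, using plain strings instead of CloudStack's ServiceOffering and HostVO.

import java.util.Arrays;
import java.util.List;
import java.util.Optional;

public class HostTagSketch {

    // Take the first tag from a comma-separated host-tag string, as the planner does.
    static String firstTag(String hostTagList) {
        if (hostTagList == null) {
            return null;
        }
        String[] tags = hostTagList.split(",");
        return tags.length > 0 ? tags[0] : null;
    }

    // Pick the first host tag that matches, ignoring case.
    static Optional<String> pickHost(List<String> hostTags, String wanted) {
        return hostTags.stream()
                .filter(t -> t != null && t.equalsIgnoreCase(wanted))
                .findFirst();
    }

    public static void main(String[] args) {
        String wanted = firstTag("baremetal,large-mem");
        System.out.println(pickHost(Arrays.asList("BAREMETAL", "kvm"), wanted)); // Optional[BAREMETAL]
    }
}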

Example 24 with Cluster

Use of com.cloud.org.Cluster in the apache/cloudstack project.

From class DeploymentPlanningManagerImpl, method isEnabledForAllocation.

private boolean isEnabledForAllocation(long zoneId, Long podId, Long clusterId) {
    // Check if the zone exists in the system
    DataCenterVO zone = _dcDao.findById(zoneId);
    if (zone != null && Grouping.AllocationState.Disabled == zone.getAllocationState()) {
        s_logger.info("Zone is currently disabled, cannot allocate to this zone: " + zoneId);
        return false;
    }
    Pod pod = _podDao.findById(podId);
    if (pod != null && Grouping.AllocationState.Disabled == pod.getAllocationState()) {
        s_logger.info("Pod is currently disabled, cannot allocate to this pod: " + podId);
        return false;
    }
    Cluster cluster = _clusterDao.findById(clusterId);
    if (cluster != null && Grouping.AllocationState.Disabled == cluster.getAllocationState()) {
        s_logger.info("Cluster is currently disabled, cannot allocate to this cluster: " + clusterId);
        return false;
    }
    return true;
}
Also used : DataCenterVO(com.cloud.dc.DataCenterVO) Pod(com.cloud.dc.Pod) Cluster(com.cloud.org.Cluster)
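
The check is a short-circuit chain: zone, pod, and cluster are looked up in turn, a Disabled allocation state at any level blocks allocation, and a missing entity raises no objection. A minimal sketch of that pattern, using a local enum in place of CloudStack's Grouping.AllocationState, is shown below.

import java.util.function.Function;

public class AllocationGateSketch {

    enum AllocationState { Enabled, Disabled }

    // Null entity => nothing to object; Disabled state => block allocation.
    static <T> boolean scopeAllows(T entity, Function<T, AllocationState> stateOf) {
        return entity == null || stateOf.apply(entity) != AllocationState.Disabled;
    }

    public static void main(String[] args) {
        // Stand-ins for the looked-up zone / pod / cluster states.
        AllocationState zone = AllocationState.Enabled;
        AllocationState pod = AllocationState.Disabled;
        AllocationState cluster = null; // cluster not found

        boolean enabled = scopeAllows(zone, s -> s)
                && scopeAllows(pod, s -> s)
                && scopeAllows(cluster, s -> s);
        System.out.println(enabled); // false: the pod is Disabled
    }
}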

Example 25 with Cluster

Use of com.cloud.org.Cluster in the apache/cloudstack project.

From class DeploymentPlanningManagerImpl, method planDeployment.

@Override
public DeployDestination planDeployment(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoids, DeploymentPlanner planner) throws InsufficientServerCapacityException, AffinityConflictException {
    ServiceOffering offering = vmProfile.getServiceOffering();
    int cpu_requested = offering.getCpu() * offering.getSpeed();
    long ram_requested = offering.getRamSize() * 1024L * 1024L;
    VirtualMachine vm = vmProfile.getVirtualMachine();
    DataCenter dc = _dcDao.findById(vm.getDataCenterId());
    if (vm.getType() == VirtualMachine.Type.User || vm.getType() == VirtualMachine.Type.DomainRouter) {
        checkForNonDedicatedResources(vmProfile, dc, avoids);
    }
    if (s_logger.isDebugEnabled()) {
        s_logger.debug("DeploymentPlanner allocation algorithm: " + planner);
        s_logger.debug("Trying to allocate a host and storage pools from dc:" + plan.getDataCenterId() + ", pod:" + plan.getPodId() + ",cluster:" + plan.getClusterId() + ", requested cpu: " + cpu_requested + ", requested ram: " + ram_requested);
        s_logger.debug("Is ROOT volume READY (pool already allocated)?: " + (plan.getPoolId() != null ? "Yes" : "No"));
    }
    String haVmTag = (String) vmProfile.getParameter(VirtualMachineProfile.Param.HaTag);
    if (plan.getHostId() != null && haVmTag == null) {
        Long hostIdSpecified = plan.getHostId();
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("DeploymentPlan has host_id specified, choosing this host and making no checks on this host: " + hostIdSpecified);
        }
        HostVO host = _hostDao.findById(hostIdSpecified);
        if (host == null) {
            s_logger.debug("The specified host cannot be found");
        } else if (avoids.shouldAvoid(host)) {
            s_logger.debug("The specified host is in avoid set");
        } else {
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("Looking for suitable pools for this host under zone: " + host.getDataCenterId() + ", pod: " + host.getPodId() + ", cluster: " + host.getClusterId());
            }
            Pod pod = _podDao.findById(host.getPodId());
            Cluster cluster = _clusterDao.findById(host.getClusterId());
            if (vm.getHypervisorType() == HypervisorType.BareMetal) {
                DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap<Volume, StoragePool>());
                s_logger.debug("Returning Deployment Destination: " + dest);
                return dest;
            }
            // search for storage under the zone, pod, cluster of the host.
            DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), hostIdSpecified, plan.getPoolId(), null, plan.getReservationContext());
            Pair<Map<Volume, List<StoragePool>>, List<Volume>> result = findSuitablePoolsForVolumes(vmProfile, lastPlan, avoids, HostAllocator.RETURN_UPTO_ALL);
            Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
            List<Volume> readyAndReusedVolumes = result.second();
            // choose the potential pool for this VM for this host
            if (!suitableVolumeStoragePools.isEmpty()) {
                List<Host> suitableHosts = new ArrayList<Host>();
                suitableHosts.add(host);
                Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(suitableHosts, suitableVolumeStoragePools, avoids, getPlannerUsage(planner, vmProfile, plan, avoids), readyAndReusedVolumes);
                if (potentialResources != null) {
                    pod = _podDao.findById(host.getPodId());
                    cluster = _clusterDao.findById(host.getClusterId());
                    Map<Volume, StoragePool> storageVolMap = potentialResources.second();
                    // we don't have to prepare this volume.
                    for (Volume vol : readyAndReusedVolumes) {
                        storageVolMap.remove(vol);
                    }
                    DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap);
                    s_logger.debug("Returning Deployment Destination: " + dest);
                    return dest;
                }
            }
        }
        s_logger.debug("Cannot deploy to specified host, returning.");
        return null;
    }
    // call affinitygroup chain
    long vmGroupCount = _affinityGroupVMMapDao.countAffinityGroupsForVm(vm.getId());
    if (vmGroupCount > 0) {
        for (AffinityGroupProcessor processor : _affinityProcessors) {
            processor.process(vmProfile, plan, avoids);
        }
    }
    if (vm.getType() == VirtualMachine.Type.User) {
        checkForNonDedicatedResources(vmProfile, dc, avoids);
    }
    if (s_logger.isDebugEnabled()) {
        s_logger.debug("Deploy avoids pods: " + avoids.getPodsToAvoid() + ", clusters: " + avoids.getClustersToAvoid() + ", hosts: " + avoids.getHostsToAvoid());
    }
    // check if datacenter is in avoid set
    if (avoids.shouldAvoid(dc)) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("DataCenter id = '" + dc.getId() + "' provided is in avoid set, DeploymentPlanner cannot allocate the VM, returning.");
        }
        return null;
    }
    if (planner == null) {
        String plannerName = offering.getDeploymentPlanner();
        if (plannerName == null) {
            if (vm.getHypervisorType() == HypervisorType.BareMetal) {
                plannerName = "BareMetalPlanner";
            } else {
                plannerName = _configDao.getValue(Config.VmDeploymentPlanner.key());
            }
        }
        planner = getDeploymentPlannerByName(plannerName);
    }
    if (vm.getLastHostId() != null && haVmTag == null) {
        s_logger.debug("This VM has last host_id specified, trying to choose the same host: " + vm.getLastHostId());
        HostVO host = _hostDao.findById(vm.getLastHostId());
        ServiceOfferingDetailsVO offeringDetails = null;
        if (host == null) {
            s_logger.debug("The last host of this VM cannot be found");
        } else if (avoids.shouldAvoid(host)) {
            s_logger.debug("The last host of this VM is in avoid set");
        } else if (plan.getClusterId() != null && host.getClusterId() != null && !plan.getClusterId().equals(host.getClusterId())) {
            s_logger.debug("The last host of this VM cannot be picked as the plan specifies different clusterId: " + plan.getClusterId());
        } else if (_capacityMgr.checkIfHostReachMaxGuestLimit(host)) {
            s_logger.debug("The last Host, hostId: " + host.getId() + " already has max Running VMs(count includes system VMs), skipping this and trying other available hosts");
        } else if ((offeringDetails = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.vgpuType.toString())) != null) {
            ServiceOfferingDetailsVO groupName = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.pciDevice.toString());
            if (!_resourceMgr.isGPUDeviceAvailable(host.getId(), groupName.getValue(), offeringDetails.getValue())) {
                s_logger.debug("The last host of this VM does not have required GPU devices available");
            }
        } else {
            if (host.getStatus() == Status.Up && host.getResourceState() == ResourceState.Enabled) {
                boolean hostTagsMatch = true;
                if (offering.getHostTag() != null) {
                    _hostDao.loadHostTags(host);
                    if (!(host.getHostTags() != null && host.getHostTags().contains(offering.getHostTag()))) {
                        hostTagsMatch = false;
                    }
                }
                if (hostTagsMatch) {
                    long cluster_id = host.getClusterId();
                    ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id, "cpuOvercommitRatio");
                    ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id, "memoryOvercommitRatio");
                    Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue());
                    Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue());
                    boolean hostHasCpuCapability, hostHasCapacity = false;
                    hostHasCpuCapability = _capacityMgr.checkIfHostHasCpuCapability(host.getId(), offering.getCpu(), offering.getSpeed());
                    if (hostHasCpuCapability) {
                        // first check from reserved capacity
                        hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, true, cpuOvercommitRatio, memoryOvercommitRatio, true);
                        // if not reserved, check the free capacity
                        if (!hostHasCapacity)
                            hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, false, cpuOvercommitRatio, memoryOvercommitRatio, true);
                    }
                    if (hostHasCapacity && hostHasCpuCapability) {
                        s_logger.debug("The last host of this VM is UP and has enough capacity");
                        s_logger.debug("Now checking for suitable pools under zone: " + host.getDataCenterId() + ", pod: " + host.getPodId() + ", cluster: " + host.getClusterId());
                        Pod pod = _podDao.findById(host.getPodId());
                        Cluster cluster = _clusterDao.findById(host.getClusterId());
                        if (vm.getHypervisorType() == HypervisorType.BareMetal) {
                            DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap<Volume, StoragePool>());
                            s_logger.debug("Returning Deployment Destination: " + dest);
                            return dest;
                        }
                        // search for storage under the zone, pod, cluster of the last host.
                        DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), host.getId(), plan.getPoolId(), null);
                        Pair<Map<Volume, List<StoragePool>>, List<Volume>> result = findSuitablePoolsForVolumes(vmProfile, lastPlan, avoids, HostAllocator.RETURN_UPTO_ALL);
                        Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
                        List<Volume> readyAndReusedVolumes = result.second();
                        // choose the potential pool for this VM for this host
                        if (!suitableVolumeStoragePools.isEmpty()) {
                            List<Host> suitableHosts = new ArrayList<Host>();
                            suitableHosts.add(host);
                            Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(suitableHosts, suitableVolumeStoragePools, avoids, getPlannerUsage(planner, vmProfile, plan, avoids), readyAndReusedVolumes);
                            if (potentialResources != null) {
                                Map<Volume, StoragePool> storageVolMap = potentialResources.second();
                                // we don't have to prepare this volume.
                                for (Volume vol : readyAndReusedVolumes) {
                                    storageVolMap.remove(vol);
                                }
                                DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap);
                                s_logger.debug("Returning Deployment Destination: " + dest);
                                return dest;
                            }
                        }
                    } else {
                        s_logger.debug("The last host of this VM does not have enough capacity");
                    }
                } else {
                    s_logger.debug("Service Offering host tag does not match the last host of this VM");
                }
            } else {
                s_logger.debug("The last host of this VM is not UP or is not enabled, host status is: " + host.getStatus().name() + ", host resource state is: " + host.getResourceState());
            }
        }
        s_logger.debug("Cannot choose the last host to deploy this VM ");
    }
    DeployDestination dest = null;
    List<Long> clusterList = null;
    if (planner != null && planner.canHandle(vmProfile, plan, avoids)) {
        while (true) {
            if (planner instanceof DeploymentClusterPlanner) {
                ExcludeList plannerAvoidInput = new ExcludeList(avoids.getDataCentersToAvoid(), avoids.getPodsToAvoid(), avoids.getClustersToAvoid(), avoids.getHostsToAvoid(), avoids.getPoolsToAvoid());
                clusterList = ((DeploymentClusterPlanner) planner).orderClusters(vmProfile, plan, avoids);
                if (clusterList != null && !clusterList.isEmpty()) {
                    // planner refactoring. call allocators to list hosts
                    ExcludeList plannerAvoidOutput = new ExcludeList(avoids.getDataCentersToAvoid(), avoids.getPodsToAvoid(), avoids.getClustersToAvoid(), avoids.getHostsToAvoid(), avoids.getPoolsToAvoid());
                    resetAvoidSet(plannerAvoidOutput, plannerAvoidInput);
                    dest = checkClustersforDestination(clusterList, vmProfile, plan, avoids, dc, getPlannerUsage(planner, vmProfile, plan, avoids), plannerAvoidOutput);
                    if (dest != null) {
                        return dest;
                    }
                    // reset the avoid input to the planners
                    resetAvoidSet(avoids, plannerAvoidOutput);
                } else {
                    return null;
                }
            } else {
                dest = planner.plan(vmProfile, plan, avoids);
                if (dest != null) {
                    long hostId = dest.getHost().getId();
                    avoids.addHost(dest.getHost().getId());
                    if (checkIfHostFitsPlannerUsage(hostId, DeploymentPlanner.PlannerResourceUsage.Shared)) {
                        // found destination
                        return dest;
                    } else {
                        // deployment picked it up for dedicated access
                        continue;
                    }
                } else {
                    return null;
                }
            }
        }
    }
    return dest;
}
Also used : StoragePool(com.cloud.storage.StoragePool) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) ServiceOfferingDetailsVO(com.cloud.service.ServiceOfferingDetailsVO) ArrayList(java.util.ArrayList) ExcludeList(com.cloud.deploy.DeploymentPlanner.ExcludeList) List(java.util.List) AffinityGroupProcessor(org.apache.cloudstack.affinity.AffinityGroupProcessor) Pair(com.cloud.utils.Pair) ExcludeList(com.cloud.deploy.DeploymentPlanner.ExcludeList) Pod(com.cloud.dc.Pod) ServiceOffering(com.cloud.offering.ServiceOffering) Cluster(com.cloud.org.Cluster) Host(com.cloud.host.Host) StoragePoolHostVO(com.cloud.storage.StoragePoolHostVO) HostVO(com.cloud.host.HostVO) DataCenter(com.cloud.dc.DataCenter) Volume(com.cloud.storage.Volume) Map(java.util.Map) HashMap(java.util.HashMap) ClusterDetailsVO(com.cloud.dc.ClusterDetailsVO) VirtualMachine(com.cloud.vm.VirtualMachine)
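
One detail worth noting in the last-host fast path is the order of the capacity checks: reserved capacity is consulted first and free capacity only as a fallback, both scaled by the cluster's over-commit ratios. The sketch below captures just that two-step fallback behind a hypothetical CapacityCheck interface; it is not CloudStack's CapacityManager.

public class LastHostCapacitySketch {

    // Hypothetical stand-in for the capacity checks performed by the capacity manager.
    interface CapacityCheck {
        boolean hasCapacity(long hostId, int cpu, long ramBytes,
                            boolean fromReserved, float cpuOvercommit, float ramOvercommit);
    }

    // Check reserved capacity first, then fall back to free capacity.
    static boolean lastHostFits(CapacityCheck check, long hostId, int cpu, long ramBytes,
                                float cpuOvercommit, float ramOvercommit) {
        boolean fits = check.hasCapacity(hostId, cpu, ramBytes, true, cpuOvercommit, ramOvercommit);
        if (!fits) {
            fits = check.hasCapacity(hostId, cpu, ramBytes, false, cpuOvercommit, ramOvercommit);
        }
        return fits;
    }

    public static void main(String[] args) {
        // Requested resources derived from the offering, as in planDeployment.
        int cpuRequested = 2 * 2000;                 // cpu cores * speed (MHz)
        long ramRequested = 4096L * 1024L * 1024L;   // RAM size (MB) in bytes

        // Toy check: only free capacity (fromReserved == false) is available.
        CapacityCheck toy = (host, cpu, ram, reserved, cpuOc, ramOc) -> !reserved;
        System.out.println(lastHostFits(toy, 42L, cpuRequested, ramRequested, 1.0f, 1.0f)); // true
    }
}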

Aggregations

Cluster (com.cloud.org.Cluster): 26 usages
ArrayList (java.util.ArrayList): 13 usages
HostVO (com.cloud.host.HostVO): 8 usages
ServerApiException (org.apache.cloudstack.api.ServerApiException): 7 usages
InvalidParameterValueException (com.cloud.exception.InvalidParameterValueException): 6 usages
CloudRuntimeException (com.cloud.utils.exception.CloudRuntimeException): 6 usages
DataCenter (com.cloud.dc.DataCenter): 5 usages
Pod (com.cloud.dc.Pod): 5 usages
DeployDestination (com.cloud.deploy.DeployDestination): 5 usages
DataCenterVO (com.cloud.dc.DataCenterVO): 4 usages
DiscoveryException (com.cloud.exception.DiscoveryException): 4 usages
ResourceInUseException (com.cloud.exception.ResourceInUseException): 4 usages
Host (com.cloud.host.Host): 4 usages
StoragePool (com.cloud.storage.StoragePool): 4 usages
ClusterResponse (org.apache.cloudstack.api.response.ClusterResponse): 4 usages
ClusterResponse (com.cloud.api.response.ClusterResponse): 3 usages
ClusterDetailsVO (com.cloud.dc.ClusterDetailsVO): 3 usages
ClusterVO (com.cloud.dc.ClusterVO): 3 usages
List (java.util.List): 3 usages
ServerApiException (com.cloud.api.ServerApiException): 2 usages