Use of com.cloud.dc.ClusterDetailsVO in the Apache CloudStack project.
From the class KubernetesClusterResourceModifierActionWorker, the plan method:
protected DeployDestination plan(final long nodesCount, final DataCenter zone, final ServiceOffering offering) throws InsufficientServerCapacityException {
    final int cpu_requested = offering.getCpu() * offering.getSpeed();
    final long ram_requested = offering.getRamSize() * 1024L * 1024L;
    List<HostVO> hosts = resourceManager.listAllHostsInOneZoneByType(Host.Type.Routing, zone.getId());
    final Map<String, Pair<HostVO, Integer>> hosts_with_resevered_capacity = new ConcurrentHashMap<String, Pair<HostVO, Integer>>();
    for (HostVO h : hosts) {
        hosts_with_resevered_capacity.put(h.getUuid(), new Pair<HostVO, Integer>(h, 0));
    }
    boolean suitable_host_found = false;
    for (int i = 1; i <= nodesCount; i++) {
        suitable_host_found = false;
        for (Map.Entry<String, Pair<HostVO, Integer>> hostEntry : hosts_with_resevered_capacity.entrySet()) {
            Pair<HostVO, Integer> hp = hostEntry.getValue();
            HostVO h = hp.first();
            if (!h.getHypervisorType().equals(clusterTemplate.getHypervisorType())) {
                continue;
            }
            hostDao.loadHostTags(h);
            if (StringUtils.isNotEmpty(offering.getHostTag()) && !(h.getHostTags() != null && h.getHostTags().contains(offering.getHostTag()))) {
                continue;
            }
            int reserved = hp.second();
            reserved++;
            ClusterVO cluster = clusterDao.findById(h.getClusterId());
            ClusterDetailsVO cluster_detail_cpu = clusterDetailsDao.findDetail(cluster.getId(), "cpuOvercommitRatio");
            ClusterDetailsVO cluster_detail_ram = clusterDetailsDao.findDetail(cluster.getId(), "memoryOvercommitRatio");
            Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue());
            Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue());
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug(String.format("Checking host : %s for capacity already reserved %d", h.getName(), reserved));
            }
            if (capacityManager.checkIfHostHasCapacity(h.getId(), cpu_requested * reserved, ram_requested * reserved, false, cpuOvercommitRatio, memoryOvercommitRatio, true)) {
                if (LOGGER.isDebugEnabled()) {
                    LOGGER.debug(String.format("Found host : %s for with enough capacity, CPU=%d RAM=%s", h.getName(), cpu_requested * reserved, toHumanReadableSize(ram_requested * reserved)));
                }
                hostEntry.setValue(new Pair<HostVO, Integer>(h, reserved));
                suitable_host_found = true;
                break;
            }
        }
        if (!suitable_host_found) {
            if (LOGGER.isInfoEnabled()) {
                LOGGER.info(String.format("Suitable hosts not found in datacenter : %s for node %d, with offering : %s and hypervisor: %s", zone.getName(), i, offering.getName(), clusterTemplate.getHypervisorType().toString()));
            }
            break;
        }
    }
    if (suitable_host_found) {
        if (LOGGER.isInfoEnabled()) {
            LOGGER.info(String.format("Suitable hosts found in datacenter : %s, creating deployment destination", zone.getName()));
        }
        return new DeployDestination(zone, null, null, null);
    }
    String msg = String.format("Cannot find enough capacity for Kubernetes cluster(requested cpu=%d memory=%s) with offering : %s and hypervisor: %s", cpu_requested * nodesCount, toHumanReadableSize(ram_requested * nodesCount), offering.getName(), clusterTemplate.getHypervisorType().toString());
    LOGGER.warn(msg);
    throw new InsufficientServerCapacityException(msg, DataCenter.class, zone.getId());
}
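
For context: ClusterDetailsVO rows are per-cluster key/value details, and the worker above reads the "cpuOvercommitRatio" and "memoryOvercommitRatio" rows before calling CapacityManager.checkIfHostHasCapacity. The sketch below isolates that lookup-and-check pattern; the class and method names and the constructor-injected fields are hypothetical, and only the DAO and CapacityManager calls mirror the snippet above.

// Minimal sketch (hypothetical helper, not CloudStack source): look up a cluster's
// overcommit ratios from ClusterDetailsVO and feed them into a host capacity check,
// mirroring the pattern used in the plan() method above.
import com.cloud.capacity.CapacityManager;
import com.cloud.dc.ClusterDetailsDao;
import com.cloud.dc.ClusterDetailsVO;
import com.cloud.host.HostVO;

public class OvercommitAwareCapacityCheck {

    private final ClusterDetailsDao clusterDetailsDao;
    private final CapacityManager capacityManager;

    public OvercommitAwareCapacityCheck(ClusterDetailsDao clusterDetailsDao, CapacityManager capacityManager) {
        this.clusterDetailsDao = clusterDetailsDao;
        this.capacityManager = capacityManager;
    }

    // cpuRequested and ramRequested follow the cpu_requested / ram_requested values
    // computed in the method above; the overcommit ratios come from the per-cluster detail rows.
    public boolean hostFits(HostVO host, int cpuRequested, long ramRequested) {
        ClusterDetailsVO cpuDetail = clusterDetailsDao.findDetail(host.getClusterId(), "cpuOvercommitRatio");
        ClusterDetailsVO ramDetail = clusterDetailsDao.findDetail(host.getClusterId(), "memoryOvercommitRatio");
        float cpuOvercommitRatio = Float.parseFloat(cpuDetail.getValue());
        float memoryOvercommitRatio = Float.parseFloat(ramDetail.getValue());
        return capacityManager.checkIfHostHasCapacity(host.getId(), cpuRequested, ramRequested,
                false, cpuOvercommitRatio, memoryOvercommitRatio, true);
    }
}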
Use of com.cloud.dc.ClusterDetailsVO in the Apache CloudStack project.
From the class VolumeTestVmware, the setUp method:
@Test(priority = -1)
public void setUp() {
    ComponentContext.initComponentsLifeCycle();
    host = hostDao.findByGuid(this.getHostGuid());
    if (host != null) {
        dcId = host.getDataCenterId();
        clusterId = host.getClusterId();
        podId = host.getPodId();
        imageStore = this.imageStoreDao.findByName(imageStoreName);
    } else {
        // create data center
        DataCenterVO dc = new DataCenterVO(UUID.randomUUID().toString(), "test", "8.8.8.8", null, "10.0.0.1", null, "10.0.0.1/24", null, null, NetworkType.Basic, null, null, true, true, null, null);
        dc = dcDao.persist(dc);
        dcId = dc.getId();
        // create pod
        HostPodVO pod = new HostPodVO(UUID.randomUUID().toString(), dc.getId(), this.getHostGateway(), this.getHostCidr(), 8, "test");
        pod = podDao.persist(pod);
        podId = pod.getId();
        // create VMware cluster
        ClusterVO cluster = new ClusterVO(dc.getId(), pod.getId(), "devcloud cluster");
        cluster.setHypervisorType(HypervisorType.VMware.toString());
        cluster.setClusterType(ClusterType.ExternalManaged);
        cluster.setManagedState(ManagedState.Managed);
        cluster = clusterDao.persist(cluster);
        clusterId = cluster.getId();
        // setup vcenter
        ClusterDetailsVO clusterDetailVO = new ClusterDetailsVO(cluster.getId(), "url", null);
        this.clusterDetailsDao.persist(clusterDetailVO);
        clusterDetailVO = new ClusterDetailsVO(cluster.getId(), "username", null);
        this.clusterDetailsDao.persist(clusterDetailVO);
        clusterDetailVO = new ClusterDetailsVO(cluster.getId(), "password", null);
        this.clusterDetailsDao.persist(clusterDetailVO);
        // create VMware host
        host = new HostVO(this.getHostGuid());
        host.setName("devcloud vmware host");
        host.setType(Host.Type.Routing);
        host.setPrivateIpAddress(this.getHostIp());
        host.setDataCenterId(dc.getId());
        host.setVersion("6.0.1");
        host.setAvailable(true);
        host.setSetup(true);
        host.setPodId(podId);
        host.setLastPinged(0);
        host.setResourceState(ResourceState.Enabled);
        host.setHypervisorType(HypervisorType.VMware);
        host.setClusterId(cluster.getId());
        host = hostDao.persist(host);
        imageStore = new ImageStoreVO();
        imageStore.setName(imageStoreName);
        imageStore.setDataCenterId(dcId);
        imageStore.setProviderName("CloudStack ImageStore Provider");
        imageStore.setRole(DataStoreRole.Image);
        imageStore.setUrl(this.getSecondaryStorage());
        imageStore.setUuid(UUID.randomUUID().toString());
        imageStore.setProtocol("nfs");
        imageStore = imageStoreDao.persist(imageStore);
    }
    image = new VMTemplateVO();
    image.setTemplateType(TemplateType.USER);
    image.setUrl(this.getTemplateUrl());
    image.setUniqueName(UUID.randomUUID().toString());
    image.setName(UUID.randomUUID().toString());
    image.setPublicTemplate(true);
    image.setFeatured(true);
    image.setRequiresHvm(true);
    image.setBits(64);
    image.setFormat(Storage.ImageFormat.VHD);
    image.setEnablePassword(true);
    image.setEnableSshKey(true);
    image.setGuestOSId(1);
    image.setBootable(true);
    image.setPrepopulate(true);
    image.setCrossZones(true);
    image.setExtractable(true);
    image = imageDataDao.persist(image);
    /*
     * TemplateDataStoreVO templateStore = new TemplateDataStoreVO();
     *
     * templateStore.setDataStoreId(imageStore.getId());
     * templateStore.setDownloadPercent(100);
     * templateStore.setDownloadState(Status.DOWNLOADED);
     * templateStore.setDownloadUrl(imageStore.getUrl());
     * templateStore.setInstallPath(this.getImageInstallPath());
     * templateStore.setTemplateId(image.getId());
     * templateStoreDao.persist(templateStore);
     */
    DataStore store = this.dataStoreMgr.getDataStore(imageStore.getId(), DataStoreRole.Image);
    TemplateInfo template = templateFactory.getTemplate(image.getId(), DataStoreRole.Image);
    DataObject templateOnStore = store.create(template);
    TemplateObjectTO to = new TemplateObjectTO();
    to.setPath(this.getImageInstallPath());
    CopyCmdAnswer answer = new CopyCmdAnswer(to);
    templateOnStore.processEvent(Event.CreateOnlyRequested);
    templateOnStore.processEvent(Event.OperationSuccessed, answer);
}
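
The test above uses ClusterDetailsVO to attach vCenter connection settings ("url", "username", "password") to the freshly created cluster via clusterDetailsDao.persist. A consumer would read those rows back with findDetail; the short sketch below shows that read side. The detail keys come from the test itself, while the helper class, its name, and the null handling are illustrative assumptions.

// Minimal sketch (illustrative, not part of the test above): reading back the vCenter
// connection details that the test persisted as ClusterDetailsVO rows.
import com.cloud.dc.ClusterDetailsDao;
import com.cloud.dc.ClusterDetailsVO;

public final class VcenterClusterDetails {

    private VcenterClusterDetails() {
    }

    // Returns the value of one connection detail ("url", "username" or "password") for a
    // cluster, or null if the row is missing or was persisted without a value.
    public static String readDetail(ClusterDetailsDao clusterDetailsDao, long clusterId, String name) {
        ClusterDetailsVO detail = clusterDetailsDao.findDetail(clusterId, name);
        return detail != null ? detail.getValue() : null;
    }
}

For example, readDetail(clusterDetailsDao, clusterId, "url") would return the stored vCenter URL, which in this particular test is null because the rows are persisted without values.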
Use of com.cloud.dc.ClusterDetailsVO in the Apache CloudStack project.
From the class BareMetalPlanner, the plan method:
@Override
public DeployDestination plan(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid) throws InsufficientServerCapacityException {
    VirtualMachine vm = vmProfile.getVirtualMachine();
    ServiceOffering offering = vmProfile.getServiceOffering();
    String hostTag = null;
    String haVmTag = (String) vmProfile.getParameter(VirtualMachineProfile.Param.HaTag);
    if (vm.getLastHostId() != null && haVmTag == null) {
        HostVO h = _hostDao.findById(vm.getLastHostId());
        DataCenter dc = _dcDao.findById(h.getDataCenterId());
        Pod pod = _podDao.findById(h.getPodId());
        Cluster c = _clusterDao.findById(h.getClusterId());
        s_logger.debug("Start baremetal vm " + vm.getId() + " on last stayed host " + h.getId());
        return new DeployDestination(dc, pod, c, h);
    }
    if (haVmTag != null) {
        hostTag = haVmTag;
    } else if (offering.getHostTag() != null) {
        String[] tags = offering.getHostTag().split(",");
        if (tags.length > 0) {
            hostTag = tags[0];
        }
    }
    List<ClusterVO> clusters = _clusterDao.listByDcHyType(vm.getDataCenterId(), HypervisorType.BareMetal.toString());
    int cpu_requested;
    long ram_requested;
    HostVO target = null;
    List<HostVO> hosts;
    for (ClusterVO cluster : clusters) {
        hosts = _resourceMgr.listAllUpAndEnabledHosts(Host.Type.Routing, cluster.getId(), cluster.getPodId(), cluster.getDataCenterId());
        if (hostTag != null) {
            for (HostVO h : hosts) {
                _hostDao.loadDetails(h);
                if (h.getDetail("hostTag") != null && h.getDetail("hostTag").equalsIgnoreCase(hostTag)) {
                    target = h;
                    break;
                }
            }
        }
    }
    if (target == null) {
        s_logger.warn("Cannot find host with tag " + hostTag + " use capacity from service offering");
        cpu_requested = offering.getCpu() * offering.getSpeed();
        ram_requested = offering.getRamSize() * 1024L * 1024L;
    } else {
        cpu_requested = target.getCpus() * target.getSpeed().intValue();
        ram_requested = target.getTotalMemory();
    }
    for (ClusterVO cluster : clusters) {
        if (haVmTag == null) {
            hosts = _resourceMgr.listAllUpAndEnabledNonHAHosts(Host.Type.Routing, cluster.getId(), cluster.getPodId(), cluster.getDataCenterId());
        } else {
            s_logger.warn("Cannot find HA host with tag " + haVmTag + " in cluster id=" + cluster.getId() + ", pod id=" + cluster.getPodId() + ", data center id=" + cluster.getDataCenterId());
            return null;
        }
        for (HostVO h : hosts) {
            long cluster_id = h.getClusterId();
            ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id, "cpuOvercommitRatio");
            ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id, "memoryOvercommitRatio");
            Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue());
            Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue());
            if (_capacityMgr.checkIfHostHasCapacity(h.getId(), cpu_requested, ram_requested, false, cpuOvercommitRatio, memoryOvercommitRatio, true)) {
                s_logger.debug("Find host " + h.getId() + " has enough capacity");
                DataCenter dc = _dcDao.findById(h.getDataCenterId());
                Pod pod = _podDao.findById(h.getPodId());
                return new DeployDestination(dc, pod, cluster, h);
            }
        }
    }
    s_logger.warn(String.format("Cannot find enough capacity(requested cpu=%1$s memory=%2$s)", cpu_requested, NumbersUtil.toHumanReadableSize(ram_requested)));
    return null;
}
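
Both planners in this listing read the "cpuOvercommitRatio" and "memoryOvercommitRatio" details, which are ordinary ClusterDetailsVO rows. The sketch below shows the write side for completeness; the class name, the hard-coded ratio values, and the idea of seeding them directly through the DAO are assumptions for illustration, since in a real deployment these ratios are normally managed through cluster-level configuration rather than persisted by a planner.

// Minimal sketch (illustrative only): seeding the per-cluster overcommit ratios as
// ClusterDetailsVO rows, i.e. the rows that the planners above read with findDetail.
// The values here are arbitrary examples; a ratio of "1.0" means no overcommitment.
import com.cloud.dc.ClusterDetailsDao;
import com.cloud.dc.ClusterDetailsVO;

public final class OvercommitRatioSeeder {

    private OvercommitRatioSeeder() {
    }

    public static void seedDefaults(ClusterDetailsDao clusterDetailsDao, long clusterId) {
        clusterDetailsDao.persist(new ClusterDetailsVO(clusterId, "cpuOvercommitRatio", "1.0"));
        clusterDetailsDao.persist(new ClusterDetailsVO(clusterId, "memoryOvercommitRatio", "1.0"));
    }
}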