use of com.cloud.dc.ClusterVO in project cloudstack by apache.
the class DeploymentPlanningManagerImpl method checkClustersforDestination.
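The snippet below walks the candidate clusters for a VM deployment: clusters whose hypervisor type does not match the VM are skipped and recorded in the avoid set, and for each remaining cluster the planner looks for suitable hosts and storage pools before committing to a DeployDestination.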
// /refactoring planner methods
private DeployDestination checkClustersforDestination(List<Long> clusterList, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, DataCenter dc, DeploymentPlanner.PlannerResourceUsage resourceUsageRequired, ExcludeList plannerAvoidOutput) {
    if (s_logger.isTraceEnabled()) {
        s_logger.trace("ClusterId List to consider: " + clusterList);
    }
    for (Long clusterId : clusterList) {
        ClusterVO clusterVO = _clusterDao.findById(clusterId);
        if (clusterVO.getHypervisorType() != vmProfile.getHypervisorType()) {
            s_logger.debug("Cluster: " + clusterId + " has a hypervisor type that does not match the VM, skipping this cluster");
            avoid.addCluster(clusterVO.getId());
            continue;
        }
        s_logger.debug("Checking resources in Cluster: " + clusterId + " under Pod: " + clusterVO.getPodId());
        // search for resources (hosts and storage) under this zone, pod, cluster
        DataCenterDeployment potentialPlan = new DataCenterDeployment(plan.getDataCenterId(), clusterVO.getPodId(), clusterVO.getId(), null, plan.getPoolId(), null, plan.getReservationContext());
        Pod pod = _podDao.findById(clusterVO.getPodId());
        if (CollectionUtils.isNotEmpty(avoid.getPodsToAvoid()) && avoid.getPodsToAvoid().contains(pod.getId())) {
            s_logger.debug("The cluster is in a disabled pod: " + pod.getId());
        } else {
            // find suitable hosts under this cluster; need as many hosts as we can get
            List<Host> suitableHosts = findSuitableHosts(vmProfile, potentialPlan, avoid, HostAllocator.RETURN_UPTO_ALL);
            // if suitable hosts are found in this cluster, find suitable storage
            // pools for each volume of the VM
            if (suitableHosts != null && !suitableHosts.isEmpty()) {
                if (vmProfile.getHypervisorType() == HypervisorType.BareMetal) {
                    DeployDestination dest = new DeployDestination(dc, pod, clusterVO, suitableHosts.get(0));
                    return dest;
                }
                Pair<Map<Volume, List<StoragePool>>, List<Volume>> result = findSuitablePoolsForVolumes(vmProfile, potentialPlan, avoid, StoragePoolAllocator.RETURN_UPTO_ALL);
                Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
                List<Volume> readyAndReusedVolumes = result.second();
                // choose the potential host and pool for the VM
                if (!suitableVolumeStoragePools.isEmpty()) {
                    Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(suitableHosts, suitableVolumeStoragePools, avoid, resourceUsageRequired, readyAndReusedVolumes, plan.getPreferredHosts(), vmProfile.getVirtualMachine());
                    if (potentialResources != null) {
                        Host host = _hostDao.findById(potentialResources.first().getId());
                        Map<Volume, StoragePool> storageVolMap = potentialResources.second();
                        // ready and reused volumes don't have to be prepared, so drop them from the map
                        for (Volume vol : readyAndReusedVolumes) {
                            storageVolMap.remove(vol);
                        }
                        boolean displayStorage = getDisplayStorageFromVmProfile(vmProfile);
                        DeployDestination dest = new DeployDestination(dc, pod, clusterVO, host, storageVolMap, displayStorage);
                        s_logger.debug("Returning Deployment Destination: " + dest);
                        return dest;
                    }
                } else {
                    s_logger.debug("No suitable storagePools found under this Cluster: " + clusterId);
                }
            } else {
                s_logger.debug("No suitable hosts found under this Cluster: " + clusterId);
            }
        }
        if (canAvoidCluster(clusterVO, avoid, plannerAvoidOutput, vmProfile)) {
            avoid.addCluster(clusterVO.getId());
        }
    }
    s_logger.debug("Could not find suitable Deployment Destination for this VM under any clusters, returning.");
    return null;
}
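The hypervisor check plus avoid-set bookkeeping is the part of this loop most worth lifting out. Below is a minimal, self-contained sketch of that pattern, assuming a recent JDK; the Cluster record, HypervisorType enum, and filterByHypervisor name are illustrative stand-ins, not CloudStack's actual types.

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Minimal sketch of the cluster-filtering pattern, with stand-in types.
public class ClusterFilterSketch {

    enum HypervisorType { KVM, XenServer, VMware, BareMetal }

    record Cluster(long id, HypervisorType hypervisorType) { }

    // Keep only clusters whose hypervisor matches the VM; everything else
    // goes into the avoid set, mirroring avoid.addCluster(...) above.
    static List<Cluster> filterByHypervisor(List<Cluster> candidates, HypervisorType vmType, Set<Long> avoid) {
        List<Cluster> suitable = new ArrayList<>();
        for (Cluster c : candidates) {
            if (c.hypervisorType() != vmType) {
                avoid.add(c.id()); // skip on later passes too
                continue;
            }
            suitable.add(c);
        }
        return suitable;
    }

    public static void main(String[] args) {
        List<Cluster> clusters = List.of(new Cluster(1, HypervisorType.KVM), new Cluster(2, HypervisorType.VMware));
        Set<Long> avoid = new HashSet<>();
        System.out.println(filterByHypervisor(clusters, HypervisorType.KVM, avoid)); // only cluster 1
        System.out.println(avoid); // [2]
    }
}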
use of com.cloud.dc.ClusterVO in project cloudstack by apache.
the class ConfigurationManagerImpl method resetConfiguration.
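resetConfiguration resolves which scope the reset applies to (zone, cluster, storage pool, domain, account, image store, or global), removes the corresponding scoped detail row, and then recomputes the effective value via ConfigKey.valueIn(id), falling back to the default.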
@Override
@ActionEvent(eventType = EventTypes.EVENT_CONFIGURATION_VALUE_EDIT, eventDescription = "resetting configuration")
public Pair<Configuration, String> resetConfiguration(final ResetCfgCmd cmd) throws InvalidParameterValueException {
    final Long userId = CallContext.current().getCallingUserId();
    final String name = cmd.getCfgName();
    final Long zoneId = cmd.getZoneId();
    final Long clusterId = cmd.getClusterId();
    final Long storagepoolId = cmd.getStoragepoolId();
    final Long accountId = cmd.getAccountId();
    final Long domainId = cmd.getDomainId();
    final Long imageStoreId = cmd.getImageStoreId();
    Optional<?> optionalValue;
    final ConfigKey<?> configKey = _configDepot.get(name);
    if (configKey == null) {
        s_logger.warn("Probably the component manager where configuration variable " + name + " is defined needs to implement Configurable interface");
        throw new InvalidParameterValueException("Config parameter with name " + name + " doesn't exist");
    }
    String defaultValue = configKey.defaultValue();
    String category = configKey.category();
    String configScope = configKey.scope().toString();
    String scope = "";
    Map<String, Long> scopeMap = new LinkedHashMap<>();
    Long id = null;
    int paramCountCheck = 0;
    scopeMap.put(ConfigKey.Scope.Zone.toString(), zoneId);
    scopeMap.put(ConfigKey.Scope.Cluster.toString(), clusterId);
    scopeMap.put(ConfigKey.Scope.Domain.toString(), domainId);
    scopeMap.put(ConfigKey.Scope.Account.toString(), accountId);
    scopeMap.put(ConfigKey.Scope.StoragePool.toString(), storagepoolId);
    scopeMap.put(ConfigKey.Scope.ImageStore.toString(), imageStoreId);
    ParamCountPair paramCountPair = getParamCount(scopeMap);
    id = paramCountPair.getId();
    paramCountCheck = paramCountPair.getParamCount();
    scope = paramCountPair.getScope();
    if (paramCountCheck > 1) {
        throw new InvalidParameterValueException("cannot handle multiple IDs, provide only one ID corresponding to the scope");
    }
    if (scope != null && !scope.equals(ConfigKey.Scope.Global.toString()) && !configScope.contains(scope)) {
        throw new InvalidParameterValueException("Invalid scope id provided for the parameter " + name);
    }
    String newValue = null;
    switch (ConfigKey.Scope.valueOf(scope)) {
        case Zone:
            final DataCenterVO zone = _zoneDao.findById(id);
            if (zone == null) {
                throw new InvalidParameterValueException("unable to find zone by id " + id);
            }
            _dcDetailsDao.removeDetail(id, name);
            optionalValue = Optional.ofNullable(configKey.valueIn(id));
            newValue = optionalValue.isPresent() ? optionalValue.get().toString() : defaultValue;
            break;
        case Cluster:
            final ClusterVO cluster = _clusterDao.findById(id);
            if (cluster == null) {
                throw new InvalidParameterValueException("unable to find cluster by id " + id);
            }
            ClusterDetailsVO clusterDetailsVO = _clusterDetailsDao.findDetail(id, name);
            newValue = configKey.value().toString();
            if (name.equalsIgnoreCase("cpu.overprovisioning.factor") || name.equalsIgnoreCase("mem.overprovisioning.factor")) {
                _clusterDetailsDao.persist(id, name, newValue);
            } else if (clusterDetailsVO != null) {
                _clusterDetailsDao.remove(clusterDetailsVO.getId());
            }
            optionalValue = Optional.ofNullable(configKey.valueIn(id));
            newValue = optionalValue.isPresent() ? optionalValue.get().toString() : defaultValue;
            break;
        case StoragePool:
            final StoragePoolVO pool = _storagePoolDao.findById(id);
            if (pool == null) {
                throw new InvalidParameterValueException("unable to find storage pool by id " + id);
            }
            _storagePoolDetailsDao.removeDetail(id, name);
            optionalValue = Optional.ofNullable(configKey.valueIn(id));
            newValue = optionalValue.isPresent() ? optionalValue.get().toString() : defaultValue;
            break;
        case Domain:
            final DomainVO domain = _domainDao.findById(id);
            if (domain == null) {
                throw new InvalidParameterValueException("unable to find domain by id " + id);
            }
            DomainDetailVO domainDetailVO = _domainDetailsDao.findDetail(id, name);
            if (domainDetailVO != null) {
                _domainDetailsDao.remove(domainDetailVO.getId());
            }
            optionalValue = Optional.ofNullable(configKey.valueIn(id));
            newValue = optionalValue.isPresent() ? optionalValue.get().toString() : defaultValue;
            break;
        case Account:
            final AccountVO account = _accountDao.findById(id);
            if (account == null) {
                throw new InvalidParameterValueException("unable to find account by id " + id);
            }
            AccountDetailVO accountDetailVO = _accountDetailsDao.findDetail(id, name);
            if (accountDetailVO != null) {
                _accountDetailsDao.remove(accountDetailVO.getId());
            }
            optionalValue = Optional.ofNullable(configKey.valueIn(id));
            newValue = optionalValue.isPresent() ? optionalValue.get().toString() : defaultValue;
            break;
        case ImageStore:
            final ImageStoreVO imageStoreVO = _imageStoreDao.findById(id);
            if (imageStoreVO == null) {
                throw new InvalidParameterValueException("unable to find the image store by id " + id);
            }
            ImageStoreDetailVO imageStoreDetailVO = _imageStoreDetailsDao.findDetail(id, name);
            if (imageStoreDetailVO != null) {
                _imageStoreDetailsDao.remove(imageStoreDetailVO.getId());
            }
            optionalValue = Optional.ofNullable(configKey.valueIn(id));
            newValue = optionalValue.isPresent() ? optionalValue.get().toString() : defaultValue;
            break;
        default:
            if (!_configDao.update(name, category, defaultValue)) {
                s_logger.error("Failed to reset configuration option, name: " + name + ", defaultValue:" + defaultValue);
                throw new CloudRuntimeException("Failed to reset configuration value. Please contact Cloud Support.");
            }
            optionalValue = Optional.ofNullable(configKey.value());
            newValue = optionalValue.isPresent() ? optionalValue.get().toString() : defaultValue;
    }
    CallContext.current().setEventDetails(" Name: " + name + " New Value: " + (name.toLowerCase().contains("password") ? "*****" : defaultValue == null ? "" : defaultValue));
    return new Pair<Configuration, String>(_configDao.findByName(name), newValue);
}
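The validation step in the middle — build an ordered map of scope name to the ID supplied for it, then require that at most one ID is non-null — is the part callers most often get wrong. A minimal sketch of that check, with a hypothetical resolveScope helper standing in for CloudStack's getParamCount/ParamCountPair:

import java.util.LinkedHashMap;
import java.util.Map;

public class ScopeResolutionSketch {

    // Returns the single scope whose ID was supplied, or "Global" if none was.
    // Throws if more than one scope ID is non-null, mirroring the
    // "cannot handle multiple IDs" check in resetConfiguration.
    static String resolveScope(Map<String, Long> scopeMap) {
        String scope = "Global";
        int count = 0;
        for (Map.Entry<String, Long> e : scopeMap.entrySet()) {
            if (e.getValue() != null) {
                scope = e.getKey();
                count++;
            }
        }
        if (count > 1) {
            throw new IllegalArgumentException("cannot handle multiple IDs, provide only one ID corresponding to the scope");
        }
        return scope;
    }

    public static void main(String[] args) {
        Map<String, Long> scopeMap = new LinkedHashMap<>();
        scopeMap.put("Zone", null);
        scopeMap.put("Cluster", 42L);
        scopeMap.put("Domain", null);
        System.out.println(resolveScope(scopeMap)); // Cluster
    }
}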
use of com.cloud.dc.ClusterVO in project cloudstack by apache.
the class CapacityManagerImpl method updateCapacityForHost.
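updateCapacityForHost recomputes a host's used capacity from the VMs running on it (plus VMs still migrating from it) and its reserved capacity from VMs that recently stopped there, scaling each VM's CPU and RAM by overcommit ratios, and then calibrates the CPU, CPU-core, and memory CapacityVO rows.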
@DB
@Override
public void updateCapacityForHost(final Host host, final Map<Long, ServiceOfferingVO> offeringsMap) {
    long usedCpuCore = 0;
    long reservedCpuCore = 0;
    long usedCpu = 0;
    long usedMemory = 0;
    long reservedMemory = 0;
    long reservedCpu = 0;
    final CapacityState capacityState = (host.getResourceState() == ResourceState.Enabled) ? CapacityState.Enabled : CapacityState.Disabled;
    List<VMInstanceVO> vms = _vmDao.listUpByHostId(host.getId());
    if (s_logger.isDebugEnabled()) {
        s_logger.debug("Found " + vms.size() + " VMs on host " + host.getId());
    }
    final List<VMInstanceVO> vosMigrating = _vmDao.listVmsMigratingFromHost(host.getId());
    if (s_logger.isDebugEnabled()) {
        s_logger.debug("Found " + vosMigrating.size() + " VMs migrating from host " + host.getId());
    }
    vms.addAll(vosMigrating);
    ClusterVO cluster = _clusterDao.findById(host.getClusterId());
    ClusterDetailsVO clusterDetailCpu = _clusterDetailsDao.findDetail(cluster.getId(), "cpuOvercommitRatio");
    ClusterDetailsVO clusterDetailRam = _clusterDetailsDao.findDetail(cluster.getId(), "memoryOvercommitRatio");
    Float clusterCpuOvercommitRatio = Float.parseFloat(clusterDetailCpu.getValue());
    Float clusterRamOvercommitRatio = Float.parseFloat(clusterDetailRam.getValue());
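    // Charge for each VM scales its raw CPU/RAM by the cluster's current
    // overcommit ratio and divides by the ratio the VM was deployed with:
    // charged = (raw / vmRatio) * clusterRatio.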
    for (VMInstanceVO vm : vms) {
        Float cpuOvercommitRatio = 1.0f;
        Float ramOvercommitRatio = 1.0f;
        Map<String, String> vmDetails = _userVmDetailsDao.listDetailsKeyPairs(vm.getId());
        String vmDetailCpu = vmDetails.get("cpuOvercommitRatio");
        String vmDetailRam = vmDetails.get("memoryOvercommitRatio");
        // if vmDetailCpu or vmDetailRam is not null, the VM is running in an overcommitted cluster
        cpuOvercommitRatio = (vmDetailCpu != null) ? Float.parseFloat(vmDetailCpu) : clusterCpuOvercommitRatio;
        ramOvercommitRatio = (vmDetailRam != null) ? Float.parseFloat(vmDetailRam) : clusterRamOvercommitRatio;
        ServiceOffering so = offeringsMap.get(vm.getServiceOfferingId());
        if (so == null) {
            so = _offeringsDao.findByIdIncludingRemoved(vm.getServiceOfferingId());
        }
        if (so.isDynamic()) {
            usedMemory += ((Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.memory.name())) * 1024L * 1024L) / ramOvercommitRatio) * clusterRamOvercommitRatio;
            if (vmDetails.containsKey(UsageEventVO.DynamicParameters.cpuSpeed.name())) {
                usedCpu += ((Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.cpuNumber.name())) * Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.cpuSpeed.name()))) / cpuOvercommitRatio) * clusterCpuOvercommitRatio;
            } else {
                usedCpu += ((Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.cpuNumber.name())) * so.getSpeed()) / cpuOvercommitRatio) * clusterCpuOvercommitRatio;
            }
            usedCpuCore += Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.cpuNumber.name()));
        } else {
            usedMemory += ((so.getRamSize() * 1024L * 1024L) / ramOvercommitRatio) * clusterRamOvercommitRatio;
            usedCpu += ((so.getCpu() * so.getSpeed()) / cpuOvercommitRatio) * clusterCpuOvercommitRatio;
            usedCpuCore += so.getCpu();
        }
    }
    List<VMInstanceVO> vmsByLastHostId = _vmDao.listByLastHostId(host.getId());
    if (s_logger.isDebugEnabled()) {
        s_logger.debug("Found " + vmsByLastHostId.size() + " VMs not running on host " + host.getId());
    }
    for (VMInstanceVO vm : vmsByLastHostId) {
        Float cpuOvercommitRatio = 1.0f;
        Float ramOvercommitRatio = 1.0f;
        long secondsSinceLastUpdate = (DateUtil.currentGMTTime().getTime() - vm.getUpdateTime().getTime()) / 1000;
        if (secondsSinceLastUpdate < _vmCapacityReleaseInterval) {
            UserVmDetailVO vmDetailCpu = _userVmDetailsDao.findDetail(vm.getId(), VmDetailConstants.CPU_OVER_COMMIT_RATIO);
            UserVmDetailVO vmDetailRam = _userVmDetailsDao.findDetail(vm.getId(), VmDetailConstants.MEMORY_OVER_COMMIT_RATIO);
            if (vmDetailCpu != null) {
                // if vmDetailCpu is not null, the VM was running in an overcommitted cluster
                cpuOvercommitRatio = Float.parseFloat(vmDetailCpu.getValue());
                ramOvercommitRatio = Float.parseFloat(vmDetailRam.getValue());
            }
            ServiceOffering so = offeringsMap.get(vm.getServiceOfferingId());
            Map<String, String> vmDetails = _userVmDetailsDao.listDetailsKeyPairs(vm.getId());
            if (so == null) {
                so = _offeringsDao.findByIdIncludingRemoved(vm.getServiceOfferingId());
            }
            if (so.isDynamic()) {
                reservedMemory += ((Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.memory.name())) * 1024L * 1024L) / ramOvercommitRatio) * clusterRamOvercommitRatio;
                if (vmDetails.containsKey(UsageEventVO.DynamicParameters.cpuSpeed.name())) {
                    reservedCpu += ((Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.cpuNumber.name())) * Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.cpuSpeed.name()))) / cpuOvercommitRatio) * clusterCpuOvercommitRatio;
                } else {
                    reservedCpu += ((Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.cpuNumber.name())) * so.getSpeed()) / cpuOvercommitRatio) * clusterCpuOvercommitRatio;
                }
                reservedCpuCore += Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.cpuNumber.name()));
            } else {
                reservedMemory += ((so.getRamSize() * 1024L * 1024L) / ramOvercommitRatio) * clusterRamOvercommitRatio;
                reservedCpu += (so.getCpu() * so.getSpeed() / cpuOvercommitRatio) * clusterCpuOvercommitRatio;
                reservedCpuCore += so.getCpu();
            }
        } else {
            // signal, if not done already, that the VM has been stopped for skip.counting.hours,
            // hence its capacity will no longer be reserved
            UserVmDetailVO messageSentFlag = _userVmDetailsDao.findDetail(vm.getId(), VmDetailConstants.MESSAGE_RESERVED_CAPACITY_FREED_FLAG);
            if (messageSentFlag == null || !Boolean.valueOf(messageSentFlag.getValue())) {
                _messageBus.publish(_name, "VM_ReservedCapacity_Free", PublishScope.LOCAL, vm);
                if (vm.getType() == VirtualMachine.Type.User) {
                    UserVmVO userVM = _userVMDao.findById(vm.getId());
                    _userVMDao.loadDetails(userVM);
                    userVM.setDetail(VmDetailConstants.MESSAGE_RESERVED_CAPACITY_FREED_FLAG, "true");
                    _userVMDao.saveDetails(userVM);
                }
            }
        }
    }
    CapacityVO cpuCap = _capacityDao.findByHostIdType(host.getId(), Capacity.CAPACITY_TYPE_CPU);
    CapacityVO memCap = _capacityDao.findByHostIdType(host.getId(), Capacity.CAPACITY_TYPE_MEMORY);
    CapacityVO cpuCoreCap = _capacityDao.findByHostIdType(host.getId(), CapacityVO.CAPACITY_TYPE_CPU_CORE);
    if (cpuCoreCap != null) {
        long hostTotalCpuCore = host.getCpus().longValue();
        if (cpuCoreCap.getTotalCapacity() != hostTotalCpuCore) {
            s_logger.debug("Calibrate total cpu for host: " + host.getId() + " old total CPU:" + cpuCoreCap.getTotalCapacity() + " new total CPU:" + hostTotalCpuCore);
            cpuCoreCap.setTotalCapacity(hostTotalCpuCore);
        }
        if (cpuCoreCap.getUsedCapacity() == usedCpuCore && cpuCoreCap.getReservedCapacity() == reservedCpuCore) {
            s_logger.debug("No need to calibrate cpu capacity, host:" + host.getId() + " usedCpuCore: " + cpuCoreCap.getUsedCapacity() + " reservedCpuCore: " + cpuCoreCap.getReservedCapacity());
        } else {
            if (cpuCoreCap.getReservedCapacity() != reservedCpuCore) {
                s_logger.debug("Calibrate reserved cpu core for host: " + host.getId() + " old reservedCpuCore:" + cpuCoreCap.getReservedCapacity() + " new reservedCpuCore:" + reservedCpuCore);
                cpuCoreCap.setReservedCapacity(reservedCpuCore);
            }
            if (cpuCoreCap.getUsedCapacity() != usedCpuCore) {
                s_logger.debug("Calibrate used cpu core for host: " + host.getId() + " old usedCpuCore:" + cpuCoreCap.getUsedCapacity() + " new usedCpuCore:" + usedCpuCore);
                cpuCoreCap.setUsedCapacity(usedCpuCore);
            }
        }
        try {
            _capacityDao.update(cpuCoreCap.getId(), cpuCoreCap);
        } catch (Exception e) {
            s_logger.error("Caught exception while updating cpucore capacity for the host " + host.getId(), e);
        }
    } else {
        final long usedCpuCoreFinal = usedCpuCore;
        final long reservedCpuCoreFinal = reservedCpuCore;
        Transaction.execute(new TransactionCallbackNoReturn() {
            @Override
            public void doInTransactionWithoutResult(TransactionStatus status) {
                CapacityVO capacity = new CapacityVO(host.getId(), host.getDataCenterId(), host.getPodId(), host.getClusterId(), usedCpuCoreFinal, host.getCpus().longValue(), CapacityVO.CAPACITY_TYPE_CPU_CORE);
                capacity.setReservedCapacity(reservedCpuCoreFinal);
                capacity.setCapacityState(capacityState);
                _capacityDao.persist(capacity);
            }
        });
    }
    if (cpuCap != null && memCap != null) {
        if (host.getTotalMemory() != null) {
            memCap.setTotalCapacity(host.getTotalMemory());
        }
        long hostTotalCpu = host.getCpus().longValue() * host.getSpeed().longValue();
        if (cpuCap.getTotalCapacity() != hostTotalCpu) {
            s_logger.debug("Calibrate total cpu for host: " + host.getId() + " old total CPU:" + cpuCap.getTotalCapacity() + " new total CPU:" + hostTotalCpu);
            cpuCap.setTotalCapacity(hostTotalCpu);
        }
        // Set the capacity state as per the host allocation state.
        if (capacityState != cpuCap.getCapacityState()) {
            s_logger.debug("Calibrate cpu capacity state for host: " + host.getId() + " old capacity state:" + cpuCap.getCapacityState() + " new capacity state:" + capacityState);
            cpuCap.setCapacityState(capacityState);
        }
        memCap.setCapacityState(capacityState);
        if (cpuCap.getUsedCapacity() == usedCpu && cpuCap.getReservedCapacity() == reservedCpu) {
            s_logger.debug("No need to calibrate cpu capacity, host:" + host.getId() + " usedCpu: " + cpuCap.getUsedCapacity() + " reservedCpu: " + cpuCap.getReservedCapacity());
        } else {
            if (cpuCap.getReservedCapacity() != reservedCpu) {
                s_logger.debug("Calibrate reserved cpu for host: " + host.getId() + " old reservedCpu:" + cpuCap.getReservedCapacity() + " new reservedCpu:" + reservedCpu);
                cpuCap.setReservedCapacity(reservedCpu);
            }
            if (cpuCap.getUsedCapacity() != usedCpu) {
                s_logger.debug("Calibrate used cpu for host: " + host.getId() + " old usedCpu:" + cpuCap.getUsedCapacity() + " new usedCpu:" + usedCpu);
                cpuCap.setUsedCapacity(usedCpu);
            }
        }
        if (memCap.getTotalCapacity() != host.getTotalMemory()) {
            s_logger.debug("Calibrate total memory for host: " + host.getId() + " old total memory:" + toHumanReadableSize(memCap.getTotalCapacity()) + " new total memory:" + toHumanReadableSize(host.getTotalMemory()));
            memCap.setTotalCapacity(host.getTotalMemory());
        }
        // Set the capacity state as per the host allocation state.
        if (capacityState != memCap.getCapacityState()) {
            s_logger.debug("Calibrate memory capacity state for host: " + host.getId() + " old capacity state:" + memCap.getCapacityState() + " new capacity state:" + capacityState);
            memCap.setCapacityState(capacityState);
        }
        if (memCap.getUsedCapacity() == usedMemory && memCap.getReservedCapacity() == reservedMemory) {
            s_logger.debug("No need to calibrate memory capacity, host:" + host.getId() + " usedMem: " + toHumanReadableSize(memCap.getUsedCapacity()) + " reservedMem: " + toHumanReadableSize(memCap.getReservedCapacity()));
        } else {
            if (memCap.getReservedCapacity() != reservedMemory) {
                s_logger.debug("Calibrate reserved memory for host: " + host.getId() + " old reservedMem:" + memCap.getReservedCapacity() + " new reservedMem:" + reservedMemory);
                memCap.setReservedCapacity(reservedMemory);
            }
            if (memCap.getUsedCapacity() != usedMemory) {
                /*
                 * Calibration of used memory is unreliable, because VMs can be
                 * in states (starting/migrating) in which it is not known on
                 * which host they are allocated.
                 */
                s_logger.debug("Calibrate used memory for host: " + host.getId() + " old usedMem: " + toHumanReadableSize(memCap.getUsedCapacity()) + " new usedMem: " + toHumanReadableSize(usedMemory));
                memCap.setUsedCapacity(usedMemory);
            }
        }
        try {
            _capacityDao.update(cpuCap.getId(), cpuCap);
            _capacityDao.update(memCap.getId(), memCap);
        } catch (Exception e) {
            s_logger.error("Caught exception while updating cpu/memory capacity for the host " + host.getId(), e);
        }
    } else {
        final long usedMemoryFinal = usedMemory;
        final long reservedMemoryFinal = reservedMemory;
        final long usedCpuFinal = usedCpu;
        final long reservedCpuFinal = reservedCpu;
        Transaction.execute(new TransactionCallbackNoReturn() {
            @Override
            public void doInTransactionWithoutResult(TransactionStatus status) {
                CapacityVO capacity = new CapacityVO(host.getId(), host.getDataCenterId(), host.getPodId(), host.getClusterId(), usedMemoryFinal, host.getTotalMemory(), Capacity.CAPACITY_TYPE_MEMORY);
                capacity.setReservedCapacity(reservedMemoryFinal);
                capacity.setCapacityState(capacityState);
                _capacityDao.persist(capacity);
                capacity = new CapacityVO(host.getId(), host.getDataCenterId(), host.getPodId(), host.getClusterId(), usedCpuFinal, host.getCpus().longValue() * host.getSpeed().longValue(), Capacity.CAPACITY_TYPE_CPU);
                capacity.setReservedCapacity(reservedCpuFinal);
                capacity.setCapacityState(capacityState);
                _capacityDao.persist(capacity);
            }
        });
    }
}
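The same charge formula appears in both loops above. As a sanity check on the arithmetic, here is a small self-contained sketch; the class and method names are illustrative only:

public class OvercommitChargeSketch {

    // charged = (raw / vmRatio) * clusterRatio, as in updateCapacityForHost.
    static long chargedMemory(long ramMiB, float vmRatio, float clusterRatio) {
        return (long) (((ramMiB * 1024L * 1024L) / vmRatio) * clusterRatio);
    }

    static long chargedCpu(int cores, int speedMhz, float vmRatio, float clusterRatio) {
        return (long) (((cores * (long) speedMhz) / vmRatio) * clusterRatio);
    }

    public static void main(String[] args) {
        // A 2 GiB, 2 x 2000 MHz VM deployed at ratio 2.0 in a cluster now at 4.0:
        System.out.println(chargedMemory(2048, 2.0f, 4.0f)); // 4294967296 (4 GiB)
        System.out.println(chargedCpu(2, 2000, 2.0f, 4.0f)); // 8000 (MHz)
    }
}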
use of com.cloud.dc.ClusterVO in project cloudstack by apache.
the class ClusterDaoImpl method getAvailableHypervisorInZone.
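getAvailableHypervisorInZone returns the hypervisor type of each matching cluster, one entry per cluster; a null zoneId matches clusters in all zones.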
@Override
public List<HypervisorType> getAvailableHypervisorInZone(Long zoneId) {
    SearchCriteria<ClusterVO> sc = AvailHyperSearch.create();
    if (zoneId != null) {
        sc.setParameters("zoneId", zoneId);
    }
    List<ClusterVO> clusters = listBy(sc);
    List<HypervisorType> hypers = new ArrayList<>(4);
    for (ClusterVO cluster : clusters) {
        hypers.add(cluster.getHypervisorType());
    }
    return hypers;
}
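Because the list carries one entry per cluster, the same hypervisor type can appear multiple times. A caller that wants distinct types can dedupe; a minimal sketch with a stand-in enum in place of the CloudStack one:

import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

public class DistinctHypervisorsSketch {

    enum HypervisorType { KVM, XenServer, VMware } // stand-in for the CloudStack enum

    public static void main(String[] args) {
        // Stand-in for clusterDao.getAvailableHypervisorInZone(zoneId), which
        // returns one entry per matching cluster, so duplicates are possible.
        List<HypervisorType> perCluster = List.of(HypervisorType.KVM, HypervisorType.XenServer, HypervisorType.KVM);

        // A LinkedHashSet preserves first-seen order while dropping duplicates.
        Set<HypervisorType> distinct = new LinkedHashSet<>(perCluster);
        System.out.println(distinct); // [KVM, XenServer]
    }
}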
use of com.cloud.dc.ClusterVO in project cloudstack by apache.
the class ClusterDaoImpl method remove.
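remove blanks the cluster's name and guid before the soft delete, presumably so those uniquely-constrained columns can be reused when a cluster with the same name or GUID is added later; super.remove(id) then marks the row as removed.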
@Override
public boolean remove(Long id) {
    TransactionLegacy txn = TransactionLegacy.currentTxn();
    txn.start();
    ClusterVO cluster = createForUpdate();
    cluster.setName(null);
    cluster.setGuid(null);
    update(id, cluster);
    boolean result = super.remove(id);
    txn.commit();
    return result;
}