Use of com.cloud.offering.ServiceOffering in project cloudstack by apache.
The class VirtualMachineManagerImpl, method orchestrateReConfigureVm.
private VMInstanceVO orchestrateReConfigureVm(final String vmUuid, final ServiceOffering oldServiceOffering, final boolean reconfiguringOnExistingHost) throws ResourceUnavailableException, ConcurrentOperationException {
    final VMInstanceVO vm = _vmDao.findByUuid(vmUuid);
    final long newServiceofferingId = vm.getServiceOfferingId();
    final ServiceOffering newServiceOffering = _offeringDao.findById(vm.getId(), newServiceofferingId);
    final HostVO hostVo = _hostDao.findById(vm.getHostId());
    final Float memoryOvercommitRatio = CapacityManager.MemOverprovisioningFactor.valueIn(hostVo.getClusterId());
    final Float cpuOvercommitRatio = CapacityManager.CpuOverprovisioningFactor.valueIn(hostVo.getClusterId());
    final long minMemory = (long) (newServiceOffering.getRamSize() / memoryOvercommitRatio);
    final ScaleVmCommand reconfigureCmd = new ScaleVmCommand(vm.getInstanceName(), newServiceOffering.getCpu(), (int) (newServiceOffering.getSpeed() / cpuOvercommitRatio),
            newServiceOffering.getSpeed(), minMemory * 1024L * 1024L, newServiceOffering.getRamSize() * 1024L * 1024L, newServiceOffering.getLimitCpuUse());
    final Long dstHostId = vm.getHostId();
    if (vm.getHypervisorType().equals(HypervisorType.VMware)) {
        final HypervisorGuru hvGuru = _hvGuruMgr.getGuru(vm.getHypervisorType());
        final Map<String, String> details = hvGuru.getClusterSettings(vm.getId());
        reconfigureCmd.getVirtualMachine().setDetails(details);
    }
    final ItWorkVO work = new ItWorkVO(UUID.randomUUID().toString(), _nodeId, State.Running, vm.getType(), vm.getId());
    work.setStep(Step.Prepare);
    work.setResourceType(ItWorkVO.ResourceType.Host);
    work.setResourceId(vm.getHostId());
    _workDao.persist(work);
    boolean success = false;
    try {
        if (reconfiguringOnExistingHost) {
            vm.setServiceOfferingId(oldServiceOffering.getId());
            // release the old capacity
            _capacityMgr.releaseVmCapacity(vm, false, false, vm.getHostId());
            vm.setServiceOfferingId(newServiceofferingId);
            // lock the new capacity
            _capacityMgr.allocateVmCapacity(vm, false);
        }
        final Answer reconfigureAnswer = _agentMgr.send(vm.getHostId(), reconfigureCmd);
        if (reconfigureAnswer == null || !reconfigureAnswer.getResult()) {
            s_logger.error("Unable to scale vm due to " + (reconfigureAnswer == null ? "" : reconfigureAnswer.getDetails()));
            throw new CloudRuntimeException("Unable to scale vm due to " + (reconfigureAnswer == null ? "" : reconfigureAnswer.getDetails()));
        }
        success = true;
    } catch (final OperationTimedoutException e) {
        throw new AgentUnavailableException("Operation timed out on reconfiguring " + vm, dstHostId);
    } catch (final AgentUnavailableException e) {
        throw e;
    } finally {
        if (!success) {
            // release the new capacity
            _capacityMgr.releaseVmCapacity(vm, false, false, vm.getHostId());
            vm.setServiceOfferingId(oldServiceOffering.getId());
            // allocate the old capacity
            _capacityMgr.allocateVmCapacity(vm, false);
        }
    }
    return vm;
}
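As a side note, the minimum CPU and memory handed to ScaleVmCommand above are simply the offering values divided by the cluster's overprovisioning factors. A minimal standalone sketch of that arithmetic, with made-up offering values and ratios (nothing below is CloudStack API):

// Hypothetical numbers illustrating the min/max resource arithmetic behind ScaleVmCommand.
public class ScaleVmArithmeticSketch {
    public static void main(String[] args) {
        int offeringRamMb = 4096;          // stands in for newServiceOffering.getRamSize()
        int offeringCpuMhz = 2000;         // stands in for newServiceOffering.getSpeed()
        float memOvercommitRatio = 2.0f;   // cluster memory overprovisioning factor
        float cpuOvercommitRatio = 4.0f;   // cluster cpu overprovisioning factor

        // The guaranteed (minimum) share is the offering value divided by the overcommit ratio.
        long minMemoryMb = (long) (offeringRamMb / memOvercommitRatio);   // 2048 MB
        int minCpuMhz = (int) (offeringCpuMhz / cpuOvercommitRatio);      // 500 MHz

        // ScaleVmCommand takes memory in bytes, so the MB values are scaled up.
        long minMemoryBytes = minMemoryMb * 1024L * 1024L;
        long maxMemoryBytes = offeringRamMb * 1024L * 1024L;

        System.out.println("memory min/max (bytes): " + minMemoryBytes + " / " + maxMemoryBytes);
        System.out.println("cpu min/max (MHz): " + minCpuMhz + " / " + offeringCpuMhz);
    }
}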
Use of com.cloud.offering.ServiceOffering in project cloudstack by apache.
The class VolumeOrchestrator, method createVolumeOnPrimaryStorage.
@Override
public VolumeInfo createVolumeOnPrimaryStorage(VirtualMachine vm, VolumeInfo volume, HypervisorType rootDiskHyperType, StoragePool storagePool) throws NoTransitionException {
    VirtualMachineTemplate rootDiskTmplt = _entityMgr.findById(VirtualMachineTemplate.class, vm.getTemplateId());
    DataCenter dcVO = _entityMgr.findById(DataCenter.class, vm.getDataCenterId());
    Pod pod = _entityMgr.findById(Pod.class, storagePool.getPodId());
    ServiceOffering svo = _entityMgr.findById(ServiceOffering.class, vm.getServiceOfferingId());
    DiskOffering diskVO = _entityMgr.findById(DiskOffering.class, volume.getDiskOfferingId());
    Long clusterId = storagePool.getClusterId();
    VolumeInfo vol = null;
    if (volume.getState() == Volume.State.Allocated) {
        vol = createVolume(volume, vm, rootDiskTmplt, dcVO, pod, clusterId, svo, diskVO, new ArrayList<StoragePool>(), volume.getSize(), rootDiskHyperType);
    } else if (volume.getState() == Volume.State.Uploaded) {
        vol = copyVolume(storagePool, volume, vm, rootDiskTmplt, dcVO, pod, diskVO, svo, rootDiskHyperType);
        if (vol != null) {
            // The volume was moved successfully: decrement the account's secondary storage resource count and increment its primary storage count.
            _resourceLimitMgr.decrementResourceCount(volume.getAccountId(), ResourceType.secondary_storage, volume.getSize());
            _resourceLimitMgr.incrementResourceCount(volume.getAccountId(), ResourceType.primary_storage, volume.getSize());
        }
    }
    if (vol == null) {
        throw new CloudRuntimeException("Volume shouldn't be null " + volume.getId());
    }
    VolumeVO volVO = _volsDao.findById(vol.getId());
    if (volVO.getFormat() == null) {
        volVO.setFormat(getSupportedImageFormatForCluster(rootDiskHyperType));
    }
    _volsDao.update(volVO.getId(), volVO);
    return volFactory.getVolume(volVO.getId());
}
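When the Uploaded branch above succeeds, the volume's size stops counting against the account's secondary-storage limit and starts counting against its primary-storage limit. A minimal sketch of that accounting swap with a plain in-memory counter (hypothetical; CloudStack's resource limit manager is not used here):

import java.util.EnumMap;
import java.util.Map;

// Hypothetical illustration of the secondary -> primary storage accounting swap
// performed after copyVolume() succeeds; not CloudStack code.
public class StorageAccountingSketch {
    enum StorageKind { SECONDARY, PRIMARY }

    public static void main(String[] args) {
        Map<StorageKind, Long> usageBytes = new EnumMap<>(StorageKind.class);
        long volumeSize = 10L * 1024 * 1024 * 1024;          // 10 GiB uploaded volume
        usageBytes.put(StorageKind.SECONDARY, volumeSize);   // counted while it sits in secondary storage
        usageBytes.put(StorageKind.PRIMARY, 0L);

        // Mirror the decrement/increment pair in the method above once the copy succeeds.
        usageBytes.merge(StorageKind.SECONDARY, -volumeSize, Long::sum);
        usageBytes.merge(StorageKind.PRIMARY, volumeSize, Long::sum);

        System.out.println(usageBytes);                      // {SECONDARY=0, PRIMARY=10737418240}
    }
}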
Use of com.cloud.offering.ServiceOffering in project cloudstack by apache.
The class CapacityManagerImpl, method updateCapacityForHost.
@DB
@Override
public void updateCapacityForHost(final Host host) {
    // prepare the service offerings
    List<ServiceOfferingVO> offerings = _offeringsDao.listAllIncludingRemoved();
    Map<Long, ServiceOfferingVO> offeringsMap = new HashMap<Long, ServiceOfferingVO>();
    for (ServiceOfferingVO offering : offerings) {
        offeringsMap.put(offering.getId(), offering);
    }
    long usedCpu = 0;
    long usedMemory = 0;
    long reservedMemory = 0;
    long reservedCpu = 0;
    final CapacityState capacityState = (host.getResourceState() == ResourceState.Enabled) ? CapacityState.Enabled : CapacityState.Disabled;
    List<VMInstanceVO> vms = _vmDao.listUpByHostId(host.getId());
    if (s_logger.isDebugEnabled()) {
        s_logger.debug("Found " + vms.size() + " VMs on host " + host.getId());
    }
    ClusterVO cluster = _clusterDao.findById(host.getClusterId());
    ClusterDetailsVO clusterDetailCpu = _clusterDetailsDao.findDetail(cluster.getId(), "cpuOvercommitRatio");
    ClusterDetailsVO clusterDetailRam = _clusterDetailsDao.findDetail(cluster.getId(), "memoryOvercommitRatio");
    Float clusterCpuOvercommitRatio = Float.parseFloat(clusterDetailCpu.getValue());
    Float clusterRamOvercommitRatio = Float.parseFloat(clusterDetailRam.getValue());
    Float cpuOvercommitRatio = 1f;
    Float ramOvercommitRatio = 1f;
    for (VMInstanceVO vm : vms) {
        Map<String, String> vmDetails = _userVmDetailsDao.listDetailsKeyPairs(vm.getId());
        String vmDetailCpu = vmDetails.get("cpuOvercommitRatio");
        String vmDetailRam = vmDetails.get("memoryOvercommitRatio");
        if (vmDetailCpu != null) {
            // a non-null cpuOvercommitRatio detail means the VM is running in an overcommitted cluster
            cpuOvercommitRatio = Float.parseFloat(vmDetailCpu);
            ramOvercommitRatio = Float.parseFloat(vmDetailRam);
        }
        ServiceOffering so = offeringsMap.get(vm.getServiceOfferingId());
        if (so.isDynamic()) {
            usedMemory += ((Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.memory.name())) * 1024L * 1024L) / ramOvercommitRatio) * clusterRamOvercommitRatio;
            usedCpu += ((Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.cpuNumber.name())) * Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.cpuSpeed.name()))) / cpuOvercommitRatio) * clusterCpuOvercommitRatio;
        } else {
            usedMemory += ((so.getRamSize() * 1024L * 1024L) / ramOvercommitRatio) * clusterRamOvercommitRatio;
            usedCpu += ((so.getCpu() * so.getSpeed()) / cpuOvercommitRatio) * clusterCpuOvercommitRatio;
        }
    }
    List<VMInstanceVO> vmsByLastHostId = _vmDao.listByLastHostId(host.getId());
    if (s_logger.isDebugEnabled()) {
        s_logger.debug("Found " + vmsByLastHostId.size() + " VMs not running on host " + host.getId());
    }
    for (VMInstanceVO vm : vmsByLastHostId) {
        long secondsSinceLastUpdate = (DateUtil.currentGMTTime().getTime() - vm.getUpdateTime().getTime()) / 1000;
        if (secondsSinceLastUpdate < _vmCapacityReleaseInterval) {
            UserVmDetailVO vmDetailCpu = _userVmDetailsDao.findDetail(vm.getId(), "cpuOvercommitRatio");
            UserVmDetailVO vmDetailRam = _userVmDetailsDao.findDetail(vm.getId(), "memoryOvercommitRatio");
            if (vmDetailCpu != null) {
                // a non-null cpuOvercommitRatio detail means the VM is running in an overcommitted cluster
                cpuOvercommitRatio = Float.parseFloat(vmDetailCpu.getValue());
                ramOvercommitRatio = Float.parseFloat(vmDetailRam.getValue());
            }
            ServiceOffering so = offeringsMap.get(vm.getServiceOfferingId());
            Map<String, String> vmDetails = _userVmDetailsDao.listDetailsKeyPairs(vm.getId());
            if (so.isDynamic()) {
                reservedMemory += ((Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.memory.name())) * 1024L * 1024L) / ramOvercommitRatio) * clusterRamOvercommitRatio;
                reservedCpu += ((Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.cpuNumber.name())) * Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.cpuSpeed.name()))) / cpuOvercommitRatio) * clusterCpuOvercommitRatio;
            } else {
                reservedMemory += ((so.getRamSize() * 1024L * 1024L) / ramOvercommitRatio) * clusterRamOvercommitRatio;
                reservedCpu += (so.getCpu() * so.getSpeed() / cpuOvercommitRatio) * clusterCpuOvercommitRatio;
            }
        } else {
            // signal (if not done already) that the VM has been stopped for skip.counting.hours,
            // hence its capacity will no longer be reserved
            UserVmDetailVO messageSentFlag = _userVmDetailsDao.findDetail(vm.getId(), MESSAGE_RESERVED_CAPACITY_FREED_FLAG);
            if (messageSentFlag == null || !Boolean.valueOf(messageSentFlag.getValue())) {
                _messageBus.publish(_name, "VM_ReservedCapacity_Free", PublishScope.LOCAL, vm);
                if (vm.getType() == VirtualMachine.Type.User) {
                    UserVmVO userVM = _userVMDao.findById(vm.getId());
                    _userVMDao.loadDetails(userVM);
                    userVM.setDetail(MESSAGE_RESERVED_CAPACITY_FREED_FLAG, "true");
                    _userVMDao.saveDetails(userVM);
                }
            }
        }
    }
    CapacityVO cpuCap = _capacityDao.findByHostIdType(host.getId(), Capacity.CAPACITY_TYPE_CPU);
    CapacityVO memCap = _capacityDao.findByHostIdType(host.getId(), Capacity.CAPACITY_TYPE_MEMORY);
    if (cpuCap != null && memCap != null) {
        if (host.getTotalMemory() != null) {
            memCap.setTotalCapacity(host.getTotalMemory());
        }
        long hostTotalCpu = host.getCpus().longValue() * host.getSpeed().longValue();
        if (cpuCap.getTotalCapacity() != hostTotalCpu) {
            s_logger.debug("Calibrate total cpu for host: " + host.getId() + " old total CPU:" + cpuCap.getTotalCapacity() + " new total CPU:" + hostTotalCpu);
            cpuCap.setTotalCapacity(hostTotalCpu);
        }
        // Set the capacity state as per the host allocation state.
        if (capacityState != cpuCap.getCapacityState()) {
            s_logger.debug("Calibrate cpu capacity state for host: " + host.getId() + " old capacity state:" + cpuCap.getCapacityState() + " new capacity state:" + capacityState);
            cpuCap.setCapacityState(capacityState);
        }
        memCap.setCapacityState(capacityState);
        if (cpuCap.getUsedCapacity() == usedCpu && cpuCap.getReservedCapacity() == reservedCpu) {
            s_logger.debug("No need to calibrate cpu capacity, host:" + host.getId() + " usedCpu: " + cpuCap.getUsedCapacity() + " reservedCpu: " + cpuCap.getReservedCapacity());
        } else {
            if (cpuCap.getReservedCapacity() != reservedCpu) {
                s_logger.debug("Calibrate reserved cpu for host: " + host.getId() + " old reservedCpu:" + cpuCap.getReservedCapacity() + " new reservedCpu:" + reservedCpu);
                cpuCap.setReservedCapacity(reservedCpu);
            }
            if (cpuCap.getUsedCapacity() != usedCpu) {
                s_logger.debug("Calibrate used cpu for host: " + host.getId() + " old usedCpu:" + cpuCap.getUsedCapacity() + " new usedCpu:" + usedCpu);
                cpuCap.setUsedCapacity(usedCpu);
            }
        }
        if (memCap.getTotalCapacity() != host.getTotalMemory()) {
            s_logger.debug("Calibrate total memory for host: " + host.getId() + " old total memory:" + memCap.getTotalCapacity() + " new total memory:" + host.getTotalMemory());
            memCap.setTotalCapacity(host.getTotalMemory());
        }
        // Set the capacity state as per the host allocation state.
        if (capacityState != memCap.getCapacityState()) {
            s_logger.debug("Calibrate memory capacity state for host: " + host.getId() + " old capacity state:" + memCap.getCapacityState() + " new capacity state:" + capacityState);
            memCap.setCapacityState(capacityState);
        }
        if (memCap.getUsedCapacity() == usedMemory && memCap.getReservedCapacity() == reservedMemory) {
            s_logger.debug("No need to calibrate memory capacity, host:" + host.getId() + " usedMem: " + memCap.getUsedCapacity() + " reservedMem: " + memCap.getReservedCapacity());
        } else {
            if (memCap.getReservedCapacity() != reservedMemory) {
                s_logger.debug("Calibrate reserved memory for host: " + host.getId() + " old reservedMem:" + memCap.getReservedCapacity() + " new reservedMem:" + reservedMemory);
                memCap.setReservedCapacity(reservedMemory);
            }
            if (memCap.getUsedCapacity() != usedMemory) {
                /*
                 * Used memory cannot always be calibrated exactly, because VMs can be in
                 * transitional states (starting/migrating) where it is unclear on which
                 * host they are allocated.
                 */
                s_logger.debug("Calibrate used memory for host: " + host.getId() + " old usedMem: " + memCap.getUsedCapacity() + " new usedMem: " + usedMemory);
                memCap.setUsedCapacity(usedMemory);
            }
        }
        try {
            _capacityDao.update(cpuCap.getId(), cpuCap);
            _capacityDao.update(memCap.getId(), memCap);
        } catch (Exception e) {
            s_logger.error("Caught exception while updating cpu/memory capacity for the host " + host.getId(), e);
        }
    } else {
        final long usedMemoryFinal = usedMemory;
        final long reservedMemoryFinal = reservedMemory;
        final long usedCpuFinal = usedCpu;
        final long reservedCpuFinal = reservedCpu;
        Transaction.execute(new TransactionCallbackNoReturn() {
            @Override
            public void doInTransactionWithoutResult(TransactionStatus status) {
                CapacityVO capacity = new CapacityVO(host.getId(), host.getDataCenterId(), host.getPodId(), host.getClusterId(), usedMemoryFinal, host.getTotalMemory(), Capacity.CAPACITY_TYPE_MEMORY);
                capacity.setReservedCapacity(reservedMemoryFinal);
                capacity.setCapacityState(capacityState);
                _capacityDao.persist(capacity);
                capacity = new CapacityVO(host.getId(), host.getDataCenterId(), host.getPodId(), host.getClusterId(), usedCpuFinal, host.getCpus().longValue() * host.getSpeed().longValue(), Capacity.CAPACITY_TYPE_CPU);
                capacity.setReservedCapacity(reservedCpuFinal);
                capacity.setCapacityState(capacityState);
                _capacityDao.persist(capacity);
            }
        });
    }
}
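Every used/reserved figure above follows the same per-VM formula: take the resources from the offering (or from the dynamic-scaling details), divide by the overcommit ratio recorded on the VM, then multiply by the cluster's current ratio. A small standalone sketch of that formula with invented numbers (the names mirror the method, but nothing here calls CloudStack):

// Hypothetical per-VM capacity contribution, as computed in updateCapacityForHost.
public class CapacityContributionSketch {
    public static void main(String[] args) {
        int ramSizeMb = 2048;              // so.getRamSize()
        int cpuCores = 2;                  // so.getCpu()
        int cpuSpeedMhz = 1000;            // so.getSpeed()
        float vmRamRatio = 2.0f;           // memoryOvercommitRatio stored as a VM detail
        float vmCpuRatio = 4.0f;           // cpuOvercommitRatio stored as a VM detail
        float clusterRamRatio = 1.5f;      // memoryOvercommitRatio currently set on the cluster
        float clusterCpuRatio = 2.0f;      // cpuOvercommitRatio currently set on the cluster

        // Normalize by the ratio in effect when the VM started, then re-apply the
        // ratio currently configured on the cluster.
        long usedMemoryBytes = (long) (((ramSizeMb * 1024L * 1024L) / vmRamRatio) * clusterRamRatio);
        long usedCpuMhz = (long) (((cpuCores * cpuSpeedMhz) / vmCpuRatio) * clusterCpuRatio);

        System.out.println("memory counted against the host: " + usedMemoryBytes + " bytes");
        System.out.println("cpu counted against the host: " + usedCpuMhz + " MHz");
    }
}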
Use of com.cloud.offering.ServiceOffering in project cloudstack by apache.
The class AutoScaleManagerImpl, method createAutoScaleVmProfile.
@Override
@ActionEvent(eventType = EventTypes.EVENT_AUTOSCALEVMPROFILE_CREATE, eventDescription = "creating autoscale vm profile", create = true)
public AutoScaleVmProfile createAutoScaleVmProfile(CreateAutoScaleVmProfileCmd cmd) {
    Account owner = _accountDao.findById(cmd.getAccountId());
    Account caller = CallContext.current().getCallingAccount();
    _accountMgr.checkAccess(caller, null, true, owner);
    long zoneId = cmd.getZoneId();
    long serviceOfferingId = cmd.getServiceOfferingId();
    long autoscaleUserId = cmd.getAutoscaleUserId();
    DataCenter zone = _entityMgr.findById(DataCenter.class, zoneId);
    if (zone == null) {
        throw new InvalidParameterValueException("Unable to find zone by id");
    }
    ServiceOffering serviceOffering = _entityMgr.findById(ServiceOffering.class, serviceOfferingId);
    if (serviceOffering == null) {
        throw new InvalidParameterValueException("Unable to find service offering by id");
    }
    // validations
    HashMap<String, String> deployParams = cmd.getDeployParamMap();
    if (deployParams.containsKey("networks") && deployParams.get("networks").length() > 0) {
        throw new InvalidParameterValueException("'networks' is not a valid parameter, network for an AutoScaled VM is chosen automatically. An autoscaled VM is deployed in the loadbalancer's network");
    }
    /*
     * Make sure the values of the other deploy params are valid. For example, if
     * projectId is given as a string instead of a long value, this will throw an error.
     */
    dispatchChainFactory.getStandardDispatchChain().dispatch(new DispatchTask(ComponentContext.inject(DeployVMCmd.class), deployParams));
    AutoScaleVmProfileVO profileVO = new AutoScaleVmProfileVO(cmd.getZoneId(), cmd.getDomainId(), cmd.getAccountId(), cmd.getServiceOfferingId(), cmd.getTemplateId(), cmd.getOtherDeployParams(), cmd.getCounterParamList(), cmd.getDestroyVmGraceperiod(), autoscaleUserId);
    if (cmd.getDisplay() != null) {
        profileVO.setDisplay(cmd.getDisplay());
    }
    profileVO = checkValidityAndPersist(profileVO);
    s_logger.info("Successfully created AutoScale Vm Profile with Id: " + profileVO.getId());
    return profileVO;
}
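The 'networks' check above rejects a deploy parameter that the autoscaler has to control itself, since the VM is always placed in the load balancer's network. A hedged sketch of that style of guard on a plain parameter map (the reserved-key set is an illustration, not CloudStack's list):

import java.util.HashMap;
import java.util.Map;
import java.util.Set;

// Illustrative guard against deploy parameters that an autoscale profile may not set.
// Only "networks" is checked in the method above; the Set is a generalization for the example.
public class DeployParamGuard {
    private static final Set<String> RESERVED_KEYS = Set.of("networks");

    static void validate(Map<String, String> deployParams) {
        for (String key : RESERVED_KEYS) {
            String value = deployParams.get(key);
            if (value != null && !value.isEmpty()) {
                throw new IllegalArgumentException("'" + key + "' is not a valid parameter; it is chosen automatically for an AutoScaled VM");
            }
        }
    }

    public static void main(String[] args) {
        Map<String, String> params = new HashMap<>();
        params.put("networks", "net-1234");
        validate(params); // throws IllegalArgumentException
    }
}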
Use of com.cloud.offering.ServiceOffering in project cloudstack by apache.
The class UserVmManagerTest, method testScaleVMF3.
// Test scaleVm for Stopped vm.
//@Test(expected=InvalidParameterValueException.class)
public void testScaleVMF3() throws Exception {
    ScaleVMCmd cmd = new ScaleVMCmd();
    Class<?> _class = cmd.getClass();
    Field idField = _class.getDeclaredField("id");
    idField.setAccessible(true);
    idField.set(cmd, 1L);
    Field serviceOfferingIdField = _class.getDeclaredField("serviceOfferingId");
    serviceOfferingIdField.setAccessible(true);
    serviceOfferingIdField.set(cmd, 1L);
    when(_vmInstanceDao.findById(anyLong())).thenReturn(_vmInstance);
    doReturn(Hypervisor.HypervisorType.XenServer).when(_vmInstance).getHypervisorType();
    ServiceOffering so1 = getSvcoffering(512);
    ServiceOffering so2 = getSvcoffering(256);
    when(_entityMgr.findById(eq(ServiceOffering.class), anyLong())).thenReturn(so2);
    when(_entityMgr.findById(ServiceOffering.class, 1L)).thenReturn(so1);
    doReturn(VirtualMachine.State.Stopped).when(_vmInstance).getState();
    when(_vmDao.findById(anyLong())).thenReturn(null);
    doReturn(true).when(_itMgr).upgradeVmDb(anyLong(), anyLong());
    //when(_vmDao.findById(anyLong())).thenReturn(_vmMock);
    Account account = new AccountVO("testaccount", 1L, "networkdomain", (short) 0, UUID.randomUUID().toString());
    UserVO user = new UserVO(1, "testuser", "password", "firstname", "lastName", "email", "timezone", UUID.randomUUID().toString(), User.Source.UNKNOWN);
    CallContext.register(user, account);
    try {
        _userVmMgr.upgradeVirtualMachine(cmd);
    } finally {
        CallContext.unregister();
    }
}