Use of com.cloud.utils.db.TransactionCallbackNoReturn in project cosmic by MissionCriticalCloud.
The class VolumeOrchestrator, method cleanupVolumes.
@Override
@DB
public void cleanupVolumes(final long vmId) throws ConcurrentOperationException {
    if (s_logger.isDebugEnabled()) {
        s_logger.debug("Cleaning storage for vm: " + vmId);
    }
    final List<VolumeVO> volumesForVm = _volsDao.findByInstance(vmId);
    final List<VolumeVO> toBeExpunged = new ArrayList<>();
    Transaction.execute(new TransactionCallbackNoReturn() {
        @Override
        public void doInTransactionWithoutResult(final TransactionStatus status) {
            for (final VolumeVO vol : volumesForVm) {
                if (vol.getVolumeType().equals(Type.ROOT)) {
                    // Destroy volume if not already destroyed
                    final boolean volumeAlreadyDestroyed = (vol.getState() == Volume.State.Destroy || vol.getState() == Volume.State.Expunged || vol.getState() == Volume.State.Expunging);
                    if (!volumeAlreadyDestroyed) {
                        volService.destroyVolume(vol.getId());
                    } else {
                        s_logger.debug("Skipping destroy for the volume " + vol + " as it is in state " + vol.getState().toString());
                    }
                    toBeExpunged.add(vol);
                } else {
                    if (s_logger.isDebugEnabled()) {
                        s_logger.debug("Detaching " + vol);
                    }
                    _volsDao.detachVolume(vol.getId());
                }
            }
        }
    });
    AsyncCallFuture<VolumeService.VolumeApiResult> future = null;
    for (final VolumeVO expunge : toBeExpunged) {
        future = volService.expungeVolumeAsync(volFactory.getVolume(expunge.getId()));
        try {
            future.get();
        } catch (final InterruptedException e) {
            s_logger.debug("Failed to expunge volume " + expunge.getId(), e);
        } catch (final ExecutionException e) {
            s_logger.debug("Failed to expunge volume " + expunge.getId(), e);
        }
    }
}
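Every example on this page follows the same shape: an anonymous TransactionCallbackNoReturn is handed to Transaction.execute, which runs doInTransactionWithoutResult inside a database transaction. Below is a minimal, self-contained sketch of that shape; the SomeDao interface is a hypothetical stand-in, and the rollback-on-exception behaviour is an assumption inferred from how the examples on this page use CloudRuntimeException.

// Minimal sketch of the Transaction.execute + TransactionCallbackNoReturn pattern.
// SomeDao is a hypothetical stand-in; Transaction, TransactionStatus and
// TransactionCallbackNoReturn are the com.cloud.utils.db types used above.
import com.cloud.utils.db.Transaction;
import com.cloud.utils.db.TransactionCallbackNoReturn;
import com.cloud.utils.db.TransactionStatus;

public class TransactionCallbackSketch {

    // Hypothetical DAO, present only to keep the sketch self-contained.
    public interface SomeDao {
        void detach(long id);
        void destroy(long id);
    }

    private final SomeDao someDao;

    public TransactionCallbackSketch(final SomeDao someDao) {
        this.someDao = someDao;
    }

    public void cleanup(final long id, final boolean isRoot) {
        Transaction.execute(new TransactionCallbackNoReturn() {
            @Override
            public void doInTransactionWithoutResult(final TransactionStatus status) {
                // Everything in this method runs inside one DB transaction; the
                // assumption is that an unchecked exception thrown here prevents
                // the transaction from committing.
                if (isRoot) {
                    someDao.destroy(id);
                } else {
                    someDao.detach(id);
                }
            }
        });
    }
}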
Use of com.cloud.utils.db.TransactionCallbackNoReturn in project cosmic by MissionCriticalCloud.
The class NetworkOrchestrator, method destroyNetwork.
@Override
@DB
public boolean destroyNetwork(final long networkId, final ReservationContext context, final boolean forced) {
    final Account callerAccount = context.getAccount();
    NetworkVO network = _networksDao.findById(networkId);
    if (network == null) {
        s_logger.debug("Unable to find network with id: " + networkId);
        return false;
    }
    // Make sure that there are no user vms in the network that are not Expunged/Error
    final List<UserVmVO> userVms = _userVmDao.listByNetworkIdAndStates(networkId);
    for (final UserVmVO vm : userVms) {
        if (!(vm.getState() == VirtualMachine.State.Expunging && vm.getRemoved() != null)) {
            s_logger.warn("Can't delete the network, not all user vms are expunged. Vm " + vm + " is in " + vm.getState() + " state");
            return false;
        }
    }
    // Don't allow to delete network via api call when it has vms assigned to it
    final int nicCount = getActiveNicsInNetwork(networkId);
    if (nicCount > 0) {
        s_logger.debug("The network id=" + networkId + " has active Nics, but shouldn't.");
        // at this point we have already determined that there are no active user vms in network
        // if the op_networks table shows active nics, it's a bug in releasing nics updating op_networks
        _networksDao.changeActiveNicsBy(networkId, -1 * nicCount);
    }
    // In Basic zone, make sure that there are no non-removed console proxies and SSVMs using the network
    final Zone zone = _zoneRepository.findOne(network.getDataCenterId());
    if (zone.getNetworkType() == com.cloud.model.enumeration.NetworkType.Basic) {
        final List<VMInstanceVO> systemVms = _vmDao.listNonRemovedVmsByTypeAndNetwork(network.getId(), Type.ConsoleProxy, Type.SecondaryStorageVm);
        if (systemVms != null && !systemVms.isEmpty()) {
            s_logger.warn("Can't delete the network, not all consoleProxy/secondaryStorage vms are expunged");
            return false;
        }
    }
    // Shutdown network first
    shutdownNetwork(networkId, context, false);
    // get updated state for the network
    network = _networksDao.findById(networkId);
    if (network.getState() != Network.State.Allocated && network.getState() != Network.State.Setup && !forced) {
        s_logger.debug("Network is not in the correct state to be destroyed: " + network.getState());
        return false;
    }
    boolean success = true;
    if (!cleanupNetworkResources(networkId, callerAccount, context.getCaller().getId())) {
        s_logger.warn("Unable to delete network id=" + networkId + ": failed to cleanup network resources");
        return false;
    }
    // get providers to destroy
    final List<Provider> providersToDestroy = getNetworkProviders(network.getId());
    for (final NetworkElement element : networkElements) {
        if (providersToDestroy.contains(element.getProvider())) {
            try {
                if (s_logger.isDebugEnabled()) {
                    s_logger.debug("Sending destroy to " + element);
                }
                if (!element.destroy(network, context)) {
                    success = false;
                    s_logger.warn("Unable to complete destroy of the network: failed to destroy network element " + element.getName());
                }
            } catch (final ResourceUnavailableException e) {
                s_logger.warn("Unable to complete destroy of the network due to element: " + element.getName(), e);
                success = false;
            } catch (final ConcurrentOperationException e) {
                s_logger.warn("Unable to complete destroy of the network due to element: " + element.getName(), e);
                success = false;
            } catch (final Exception e) {
                s_logger.warn("Unable to complete destroy of the network due to element: " + element.getName(), e);
                success = false;
            }
        }
    }
    if (success) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Network id=" + networkId + " is destroyed successfully, cleaning up corresponding resources now.");
        }
        final NetworkVO networkFinal = network;
        try {
            Transaction.execute(new TransactionCallbackNoReturn() {
                @Override
                public void doInTransactionWithoutResult(final TransactionStatus status) {
                    final NetworkGuru guru = AdapterBase.getAdapterByName(networkGurus, networkFinal.getGuruName());
                    // Deleting sync networks
                    final List<NetworkVO> syncNetworks = _networksDao.listSyncNetworksByRelatedNetwork(networkId);
                    syncNetworks.forEach(syncNetwork -> removeAndShutdownSyncNetwork(syncNetwork.getId()));
                    guru.trash(networkFinal, _networkOfferingDao.findById(networkFinal.getNetworkOfferingId()));
                    if (!deleteVlansInNetwork(networkFinal.getId(), context.getCaller().getId(), callerAccount)) {
                        s_logger.warn("Failed to delete network " + networkFinal + "; was unable to cleanup corresponding ip ranges");
                        throw new CloudRuntimeException("Failed to delete network " + networkFinal + "; was unable to cleanup corresponding ip ranges");
                    } else {
                        // commit transaction only when ips and vlans for the network are released successfully
                        try {
                            stateTransitTo(networkFinal, Event.DestroyNetwork);
                        } catch (final NoTransitionException e) {
                            s_logger.debug(e.getMessage());
                        }
                        if (_networksDao.remove(networkFinal.getId())) {
                            final NetworkDomainVO networkDomain = _networkDomainDao.getDomainNetworkMapByNetworkId(networkFinal.getId());
                            if (networkDomain != null) {
                                _networkDomainDao.remove(networkDomain.getId());
                            }
                            final NetworkAccountVO networkAccount = _networkAccountDao.getAccountNetworkMapByNetworkId(networkFinal.getId());
                            if (networkAccount != null) {
                                _networkAccountDao.remove(networkAccount.getId());
                            }
                        }
                        final NetworkOffering ntwkOff = _entityMgr.findById(NetworkOffering.class, networkFinal.getNetworkOfferingId());
                        final boolean updateResourceCount = resourceCountNeedsUpdate(ntwkOff, networkFinal.getAclType());
                        if (updateResourceCount) {
                            _resourceLimitMgr.decrementResourceCount(networkFinal.getAccountId(), ResourceType.network, networkFinal.getDisplayNetwork());
                        }
                    }
                }
            });
            if (_networksDao.findById(network.getId()) == null) {
                // remove its related ACL permission
                final Pair<Class<?>, Long> networkMsg = new Pair<>(Network.class, networkFinal.getId());
                _messageBus.publish(_name, EntityManager.MESSAGE_REMOVE_ENTITY_EVENT, PublishScope.LOCAL, networkMsg);
            }
            return true;
        } catch (final CloudRuntimeException e) {
            s_logger.error("Failed to delete network", e);
            return false;
        }
    }
    return success;
}
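Note how destroyNetwork aborts its transactional cleanup: when deleteVlansInNetwork fails, the callback throws a CloudRuntimeException, which propagates out of Transaction.execute and is handled by the caller's catch block. Here is a trimmed sketch of that throw-to-abort flow, with a hypothetical NetworkCleanup stand-in for the DAO calls, under the same assumption as the example above that an unchecked exception escaping the callback keeps the transaction from committing.

// Hedged sketch of the throw-to-abort pattern used in destroyNetwork above.
// NetworkCleanup is a hypothetical stand-in; the CloudRuntimeException import
// path is assumed to match the cosmic utils package layout.
import com.cloud.utils.db.Transaction;
import com.cloud.utils.db.TransactionCallbackNoReturn;
import com.cloud.utils.db.TransactionStatus;
import com.cloud.utils.exception.CloudRuntimeException;

public class DestroyNetworkSketch {

    // Hypothetical cleanup interface, present only to keep the sketch self-contained.
    public interface NetworkCleanup {
        boolean deleteIpRanges(long networkId);
        void removeNetwork(long networkId);
    }

    private final NetworkCleanup cleanup;

    public DestroyNetworkSketch(final NetworkCleanup cleanup) {
        this.cleanup = cleanup;
    }

    public boolean destroy(final long networkId) {
        try {
            Transaction.execute(new TransactionCallbackNoReturn() {
                @Override
                public void doInTransactionWithoutResult(final TransactionStatus status) {
                    if (!cleanup.deleteIpRanges(networkId)) {
                        // Throwing here is intended to abort the transaction,
                        // so the removeNetwork call below is never committed.
                        throw new CloudRuntimeException("Failed to delete network " + networkId);
                    }
                    cleanup.removeNetwork(networkId);
                }
            });
            return true;
        } catch (final CloudRuntimeException e) {
            // Same handling as the example above: report failure to the caller.
            return false;
        }
    }
}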
Use of com.cloud.utils.db.TransactionCallbackNoReturn in project cosmic by MissionCriticalCloud.
The class AsyncJobManagerImpl, method updateAsyncJobAttachment.
@Override
@DB
public void updateAsyncJobAttachment(final long jobId, final String instanceType, final Long instanceId) {
    if (s_logger.isDebugEnabled()) {
        s_logger.debug("Update async-job attachment, job-" + jobId + ", instanceType: " + instanceType + ", instanceId: " + instanceId);
    }
    final AsyncJobVO job = _jobDao.findById(jobId);
    publishOnEventBus(job, "update");
    Transaction.execute(new TransactionCallbackNoReturn() {
        @Override
        public void doInTransactionWithoutResult(final TransactionStatus status) {
            final AsyncJobVO job = _jobDao.createForUpdate();
            job.setInstanceType(instanceType);
            job.setInstanceId(instanceId);
            job.setLastUpdated(DateUtil.currentGMTTime());
            _jobDao.update(jobId, job);
        }
    });
}
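In updateAsyncJobAttachment only the write is wrapped in the transaction; the findById lookup and the event publication stay outside it, and the update is built from an empty record returned by createForUpdate (presumably so that only the fields that were set end up in the UPDATE statement). A reduced sketch of that shape, with hypothetical JobDao and JobRecord stand-ins for _jobDao and AsyncJobVO:

// Hedged sketch: only the write is transactional; the read and any
// notifications happen outside. JobDao and JobRecord are hypothetical
// stand-ins for the cosmic DAO and VO types used in the example above.
import java.util.Date;

import com.cloud.utils.db.Transaction;
import com.cloud.utils.db.TransactionCallbackNoReturn;
import com.cloud.utils.db.TransactionStatus;

public class JobAttachmentUpdateSketch {

    public interface JobRecord {
        void setInstanceType(String instanceType);
        void setInstanceId(Long instanceId);
        void setLastUpdated(Date lastUpdated);
    }

    public interface JobDao {
        JobRecord createForUpdate();            // empty record used for a partial update
        boolean update(long id, JobRecord record);
    }

    private final JobDao jobDao;

    public JobAttachmentUpdateSketch(final JobDao jobDao) {
        this.jobDao = jobDao;
    }

    public void updateAttachment(final long jobId, final String instanceType, final Long instanceId) {
        Transaction.execute(new TransactionCallbackNoReturn() {
            @Override
            public void doInTransactionWithoutResult(final TransactionStatus status) {
                final JobRecord record = jobDao.createForUpdate();
                record.setInstanceType(instanceType);
                record.setInstanceId(instanceId);
                record.setLastUpdated(new Date());
                jobDao.update(jobId, record);
            }
        });
    }
}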
Use of com.cloud.utils.db.TransactionCallbackNoReturn in project cosmic by MissionCriticalCloud.
The class CapacityManagerImpl, method updateCapacityForHost.
@DB
@Override
public void updateCapacityForHost(final Host host) {
    // prepare the service offerings
    final List<ServiceOfferingVO> offerings = _offeringsDao.listAllIncludingRemoved();
    final Map<Long, ServiceOfferingVO> offeringsMap = new HashMap<>();
    for (final ServiceOfferingVO offering : offerings) {
        offeringsMap.put(offering.getId(), offering);
    }
    long usedCpu = 0;
    long usedMemory = 0;
    long reservedMemory = 0;
    long reservedCpu = 0;
    final CapacityState capacityState = (host.getResourceState() == ResourceState.Enabled) ? CapacityState.Enabled : CapacityState.Disabled;
    final List<VMInstanceVO> vms = _vmDao.listUpByHostId(host.getId());
    if (s_logger.isDebugEnabled()) {
        s_logger.debug("Found " + vms.size() + " VMs on host " + host.getId());
    }
    for (final VMInstanceVO vm : vms) {
        final ServiceOffering so = offeringsMap.get(vm.getServiceOfferingId());
        usedMemory += (so.getRamSize() * 1024L * 1024L);
        usedCpu += so.getCpu();
    }
    final List<VMInstanceVO> vmsByLastHostId = _vmDao.listByLastHostId(host.getId());
    if (s_logger.isDebugEnabled()) {
        s_logger.debug("Found " + vmsByLastHostId.size() + " VMs not running on host " + host.getId());
    }
    for (final VMInstanceVO vm : vmsByLastHostId) {
        final long secondsSinceLastUpdate = (DateUtil.currentGMTTime().getTime() - vm.getUpdateTime().getTime()) / 1000;
        if (secondsSinceLastUpdate < _vmCapacityReleaseInterval) {
            final ServiceOffering so = offeringsMap.get(vm.getServiceOfferingId());
            reservedMemory += (so.getRamSize() * 1024L * 1024L);
            reservedCpu += so.getCpu();
        } else {
            // signal if not done already, that the VM has been stopped for skip.counting.hours,
            // hence capacity will not be reserved anymore.
            final UserVmDetailVO messageSentFlag = _userVmDetailsDao.findDetail(vm.getId(), MESSAGE_RESERVED_CAPACITY_FREED_FLAG);
            if (messageSentFlag == null || !Boolean.valueOf(messageSentFlag.getValue())) {
                _messageBus.publish(_name, "VM_ReservedCapacity_Free", PublishScope.LOCAL, vm);
                if (vm.getType() == VirtualMachine.Type.User) {
                    final UserVmVO userVM = _userVMDao.findById(vm.getId());
                    _userVMDao.loadDetails(userVM);
                    userVM.setDetail(MESSAGE_RESERVED_CAPACITY_FREED_FLAG, "true");
                    _userVMDao.saveDetails(userVM);
                }
            }
        }
    }
    final CapacityVO cpuCap = _capacityDao.findByHostIdType(host.getId(), Capacity.CAPACITY_TYPE_CPU);
    final CapacityVO memCap = _capacityDao.findByHostIdType(host.getId(), Capacity.CAPACITY_TYPE_MEMORY);
    if (cpuCap != null && memCap != null) {
        if (host.getTotalMemory() != null) {
            memCap.setTotalCapacity(host.getTotalMemory());
        }
        final long hostTotalCpu = host.getCpus().longValue();
        if (cpuCap.getTotalCapacity() != hostTotalCpu) {
            s_logger.debug("Calibrate total cpu for host: " + host.getId() + " old total CPU:" + cpuCap.getTotalCapacity() + " new total CPU:" + hostTotalCpu);
            cpuCap.setTotalCapacity(hostTotalCpu);
        }
        // Set the capacity state as per the host allocation state.
        if (capacityState != cpuCap.getCapacityState()) {
            s_logger.debug("Calibrate cpu capacity state for host: " + host.getId() + " old capacity state:" + cpuCap.getCapacityState() + " new capacity state:" + capacityState);
            cpuCap.setCapacityState(capacityState);
        }
        memCap.setCapacityState(capacityState);
        if (cpuCap.getUsedCapacity() == usedCpu && cpuCap.getReservedCapacity() == reservedCpu) {
            s_logger.debug("No need to calibrate cpu capacity, host:" + host.getId() + " usedCpu: " + cpuCap.getUsedCapacity() + " reservedCpu: " + cpuCap.getReservedCapacity());
        } else {
            if (cpuCap.getReservedCapacity() != reservedCpu) {
                s_logger.debug("Calibrate reserved cpu for host: " + host.getId() + " old reservedCpu:" + cpuCap.getReservedCapacity() + " new reservedCpu:" + reservedCpu);
                cpuCap.setReservedCapacity(reservedCpu);
            }
            if (cpuCap.getUsedCapacity() != usedCpu) {
                s_logger.debug("Calibrate used cpu for host: " + host.getId() + " old usedCpu:" + cpuCap.getUsedCapacity() + " new usedCpu:" + usedCpu);
                cpuCap.setUsedCapacity(usedCpu);
            }
        }
        if (memCap.getTotalCapacity() != host.getTotalMemory()) {
            s_logger.debug("Calibrate total memory for host: " + host.getId() + " old total memory:" + memCap.getTotalCapacity() + " new total memory:" + host.getTotalMemory());
            memCap.setTotalCapacity(host.getTotalMemory());
        }
        // Set the capacity state as per the host allocation state.
        if (capacityState != memCap.getCapacityState()) {
            s_logger.debug("Calibrate memory capacity state for host: " + host.getId() + " old capacity state:" + memCap.getCapacityState() + " new capacity state:" + capacityState);
            memCap.setCapacityState(capacityState);
        }
        if (memCap.getUsedCapacity() == usedMemory && memCap.getReservedCapacity() == reservedMemory) {
            s_logger.debug("No need to calibrate memory capacity, host:" + host.getId() + " usedMem: " + memCap.getUsedCapacity() + " reservedMem: " + memCap.getReservedCapacity());
        } else {
            if (memCap.getReservedCapacity() != reservedMemory) {
                s_logger.debug("Calibrate reserved memory for host: " + host.getId() + " old reservedMem:" + memCap.getReservedCapacity() + " new reservedMem:" + reservedMemory);
                memCap.setReservedCapacity(reservedMemory);
            }
            if (memCap.getUsedCapacity() != usedMemory) {
                /*
                 * Didn't calibrate for used memory, because VMs can be in a
                 * state (starting/migrating) where we don't know on which host
                 * they are allocated
                 */
                s_logger.debug("Calibrate used memory for host: " + host.getId() + " old usedMem: " + memCap.getUsedCapacity() + " new usedMem: " + usedMemory);
                memCap.setUsedCapacity(usedMemory);
            }
        }
        try {
            _capacityDao.update(cpuCap.getId(), cpuCap);
            _capacityDao.update(memCap.getId(), memCap);
        } catch (final Exception e) {
            s_logger.error("Caught exception while updating cpu/memory capacity for the host " + host.getId(), e);
        }
    } else {
        final long usedMemoryFinal = usedMemory;
        final long reservedMemoryFinal = reservedMemory;
        final long usedCpuFinal = usedCpu;
        final long reservedCpuFinal = reservedCpu;
        Transaction.execute(new TransactionCallbackNoReturn() {
            @Override
            public void doInTransactionWithoutResult(final TransactionStatus status) {
                CapacityVO capacity = new CapacityVO(host.getId(), host.getDataCenterId(), host.getPodId(), host.getClusterId(), usedMemoryFinal, host.getTotalMemory(), Capacity.CAPACITY_TYPE_MEMORY);
                capacity.setReservedCapacity(reservedMemoryFinal);
                capacity.setCapacityState(capacityState);
                _capacityDao.persist(capacity);
                capacity = new CapacityVO(host.getId(), host.getDataCenterId(), host.getPodId(), host.getClusterId(), usedCpuFinal, host.getCpus().longValue(), Capacity.CAPACITY_TYPE_CPU);
                capacity.setReservedCapacity(reservedCpuFinal);
                capacity.setCapacityState(capacityState);
                _capacityDao.persist(capacity);
            }
        });
    }
}
Use of com.cloud.utils.db.TransactionCallbackNoReturn in project cosmic by MissionCriticalCloud.
The class ConfigurationManagerImpl, method editPod.
@Override
@DB
public Pod editPod(final long id, String name, String startIp, String endIp, String gateway, String netmask, String allocationStateStr) {
    // verify parameters
    final HostPodVO pod = _podDao.findById(id);
    if (pod == null) {
        throw new InvalidParameterValueException("Unable to find pod by id " + id);
    }
    final String[] existingPodIpRange = pod.getDescription().split("-");
    String[] leftRangeToAdd = null;
    String[] rightRangeToAdd = null;
    boolean allowToDownsize = false;
    // pod has allocated private IP addresses
    if (podHasAllocatedPrivateIPs(id)) {
        if (netmask != null) {
            final long newCidr = NetUtils.getCidrSize(netmask);
            final long oldCidr = pod.getCidrSize();
            if (newCidr > oldCidr) {
                throw new CloudRuntimeException("The specified pod has allocated private IP addresses, so its IP address range can be extended only");
            }
        }
        if (startIp != null && !startIp.equals(existingPodIpRange[0])) {
            if (NetUtils.ipRangesOverlap(startIp, null, existingPodIpRange[0], existingPodIpRange[1])) {
                throw new CloudRuntimeException("The specified pod has allocated private IP addresses, so its IP address range can be extended only");
            } else {
                leftRangeToAdd = new String[2];
                final long endIpForUpdate = NetUtils.ip2Long(existingPodIpRange[0]) - 1;
                leftRangeToAdd[0] = startIp;
                leftRangeToAdd[1] = NetUtils.long2Ip(endIpForUpdate);
            }
        }
        if (endIp != null && !endIp.equals(existingPodIpRange[1])) {
            if (NetUtils.ipRangesOverlap(endIp, endIp, existingPodIpRange[0], existingPodIpRange[1])) {
                throw new CloudRuntimeException("The specified pod has allocated private IP addresses, so its IP address range can be extended only");
            } else {
                rightRangeToAdd = new String[2];
                final long startIpForUpdate = NetUtils.ip2Long(existingPodIpRange[1]) + 1;
                rightRangeToAdd[0] = NetUtils.long2Ip(startIpForUpdate);
                rightRangeToAdd[1] = endIp;
            }
        }
    } else {
        allowToDownsize = true;
    }
    if (gateway == null) {
        gateway = pod.getGateway();
    }
    if (netmask == null) {
        netmask = NetUtils.getCidrNetmask(pod.getCidrSize());
    }
    final String oldPodName = pod.getName();
    if (name == null) {
        name = oldPodName;
    }
    if (gateway == null) {
        gateway = pod.getGateway();
    }
    if (startIp == null) {
        startIp = existingPodIpRange[0];
    }
    if (endIp == null) {
        endIp = existingPodIpRange[1];
    }
    if (allocationStateStr == null) {
        allocationStateStr = pod.getAllocationState().toString();
    }
    // Verify pod's attributes
    final String cidr = NetUtils.ipAndNetMaskToCidr(gateway, netmask);
    final boolean checkForDuplicates = !oldPodName.equals(name);
    checkPodAttributes(id, name, pod.getDataCenterId(), gateway, cidr, startIp, endIp, allocationStateStr, checkForDuplicates, false);
    try {
        final String[] existingPodIpRangeFinal = existingPodIpRange;
        final String[] leftRangeToAddFinal = leftRangeToAdd;
        final String[] rightRangeToAddFinal = rightRangeToAdd;
        final boolean allowToDownsizeFinal = allowToDownsize;
        final String allocationStateStrFinal = allocationStateStr;
        final String startIpFinal = startIp;
        final String endIpFinal = endIp;
        final String nameFinal = name;
        final String gatewayFinal = gateway;
        Transaction.execute(new TransactionCallbackNoReturn() {
            @Override
            public void doInTransactionWithoutResult(final TransactionStatus status) {
                final long zoneId = pod.getDataCenterId();
                String startIp = startIpFinal;
                String endIp = endIpFinal;
                if (!allowToDownsizeFinal) {
                    if (leftRangeToAddFinal != null) {
                        _zoneDao.addPrivateIpAddress(zoneId, pod.getId(), leftRangeToAddFinal[0], leftRangeToAddFinal[1]);
                    }
                    if (rightRangeToAddFinal != null) {
                        _zoneDao.addPrivateIpAddress(zoneId, pod.getId(), rightRangeToAddFinal[0], rightRangeToAddFinal[1]);
                    }
                } else {
                    // delete the old range
                    _zoneDao.deletePrivateIpAddressByPod(pod.getId());
                    // add the new one
                    if (startIp == null) {
                        startIp = existingPodIpRangeFinal[0];
                    }
                    if (endIp == null) {
                        endIp = existingPodIpRangeFinal[1];
                    }
                    _zoneDao.addPrivateIpAddress(zoneId, pod.getId(), startIp, endIp);
                }
                pod.setName(nameFinal);
                pod.setDataCenterId(zoneId);
                pod.setGateway(gatewayFinal);
                pod.setCidrAddress(getCidrAddress(cidr));
                pod.setCidrSize(getCidrSize(cidr));
                final String ipRange = startIp + "-" + endIp;
                pod.setDescription(ipRange);
                final AllocationState allocationState;
                if (allocationStateStrFinal != null && !allocationStateStrFinal.isEmpty()) {
                    allocationState = AllocationState.valueOf(allocationStateStrFinal);
                    pod.setAllocationState(allocationState);
                }
                _podDao.update(id, pod);
            }
        });
    } catch (final Exception e) {
        s_logger.error("Unable to edit pod due to " + e.getMessage(), e);
        throw new CloudRuntimeException("Failed to edit pod. Please contact Cloud Support.");
    }
    return pod;
}
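Both updateCapacityForHost and editPod copy mutable locals into ...Final variables (usedMemoryFinal, startIpFinal, and so on) before building the callback. That is a Java language constraint rather than anything specific to the Transaction API: an anonymous inner class can only capture local variables that are final (or, since Java 8, effectively final). A minimal illustration, independent of any cosmic class:

// Why the examples copy values into "...Final" locals before the callback:
// an anonymous inner class may only capture final / effectively final locals.
public final class CaptureSketch {
    public static void main(final String[] args) {
        long used = 0;
        used += 42; // 'used' is reassigned, so it is not effectively final

        final long usedFinal = used; // snapshot the value for the inner class
        final Runnable callback = new Runnable() {
            @Override
            public void run() {
                // Capturing 'used' directly here would not compile;
                // 'usedFinal' is a final copy, so this is allowed.
                System.out.println("used = " + usedFinal);
            }
        };
        callback.run();
    }
}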