Use of com.cloud.utils.db.TransactionCallbackNoReturn in project cloudstack by apache.
Class RulesManagerImpl, method reservePorts.
@Override
@DB
public FirewallRuleVO[] reservePorts(final IpAddress ip, final String protocol, final FirewallRule.Purpose purpose, final boolean openFirewall, final Account caller, final int... ports) throws NetworkRuleConflictException {
    final FirewallRuleVO[] rules = new FirewallRuleVO[ports.length];
    // Persist all requested rules in a single transaction.
    Transaction.execute(new TransactionCallbackWithExceptionNoReturn<NetworkRuleConflictException>() {
        @Override
        public void doInTransactionWithoutResult(TransactionStatus status) throws NetworkRuleConflictException {
            for (int i = 0; i < ports.length; i++) {
                rules[i] = new FirewallRuleVO(null, ip.getId(), ports[i], protocol, ip.getAssociatedWithNetworkId(), ip.getAllocatedToAccountId(), ip.getAllocatedInDomainId(), purpose, null, null, null, null);
                rules[i] = _firewallDao.persist(rules[i]);
                if (openFirewall) {
                    _firewallMgr.createRuleForAllCidrs(ip.getId(), caller, ports[i], ports[i], protocol, null, null, rules[i].getId(), ip.getAssociatedWithNetworkId());
                }
            }
        }
    });
    boolean success = false;
    try {
        for (FirewallRuleVO newRule : rules) {
            _firewallMgr.detectRulesConflict(newRule);
        }
        success = true;
        return rules;
    } finally {
        if (!success) {
            // Conflict detection failed: remove the rules persisted above in a second transaction.
            Transaction.execute(new TransactionCallbackNoReturn() {
                @Override
                public void doInTransactionWithoutResult(TransactionStatus status) {
                    for (FirewallRuleVO newRule : rules) {
                        _firewallMgr.removeRule(newRule);
                    }
                }
            });
        }
    }
}
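The shape of this method is worth noting: the first Transaction.execute uses TransactionCallbackWithExceptionNoReturn so the checked NetworkRuleConflictException can escape the callback, while the cleanup path uses the plain TransactionCallbackNoReturn because it throws nothing checked. Below is a minimal sketch of the same persist-then-compensate pattern; the ReservationDao interface and the validate method are hypothetical stand-ins, and only the com.cloud.utils.db transaction types are taken from the example above.

import com.cloud.utils.db.Transaction;
import com.cloud.utils.db.TransactionCallbackNoReturn;
import com.cloud.utils.db.TransactionStatus;

public class ReservationSketch {

    // Hypothetical DAO, used only to illustrate the pattern.
    interface ReservationDao {
        long persist(int port);   // returns the id of the new row

        void remove(long id);
    }

    private ReservationDao reservationDao;

    public long[] reserve(final int... ports) {
        final long[] ids = new long[ports.length];
        // First transaction: persist every requested row, or none of them.
        Transaction.execute(new TransactionCallbackNoReturn() {
            @Override
            public void doInTransactionWithoutResult(TransactionStatus status) {
                for (int i = 0; i < ports.length; i++) {
                    ids[i] = reservationDao.persist(ports[i]);
                }
            }
        });
        boolean success = false;
        try {
            validate(ids);   // conflict detection runs outside the transaction
            success = true;
            return ids;
        } finally {
            if (!success) {
                // Compensating cleanup: the first transaction already committed, so undo it explicitly.
                Transaction.execute(new TransactionCallbackNoReturn() {
                    @Override
                    public void doInTransactionWithoutResult(TransactionStatus status) {
                        for (long id : ids) {
                            reservationDao.remove(id);
                        }
                    }
                });
            }
        }
    }

    private void validate(long[] ids) {
        // Hypothetical conflict check; throws a RuntimeException on conflict.
    }
}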
Use of com.cloud.utils.db.TransactionCallbackNoReturn in project cloudstack by apache.
Class SecurityGroupManagerImpl, method work.
@DB
public void work() {
    if (s_logger.isTraceEnabled()) {
        s_logger.trace("Checking the database");
    }
    final SecurityGroupWorkVO work = _workDao.take(_serverId);
    if (work == null) {
        if (s_logger.isTraceEnabled()) {
            s_logger.trace("Security Group work: no work found");
        }
        return;
    }
    final Long userVmId = work.getInstanceId();
    if (work.getStep() == Step.Done) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Security Group work: found a job in done state, rescheduling for vm: " + userVmId);
        }
        ArrayList<Long> affectedVms = new ArrayList<Long>();
        affectedVms.add(userVmId);
        scheduleRulesetUpdateToHosts(affectedVms, false, _timeBetweenCleanups * 1000l);
        return;
    }
    s_logger.debug("Working on " + work);
    Transaction.execute(new TransactionCallbackNoReturn() {
        @Override
        public void doInTransactionWithoutResult(TransactionStatus status) {
            UserVm vm = null;
            Long seqnum = null;
            boolean locked = false;
            try {
                // Lock the VM row for the duration of the ruleset update.
                vm = _userVMDao.acquireInLockTable(work.getInstanceId());
                if (vm == null) {
                    vm = _userVMDao.findById(work.getInstanceId());
                    if (vm == null) {
                        s_logger.info("VM " + work.getInstanceId() + " is removed");
                        locked = true;
                        return;
                    }
                    s_logger.warn("Unable to acquire lock on vm id=" + userVmId);
                    return;
                }
                locked = true;
                Long agentId = null;
                VmRulesetLogVO log = _rulesetLogDao.findByVmId(userVmId);
                if (log == null) {
                    s_logger.warn("Cannot find log record for vm id=" + userVmId);
                    return;
                }
                seqnum = log.getLogsequence();
                if (vm != null && vm.getState() == State.Running) {
                    Map<PortAndProto, Set<String>> ingressRules = generateRulesForVM(userVmId, SecurityRuleType.IngressRule);
                    Map<PortAndProto, Set<String>> egressRules = generateRulesForVM(userVmId, SecurityRuleType.EgressRule);
                    agentId = vm.getHostId();
                    if (agentId != null) {
                        // get nic secondary ip address
                        String privateIp = vm.getPrivateIpAddress();
                        NicVO nic = _nicDao.findByIp4AddressAndVmId(privateIp, vm.getId());
                        List<String> nicSecIps = null;
                        if (nic != null) {
                            if (nic.getSecondaryIp()) {
                                //get secondary ips of the vm
                                long networkId = nic.getNetworkId();
                                nicSecIps = _nicSecIpDao.getSecondaryIpAddressesForNic(nic.getId());
                            }
                        }
                        SecurityGroupRulesCmd cmd = generateRulesetCmd(vm.getInstanceName(), nic.getIPv6Address(), vm.getPrivateIpAddress(), vm.getPrivateMacAddress(), vm.getId(), generateRulesetSignature(ingressRules, egressRules), seqnum, ingressRules, egressRules, nicSecIps);
                        Commands cmds = new Commands(cmd);
                        try {
                            _agentMgr.send(agentId, cmds, _answerListener);
                        } catch (AgentUnavailableException e) {
                            s_logger.debug("Unable to send ingress rules updates for vm: " + userVmId + "(agentid=" + agentId + ")");
                            _workDao.updateStep(work.getInstanceId(), seqnum, Step.Done);
                        }
                    }
                }
            } finally {
                // Always release the lock and mark the work item done if we acquired it.
                if (locked) {
                    _userVMDao.releaseFromLockTable(userVmId);
                    _workDao.updateStep(work.getId(), Step.Done);
                }
            }
        }
    });
}
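A detail that is easy to miss here is the lock discipline: the row lock is acquired inside doInTransactionWithoutResult, the locked flag records whether we actually own it, and the finally block both releases the lock and marks the work item done. The following is a stripped-down sketch of that shape, assuming a hypothetical WorkItemDao whose lock helpers mirror CloudStack's acquireInLockTable/releaseFromLockTable style.

import com.cloud.utils.db.Transaction;
import com.cloud.utils.db.TransactionCallbackNoReturn;
import com.cloud.utils.db.TransactionStatus;

public class LockedWorkSketch {

    // Hypothetical DAO; the lock methods are modelled on CloudStack's GenericDao, for illustration only.
    interface WorkItemDao {
        Object acquireInLockTable(long id);

        boolean releaseFromLockTable(long id);

        void markDone(long id);
    }

    private WorkItemDao workDao;

    public void process(final long workId) {
        Transaction.execute(new TransactionCallbackNoReturn() {
            @Override
            public void doInTransactionWithoutResult(TransactionStatus status) {
                boolean locked = false;
                try {
                    if (workDao.acquireInLockTable(workId) == null) {
                        // Another worker holds the row; give up without touching it.
                        return;
                    }
                    locked = true;
                    // ... update the locked row here ...
                } finally {
                    if (locked) {
                        // Release the lock and mark the step done only if we actually owned it.
                        workDao.releaseFromLockTable(workId);
                        workDao.markDone(workId);
                    }
                }
            }
        });
    }
}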
Use of com.cloud.utils.db.TransactionCallbackNoReturn in project cloudstack by apache.
Class UserVmManagerImpl, method finalizeStart.
@Override
public boolean finalizeStart(VirtualMachineProfile profile, long hostId, Commands cmds, ReservationContext context) {
    UserVmVO vm = _vmDao.findById(profile.getId());
    Answer[] answersToCmds = cmds.getAnswers();
    if (answersToCmds == null) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Returning from finalizeStart() since there are no answers to read");
        }
        return true;
    }
    Answer startAnswer = cmds.getAnswer(StartAnswer.class);
    String returnedIp = null;
    String originalIp = null;
    if (startAnswer != null) {
        StartAnswer startAns = (StartAnswer) startAnswer;
        VirtualMachineTO vmTO = startAns.getVirtualMachine();
        for (NicTO nicTO : vmTO.getNics()) {
            if (nicTO.getType() == TrafficType.Guest) {
                returnedIp = nicTO.getIp();
            }
        }
    }
    List<NicVO> nics = _nicDao.listByVmId(vm.getId());
    NicVO guestNic = null;
    NetworkVO guestNetwork = null;
    for (NicVO nic : nics) {
        NetworkVO network = _networkDao.findById(nic.getNetworkId());
        long isDefault = (nic.isDefaultNic()) ? 1 : 0;
        UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NETWORK_OFFERING_ASSIGN, vm.getAccountId(), vm.getDataCenterId(), vm.getId(), Long.toString(nic.getId()), network.getNetworkOfferingId(), null, isDefault, VirtualMachine.class.getName(), vm.getUuid(), vm.isDisplay());
        if (network.getTrafficType() == TrafficType.Guest) {
            originalIp = nic.getIPv4Address();
            guestNic = nic;
            guestNetwork = network;
            // In vmware, we will be effecting pvlan settings in portgroups in StartCommand.
            if (profile.getHypervisorType() != HypervisorType.VMware) {
                if (nic.getBroadcastUri().getScheme().equals("pvlan")) {
                    NicProfile nicProfile = new NicProfile(nic, network, nic.getBroadcastUri(), nic.getIsolationUri(), 0, false, "pvlan-nic");
                    if (!setupVmForPvlan(true, hostId, nicProfile)) {
                        return false;
                    }
                }
            }
        }
    }
    boolean ipChanged = false;
    if (originalIp != null && !originalIp.equalsIgnoreCase(returnedIp)) {
        if (returnedIp != null && guestNic != null) {
            guestNic.setIPv4Address(returnedIp);
            ipChanged = true;
        }
    }
    if (returnedIp != null && !returnedIp.equalsIgnoreCase(originalIp)) {
        if (guestNic != null) {
            guestNic.setIPv4Address(returnedIp);
            ipChanged = true;
        }
    }
    if (ipChanged) {
        _dcDao.findById(vm.getDataCenterId());
        UserVmVO userVm = _vmDao.findById(profile.getId());
        // dc.getDhcpProvider().equalsIgnoreCase(Provider.ExternalDhcpServer.getName())
        if (_ntwkSrvcDao.canProviderSupportServiceInNetwork(guestNetwork.getId(), Service.Dhcp, Provider.ExternalDhcpServer)) {
            _nicDao.update(guestNic.getId(), guestNic);
            userVm.setPrivateIpAddress(guestNic.getIPv4Address());
            _vmDao.update(userVm.getId(), userVm);
            s_logger.info("Detected that ip changed in the answer, updated nic in the db with new ip " + returnedIp);
        }
    }
    // get system ip and create static nat rule for the vm
    try {
        _rulesMgr.getSystemIpAndEnableStaticNatForVm(profile.getVirtualMachine(), false);
    } catch (Exception ex) {
        s_logger.warn("Failed to get system ip and enable static nat for the vm " + profile.getVirtualMachine() + " due to exception ", ex);
        return false;
    }
    Answer answer = cmds.getAnswer("restoreVMSnapshot");
    if (answer != null && answer instanceof RestoreVMSnapshotAnswer) {
        RestoreVMSnapshotAnswer restoreVMSnapshotAnswer = (RestoreVMSnapshotAnswer) answer;
        if (restoreVMSnapshotAnswer == null || !restoreVMSnapshotAnswer.getResult()) {
            s_logger.warn("Unable to restore the vm snapshot from image file to the VM: " + restoreVMSnapshotAnswer.getDetails());
        }
    }
    final VirtualMachineProfile vmProfile = profile;
    Transaction.execute(new TransactionCallbackNoReturn() {
        @Override
        public void doInTransactionWithoutResult(TransactionStatus status) {
            final UserVmVO vm = _vmDao.findById(vmProfile.getId());
            final List<NicVO> nics = _nicDao.listByVmId(vm.getId());
            for (NicVO nic : nics) {
                Network network = _networkModel.getNetwork(nic.getNetworkId());
                if (_networkModel.isSharedNetworkWithoutServices(network.getId())) {
                    vmIdCountMap.put(nic.getId(), new VmAndCountDetails(nic.getInstanceId(), VmIpFetchTrialMax.value()));
                }
            }
        }
    });
    return true;
}
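Note how little of finalizeStart actually runs inside the transaction: usage events, pvlan setup, and the static NAT call all happen first, and only the short nic scan that populates vmIdCountMap is wrapped in the TransactionCallbackNoReturn (with profile copied into the final vmProfile so the anonymous class can capture it). Here is a hedged sketch of that keep-the-transaction-small shape; NicDao, TrackerDao, and notifyExternalSystems are hypothetical names used only for illustration.

import java.util.List;

import com.cloud.utils.db.Transaction;
import com.cloud.utils.db.TransactionCallbackNoReturn;
import com.cloud.utils.db.TransactionStatus;

public class PostStartBookkeepingSketch {

    // Hypothetical DAOs, for illustration only.
    interface NicDao {
        List<Long> listNicIds(long vmId);
    }

    interface TrackerDao {
        void track(long nicId);
    }

    private NicDao nicDao;
    private TrackerDao trackerDao;

    public boolean finalizeStart(long vmId) {
        // Slow, non-database work (agent calls, events, external systems) stays outside the transaction.
        notifyExternalSystems(vmId);
        // Copy into a final local so the anonymous callback below can capture it.
        final long vmIdFinal = vmId;
        Transaction.execute(new TransactionCallbackNoReturn() {
            @Override
            public void doInTransactionWithoutResult(TransactionStatus status) {
                // Only the short database bookkeeping runs inside the transaction.
                for (long nicId : nicDao.listNicIds(vmIdFinal)) {
                    trackerDao.track(nicId);
                }
            }
        });
        return true;
    }

    private void notifyExternalSystems(long vmId) {
        // Hypothetical placeholder for the non-transactional work.
    }
}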
Use of com.cloud.utils.db.TransactionCallbackNoReturn in project cloudstack by apache.
Class DedicatedResourceManagerImpl, method releaseDedicatedResource.
@Override
@DB
@ActionEvent(eventType = EventTypes.EVENT_DEDICATE_RESOURCE_RELEASE, eventDescription = "Releasing dedicated resource")
public boolean releaseDedicatedResource(final Long zoneId, Long podId, Long clusterId, Long hostId) throws InvalidParameterValueException {
    DedicatedResourceVO resource = null;
    if (zoneId != null) {
        resource = _dedicatedDao.findByZoneId(zoneId);
    }
    if (podId != null) {
        resource = _dedicatedDao.findByPodId(podId);
    }
    if (clusterId != null) {
        resource = _dedicatedDao.findByClusterId(clusterId);
    }
    if (hostId != null) {
        resource = _dedicatedDao.findByHostId(hostId);
    }
    if (resource == null) {
        throw new InvalidParameterValueException("No Dedicated Resource available to release");
    } else {
        final DedicatedResourceVO resourceFinal = resource;
        Transaction.execute(new TransactionCallbackNoReturn() {
            @Override
            public void doInTransactionWithoutResult(TransactionStatus status) {
                Long resourceId = resourceFinal.getId();
                if (!_dedicatedDao.remove(resourceId)) {
                    throw new CloudRuntimeException("Failed to delete Resource " + resourceId);
                }
                if (zoneId != null) {
                    // remove the domainId set in zone
                    DataCenterVO dc = _zoneDao.findById(zoneId);
                    if (dc != null) {
                        dc.setDomainId(null);
                        dc.setDomain(null);
                        if (!_zoneDao.update(zoneId, dc)) {
                            throw new CloudRuntimeException("Failed to release dedicated zone, could not clear domainId. Please contact Cloud Support.");
                        }
                    }
                }
            }
        });
        // find the group associated and check if there are any more
        // resources under that group
        List<DedicatedResourceVO> resourcesInGroup = _dedicatedDao.listByAffinityGroupId(resource.getAffinityGroupId());
        if (resourcesInGroup.isEmpty()) {
            // delete the group
            _affinityGroupService.deleteAffinityGroup(resource.getAffinityGroupId(), null, null, null, null);
        }
    }
    return true;
}
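The callback above aborts the whole unit of work by throwing CloudRuntimeException: if the delete succeeds but clearing the zone's domain fails, the exception propagates out of doInTransactionWithoutResult so the two writes commit or fail together. Below is a minimal sketch of that rollback-by-exception style; ResourceDao and ZoneDao are hypothetical, while the transaction types and CloudRuntimeException are the same CloudStack classes used above.

import com.cloud.utils.db.Transaction;
import com.cloud.utils.db.TransactionCallbackNoReturn;
import com.cloud.utils.db.TransactionStatus;
import com.cloud.utils.exception.CloudRuntimeException;

public class ReleaseSketch {

    // Hypothetical DAOs; only the transaction and exception types come from CloudStack.
    interface ResourceDao {
        boolean remove(long id);
    }

    interface ZoneDao {
        boolean clearDomain(long zoneId);
    }

    private ResourceDao resourceDao;
    private ZoneDao zoneDao;

    public void release(final long resourceId, final Long zoneId) {
        Transaction.execute(new TransactionCallbackNoReturn() {
            @Override
            public void doInTransactionWithoutResult(TransactionStatus status) {
                if (!resourceDao.remove(resourceId)) {
                    // Throwing unwinds doInTransactionWithoutResult and aborts the transaction.
                    throw new CloudRuntimeException("Failed to delete resource " + resourceId);
                }
                if (zoneId != null && !zoneDao.clearDomain(zoneId)) {
                    // The remove() above is rolled back as well: both writes commit or fail together.
                    throw new CloudRuntimeException("Failed to clear domain on zone " + zoneId);
                }
            }
        });
    }
}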
Use of com.cloud.utils.db.TransactionCallbackNoReturn in project cloudstack by apache.
Class CapacityManagerImpl, method updateCapacityForHost.
@DB
@Override
public void updateCapacityForHost(final Host host) {
    // prepare the service offerings
    List<ServiceOfferingVO> offerings = _offeringsDao.listAllIncludingRemoved();
    Map<Long, ServiceOfferingVO> offeringsMap = new HashMap<Long, ServiceOfferingVO>();
    for (ServiceOfferingVO offering : offerings) {
        offeringsMap.put(offering.getId(), offering);
    }
    long usedCpu = 0;
    long usedMemory = 0;
    long reservedMemory = 0;
    long reservedCpu = 0;
    final CapacityState capacityState = (host.getResourceState() == ResourceState.Enabled) ? CapacityState.Enabled : CapacityState.Disabled;
    List<VMInstanceVO> vms = _vmDao.listUpByHostId(host.getId());
    if (s_logger.isDebugEnabled()) {
        s_logger.debug("Found " + vms.size() + " VMs on host " + host.getId());
    }
    ClusterVO cluster = _clusterDao.findById(host.getClusterId());
    ClusterDetailsVO clusterDetailCpu = _clusterDetailsDao.findDetail(cluster.getId(), "cpuOvercommitRatio");
    ClusterDetailsVO clusterDetailRam = _clusterDetailsDao.findDetail(cluster.getId(), "memoryOvercommitRatio");
    Float clusterCpuOvercommitRatio = Float.parseFloat(clusterDetailCpu.getValue());
    Float clusterRamOvercommitRatio = Float.parseFloat(clusterDetailRam.getValue());
    Float cpuOvercommitRatio = 1f;
    Float ramOvercommitRatio = 1f;
    for (VMInstanceVO vm : vms) {
        Map<String, String> vmDetails = _userVmDetailsDao.listDetailsKeyPairs(vm.getId());
        String vmDetailCpu = vmDetails.get("cpuOvercommitRatio");
        String vmDetailRam = vmDetails.get("memoryOvercommitRatio");
        if (vmDetailCpu != null) {
            // if vmDetailCpu is not null, the VM is running in an overcommitted cluster.
            cpuOvercommitRatio = Float.parseFloat(vmDetailCpu);
            ramOvercommitRatio = Float.parseFloat(vmDetailRam);
        }
        ServiceOffering so = offeringsMap.get(vm.getServiceOfferingId());
        if (so.isDynamic()) {
            usedMemory += ((Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.memory.name())) * 1024L * 1024L) / ramOvercommitRatio) * clusterRamOvercommitRatio;
            usedCpu += ((Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.cpuNumber.name())) * Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.cpuSpeed.name()))) / cpuOvercommitRatio) * clusterCpuOvercommitRatio;
        } else {
            usedMemory += ((so.getRamSize() * 1024L * 1024L) / ramOvercommitRatio) * clusterRamOvercommitRatio;
            usedCpu += ((so.getCpu() * so.getSpeed()) / cpuOvercommitRatio) * clusterCpuOvercommitRatio;
        }
    }
    List<VMInstanceVO> vmsByLastHostId = _vmDao.listByLastHostId(host.getId());
    if (s_logger.isDebugEnabled()) {
        s_logger.debug("Found " + vmsByLastHostId.size() + " VMs not running on host " + host.getId());
    }
    for (VMInstanceVO vm : vmsByLastHostId) {
        long secondsSinceLastUpdate = (DateUtil.currentGMTTime().getTime() - vm.getUpdateTime().getTime()) / 1000;
        if (secondsSinceLastUpdate < _vmCapacityReleaseInterval) {
            UserVmDetailVO vmDetailCpu = _userVmDetailsDao.findDetail(vm.getId(), "cpuOvercommitRatio");
            UserVmDetailVO vmDetailRam = _userVmDetailsDao.findDetail(vm.getId(), "memoryOvercommitRatio");
            if (vmDetailCpu != null) {
                // if vmDetailCpu is not null, the VM is running in an overcommitted cluster.
                cpuOvercommitRatio = Float.parseFloat(vmDetailCpu.getValue());
                ramOvercommitRatio = Float.parseFloat(vmDetailRam.getValue());
            }
            ServiceOffering so = offeringsMap.get(vm.getServiceOfferingId());
            Map<String, String> vmDetails = _userVmDetailsDao.listDetailsKeyPairs(vm.getId());
            if (so.isDynamic()) {
                reservedMemory += ((Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.memory.name())) * 1024L * 1024L) / ramOvercommitRatio) * clusterRamOvercommitRatio;
                reservedCpu += ((Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.cpuNumber.name())) * Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.cpuSpeed.name()))) / cpuOvercommitRatio) * clusterCpuOvercommitRatio;
            } else {
                reservedMemory += ((so.getRamSize() * 1024L * 1024L) / ramOvercommitRatio) * clusterRamOvercommitRatio;
                reservedCpu += (so.getCpu() * so.getSpeed() / cpuOvercommitRatio) * clusterCpuOvercommitRatio;
            }
        } else {
            // signal if not done already, that the VM has been stopped for skip.counting.hours,
            // hence capacity will not be reserved anymore.
            UserVmDetailVO messageSentFlag = _userVmDetailsDao.findDetail(vm.getId(), MESSAGE_RESERVED_CAPACITY_FREED_FLAG);
            if (messageSentFlag == null || !Boolean.valueOf(messageSentFlag.getValue())) {
                _messageBus.publish(_name, "VM_ReservedCapacity_Free", PublishScope.LOCAL, vm);
                if (vm.getType() == VirtualMachine.Type.User) {
                    UserVmVO userVM = _userVMDao.findById(vm.getId());
                    _userVMDao.loadDetails(userVM);
                    userVM.setDetail(MESSAGE_RESERVED_CAPACITY_FREED_FLAG, "true");
                    _userVMDao.saveDetails(userVM);
                }
            }
        }
    }
    CapacityVO cpuCap = _capacityDao.findByHostIdType(host.getId(), Capacity.CAPACITY_TYPE_CPU);
    CapacityVO memCap = _capacityDao.findByHostIdType(host.getId(), Capacity.CAPACITY_TYPE_MEMORY);
    if (cpuCap != null && memCap != null) {
        if (host.getTotalMemory() != null) {
            memCap.setTotalCapacity(host.getTotalMemory());
        }
        long hostTotalCpu = host.getCpus().longValue() * host.getSpeed().longValue();
        if (cpuCap.getTotalCapacity() != hostTotalCpu) {
            s_logger.debug("Calibrate total cpu for host: " + host.getId() + " old total CPU:" + cpuCap.getTotalCapacity() + " new total CPU:" + hostTotalCpu);
            cpuCap.setTotalCapacity(hostTotalCpu);
        }
        // Set the capacity state as per the host allocation state.
        if (capacityState != cpuCap.getCapacityState()) {
            s_logger.debug("Calibrate cpu capacity state for host: " + host.getId() + " old capacity state:" + cpuCap.getCapacityState() + " new capacity state:" + capacityState);
            cpuCap.setCapacityState(capacityState);
        }
        memCap.setCapacityState(capacityState);
        if (cpuCap.getUsedCapacity() == usedCpu && cpuCap.getReservedCapacity() == reservedCpu) {
            s_logger.debug("No need to calibrate cpu capacity, host:" + host.getId() + " usedCpu: " + cpuCap.getUsedCapacity() + " reservedCpu: " + cpuCap.getReservedCapacity());
        } else {
            if (cpuCap.getReservedCapacity() != reservedCpu) {
                s_logger.debug("Calibrate reserved cpu for host: " + host.getId() + " old reservedCpu:" + cpuCap.getReservedCapacity() + " new reservedCpu:" + reservedCpu);
                cpuCap.setReservedCapacity(reservedCpu);
            }
            if (cpuCap.getUsedCapacity() != usedCpu) {
                s_logger.debug("Calibrate used cpu for host: " + host.getId() + " old usedCpu:" + cpuCap.getUsedCapacity() + " new usedCpu:" + usedCpu);
                cpuCap.setUsedCapacity(usedCpu);
            }
        }
        if (memCap.getTotalCapacity() != host.getTotalMemory()) {
            s_logger.debug("Calibrate total memory for host: " + host.getId() + " old total memory:" + memCap.getTotalCapacity() + " new total memory:" + host.getTotalMemory());
            memCap.setTotalCapacity(host.getTotalMemory());
        }
        // Set the capacity state as per the host allocation state.
        if (capacityState != memCap.getCapacityState()) {
            s_logger.debug("Calibrate memory capacity state for host: " + host.getId() + " old capacity state:" + memCap.getCapacityState() + " new capacity state:" + capacityState);
            memCap.setCapacityState(capacityState);
        }
        if (memCap.getUsedCapacity() == usedMemory && memCap.getReservedCapacity() == reservedMemory) {
            s_logger.debug("No need to calibrate memory capacity, host:" + host.getId() + " usedMem: " + memCap.getUsedCapacity() + " reservedMem: " + memCap.getReservedCapacity());
        } else {
            if (memCap.getReservedCapacity() != reservedMemory) {
                s_logger.debug("Calibrate reserved memory for host: " + host.getId() + " old reservedMem:" + memCap.getReservedCapacity() + " new reservedMem:" + reservedMemory);
                memCap.setReservedCapacity(reservedMemory);
            }
            if (memCap.getUsedCapacity() != usedMemory) {
                /*
                 * Didn't calibrate for used memory, because VMs can be in
                 * state(starting/migrating) that I don't know on which host
                 * they are allocated
                 */
                s_logger.debug("Calibrate used memory for host: " + host.getId() + " old usedMem: " + memCap.getUsedCapacity() + " new usedMem: " + usedMemory);
                memCap.setUsedCapacity(usedMemory);
            }
        }
        try {
            _capacityDao.update(cpuCap.getId(), cpuCap);
            _capacityDao.update(memCap.getId(), memCap);
        } catch (Exception e) {
            s_logger.error("Caught exception while updating cpu/memory capacity for the host " + host.getId(), e);
        }
    } else {
        final long usedMemoryFinal = usedMemory;
        final long reservedMemoryFinal = reservedMemory;
        final long usedCpuFinal = usedCpu;
        final long reservedCpuFinal = reservedCpu;
        Transaction.execute(new TransactionCallbackNoReturn() {
            @Override
            public void doInTransactionWithoutResult(TransactionStatus status) {
                // No capacity rows exist yet for this host: create the memory and CPU rows in one transaction.
                CapacityVO capacity = new CapacityVO(host.getId(), host.getDataCenterId(), host.getPodId(), host.getClusterId(), usedMemoryFinal, host.getTotalMemory(), Capacity.CAPACITY_TYPE_MEMORY);
                capacity.setReservedCapacity(reservedMemoryFinal);
                capacity.setCapacityState(capacityState);
                _capacityDao.persist(capacity);
                capacity = new CapacityVO(host.getId(), host.getDataCenterId(), host.getPodId(), host.getClusterId(), usedCpuFinal, host.getCpus().longValue() * host.getSpeed().longValue(), Capacity.CAPACITY_TYPE_CPU);
                capacity.setReservedCapacity(reservedCpuFinal);
                capacity.setCapacityState(capacityState);
                _capacityDao.persist(capacity);
            }
        });
    }
}
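In the else branch, where no capacity rows exist yet, two things happen: the accumulated counters are copied into final locals (usedMemoryFinal, reservedCpuFinal, and so on) because the anonymous callback can only capture final or effectively final variables and these counters are reassigned in the loops above, and the memory and CPU rows are persisted inside one callback so the host never ends up with only one of them. The sketch below condenses that shape; CapacityDao and its persist signature are hypothetical.

import com.cloud.utils.db.Transaction;
import com.cloud.utils.db.TransactionCallbackNoReturn;
import com.cloud.utils.db.TransactionStatus;

public class NewCapacitySketch {

    // Hypothetical DAO, for illustration only.
    interface CapacityDao {
        void persist(String capacityType, long used, long total);
    }

    private CapacityDao capacityDao;

    public void createCapacityRows(long totalMemory, long totalCpu, Iterable<Long> vmMemory, Iterable<Long> vmCpu) {
        long usedMemory = 0;
        long usedCpu = 0;
        for (long m : vmMemory) {
            usedMemory += m;
        }
        for (long c : vmCpu) {
            usedCpu += c;
        }
        // Locals mutated above must be copied into final variables before the anonymous callback can capture them.
        final long usedMemoryFinal = usedMemory;
        final long usedCpuFinal = usedCpu;
        final long totalMemoryFinal = totalMemory;
        final long totalCpuFinal = totalCpu;
        Transaction.execute(new TransactionCallbackNoReturn() {
            @Override
            public void doInTransactionWithoutResult(TransactionStatus status) {
                // Both rows are inserted in the same transaction, so they appear (or fail) together.
                capacityDao.persist("MEMORY", usedMemoryFinal, totalMemoryFinal);
                capacityDao.persist("CPU", usedCpuFinal, totalCpuFinal);
            }
        });
    }
}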