use of com.cloud.exception.InsufficientCapacityException in project cloudstack by apache.
the class VirtualMachineManagerImpl method orchestrateStart.
@Override
public void orchestrateStart(final String vmUuid, final Map<VirtualMachineProfile.Param, Object> params, final DeploymentPlan planToDeploy, final DeploymentPlanner planner) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException {
final CallContext cctxt = CallContext.current();
final Account account = cctxt.getCallingAccount();
final User caller = cctxt.getCallingUser();
VMInstanceVO vm = _vmDao.findByUuid(vmUuid);
final VirtualMachineGuru vmGuru = getVmGuru(vm);
final Ternary<VMInstanceVO, ReservationContext, ItWorkVO> start = changeToStartState(vmGuru, vm, caller, account);
if (start == null) {
return;
}
vm = start.first();
final ReservationContext ctx = start.second();
ItWorkVO work = start.third();
VMInstanceVO startedVm = null;
final ServiceOfferingVO offering = _offeringDao.findById(vm.getId(), vm.getServiceOfferingId());
final VirtualMachineTemplate template = _entityMgr.findByIdIncludingRemoved(VirtualMachineTemplate.class, vm.getTemplateId());
if (s_logger.isDebugEnabled()) {
s_logger.debug("Trying to deploy VM, vm has dcId: " + vm.getDataCenterId() + " and podId: " + vm.getPodIdToDeployIn());
}
DataCenterDeployment plan = new DataCenterDeployment(vm.getDataCenterId(), vm.getPodIdToDeployIn(), null, null, null, null, ctx);
if (planToDeploy != null && planToDeploy.getDataCenterId() != 0) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("advanceStart: DeploymentPlan is provided, using dcId:" + planToDeploy.getDataCenterId() + ", podId: " + planToDeploy.getPodId() + ", clusterId: " + planToDeploy.getClusterId() + ", hostId: " + planToDeploy.getHostId() + ", poolId: " + planToDeploy.getPoolId());
}
plan = new DataCenterDeployment(planToDeploy.getDataCenterId(), planToDeploy.getPodId(), planToDeploy.getClusterId(), planToDeploy.getHostId(), planToDeploy.getPoolId(), planToDeploy.getPhysicalNetworkId(), ctx);
}
final HypervisorGuru hvGuru = _hvGuruMgr.getGuru(vm.getHypervisorType());
boolean canRetry = true;
ExcludeList avoids = null;
try {
final Journal journal = start.second().getJournal();
if (planToDeploy != null) {
avoids = planToDeploy.getAvoids();
}
if (avoids == null) {
avoids = new ExcludeList();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("Deploy avoids pods: " + avoids.getPodsToAvoid() + ", clusters: " + avoids.getClustersToAvoid() + ", hosts: " + avoids.getHostsToAvoid());
}
boolean planChangedByVolume = false;
boolean reuseVolume = true;
final DataCenterDeployment originalPlan = plan;
int retry = StartRetry.value();
while (retry-- != 0) {
if (reuseVolume) {
// edit plan if this vm's ROOT volume is in READY state already
final List<VolumeVO> vols = _volsDao.findReadyRootVolumesByInstance(vm.getId());
for (final VolumeVO vol : vols) {
// make sure the templateId is unchanged. If it has changed,
// let the planner reassign a pool for the volume even if it is ready.
final Long volTemplateId = vol.getTemplateId();
if (volTemplateId != null && volTemplateId.longValue() != template.getId()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug(vol + " of " + vm + " is READY, but template ids don't match, let the planner reassign a new pool");
}
continue;
}
final StoragePool pool = (StoragePool) dataStoreMgr.getPrimaryDataStore(vol.getPoolId());
if (!pool.isInMaintenance()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Root volume is ready, need to place VM in volume's cluster");
}
final long rootVolDcId = pool.getDataCenterId();
final Long rootVolPodId = pool.getPodId();
final Long rootVolClusterId = pool.getClusterId();
if (planToDeploy != null && planToDeploy.getDataCenterId() != 0) {
final Long clusterIdSpecified = planToDeploy.getClusterId();
if (clusterIdSpecified != null && rootVolClusterId != null) {
if (rootVolClusterId.longValue() != clusterIdSpecified.longValue()) {
// the ready root volume is in a different cluster than the one requested in the plan
if (s_logger.isDebugEnabled()) {
s_logger.debug("Cannot satisfy the deployment plan passed in since the ready Root volume is in different cluster. volume's cluster: " + rootVolClusterId + ", cluster specified: " + clusterIdSpecified);
}
throw new ResourceUnavailableException("Root volume is ready in different cluster, Deployment plan provided cannot be satisfied, unable to create a deployment for " + vm, Cluster.class, clusterIdSpecified);
}
}
plan = new DataCenterDeployment(planToDeploy.getDataCenterId(), planToDeploy.getPodId(), planToDeploy.getClusterId(), planToDeploy.getHostId(), vol.getPoolId(), null, ctx);
} else {
plan = new DataCenterDeployment(rootVolDcId, rootVolPodId, rootVolClusterId, null, vol.getPoolId(), null, ctx);
if (s_logger.isDebugEnabled()) {
s_logger.debug(vol + " is READY, changing deployment plan to use this pool's dcId: " + rootVolDcId + " , podId: " + rootVolPodId + " , and clusterId: " + rootVolClusterId);
}
planChangedByVolume = true;
}
}
}
}
final Account owner = _entityMgr.findById(Account.class, vm.getAccountId());
final VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vm, template, offering, owner, params);
DeployDestination dest = null;
try {
dest = _dpMgr.planDeployment(vmProfile, plan, avoids, planner);
} catch (final AffinityConflictException e2) {
s_logger.warn("Unable to create deployment, affinity rules associted to the VM conflict", e2);
throw new CloudRuntimeException("Unable to create deployment, affinity rules associted to the VM conflict");
}
if (dest == null) {
if (planChangedByVolume) {
plan = originalPlan;
planChangedByVolume = false;
//do not enter volume reuse for next retry, since we want to look for resources outside the volume's cluster
reuseVolume = false;
continue;
}
throw new InsufficientServerCapacityException("Unable to create a deployment for " + vmProfile, DataCenter.class, plan.getDataCenterId(), areAffinityGroupsAssociated(vmProfile));
}
if (dest != null) {
avoids.addHost(dest.getHost().getId());
journal.record("Deployment found ", vmProfile, dest);
}
long destHostId = dest.getHost().getId();
vm.setPodIdToDeployIn(dest.getPod().getId());
final Long cluster_id = dest.getCluster().getId();
final ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id, "cpuOvercommitRatio");
final ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id, "memoryOvercommitRatio");
// store the overcommit ratios in the vm_details table so capacity checks stay correct if the cluster overcommit ratio changes later.
if (_uservmDetailsDao.findDetail(vm.getId(), "cpuOvercommitRatio") == null && (Float.parseFloat(cluster_detail_cpu.getValue()) > 1f || Float.parseFloat(cluster_detail_ram.getValue()) > 1f)) {
_uservmDetailsDao.addDetail(vm.getId(), "cpuOvercommitRatio", cluster_detail_cpu.getValue(), true);
_uservmDetailsDao.addDetail(vm.getId(), "memoryOvercommitRatio", cluster_detail_ram.getValue(), true);
} else if (_uservmDetailsDao.findDetail(vm.getId(), "cpuOvercommitRatio") != null) {
_uservmDetailsDao.addDetail(vm.getId(), "cpuOvercommitRatio", cluster_detail_cpu.getValue(), true);
_uservmDetailsDao.addDetail(vm.getId(), "memoryOvercommitRatio", cluster_detail_ram.getValue(), true);
}
vmProfile.setCpuOvercommitRatio(Float.parseFloat(cluster_detail_cpu.getValue()));
vmProfile.setMemoryOvercommitRatio(Float.parseFloat(cluster_detail_ram.getValue()));
StartAnswer startAnswer = null;
try {
if (!changeState(vm, Event.OperationRetry, destHostId, work, Step.Prepare)) {
throw new ConcurrentOperationException("Unable to update the state of the Virtual Machine " + vm.getUuid() + " oldstate: " + vm.getState() + "Event :" + Event.OperationRetry);
}
} catch (final NoTransitionException e1) {
throw new ConcurrentOperationException(e1.getMessage());
}
try {
if (s_logger.isDebugEnabled()) {
s_logger.debug("VM is being created in podId: " + vm.getPodIdToDeployIn());
}
_networkMgr.prepare(vmProfile, new DeployDestination(dest.getDataCenter(), dest.getPod(), null, null), ctx);
if (vm.getHypervisorType() != HypervisorType.BareMetal) {
volumeMgr.prepare(vmProfile, dest);
}
// since the storage manager succeeded in volume creation, reuse the volume for further retries as long as the current cluster has capacity
if (!reuseVolume) {
reuseVolume = true;
}
Commands cmds = null;
vmGuru.finalizeVirtualMachineProfile(vmProfile, dest, ctx);
final VirtualMachineTO vmTO = hvGuru.implement(vmProfile);
handlePath(vmTO.getDisks(), vm.getHypervisorType());
cmds = new Commands(Command.OnError.Stop);
cmds.addCommand(new StartCommand(vmTO, dest.getHost(), getExecuteInSequence(vm.getHypervisorType())));
vmGuru.finalizeDeployment(cmds, vmProfile, dest, ctx);
work = _workDao.findById(work.getId());
if (work == null || work.getStep() != Step.Prepare) {
throw new ConcurrentOperationException("Work steps have been changed: " + work);
}
_workDao.updateStep(work, Step.Starting);
_agentMgr.send(destHostId, cmds);
_workDao.updateStep(work, Step.Started);
startAnswer = cmds.getAnswer(StartAnswer.class);
if (startAnswer != null && startAnswer.getResult()) {
handlePath(vmTO.getDisks(), startAnswer.getIqnToPath());
final String host_guid = startAnswer.getHost_guid();
if (host_guid != null) {
final HostVO finalHost = _resourceMgr.findHostByGuid(host_guid);
if (finalHost == null) {
throw new CloudRuntimeException("Host Guid " + host_guid + " doesn't exist in DB, something went wrong while processing start answer: " + startAnswer);
}
destHostId = finalHost.getId();
}
if (vmGuru.finalizeStart(vmProfile, destHostId, cmds, ctx)) {
syncDiskChainChange(startAnswer);
if (!changeState(vm, Event.OperationSucceeded, destHostId, work, Step.Done)) {
s_logger.error("Unable to transition to a new state. VM uuid: " + vm.getUuid() + "VM oldstate:" + vm.getState() + "Event:" + Event.OperationSucceeded);
throw new ConcurrentOperationException("Failed to deploy VM" + vm.getUuid());
}
// Update GPU device capacity
final GPUDeviceTO gpuDevice = startAnswer.getVirtualMachine().getGpuDevice();
if (gpuDevice != null) {
_resourceMgr.updateGPUDetails(destHostId, gpuDevice.getGroupDetails());
}
// remove the deployvm detail recorded at deploy time, if present; it is not set when starting an already-deployed VM
if (_uservmDetailsDao.findDetail(vm.getId(), "deployvm") != null) {
_uservmDetailsDao.removeDetail(vm.getId(), "deployvm");
}
startedVm = vm;
if (s_logger.isDebugEnabled()) {
s_logger.debug("Start completed for VM " + vm);
}
return;
} else {
if (s_logger.isDebugEnabled()) {
s_logger.info("The guru did not like the answers so stopping " + vm);
}
StopCommand stopCmd = new StopCommand(vm, getExecuteInSequence(vm.getHypervisorType()), false);
stopCmd.setControlIp(getControlNicIpForVM(vm));
final StopCommand cmd = stopCmd;
final Answer answer = _agentMgr.easySend(destHostId, cmd);
if (answer != null && answer instanceof StopAnswer) {
final StopAnswer stopAns = (StopAnswer) answer;
if (vm.getType() == VirtualMachine.Type.User) {
final String platform = stopAns.getPlatform();
if (platform != null) {
final Map<String, String> vmmetadata = new HashMap<String, String>();
vmmetadata.put(vm.getInstanceName(), platform);
syncVMMetaData(vmmetadata);
}
}
}
if (answer == null || !answer.getResult()) {
s_logger.warn("Unable to stop " + vm + " due to " + (answer != null ? answer.getDetails() : "no answers"));
_haMgr.scheduleStop(vm, destHostId, WorkType.ForceStop);
throw new ExecutionException("Unable to stop this VM, " + vm.getUuid() + " so we are unable to retry the start operation");
}
throw new ExecutionException("Unable to start VM:" + vm.getUuid() + " due to error in finalizeStart, not retrying");
}
}
s_logger.info("Unable to start VM on " + dest.getHost() + " due to " + (startAnswer == null ? " no start answer" : startAnswer.getDetails()));
if (startAnswer != null && startAnswer.getContextParam("stopRetry") != null) {
break;
}
} catch (OperationTimedoutException e) {
s_logger.debug("Unable to send the start command to host " + dest.getHost() + " failed to start VM: " + vm.getUuid());
if (e.isActive()) {
_haMgr.scheduleStop(vm, destHostId, WorkType.CheckStop);
}
canRetry = false;
throw new AgentUnavailableException("Unable to start " + vm.getHostName(), destHostId, e);
} catch (final ResourceUnavailableException e) {
s_logger.info("Unable to contact resource.", e);
if (!avoids.add(e)) {
if (e.getScope() == Volume.class || e.getScope() == Nic.class) {
throw e;
} else {
s_logger.warn("unexpected ResourceUnavailableException : " + e.getScope().getName(), e);
throw e;
}
}
} catch (final InsufficientCapacityException e) {
s_logger.info("Insufficient capacity ", e);
if (!avoids.add(e)) {
if (e.getScope() == Volume.class || e.getScope() == Nic.class) {
throw e;
} else {
s_logger.warn("unexpected InsufficientCapacityException : " + e.getScope().getName(), e);
}
}
} catch (final ExecutionException e) {
s_logger.error("Failed to start instance " + vm, e);
throw new AgentUnavailableException("Unable to start instance due to " + e.getMessage(), destHostId, e);
} catch (final NoTransitionException e) {
s_logger.error("Failed to start instance " + vm, e);
throw new AgentUnavailableException("Unable to start instance due to " + e.getMessage(), destHostId, e);
} finally {
if (startedVm == null && canRetry) {
final Step prevStep = work.getStep();
_workDao.updateStep(work, Step.Release);
// If the previous step was Started or Starting and we got a valid answer
if ((prevStep == Step.Started || prevStep == Step.Starting) && startAnswer != null && startAnswer.getResult()) {
//TODO check the response of cleanup and record it in DB for retry
cleanup(vmGuru, vmProfile, work, Event.OperationFailed, false);
} else {
//if step is not starting/started, send cleanup command with force=true
cleanup(vmGuru, vmProfile, work, Event.OperationFailed, true);
}
}
}
}
} finally {
if (startedVm == null) {
if (canRetry) {
try {
changeState(vm, Event.OperationFailed, null, work, Step.Done);
} catch (final NoTransitionException e) {
throw new ConcurrentOperationException(e.getMessage());
}
}
}
if (planToDeploy != null) {
planToDeploy.setAvoids(avoids);
}
}
if (startedVm == null) {
throw new CloudRuntimeException("Unable to start instance '" + vm.getHostName() + "' (" + vm.getUuid() + "), see management server log for details");
}
}
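For context, the sketch below shows how a caller might invoke orchestrateStart and react to the checked exceptions it declares, in particular InsufficientCapacityException. It is a minimal illustration only: the wrapper method, the s_logger field and the choice to wrap failures in CloudRuntimeException are assumptions for this example, not part of the CloudStack code above.
// Hypothetical caller sketch (not CloudStack code): start a VM and surface
// capacity problems to the API layer as a runtime error.
public void startVmOrFail(VirtualMachineManagerImpl vmManager, String vmUuid) {
    try {
        // null plan and planner let orchestrateStart build its own DataCenterDeployment
        vmManager.orchestrateStart(vmUuid, null, null, null);
    } catch (InsufficientCapacityException e) {
        // e.getScope() identifies what ran out, e.g. DataCenter for InsufficientServerCapacityException
        s_logger.warn("No capacity in scope " + e.getScope().getName() + " for VM " + vmUuid, e);
        throw new CloudRuntimeException("Insufficient capacity to start VM " + vmUuid, e);
    } catch (ConcurrentOperationException | ResourceUnavailableException e) {
        throw new CloudRuntimeException("Unable to start VM " + vmUuid, e);
    }
}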
use of com.cloud.exception.InsufficientCapacityException in project cloudstack by apache.
the class VMEntityManagerImpl method deployVirtualMachine.
@Override
public void deployVirtualMachine(String reservationId, VMEntityVO vmEntityVO, String caller, Map<VirtualMachineProfile.Param, Object> params) throws InsufficientCapacityException, ResourceUnavailableException {
//grab the VM Id and destination using the reservationId.
VMInstanceVO vm = _vmDao.findByUuid(vmEntityVO.getUuid());
VMReservationVO vmReservation = _reservationDao.findByReservationId(reservationId);
if (vmReservation != null) {
DataCenterDeployment reservedPlan = new DataCenterDeployment(vm.getDataCenterId(), vmReservation.getPodId(), vmReservation.getClusterId(), vmReservation.getHostId(), null, null);
try {
_itMgr.start(vm.getUuid(), params, reservedPlan, _planningMgr.getDeploymentPlannerByName(vmReservation.getDeploymentPlanner()));
} catch (Exception ex) {
// Retry the deployment without using the reservation plan
DataCenterDeployment plan = new DataCenterDeployment(0, null, null, null, null, null);
if (reservedPlan.getAvoids() != null) {
plan.setAvoids(reservedPlan.getAvoids());
}
_itMgr.start(vm.getUuid(), params, plan, null);
}
} else {
// no reservation found. Let VirtualMachineManager retry
_itMgr.start(vm.getUuid(), params, null, null);
}
}
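As a calling-side illustration, the following hedged sketch drives deployVirtualMachine and translates its checked exceptions into a runtime failure; the wrapper method and its error handling are assumptions for this example, not code from the project.
// Hypothetical wrapper (not CloudStack code): deploy a VM using a previously
// computed reservation and report capacity shortfalls to the caller.
public void deployReservedVm(VMEntityManagerImpl vmEntityMgr, String reservationId,
        VMEntityVO vmEntity, String callerName) {
    try {
        vmEntityMgr.deployVirtualMachine(reservationId, vmEntity, callerName, null);
    } catch (InsufficientCapacityException e) {
        // raised when even the retry without the reservation plan cannot find capacity
        throw new CloudRuntimeException("Not enough capacity to deploy " + vmEntity.getUuid(), e);
    } catch (ResourceUnavailableException e) {
        throw new CloudRuntimeException("Required resource unavailable for " + vmEntity.getUuid(), e);
    }
}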
use of com.cloud.exception.InsufficientCapacityException in project cloudstack by apache.
the class AsyncJobExecutionContext method disjoinJob.
//
// check the failure exception before we disjoin the worker job; a work job usually fails with an exception,
// and this helps propagate the exception between jobs
// TODO : it is ugly and this will become unnecessary after we switch to full-async mode
//
public void disjoinJob(long joinedJobId) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException {
assert (_job != null);
AsyncJobJoinMapVO record = s_joinMapDao.getJoinRecord(_job.getId(), joinedJobId);
s_jobMgr.disjoinJob(_job.getId(), joinedJobId);
if (record.getJoinStatus() == JobInfo.Status.FAILED) {
if (record.getJoinResult() != null) {
Object exception = JobSerializerHelper.fromObjectSerializedString(record.getJoinResult());
if (exception != null && exception instanceof Exception) {
if (exception instanceof InsufficientCapacityException) {
s_logger.error("Job " + joinedJobId + " failed with InsufficientCapacityException");
throw (InsufficientCapacityException) exception;
} else if (exception instanceof ConcurrentOperationException) {
s_logger.error("Job " + joinedJobId + " failed with ConcurrentOperationException");
throw (ConcurrentOperationException) exception;
} else if (exception instanceof ResourceUnavailableException) {
s_logger.error("Job " + joinedJobId + " failed with ResourceUnavailableException");
throw (ResourceUnavailableException) exception;
} else {
s_logger.error("Job " + joinedJobId + " failed with exception");
throw new RuntimeException((Exception) exception);
}
}
} else {
s_logger.error("Job " + joinedJobId + " failed without providing an error object");
throw new RuntimeException("Job " + joinedJobId + " failed without providing an error object");
}
}
}
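To show the consuming side of this pattern, here is a minimal, hypothetical sketch of a parent job waiting on a joined work job: disjoinJob rethrows the work job's failure, so an InsufficientCapacityException raised in the worker surfaces in the parent thread as the same type. The method and its s_logger field are assumptions, not CloudStack code.
// Hypothetical orchestration step (not CloudStack code): finish a joined work
// job and convert its rethrown failures into a boolean outcome.
public boolean completeJoinedWork(AsyncJobExecutionContext ctx, long workJobId) {
    try {
        // disjoinJob inspects the join record and rethrows the serialized exception
        ctx.disjoinJob(workJobId);
        return true;
    } catch (InsufficientCapacityException e) {
        // the caller could widen the deployment plan (or avoid set) and retry
        s_logger.info("Work job " + workJobId + " ran out of capacity in scope " + e.getScope().getName(), e);
        return false;
    } catch (ConcurrentOperationException | ResourceUnavailableException e) {
        s_logger.info("Work job " + workJobId + " failed", e);
        return false;
    }
}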
use of com.cloud.exception.InsufficientCapacityException in project cloudstack by apache.
the class CiscoVnmcElement method implement.
@Override
public boolean implement(final Network network, final NetworkOffering offering, final DeployDestination dest, final ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException {
final DataCenter zone = _entityMgr.findById(DataCenter.class, network.getDataCenterId());
if (zone.getNetworkType() == NetworkType.Basic) {
s_logger.debug("Not handling network implement in zone of type " + NetworkType.Basic);
return false;
}
if (!canHandle(network)) {
return false;
}
final List<CiscoVnmcControllerVO> devices = _ciscoVnmcDao.listByPhysicalNetwork(network.getPhysicalNetworkId());
if (devices.isEmpty()) {
s_logger.error("No Cisco Vnmc device on network " + network.getName());
return false;
}
List<CiscoAsa1000vDeviceVO> asaList = _ciscoAsa1000vDao.listByPhysicalNetwork(network.getPhysicalNetworkId());
if (asaList.isEmpty()) {
s_logger.debug("No Cisco ASA 1000v device on network " + network.getName());
return false;
}
NetworkAsa1000vMapVO asaForNetwork = _networkAsa1000vMapDao.findByNetworkId(network.getId());
if (asaForNetwork != null) {
s_logger.debug("Cisco ASA 1000v device already associated with network " + network.getName());
return true;
}
if (!_networkModel.isProviderSupportServiceInNetwork(network.getId(), Service.SourceNat, Provider.CiscoVnmc)) {
s_logger.error("SourceNat service is not provided by Cisco Vnmc device on network " + network.getName());
return false;
}
try {
// ensure that there is an ASA 1000v assigned to this network
CiscoAsa1000vDevice assignedAsa = assignAsa1000vToNetwork(network);
if (assignedAsa == null) {
s_logger.error("Unable to assign ASA 1000v device to network " + network.getName());
throw new CloudRuntimeException("Unable to assign ASA 1000v device to network " + network.getName());
}
ClusterVO asaCluster = _clusterDao.findById(assignedAsa.getClusterId());
ClusterVSMMapVO clusterVsmMap = _clusterVsmMapDao.findByClusterId(assignedAsa.getClusterId());
if (clusterVsmMap == null) {
s_logger.error("Vmware cluster " + asaCluster.getName() + " has no Cisco Nexus VSM device associated with it");
throw new CloudRuntimeException("Vmware cluster " + asaCluster.getName() + " has no Cisco Nexus VSM device associated with it");
}
CiscoNexusVSMDeviceVO vsmDevice = _vsmDeviceDao.findById(clusterVsmMap.getVsmId());
if (vsmDevice == null) {
s_logger.error("Unable to load details of Cisco Nexus VSM device associated with cluster " + asaCluster.getName());
throw new CloudRuntimeException("Unable to load details of Cisco Nexus VSM device associated with cluster " + asaCluster.getName());
}
CiscoVnmcControllerVO ciscoVnmcDevice = devices.get(0);
HostVO ciscoVnmcHost = _hostDao.findById(ciscoVnmcDevice.getHostId());
_hostDao.loadDetails(ciscoVnmcHost);
Account owner = context.getAccount();
PublicIp sourceNatIp = _ipAddrMgr.assignSourceNatIpAddressToGuestNetwork(owner, network);
long vlanId = Long.parseLong(BroadcastDomainType.getValue(network.getBroadcastUri()));
List<VlanVO> vlanVOList = _vlanDao.listVlansByPhysicalNetworkId(network.getPhysicalNetworkId());
List<String> publicGateways = new ArrayList<String>();
for (VlanVO vlanVO : vlanVOList) {
publicGateways.add(vlanVO.getVlanGateway());
}
// due to a VNMC limitation that does not allow the source NAT ip to be the outside ip of the firewall,
// an additional public ip needs to be acquired and assigned as the firewall's outside ip.
// In case additional ip addresses are already available (network restart), use one
// of them that is not the source NAT ip
IpAddress outsideIp = null;
List<IPAddressVO> publicIps = _ipAddressDao.listByAssociatedNetwork(network.getId(), null);
for (IPAddressVO ip : publicIps) {
if (!ip.isSourceNat()) {
outsideIp = ip;
break;
}
}
if (outsideIp == null) {
// none available, acquire one
try {
Account caller = CallContext.current().getCallingAccount();
long callerUserId = CallContext.current().getCallingUserId();
outsideIp = _ipAddrMgr.allocateIp(owner, false, caller, callerUserId, zone, true);
} catch (ResourceAllocationException e) {
s_logger.error("Unable to allocate additional public Ip address. Exception details " + e);
throw new CloudRuntimeException("Unable to allocate additional public Ip address. Exception details " + e);
}
try {
outsideIp = _ipAddrMgr.associateIPToGuestNetwork(outsideIp.getId(), network.getId(), true);
} catch (ResourceAllocationException e) {
s_logger.error("Unable to assign allocated additional public Ip " + outsideIp.getAddress().addr() + " to network with vlan " + vlanId + ". Exception details " + e);
throw new CloudRuntimeException("Unable to assign allocated additional public Ip " + outsideIp.getAddress().addr() + " to network with vlan " + vlanId + ". Exception details " + e);
}
}
// create logical edge firewall in VNMC
String gatewayNetmask = NetUtils.getCidrNetmask(network.getCidr());
// all public ip addresses must be from the same subnet, which essentially means a single public subnet in the zone
if (!createLogicalEdgeFirewall(vlanId, network.getGateway(), gatewayNetmask, outsideIp.getAddress().addr(), sourceNatIp.getNetmask(), publicGateways, ciscoVnmcHost.getId())) {
s_logger.error("Failed to create logical edge firewall in Cisco VNMC device for network " + network.getName());
throw new CloudRuntimeException("Failed to create logical edge firewall in Cisco VNMC device for network " + network.getName());
}
// create stuff in VSM for ASA device
if (!configureNexusVsmForAsa(vlanId, network.getGateway(), vsmDevice.getUserName(), vsmDevice.getPassword(), vsmDevice.getipaddr(), assignedAsa.getInPortProfile(), ciscoVnmcHost.getId())) {
s_logger.error("Failed to configure Cisco Nexus VSM " + vsmDevice.getipaddr() + " for ASA device for network " + network.getName());
throw new CloudRuntimeException("Failed to configure Cisco Nexus VSM " + vsmDevice.getipaddr() + " for ASA device for network " + network.getName());
}
// configure source NAT
if (!configureSourceNat(vlanId, network.getCidr(), sourceNatIp, ciscoVnmcHost.getId())) {
s_logger.error("Failed to configure source NAT in Cisco VNMC device for network " + network.getName());
throw new CloudRuntimeException("Failed to configure source NAT in Cisco VNMC device for network " + network.getName());
}
// associate Asa 1000v instance with logical edge firewall
if (!associateAsaWithLogicalEdgeFirewall(vlanId, assignedAsa.getManagementIp(), ciscoVnmcHost.getId())) {
s_logger.error("Failed to associate Cisco ASA 1000v (" + assignedAsa.getManagementIp() + ") with logical edge firewall in VNMC for network " + network.getName());
throw new CloudRuntimeException("Failed to associate Cisco ASA 1000v (" + assignedAsa.getManagementIp() + ") with logical edge firewall in VNMC for network " + network.getName());
}
} catch (CloudRuntimeException e) {
unassignAsa1000vFromNetwork(network);
s_logger.error("CiscoVnmcElement failed", e);
return false;
} catch (Exception e) {
unassignAsa1000vFromNetwork(network);
ExceptionUtil.rethrowRuntime(e);
ExceptionUtil.rethrow(e, InsufficientAddressCapacityException.class);
ExceptionUtil.rethrow(e, ResourceUnavailableException.class);
throw new IllegalStateException(e);
}
return true;
}
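The catch block above relies on ExceptionUtil to selectively rethrow checked exceptions from a broad catch. The isolated sketch below restates that pattern with made-up method and parameter names; it assumes the same ExceptionUtil helper used in the method above.
// Hypothetical helper (not CloudStack code): run a task that may fail in several
// ways, rethrowing only the checked exceptions this method declares.
private void runNetworkTask(Callable<Void> task) throws InsufficientCapacityException, ResourceUnavailableException {
    try {
        task.call();
    } catch (Exception e) {
        // unchecked exceptions pass through unchanged
        ExceptionUtil.rethrowRuntime(e);
        // rethrown only if e is an instance of the given checked type
        ExceptionUtil.rethrow(e, InsufficientCapacityException.class);
        ExceptionUtil.rethrow(e, ResourceUnavailableException.class);
        // anything left is treated as a programming error
        throw new IllegalStateException(e);
    }
}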
use of com.cloud.exception.InsufficientCapacityException in project cloudstack by apache.
the class NetworkServiceImpl method commitNetwork.
private Network commitNetwork(final Long networkOfferingId, final String gateway, final String startIP, final String endIP, final String netmask, final String networkDomain, final String vlanId, final String name, final String displayText, final Account caller, final Long physicalNetworkId, final Long zoneId, final Long domainId, final boolean isDomainSpecific, final Boolean subdomainAccessFinal, final Long vpcId, final String startIPv6, final String endIPv6, final String ip6Gateway, final String ip6Cidr, final Boolean displayNetwork, final Long aclId, final String isolatedPvlan, final NetworkOfferingVO ntwkOff, final PhysicalNetwork pNtwk, final ACLType aclType, final Account ownerFinal, final String cidr, final boolean createVlan) throws InsufficientCapacityException, ResourceAllocationException {
try {
Network network = Transaction.execute(new TransactionCallbackWithException<Network, Exception>() {
@Override
public Network doInTransaction(TransactionStatus status) throws InsufficientCapacityException, ResourceAllocationException {
Account owner = ownerFinal;
Boolean subdomainAccess = subdomainAccessFinal;
Long sharedDomainId = null;
if (isDomainSpecific) {
if (domainId != null) {
sharedDomainId = domainId;
} else {
sharedDomainId = _domainMgr.getDomain(Domain.ROOT_DOMAIN).getId();
subdomainAccess = true;
}
}
// default owner to system if network has aclType=Domain
if (aclType == ACLType.Domain) {
owner = _accountMgr.getAccount(Account.ACCOUNT_ID_SYSTEM);
}
// Create guest network
Network network = null;
if (vpcId != null) {
if (!_configMgr.isOfferingForVpc(ntwkOff)) {
throw new InvalidParameterValueException("Network offering can't be used for VPC networks");
}
if (aclId != null) {
NetworkACL acl = _networkACLDao.findById(aclId);
if (acl == null) {
throw new InvalidParameterValueException("Unable to find specified NetworkACL");
}
if (aclId != NetworkACL.DEFAULT_DENY && aclId != NetworkACL.DEFAULT_ALLOW) {
// ACL should be associated with a VPC
if (!vpcId.equals(acl.getVpcId())) {
throw new InvalidParameterValueException("ACL: " + aclId + " do not belong to the VPC");
}
}
}
network = _vpcMgr.createVpcGuestNetwork(networkOfferingId, name, displayText, gateway, cidr, vlanId, networkDomain, owner, sharedDomainId, pNtwk, zoneId, aclType, subdomainAccess, vpcId, aclId, caller, displayNetwork);
} else {
if (_configMgr.isOfferingForVpc(ntwkOff)) {
throw new InvalidParameterValueException("Network offering can be used for VPC networks only");
}
if (ntwkOff.getInternalLb()) {
throw new InvalidParameterValueException("Internal Lb can be enabled on vpc networks only");
}
network = _networkMgr.createGuestNetwork(networkOfferingId, name, displayText, gateway, cidr, vlanId, networkDomain, owner, sharedDomainId, pNtwk, zoneId, aclType, subdomainAccess, vpcId, ip6Gateway, ip6Cidr, displayNetwork, isolatedPvlan);
}
if (_accountMgr.isRootAdmin(caller.getId()) && createVlan && network != null) {
// Create vlan ip range
_configMgr.createVlanAndPublicIpRange(pNtwk.getDataCenterId(), network.getId(), physicalNetworkId, false, null, startIP, endIP, gateway, netmask, vlanId, null, null, startIPv6, endIPv6, ip6Gateway, ip6Cidr);
}
return network;
}
});
if (domainId != null && aclType == ACLType.Domain) {
// send event for storing the domain wide resource access
Map<String, Object> params = new HashMap<String, Object>();
params.put(ApiConstants.ENTITY_TYPE, Network.class);
params.put(ApiConstants.ENTITY_ID, network.getId());
params.put(ApiConstants.DOMAIN_ID, domainId);
params.put(ApiConstants.SUBDOMAIN_ACCESS, subdomainAccessFinal == null ? Boolean.TRUE : subdomainAccessFinal);
_messageBus.publish(_name, EntityManager.MESSAGE_ADD_DOMAIN_WIDE_ENTITY_EVENT, PublishScope.LOCAL, params);
}
return network;
} catch (Exception e) {
ExceptionUtil.rethrowRuntime(e);
ExceptionUtil.rethrow(e, InsufficientCapacityException.class);
ExceptionUtil.rethrow(e, ResourceAllocationException.class);
throw new IllegalStateException(e);
}
}
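Since commitNetwork funnels the transactional work through Transaction.execute and then restores the checked exception types in the outer catch, a stripped-down, hypothetical version of that round trip may help; allocateGuestNetwork and the surrounding method are invented names for this sketch, not part of the project.
// Hypothetical example (not CloudStack code): let a checked
// InsufficientCapacityException escape a transactional callback.
private Network createNetworkInTx(final long offeringId) throws InsufficientCapacityException {
    try {
        return Transaction.execute(new TransactionCallbackWithException<Network, Exception>() {
            @Override
            public Network doInTransaction(TransactionStatus status) throws InsufficientCapacityException {
                // a capacity failure raised here rolls the transaction back
                return allocateGuestNetwork(offeringId);  // hypothetical helper
            }
        });
    } catch (Exception e) {
        ExceptionUtil.rethrowRuntime(e);
        ExceptionUtil.rethrow(e, InsufficientCapacityException.class);
        throw new IllegalStateException(e);
    }
}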