Use of com.cloud.dc.ClusterVO in project cloudstack by apache.
From the class CiscoVnmcElement, the implement method:
@Override
public boolean implement(final Network network, final NetworkOffering offering, final DeployDestination dest, final ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException {
    final DataCenter zone = _entityMgr.findById(DataCenter.class, network.getDataCenterId());
    if (zone.getNetworkType() == NetworkType.Basic) {
        s_logger.debug("Not handling network implement in zone of type " + NetworkType.Basic);
        return false;
    }
    if (!canHandle(network)) {
        return false;
    }
    final List<CiscoVnmcControllerVO> devices = _ciscoVnmcDao.listByPhysicalNetwork(network.getPhysicalNetworkId());
    if (devices.isEmpty()) {
        s_logger.error("No Cisco Vnmc device on network " + network.getName());
        return false;
    }
    List<CiscoAsa1000vDeviceVO> asaList = _ciscoAsa1000vDao.listByPhysicalNetwork(network.getPhysicalNetworkId());
    if (asaList.isEmpty()) {
        s_logger.debug("No Cisco ASA 1000v device on network " + network.getName());
        return false;
    }
    NetworkAsa1000vMapVO asaForNetwork = _networkAsa1000vMapDao.findByNetworkId(network.getId());
    if (asaForNetwork != null) {
        s_logger.debug("Cisco ASA 1000v device already associated with network " + network.getName());
        return true;
    }
    if (!_networkModel.isProviderSupportServiceInNetwork(network.getId(), Service.SourceNat, Provider.CiscoVnmc)) {
        s_logger.error("SourceNat service is not provided by Cisco Vnmc device on network " + network.getName());
        return false;
    }
    try {
        // ensure that there is an ASA 1000v assigned to this network
        CiscoAsa1000vDevice assignedAsa = assignAsa1000vToNetwork(network);
        if (assignedAsa == null) {
            s_logger.error("Unable to assign ASA 1000v device to network " + network.getName());
            throw new CloudRuntimeException("Unable to assign ASA 1000v device to network " + network.getName());
        }
        ClusterVO asaCluster = _clusterDao.findById(assignedAsa.getClusterId());
        ClusterVSMMapVO clusterVsmMap = _clusterVsmMapDao.findByClusterId(assignedAsa.getClusterId());
        if (clusterVsmMap == null) {
            s_logger.error("Vmware cluster " + asaCluster.getName() + " has no Cisco Nexus VSM device associated with it");
            throw new CloudRuntimeException("Vmware cluster " + asaCluster.getName() + " has no Cisco Nexus VSM device associated with it");
        }
        CiscoNexusVSMDeviceVO vsmDevice = _vsmDeviceDao.findById(clusterVsmMap.getVsmId());
        if (vsmDevice == null) {
            s_logger.error("Unable to load details of Cisco Nexus VSM device associated with cluster " + asaCluster.getName());
            throw new CloudRuntimeException("Unable to load details of Cisco Nexus VSM device associated with cluster " + asaCluster.getName());
        }
        CiscoVnmcControllerVO ciscoVnmcDevice = devices.get(0);
        HostVO ciscoVnmcHost = _hostDao.findById(ciscoVnmcDevice.getHostId());
        _hostDao.loadDetails(ciscoVnmcHost);
        Account owner = context.getAccount();
        PublicIp sourceNatIp = _ipAddrMgr.assignSourceNatIpAddressToGuestNetwork(owner, network);
        long vlanId = Long.parseLong(BroadcastDomainType.getValue(network.getBroadcastUri()));
        List<VlanVO> vlanVOList = _vlanDao.listVlansByPhysicalNetworkId(network.getPhysicalNetworkId());
        List<String> publicGateways = new ArrayList<String>();
        for (VlanVO vlanVO : vlanVOList) {
            publicGateways.add(vlanVO.getVlanGateway());
        }
        // due to VNMC limitation of not allowing source NAT ip as the outside ip of firewall,
        // an additional public ip needs to be acquired for assigning as firewall outside ip.
        // In case there are already additional ip addresses available (network restart) use one
        // of them such that it is not the source NAT ip
        IpAddress outsideIp = null;
        List<IPAddressVO> publicIps = _ipAddressDao.listByAssociatedNetwork(network.getId(), null);
        for (IPAddressVO ip : publicIps) {
            if (!ip.isSourceNat()) {
                outsideIp = ip;
                break;
            }
        }
        if (outsideIp == null) {
            // none available, acquire one
            try {
                Account caller = CallContext.current().getCallingAccount();
                long callerUserId = CallContext.current().getCallingUserId();
                outsideIp = _ipAddrMgr.allocateIp(owner, false, caller, callerUserId, zone, true, null);
            } catch (ResourceAllocationException e) {
                s_logger.error("Unable to allocate additional public Ip address. Exception details " + e);
                throw new CloudRuntimeException("Unable to allocate additional public Ip address. Exception details " + e);
            }
            try {
                outsideIp = _ipAddrMgr.associateIPToGuestNetwork(outsideIp.getId(), network.getId(), true);
            } catch (ResourceAllocationException e) {
                s_logger.error("Unable to assign allocated additional public Ip " + outsideIp.getAddress().addr() + " to network with vlan " + vlanId + ". Exception details " + e);
                throw new CloudRuntimeException("Unable to assign allocated additional public Ip " + outsideIp.getAddress().addr() + " to network with vlan " + vlanId + ". Exception details " + e);
            }
        }
        // create logical edge firewall in VNMC
        String gatewayNetmask = NetUtils.getCidrNetmask(network.getCidr());
        // all public ip addresses must be from same subnet, this essentially means single public subnet in zone
        if (!createLogicalEdgeFirewall(vlanId, network.getGateway(), gatewayNetmask, outsideIp.getAddress().addr(), sourceNatIp.getNetmask(), publicGateways, ciscoVnmcHost.getId())) {
            s_logger.error("Failed to create logical edge firewall in Cisco VNMC device for network " + network.getName());
            throw new CloudRuntimeException("Failed to create logical edge firewall in Cisco VNMC device for network " + network.getName());
        }
        // create stuff in VSM for ASA device
        if (!configureNexusVsmForAsa(vlanId, network.getGateway(), vsmDevice.getUserName(), vsmDevice.getPassword(), vsmDevice.getipaddr(), assignedAsa.getInPortProfile(), ciscoVnmcHost.getId())) {
            s_logger.error("Failed to configure Cisco Nexus VSM " + vsmDevice.getipaddr() + " for ASA device for network " + network.getName());
            throw new CloudRuntimeException("Failed to configure Cisco Nexus VSM " + vsmDevice.getipaddr() + " for ASA device for network " + network.getName());
        }
        // configure source NAT
        if (!configureSourceNat(vlanId, network.getCidr(), sourceNatIp, ciscoVnmcHost.getId())) {
            s_logger.error("Failed to configure source NAT in Cisco VNMC device for network " + network.getName());
            throw new CloudRuntimeException("Failed to configure source NAT in Cisco VNMC device for network " + network.getName());
        }
        // associate Asa 1000v instance with logical edge firewall
        if (!associateAsaWithLogicalEdgeFirewall(vlanId, assignedAsa.getManagementIp(), ciscoVnmcHost.getId())) {
            s_logger.error("Failed to associate Cisco ASA 1000v (" + assignedAsa.getManagementIp() + ") with logical edge firewall in VNMC for network " + network.getName());
            throw new CloudRuntimeException("Failed to associate Cisco ASA 1000v (" + assignedAsa.getManagementIp() + ") with logical edge firewall in VNMC for network " + network.getName());
        }
    } catch (CloudRuntimeException e) {
        unassignAsa1000vFromNetwork(network);
        s_logger.error("CiscoVnmcElement failed", e);
        return false;
    } catch (Exception e) {
        unassignAsa1000vFromNetwork(network);
        ExceptionUtil.rethrowRuntime(e);
        ExceptionUtil.rethrow(e, InsufficientAddressCapacityException.class);
        ExceptionUtil.rethrow(e, ResourceUnavailableException.class);
        throw new IllegalStateException(e);
    }
    return true;
}
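In this snippet the ClusterVO ties the assigned ASA 1000v appliance to the Nexus VSM registered for its VMware cluster. The following is a minimal sketch (not code from CiscoVnmcElement) that isolates that ClusterVO-to-VSM lookup, assuming the same injected DAO fields used above; the helper name resolveVsmForAsaCluster is hypothetical.

// Hypothetical helper that isolates the ClusterVO -> ClusterVSMMapVO -> CiscoNexusVSMDeviceVO
// lookup performed in implement(), using only the DAO calls already shown above.
private CiscoNexusVSMDeviceVO resolveVsmForAsaCluster(CiscoAsa1000vDevice assignedAsa) {
    // the VMware cluster the ASA 1000v appliance lives in
    ClusterVO asaCluster = _clusterDao.findById(assignedAsa.getClusterId());
    // the Nexus VSM mapping registered against that cluster, if any
    ClusterVSMMapVO clusterVsmMap = _clusterVsmMapDao.findByClusterId(assignedAsa.getClusterId());
    if (clusterVsmMap == null) {
        throw new CloudRuntimeException("Vmware cluster " + asaCluster.getName() + " has no Cisco Nexus VSM device associated with it");
    }
    CiscoNexusVSMDeviceVO vsmDevice = _vsmDeviceDao.findById(clusterVsmMap.getVsmId());
    if (vsmDevice == null) {
        throw new CloudRuntimeException("Unable to load details of Cisco Nexus VSM device associated with cluster " + asaCluster.getName());
    }
    return vsmDevice;
}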
Use of com.cloud.dc.ClusterVO in project cloudstack by apache.
From the class SimulatorDiscoverer, the find method:
/**
 * Finds ServerResources of an in-process simulator
 *
 * @see com.cloud.resource.Discoverer#find(long, java.lang.Long,
 *      java.lang.Long, java.net.URI, java.lang.String, java.lang.String)
 */
@Override
public Map<? extends ServerResource, Map<String, String>> find(long dcId, Long podId, Long clusterId, URI uri, String username, String password, List<String> hostTags) throws DiscoveryException {
    Map<AgentResourceBase, Map<String, String>> resources;
    try {
        // http://sim/count=$count, it will add $count number of hosts into the cluster
        String scheme = uri.getScheme();
        String host = uri.getAuthority();
        String commands = URLDecoder.decode(uri.getPath());
        long cpuSpeed = MockAgentManager.DEFAULT_HOST_SPEED_MHZ;
        long cpuCores = MockAgentManager.DEFAULT_HOST_CPU_CORES;
        long memory = MockAgentManager.DEFAULT_HOST_MEM_SIZE;
        long localstorageSize = MockStorageManager.DEFAULT_HOST_STORAGE_SIZE;
        if (scheme.equals("http")) {
            if (host == null || !host.startsWith("sim")) {
                String msg = "uri is not of simulator type so we're not taking care of the discovery for this: " + uri;
                if (s_logger.isDebugEnabled()) {
                    s_logger.debug(msg);
                }
                return null;
            }
            if (commands != null) {
                int index = commands.lastIndexOf("/");
                if (index != -1) {
                    commands = commands.substring(index + 1);
                    String[] cmds = commands.split("&");
                    for (String cmd : cmds) {
                        String[] parameter = cmd.split("=");
                        if (parameter[0].equalsIgnoreCase("cpuspeed") && parameter[1] != null) {
                            cpuSpeed = Long.parseLong(parameter[1]);
                        } else if (parameter[0].equalsIgnoreCase("cpucore") && parameter[1] != null) {
                            cpuCores = Long.parseLong(parameter[1]);
                        } else if (parameter[0].equalsIgnoreCase("memory") && parameter[1] != null) {
                            memory = Long.parseLong(parameter[1]);
                        } else if (parameter[0].equalsIgnoreCase("localstorage") && parameter[1] != null) {
                            localstorageSize = Long.parseLong(parameter[1]);
                        }
                    }
                }
            }
        } else {
            String msg = "uriString is not http so we're not taking care of the discovery for this: " + uri;
            if (s_logger.isDebugEnabled()) {
                s_logger.debug(msg);
            }
            return null;
        }
        String cluster = null;
        if (clusterId == null) {
            String msg = "must specify cluster Id when adding host";
            if (s_logger.isDebugEnabled()) {
                s_logger.debug(msg);
            }
            throw new RuntimeException(msg);
        } else {
            ClusterVO clu = _clusterDao.findById(clusterId);
            if (clu == null || (clu.getHypervisorType() != HypervisorType.Simulator)) {
                if (s_logger.isInfoEnabled())
                    s_logger.info("invalid cluster id or cluster is not for Simulator hypervisors");
                return null;
            }
            cluster = Long.toString(clusterId);
            if (clu.getGuid() == null) {
                clu.setGuid(UUID.randomUUID().toString());
            }
            _clusterDao.update(clusterId, clu);
        }
        String pod;
        if (podId == null) {
            String msg = "must specify pod Id when adding host";
            if (s_logger.isDebugEnabled()) {
                s_logger.debug(msg);
            }
            throw new RuntimeException(msg);
        } else {
            pod = Long.toString(podId);
        }
        Map<String, String> details = new HashMap<String, String>();
        Map<String, Object> params = new HashMap<String, Object>();
        details.put("username", username);
        params.put("username", username);
        details.put("password", password);
        params.put("password", password);
        params.put("zone", Long.toString(dcId));
        params.put("pod", pod);
        params.put("cluster", cluster);
        params.put("cpuspeed", Long.toString(cpuSpeed));
        params.put("cpucore", Long.toString(cpuCores));
        params.put("memory", Long.toString(memory));
        params.put("localstorage", Long.toString(localstorageSize));
        resources = createAgentResources(params);
        return resources;
    } catch (Exception ex) {
        s_logger.error("Exception when discovering simulator hosts: " + ex.getMessage());
    }
    return null;
}
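Here the ClusterVO is used to check that the target cluster exists and is a Simulator cluster, and to back-fill a GUID if the cluster does not have one yet. Below is a minimal sketch (not code from SimulatorDiscoverer) that isolates that validation, assuming the same _clusterDao field used above; validateSimulatorCluster is a hypothetical helper name and returns null exactly where find() bails out.

// Hypothetical helper isolating the cluster validation performed in find().
private ClusterVO validateSimulatorCluster(Long clusterId) {
    if (clusterId == null) {
        throw new RuntimeException("must specify cluster Id when adding host");
    }
    ClusterVO clu = _clusterDao.findById(clusterId);
    if (clu == null || clu.getHypervisorType() != HypervisorType.Simulator) {
        // not a Simulator cluster: this discoverer does not handle it
        return null;
    }
    if (clu.getGuid() == null) {
        // persist a GUID so the cluster can be referenced by agents later
        clu.setGuid(UUID.randomUUID().toString());
    }
    _clusterDao.update(clusterId, clu);
    return clu;
}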
Use of com.cloud.dc.ClusterVO in project cloudstack by apache.
From the class KubernetesClusterManagerImpl, the plan method:
private DeployDestination plan(final long nodesCount, final DataCenter zone, final ServiceOffering offering) throws InsufficientServerCapacityException {
    final int cpu_requested = offering.getCpu() * offering.getSpeed();
    final long ram_requested = offering.getRamSize() * 1024L * 1024L;
    List<HostVO> hosts = resourceManager.listAllHostsInOneZoneByType(Type.Routing, zone.getId());
    final Map<String, Pair<HostVO, Integer>> hosts_with_resevered_capacity = new ConcurrentHashMap<String, Pair<HostVO, Integer>>();
    for (HostVO h : hosts) {
        hosts_with_resevered_capacity.put(h.getUuid(), new Pair<HostVO, Integer>(h, 0));
    }
    boolean suitable_host_found = false;
    Cluster planCluster = null;
    for (int i = 1; i <= nodesCount; i++) {
        suitable_host_found = false;
        for (Map.Entry<String, Pair<HostVO, Integer>> hostEntry : hosts_with_resevered_capacity.entrySet()) {
            Pair<HostVO, Integer> hp = hostEntry.getValue();
            HostVO h = hp.first();
            hostDao.loadHostTags(h);
            if (StringUtils.isNotEmpty(offering.getHostTag()) && !(h.getHostTags() != null && h.getHostTags().contains(offering.getHostTag()))) {
                continue;
            }
            int reserved = hp.second();
            reserved++;
            ClusterVO cluster = clusterDao.findById(h.getClusterId());
            ClusterDetailsVO cluster_detail_cpu = clusterDetailsDao.findDetail(cluster.getId(), "cpuOvercommitRatio");
            ClusterDetailsVO cluster_detail_ram = clusterDetailsDao.findDetail(cluster.getId(), "memoryOvercommitRatio");
            Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue());
            Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue());
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug(String.format("Checking host ID: %s for capacity already reserved %d", h.getUuid(), reserved));
            }
            if (capacityManager.checkIfHostHasCapacity(h.getId(), cpu_requested * reserved, ram_requested * reserved, false, cpuOvercommitRatio, memoryOvercommitRatio, true)) {
                if (LOGGER.isDebugEnabled()) {
                    LOGGER.debug(String.format("Found host ID: %s with enough capacity, CPU=%d RAM=%s", h.getUuid(), cpu_requested * reserved, toHumanReadableSize(ram_requested * reserved)));
                }
                hostEntry.setValue(new Pair<HostVO, Integer>(h, reserved));
                suitable_host_found = true;
                planCluster = cluster;
                break;
            }
        }
        if (!suitable_host_found) {
            if (LOGGER.isInfoEnabled()) {
                LOGGER.info(String.format("Suitable hosts not found in datacenter ID: %s for node %d with offering ID: %s", zone.getUuid(), i, offering.getUuid()));
            }
            break;
        }
    }
    if (suitable_host_found) {
        if (LOGGER.isInfoEnabled()) {
            LOGGER.info(String.format("Suitable hosts found in datacenter ID: %s, creating deployment destination", zone.getUuid()));
        }
        return new DeployDestination(zone, null, planCluster, null);
    }
    String msg = String.format("Cannot find enough capacity for Kubernetes cluster(requested cpu=%d memory=%s) with offering ID: %s", cpu_requested * nodesCount, toHumanReadableSize(ram_requested * nodesCount), offering.getUuid());
    LOGGER.warn(msg);
    throw new InsufficientServerCapacityException(msg, DataCenter.class, zone.getId());
}
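In plan(), the ClusterVO of each candidate host is only needed to look up that cluster's CPU and memory overcommit ratios via ClusterDetailsDao before the capacity check. Below is a minimal sketch of that lookup, assuming the same clusterDao and clusterDetailsDao fields and CloudStack's Pair utility shown above; getClusterOvercommitRatios is a hypothetical helper name.

// Hypothetical helper isolating the overcommit-ratio lookup performed per host in plan().
private Pair<Float, Float> getClusterOvercommitRatios(HostVO host) {
    // the cluster this candidate host belongs to
    ClusterVO cluster = clusterDao.findById(host.getClusterId());
    ClusterDetailsVO cpuDetail = clusterDetailsDao.findDetail(cluster.getId(), "cpuOvercommitRatio");
    ClusterDetailsVO ramDetail = clusterDetailsDao.findDetail(cluster.getId(), "memoryOvercommitRatio");
    Float cpuOvercommitRatio = Float.parseFloat(cpuDetail.getValue());
    Float memoryOvercommitRatio = Float.parseFloat(ramDetail.getValue());
    // first = CPU overcommit ratio, second = memory overcommit ratio
    return new Pair<Float, Float>(cpuOvercommitRatio, memoryOvercommitRatio);
}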
Use of com.cloud.dc.ClusterVO in project cloudstack by apache.
From the class DateraHostListener, the hostRemoved method:
@Override
public boolean hostRemoved(long hostId, long clusterId) {
    ClusterVO clusterVO = _clusterDao.findById(clusterId);
    HostVO hostVO = _hostDao.findByIdIncludingRemoved(hostId);
    String initiatorName = DateraUtil.INITIATOR_PREFIX + "-" + hostVO.getUuid();
    int s_lockTimeInSeconds = 5;
    GlobalLock lock = GlobalLock.getInternLock(clusterVO.getUuid());
    if (!lock.lock(s_lockTimeInSeconds)) {
        String errMsg = "Couldn't lock the DB on the following string: " + clusterVO.getUuid();
        s_logger.debug(errMsg);
        throw new CloudRuntimeException(errMsg);
    }
    try {
        List<StoragePoolVO> storagePools = _storagePoolDao.findPoolsByProvider(DateraUtil.PROVIDER_NAME);
        if (storagePools != null && storagePools.size() > 0) {
            for (StoragePoolVO storagePool : storagePools) {
                ClusterDetailsVO clusterDetail = _clusterDetailsDao.findDetail(clusterId, DateraUtil.getInitiatorGroupKey(storagePool.getId()));
                String initiatorGroupName = clusterDetail != null ? clusterDetail.getValue() : null;
                if (initiatorGroupName != null && DateraUtil.hostSupport_iScsi(hostVO)) {
                    DateraObject.DateraConnection conn = DateraUtil.getDateraConnection(storagePool.getId(), _storagePoolDetailsDao);
                    DateraObject.Initiator initiator = DateraUtil.getInitiator(conn, hostVO.getStorageUrl());
                    DateraObject.InitiatorGroup initiatorGroup = DateraUtil.getInitiatorGroup(conn, initiatorGroupName);
                    if (initiator != null && DateraUtil.isInitiatorPresentInGroup(initiator, initiatorGroup)) {
                        DateraUtil.removeInitiatorFromGroup(conn, initiator.getPath(), initiatorGroupName);
                    }
                }
            }
        }
    } catch (DateraObject.DateraError | UnsupportedEncodingException e) {
        s_logger.warn("Error while removing host from initiator groups ", e);
    } finally {
        lock.unlock();
        lock.releaseRef();
    }
    return true;
}
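hostRemoved() uses the ClusterVO UUID as the name of an interned GlobalLock so that initiator-group updates for the same cluster are serialized across management-server threads. Below is a minimal sketch of that lock-around pattern using only the GlobalLock calls shown above; withClusterLock and the Runnable body are illustrative names, not project code.

// Sketch of the per-cluster locking pattern: the ClusterVO UUID keys an interned GlobalLock.
private void withClusterLock(ClusterVO clusterVO, int lockTimeInSeconds, Runnable work) {
    GlobalLock lock = GlobalLock.getInternLock(clusterVO.getUuid());
    if (!lock.lock(lockTimeInSeconds)) {
        throw new CloudRuntimeException("Couldn't lock the DB on the following string: " + clusterVO.getUuid());
    }
    try {
        // run the cluster-scoped work while the lock is held
        work.run();
    } finally {
        lock.unlock();
        lock.releaseRef();
    }
}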
Use of com.cloud.dc.ClusterVO in project cloudstack by apache.
From the class DateraPrimaryDataStoreDriver, the revokeAccess method:
/**
 * Removes access of the initiator group to which {@code host} belongs from the
 * appInstance given by {@code dataObject}
 *
 * @param dataObject Datera volume
 * @param host the host which is currently having access to the volume
 * @param dataStore The primary store to which volume belongs
 */
@Override
public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) {
    s_logger.debug("revokeAccess() called");
    Preconditions.checkArgument(dataObject != null, "'dataObject' should not be 'null'");
    Preconditions.checkArgument(host != null, "'host' should not be 'null'");
    Preconditions.checkArgument(dataStore != null, "'dataStore' should not be 'null'");
    String appInstanceName = getAppInstanceName(dataObject);
    long clusterId = host.getClusterId();
    long storagePoolId = dataStore.getId();
    ClusterVO cluster = _clusterDao.findById(clusterId);
    GlobalLock lock = GlobalLock.getInternLock(cluster.getUuid());
    if (!lock.lock(s_lockTimeInSeconds)) {
        s_logger.debug("Couldn't lock the DB (in revokeAccess) on the following string: " + cluster.getUuid());
    }
    try {
        String initiatorGroupName = DateraUtil.INITIATOR_GROUP_PREFIX + "-" + cluster.getUuid();
        DateraObject.DateraConnection conn = DateraUtil.getDateraConnection(storagePoolId, _storagePoolDetailsDao);
        DateraObject.AppInstance appInstance = DateraUtil.getAppInstance(conn, appInstanceName);
        DateraObject.InitiatorGroup initiatorGroup = DateraUtil.getInitiatorGroup(conn, initiatorGroupName);
        if (initiatorGroup != null && appInstance != null) {
            DateraUtil.removeGroupFromAppInstance(conn, initiatorGroupName, appInstanceName);
            int retries = DateraUtil.DEFAULT_RETRIES;
            while (isInitiatorGroupAssignedToAppInstance(conn, initiatorGroup, appInstance) && retries > 0) {
                Thread.sleep(DateraUtil.POLL_TIMEOUT_MS);
                retries--;
            }
        }
    } catch (DateraObject.DateraError | UnsupportedEncodingException | InterruptedException dateraError) {
        String errMesg = "Error revoking access for Volume : " + dataObject.getId();
        s_logger.warn(errMesg, dateraError);
        throw new CloudRuntimeException(errMesg);
    } finally {
        lock.unlock();
        lock.releaseRef();
    }
}
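revokeAccess() derives the Datera initiator group name from the ClusterVO UUID of the cluster the host belongs to, so all hosts in one CloudStack cluster share a single initiator group on the array. Below is a minimal sketch of that naming scheme, assuming the same _clusterDao field and DateraUtil constant shown above; initiatorGroupNameForHost is a hypothetical helper name.

// Hypothetical helper showing how the initiator group name is tied to the host's ClusterVO.
private String initiatorGroupNameForHost(Host host) {
    ClusterVO cluster = _clusterDao.findById(host.getClusterId());
    return DateraUtil.INITIATOR_GROUP_PREFIX + "-" + cluster.getUuid();
}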