Use of com.sequenceiq.ambari.client.AmbariClient in the cloudbreak project by Hortonworks.
Class AmbariClusterService, method getClusterJson.
@Override
public String getClusterJson(String ambariIp, Long stackId) {
    try {
        // Fetch the cluster definition from the Ambari server as raw JSON.
        String json = getAmbariClient(stackId).getClusterAsJson();
        if (json != null) {
            return json;
        }
        throw new BadRequestException(String.format("Cluster response coming from Ambari server was null. [Ambari Server IP: '%s']", ambariIp));
    } catch (HttpResponseException e) {
        // Ambari reports a missing resource with a bare "Not Found" message.
        if ("Not Found".equals(e.getMessage())) {
            throw new NotFoundException("Ambari validation not found.", e);
        }
        // Anything else is wrapped with the extracted Ambari error detail.
        throw new CloudbreakServiceException("Could not get Cluster from Ambari as JSON: " + AmbariClientExceptionUtil.getErrorMessage(e), e);
    }
}
Use of com.sequenceiq.ambari.client.AmbariClient in the cloudbreak project by Hortonworks.
Class DNDecommissionStatusCheckerTask, method checkStatus.
@Override
public boolean checkStatus(AmbariOperations t) {
    // Decommissioning is complete once Ambari reports no DataNodes still decommissioning.
    Map<String, Long> decommissioning = t.getAmbariClient().getDecommissioningDataNodes();
    if (decommissioning.isEmpty()) {
        return true;
    }
    LOGGER.info("DataNode decommission is in progress: {}", decommissioning);
    return false;
}
Use of com.sequenceiq.ambari.client.AmbariClient in the cloudbreak project by Hortonworks.
Class RSDecommissionStatusCheckerTask, method checkStatus.
@Override
public boolean checkStatus(AmbariHostsWithNames t) {
    MDCBuilder.buildMdcContext(t.getStack());
    // Query the current decommission state of every tracked RegionServer.
    Map<String, String> serverStates = t.getAmbariClient().getHBaseRegionServersState(t.getHostNames());
    for (Entry<String, String> serverState : serverStates.entrySet()) {
        String state = serverState.getValue();
        if (!FINAL_STATE.equals(state)) {
            // Log the first server still in flight and keep polling.
            LOGGER.info("RegionServer: {} decommission is in progress, current state: {}", serverState.getKey(), state);
            return false;
        }
    }
    return true;
}
Use of com.sequenceiq.ambari.client.AmbariClient in the cloudbreak project by Hortonworks.
Class AmbariDecommissioner, method decommissionAmbariNodes.
/**
 * Decommissions the given hosts from the stack's cluster, partitioning them
 * into healthy and unhealthy hosts (Ambari host state "UNKNOWN") and handling
 * each group differently.
 *
 * @param stack         stack whose cluster owns the hosts
 * @param hostsToRemove hostname -> host metadata for every host to remove
 * @return the set of hostnames that were deleted
 * @throws CloudbreakException propagated from the orchestrator/decommission steps
 */
public Set<String> decommissionAmbariNodes(Stack stack, Map<String, HostMetadata> hostsToRemove) throws CloudbreakException {
Cluster cluster = stack.getCluster();
// Build a TLS-secured Ambari client against the cluster's primary gateway.
HttpClientConfig clientConfig = tlsSecurityService.buildTLSClientConfigForPrimaryGateway(stack.getId(), cluster.getAmbariIp());
AmbariClient ambariClient = ambariClientProvider.getAmbariClient(clientConfig, stack.getGatewayPort(), cluster);
// Partition by Ambari-reported host state: "UNKNOWN" is treated as unhealthy.
Map<String, HostMetadata> unhealthyHosts = new HashMap<>();
Map<String, HostMetadata> healthyHosts = new HashMap<>();
for (Entry<String, HostMetadata> hostToRemove : hostsToRemove.entrySet()) {
if ("UNKNOWN".equals(ambariClient.getHostState(hostToRemove.getKey()))) {
unhealthyHosts.put(hostToRemove.getKey(), hostToRemove.getValue());
} else {
healthyHosts.put(hostToRemove.getKey(), hostToRemove.getValue());
}
}
Set<String> deletedHosts = new HashSet<>();
// Snapshot of host -> component -> state, reused by both deletion paths below.
Map<String, Map<String, String>> runningComponents = ambariClient.getHostComponentsStates();
if (!unhealthyHosts.isEmpty()) {
// NOTE(review): hostList is built from ALL hosts to remove, not only the
// unhealthy ones — confirm the orchestrator is intentionally given the full set.
List<String> hostList = new ArrayList<>(hostsToRemove.keySet());
removeHostsFromOrchestrator(stack, ambariClient, hostList);
// Unhealthy hosts are deleted from Ambari directly and purged from the metadata store.
for (Entry<String, HostMetadata> host : unhealthyHosts.entrySet()) {
deleteHostFromAmbari(host.getValue(), runningComponents, ambariClient);
hostMetadataRepository.delete(host.getValue().getId());
deletedHosts.add(host.getKey());
}
}
if (!healthyHosts.isEmpty()) {
// Healthy hosts go through the regular Ambari decommission flow (overloaded variant).
deletedHosts.addAll(decommissionAmbariNodes(stack, healthyHosts, runningComponents, ambariClient));
}
return deletedHosts;
}
Use of com.sequenceiq.ambari.client.AmbariClient in the cloudbreak project by Hortonworks.
Class AmbariDecommissioner, method collectDownscaleCandidates.
/**
 * Collects the hosts of a host group that are eligible for downscale,
 * enforcing the HDFS replication-factor constraint when the group runs
 * DataNodes.
 */
private Iterable<HostMetadata> collectDownscaleCandidates(Stack stack, Cluster cluster, String hostGroupName, Integer scalingAdjustment) {
    HttpClientConfig clientConfig = tlsSecurityService.buildTLSClientConfigForPrimaryGateway(stack.getId(), cluster.getAmbariIp());
    HostGroup hostGroup = hostGroupService.getByClusterIdAndName(cluster.getId(), hostGroupName);
    Set<HostMetadata> hostsInHostGroup = hostGroup.getHostMetadata();
    // Hosts the filter service keeps back are reserved and not eligible for removal.
    List<HostMetadata> removableHosts = hostFilterService.filterHostsForDecommission(cluster, hostsInHostGroup, hostGroupName);
    int reservedInstances = hostsInHostGroup.size() - removableHosts.size();
    AmbariClient ambariClient = ambariClientProvider.getAmbariClient(clientConfig, stack.getGatewayPort(), cluster);
    String blueprintName = cluster.getBlueprint().getAmbariName();
    // NOTE(review): getBlueprintMap(...).get(hostGroupName) would NPE if the group
    // is absent from the blueprint — presumably guaranteed by callers; confirm.
    boolean groupRunsDatanode = ambariClient.getBlueprintMap(blueprintName).get(hostGroupName).contains(DATANODE);
    if (!groupRunsDatanode) {
        verifyNodeCount(NO_REPLICATION, scalingAdjustment, removableHosts.size(), reservedInstances);
        return removableHosts;
    }
    // DataNode groups must respect the HDFS replication factor when shrinking,
    // and candidates are ordered by available space.
    int replication = getReplicationFactor(ambariClient, hostGroupName);
    verifyNodeCount(replication, scalingAdjustment, removableHosts.size(), reservedInstances);
    return checkAndSortByAvailableSpace(stack, ambariClient, replication, scalingAdjustment, removableHosts);
}
Aggregations