Use of com.sequenceiq.cloudbreak.domain.Cluster in project cloudbreak by hortonworks.
The class AmbariDecommissioner, method collectHostMetadata.
private Map<String, HostMetadata> collectHostMetadata(Cluster cluster, String hostGroupName, Collection<String> hostNames) {
    HostGroup hostGroup = hostGroupService.getByClusterIdAndName(cluster.getId(), hostGroupName);
    Set<HostMetadata> hostsInHostGroup = hostGroup.getHostMetadata();
    Map<String, HostMetadata> hostMetadatas = hostsInHostGroup.stream()
            .filter(hostMetadata -> hostNames.contains(hostMetadata.getHostName()))
            .collect(Collectors.toMap(HostMetadata::getHostName, hostMetadata -> hostMetadata));
    return hostMetadatas;
}
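The method keeps only the requested hosts and indexes them by host name. A minimal sketch of the same filter-and-index pattern, using a hypothetical Host stand-in instead of the real HostMetadata entity (names and values are illustrative, not from the cloudbreak codebase):

import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

class FilterAndIndexSketch {

    // Hypothetical stand-in for HostMetadata, carrying only a host name.
    record Host(String hostName) { }

    public static void main(String[] args) {
        Set<Host> hostsInHostGroup = Set.of(new Host("worker-1"), new Host("worker-2"), new Host("worker-3"));
        Collection<String> hostNames = List.of("worker-1", "worker-3");

        // Keep only the requested hosts and index them by host name,
        // mirroring the Collectors.toMap call in collectHostMetadata.
        Map<String, Host> byName = hostsInHostGroup.stream()
                .filter(host -> hostNames.contains(host.hostName()))
                .collect(Collectors.toMap(Host::hostName, host -> host));

        System.out.println(byName.keySet()); // [worker-1, worker-3] in some order
    }
}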
Use of com.sequenceiq.cloudbreak.domain.Cluster in project cloudbreak by hortonworks.
The class AmbariDecommissioner, method collectHostsToRemove.
public Map<String, HostMetadata> collectHostsToRemove(Stack stack, String hostGroupName, Collection<String> hostNames) throws CloudbreakException {
    Map<String, HostMetadata> hostsToRemove = collectHostMetadata(stack.getCluster(), hostGroupName, hostNames);
    if (hostsToRemove.size() != hostNames.size()) {
        throw new CloudbreakException("Not all the hosts found in the given host group.");
    }
    Cluster cluster = stack.getCluster();
    HttpClientConfig clientConfig = tlsSecurityService.buildTLSClientConfigForPrimaryGateway(stack.getId(), cluster.getAmbariIp());
    AmbariClient ambariClient = ambariClientProvider.getAmbariClient(clientConfig, stack.getGatewayPort(), cluster);
    List<String> runningHosts = ambariClient.getClusterHosts();
    Sets.newHashSet(hostsToRemove.keySet()).forEach(hostName -> {
        if (!runningHosts.contains(hostName)) {
            hostsToRemove.remove(hostName);
        }
    });
    return hostsToRemove;
}
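The method first verifies that every requested host exists in the host group, then prunes the result down to hosts that Ambari still reports as running; the key set is copied with Sets.newHashSet so the map can be mutated safely during iteration. A minimal sketch of that pruning step with made-up host names, using Map.keySet().removeIf as an equivalent alternative:

import java.util.HashMap;
import java.util.List;
import java.util.Map;

class PruneNotRunningSketch {

    public static void main(String[] args) {
        // Hypothetical hosts selected for removal, keyed by host name.
        Map<String, String> hostsToRemove = new HashMap<>(Map.of(
                "worker-1", "meta-1",
                "worker-2", "meta-2",
                "worker-3", "meta-3"));
        // Hosts that the cluster manager still reports as running.
        List<String> runningHosts = List.of("worker-1", "worker-3");

        // Drop every entry whose host is no longer running; removing through
        // the keySet view also removes the entry from the backing map.
        hostsToRemove.keySet().removeIf(hostName -> !runningHosts.contains(hostName));

        System.out.println(hostsToRemove.keySet()); // [worker-1, worker-3] in some order
    }
}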
Use of com.sequenceiq.cloudbreak.domain.Cluster in project cloudbreak by hortonworks.
The class ClusterTerminationService, method deleteClusterContainers.
public Boolean deleteClusterContainers(Cluster cluster) {
    try {
        Orchestrator orchestrator = cluster.getStack().getOrchestrator();
        ContainerOrchestrator containerOrchestrator = containerOrchestratorResolver.get(orchestrator.getType());
        try {
            Map<String, Object> map = new HashMap<>(orchestrator.getAttributes().getMap());
            OrchestrationCredential credential = new OrchestrationCredential(orchestrator.getApiEndpoint(), map);
            Set<Container> containers = containerRepository.findContainersInCluster(cluster.getId());
            List<ContainerInfo> containerInfo = containers.stream()
                    .map(c -> new ContainerInfo(c.getContainerId(), c.getName(), c.getHost(), c.getImage()))
                    .collect(Collectors.toList());
            containerOrchestrator.deleteContainer(containerInfo, credential);
            containerRepository.delete(containers);
            deleteClusterHostGroupsWithItsMetadata(cluster);
        } catch (CloudbreakOrchestratorException e) {
            throw new TerminationFailedException(String.format("Failed to delete containers of cluster (id:'%s',name:'%s').", cluster.getId(), cluster.getName()), e);
        }
        return Boolean.TRUE;
    } catch (CloudbreakException ignored) {
        return Boolean.FALSE;
    }
}
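The method resolves the cluster's container orchestrator, maps the persisted Container entities to ContainerInfo objects, deletes the containers, and then removes the host groups; orchestration failures are wrapped in TerminationFailedException, while any other CloudbreakException yields Boolean.FALSE. A minimal sketch of the entity-to-DTO mapping step, using hypothetical Container and ContainerInfo stand-ins rather than the real cloudbreak types:

import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

class ContainerMappingSketch {

    // Hypothetical stand-ins for the persisted entity and the orchestrator DTO.
    record Container(String containerId, String name, String host, String image) { }

    record ContainerInfo(String id, String name, String host, String image) { }

    public static void main(String[] args) {
        Set<Container> containers = Set.of(
                new Container("c-1", "ambari-server", "host-1", "example/ambari-server:2.6"),
                new Container("c-2", "ambari-agent", "host-2", "example/ambari-agent:2.6"));

        // Map each persisted container to the info object the orchestrator expects,
        // mirroring the stream in deleteClusterContainers.
        List<ContainerInfo> containerInfo = containers.stream()
                .map(c -> new ContainerInfo(c.containerId(), c.name(), c.host(), c.image()))
                .collect(Collectors.toList());

        containerInfo.forEach(System.out::println);
    }
}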
Use of com.sequenceiq.cloudbreak.domain.Cluster in project cloudbreak by hortonworks.
The class ConsulServerFilter, method filter.
@Override
public List<HostMetadata> filter(long clusterId, Map<String, String> config, List<HostMetadata> hosts) {
    List<HostMetadata> copy = new ArrayList<>(hosts);
    Cluster cluster = clusterRepository.findById(clusterId);
    for (HostMetadata host : hosts) {
        InstanceMetaData instanceMetaData = instanceMetadataRepository.findHostInStack(cluster.getStack().getId(), host.getHostName());
        if (instanceMetaData != null && instanceMetaData.getConsulServer()) {
            copy.remove(host);
        }
    }
    return copy;
}
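The filter removes every candidate host whose instance metadata marks it as a Consul server, iterating over the original list while removing from a copy so the collection being iterated is never modified. A minimal sketch of that copy-and-remove pattern, with an illustrative in-memory lookup in place of the repository call:

import java.util.ArrayList;
import java.util.List;
import java.util.Set;

class ConsulServerFilterSketch {

    public static void main(String[] args) {
        List<String> hosts = List.of("host-1", "host-2", "host-3");
        // Illustrative lookup result: in the real filter this comes from the
        // instance metadata repository, not an in-memory set.
        Set<String> consulServers = Set.of("host-2");

        // Iterate the original list and remove matches from a copy, so the
        // list being iterated is never modified.
        List<String> copy = new ArrayList<>(hosts);
        for (String host : hosts) {
            if (consulServers.contains(host)) {
                copy.remove(host);
            }
        }

        System.out.println(copy); // [host-1, host-3]
    }
}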
Use of com.sequenceiq.cloudbreak.domain.Cluster in project cloudbreak by hortonworks.
The class RDSConfigToRDSConfigResponseConverter, method convert.
@Override
public RDSConfigResponse convert(RDSConfig source) {
    RDSConfigResponse json = new RDSConfigResponse();
    json.setId(source.getId());
    json.setName(source.getName());
    json.setConnectionURL(source.getConnectionURL());
    json.setDatabaseEngine(source.getDatabaseEngine());
    json.setConnectionDriver(source.getConnectionDriver());
    json.setPublicInAccount(source.isPublicInAccount());
    json.setCreationDate(source.getCreationDate());
    if (source.getClusters() != null) {
        json.setClusterNames(source.getClusters().stream().map(Cluster::getName).collect(Collectors.toSet()));
    } else {
        json.setClusterNames(new HashSet<>());
    }
    json.setStackVersion(source.getStackVersion());
    json.setType(source.getType());
    return json;
}
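The converter copies the scalar fields one-to-one and maps the associated clusters to a set of names, falling back to an empty set when the association is null. A minimal sketch of that null-safe mapping, using a hypothetical Cluster stand-in with only a name field:

import java.util.HashSet;
import java.util.Set;
import java.util.stream.Collectors;

class ClusterNamesSketch {

    // Hypothetical minimal Cluster stand-in with only a name.
    record Cluster(String name) { }

    static Set<String> clusterNames(Set<Cluster> clusters) {
        // Null-safe mapping of the associated clusters to their names,
        // as in RDSConfigToRDSConfigResponseConverter.convert.
        return clusters == null
                ? new HashSet<>()
                : clusters.stream().map(Cluster::name).collect(Collectors.toSet());
    }

    public static void main(String[] args) {
        System.out.println(clusterNames(Set.of(new Cluster("c1"), new Cluster("c2")))); // [c1, c2] in some order
        System.out.println(clusterNames(null)); // []
    }
}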