Use of com.sequenceiq.cloudbreak.domain.KerberosConfig in the project Cloudbreak by Hortonworks.
The following example shows the recreate method of the AmbariClusterService class.
/**
 * Recreates the cluster on an existing stack from the given blueprint and host group
 * assignments, then triggers a fresh cluster install.
 *
 * <p>For secure (Kerberized) clusters the caller must supply both the Kerberos password
 * and principal; they are persisted onto the cluster's existing {@code KerberosConfig}
 * before the reinstall. Recreation is only supported when Ambari runs with its embedded
 * database, because an external Ambari DB schema cannot be reset automatically.
 *
 * @param stackId           id of the stack whose cluster is recreated
 * @param blueprintId       id of the blueprint to install; must not be {@code null}
 * @param hostGroups        host group to host assignments; must not be {@code null}
 * @param validateBlueprint whether to validate the blueprint against the stack's instance groups
 * @param stackRepoDetails  HDP/HDF repository details used for the install
 * @param kerberosPassword  Kerberos admin password, required when the cluster is secure
 * @param kerberosPrincipal Kerberos admin principal, required when the cluster is secure
 * @return the (re)created cluster attached to the stack
 * @throws BadRequestException        if required arguments or Kerberos credentials are missing,
 *                                    or Ambari uses an external database
 * @throws CloudbreakServiceException if triggering the cluster install fails
 */
@Override
public Cluster recreate(Long stackId, Long blueprintId, Set<HostGroup> hostGroups, boolean validateBlueprint,
        StackRepoDetails stackRepoDetails, String kerberosPassword, String kerberosPrincipal) {
    if (blueprintId == null || hostGroups == null) {
        throw new BadRequestException("Blueprint id and hostGroup assignments can not be null.");
    }
    Stack stack = stackService.getByIdWithLists(stackId);
    Cluster cluster = getCluster(stack);
    if (cluster != null && stack.getCluster().isSecure()) {
        // Collect which of the two mandatory Kerberos credential parts are missing so the
        // error message can name all of them at once.
        List<String> missing = Stream.of(Pair.of("password", kerberosPassword), Pair.of("principal", kerberosPrincipal))
                .filter(p -> !StringUtils.hasLength(p.getRight()))
                .map(Pair::getLeft)
                .collect(Collectors.toList());
        if (!missing.isEmpty()) {
            throw new BadRequestException(String.format("Missing Kerberos credential detail(s): %s", String.join(", ", missing)));
        }
        // NOTE(review): getKerberosConfig() is dereferenced without a null guard — a secure
        // cluster is presumably guaranteed to have one; confirm against the domain model.
        KerberosConfig kerberosConfig = cluster.getKerberosConfig();
        kerberosConfig.setPassword(kerberosPassword);
        kerberosConfig.setPrincipal(kerberosPrincipal);
        kerberosConfigRepository.save(kerberosConfig);
    }
    Blueprint blueprint = blueprintService.get(blueprintId);
    // Recreation resets the Ambari server schema, which is only possible with the embedded DB.
    if (!withEmbeddedAmbariDB(cluster)) {
        throw new BadRequestException("Ambari doesn't support resetting external DB automatically. To reset Ambari Server schema you must first drop "
                + "and then create it using DDL scripts from /var/lib/ambari-server/resources");
    }
    if (validateBlueprint) {
        blueprintValidator.validateBlueprintForStack(cluster, blueprint, hostGroups, stack.getInstanceGroups());
    }
    // Primitive boolean: the wrapper type added needless autoboxing and risked an NPE pattern.
    boolean containerOrchestrator;
    try {
        containerOrchestrator = orchestratorTypeResolver.resolveType(stack.getOrchestrator()).containerOrchestrator();
    } catch (CloudbreakException ignored) {
        // Unresolvable orchestrator type: treat as a non-container orchestrator and proceed.
        containerOrchestrator = false;
    }
    if (containerOrchestrator) {
        // Container-based clusters must have their components deleted first; re-read the
        // cluster afterwards since the deletion modifies its persisted state.
        clusterTerminationService.deleteClusterComponents(cluster.getId());
        cluster = clusterRepository.findById(stack.getCluster().getId());
    }
    hostGroups = hostGroupService.saveOrUpdateWithMetadata(hostGroups, cluster);
    cluster = prepareCluster(hostGroups, stackRepoDetails, blueprint, stack, cluster);
    try {
        triggerClusterInstall(stack, cluster);
    } catch (CloudbreakException e) {
        // Wrap in the service-layer unchecked exception, preserving the cause.
        throw new CloudbreakServiceException(e);
    }
    return stack.getCluster();
}
Aggregations