Use of com.sequenceiq.cloudbreak.domain.ClusterComponent in project cloudbreak by Hortonworks.
The class ClusterComponentConfigProvider, method store.
public List<ClusterComponent> store(List<ClusterComponent> components, Cluster cluster) {
    for (ClusterComponent component : components) {
        component.setCluster(cluster);
        store(component);
    }
    return components;
}
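Illustrative only: a possible caller of the batch store method above, assuming a client class with an injected ClusterComponentConfigProvider field; the method name seedRepoComponents and the choice of repo payloads are hypothetical.

// Hypothetical helper; clusterComponentConfigProvider is assumed to be an injected field,
// and the repo descriptor types (AmbariRepo, StackRepoDetails) come from the snippets below.
public List<ClusterComponent> seedRepoComponents(Cluster cluster, AmbariRepo ambariRepo, StackRepoDetails hdpRepo)
        throws JsonProcessingException {
    // Serialize the repo descriptors into Json attributes and persist both components in one
    // batch call; store(components, cluster) also sets the cluster reference on each entry.
    List<ClusterComponent> components = Arrays.asList(
            new ClusterComponent(ComponentType.AMBARI_REPO_DETAILS, new Json(ambariRepo), cluster),
            new ClusterComponent(ComponentType.HDP_REPO_DETAILS, new Json(hdpRepo), cluster));
    return clusterComponentConfigProvider.store(components, cluster);
}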
Use of com.sequenceiq.cloudbreak.domain.ClusterComponent in project cloudbreak by Hortonworks.
The class ClusterBootstrapper, method bootstrapNewNodesOnHost.
private void bootstrapNewNodesOnHost(Stack stack, List<GatewayConfig> allGatewayConfigs, Set<Node> nodes, Set<Node> allNodes)
        throws CloudbreakException, CloudbreakOrchestratorException {
    HostOrchestrator hostOrchestrator = hostOrchestratorResolver.get(stack.getOrchestrator().getType());
    Cluster cluster = stack.getCluster();
    Boolean enableKnox = cluster.getGateway().getEnableGateway();
    for (InstanceMetaData gateway : stack.getGatewayInstanceMetadata()) {
        GatewayConfig gatewayConfig = gatewayConfigService.getGatewayConfig(stack, gateway, enableKnox);
        PollingResult bootstrapApiPolling = hostBootstrapApiPollingService.pollWithTimeoutSingleFailure(hostBootstrapApiCheckerTask,
                new HostBootstrapApiContext(stack, gatewayConfig, hostOrchestrator), POLL_INTERVAL, MAX_POLLING_ATTEMPTS);
        validatePollingResultForCancellation(bootstrapApiPolling, "Polling of bootstrap API was cancelled.");
    }
    byte[] stateZip = null;
    ClusterComponent stateComponent = clusterComponentProvider.getComponent(cluster.getId(), ComponentType.SALT_STATE);
    if (stateComponent != null) {
        String content = (String) stateComponent.getAttributes().getMap().getOrDefault(ComponentType.SALT_STATE.name(), "");
        if (!content.isEmpty()) {
            stateZip = Base64.decodeBase64(content);
        }
    }
    hostOrchestrator.bootstrapNewNodes(allGatewayConfigs, nodes, allNodes, stateZip, clusterDeletionBasedModel(stack.getId(), null));
    InstanceMetaData primaryGateway = stack.getPrimaryGatewayInstance();
    GatewayConfig gatewayConfig = gatewayConfigService.getGatewayConfig(stack, primaryGateway, enableKnox);
    PollingResult allNodesAvailabilityPolling = hostClusterAvailabilityPollingService.pollWithTimeoutSingleFailure(hostClusterAvailabilityCheckerTask,
            new HostOrchestratorClusterContext(stack, hostOrchestrator, gatewayConfig, nodes), POLL_INTERVAL, MAX_POLLING_ATTEMPTS);
    validatePollingResultForCancellation(allNodesAvailabilityPolling, "Polling of new nodes availability was cancelled.");
    if (TIMEOUT.equals(allNodesAvailabilityPolling)) {
        clusterBootstrapperErrorHandler.terminateFailedNodes(hostOrchestrator, null, stack, gatewayConfig, nodes);
    }
}
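The SALT_STATE lookup in the middle of the method can be read in isolation. Below is a minimal sketch of that step, assuming the same injected clusterComponentProvider as above; the helper name loadSaltStateZip is hypothetical.

// Hypothetical helper mirroring the SALT_STATE handling in bootstrapNewNodesOnHost.
private byte[] loadSaltStateZip(Long clusterId) {
    ClusterComponent stateComponent = clusterComponentProvider.getComponent(clusterId, ComponentType.SALT_STATE);
    if (stateComponent == null) {
        return null; // no custom salt state stored for this cluster
    }
    // The archive is stored as a base64 string under the SALT_STATE key of the JSON attributes.
    String content = (String) stateComponent.getAttributes().getMap().getOrDefault(ComponentType.SALT_STATE.name(), "");
    return content.isEmpty() ? null : Base64.decodeBase64(content);
}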
Use of com.sequenceiq.cloudbreak.domain.ClusterComponent in project cloudbreak by Hortonworks.
The class ClusterCreationSetupService, method determineAmbariRepoConfig.
private ClusterComponent determineAmbariRepoConfig(Optional<Component> stackAmbariRepoConfig, AmbariRepoDetailsJson ambariRepoDetailsJson,
        Optional<Component> stackImageComponent, Cluster cluster) throws IOException {
    Json json;
    if (!stackAmbariRepoConfig.isPresent()) {
        AmbariRepo ambariRepo = ambariRepoDetailsJson != null
                ? conversionService.convert(ambariRepoDetailsJson, AmbariRepo.class)
                : defaultAmbariRepoService.getDefault(getOsType(stackImageComponent));
        if (ambariRepo == null) {
            throw new BadRequestException(String.format("Couldn't determine Ambari repo for the stack: %s", cluster.getStack().getName()));
        }
        json = new Json(ambariRepo);
    } else {
        json = stackAmbariRepoConfig.get().getAttributes();
    }
    return new ClusterComponent(ComponentType.AMBARI_REPO_DETAILS, json, cluster);
}
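A hedged sketch of how the returned component might be persisted, assuming a companion method in the same ClusterCreationSetupService with an injected clusterComponentConfigProvider; the method name addAmbariRepoComponent is hypothetical.

// Hypothetical companion to determineAmbariRepoConfig; clusterComponentConfigProvider is
// assumed to be an injected field of the same service.
private void addAmbariRepoComponent(Optional<Component> stackAmbariRepoConfig, AmbariRepoDetailsJson ambariRepoDetailsJson,
        Optional<Component> stackImageComponent, Cluster cluster) throws IOException {
    ClusterComponent ambariRepoComponent = determineAmbariRepoConfig(stackAmbariRepoConfig, ambariRepoDetailsJson, stackImageComponent, cluster);
    // Persisting the component makes the AMBARI_REPO_DETAILS attributes available to later provisioning steps.
    clusterComponentConfigProvider.store(ambariRepoComponent);
}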
Use of com.sequenceiq.cloudbreak.domain.ClusterComponent in project cloudbreak by Hortonworks.
The class ClusterBootstrapper, method bootstrapOnHost.
@SuppressFBWarnings("REC_CATCH_EXCEPTION")
@SuppressWarnings("unchecked")
public void bootstrapOnHost(Stack stack) throws CloudbreakException {
    Set<Node> nodes = new HashSet<>();
    String domain = hostDiscoveryService.determineDomain(stack.getCustomDomain(), stack.getName(), stack.isClusterNameAsSubdomain());
    for (InstanceMetaData im : stack.getRunningInstanceMetaData()) {
        if (im.getPrivateIp() == null && im.getPublicIpWrapper() == null) {
            LOGGER.warn("Skipping instance metadata because the public ip and private ips are null '{}'.", im);
        } else {
            String generatedHostName = hostDiscoveryService.generateHostname(stack.getCustomHostname(), im.getInstanceGroupName(),
                    im.getPrivateId(), stack.isHostgroupNameAsHostname());
            nodes.add(new Node(im.getPrivateIp(), im.getPublicIpWrapper(), generatedHostName, domain, im.getInstanceGroupName()));
        }
    }
    try {
        HostOrchestrator hostOrchestrator = hostOrchestratorResolver.get(stack.getOrchestrator().getType());
        List<GatewayConfig> allGatewayConfig = new ArrayList<>();
        Boolean enableKnox = stack.getCluster().getGateway().getEnableGateway();
        for (InstanceMetaData gateway : stack.getGatewayInstanceMetadata()) {
            GatewayConfig gatewayConfig = gatewayConfigService.getGatewayConfig(stack, gateway, enableKnox);
            allGatewayConfig.add(gatewayConfig);
            PollingResult bootstrapApiPolling = hostBootstrapApiPollingService.pollWithTimeoutSingleFailure(hostBootstrapApiCheckerTask,
                    new HostBootstrapApiContext(stack, gatewayConfig, hostOrchestrator), POLL_INTERVAL, MAX_POLLING_ATTEMPTS);
            validatePollingResultForCancellation(bootstrapApiPolling, "Polling of bootstrap API was cancelled.");
        }
        ClusterComponent saltComponent = clusterComponentProvider.getComponent(stack.getCluster().getId(), ComponentType.SALT_STATE);
        if (saltComponent == null) {
            byte[] stateConfigZip = hostOrchestrator.getStateConfigZip();
            saltComponent = new ClusterComponent(ComponentType.SALT_STATE,
                    new Json(singletonMap(ComponentType.SALT_STATE.name(), Base64.encodeBase64String(stateConfigZip))), stack.getCluster());
            clusterComponentProvider.store(saltComponent);
        }
        hostOrchestrator.bootstrap(allGatewayConfig, nodes, clusterDeletionBasedModel(stack.getId(), null));
        InstanceMetaData primaryGateway = stack.getPrimaryGatewayInstance();
        GatewayConfig gatewayConfig = gatewayConfigService.getGatewayConfig(stack, primaryGateway, enableKnox);
        String gatewayIp = gatewayConfigService.getGatewayIp(stack, primaryGateway);
        PollingResult allNodesAvailabilityPolling = hostClusterAvailabilityPollingService.pollWithTimeoutSingleFailure(hostClusterAvailabilityCheckerTask,
                new HostOrchestratorClusterContext(stack, hostOrchestrator, gatewayConfig, nodes), POLL_INTERVAL, MAX_POLLING_ATTEMPTS);
        validatePollingResultForCancellation(allNodesAvailabilityPolling, "Polling of all nodes availability was cancelled.");
        Orchestrator orchestrator = stack.getOrchestrator();
        orchestrator.setApiEndpoint(gatewayIp + ':' + stack.getGatewayPort());
        orchestrator.setType(hostOrchestrator.name());
        orchestratorRepository.save(orchestrator);
        if (TIMEOUT.equals(allNodesAvailabilityPolling)) {
            clusterBootstrapperErrorHandler.terminateFailedNodes(hostOrchestrator, null, stack, gatewayConfig, nodes);
        }
    } catch (Exception e) {
        throw new CloudbreakException(e);
    }
}
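The SALT_STATE bookkeeping in the middle of this method can be sketched as a stand-alone step. The sketch below assumes the same injected clusterComponentProvider; the helper name ensureSaltStateComponent is hypothetical, and the exception wrapping copies the catch-all of the method above.

// Hypothetical helper isolating the "create SALT_STATE component on first bootstrap" step.
private ClusterComponent ensureSaltStateComponent(Stack stack, HostOrchestrator hostOrchestrator) throws CloudbreakException {
    try {
        ClusterComponent saltComponent = clusterComponentProvider.getComponent(stack.getCluster().getId(), ComponentType.SALT_STATE);
        if (saltComponent == null) {
            // Persist the orchestrator's default salt state archive as a base64 string so that
            // later upscales (see bootstrapNewNodesOnHost above) can reuse exactly the same states.
            byte[] stateConfigZip = hostOrchestrator.getStateConfigZip();
            saltComponent = new ClusterComponent(ComponentType.SALT_STATE,
                    new Json(singletonMap(ComponentType.SALT_STATE.name(), Base64.encodeBase64String(stateConfigZip))),
                    stack.getCluster());
            clusterComponentProvider.store(saltComponent);
        }
        return saltComponent;
    } catch (Exception e) {
        throw new CloudbreakException(e);
    }
}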
Use of com.sequenceiq.cloudbreak.domain.ClusterComponent in project cloudbreak by Hortonworks.
The class AmbariClusterService, method createHDPRepoComponent.
private void createHDPRepoComponent(StackRepoDetails stackRepoDetailsUpdate, Stack stack) {
    if (stackRepoDetailsUpdate != null) {
        StackRepoDetails stackRepoDetails = clusterComponentConfigProvider.getHDPRepo(stack.getCluster().getId());
        if (stackRepoDetails == null) {
            try {
                ClusterComponent clusterComp = new ClusterComponent(ComponentType.HDP_REPO_DETAILS, new Json(stackRepoDetailsUpdate), stack.getCluster());
                clusterComponentConfigProvider.store(clusterComp);
            } catch (JsonProcessingException ignored) {
                throw new BadRequestException(String.format("HDP Repo parameters cannot be converted. %s", stackRepoDetailsUpdate));
            }
        } else {
            ClusterComponent component = clusterComponentConfigProvider.getComponent(stack.getCluster().getId(), ComponentType.HDP_REPO_DETAILS);
            stackRepoDetails.setHdpVersion(stackRepoDetailsUpdate.getHdpVersion());
            stackRepoDetails.setVerify(stackRepoDetailsUpdate.isVerify());
            stackRepoDetails.setStack(stackRepoDetailsUpdate.getStack());
            stackRepoDetails.setUtil(stackRepoDetailsUpdate.getUtil());
            stackRepoDetails.setEnableGplRepo(stackRepoDetailsUpdate.isEnableGplRepo());
            stackRepoDetails.setKnox(stackRepoDetailsUpdate.getKnox());
            try {
                component.setAttributes(new Json(stackRepoDetails));
                clusterComponentConfigProvider.store(component);
            } catch (JsonProcessingException ignored) {
                throw new BadRequestException(String.format("HDP Repo parameters cannot be converted. %s", stackRepoDetailsUpdate));
            }
        }
    }
}
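A minimal read-back sketch to complement the method above, assuming the same injected clusterComponentConfigProvider; the method name describeHdpRepo and the composed message are illustrative only.

// Hypothetical helper reading the stored HDP_REPO_DETAILS back as a StackRepoDetails.
private String describeHdpRepo(Stack stack) {
    StackRepoDetails repo = clusterComponentConfigProvider.getHDPRepo(stack.getCluster().getId());
    if (repo == null) {
        return String.format("No HDP repo details stored for cluster %s", stack.getCluster().getId());
    }
    // getHdpVersion() and getStack() are read in createHDPRepoComponent above, so they exist on StackRepoDetails.
    return String.format("HDP version %s, stack repo %s", repo.getHdpVersion(), repo.getStack());
}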