Use of com.sequenceiq.cloudbreak.domain.stack.Stack in project cloudbreak by hortonworks.
The class ClusterServiceRunner, method redeployGatewayPillar:
public void redeployGatewayPillar(Long stackId) {
    Stack stack = stackService.getByIdWithListsInTransaction(stackId);
    Long clusterId = stack.getCluster().getId();
    Cluster cluster = clusterService.findOneWithLists(clusterId)
            .orElseThrow(NotFoundException.notFound("Cluster", clusterId));
    hostRunner.redeployGatewayPillarOnly(stack, cluster);
}
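For context, a minimal Mockito-style unit test sketch (not taken from the Cloudbreak test suite) of how this delegation could be verified. The collaborator field names stackService, clusterService and hostRunner follow the snippet above; the entity setters and the omitted Cloudbreak and JUnit/Mockito imports are assumptions.

// Hypothetical test sketch; imports for the Cloudbreak classes and for
// JUnit 5 / Mockito (when, verify, Optional) are omitted for brevity.
@ExtendWith(MockitoExtension.class)
class ClusterServiceRunnerRedeployGatewayPillarTest {

    @Mock
    private StackService stackService;

    @Mock
    private ClusterService clusterService;

    @Mock
    private ClusterHostServiceRunner hostRunner;

    @InjectMocks
    private ClusterServiceRunner underTest;

    @Test
    void redeploysGatewayPillarForTheStacksCluster() {
        // Assumed setters on the JPA entities.
        Cluster cluster = new Cluster();
        cluster.setId(2L);
        Stack stack = new Stack();
        stack.setCluster(cluster);

        when(stackService.getByIdWithListsInTransaction(1L)).thenReturn(stack);
        when(clusterService.findOneWithLists(2L)).thenReturn(Optional.of(cluster));

        underTest.redeployGatewayPillar(1L);

        // The method should hand both the stack and the re-loaded cluster to the host runner.
        verify(hostRunner).redeployGatewayPillarOnly(stack, cluster);
    }
}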
Use of com.sequenceiq.cloudbreak.domain.stack.Stack in project cloudbreak by hortonworks.
The class StackToStackV4RequestConverter, method convert:
public StackV4Request convert(Stack source) {
    StackV4Request stackV4Request = new StackV4Request();
    stackV4Request.setCloudPlatform(getIfNotNull(source.getCloudPlatform(), cp -> Enum.valueOf(CloudPlatform.class, cp)));
    stackV4Request.setEnvironmentCrn(source.getEnvironmentCrn());
    stackV4Request.setCustomDomain(getCustomDomainSettings(source));
    providerParameterCalculator.parse(new HashMap<>(source.getParameters()), stackV4Request);
    stackV4Request.setAuthentication(stackAuthenticationToStackAuthenticationV4RequestConverter.convert(source.getStackAuthentication()));
    stackV4Request.setNetwork(networkToNetworkV4RequestConverter.convert(source.getNetwork()));
    stackV4Request.setCluster(clusterToClusterV4RequestConverter.convert(source.getCluster()));
    stackV4Request.setExternalDatabase(getIfNotNull(source, stackToExternalDatabaseRequestConverter::convert));
    if (!source.getLoadBalancers().isEmpty()) {
        stackV4Request.setEnableLoadBalancer(true);
    }
    stackV4Request.setInstanceGroups(getInstanceGroups(source));
    prepareImage(source, stackV4Request);
    prepareTags(source, stackV4Request);
    prepareTelemetryRequest(source, stackV4Request);
    datalakeService.prepareDatalakeRequest(source, stackV4Request);
    stackV4Request.setPlacement(getPlacementSettings(source.getRegion(), source.getAvailabilityZone()));
    prepareInputs(source, stackV4Request);
    stackV4Request.setTimeToLive(getStackTimeToLive(source));
    return stackV4Request;
}
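Since convert(Stack) rebuilds a full creation request from a persisted stack, one plausible use is exporting that request as JSON, for example to seed a new stack definition. A hypothetical caller sketch, assuming the converter is injectable as a Spring bean and that StackV4Request serializes cleanly with Jackson; the exporter class itself is not part of Cloudbreak.

// Hypothetical usage sketch; Cloudbreak, Spring and Jackson imports omitted for brevity.
@Component
public class StackRequestExporter {

    @Inject
    private StackToStackV4RequestConverter stackToStackV4RequestConverter;

    public String exportAsJson(Stack stack) throws JsonProcessingException {
        // convert(Stack) is the method shown above; serialization as JSON is an assumption here.
        StackV4Request request = stackToStackV4RequestConverter.convert(stack);
        return new ObjectMapper()
                .writerWithDefaultPrettyPrinter()
                .writeValueAsString(request);
    }
}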
Use of com.sequenceiq.cloudbreak.domain.stack.Stack in project cloudbreak by hortonworks.
The class ClusterHostServiceRunner, method changePrimaryGateway:
public String changePrimaryGateway(Stack stack) throws CloudbreakException {
    GatewayConfig formerPrimaryGatewayConfig = gatewayConfigService.getPrimaryGatewayConfig(stack);
    List<GatewayConfig> gatewayConfigs = gatewayConfigService.getAllGatewayConfigs(stack);
    Optional<GatewayConfig> newPrimaryCandidate = gatewayConfigs.stream().filter(gc -> !gc.isPrimary()).findFirst();
    if (newPrimaryCandidate.isPresent()) {
        GatewayConfig newPrimary = newPrimaryCandidate.get();
        Set<Node> allNodes = stackUtil.collectNodes(stack);
        try {
            hostOrchestrator.changePrimaryGateway(formerPrimaryGatewayConfig, newPrimary, gatewayConfigs, allNodes,
                    clusterDeletionBasedModel(stack.getId(), stack.getCluster().getId()));
            return newPrimary.getHostname();
        } catch (CloudbreakOrchestratorException ex) {
            throw new CloudbreakException(ex);
        }
    } else {
        throw new CloudbreakException("Primary gateway change is not possible because there is no available node for the action");
    }
}
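The choice of the new primary is simply the first gateway config that is not the current primary. A standalone sketch of that selection step, using a simplified record as a stand-in for Cloudbreak's GatewayConfig; the hostnames are placeholders.

import java.util.List;
import java.util.Optional;

public class PrimaryGatewaySelection {

    // Simplified stand-in for GatewayConfig, keeping only the fields the filter needs.
    record Gateway(String hostname, boolean primary) { }

    static Optional<Gateway> pickNewPrimary(List<Gateway> gateways) {
        // Mirrors the filter in changePrimaryGateway: any non-primary gateway qualifies.
        return gateways.stream().filter(gc -> !gc.primary()).findFirst();
    }

    public static void main(String[] args) {
        List<Gateway> gateways = List.of(
                new Gateway("gw-0.example.internal", true),
                new Gateway("gw-1.example.internal", false),
                new Gateway("gw-2.example.internal", false));
        // Prints gw-1.example.internal: the first gateway that is not the current primary.
        pickNewPrimary(gateways).map(Gateway::hostname).ifPresent(System.out::println);
    }
}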
Use of com.sequenceiq.cloudbreak.domain.stack.Stack in project cloudbreak by hortonworks.
The class ClusterHostServiceRunner, method createSaltConfig:
private SaltConfig createSaltConfig(Stack stack, Cluster cluster, List<GrainProperties> grainsProperties)
        throws IOException, CloudbreakOrchestratorException {
    GatewayConfig primaryGatewayConfig = gatewayConfigService.getPrimaryGatewayConfig(stack);
    ClouderaManagerRepo clouderaManagerRepo = clusterComponentConfigProvider.getClouderaManagerRepoDetails(cluster.getId());
    Map<String, SaltPillarProperties> servicePillar = new HashMap<>();
    KerberosConfig kerberosConfig = kerberosConfigService.get(stack.getEnvironmentCrn(), stack.getName()).orElse(null);
    saveCustomNameservers(stack, kerberosConfig, servicePillar);
    servicePillar.putAll(createUnboundEliminationPillar(stack.getDomainDnsResolver()));
    addKerberosConfig(servicePillar, kerberosConfig);
    servicePillar.putAll(hostAttributeDecorator.createHostAttributePillars(stack));
    servicePillar.put("discovery", new SaltPillarProperties("/discovery/init.sls", singletonMap("platform", stack.cloudPlatform())));
    String virtualGroupsEnvironmentCrn = environmentConfigProvider.getParentEnvironmentCrn(stack.getEnvironmentCrn());
    boolean deployedInChildEnvironment = !virtualGroupsEnvironmentCrn.equals(stack.getEnvironmentCrn());
    Map<String, ? extends Serializable> clusterProperties =
            Map.of("name", stack.getCluster().getName(), "deployedInChildEnvironment", deployedInChildEnvironment);
    servicePillar.put("metadata", new SaltPillarProperties("/metadata/init.sls", singletonMap("cluster", clusterProperties)));
    ClusterPreCreationApi connector = clusterApiConnectors.getConnector(cluster);
    Map<String, List<String>> serviceLocations = getServiceLocations(cluster);
    Optional<LdapView> ldapView = ldapConfigService.get(stack.getEnvironmentCrn(), stack.getName());
    VirtualGroupRequest virtualGroupRequest = getVirtualGroupRequest(virtualGroupsEnvironmentCrn, ldapView);
    servicePillar.putAll(createGatewayPillar(primaryGatewayConfig, cluster, stack, virtualGroupRequest, connector,
            kerberosConfig, serviceLocations, clouderaManagerRepo));
    saveIdBrokerPillar(cluster, servicePillar);
    postgresConfigService.decorateServicePillarWithPostgresIfNeeded(servicePillar, stack, cluster);
    addClouderaManagerConfig(stack, cluster, servicePillar, clouderaManagerRepo, primaryGatewayConfig);
    ldapView.ifPresent(ldap -> saveLdapPillar(ldap, servicePillar));
    saveSssdAdPillar(servicePillar, kerberosConfig);
    servicePillar.putAll(saveSssdIpaPillar(kerberosConfig, serviceLocations, stack.getEnvironmentCrn()));
    Map<String, Map<String, String>> mountPathMap = stack.getInstanceGroups().stream()
            .flatMap(group -> group.getInstanceMetaDataSet().stream()
                    .filter(instanceMetaData -> instanceMetaData.getDiscoveryFQDN() != null)
                    .collect(Collectors.toMap(
                            InstanceMetaData::getDiscoveryFQDN,
                            node -> Map.of("mount_path", getMountPath(group),
                                    "cloud_platform", stack.getCloudPlatform(),
                                    "temporary_storage", group.getTemplate().getTemporaryStorage().name()),
                            (l, r) -> Map.of("mount_path", getMountPath(group),
                                    "cloud_platform", stack.getCloudPlatform(),
                                    "temporary_storage", group.getTemplate().getTemporaryStorage().name())))
                    .entrySet().stream())
            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
    servicePillar.put("startup", new SaltPillarProperties("/mount/startup.sls", singletonMap("mount", mountPathMap)));
    proxyConfigProvider.decoratePillarWithProxyDataIfNeeded(servicePillar, cluster);
    decoratePillarWithJdbcConnectors(cluster, servicePillar);
    return new SaltConfig(servicePillar, grainsProperties);
}
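To make the pillar layout easier to picture, here is a reduced fragment mirroring only the inline "discovery" and "metadata" pillars built above. The platform, cluster name and child-environment flag are placeholder values, the grain properties list is left empty, and the fragment assumes the same imports as the method itself (singletonMap from java.util.Collections, the Salt model classes from the orchestrator API).

// Reduced sketch of two pillar entries from createSaltConfig; values are placeholders.
Map<String, SaltPillarProperties> servicePillar = new HashMap<>();

servicePillar.put("discovery",
        new SaltPillarProperties("/discovery/init.sls",
                singletonMap("platform", "AWS")));

Map<String, ? extends Serializable> clusterProperties =
        Map.of("name", "my-cluster", "deployedInChildEnvironment", false);
servicePillar.put("metadata",
        new SaltPillarProperties("/metadata/init.sls",
                singletonMap("cluster", clusterProperties)));

// The assembled pillars, together with the per-host grain properties, become the SaltConfig
// that the host orchestrator pushes to the minions.
SaltConfig saltConfig = new SaltConfig(servicePillar, List.of());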
Use of com.sequenceiq.cloudbreak.domain.stack.Stack in project cloudbreak by hortonworks.
The class AmbariClusterResetService, method resetCluster:
public void resetCluster(Long stackId) throws CloudbreakOrchestratorException {
    Stack stack = stackService.getByIdWithListsInTransaction(stackId);
    InstanceMetaData gatewayInstance = stack.getPrimaryGatewayInstance();
    GatewayConfig gatewayConfig = gatewayConfigService.getGatewayConfig(stack, gatewayInstance, stack.getCluster().hasGateway());
    Set<String> gatewayFQDN = Collections.singleton(gatewayInstance.getDiscoveryFQDN());
    ExitCriteriaModel exitCriteriaModel = clusterDeletionBasedModel(stack.getId(), stack.getCluster().getId());
    hostOrchestrator.resetClusterManager(gatewayConfig, gatewayFQDN, stackUtil.collectNodes(stack), exitCriteriaModel);
}
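A hypothetical caller sketch (not an actual Cloudbreak flow handler) showing how the reset might be invoked for a stack id, wrapping the orchestrator failure the same way changePrimaryGateway does above. The handler class, its field name and the Spring wiring are assumptions; resetCluster(Long) is the method shown in the snippet.

// Hypothetical caller sketch; Cloudbreak and Spring imports omitted for brevity.
@Component
public class ClusterResetHandler {

    @Inject
    private AmbariClusterResetService ambariClusterResetService;

    public void handle(Long stackId) throws CloudbreakException {
        try {
            ambariClusterResetService.resetCluster(stackId);
        } catch (CloudbreakOrchestratorException e) {
            // resetClusterManager failed on the primary gateway; surface it to the caller,
            // mirroring the exception wrapping used in changePrimaryGateway.
            throw new CloudbreakException(e);
        }
    }
}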