Example usage of com.sequenceiq.cloudbreak.domain.Stack in the Cloudbreak project by Hortonworks:
the testExtendBlueprintWithKerberosOnKerberizedBlueprint method of the KerberosBlueprintServiceTest class.
@Test
public void testExtendBlueprintWithKerberosOnKerberizedBlueprint() throws IOException {
    // A blueprint that is already kerberized must pass through the manipulation unchanged,
    // so the input text doubles as the expected output.
    String kerberizedBlueprintText = FileReaderUtils.readFileFromClasspath("blueprints-jackson/bp-kerberized-test.bp");
    Blueprint blueprint = TestUtil.blueprint("name", kerberizedBlueprintText);
    Cluster cluster = TestUtil.cluster(blueprint, TestUtil.stack(), 1L, TestUtil.kerberosConfig());
    BlueprintPreparationObject preparationObject = BlueprintPreparationObject.Builder.builder()
            .withKerberosConfig(cluster.getKerberosConfig())
            .withGeneralClusterConfigs(BlueprintTestUtil.generalClusterConfigs())
            .build();
    BlueprintTextProcessor textProcessor = new BlueprintTextProcessor(blueprint.getBlueprintText());
    String resultBlueprintText = underTest.customTextManipulation(preparationObject, textProcessor).asText();
    // Compare as JSON trees so formatting differences do not matter.
    Assert.assertEquals(JsonUtil.readTree(kerberizedBlueprintText), JsonUtil.readTree(resultBlueprintText));
}
Example usage of com.sequenceiq.cloudbreak.domain.Stack in the Cloudbreak project by Hortonworks:
the testExtendBlueprintWithKerberosCustomConfig method of the KerberosBlueprintServiceTest class.
@Test
public void testExtendBlueprintWithKerberosCustomConfig() throws IOException {
    // A non-kerberized blueprint combined with a custom kerberos descriptor/krb5-conf
    // must be extended to match the pre-rendered expected blueprint.
    Blueprint blueprint = TestUtil.blueprint("name",
            FileReaderUtils.readFileFromClasspath("blueprints-jackson/bp-not-kerberized.bp"));
    KerberosConfig customKerberosConfig = new KerberosConfig();
    customKerberosConfig.setDescriptor("{\"kerberos-env\":{\"properties\":{\"install_packages\":false,\"realm\":\"REALM.BP\",\"kdc_type\":\"mit-kdc\",\"kdc_hosts\":\"kdc_host.bp\",\"admin_server_host\":\"admin_server_host.bp\",\"encryption_types\":\"enc_types.bp\",\"ldap_url\":\"\",\"container_dn\":\"\"}}}");
    customKerberosConfig.setKrb5Conf("{\"krb5-conf\":{\"properties\":{\"domains\":\".domains.bp\",\"manage_krb5_conf\":\"true\",\"content\":\"content.bp\"}}}");
    customKerberosConfig.setTcpAllowed(true);
    Cluster cluster = TestUtil.cluster(blueprint, TestUtil.stack(), 1L, customKerberosConfig);
    BlueprintPreparationObject preparationObject = BlueprintPreparationObject.Builder.builder()
            .withKerberosConfig(cluster.getKerberosConfig())
            .withGeneralClusterConfigs(BlueprintTestUtil.generalClusterConfigs())
            .build();
    BlueprintTextProcessor textProcessor = new BlueprintTextProcessor(blueprint.getBlueprintText());
    String resultBlueprintText = underTest.customTextManipulation(preparationObject, textProcessor).asText();
    String expectedBlueprintText =
            FileReaderUtils.readFileFromClasspath("blueprints-jackson/bp-not-kerberized-custom-config-expected.bp");
    // Compare as JSON trees so formatting differences do not matter.
    Assert.assertEquals(JsonUtil.readTree(expectedBlueprintText), JsonUtil.readTree(resultBlueprintText));
}
Example usage of com.sequenceiq.cloudbreak.domain.Stack in the Cloudbreak project by Hortonworks:
the changePrimaryGateway method of the ClusterHostServiceRunner class.
/**
 * Promotes a non-primary gateway of the given stack to primary via the host orchestrator.
 *
 * @param stack the stack whose primary gateway should be replaced
 * @return the hostname of the newly promoted primary gateway
 * @throws CloudbreakException if no non-primary gateway candidate exists,
 *         or if the orchestrator fails to perform the change
 */
public String changePrimaryGateway(Stack stack) throws CloudbreakException {
    GatewayConfig currentPrimary = gatewayConfigService.getPrimaryGatewayConfig(stack);
    List<GatewayConfig> allGatewayConfigs = gatewayConfigService.getAllGatewayConfigs(stack);
    // The first gateway that is not already primary becomes the promotion candidate.
    GatewayConfig promotedGateway = allGatewayConfigs.stream()
            .filter(gatewayConfig -> !gatewayConfig.isPrimary())
            .findFirst()
            .orElseThrow(() -> new CloudbreakException("Primary gateway change is not possible because there is no available node for the action"));
    Set<Node> allNodes = collectNodes(stack);
    try {
        hostOrchestratorResolver.get(stack.getOrchestrator().getType())
                .changePrimaryGateway(currentPrimary, promotedGateway, allGatewayConfigs, allNodes,
                        clusterDeletionBasedModel(stack.getId(), stack.getCluster().getId()));
        return promotedGateway.getHostname();
    } catch (CloudbreakOrchestratorException ex) {
        // Wrap the orchestrator failure, preserving the cause for callers.
        throw new CloudbreakException(ex);
    }
}
Example usage of com.sequenceiq.cloudbreak.domain.Stack in the Cloudbreak project by Hortonworks:
the saveSharedRangerService method of the ClusterHostServiceRunner class.
/**
 * Adds a "datalake-services" salt pillar entry describing the attached data lake's
 * Ranger admin hosts, if this stack references a data lake.
 *
 * @param stack the workload stack that may reference a data lake by id
 * @param servicePillar the pillar map to extend (mutated in place)
 */
private void saveSharedRangerService(Stack stack, Map<String, SaltPillarProperties> servicePillar) {
    Long datalakeId = stack.getDatalakeId();
    if (datalakeId == null) {
        // No attached data lake: nothing to share.
        return;
    }
    Cluster dataLakeCluster = stackRepository.findOne(datalakeId).getCluster();
    // Host groups of the data lake blueprint that carry the RANGER_ADMIN component.
    Set<String> rangerGroupNames = blueprintProcessorFactory
            .get(dataLakeCluster.getBlueprint().getBlueprintText())
            .getHostGroupsWithComponent("RANGER_ADMIN");
    // Collect every hostname belonging to those groups.
    Set<String> rangerHostNames = dataLakeCluster.getHostGroups().stream()
            .filter(hostGroup -> rangerGroupNames.contains(hostGroup.getName()))
            .flatMap(hostGroup -> hostGroup.getHostMetadata().stream())
            .map(HostMetadata::getHostName)
            .collect(Collectors.toSet());
    Map<String, Object> rangerMap = new HashMap<>();
    rangerMap.put("servers", rangerHostNames);
    rangerMap.put("port", "6080");
    servicePillar.put("datalake-services", new SaltPillarProperties("/datalake/init.sls",
            singletonMap("datalake-services", singletonMap("ranger", rangerMap))));
}
Example usage of com.sequenceiq.cloudbreak.domain.Stack in the Cloudbreak project by Hortonworks:
the accept method of the ClusterCredentialChangeHandler class.
/**
 * Handles an Ambari cluster credential change request: replaces or updates the
 * Ambari user credentials on the stack, then notifies the event bus with the outcome.
 * Failures are captured into the result rather than propagated, so the flow can react.
 */
@Override
public void accept(Event<ClusterCredentialChangeRequest> event) {
    ClusterCredentialChangeRequest changeRequest = event.getData();
    ClusterCredentialChangeResult changeResult;
    try {
        Stack stack = stackService.getByIdWithLists(changeRequest.getStackId());
        switch(changeRequest.getType()) {
            case REPLACE:
                // Replace both the username and the password on the Ambari cluster.
                ambariClusterConnector.replaceUserNamePassword(stack, changeRequest.getUser(), changeRequest.getPassword());
                break;
            case UPDATE:
                // Keep the username, only change the password.
                ambariClusterConnector.updateUserNamePassword(stack, changeRequest.getPassword());
                break;
            default:
                throw new UnsupportedOperationException("Ambari credential update request not supported: " + changeRequest.getType());
        }
        changeResult = new ClusterCredentialChangeResult(changeRequest);
    } catch (Exception e) {
        // Boundary handler: convert any failure into a failed result event.
        changeResult = new ClusterCredentialChangeResult(e.getMessage(), e, changeRequest);
    }
    eventBus.notify(changeResult.selector(), new Event<>(event.getHeaders(), changeResult));
}
Aggregations