use of com.sequenceiq.cloudbreak.domain.stack.cluster.gateway.Gateway in project cloudbreak by hortonworks.
the class TestUtil method gatewayEnabledWithExposedKnoxServices.
public static Gateway gatewayEnabledWithExposedKnoxServices(String... exposedKnoxServices) {
    // Start from a gateway that already has Knox enabled, then attach a single
    // topology whose exposed services are the given Knox service names.
    Gateway gateway = gatewayEnabled();
    GatewayTopology gatewayTopology = new GatewayTopology();
    gatewayTopology.setExposedServices(new Json(exposedKnoxServices));
    gateway.setTopologies(Sets.newHashSet(gatewayTopology));
    return gateway;
}
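A minimal sketch of how a test might call this helper. The service names are hypothetical, and getTopologies() is assumed to be the usual getter paired with the setTopologies() call above.

import static org.junit.Assert.assertEquals;

import org.junit.Test;

import com.sequenceiq.cloudbreak.domain.stack.cluster.gateway.Gateway;

public class GatewayFixtureSketchTest {

    @Test
    public void buildsASingleTopologyForTheGivenKnoxServices() {
        // Hypothetical Knox service names, used purely for illustration.
        Gateway gateway = TestUtil.gatewayEnabledWithExposedKnoxServices("CM-UI", "WEBHDFS");
        // getTopologies() is assumed as the getter paired with setTopologies() above.
        assertEquals(1, gateway.getTopologies().size());
    }
}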
use of com.sequenceiq.cloudbreak.domain.stack.cluster.gateway.Gateway in project cloudbreak by hortonworks.
the class OpdbServiceEndpointCollectorTest method testPrepareClusterExposedServices.
@Test
public void testPrepareClusterExposedServices() {
    Cluster cluster = createClusterWithComponents(GatewayType.INDIVIDUAL);
    cluster.getGateway().setGatewayPort(443);
    mockTemplateComponents();
    mockComponentLocator(Lists.newArrayList("10.0.0.1"));

    Map<String, Collection<ClusterExposedServiceV4Response>> clusterExposedServicesMap =
            underTest.prepareClusterExposedServices(cluster, "10.0.0.1");

    // One entry per Knox topology: "proxy" for UIs, "proxy-api" for APIs.
    assertEquals(clusterExposedServicesMap.toString(), 2L, clusterExposedServicesMap.keySet().size());
    Collection<ClusterExposedServiceV4Response> proxyServices = clusterExposedServicesMap.get("proxy");
    Collection<ClusterExposedServiceV4Response> proxyApiServices = clusterExposedServicesMap.get("proxy-api");
    assertNotNull("Topology proxy services was null", proxyServices);
    assertNotNull("Topology proxy API services was null", proxyApiServices);

    Set<String> proxyServiceNames = proxyServices.stream()
            .map(ClusterExposedServiceV4Response::getKnoxService)
            .collect(Collectors.toSet());
    Set<String> proxyApiServiceNames = proxyApiServices.stream()
            .map(ClusterExposedServiceV4Response::getKnoxService)
            .collect(Collectors.toSet());
    assertEquals(proxyServiceNames.toString(), 2, proxyServiceNames.size());
    assertEquals(proxyApiServiceNames.toString(), 4, proxyApiServiceNames.size());
    assertEquals(new HashSet<>(Arrays.asList("CM-UI", "HBASEUI")), proxyServiceNames);
    assertEquals(new HashSet<>(Arrays.asList("CM-API", "HBASEJARS", "WEBHBASE", "AVATICA")), proxyApiServiceNames);

    Optional<ClusterExposedServiceV4Response> hbasejars = proxyApiServices.stream()
            .filter(service -> service.getKnoxService().equals("HBASEJARS"))
            .findFirst();
    Optional<ClusterExposedServiceV4Response> avatica = proxyApiServices.stream()
            .filter(service -> service.getKnoxService().equals("AVATICA"))
            .findFirst();
    assertTrue(hbasejars.isPresent());
    assertTrue(avatica.isPresent());
    // Service URLs follow https://<manager-address>/<gateway-path>/<topology>/<service-path>.
    assertEquals("https://10.0.0.1/gateway-path/proxy-api/hbase/jars", hbasejars.get().getServiceUrl());
    assertEquals("https://10.0.0.1/gateway-path/proxy-api/avatica/", avatica.get().getServiceUrl());
}
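The returned map is keyed by Knox topology name, so consuming it is a simple nested iteration. A minimal sketch, relying only on the getKnoxService() and getServiceUrl() accessors exercised in the test above:

for (Map.Entry<String, Collection<ClusterExposedServiceV4Response>> entry : clusterExposedServicesMap.entrySet()) {
    String topology = entry.getKey(); // "proxy" or "proxy-api" in this test
    for (ClusterExposedServiceV4Response service : entry.getValue()) {
        System.out.printf("%s: %s -> %s%n", topology, service.getKnoxService(), service.getServiceUrl());
    }
}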
use of com.sequenceiq.cloudbreak.domain.stack.cluster.gateway.Gateway in project cloudbreak by hortonworks.
the class OpdbServiceEndpointCollectorTest method clusterWithOrchestrator.
private Cluster clusterWithOrchestrator(String orchestratorType) {
    // Wire up a minimal Cluster -> Stack/Orchestrator/Gateway graph backed by
    // a real OpDB blueprint read from the test classpath.
    Cluster cluster = new Cluster();
    Stack stack = new Stack();
    Orchestrator orchestrator = new Orchestrator();
    orchestrator.setType(orchestratorType);
    Gateway gateway = new Gateway();
    gateway.setPath(GATEWAY_PATH);
    stack.setOrchestrator(orchestrator);
    cluster.setStack(stack);
    cluster.setGateway(gateway);
    Blueprint blueprint = new Blueprint();
    try {
        String testBlueprint = FileReaderUtils.readFileFromClasspath("/defaults/blueprints/7.2.10/cdp-opdb.bp");
        blueprint.setBlueprintText(testBlueprint);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    cluster.setBlueprint(blueprint);
    return cluster;
}
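A hedged sketch of calling this fixture from a test. "SALT" is an assumed orchestrator type, and getPath() is assumed to be the getter paired with the setPath() call above.

// "SALT" is an assumption for the orchestrator type; any string accepted by
// Orchestrator.setType(...) would exercise the same wiring.
Cluster cluster = clusterWithOrchestrator("SALT");
// getPath() is assumed to pair with setPath(GATEWAY_PATH) in the fixture.
assertEquals(GATEWAY_PATH, cluster.getGateway().getPath());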
use of com.sequenceiq.cloudbreak.domain.stack.cluster.gateway.Gateway in project cloudbreak by hortonworks.
the class ServiceEndpointCollectorTest method testPrepareClusterExposedServices.
@Test
public void testPrepareClusterExposedServices() {
    Cluster cluster = createClusterWithComponents(
            new ExposedService[] { exposedService("ATLAS") },
            new ExposedService[] { exposedService("HIVE_SERVER"), exposedService("WEBHDFS") },
            GatewayType.INDIVIDUAL);
    cluster.getGateway().setGatewayPort(443);
    mockBlueprintTextProcessor();
    mockComponentLocator(Lists.newArrayList("10.0.0.1"));

    Map<String, Collection<ClusterExposedServiceV4Response>> clusterExposedServicesMap =
            underTest.prepareClusterExposedServices(cluster, "10.0.0.1");

    assertEquals(4L, clusterExposedServicesMap.keySet().size());

    Collection<ClusterExposedServiceV4Response> topology2ClusterExposedServiceV4Responses =
            clusterExposedServicesMap.get("topology2");

    Optional<ClusterExposedServiceV4Response> webHDFS = topology2ClusterExposedServiceV4Responses.stream()
            .filter(service -> "WEBHDFS".equals(service.getKnoxService()))
            .findFirst();
    if (webHDFS.isPresent()) {
        assertEquals("https://10.0.0.1/gateway-path/topology2/webhdfs/v1", webHDFS.get().getServiceUrl());
        assertEquals("WEBHDFS", webHDFS.get().getKnoxService());
        assertEquals("WebHDFS", webHDFS.get().getDisplayName());
        assertEquals("NAMENODE", webHDFS.get().getServiceName());
        assertTrue(webHDFS.get().isOpen());
    }

    Optional<ClusterExposedServiceV4Response> sparkHistoryUI = topology2ClusterExposedServiceV4Responses.stream()
            .filter(service -> "SPARKHISTORYUI".equals(service.getKnoxService()))
            .findFirst();
    if (sparkHistoryUI.isPresent()) {
        assertEquals("https://10.0.0.1/gateway-path/topology2/sparkhistory/", sparkHistoryUI.get().getServiceUrl());
        assertEquals("SPARKHISTORYUI", sparkHistoryUI.get().getKnoxService());
        assertEquals("Spark 1.x History Server", sparkHistoryUI.get().getDisplayName());
        assertEquals("SPARK_YARN_HISTORY_SERVER", sparkHistoryUI.get().getServiceName());
        assertFalse(sparkHistoryUI.get().isOpen());
    }

    Optional<ClusterExposedServiceV4Response> hiveServer = topology2ClusterExposedServiceV4Responses.stream()
            .filter(service -> "HIVE".equals(service.getKnoxService()))
            .findFirst();
    if (hiveServer.isPresent()) {
        assertEquals("jdbc:hive2://10.0.0.1/;ssl=true;sslTrustStore=/cert/gateway.jks;trustStorePassword=${GATEWAY_JKS_PASSWORD};"
                + "transportMode=http;httpPath=gateway-path/topology2/hive", hiveServer.get().getServiceUrl());
        assertEquals("HIVE", hiveServer.get().getKnoxService());
        assertEquals("Hive Server", hiveServer.get().getDisplayName());
        assertEquals("HIVE_SERVER", hiveServer.get().getServiceName());
        assertTrue(hiveServer.get().isOpen());
    }
}
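The Hive URL asserted above is a complete jdbc:hive2 connection string apart from its ${GATEWAY_JKS_PASSWORD} placeholder. A minimal, hypothetical sketch of resolving the placeholder and connecting with the standard JDBC API, assuming the Apache Hive JDBC driver is on the classpath; serviceUrl and gatewayJksPassword are caller-supplied values, not part of the test:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

// serviceUrl is the value returned by getServiceUrl() above; the caller must
// resolve the truststore-password placeholder before handing it to the driver.
static Connection connectThroughKnox(String serviceUrl, String gatewayJksPassword) throws SQLException {
    String url = serviceUrl.replace("${GATEWAY_JKS_PASSWORD}", gatewayJksPassword);
    return DriverManager.getConnection(url);
}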
use of com.sequenceiq.cloudbreak.domain.stack.cluster.gateway.Gateway in project cloudbreak by hortonworks.
the class StackToTemplatePreparationObjectConverter method convert.
public TemplatePreparationObject convert(Stack source) {
    try {
        Map<String, Collection<ClusterExposedServiceView>> views = serviceEndpointCollector.prepareClusterExposedServicesViews(
                source.getCluster(), stackUtil.extractClusterManagerAddress(source));
        DetailedEnvironmentResponse environment = environmentClientService.getByCrn(source.getEnvironmentCrn());
        Credential credential = credentialConverter.convert(environment.getCredential());
        Cluster cluster = clusterService.getById(source.getCluster().getId());
        FileSystem fileSystem = cluster.getFileSystem();
        Optional<LdapView> ldapView = ldapConfigService.get(source.getEnvironmentCrn(), source.getName());
        ClouderaManagerRepo cm = clusterComponentConfigProvider.getClouderaManagerRepoDetails(cluster.getId());
        List<ClouderaManagerProduct> products = clusterComponentConfigProvider.getClouderaManagerProductDetails(cluster.getId());
        BaseFileSystemConfigurationsView fileSystemConfigurationView = getFileSystemConfigurationView(credential, source, fileSystem);
        updateFileSystemViewWithBackupLocation(environment, fileSystemConfigurationView);
        StackInputs stackInputs = getStackInputs(source);
        // Null-safe merge: datalake inputs overlay the fix inputs.
        Map<String, Object> fixInputs = stackInputs.getFixInputs() == null ? new HashMap<>() : stackInputs.getFixInputs();
        fixInputs.putAll(stackInputs.getDatalakeInputs() == null ? new HashMap<>() : stackInputs.getDatalakeInputs());
        // The gateway is optional: clusters without Knox have no Gateway entity,
        // so the sign key is only read when a gateway exists.
        Gateway gateway = cluster.getGateway();
        String gatewaySignKey = null;
        if (gateway != null) {
            gatewaySignKey = gateway.getSignKey();
        }
        IdBroker idbroker = idBrokerService.getByCluster(cluster);
        if (idbroker == null) {
            idbroker = idBrokerConverterUtil.generateIdBrokerSignKeys(cluster);
            idBrokerService.save(idbroker);
        }
        String envCrnForVirtualGroups = getEnvironmentCrnForVirtualGroups(environment);
        VirtualGroupRequest virtualGroupRequest = new VirtualGroupRequest(envCrnForVirtualGroups,
                ldapView.map(LdapView::getAdminGroup).orElse(""));
        String accountId = Crn.safeFromString(source.getResourceCrn()).getAccountId();
        List<UserManagementProto.ServicePrincipalCloudIdentities> servicePrincipalCloudIdentities =
                grpcUmsClient.listServicePrincipalCloudIdentities(accountId, source.getEnvironmentCrn(), MDCUtils.getRequestId());
        BlueprintView blueprintView = blueprintViewProvider.getBlueprintView(cluster.getBlueprint());
        Optional<String> version = Optional.ofNullable(blueprintView.getVersion());
        Builder builder = Builder.builder()
                .withCloudPlatform(CloudPlatform.valueOf(source.getCloudPlatform()))
                .withRdsConfigs(postgresConfigService.createRdsConfigIfNeeded(source, cluster))
                .withRdsSslCertificateFilePath(dbCertificateProvider.getSslCertsFilePath())
                .withGateway(gateway, gatewaySignKey, exposedServiceCollector.getAllKnoxExposed(version))
                .withIdBroker(idbroker)
                .withCustomConfigurationsView(getCustomConfigurationsView(source, cluster))
                .withCustomInputs(stackInputs.getCustomInputs() == null ? new HashMap<>() : stackInputs.getCustomInputs())
                .withFixInputs(fixInputs)
                .withBlueprintView(blueprintView)
                .withFileSystemConfigurationView(fileSystemConfigurationView)
                .withGeneralClusterConfigs(calculateGeneralClusterConfigs(source, cluster))
                .withLdapConfig(ldapView.orElse(null))
                .withKerberosConfig(kerberosConfigService.get(source.getEnvironmentCrn(), source.getName()).orElse(null))
                .withProductDetails(cm, products)
                .withExposedServices(views)
                .withDefaultTags(getStackTags(source))
                .withSharedServiceConfigs(datalakeService.createSharedServiceConfigsView(source))
                .withStackType(source.getType())
                .withVirtualGroupView(virtualGroupRequest);
        transactionService.required(() -> {
            builder.withHostgroups(hostGroupService.getByCluster(cluster.getId()));
        });
        decorateBuilderWithPlacement(source, builder);
        decorateBuilderWithAccountMapping(source, environment, credential, builder, virtualGroupRequest);
        decorateBuilderWithServicePrincipals(source, builder, servicePrincipalCloudIdentities);
        decorateDatalakeView(source, builder);
        return builder.build();
    } catch (AccountTagValidationFailed aTVF) {
        throw new CloudbreakServiceException(aTVF);
    } catch (BlueprintProcessingException | IOException | TransactionService.TransactionExecutionException e) {
        throw new CloudbreakServiceException(e.getMessage(), e);
    }
}
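The two getFixInputs()/getDatalakeInputs() lines above implement a null-safe overlay merge. Isolated as a hedged, self-contained helper sketch (mergeNullable is a hypothetical name, not part of the converter):

import java.util.HashMap;
import java.util.Map;

// Overlay entries from "overlay" onto "base", treating null maps as empty;
// the result is a fresh map, so neither argument is mutated.
static Map<String, Object> mergeNullable(Map<String, Object> base, Map<String, Object> overlay) {
    Map<String, Object> result = base == null ? new HashMap<>() : new HashMap<>(base);
    if (overlay != null) {
        result.putAll(overlay);
    }
    return result;
}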