Use of com.sequenceiq.cloudbreak.domain.json.Json in project cloudbreak by hortonworks.
The class SmartSenseConfigProvider, method getSmartSenseServerConfigs.
private Collection<? extends BlueprintConfigurationEntry> getSmartSenseServerConfigs(BlueprintPreparationObject source, String smartSenseId) {
    Collection<BlueprintConfigurationEntry> configs = new ArrayList<>();
    configs.add(new BlueprintConfigurationEntry(SMART_SENSE_SERVER_CONFIG_FILE, "customer.account.name", "Hortonworks_Cloud_HDP"));
    configs.add(new BlueprintConfigurationEntry(SMART_SENSE_SERVER_CONFIG_FILE, "customer.notification.email", "aws-marketplace@hortonworks.com"));
    String clusterName = getClusterName(source);
    configs.add(new BlueprintConfigurationEntry(SMART_SENSE_SERVER_CONFIG_FILE, "cluster.name", clusterName));
    configs.add(new BlueprintConfigurationEntry(SMART_SENSE_SERVER_CONFIG_FILE, "customer.smartsense.id", smartSenseId));
    HSTMetadataInstanceInfoJson instanceInfoJson = new HSTMetadataInstanceInfoJson(
            source.getFlexSubscription().isPresent() ? source.getFlexSubscription().get().getSubscriptionId() : "",
            clusterName,
            source.getGeneralClusterConfigs().getUuid(),
            cloudbreakNodeConfig.getInstanceUUID());
    HSTMetadataJson productInfo = new HSTMetadataJson(clustersComponentId, instanceInfoJson, productId, cbVersion);
    try {
        Json productInfoJson = new Json(productInfo);
        configs.add(new BlueprintConfigurationEntry(SMART_SENSE_PRODUCT_INFO_FILE, "product-info-content", productInfoJson.getValue()));
    } catch (JsonProcessingException ignored) {
        LOGGER.error("The 'product-info-content' SmartSense config could not be added to the Blueprint.");
    }
    return configs;
}
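The pattern to note here is new Json(productInfo).getValue(): the POJO is serialized once and the resulting string is embedded as a blueprint config value. Below is a minimal standalone sketch of that serialize-to-string step, assuming Json delegates to a Jackson ObjectMapper; the ProductInfo record is an illustrative stand-in, not the cloudbreak class.

import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;

public class JsonValueSketch {

    // Illustrative stand-in for HSTMetadataJson; any Jackson-serializable POJO works.
    record ProductInfo(String componentId, String productId, String version) {}

    public static void main(String[] args) {
        ObjectMapper mapper = new ObjectMapper();
        try {
            // Mirrors new Json(productInfo).getValue(): object in, JSON string out.
            String value = mapper.writeValueAsString(new ProductInfo("clusters", "cloudbreak", "2.4.0"));
            System.out.println(value); // {"componentId":"clusters","productId":"cloudbreak","version":"2.4.0"}
        } catch (JsonProcessingException e) {
            // Same failure mode the provider logs instead of rethrowing.
            System.err.println("Could not serialize product info: " + e.getMessage());
        }
    }
}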
Use of com.sequenceiq.cloudbreak.domain.json.Json in project cloudbreak by hortonworks.
The class BlueprintTemplateProcessorTest, method testMustacheGeneratorWithSimpleUseCase.
@Test
public void testMustacheGeneratorWithSimpleUseCase() throws Exception {
    String testBlueprint = FileReaderUtils.readFileFromClasspath("blueprints-jackson/bp-mustache-test.bp");
    Cluster cluster = cluster();
    BlueprintStackInfo blueprintStackInfo = new BlueprintStackInfo("hdp", "2.4");
    GeneralClusterConfigs generalClusterConfigs = generalClusterConfigs();
    generalClusterConfigs.setClusterName("dummyCluster");
    generalClusterConfigs.setStackName("dummyCluster");
    Map<String, Object> properties = new HashMap<>();
    properties.put("S3_BUCKET", "testbucket");
    BlueprintPreparationObject blueprintPreparationObject = BlueprintPreparationObject.Builder.builder()
            .withRdsConfigs(cluster.getRdsConfigs())
            .withGateway(cluster.getGateway())
            .withLdapConfig(cluster.getLdapConfig())
            .withGeneralClusterConfigs(generalClusterConfigs)
            .withBlueprintView(new BlueprintView(testBlueprint, new Json(properties), blueprintStackInfo.getVersion(), blueprintStackInfo.getType()))
            .build();
    String result = underTest.process(testBlueprint, blueprintPreparationObject, Maps.newHashMap());
    assertTrue(result.contains("testbucket"));
    assertTrue(result.contains("{{ zookeeper_quorum }}"));
    assertTrue(result.contains("{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}"));
    assertTrue(result.contains(cluster.getName()));
    assertTrue(result.contains("jdbc:postgresql://10.1.1.1:5432/ranger"));
    assertTrue(result.contains("cn=users,dc=example,dc=org"));
    assertTrue(result.contains("ldap://localhost:389"));
}
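The test exercises Json as a carrier for template properties: a Map goes in through new Json(properties) and the template processor resolves placeholders against it. Here is a minimal sketch of that Map round trip with plain Jackson, under the assumption that Json stores the serialized map verbatim.

import java.util.HashMap;
import java.util.Map;
import com.fasterxml.jackson.databind.ObjectMapper;

public class TemplatePropertiesSketch {

    public static void main(String[] args) throws Exception {
        Map<String, Object> properties = new HashMap<>();
        properties.put("S3_BUCKET", "testbucket");

        ObjectMapper mapper = new ObjectMapper();

        // Mirrors new Json(properties): the map is kept as a JSON object.
        String stored = mapper.writeValueAsString(properties);

        // Reading it back yields the lookup table a template processor needs
        // to substitute {{ S3_BUCKET }}-style placeholders.
        Map<?, ?> restored = mapper.readValue(stored, Map.class);
        System.out.println(restored.get("S3_BUCKET")); // testbucket
    }
}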
Use of com.sequenceiq.cloudbreak.domain.json.Json in project cloudbreak by hortonworks.
The class BlueprintTemplateProcessorTest, method cluster.
private Cluster cluster() {
    Cluster cluster = TestUtil.cluster();
    Set<RDSConfig> rdsConfigSet = new HashSet<>();
    rdsConfigSet.add(rdsConfig(RdsType.DRUID.name().toLowerCase()));
    RDSConfig hiveRds = rdsConfig(RdsType.HIVE.name().toLowerCase());
    rdsConfigSet.add(hiveRds);
    rdsConfigSet.add(rdsConfig(RdsType.RANGER.name().toLowerCase()));
    cluster.setRdsConfigs(rdsConfigSet);
    Map<String, String> inputs = new HashMap<>();
    inputs.put("S3_BUCKET", "testbucket");
    try {
        cluster.setBlueprintInputs(new Json(inputs));
    } catch (JsonProcessingException ignored) {
        cluster.setBlueprintInputs(null);
    }
    return cluster;
}
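One detail worth flagging: the helper swallows JsonProcessingException and stores null blueprint inputs, which pushes the failure onto whoever reads them later. A hedged alternative sketch that surfaces the failure as an empty Optional instead; the helper name is illustrative, not cloudbreak API.

import java.util.Map;
import java.util.Optional;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;

public class SafeJsonSketch {

    private static final ObjectMapper MAPPER = new ObjectMapper();

    // Hypothetical helper: serialize, or report failure explicitly instead of storing null.
    static Optional<String> toJson(Map<String, String> inputs) {
        try {
            return Optional.of(MAPPER.writeValueAsString(inputs));
        } catch (JsonProcessingException e) {
            return Optional.empty();
        }
    }

    public static void main(String[] args) {
        System.out.println(toJson(Map.of("S3_BUCKET", "testbucket")).orElse("{}"));
    }
}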
Use of com.sequenceiq.cloudbreak.domain.json.Json in project cloudbreak by hortonworks.
The class ClusterCreationSetupService, method determineHDPRepoConfig.
private ClusterComponent determineHDPRepoConfig(Blueprint blueprint, long stackId, Optional<Component> stackHdpRepoConfig,
        ClusterRequest request, Cluster cluster, IdentityUser user, Optional<Component> stackImageComponent) throws JsonProcessingException {
    Json stackRepoDetailsJson;
    if (!stackHdpRepoConfig.isPresent()) {
        AmbariStackDetailsJson ambariStackDetails = request.getAmbariStackDetails();
        if (ambariStackDetails != null) {
            setOsTypeFromImageIfMissing(cluster, stackImageComponent, ambariStackDetails);
            StackRepoDetails stackRepoDetails = conversionService.convert(ambariStackDetails, StackRepoDetails.class);
            stackRepoDetailsJson = new Json(stackRepoDetails);
        } else {
            StackRepoDetails stackRepoDetails = SerializationUtils.clone(defaultHDPInfo(blueprint, request, user).getRepo());
            Optional<String> vdfUrl = getVDFUrlByOsType(stackId, stackRepoDetails);
            vdfUrl.ifPresent(s -> stackRepoDetails.getStack().put(CUSTOM_VDF_REPO_KEY, s));
            stackRepoDetailsJson = new Json(stackRepoDetails);
        }
    } else {
        stackRepoDetailsJson = stackHdpRepoConfig.get().getAttributes();
    }
    return new ClusterComponent(ComponentType.HDP_REPO_DETAILS, stackRepoDetailsJson, cluster);
}
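On the stackHdpRepoConfig.isPresent() branch the method reuses the already-stored attributes, so the Json must be symmetric: what store writes, a later read must recover. A round-trip sketch with Jackson, using a hypothetical RepoDetails class in place of StackRepoDetails.

import com.fasterxml.jackson.databind.ObjectMapper;

public class RepoDetailsRoundTrip {

    // Hypothetical stand-in for StackRepoDetails; public fields keep Jackson happy without annotations.
    public static class RepoDetails {
        public String version;
        public String vdfUrl;
    }

    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();

        RepoDetails details = new RepoDetails();
        details.version = "2.6";
        details.vdfUrl = "http://example.com/vdf.xml";

        // Store: mirrors new Json(stackRepoDetails).
        String attributes = mapper.writeValueAsString(details);

        // Read back: the stored attributes must deserialize to the same shape.
        RepoDetails restored = mapper.readValue(attributes, RepoDetails.class);
        System.out.println(restored.version + " " + restored.vdfUrl); // 2.6 http://example.com/vdf.xml
    }
}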
Use of com.sequenceiq.cloudbreak.domain.json.Json in project cloudbreak by hortonworks.
The class AmbariClusterService, method upgrade.
@Override
public void upgrade(Long stackId, AmbariRepo ambariRepoUpgrade) {
    if (ambariRepoUpgrade != null) {
        Stack stack = stackService.getByIdWithLists(stackId);
        Cluster cluster = clusterRepository.findById(stack.getCluster().getId());
        if (cluster == null) {
            throw new BadRequestException(String.format("Cluster does not exist on stack with '%s' id.", stackId));
        }
        if (!stack.isAvailable()) {
            throw new BadRequestException(String.format(
                    "Stack '%s' is currently in '%s' state. Upgrade requests to a cluster can only be made if the underlying stack is 'AVAILABLE'.",
                    stackId, stack.getStatus()));
        }
        if (!cluster.isAvailable()) {
            throw new BadRequestException(String.format(
                    "Cluster '%s' is currently in '%s' state. Upgrade requests to a cluster can only be made if the cluster is 'AVAILABLE'.",
                    stackId, cluster.getStatus()));
        }
        AmbariRepo ambariRepo = clusterComponentConfigProvider.getAmbariRepo(cluster.getId());
        if (ambariRepo == null) {
            try {
                clusterComponentConfigProvider.store(new ClusterComponent(ComponentType.AMBARI_REPO_DETAILS, new Json(ambariRepoUpgrade), stack.getCluster()));
            } catch (JsonProcessingException ignored) {
                throw new BadRequestException(String.format("Ambari repo details cannot be saved. %s", ambariRepoUpgrade));
            }
        } else {
            ClusterComponent component = clusterComponentConfigProvider.getComponent(cluster.getId(), ComponentType.AMBARI_REPO_DETAILS);
            ambariRepo.setBaseUrl(ambariRepoUpgrade.getBaseUrl());
            ambariRepo.setGpgKeyUrl(ambariRepoUpgrade.getGpgKeyUrl());
            ambariRepo.setPredefined(false);
            ambariRepo.setVersion(ambariRepoUpgrade.getVersion());
            try {
                component.setAttributes(new Json(ambariRepo));
                clusterComponentConfigProvider.store(component);
            } catch (JsonProcessingException ignored) {
                throw new BadRequestException(String.format("Ambari repo details cannot be saved. %s", ambariRepoUpgrade));
            }
        }
        try {
            flowManager.triggerClusterUpgrade(stack.getId());
        } catch (RuntimeException e) {
            throw new CloudbreakServiceException(e);
        }
    }
}
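The update branch mutates the deserialized AmbariRepo field by field and reserializes it with component.setAttributes(new Json(ambariRepo)). The same update step can be sketched with Jackson's tree model, assuming the stored attributes are a flat JSON object; the field names and values here are illustrative.

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;

public class AttributeUpdateSketch {

    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();

        // Existing component attributes as they might sit in storage (illustrative values).
        ObjectNode attributes = (ObjectNode) mapper.readTree(
                "{\"baseUrl\":\"http://repo.old\",\"version\":\"2.5.0\",\"predefined\":true}");

        // Mirrors the field-by-field update before the attributes are reserialized and stored.
        attributes.put("baseUrl", "http://repo.new");
        attributes.put("version", "2.6.0");
        attributes.put("predefined", false);

        System.out.println(mapper.writeValueAsString(attributes));
    }
}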