Use of com.sequenceiq.cloudbreak.api.endpoint.v4.stacks.response.cluster.ClusterV4Response in project cloudbreak by hortonworks.
The class SdxUpgradeServiceTest, method getStackV4Response:
private StackV4Response getStackV4Response() {
    ClouderaManagerProductV4Response cdp = new ClouderaManagerProductV4Response();
    cdp.setName("CDH");
    cdp.setVersion("7.2.1-1.cdh7.2.0.p0.3758356");

    ClouderaManagerProductV4Response cfm = new ClouderaManagerProductV4Response();
    cfm.setName("CFM");
    cfm.setVersion("2.0.0.0");

    ClouderaManagerProductV4Response spark3 = new ClouderaManagerProductV4Response();
    spark3.setName("SPARK3");
    spark3.setVersion("3.0.0.2.99.7110.0-18-1.p0.3525631");

    ClouderaManagerV4Response cm = new ClouderaManagerV4Response();
    cm.setProducts(List.of(cdp, cfm, spark3));

    ClusterV4Response clusterV4Response = new ClusterV4Response();
    clusterV4Response.setCm(cm);

    StackV4Response stackV4Response = new StackV4Response();
    stackV4Response.setName("test-sdx-cluster");
    stackV4Response.setCluster(clusterV4Response);
    return stackV4Response;
}
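A minimal sketch of how a test might assert on this fixture, assuming JUnit's assertEquals and the same DTO getters (getCluster, getCm, getProducts, getName, getVersion) that appear in the getCdpVersion snippet below; the test method itself is illustrative and not part of SdxUpgradeServiceTest.
@Test
public void cdhProductIsPresentInFixture() {
    StackV4Response stack = getStackV4Response();
    List<ClouderaManagerProductV4Response> products = stack.getCluster().getCm().getProducts();
    // Look up the "CDH" product the same way the production code does.
    Optional<String> cdhVersion = products.stream()
            .filter(product -> "CDH".equals(product.getName()))
            .map(ClouderaManagerProductV4Response::getVersion)
            .findFirst();
    assertEquals(Optional.of("7.2.1-1.cdh7.2.0.p0.3758356"), cdhVersion);
}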
Use of com.sequenceiq.cloudbreak.api.endpoint.v4.stacks.response.cluster.ClusterV4Response in project cloudbreak by hortonworks.
The class SdxService, method getCdpVersion:
private Optional<String> getCdpVersion(StackV4Response stack) {
    String stackName = stack.getName();
    ClusterV4Response cluster = stack.getCluster();
    if (cluster != null) {
        ClouderaManagerV4Response cm = cluster.getCm();
        if (cm != null) {
            LOGGER.info("Repository details are available for cluster: {}: {}", stackName, cm);
            List<ClouderaManagerProductV4Response> products = cm.getProducts();
            if (products != null && !products.isEmpty()) {
                Optional<ClouderaManagerProductV4Response> cdpOpt = products.stream()
                        .filter(p -> "CDH".equals(p.getName()))
                        .findFirst();
                if (cdpOpt.isPresent()) {
                    return getRuntimeVersionFromCdpVersion(cdpOpt.get().getVersion());
                }
            }
        }
    }
    return Optional.empty();
}
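getRuntimeVersionFromCdpVersion is not shown in this snippet. A self-contained sketch of what such a helper could look like, assuming the runtime version is the leading major.minor.patch prefix of the CDH parcel version (e.g. "7.2.1" from "7.2.1-1.cdh7.2.0.p0.3758356"); this is an illustration, not the project's actual implementation.
import java.util.Optional;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

class RuntimeVersionSketch {

    // Matches a leading "major.minor.patch" prefix such as "7.2.1".
    private static final Pattern RUNTIME_PREFIX = Pattern.compile("^(\\d+\\.\\d+\\.\\d+)");

    // Hypothetical stand-in for getRuntimeVersionFromCdpVersion.
    static Optional<String> getRuntimeVersionFromCdpVersion(String cdhVersion) {
        if (cdhVersion == null) {
            return Optional.empty();
        }
        Matcher matcher = RUNTIME_PREFIX.matcher(cdhVersion);
        return matcher.find() ? Optional.of(matcher.group(1)) : Optional.empty();
    }
}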
Use of com.sequenceiq.cloudbreak.api.endpoint.v4.stacks.response.cluster.ClusterV4Response in project cloudbreak by hortonworks.
The class ClusterToClusterV4ResponseConverterTest, method testConvertWithoutMasterComponent:
@Test
public void testConvertWithoutMasterComponent() {
    // GIVEN
    given(proxyConfigDtoService.getByCrn(anyString()))
            .willReturn(ProxyConfig.builder().withCrn("crn").withName("name").build());
    // WHEN
    ClusterV4Response result = underTest.convert(getSource());
    // THEN
    assertEquals(1L, (long) result.getId());
}
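The snippet omits its imports. The stubbing and assertion above typically rely on the static imports below (JUnit 4 shown; a JUnit 5 test would use org.junit.jupiter.api.Assertions and org.junit.jupiter.api.Test instead). They are listed here as an assumption about the test's setup, not copied from the project.
import static org.junit.Assert.assertEquals;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.BDDMockito.given;

import org.junit.Test;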
Use of com.sequenceiq.cloudbreak.api.endpoint.v4.stacks.response.cluster.ClusterV4Response in project cloudbreak by hortonworks.
The class ClusterToClusterV4ResponseConverter, method convertContainerConfig:
private void convertContainerConfig(Cluster source, ClusterV4Response clusterResponse) {
    Json customContainerDefinition = source.getCustomContainerDefinition();
    if (customContainerDefinition != null && StringUtils.isNotEmpty(customContainerDefinition.getValue())) {
        try {
            Map<String, String> map = customContainerDefinition.get(Map.class);
            Map<String, String> result = new HashMap<>();
            for (Entry<String, String> stringStringEntry : map.entrySet()) {
                result.put(stringStringEntry.getKey(), stringStringEntry.getValue());
            }
            CustomContainerV4Response customContainers = new CustomContainerV4Response();
            customContainers.setDefinitions(result);
            clusterResponse.setCustomContainers(customContainers);
        } catch (IOException e) {
            LOGGER.info("Failed to add customContainerDefinition to response", e);
            throw new CloudbreakApiException("Failed to add customContainerDefinition to response", e);
        }
    }
}
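The entry-by-entry loop above is a defensive copy of the parsed map into the response. A simplified, self-contained sketch of the same parse-and-copy step, using plain Jackson instead of the project's Json wrapper; the class and method names here are illustrative only.
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import com.fasterxml.jackson.databind.ObjectMapper;

class CustomContainerDefinitionSketch {

    // Parses a custom container definition JSON object into a String->String map
    // and returns a defensive copy, mirroring the loop in convertContainerConfig.
    static Map<String, String> parseDefinitions(String customContainerJson) throws IOException {
        ObjectMapper mapper = new ObjectMapper();
        @SuppressWarnings("unchecked")
        Map<String, String> parsed = mapper.readValue(customContainerJson, Map.class);
        return new HashMap<>(parsed);
    }
}
Whether the copy is strictly needed depends on whether the parsed map may be shared elsewhere; the original loop simply makes that intent explicit.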
Use of com.sequenceiq.cloudbreak.api.endpoint.v4.stacks.response.cluster.ClusterV4Response in project cloudbreak by hortonworks.
The class ProvisionerService, method waitCloudbreakClusterDeletion:
public void waitCloudbreakClusterDeletion(Long id, PollingConfig pollingConfig) {
    SdxCluster sdxCluster = sdxService.getById(id);
    AtomicInteger deleteFailedCount = new AtomicInteger(1);
    Polling.waitPeriodly(pollingConfig.getSleepTime(), pollingConfig.getSleepTimeUnit())
            .stopIfException(pollingConfig.getStopPollingIfExceptionOccurred())
            .stopAfterDelay(pollingConfig.getDuration(), pollingConfig.getDurationTimeUnit())
            .run(() -> {
                LOGGER.info("Deletion polling cloudbreak for stack status: '{}' in '{}' env",
                        sdxCluster.getClusterName(), sdxCluster.getEnvName());
                try {
                    StackV4Response stackV4Response = ThreadBasedUserCrnProvider.doAsInternalActor(
                            regionAwareInternalCrnGeneratorFactory.iam().getInternalCrnForServiceAsString(),
                            () -> stackV4Endpoint.get(0L, sdxCluster.getClusterName(), Collections.emptySet(), sdxCluster.getAccountId()));
                    LOGGER.info("Stack status of SDX {} by response from cloudbreak: {}",
                            sdxCluster.getClusterName(), stackV4Response.getStatus().name());
                    LOGGER.debug("Response from cloudbreak: {}", JsonUtil.writeValueAsString(stackV4Response));
                    ClusterV4Response cluster = stackV4Response.getCluster();
                    if (cluster != null) {
                        if (StatusKind.PROGRESS.equals(cluster.getStatus().getStatusKind())) {
                            return AttemptResults.justContinue();
                        }
                        if (Status.DELETE_FAILED.equals(cluster.getStatus())) {
                            // if it is implemented, please remove this
                            if (deleteFailedCount.getAndIncrement() >= DELETE_FAILED_RETRY_COUNT) {
                                LOGGER.error("Cluster deletion failed '" + sdxCluster.getClusterName() + "', "
                                        + stackV4Response.getCluster().getStatusReason());
                                return AttemptResults.breakFor("Data Lake deletion failed '" + sdxCluster.getClusterName() + "', "
                                        + stackV4Response.getCluster().getStatusReason());
                            } else {
                                return AttemptResults.justContinue();
                            }
                        }
                    }
                    if (Status.DELETE_FAILED.equals(stackV4Response.getStatus())) {
                        // if it is implemented, please remove this
                        if (deleteFailedCount.getAndIncrement() >= DELETE_FAILED_RETRY_COUNT) {
                            LOGGER.error("Stack deletion failed '" + sdxCluster.getClusterName() + "', "
                                    + stackV4Response.getStatusReason());
                            return AttemptResults.breakFor("Data Lake deletion failed '" + sdxCluster.getClusterName() + "', "
                                    + stackV4Response.getStatusReason());
                        } else {
                            return AttemptResults.justContinue();
                        }
                    } else {
                        return AttemptResults.justContinue();
                    }
                } catch (NotFoundException e) {
                    return AttemptResults.finishWith(null);
                }
            });
    sdxStatusService.setStatusForDatalakeAndNotify(DatalakeStatusEnum.STACK_DELETED, "Datalake stack deleted", sdxCluster);
}
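A small, self-contained sketch of the DELETE_FAILED handling above: because deleteFailedCount starts at 1 and is incremented with getAndIncrement(), the attempt breaks on the DELETE_FAILED_RETRY_COUNT-th DELETE_FAILED observation and keeps polling before that. The class and the constant's value below are illustrative; DELETE_FAILED_RETRY_COUNT's real value is not shown in the snippet.
import java.util.concurrent.atomic.AtomicInteger;

class DeleteFailedRetrySketch {

    // Illustrative value only; the real constant is defined elsewhere in ProvisionerService.
    private static final int DELETE_FAILED_RETRY_COUNT = 3;

    enum Decision { CONTINUE, BREAK }

    // Mirrors the retry-bounded branch in waitCloudbreakClusterDeletion:
    // keep polling until DELETE_FAILED has been observed enough times, then give up.
    static Decision onDeleteFailed(AtomicInteger deleteFailedCount) {
        return deleteFailedCount.getAndIncrement() >= DELETE_FAILED_RETRY_COUNT
                ? Decision.BREAK
                : Decision.CONTINUE;
    }
}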