Usage of com.sequenceiq.cloudbreak.api.endpoint.v4.stacks.response.StackV4Response in project cloudbreak by Hortonworks:
class ProvisionerService, method waitCloudbreakClusterDeletion.
/**
 * Polls cloudbreak until the stack of the given SDX cluster is gone, then records the
 * STACK_DELETED status on the datalake and notifies about it.
 * <p>
 * Polling finishes successfully when cloudbreak answers with {@code NotFoundException}
 * (the stack no longer exists). A DELETE_FAILED status — on the cluster or on the stack —
 * is tolerated up to {@code DELETE_FAILED_RETRY_COUNT} observations before the polling is
 * broken with a failure message; any other status keeps the polling going.
 *
 * @param id            id of the SDX cluster whose stack deletion is awaited
 * @param pollingConfig sleep interval, overall timeout and stop-on-exception behaviour of the poller
 */
public void waitCloudbreakClusterDeletion(Long id, PollingConfig pollingConfig) {
SdxCluster sdxCluster = sdxService.getById(id);
// Counts DELETE_FAILED observations across poll attempts; starts at 1 so the comparison
// below trips on the DELETE_FAILED_RETRY_COUNT-th sighting.
AtomicInteger deleteFailedCount = new AtomicInteger(1);
Polling.waitPeriodly(pollingConfig.getSleepTime(), pollingConfig.getSleepTimeUnit()).stopIfException(pollingConfig.getStopPollingIfExceptionOccurred()).stopAfterDelay(pollingConfig.getDuration(), pollingConfig.getDurationTimeUnit()).run(() -> {
LOGGER.info("Deletion polling cloudbreak for stack status: '{}' in '{}' env", sdxCluster.getClusterName(), sdxCluster.getEnvName());
try {
// Query cloudbreak as the internal actor; workspace id 0L is a placeholder for internal calls.
StackV4Response stackV4Response = ThreadBasedUserCrnProvider.doAsInternalActor(regionAwareInternalCrnGeneratorFactory.iam().getInternalCrnForServiceAsString(), () -> stackV4Endpoint.get(0L, sdxCluster.getClusterName(), Collections.emptySet(), sdxCluster.getAccountId()));
LOGGER.info("Stack status of SDX {} by response from cloudbreak: {}", sdxCluster.getClusterName(), stackV4Response.getStatus().name());
LOGGER.debug("Response from cloudbreak: {}", JsonUtil.writeValueAsString(stackV4Response));
// Cluster status is checked first: while the cluster is still in a PROGRESS kind of
// state, the stack-level status is not yet meaningful for this decision.
ClusterV4Response cluster = stackV4Response.getCluster();
if (cluster != null) {
if (StatusKind.PROGRESS.equals(cluster.getStatus().getStatusKind())) {
return AttemptResults.justContinue();
}
if (Status.DELETE_FAILED.equals(cluster.getStatus())) {
// Retry-on-DELETE_FAILED workaround; if it is implemented (on cloudbreak side), please remove this
if (deleteFailedCount.getAndIncrement() >= DELETE_FAILED_RETRY_COUNT) {
LOGGER.error("Cluster deletion failed '" + sdxCluster.getClusterName() + "', " + stackV4Response.getCluster().getStatusReason());
return AttemptResults.breakFor("Data Lake deletion failed '" + sdxCluster.getClusterName() + "', " + stackV4Response.getCluster().getStatusReason());
} else {
return AttemptResults.justContinue();
}
}
}
// Same DELETE_FAILED tolerance for the stack-level status (shares the counter above).
if (Status.DELETE_FAILED.equals(stackV4Response.getStatus())) {
// Retry-on-DELETE_FAILED workaround; if it is implemented (on cloudbreak side), please remove this
if (deleteFailedCount.getAndIncrement() >= DELETE_FAILED_RETRY_COUNT) {
LOGGER.error("Stack deletion failed '" + sdxCluster.getClusterName() + "', " + stackV4Response.getStatusReason());
return AttemptResults.breakFor("Data Lake deletion failed '" + sdxCluster.getClusterName() + "', " + stackV4Response.getStatusReason());
} else {
return AttemptResults.justContinue();
}
} else {
return AttemptResults.justContinue();
}
} catch (NotFoundException e) {
// 404 from cloudbreak means the stack no longer exists: deletion completed.
return AttemptResults.finishWith(null);
}
});
sdxStatusService.setStatusForDatalakeAndNotify(DatalakeStatusEnum.STACK_DELETED, "Datalake stack deleted", sdxCluster);
}
Usage of com.sequenceiq.cloudbreak.api.endpoint.v4.stacks.response.StackV4Response in project cloudbreak by Hortonworks:
class ProvisionerService, method startStackProvisioning.
/**
 * Sends the stored stack request of the given SDX cluster to cloudbreak to start provisioning.
 * If a stack with the cluster's CRN already exists on cloudbreak side it is reused (idempotent
 * re-entry); otherwise a new stack is POSTed as the original initiator user. The resulting
 * stack id/crn and the last flow chain id are persisted on the SDX cluster.
 *
 * @param id          id of the SDX cluster to provision
 * @param environment environment response used to configure the stack request
 * @throws RuntimeException if cloudbreak rejects the request or the stored stack request JSON cannot be parsed
 */
public void startStackProvisioning(Long id, DetailedEnvironmentResponse environment) {
SdxCluster sdxCluster = sdxService.getById(id);
LOGGER.info("Call cloudbreak with stackrequest");
try {
stackRequestManifester.configureStackForSdxCluster(sdxCluster, environment);
StackV4Request stackV4Request = JsonUtil.readValue(sdxCluster.getStackRequestToCloudbreak(), StackV4Request.class);
// Propagate the external database CRN (if any) into the request and keep the stored request JSON in sync.
Optional.ofNullable(sdxCluster.getDatabaseCrn()).ifPresent(crn -> {
stackV4Request.getCluster().setDatabaseServerCrn(crn);
sdxCluster.setStackRequestToCloudbreak(JsonUtil.writeValueAsStringSilent(stackV4Request));
});
StackV4Response stackV4Response;
try {
// Reuse an already-existing stack so repeated invocations stay idempotent.
stackV4Response = ThreadBasedUserCrnProvider.doAsInternalActor(regionAwareInternalCrnGeneratorFactory.iam().getInternalCrnForServiceAsString(), () -> stackV4Endpoint.getByCrn(0L, sdxCluster.getCrn(), null));
} catch (NotFoundException e) {
LOGGER.info("Stack does not exist on cloudbreak side, POST new cluster: {}", sdxCluster.getClusterName(), e);
String initiatorUserCrn = ThreadBasedUserCrnProvider.getUserCrn();
stackV4Response = ThreadBasedUserCrnProvider.doAsInternalActor(regionAwareInternalCrnGeneratorFactory.iam().getInternalCrnForServiceAsString(), () -> stackV4Endpoint.postInternal(0L, stackV4Request, initiatorUserCrn));
}
sdxCluster.setStackId(stackV4Response.getId());
sdxCluster.setStackCrn(stackV4Response.getCrn());
sdxClusterRepository.save(sdxCluster);
cloudbreakFlowService.saveLastCloudbreakFlowChainId(sdxCluster, stackV4Response.getFlowIdentifier());
LOGGER.info("Sdx cluster updated");
} catch (WebApplicationException e) {
String errorMessage = webApplicationExceptionMessageExtractor.getErrorMessage(e);
LOGGER.info("Cannot start provisioning: {}", errorMessage, e);
// Chain the original exception as cause instead of discarding it.
throw new RuntimeException("Cannot start provisioning, error happened during the operation: " + errorMessage, e);
} catch (IOException e) {
LOGGER.info("Cannot parse stackrequest to json", e);
// Chain the original exception as cause instead of discarding it.
throw new RuntimeException("Cannot write stackrequest to json: " + e.getMessage(), e);
}
}
Usage of com.sequenceiq.cloudbreak.api.endpoint.v4.stacks.response.StackV4Response in project cloudbreak by Hortonworks:
class MockStackResponseGenerator, method getMockStackV4Response.
/**
 * Builds a mock {@link StackV4Response} for tests: an AWS stack with a fixed "master" group
 * (1 node), a fixed "worker" group (2 nodes), and a variable host group containing
 * {@code unhealthyInstancesCount} SERVICES_UNHEALTHY nodes followed by
 * {@code currentHostGroupCount - unhealthyInstancesCount} SERVICES_HEALTHY nodes, with
 * FQDNs of the form {@code fqdnBase + index}.
 *
 * @param clusterCrn             CRN set on the returned stack response
 * @param hostGroup              name of the variable host group
 * @param fqdnBase               prefix for the generated node FQDNs
 * @param currentHostGroupCount  total node count of the variable host group
 * @param unhealthyInstancesCount number of those nodes marked SERVICES_UNHEALTHY
 * @return mock stack response with three instance groups on the AWS platform
 */
public static StackV4Response getMockStackV4Response(String clusterCrn, String hostGroup, String fqdnBase, int currentHostGroupCount, int unhealthyInstancesCount) {
List<InstanceGroupV4Response> instanceGroupV4Responses = new ArrayList<>();
InstanceMetaDataV4Response master1 = new InstanceMetaDataV4Response();
master1.setDiscoveryFQDN("master1");
master1.setInstanceId("test_instanceid" + "master1");
instanceGroupV4Responses.add(instanceGroup("master", awsTemplate(), Set.of(master1)));
InstanceMetaDataV4Response worker1 = new InstanceMetaDataV4Response();
worker1.setDiscoveryFQDN("worker1");
worker1.setInstanceId("test_instanceid" + "worker1");
InstanceMetaDataV4Response worker2 = new InstanceMetaDataV4Response();
worker2.setDiscoveryFQDN("worker2");
worker2.setInstanceId("test_instanceid" + "worker2");
instanceGroupV4Responses.add(instanceGroup("worker", awsTemplate(), Set.of(worker1, worker2)));
// Parameterized types instead of the raw Set/HashSet used before.
Set<InstanceMetaDataV4Response> fqdnToInstanceIds = new HashSet<>();
// Unhealthy nodes take the low FQDN indices (1..unhealthyInstancesCount).
for (int i = 1; i <= unhealthyInstancesCount; i++) {
InstanceMetaDataV4Response metadataResponse = new InstanceMetaDataV4Response();
metadataResponse.setDiscoveryFQDN(fqdnBase + i);
metadataResponse.setInstanceId("test_instanceid_" + hostGroup + i);
metadataResponse.setInstanceStatus(InstanceStatus.SERVICES_UNHEALTHY);
fqdnToInstanceIds.add(metadataResponse);
}
// Healthy nodes continue the index sequence after the unhealthy ones.
for (int i = 1; i <= currentHostGroupCount - unhealthyInstancesCount; i++) {
InstanceMetaDataV4Response metadata1 = new InstanceMetaDataV4Response();
metadata1.setDiscoveryFQDN(fqdnBase + (unhealthyInstancesCount + i));
metadata1.setInstanceId("test_instanceid_" + hostGroup + (unhealthyInstancesCount + i));
metadata1.setInstanceStatus(InstanceStatus.SERVICES_HEALTHY);
fqdnToInstanceIds.add(metadata1);
}
instanceGroupV4Responses.add(instanceGroup(hostGroup, awsTemplate(), fqdnToInstanceIds));
StackV4Response mockResponse = new StackV4Response();
mockResponse.setCrn(clusterCrn);
mockResponse.setInstanceGroups(instanceGroupV4Responses);
mockResponse.setCloudPlatform(CloudPlatform.AWS);
return mockResponse;
}
Usage of com.sequenceiq.cloudbreak.api.endpoint.v4.stacks.response.StackV4Response in project cloudbreak by Hortonworks:
class MetricTest, method getStackResponse.
/**
 * Builds a minimal stack response whose stack and embedded cluster carry the given statuses.
 *
 * @param stackStatus   status to set on the stack itself
 * @param clusterStatus status to set on the nested cluster response
 * @return stack response populated with both statuses
 */
private StackV4Response getStackResponse(Status stackStatus, Status clusterStatus) {
    ClusterV4Response clusterResponse = new ClusterV4Response();
    clusterResponse.setStatus(clusterStatus);
    StackV4Response response = new StackV4Response();
    response.setStatus(stackStatus);
    response.setCluster(clusterResponse);
    return response;
}
Usage of com.sequenceiq.cloudbreak.api.endpoint.v4.stacks.response.StackV4Response in project cloudbreak by Hortonworks:
class YarnLoadEvaluatorTest, method captureScalingEvent.
/**
 * Runs the YARN load evaluator against a mocked stop/start-scaling cluster and, when a
 * scaling event is expected, captures and returns the event published to the event bus.
 *
 * @param runningNodeHostGroupCount number of running nodes mocked in the host group
 * @param stoppedNodeHostGroupCount number of stopped nodes mocked in the host group
 * @param yarnUpscaleCount          upscale candidate count in the mocked YARN response
 * @param yarnDownscaleCount        downscale candidate count in the mocked YARN response
 * @param scalingEventExpected      whether a published ScalingEvent should be captured
 * @return the captured scaling event, or empty when none was expected
 */
private Optional<ScalingEvent> captureScalingEvent(int runningNodeHostGroupCount, int stoppedNodeHostGroupCount, int yarnUpscaleCount, int yarnDownscaleCount, boolean scalingEventExpected) throws Exception {
    MockitoAnnotations.openMocks(this);
    Cluster cluster = getARunningCluster();
    cluster.setStopStartScalingEnabled(true);
    String hostGroup = "compute";
    StackV4Response stackResponse = MockStackResponseGenerator.getMockStackV4ResponseWithStoppedAndRunningNodes(
            CLOUDBREAK_STACK_CRN, hostGroup, fqdnBase, runningNodeHostGroupCount, stoppedNodeHostGroupCount);
    YarnScalingServiceV1Response yarnResponse = getMockYarnScalingResponse(hostGroup, yarnUpscaleCount, yarnDownscaleCount);
    setupMocks(cluster, yarnResponse, stackResponse);
    underTest.setContext(new ClusterIdEvaluatorContext(AUTOSCALE_CLUSTER_ID));
    underTest.execute();
    if (!scalingEventExpected) {
        return Optional.empty();
    }
    // Capture the single event published as a result of the evaluation.
    ArgumentCaptor<ScalingEvent> eventCaptor = ArgumentCaptor.forClass(ScalingEvent.class);
    verify(eventPublisher).publishEvent(eventCaptor.capture());
    return Optional.of(eventCaptor.getValue());
}
Aggregations