Usage example of com.sequenceiq.cloudbreak.event.ResourceEvent in the cloudbreak project by Hortonworks.
From class ClusterOperationService, method updateFailedNodes:
/**
 * Marks instances reported as failed as {@code SERVICES_UNHEALTHY} and fires both the
 * cluster-level and host-level "failed nodes reported" events. Does nothing when the
 * report contains no failed instances.
 *
 * @param cluster            the cluster whose instance states are updated
 * @param failedHostMetadata failed instances mapped to an optional failure reason
 * @throws TransactionExecutionException if persisting the state change fails
 */
private void updateFailedNodes(Cluster cluster, Map<InstanceMetaData, Optional<String>> failedHostMetadata) throws TransactionExecutionException {
if (!failedHostMetadata.isEmpty()) {
// Instances without a discovery FQDN cannot be addressed by host name, so they are skipped.
Map<String, Optional<String>> hostNamesWithReason = failedHostMetadata.entrySet().stream()
        .filter(e -> e.getKey().getDiscoveryFQDN() != null)
        .collect(Collectors.toMap(e -> e.getKey().getDiscoveryFQDN(), Map.Entry::getValue));
// EnumSet for enum members — consistent with updateNewHealthyNodes and cheaper than a generic Set.
Set<InstanceStatus> expectedStates = EnumSet.of(SERVICES_HEALTHY, SERVICES_RUNNING);
InstanceStatus newState = SERVICES_UNHEALTHY;
ResourceEvent clusterEvent = CLUSTER_FAILED_NODES_REPORTED_CLUSTER_EVENT;
ResourceEvent hostEvent = CLUSTER_FAILED_NODES_REPORTED_HOST_EVENT;
updateChangedHosts(cluster, hostNamesWithReason, expectedStates, newState, clusterEvent, Optional.of(hostEvent));
}
}
Usage example of com.sequenceiq.cloudbreak.event.ResourceEvent in the cloudbreak project by Hortonworks.
From class ClusterOperationService, method updateNewHealthyNodes:
/**
 * Transitions hosts that have been reported healthy again back to {@code SERVICES_HEALTHY}
 * and fires the cluster-level "recovered nodes reported" event. Does nothing when the set
 * of newly healthy hosts is empty.
 *
 * @param cluster         the cluster whose instance states are updated
 * @param newHealthyNodes host names reported as healthy again
 * @throws TransactionExecutionException if persisting the state change fails
 */
private void updateNewHealthyNodes(Cluster cluster, Set<String> newHealthyNodes) throws TransactionExecutionException {
if (newHealthyNodes.isEmpty()) {
return;
}
// Recovered hosts carry no failure reason, hence every value is an empty Optional.
Map<String, Optional<String>> recoveredHostsWithoutReason = newHealthyNodes.stream()
        .collect(Collectors.toMap(hostName -> hostName, hostName -> Optional.empty()));
// No host-level event is fired on recovery, only the cluster-level one.
updateChangedHosts(cluster, recoveredHostsWithoutReason,
        EnumSet.of(SERVICES_UNHEALTHY, SERVICES_RUNNING, DECOMMISSION_FAILED, FAILED),
        SERVICES_HEALTHY, CLUSTER_RECOVERED_NODES_REPORTED_CLUSTER_EVENT, Optional.empty());
}
Usage example of com.sequenceiq.cloudbreak.event.ResourceEvent in the cloudbreak project by Hortonworks.
From class RecoveryTeardownServiceTest, method testTeardownFinished:
@Test
public void testTeardownFinished() {
// GIVEN a stack whose recovery teardown completed successfully
TerminateStackResult terminationResult = new TerminateStackResult(STACK_ID);
ArgumentCaptor<ResourceEvent> eventCaptor = ArgumentCaptor.forClass(ResourceEvent.class);
when(stackTerminationContext.getStack()).thenReturn(stack);
when(stack.getId()).thenReturn(STACK_ID);
// WHEN the success handler runs
underTest.handleRecoveryTeardownSuccess(stackTerminationContext, terminationResult);
// THEN the teardown is finalized, the success metric is bumped and the finished event is fired,
// while the stack status itself is left untouched.
verifyNoInteractions(stackUpdater);
verify(terminationService).finalizeRecoveryTeardown(STACK_ID);
verify(metricService).incrementMetricCounter(MetricType.STACK_RECOVERY_TEARDOWN_SUCCESSFUL, stack);
verify(flowMessageService).fireEventAndLog(eq(STACK_ID), eq(Status.DELETE_COMPLETED.name()), eventCaptor.capture());
assertEquals(DATALAKE_RECOVERY_TEARDOWN_FINISHED, eventCaptor.getValue());
}
Usage example of com.sequenceiq.cloudbreak.event.ResourceEvent in the cloudbreak project by Hortonworks.
From class RecoveryTeardownServiceTest, method testTeardownFailure:
@Test
public void testTeardownFailure() {
// GIVEN a teardown that failed with an error message
ArgumentCaptor<ResourceEvent> eventCaptor = ArgumentCaptor.forClass(ResourceEvent.class);
Exception cause = new Exception(ERROR_MESSAGE);
String expectedStatusReason = "Recovery failed: " + cause.getMessage();
StackView stackView = getStackView();
// WHEN the error handler runs
underTest.handleRecoveryTeardownError(stackView, cause);
// THEN the stack status is updated, the failure metric is bumped and the failure event is fired.
verify(stackUpdater).updateStackStatus(STACK_ID, CLUSTER_RECOVERY_FAILED, expectedStatusReason);
verify(metricService).incrementMetricCounter(MetricType.STACK_RECOVERY_TEARDOWN_FAILED, stackView, cause);
verify(flowMessageService).fireEventAndLog(eq(STACK_ID), eq(CLUSTER_RECOVERY_FAILED.name()), eventCaptor.capture(), eq(expectedStatusReason));
assertEquals(DATALAKE_RECOVERY_FAILED, eventCaptor.getValue());
}
Usage example of com.sequenceiq.cloudbreak.event.ResourceEvent in the cloudbreak project by Hortonworks.
From class EnvClustersDeleteActions, method datalakeClustersDeleteAction:
/**
 * Flow action that starts deletion of the environment's Data Lake clusters: it moves the
 * environment to {@code DATALAKE_CLUSTERS_DELETE_IN_PROGRESS}, sends the "deletion started"
 * notification, then emits {@code DELETE_DATALAKE_CLUSTERS_EVENT} to advance the flow.
 */
@Bean(name = "DATALAKE_CLUSTERS_DELETE_STARTED_STATE")
public Action<?, ?> datalakeClustersDeleteAction() {
return new AbstractEnvClustersDeleteAction<>(EnvDeleteEvent.class) {
@Override
protected void doExecute(CommonContext context, EnvDeleteEvent payload, Map<Object, Object> variables) {
// Persist the in-progress status and notify before handing the flow the next event.
EnvironmentDeletionDto envDto = commonUpdateEnvironmentAndNotify(
        context,
        payload,
        EnvironmentStatus.DATALAKE_CLUSTERS_DELETE_IN_PROGRESS,
        ResourceEvent.ENVIRONMENT_DATALAKE_CLUSTERS_DELETION_STARTED,
        EnvClustersDeleteState.DATALAKE_CLUSTERS_DELETE_STARTED_STATE,
        "Data Lake clusters");
sendEvent(context, DELETE_DATALAKE_CLUSTERS_EVENT.selector(), envDto);
}
};
}
Aggregations