Use of com.sequenceiq.flow.reactor.api.handler.HandlerEvent in project cloudbreak by hortonworks.
From class DiagnosticsSaltValidationHandlerTest, method testDoAcceptOnError:
@Test
public void testDoAcceptOnError() throws CloudbreakOrchestratorFailedException {
    // GIVEN
    doThrow(new CloudbreakOrchestratorFailedException("ex")).when(diagnosticsOperationsService).applyUnresponsiveHosts(anyLong(), any());
    // WHEN
    DiagnosticsCollectionEvent event = new DiagnosticsCollectionEvent(SALT_VALIDATION_DIAGNOSTICS_EVENT.selector(), STACK_ID, "crn", new DiagnosticParameters());
    DiagnosticsFlowException result = assertThrows(DiagnosticsFlowException.class, () -> underTest.doAccept(new HandlerEvent<>(new Event<>(event))));
    // THEN
    assertTrue(result.getMessage().contains("Error during diagnostics operation: Salt validation"));
}
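The snippets on this page reference mocks and an underTest field that the aggregation does not show. A minimal fixture sketch for the test above, assuming a JUnit 5 + Mockito setup; the class wiring and the STACK_ID value mirror the snippet but are assumptions, not the project's actual source:

// Hypothetical fixture sketch; the wiring is assumed, not taken from cloudbreak.
@ExtendWith(MockitoExtension.class)
public class DiagnosticsSaltValidationHandlerTest {

    private static final Long STACK_ID = 1L;

    @Mock
    private DiagnosticsOperationsService diagnosticsOperationsService;

    @InjectMocks
    private DiagnosticsSaltValidationHandler underTest;

    // testDoAcceptOnError() from the snippet above would live here
}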
From class CreateFullBackupHandlerTest, method testBackupSuccessful:
@Test
public void testBackupSuccessful() throws CloudbreakOrchestratorFailedException {
    // GIVEN a stack with two nodes and a primary gateway
    Stack stack = mock(Stack.class);
    when(stackService.getByIdWithListsInTransaction(2L)).thenReturn(stack);
    Set<InstanceMetaData> metaDataSet = Set.of();
    when(stack.getNotDeletedInstanceMetaDataSet()).thenReturn(metaDataSet);
    Node node1 = createNode("node1");
    Node node2 = createNode("node2");
    Set<Node> nodes = Set.of(node1, node2);
    when(nodeService.mapInstancesToNodes(metaDataSet)).thenReturn(nodes);
    GatewayConfig gatewayConfig = mock(GatewayConfig.class);
    when(gatewayConfigService.getPrimaryGatewayConfig(stack)).thenReturn(gatewayConfig);
    // WHEN the full backup event is handled
    StackEvent result = (StackEvent) underTest.doAccept(new HandlerEvent<>(new Event<>(new CreateFullBackupEvent(2L))));
    // THEN the backup state is run once per node, each invocation targeting a single host
    ArgumentCaptor<OrchestratorStateParams> captor = ArgumentCaptor.forClass(OrchestratorStateParams.class);
    verify(orchestrator, times(2)).runOrchestratorState(captor.capture());
    List<OrchestratorStateParams> stateParams = captor.getAllValues();
    assertThat(stateParams, everyItem(allOf(hasProperty("primaryGatewayConfig", is(gatewayConfig)), hasProperty("state", is("freeipa.backup-full")), hasProperty("allNodes", is(nodes)))));
    assertThat(stateParams, hasItem(hasProperty("targetHostNames", allOf(hasItem("node1"), iterableWithSize(1)))));
    assertThat(stateParams, hasItem(hasProperty("targetHostNames", allOf(hasItem("node2"), iterableWithSize(1)))));
    assertEquals(2L, result.getResourceId());
    assertEquals(FullBackupEvent.FULL_BACKUP_SUCCESSFUL_EVENT.event(), result.selector());
}
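The test above captures every runOrchestratorState invocation and asserts on each captured OrchestratorStateParams. A self-contained illustration of that captor pattern, using a hypothetical StateRunner interface instead of cloudbreak classes:

// Illustration of Mockito's ArgumentCaptor collecting arguments across multiple invocations.
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;

import java.util.List;
import org.junit.jupiter.api.Test;
import org.mockito.ArgumentCaptor;

class CaptorPatternExampleTest {

    interface StateRunner {
        void runState(String state);
    }

    @Test
    void capturesEveryInvocation() {
        StateRunner runner = mock(StateRunner.class);
        runner.runState("freeipa.backup-full");
        runner.runState("freeipa.backup-full");

        // capture() records the argument of each matched call; getAllValues() returns them in order
        ArgumentCaptor<String> captor = ArgumentCaptor.forClass(String.class);
        verify(runner, times(2)).runState(captor.capture());

        List<String> states = captor.getAllValues();
        assertEquals(2, states.size());
        states.forEach(state -> assertEquals("freeipa.backup-full", state));
    }
}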
From class StopStartDownscaleDecommissionViaCMHandlerTest, method testErrorFromCmCommission:
@Test
void testErrorFromCmCommission() {
    // GIVEN five instances to decommission, all of which are collected by CM
    int instancesToDecommissionCount = 5;
    int expectedInstanceToCollectCount = 5;
    int expectedInstancesDecommissionedCount = 5;
    List<InstanceMetaData> instancesToDecommission = getInstancesToDecommission(instancesToDecommissionCount);
    HostGroup hostGroup = createHostGroup(instancesToDecommission);
    Map<String, InstanceMetaData> collected = instancesToDecommission.stream().limit(expectedInstanceToCollectCount).collect(Collectors.toMap(i -> i.getDiscoveryFQDN(), i -> i));
    List<InstanceMetaData> decommissionedMetadataList = collected.values().stream().limit(expectedInstancesDecommissionedCount).collect(Collectors.toList());
    Set<String> fqdnsDecommissioned = decommissionedMetadataList.stream().map(InstanceMetaData::getDiscoveryFQDN).collect(Collectors.toUnmodifiableSet());
    Set<Long> instanceIdsToDecommission = instancesToDecommission.stream().map(InstanceMetaData::getPrivateId).collect(Collectors.toUnmodifiableSet());
    Set<String> hostnamesToDecommission = instancesToDecommission.stream().map(InstanceMetaData::getDiscoveryFQDN).collect(Collectors.toUnmodifiableSet());
    setupAdditionalMocks(hostGroup, instancesToDecommission, collected, fqdnsDecommissioned);
    // AND the CM decommission call fails
    when(clusterDecomissionService.decommissionClusterNodesStopStart(eq(collected), anyLong())).thenThrow(new RuntimeException("decommissionHostsError"));
    // WHEN the decommission request is handled
    StopStartDownscaleDecommissionViaCMRequest request = new StopStartDownscaleDecommissionViaCMRequest(1L, INSTANCE_GROUP_NAME, instanceIdsToDecommission);
    HandlerEvent handlerEvent = new HandlerEvent(Event.wrap(request));
    Selectable selectable = underTest.doAccept(handlerEvent);
    // THEN a failed result is returned with no decommissioned hosts and the error propagated
    verify(clusterDecomissionService).collectHostsToRemove(eq(hostGroup), eq(hostnamesToDecommission));
    verify(clusterDecomissionService).decommissionClusterNodesStopStart(eq(collected), anyLong());
    assertThat(selectable).isInstanceOf(StopStartDownscaleDecommissionViaCMResult.class);
    StopStartDownscaleDecommissionViaCMResult result = (StopStartDownscaleDecommissionViaCMResult) selectable;
    assertThat(result.getNotDecommissionedHostFqdns()).hasSize(0);
    assertThat(result.getDecommissionedHostFqdns()).hasSize(0);
    assertThat(result.getErrorDetails().getMessage()).isEqualTo("decommissionHostsError");
    assertThat(result.getStatus()).isEqualTo(EventStatus.FAILED);
    assertThat(result.selector()).isEqualTo("STOPSTARTDOWNSCALEDECOMMISSIONVIACMRESULT_ERROR");
    verifyNoMoreInteractions(instanceMetaDataService);
    verifyNoMoreInteractions(flowMessageService);
    verifyNoMoreInteractions(clusterDecomissionService);
}
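The selector asserted at the end matches the common cloudbreak convention of deriving event selectors from the result class name, with failures suffixed. A tiny sketch of that naming rule; the helper below is illustrative, not the project's actual selector utility:

// Illustrative helper showing how the asserted selector string can be derived;
// cloudbreak's own selector utilities may differ in detail.
static String failureSelector(Class<?> resultClass) {
    return resultClass.getSimpleName().toUpperCase() + "_ERROR";
}

// failureSelector(StopStartDownscaleDecommissionViaCMResult.class)
//   -> "STOPSTARTDOWNSCALEDECOMMISSIONVIACMRESULT_ERROR"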
From class StopStartDownscaleDecommissionViaCMHandlerTest, method testCollectDecommissionCombinationsInternal:
private void testCollectDecommissionCombinationsInternal(int instancesToDecommissionCount, int expectedInstanceToCollectCount, int expectedInstancesDecommissionedCount) {
    // GIVEN the requested number of instances, of which only a subset is collected and decommissioned
    List<InstanceMetaData> instancesToDecommission = getInstancesToDecommission(instancesToDecommissionCount);
    HostGroup hostGroup = createHostGroup(instancesToDecommission);
    Map<String, InstanceMetaData> collected = instancesToDecommission.stream().limit(expectedInstanceToCollectCount).collect(Collectors.toMap(i -> i.getDiscoveryFQDN(), i -> i));
    List<InstanceMetaData> decommissionedMetadataList = collected.values().stream().limit(expectedInstancesDecommissionedCount).collect(Collectors.toList());
    Set<String> fqdnsDecommissioned = decommissionedMetadataList.stream().map(InstanceMetaData::getDiscoveryFQDN).collect(Collectors.toUnmodifiableSet());
    setupAdditionalMocks(hostGroup, instancesToDecommission, collected, fqdnsDecommissioned);
    Set<Long> instanceIdsToDecommission = instancesToDecommission.stream().map(InstanceMetaData::getPrivateId).collect(Collectors.toUnmodifiableSet());
    Set<String> hostnamesToDecommission = instancesToDecommission.stream().map(InstanceMetaData::getDiscoveryFQDN).collect(Collectors.toUnmodifiableSet());
    // WHEN the decommission request is handled
    StopStartDownscaleDecommissionViaCMRequest request = new StopStartDownscaleDecommissionViaCMRequest(1L, INSTANCE_GROUP_NAME, instanceIdsToDecommission);
    HandlerEvent handlerEvent = new HandlerEvent(Event.wrap(request));
    Selectable selectable = underTest.doAccept(handlerEvent);
    // THEN the result reflects the decommissioned and not-decommissioned hosts
    assertThat(selectable).isInstanceOf(StopStartDownscaleDecommissionViaCMResult.class);
    StopStartDownscaleDecommissionViaCMResult result = (StopStartDownscaleDecommissionViaCMResult) selectable;
    assertThat(result.getDecommissionedHostFqdns()).hasSize(expectedInstancesDecommissionedCount);
    assertThat(result.getNotDecommissionedHostFqdns()).hasSize(instancesToDecommissionCount - expectedInstancesDecommissionedCount);
    // AND each decommissioned instance is marked DECOMMISSIONED
    for (InstanceMetaData instanceMetaData : decommissionedMetadataList) {
        verify(instanceMetaDataService).updateInstanceStatus(eq(instanceMetaData), eq(InstanceStatus.DECOMMISSIONED), anyString());
    }
    verifyNoMoreInteractions(instanceMetaDataService);
    verify(clusterDecomissionService).collectHostsToRemove(eq(hostGroup), eq(hostnamesToDecommission));
    verify(clusterDecomissionService).decommissionClusterNodesStopStart(eq(collected), anyLong());
    // AND maintenance-mode events are fired and CM maintenance mode is entered for the decommissioned hosts
    verify(flowMessageService).fireEventAndLog(eq(STACK_ID), eq(UPDATE_IN_PROGRESS.name()), eq(CLUSTER_SCALING_STOPSTART_DOWNSCALE_ENTERINGCMMAINTMODE), eq(String.valueOf(fqdnsDecommissioned.size())));
    verify(clusterDecomissionService).enterMaintenanceMode(eq(fqdnsDecommissioned));
    verify(flowMessageService).fireEventAndLog(eq(STACK_ID), eq(UPDATE_IN_PROGRESS.name()), eq(CLUSTER_SCALING_STOPSTART_DOWNSCALE_ENTEREDCMMAINTMODE), eq(String.valueOf(fqdnsDecommissioned.size())));
    verifyNoMoreInteractions(flowMessageService);
    verifyNoMoreInteractions(clusterDecomissionService);
}
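The helper above is parameterized over the number of instances requested, collected, and actually decommissioned. The concrete @Test methods that drive it are not part of this snippet; callers would presumably look like the following, with hypothetical names and count combinations:

// Hypothetical callers; the project's actual test names and parameter combinations may differ.
@Test
void testAllHostsCollectedAndDecommissioned() {
    testCollectDecommissionCombinationsInternal(5, 5, 5);
}

@Test
void testSomeHostsMissingFromCm() {
    testCollectDecommissionCombinationsInternal(5, 3, 3);
}

@Test
void testSomeHostsFailToDecommission() {
    testCollectDecommissionCombinationsInternal(5, 5, 3);
}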
From class StopStartUpscaleCommissionViaCMHandlerTest, method testCmHasMissingNodes:
@Test
void testCmHasMissingNodes() throws ClusterClientInitException {
    // GIVEN five instances to commission, one of which is not known to CM
    int commissionInstanceCount = 5;
    List<InstanceMetaData> instancesToCommission = createInstancesToCommission(commissionInstanceCount);
    HostGroup hostGroup = createHostGroup(instancesToCommission);
    Set<String> hostNames = instancesToCommission.stream().map(i -> i.getDiscoveryFQDN()).collect(Collectors.toSet());
    Map<String, InstanceMetaData> cmAvailableHosts = instancesToCommission.stream().limit(commissionInstanceCount - 1).collect(Collectors.toMap(i -> i.getDiscoveryFQDN(), i -> i));
    Set<String> recommissionedFqdns = cmAvailableHosts.keySet().stream().collect(Collectors.toUnmodifiableSet());
    setupPerTestMocks(hostGroup, hostNames, cmAvailableHosts, recommissionedFqdns);
    // WHEN the commission request is handled
    StopStartUpscaleCommissionViaCMRequest request = new StopStartUpscaleCommissionViaCMRequest(1L, INSTANCE_GROUP_NAME, instancesToCommission, Collections.emptyList());
    HandlerEvent handlerEvent = new HandlerEvent(Event.wrap(request));
    Selectable selectable = underTest.doAccept(handlerEvent);
    // THEN the missing node is reported as not recommissioned and the rest succeed
    assertThat(selectable).isInstanceOf(StopStartUpscaleCommissionViaCMResult.class);
    StopStartUpscaleCommissionViaCMResult result = (StopStartUpscaleCommissionViaCMResult) selectable;
    assertThat(result.getNotRecommissionedFqdns()).hasSize(1);
    assertThat(result.getSuccessfullyCommissionedFqdns()).hasSize(commissionInstanceCount - 1);
}
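The createInstancesToCommission and createHostGroup helpers are not shown in the snippet. A plausible sketch of the former, assuming InstanceMetaData exposes plain setters for the private id and discovery FQDN; the real helper in the project may differ:

// Assumed helper shape; setter names and FQDN format are illustrative.
private List<InstanceMetaData> createInstancesToCommission(int count) {
    List<InstanceMetaData> instances = new ArrayList<>();
    for (int i = 0; i < count; i++) {
        InstanceMetaData instance = new InstanceMetaData();
        instance.setPrivateId((long) i);
        instance.setDiscoveryFQDN("host-" + i + ".example.com");
        instances.add(instance);
    }
    return instances;
}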