Use of com.sequenceiq.flow.core.FlowLogService in project cloudbreak by hortonworks: class HeartbeatServiceTest, method testOneNodeTakesAllFlowsWithTerminationFlowShouldBeDistributed.
@Test
public void testOneNodeTakesAllFlowsWithTerminationFlowShouldBeDistributed() {
    List<Node> clusterNodes = getClusterNodes();
    // myself
    clusterNodes.get(0).setLastUpdated(200_000L);
    // set all nodes to failed except myself
    for (int i = 1; i < clusterNodes.size(); i++) {
        Node node = clusterNodes.get(i);
        node.setLastUpdated(50_000L);
    }
    when(nodeService.findAll()).thenReturn(clusterNodes);
    when(clock.getCurrentTimeMillis()).thenReturn(200_000L);
    // all flows that need to be re-distributed
    List<FlowLog> node1FlowLogs = getFlowLogs(2, 5000);
    node1FlowLogs.forEach(fl -> fl.setFlowType(ClassValue.of(HelloWorldFlowConfig.class)));
    List<String> suspendedFlows = node1FlowLogs.stream().map(FlowLog::getFlowId).distinct().collect(Collectors.toList());
    when(flowLogService.findAllByCloudbreakNodeId(NODE_1_ID)).thenReturn(new HashSet<>(node1FlowLogs));
    Set<FlowLog> node2FlowLogs = new HashSet<>(getFlowLogs(3, 3000));
    node2FlowLogs.forEach(fl -> fl.setFlowType(ClassValue.of(HelloWorldFlowConfig.class)));
    suspendedFlows.addAll(node2FlowLogs.stream().map(FlowLog::getFlowId).distinct().collect(Collectors.toList()));
    when(flowLogService.findAllByCloudbreakNodeId(NODE_2_ID)).thenReturn(node2FlowLogs);
    Map<Node, List<String>> distribution = new HashMap<>();
    distribution.computeIfAbsent(clusterNodes.get(0), v -> new ArrayList<>())
            .addAll(Arrays.asList(suspendedFlows.get(0), suspendedFlows.get(1), suspendedFlows.get(2), suspendedFlows.get(3), suspendedFlows.get(4)));
    when(flowDistributor.distribute(any(), any())).thenReturn(distribution);
    Set<FlowLog> myNewFlowLogs = new HashSet<>();
    myNewFlowLogs.addAll(node1FlowLogs);
    myNewFlowLogs.addAll(node2FlowLogs);
    when(flowLogService.findAllByCloudbreakNodeId(MY_ID)).thenReturn(myNewFlowLogs);
    when(runningFlows.get(any())).thenReturn(null);
    List<Long> stackIds = myNewFlowLogs.stream().map(FlowLog::getResourceId).distinct().collect(Collectors.toList());
    when(haApplication.getDeletingResources(anySet())).thenReturn(Set.of(stackIds.get(0), stackIds.get(2)));
    doReturn(Collections.singletonList(HelloWorldFlowConfig.class)).when(applicationFlowInformation).getTerminationFlow();

    heartbeatService.scheduledFlowDistribution();

    verify(flowLogService).saveAll(flowLogListCaptor.capture());
    List<FlowLog> updatedFlows = flowLogListCaptor.getValue();
    assertEquals(myNewFlowLogs.size(), updatedFlows.size());
    for (FlowLog updatedFlow : updatedFlows) {
        assertEquals(MY_ID, updatedFlow.getCloudbreakNodeId());
    }
    verify(flow2Handler, times(5)).restartFlow(stringCaptor.capture());
    List<String> allFlowIds = stringCaptor.getAllValues();
    assertEquals(5L, allFlowIds.size());
    for (String flowId : suspendedFlows) {
        assertTrue(allFlowIds.contains(flowId));
    }
}
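The getFlowLogs(flowCount, baseId) helper called above is not shown on this page. A minimal sketch of what such a helper could look like follows; the no-arg FlowLog constructor, the setter names, and the id scheme are assumptions inferred from how the tests consume the returned logs, not the project's actual helper.

private List<FlowLog> getFlowLogs(int flowCount, int baseId) {
    // Hypothetical reconstruction: every flow gets a distinct flowId and resourceId derived
    // from baseId, so the distinct() calls in the tests yield exactly flowCount entries,
    // and each flow carries a few pending FlowLog rows.
    List<FlowLog> flowLogs = new ArrayList<>();
    Random random = new Random();
    for (int flow = 0; flow < flowCount; flow++) {
        String flowId = "flow-" + (baseId + flow);
        Long resourceId = (long) (baseId + flow);
        int logCount = random.nextInt(3) + 1;
        for (int i = 0; i < logCount; i++) {
            FlowLog flowLog = new FlowLog();             // assumed no-arg constructor (JPA entity)
            flowLog.setFlowId(flowId);
            flowLog.setResourceId(resourceId);
            flowLog.setStateStatus(StateStatus.PENDING); // pending, so the flow counts as restartable
            flowLogs.add(flowLog);
        }
    }
    return flowLogs;
}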
Use of com.sequenceiq.flow.core.FlowLogService in project cloudbreak by hortonworks: class HeartbeatServiceTest, method testOneNodeTakesAllFlowsWithInvalidFlows.
@Test
public void testOneNodeTakesAllFlowsWithInvalidFlows() {
    List<Node> clusterNodes = getClusterNodes();
    // myself
    clusterNodes.get(0).setLastUpdated(200_000L);
    // set all nodes to failed except myself
    for (int i = 1; i < clusterNodes.size(); i++) {
        Node node = clusterNodes.get(i);
        node.setLastUpdated(50_000L);
    }
    when(nodeService.findAll()).thenReturn(clusterNodes);
    when(clock.getCurrentTimeMillis()).thenReturn(200_000L);
    // all flows that need to be re-distributed
    List<FlowLog> node1FlowLogs = getFlowLogs(2, 5000);
    List<String> suspendedFlows = node1FlowLogs.stream().map(FlowLog::getFlowId).distinct().collect(Collectors.toList());
    when(flowLogService.findAllByCloudbreakNodeId(NODE_1_ID)).thenReturn(new HashSet<>(node1FlowLogs));
    Set<FlowLog> node2FlowLogs = new HashSet<>(getFlowLogs(3, 3000));
    suspendedFlows.addAll(node2FlowLogs.stream().map(FlowLog::getFlowId).distinct().collect(Collectors.toList()));
    when(flowLogService.findAllByCloudbreakNodeId(NODE_2_ID)).thenReturn(node2FlowLogs);
    Map<Node, List<String>> distribution = new HashMap<>();
    distribution.computeIfAbsent(clusterNodes.get(0), v -> new ArrayList<>())
            .addAll(Arrays.asList(suspendedFlows.get(0), suspendedFlows.get(1), suspendedFlows.get(2), suspendedFlows.get(3), suspendedFlows.get(4)));
    when(flowDistributor.distribute(any(), any())).thenReturn(distribution);
    Set<FlowLog> myNewFlowLogs = new HashSet<>();
    myNewFlowLogs.addAll(node1FlowLogs);
    myNewFlowLogs.addAll(node2FlowLogs);
    when(flowLogService.findAllByCloudbreakNodeId(MY_ID)).thenReturn(myNewFlowLogs);
    when(runningFlows.get(any())).thenReturn(null);
    List<Long> stackIds = myNewFlowLogs.stream().map(FlowLog::getResourceId).distinct().collect(Collectors.toList());
    when(haApplication.getDeletingResources(anySet())).thenReturn(Set.of(stackIds.get(0), stackIds.get(2)));
    doReturn(Collections.singletonList(HelloWorldFlowConfig.class)).when(applicationFlowInformation).getTerminationFlow();
    List<FlowLog> invalidFlowLogs = myNewFlowLogs.stream()
            .filter(fl -> fl.getResourceId().equals(stackIds.get(0)) || fl.getResourceId().equals(stackIds.get(2)))
            .collect(Collectors.toList());

    heartbeatService.scheduledFlowDistribution();

    verify(flowLogService).saveAll(flowLogListCaptor.capture());
    List<FlowLog> updatedFlows = flowLogListCaptor.getValue();
    assertEquals(myNewFlowLogs.size(), updatedFlows.size());
    for (FlowLog updatedFlow : updatedFlows) {
        if (invalidFlowLogs.contains(updatedFlow)) {
            assertEquals(StateStatus.SUCCESSFUL, updatedFlow.getStateStatus());
            assertNull(updatedFlow.getCloudbreakNodeId());
        } else {
            assertEquals(MY_ID, updatedFlow.getCloudbreakNodeId());
        }
    }
    // only the flows that were not finalized as invalid above are expected to be restarted
    Set<String> invalidFlowIds = invalidFlowLogs.stream().map(FlowLog::getFlowId).collect(Collectors.toSet());
    int expectedRestartCount = suspendedFlows.size() - invalidFlowIds.size();
    verify(flow2Handler, times(expectedRestartCount)).restartFlow(stringCaptor.capture());
    List<String> allFlowIds = stringCaptor.getAllValues();
    assertEquals(expectedRestartCount, allFlowIds.size());
    for (String flowId : suspendedFlows) {
        if (invalidFlowIds.contains(flowId)) {
            assertFalse(allFlowIds.contains(flowId));
        } else {
            assertTrue(allFlowIds.contains(flowId));
        }
    }
}
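The difference between this test and the previous one is how flows on resources under deletion are treated: termination flows are still redistributed and restarted, while anything else is classified as invalid and closed out. A rough sketch of that classification is below, built only from the collaborators the mocks expose (haApplication, applicationFlowInformation); the method shape, the isTerminationFlowType predicate, and the ClassValue name accessor are assumptions, and the real HeartbeatService may structure this differently.

// Sketch of the classification the two tests above exercise; returns the flows that
// should not be restarted so the caller can skip them.
private Set<FlowLog> closeInvalidFlows(Set<FlowLog> myFlowLogs, Set<Long> resourceIds) {
    Set<Long> deletingResources = haApplication.getDeletingResources(resourceIds);
    Set<FlowLog> invalidFlowLogs = myFlowLogs.stream()
            // the resource behind the flow is already being deleted...
            .filter(fl -> deletingResources.contains(fl.getResourceId()))
            // ...and the flow is not a termination flow, so restarting it would be pointless
            .filter(fl -> !isTerminationFlowType(fl))
            .collect(Collectors.toSet());
    invalidFlowLogs.forEach(fl -> {
        fl.setStateStatus(StateStatus.SUCCESSFUL); // closed out, matching the assertions above
        fl.setCloudbreakNodeId(null);              // no node takes ownership of it
    });
    return invalidFlowLogs;
}

// Hypothetical comparison of the stored flow type against the configured termination flows
// by class name; the getFlowType().getName() accessor on the flow type value is assumed here.
private boolean isTerminationFlowType(FlowLog flowLog) {
    return flowLog.getFlowType() != null
            && applicationFlowInformation.getTerminationFlow().stream()
                    .anyMatch(terminationFlow -> terminationFlow.getName().equals(flowLog.getFlowType().getName()));
}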
Use of com.sequenceiq.flow.core.FlowLogService in project cloudbreak by hortonworks: class HeartbeatServiceTest, method testOneNodeTakesAllFlows.
@Test
public void testOneNodeTakesAllFlows() {
    List<Node> clusterNodes = getClusterNodes();
    // myself
    clusterNodes.get(0).setLastUpdated(200_000L);
    // set all nodes to failed except myself
    for (int i = 1; i < clusterNodes.size(); i++) {
        Node node = clusterNodes.get(i);
        node.setLastUpdated(50_000L);
    }
    when(nodeService.findAll()).thenReturn(clusterNodes);
    when(clock.getCurrentTimeMillis()).thenReturn(200_000L);
    // all flows that need to be re-distributed
    List<FlowLog> node1FlowLogs = getFlowLogs(2, 5000);
    List<String> suspendedFlows = node1FlowLogs.stream().map(FlowLog::getFlowId).distinct().collect(Collectors.toList());
    when(flowLogService.findAllByCloudbreakNodeId(NODE_1_ID)).thenReturn(new HashSet<>(node1FlowLogs));
    Set<FlowLog> node2FlowLogs = new HashSet<>(getFlowLogs(3, 3000));
    suspendedFlows.addAll(node2FlowLogs.stream().map(FlowLog::getFlowId).distinct().collect(Collectors.toList()));
    when(flowLogService.findAllByCloudbreakNodeId(NODE_2_ID)).thenReturn(node2FlowLogs);
    Map<Node, List<String>> distribution = new HashMap<>();
    distribution.computeIfAbsent(clusterNodes.get(0), v -> new ArrayList<>())
            .addAll(Arrays.asList(suspendedFlows.get(0), suspendedFlows.get(1), suspendedFlows.get(2), suspendedFlows.get(3), suspendedFlows.get(4)));
    when(flowDistributor.distribute(any(), any())).thenReturn(distribution);
    Set<FlowLog> myNewFlowLogs = new HashSet<>();
    myNewFlowLogs.addAll(node1FlowLogs);
    myNewFlowLogs.addAll(node2FlowLogs);
    when(flowLogService.findAllByCloudbreakNodeId(MY_ID)).thenReturn(myNewFlowLogs);
    when(runningFlows.get(any())).thenReturn(null);

    heartbeatService.scheduledFlowDistribution();

    verify(flowLogService).saveAll(flowLogListCaptor.capture());
    List<FlowLog> updatedFlows = flowLogListCaptor.getValue();
    assertEquals(myNewFlowLogs.size(), updatedFlows.size());
    for (FlowLog updatedFlow : updatedFlows) {
        assertEquals(MY_ID, updatedFlow.getCloudbreakNodeId());
    }
    verify(flow2Handler, times(5)).restartFlow(stringCaptor.capture());
    List<String> allFlowIds = stringCaptor.getAllValues();
    assertEquals(5L, allFlowIds.size());
    for (String flowId : suspendedFlows) {
        assertTrue(allFlowIds.contains(flowId));
    }
}
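Taken together, the stubs above outline the takeover sequence that scheduledFlowDistribution() is expected to perform. The sketch below restates it using the same collaborators the test mocks (nodeService, clock, flowLogService, flowDistributor, runningFlows, flow2Handler); the heartbeat threshold constant, the Node.getUuid() and Node.getLastUpdated() accessors, the FlowLog.setCloudbreakNodeId() setter, and the overall method shape are assumptions rather than the real implementation.

private void redistributeFlowsFromFailedNodes() {
    long now = clock.getCurrentTimeMillis();
    List<Node> allNodes = nodeService.findAll();
    // a node whose heartbeat is older than the threshold counts as failed
    List<Node> failedNodes = allNodes.stream()
            .filter(node -> now - node.getLastUpdated() > HEARTBEAT_THRESHOLD_MS) // assumed constant
            .collect(Collectors.toList());
    List<Node> activeNodes = allNodes.stream()
            .filter(node -> !failedNodes.contains(node))
            .collect(Collectors.toList());
    // collect the flow logs stranded on the failed nodes
    Set<FlowLog> orphanedFlowLogs = failedNodes.stream()
            .flatMap(node -> flowLogService.findAllByCloudbreakNodeId(node.getUuid()).stream()) // getUuid() assumed
            .collect(Collectors.toSet());
    List<String> orphanedFlowIds = orphanedFlowLogs.stream()
            .map(FlowLog::getFlowId)
            .distinct()
            .collect(Collectors.toList());
    // spread the orphaned flows over the surviving nodes, record the new owner on each
    // FlowLog and persist the whole batch, which is what saveAll(...) is verified against
    Map<Node, List<String>> distribution = flowDistributor.distribute(orphanedFlowIds, activeNodes);
    distribution.forEach((node, flowIds) -> orphanedFlowLogs.stream()
            .filter(fl -> flowIds.contains(fl.getFlowId()))
            .forEach(fl -> fl.setCloudbreakNodeId(node.getUuid())));
    flowLogService.saveAll(orphanedFlowLogs);
    // restart every flow now owned by this node that is not already running here
    Set<String> myFlowIds = flowLogService.findAllByCloudbreakNodeId(MY_ID).stream()
            .map(FlowLog::getFlowId)
            .collect(Collectors.toSet());
    for (String flowId : myFlowIds) {
        if (runningFlows.get(flowId) == null) {
            flow2Handler.restartFlow(flowId);
        }
    }
}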
Use of com.sequenceiq.flow.core.FlowLogService in project cloudbreak by hortonworks: class HeartbeatServiceTest, method testOneNodeTakesAllFlowsWithCleanup.
@Test
public void testOneNodeTakesAllFlowsWithCleanup() {
    List<Node> clusterNodes = getClusterNodes();
    // myself
    clusterNodes.get(0).setLastUpdated(200_000L);
    // set all nodes to failed except myself
    for (int i = 1; i < clusterNodes.size(); i++) {
        Node node = clusterNodes.get(i);
        node.setLastUpdated(50_000L);
    }
    when(nodeService.findAll()).thenReturn(clusterNodes);
    when(clock.getCurrentTimeMillis()).thenReturn(200_000L);
    // all flows that need to be re-distributed
    List<FlowLog> node1FlowLogs = getFlowLogs(2, 5000);
    List<String> suspendedFlows = node1FlowLogs.stream().map(FlowLog::getFlowId).distinct().collect(Collectors.toList());
    when(flowLogService.findAllByCloudbreakNodeId(NODE_1_ID)).thenReturn(new HashSet<>(node1FlowLogs)).thenReturn(Collections.emptySet());
    Set<FlowLog> node2FlowLogs = new HashSet<>(getFlowLogs(3, 3000));
    suspendedFlows.addAll(node2FlowLogs.stream().map(FlowLog::getFlowId).distinct().collect(Collectors.toList()));
    when(flowLogService.findAllByCloudbreakNodeId(NODE_2_ID)).thenReturn(node2FlowLogs).thenReturn(Collections.emptySet());
    Map<Node, List<String>> distribution = new HashMap<>();
    distribution.computeIfAbsent(clusterNodes.get(0), v -> new ArrayList<>())
            .addAll(Arrays.asList(suspendedFlows.get(0), suspendedFlows.get(1), suspendedFlows.get(2), suspendedFlows.get(3), suspendedFlows.get(4)));
    when(flowDistributor.distribute(any(), any())).thenReturn(distribution);
    Set<FlowLog> myNewFlowLogs = new HashSet<>();
    myNewFlowLogs.addAll(node1FlowLogs);
    myNewFlowLogs.addAll(node2FlowLogs);
    when(flowLogService.findAllByCloudbreakNodeId(MY_ID)).thenReturn(myNewFlowLogs);
    when(runningFlows.get(any())).thenReturn(null);

    heartbeatService.scheduledFlowDistribution();

    verify(flowLogService).saveAll(flowLogListCaptor.capture());
    List<FlowLog> updatedFlows = flowLogListCaptor.getValue();
    assertEquals(myNewFlowLogs.size(), updatedFlows.size());
    for (FlowLog updatedFlow : updatedFlows) {
        assertEquals(MY_ID, updatedFlow.getCloudbreakNodeId());
    }
    verify(flow2Handler, times(5)).restartFlow(stringCaptor.capture());
    List<String> allFlowIds = stringCaptor.getAllValues();
    assertEquals(5L, allFlowIds.size());
    for (String flowId : suspendedFlows) {
        assertTrue(allFlowIds.contains(flowId));
    }
}
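These tests rely on a Mockito fixture that the page does not show. A plausible minimal wiring for the captors and a couple of the mocks is sketched below; the field types are inferred from how the fields are used in the snippets and may not match the real test class exactly.

// Assumed JUnit + Mockito wiring; only the pieces visible in the snippets are sketched.
@InjectMocks
private HeartbeatService heartbeatService;

@Mock
private FlowLogService flowLogService;                    // stubbed per node id, verified via saveAll(...)

@Captor
private ArgumentCaptor<List<FlowLog>> flowLogListCaptor;  // captures the argument passed to saveAll(...)

@Captor
private ArgumentCaptor<String> stringCaptor;              // captures the flow ids passed to restartFlow(...)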