Use of com.sequenceiq.cloudbreak.domain.CloudbreakNode in project cloudbreak by hortonworks.
From the class HeartbeatServiceTest, method testDistributionConcurrency:
@Test
public void testDistributionConcurrency() {
    List<CloudbreakNode> clusterNodes = getClusterNodes();
    // myself
    clusterNodes.get(0).setLastUpdated(200_000);
    // failed node
    clusterNodes.get(1).setLastUpdated(50_000);
    // active node
    clusterNodes.get(2).setLastUpdated(200_000);
    when(cloudbreakNodeRepository.findAll()).thenReturn(clusterNodes);
    when(clock.getCurrentTime()).thenReturn(200_000L);
    // all flows that need to be re-distributed
    List<String> suspendedFlows = new ArrayList<>();
    List<FlowLog> node1FlowLogs = getFlowLogs(3, 5000);
    suspendedFlows.addAll(node1FlowLogs.stream().map(FlowLog::getFlowId).distinct().collect(Collectors.toList()));
    when(flowLogRepository.findAllByCloudbreakNodeId(NODE_1_ID)).thenReturn(new HashSet<>(node1FlowLogs));
    Map<CloudbreakNode, List<String>> distribution = new HashMap<>();
    distribution.computeIfAbsent(clusterNodes.get(0), v -> new ArrayList<>()).addAll(Arrays.asList(suspendedFlows.get(0), suspendedFlows.get(2)));
    distribution.computeIfAbsent(clusterNodes.get(2), v -> new ArrayList<>()).addAll(Collections.singletonList(suspendedFlows.get(1)));
    when(flowDistributor.distribute(any(), any())).thenReturn(distribution);
    Set<FlowLog> myNewFlowLogs = new HashSet<>();
    myNewFlowLogs.addAll(node1FlowLogs.stream().filter(fl -> fl.getFlowId().equalsIgnoreCase(suspendedFlows.get(0))).collect(Collectors.toList()));
    myNewFlowLogs.addAll(node1FlowLogs.stream().filter(fl -> fl.getFlowId().equalsIgnoreCase(suspendedFlows.get(2))).collect(Collectors.toList()));
    when(flowLogRepository.findAllByCloudbreakNodeId(MY_ID)).thenReturn(myNewFlowLogs);
    when(runningFlows.get(any())).thenReturn(null);
    when(flowLogRepository.save(anyCollection())).thenThrow(new OptimisticLockingFailureException("Someone already distributed the flows.."));
    heartbeatService.scheduledFlowDistribution();
    verify(flow2Handler, times(2)).restartFlow(stringCaptor.capture());
    List<String> allFlowIds = stringCaptor.getAllValues();
    assertEquals(2, allFlowIds.size());
    for (FlowLog flowLog : myNewFlowLogs) {
        assertTrue(allFlowIds.contains(flowLog.getFlowId()));
    }
}
Use of com.sequenceiq.cloudbreak.domain.CloudbreakNode in project cloudbreak by hortonworks.
From the class HeartbeatServiceTest, method testOneNodeTakesAllFlowsWithInvalidFlows:
@Test
public void testOneNodeTakesAllFlowsWithInvalidFlows() {
    List<CloudbreakNode> clusterNodes = getClusterNodes();
    // myself
    clusterNodes.get(0).setLastUpdated(200_000);
    // set all nodes to failed except myself
    for (int i = 1; i < clusterNodes.size(); i++) {
        CloudbreakNode node = clusterNodes.get(i);
        node.setLastUpdated(50_000);
    }
    when(cloudbreakNodeRepository.findAll()).thenReturn(clusterNodes);
    when(clock.getCurrentTime()).thenReturn(200_000L);
    // all flows that need to be re-distributed
    List<String> suspendedFlows = new ArrayList<>();
    List<FlowLog> node1FlowLogs = getFlowLogs(2, 5000);
    suspendedFlows.addAll(node1FlowLogs.stream().map(FlowLog::getFlowId).distinct().collect(Collectors.toList()));
    when(flowLogRepository.findAllByCloudbreakNodeId(NODE_1_ID)).thenReturn(new HashSet<>(node1FlowLogs));
    Set<FlowLog> node2FlowLogs = new HashSet<>(getFlowLogs(3, 3000));
    suspendedFlows.addAll(node2FlowLogs.stream().map(FlowLog::getFlowId).distinct().collect(Collectors.toList()));
    when(flowLogRepository.findAllByCloudbreakNodeId(NODE_2_ID)).thenReturn(node2FlowLogs);
    Map<CloudbreakNode, List<String>> distribution = new HashMap<>();
    distribution.computeIfAbsent(clusterNodes.get(0), v -> new ArrayList<>())
            .addAll(Arrays.asList(suspendedFlows.get(0), suspendedFlows.get(1), suspendedFlows.get(2), suspendedFlows.get(3), suspendedFlows.get(4)));
    when(flowDistributor.distribute(any(), any())).thenReturn(distribution);
    Set<FlowLog> myNewFlowLogs = new HashSet<>();
    myNewFlowLogs.addAll(node1FlowLogs);
    myNewFlowLogs.addAll(node2FlowLogs);
    when(flowLogRepository.findAllByCloudbreakNodeId(MY_ID)).thenReturn(myNewFlowLogs);
    when(runningFlows.get(any())).thenReturn(null);
    List<Long> stackIds = myNewFlowLogs.stream().map(FlowLog::getStackId).distinct().collect(Collectors.toList());
    List<Object[]> statusResponse = new ArrayList<>();
    statusResponse.add(new Object[] { stackIds.get(0), Status.DELETE_IN_PROGRESS });
    statusResponse.add(new Object[] { stackIds.get(2), Status.DELETE_IN_PROGRESS });
    when(stackRepository.findStackStatuses(any())).thenReturn(statusResponse);
    List<FlowLog> invalidFlowLogs = myNewFlowLogs.stream()
            .filter(fl -> fl.getStackId().equals(stackIds.get(0)) || fl.getStackId().equals(stackIds.get(2)))
            .collect(Collectors.toList());
    heartbeatService.scheduledFlowDistribution();
    verify(flowLogRepository).save(flowLogListCaptor.capture());
    List<FlowLog> updatedFlows = flowLogListCaptor.getValue();
    assertEquals(myNewFlowLogs.size(), updatedFlows.size());
    for (FlowLog updatedFlow : updatedFlows) {
        if (invalidFlowLogs.contains(updatedFlow)) {
            assertTrue(updatedFlow.getFinalized());
            assertNull(updatedFlow.getCloudbreakNodeId());
        } else {
            assertEquals(MY_ID, updatedFlow.getCloudbreakNodeId());
        }
    }
    verify(flow2Handler, times(5)).restartFlow(stringCaptor.capture());
    List<String> allFlowIds = stringCaptor.getAllValues();
    assertEquals(5, allFlowIds.size());
    for (String flowId : suspendedFlows) {
        assertTrue(allFlowIds.contains(flowId));
    }
}
Use of com.sequenceiq.cloudbreak.domain.CloudbreakNode in project cloudbreak by hortonworks.
From the class HeartbeatServiceTest, the helper method getClusterNodes:
private List<CloudbreakNode> getClusterNodes() {
    List<CloudbreakNode> nodes = new ArrayList<>();
    nodes.add(new CloudbreakNode(MY_ID));
    nodes.add(new CloudbreakNode(NODE_1_ID));
    nodes.add(new CloudbreakNode(NODE_2_ID));
    return nodes;
}
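The tests above also call a getFlowLogs(int, int) helper that is not included in these excerpts. The following is only a minimal, hypothetical sketch of what such a helper could look like; it assumes FlowLog has a no-argument constructor and setters for flowId and stackId (only setFlowType is visible in the snippets), so the real helper in HeartbeatServiceTest may differ.

// Hypothetical sketch of the getFlowLogs helper referenced by the tests above; the real
// implementation is not shown in these excerpts. Assumes FlowLog exposes a no-arg
// constructor plus setFlowId/setStackId, which the snippets do not confirm.
private List<FlowLog> getFlowLogs(int flowCount, int offset) {
    List<FlowLog> flowLogs = new ArrayList<>();
    for (int i = 0; i < flowCount; i++) {
        FlowLog flowLog = new FlowLog();
        // distinct flow and stack ids per flow; the offset keeps flows created for
        // different nodes from colliding
        flowLog.setFlowId("flow-" + (offset + i));
        flowLog.setStackId((long) (offset + i));
        flowLogs.add(flowLog);
    }
    return flowLogs;
}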
Use of com.sequenceiq.cloudbreak.domain.CloudbreakNode in project cloudbreak by hortonworks.
From the class HeartbeatServiceTest, method testOneNodeTakesAllFlowsWithTerminationFlowShouldBeDistributed:
@Test
public void testOneNodeTakesAllFlowsWithTerminationFlowShouldBeDistributed() {
    List<CloudbreakNode> clusterNodes = getClusterNodes();
    // myself
    clusterNodes.get(0).setLastUpdated(200_000);
    // set all nodes to failed except myself
    for (int i = 1; i < clusterNodes.size(); i++) {
        CloudbreakNode node = clusterNodes.get(i);
        node.setLastUpdated(50_000);
    }
    when(cloudbreakNodeRepository.findAll()).thenReturn(clusterNodes);
    when(clock.getCurrentTime()).thenReturn(200_000L);
    // all flows that need to be re-distributed
    List<String> suspendedFlows = new ArrayList<>();
    List<FlowLog> node1FlowLogs = getFlowLogs(2, 5000);
    node1FlowLogs.forEach(fl -> fl.setFlowType(StackTerminationFlowConfig.class));
    suspendedFlows.addAll(node1FlowLogs.stream().map(FlowLog::getFlowId).distinct().collect(Collectors.toList()));
    when(flowLogRepository.findAllByCloudbreakNodeId(NODE_1_ID)).thenReturn(new HashSet<>(node1FlowLogs));
    Set<FlowLog> node2FlowLogs = new HashSet<>(getFlowLogs(3, 3000));
    node2FlowLogs.forEach(fl -> fl.setFlowType(StackTerminationFlowConfig.class));
    suspendedFlows.addAll(node2FlowLogs.stream().map(FlowLog::getFlowId).distinct().collect(Collectors.toList()));
    when(flowLogRepository.findAllByCloudbreakNodeId(NODE_2_ID)).thenReturn(node2FlowLogs);
    Map<CloudbreakNode, List<String>> distribution = new HashMap<>();
    distribution.computeIfAbsent(clusterNodes.get(0), v -> new ArrayList<>())
            .addAll(Arrays.asList(suspendedFlows.get(0), suspendedFlows.get(1), suspendedFlows.get(2), suspendedFlows.get(3), suspendedFlows.get(4)));
    when(flowDistributor.distribute(any(), any())).thenReturn(distribution);
    Set<FlowLog> myNewFlowLogs = new HashSet<>();
    myNewFlowLogs.addAll(node1FlowLogs);
    myNewFlowLogs.addAll(node2FlowLogs);
    when(flowLogRepository.findAllByCloudbreakNodeId(MY_ID)).thenReturn(myNewFlowLogs);
    when(runningFlows.get(any())).thenReturn(null);
    List<Long> stackIds = myNewFlowLogs.stream().map(FlowLog::getStackId).distinct().collect(Collectors.toList());
    List<Object[]> statusResponse = new ArrayList<>();
    statusResponse.add(new Object[] { stackIds.get(0), Status.DELETE_IN_PROGRESS });
    statusResponse.add(new Object[] { stackIds.get(2), Status.DELETE_IN_PROGRESS });
    when(stackRepository.findStackStatuses(any())).thenReturn(statusResponse);
    heartbeatService.scheduledFlowDistribution();
    verify(flowLogRepository).save(flowLogListCaptor.capture());
    List<FlowLog> updatedFlows = flowLogListCaptor.getValue();
    assertEquals(myNewFlowLogs.size(), updatedFlows.size());
    for (FlowLog updatedFlow : updatedFlows) {
        assertEquals(MY_ID, updatedFlow.getCloudbreakNodeId());
    }
    verify(flow2Handler, times(5)).restartFlow(stringCaptor.capture());
    List<String> allFlowIds = stringCaptor.getAllValues();
    assertEquals(5, allFlowIds.size());
    for (String flowId : suspendedFlows) {
        assertTrue(allFlowIds.contains(flowId));
    }
}
Use of com.sequenceiq.cloudbreak.domain.CloudbreakNode in project cloudbreak by hortonworks.
From the class HeartbeatServiceTest, method testOneNodeTakesAllFlows:
@Test
public void testOneNodeTakesAllFlows() {
    List<CloudbreakNode> clusterNodes = getClusterNodes();
    // myself
    clusterNodes.get(0).setLastUpdated(200_000);
    // set all nodes to failed except myself
    for (int i = 1; i < clusterNodes.size(); i++) {
        CloudbreakNode node = clusterNodes.get(i);
        node.setLastUpdated(50_000);
    }
    when(cloudbreakNodeRepository.findAll()).thenReturn(clusterNodes);
    when(clock.getCurrentTime()).thenReturn(200_000L);
    // all flows that need to be re-distributed
    List<String> suspendedFlows = new ArrayList<>();
    List<FlowLog> node1FlowLogs = getFlowLogs(2, 5000);
    suspendedFlows.addAll(node1FlowLogs.stream().map(FlowLog::getFlowId).distinct().collect(Collectors.toList()));
    when(flowLogRepository.findAllByCloudbreakNodeId(NODE_1_ID)).thenReturn(new HashSet<>(node1FlowLogs));
    Set<FlowLog> node2FlowLogs = new HashSet<>(getFlowLogs(3, 3000));
    suspendedFlows.addAll(node2FlowLogs.stream().map(FlowLog::getFlowId).distinct().collect(Collectors.toList()));
    when(flowLogRepository.findAllByCloudbreakNodeId(NODE_2_ID)).thenReturn(node2FlowLogs);
    Map<CloudbreakNode, List<String>> distribution = new HashMap<>();
    distribution.computeIfAbsent(clusterNodes.get(0), v -> new ArrayList<>())
            .addAll(Arrays.asList(suspendedFlows.get(0), suspendedFlows.get(1), suspendedFlows.get(2), suspendedFlows.get(3), suspendedFlows.get(4)));
    when(flowDistributor.distribute(any(), any())).thenReturn(distribution);
    Set<FlowLog> myNewFlowLogs = new HashSet<>();
    myNewFlowLogs.addAll(node1FlowLogs);
    myNewFlowLogs.addAll(node2FlowLogs);
    when(flowLogRepository.findAllByCloudbreakNodeId(MY_ID)).thenReturn(myNewFlowLogs);
    when(runningFlows.get(any())).thenReturn(null);
    heartbeatService.scheduledFlowDistribution();
    verify(flowLogRepository).save(flowLogListCaptor.capture());
    List<FlowLog> updatedFlows = flowLogListCaptor.getValue();
    assertEquals(myNewFlowLogs.size(), updatedFlows.size());
    for (FlowLog updatedFlow : updatedFlows) {
        assertEquals(MY_ID, updatedFlow.getCloudbreakNodeId());
    }
    verify(flow2Handler, times(5)).restartFlow(stringCaptor.capture());
    List<String> allFlowIds = stringCaptor.getAllValues();
    assertEquals(5, allFlowIds.size());
    for (String flowId : suspendedFlows) {
        assertTrue(allFlowIds.contains(flowId));
    }
}
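For completeness, the methods above implicitly rely on a set of mocks, captors, and constants declared elsewhere in HeartbeatServiceTest. The following is only a rough sketch of that fixture, with types inferred from the field names used above; every declaration here is an assumption, not the actual source.

// Assumed sketch of the test fixture implied by the usages above; the concrete
// types and constant values in the real HeartbeatServiceTest may differ.
private static final String MY_ID = "node-self";
private static final String NODE_1_ID = "node-1";
private static final String NODE_2_ID = "node-2";

@Mock
private CloudbreakNodeRepository cloudbreakNodeRepository;

@Mock
private FlowLogRepository flowLogRepository;

@Mock
private StackRepository stackRepository;

@Mock
private Clock clock;

@Mock
private FlowDistributor flowDistributor;

@Mock
private Flow2Handler flow2Handler;

@Mock
private FlowRegister runningFlows;

@Captor
private ArgumentCaptor<String> stringCaptor;

@Captor
private ArgumentCaptor<List<FlowLog>> flowLogListCaptor;

@InjectMocks
private HeartbeatService heartbeatService;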