Use of com.sequenceiq.cloudbreak.domain.HostMetadata in project cloudbreak by hortonworks.
From the class AmbariDecommissionerTest, method getHostMetadata:
private HostMetadata getHostMetadata(String hostname, HostMetadataState state) {
    HostMetadata hostMetadata = new HostMetadata();
    hostMetadata.setHostName(hostname);
    hostMetadata.setHostMetadataState(state);
    return hostMetadata;
}
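For context, a minimal sketch of the entity shape this helper relies on, inferred only from the setters used above and the getters exercised in the tests below; the real com.sequenceiq.cloudbreak.domain.HostMetadata is a JPA entity with additional fields and mapping annotations.
// Sketch only: fields and accessors inferred from this test code,
// not the actual domain class definition.
public class HostMetadata {
    private String hostName;
    private HostMetadataState hostMetadataState;

    public String getHostName() {
        return hostName;
    }

    public void setHostName(String hostName) {
        this.hostName = hostName;
    }

    public HostMetadataState getHostMetadataState() {
        return hostMetadataState;
    }

    public void setHostMetadataState(HostMetadataState hostMetadataState) {
        this.hostMetadataState = hostMetadataState;
    }
}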
Use of com.sequenceiq.cloudbreak.domain.HostMetadata in project cloudbreak by hortonworks.
From the class AmbariDecommissionerTest, method testSelectNodesWhenHasOneUnhealthyNodeButNotInAscendingList:
@Test
public void testSelectNodesWhenHasOneUnhealthyNodeButNotInAscendingList() {
    String hostname1 = "10.0.0.1";
    String hostname2 = "10.0.0.2";
    String hostname3 = "10.0.0.3";
    HostMetadata unhealthyNode1 = getHostMetadata(hostname1, HostMetadataState.UNHEALTHY);
    HostMetadata healthyNode1 = getHostMetadata(hostname2, HostMetadataState.HEALTHY);
    HostMetadata healthyNode2 = getHostMetadata(hostname3, HostMetadataState.HEALTHY);
    List<HostMetadata> nodes = Arrays.asList(unhealthyNode1, healthyNode1, healthyNode2);
    Map<String, Long> ascendingNodes = new LinkedHashMap<>();
    ascendingNodes.put(hostname2, 110L);
    ascendingNodes.put(hostname3, 120L);
    Map<String, Long> selectedNodes = underTest.selectNodes(ascendingNodes, nodes, 1);
    Assert.assertEquals(1, selectedNodes.size());
    Assert.assertTrue(selectedNodes.containsKey(hostname2));
}
Use of com.sequenceiq.cloudbreak.domain.HostMetadata in project cloudbreak by hortonworks.
From the class AmbariDecommissionerTest, method testSelectNodesWhenHasThreeUnhealthyNodeAndShouldSelectTwo:
@Test
public void testSelectNodesWhenHasThreeUnhealthyNodeAndShouldSelectTwo() {
    String hostname1 = "10.0.0.1";
    String hostname2 = "10.0.0.2";
    String hostname3 = "10.0.0.3";
    String hostname4 = "10.0.0.4";
    String hostname5 = "10.0.0.5";
    HostMetadata unhealthyNode1 = getHostMetadata(hostname1, HostMetadataState.UNHEALTHY);
    HostMetadata unhealthyNode2 = getHostMetadata(hostname2, HostMetadataState.UNHEALTHY);
    HostMetadata unhealthyNode3 = getHostMetadata(hostname3, HostMetadataState.UNHEALTHY);
    HostMetadata healthyNode1 = getHostMetadata(hostname4, HostMetadataState.HEALTHY);
    HostMetadata healthyNode2 = getHostMetadata(hostname5, HostMetadataState.HEALTHY);
    List<HostMetadata> nodes = Arrays.asList(unhealthyNode1, unhealthyNode2, unhealthyNode3, healthyNode1, healthyNode2);
    Map<String, Long> ascendingNodes = new LinkedHashMap<>();
    ascendingNodes.put(hostname1, 100L);
    ascendingNodes.put(hostname2, 110L);
    ascendingNodes.put(hostname3, 120L);
    ascendingNodes.put(hostname4, 130L);
    ascendingNodes.put(hostname5, 140L);
    Map<String, Long> selectedNodes = underTest.selectNodes(ascendingNodes, nodes, 2);
    Assert.assertEquals(2, selectedNodes.size());
    Assert.assertTrue(selectedNodes.keySet().containsAll(Arrays.asList(hostname1, hostname2)));
}
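Both tests are consistent with a policy of "take unhealthy hosts first, but only those present in the ascending (usage-ordered) map, then fill the remainder from the map in order". The sketch below reproduces that behaviour for illustration; it is an assumption drawn from the two tests, not the actual AmbariDecommissioner.selectNodes implementation, and it assumes the usual java.util and java.util.stream imports.
// Illustration only: a selection policy consistent with the two tests above.
private Map<String, Long> selectNodesSketch(Map<String, Long> ascendingNodes, List<HostMetadata> nodes, int count) {
    Set<String> unhealthyHosts = nodes.stream()
            .filter(node -> node.getHostMetadataState() == HostMetadataState.UNHEALTHY)
            .map(HostMetadata::getHostName)
            .collect(Collectors.toSet());
    Map<String, Long> selected = new LinkedHashMap<>();
    // First pass: unhealthy hosts that also appear in the ascending map.
    for (Map.Entry<String, Long> entry : ascendingNodes.entrySet()) {
        if (selected.size() < count && unhealthyHosts.contains(entry.getKey())) {
            selected.put(entry.getKey(), entry.getValue());
        }
    }
    // Second pass: fill the remainder in the map's ascending order.
    for (Map.Entry<String, Long> entry : ascendingNodes.entrySet()) {
        if (selected.size() < count && !selected.containsKey(entry.getKey())) {
            selected.put(entry.getKey(), entry.getValue());
        }
    }
    return selected;
}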
Use of com.sequenceiq.cloudbreak.domain.HostMetadata in project cloudbreak by hortonworks.
From the class AmbariClusterHostServiceTypeTest, method testUpdateHostsForDownscaleCannotGoBelowReplication:
@Test
public void testUpdateHostsForDownscaleCannotGoBelowReplication() throws CloudbreakSecuritySetupException {
    HostGroupAdjustmentJson json = new HostGroupAdjustmentJson();
    json.setHostGroup("slave_1");
    json.setScalingAdjustment(-1);
    AmbariClient ambariClient = mock(AmbariClient.class);
    HostMetadata metadata1 = mock(HostMetadata.class);
    HostMetadata metadata2 = mock(HostMetadata.class);
    HostMetadata metadata3 = mock(HostMetadata.class);
    Set<HostMetadata> hostsMetaData = new HashSet<>(asList(metadata1, metadata2, metadata3));
    HostGroup hostGroup = new HostGroup();
    hostGroup.setHostMetadata(hostsMetaData);
    hostGroup.setName("slave_1");
    when(ambariClientProvider.getAmbariClient(any(HttpClientConfig.class), anyInt(), any(Cluster.class))).thenReturn(ambariClient);
    when(ambariClient.getComponentsCategory("multi-node-yarn", "slave_1")).thenReturn(singletonMap("DATANODE", "SLAVE"));
    when(hostGroupService.getByClusterIdAndName(anyLong(), anyString())).thenReturn(hostGroup);
    when(statusToPollGroupConverter.convert(any(Status.class))).thenReturn(PollGroup.POLLABLE);
    underTest.updateHosts(stack.getId(), json);
    verify(flowManager, times(1)).triggerClusterDownscale(stack.getId(), json);
    verify(blueprintValidator, times(1)).validateHostGroupScalingRequest(stack.getCluster().getBlueprint(), hostGroup, json.getScalingAdjustment());
}
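The test name points at the guard the validator is expected to enforce: a downscale must not shrink a DATANODE host group below what replication needs. A hypothetical illustration of such a check follows; the real blueprintValidator.validateHostGroupScalingRequest works against the blueprint and host group, and the method and parameter names below are assumptions made for illustration only.
// Hypothetical guard, for illustration; not Cloudbreak's BlueprintValidator.
private void validateDownscaleSketch(int currentHostCount, int scalingAdjustment, int replicationFactor) {
    int remainingHosts = currentHostCount + scalingAdjustment; // adjustment is negative on downscale
    if (remainingHosts < replicationFactor) {
        throw new IllegalStateException(String.format(
                "Downscale would leave %d host(s), below the replication factor %d", remainingHosts, replicationFactor));
    }
}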
Use of com.sequenceiq.cloudbreak.domain.HostMetadata in project cloudbreak by hortonworks.
From the class AmbariClusterHostServiceTypeTest, method testUpdateHostsForDownscaleFilterOneHost:
@Test
public void testUpdateHostsForDownscaleFilterOneHost() throws CloudbreakSecuritySetupException {
    HostGroupAdjustmentJson json = new HostGroupAdjustmentJson();
    json.setHostGroup("slave_1");
    json.setScalingAdjustment(-1);
    AmbariClient ambariClient = mock(AmbariClient.class);
    HostMetadata metadata1 = mock(HostMetadata.class);
    InstanceMetaData instanceMetaData1 = mock(InstanceMetaData.class);
    HostMetadata metadata2 = mock(HostMetadata.class);
    InstanceMetaData instanceMetaData2 = mock(InstanceMetaData.class);
    HostMetadata metadata3 = mock(HostMetadata.class);
    InstanceMetaData instanceMetaData3 = mock(InstanceMetaData.class);
    HostMetadata metadata4 = mock(HostMetadata.class);
    InstanceMetaData instanceMetaData4 = mock(InstanceMetaData.class);
    Set<HostMetadata> hostsMetaData = new HashSet<>(asList(metadata1, metadata2, metadata3, metadata4));
    HostGroup hostGroup = new HostGroup();
    hostGroup.setHostMetadata(hostsMetaData);
    hostGroup.setName("slave_1");
    Map<String, Map<Long, Long>> dfsSpace = new HashMap<>();
    dfsSpace.put("node2", singletonMap(85_000L, 15_000L));
    dfsSpace.put("node1", singletonMap(90_000L, 10_000L));
    dfsSpace.put("node3", singletonMap(80_000L, 20_000L));
    dfsSpace.put("node4", singletonMap(80_000L, 11_000L));
    when(metadata1.getHostName()).thenReturn("node1");
    when(metadata2.getHostName()).thenReturn("node2");
    when(metadata3.getHostName()).thenReturn("node3");
    when(metadata4.getHostName()).thenReturn("node4");
    when(instanceMetaData1.getAmbariServer()).thenReturn(false);
    when(instanceMetaData2.getAmbariServer()).thenReturn(false);
    when(instanceMetaData3.getAmbariServer()).thenReturn(false);
    when(instanceMetaData4.getAmbariServer()).thenReturn(false);
    when(ambariClientProvider.getAmbariClient(any(HttpClientConfig.class), anyInt(), any(Cluster.class))).thenReturn(ambariClient);
    when(ambariClient.getComponentsCategory("multi-node-yarn", "slave_1")).thenReturn(singletonMap("DATANODE", "SLAVE"));
    when(ambariClient.getBlueprintMap(cluster.getBlueprint().getAmbariName())).thenReturn(singletonMap("slave_1", singletonList("DATANODE")));
    when(ambariClient.getDFSSpace()).thenReturn(dfsSpace);
    when(instanceMetadataRepository.findHostInStack(stack.getId(), "node1")).thenReturn(instanceMetaData1);
    when(instanceMetadataRepository.findHostInStack(stack.getId(), "node2")).thenReturn(instanceMetaData2);
    when(instanceMetadataRepository.findHostInStack(stack.getId(), "node3")).thenReturn(instanceMetaData3);
    when(instanceMetadataRepository.findHostInStack(stack.getId(), "node4")).thenReturn(instanceMetaData4);
    when(hostGroupService.getByClusterIdAndName(anyLong(), anyString())).thenReturn(hostGroup);
    when(statusToPollGroupConverter.convert(any(Status.class))).thenReturn(PollGroup.POLLABLE);
    underTest.updateHosts(stack.getId(), json);
    verify(flowManager, times(1)).triggerClusterDownscale(stack.getId(), json);
    verify(blueprintValidator, times(1)).validateHostGroupScalingRequest(stack.getCluster().getBlueprint(), hostGroup, json.getScalingAdjustment());
}
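The stubbed getDFSSpace() map is what drives which host gets filtered out for the downscale. A minimal sketch of a space-based ordering follows; it assumes the inner singleton map is a remaining-to-used pair per host and that hosts with the least used DFS space are preferred for removal, which may not match the actual AmbariDecommissioner ordering (assumes java.util and java.util.stream imports).
// Sketch only: order hosts by the used-space value of the stubbed DFS map,
// assuming the inner singleton map is (remaining -> used) per host.
private List<String> hostsByLeastUsedDfsSpace(Map<String, Map<Long, Long>> dfsSpace) {
    return dfsSpace.entrySet().stream()
            .sorted(Comparator.comparingLong((Map.Entry<String, Map<Long, Long>> entry) ->
                    entry.getValue().values().iterator().next()))
            .map(Map.Entry::getKey)
            .collect(Collectors.toList());
}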