Use of org.apache.hadoop.hbase.ClusterMetrics in project hbase by Apache.
Class TestRegionHDFSBlockLocationFinder, method getMetricsWithLocality.
private ClusterMetrics getMetricsWithLocality(ServerName serverName, byte[] region,
    float locality) {
  // Build a mocked ClusterMetrics that reports the given data locality for one region on one server.
  RegionMetrics regionMetrics = mock(RegionMetrics.class);
  when(regionMetrics.getDataLocality()).thenReturn(locality);
  Map<byte[], RegionMetrics> regionMetricsMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  regionMetricsMap.put(region, regionMetrics);
  ServerMetrics serverMetrics = mock(ServerMetrics.class);
  when(serverMetrics.getRegionMetrics()).thenReturn(regionMetricsMap);
  Map<ServerName, ServerMetrics> serverMetricsMap = new HashMap<>();
  serverMetricsMap.put(serverName, serverMetrics);
  ClusterMetrics metrics = mock(ClusterMetrics.class);
  when(metrics.getLiveServerMetrics()).thenReturn(serverMetricsMap);
  return metrics;
}
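As a quick sanity check, a minimal sketch (a hypothetical companion test in the same class, assuming the same JUnit/Mockito/HBase imports; the test name and the serverName/region values are illustrative) showing that the mocked locality is reachable through the regular ClusterMetrics accessors:

@Test
public void verifyMockedLocalityIsVisible() {
  // Hypothetical test; values are arbitrary examples.
  ServerName serverName = ServerName.valueOf("host-0", 12345, 12345);
  byte[] region = Bytes.toBytes("region-a");
  ClusterMetrics metrics = getMetricsWithLocality(serverName, region, 0.8f);
  // The mocked value is visible through the public ClusterMetrics API.
  float locality = metrics.getLiveServerMetrics().get(serverName)
    .getRegionMetrics().get(region).getDataLocality();
  assertEquals(0.8f, locality, 0.0001f);
}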
Use of org.apache.hadoop.hbase.ClusterMetrics in project hbase by Apache.
Class TestRegionHDFSBlockLocationFinder, method testMapHostNameToServerName.
@Test
public void testMapHostNameToServerName() throws Exception {
  assertTrue(finder.mapHostNameToServerName(null).isEmpty());

  List<String> hosts = new ArrayList<>();
  for (int i = 0; i < 10; i += 2) {
    hosts.add("host-" + i);
  }
  // These hosts are not in the finder's cluster metrics yet, so nothing maps.
  assertTrue(finder.mapHostNameToServerName(hosts).isEmpty());

  Map<ServerName, ServerMetrics> serverMetrics = new HashMap<>();
  for (int i = 0; i < 10; i += 2) {
    ServerName sn = ServerName.valueOf("host-" + i, 12345, 12345);
    serverMetrics.put(sn, null);
  }
  ClusterMetrics metrics = mock(ClusterMetrics.class);
  when(metrics.getLiveServerMetrics()).thenReturn(serverMetrics);
  finder.setClusterMetrics(metrics);

  List<ServerName> sns = finder.mapHostNameToServerName(hosts);
  assertEquals(5, sns.size());
  for (int i = 0; i < 5; i++) {
    ServerName sn = sns.get(i);
    assertEquals("host-" + (2 * i), sn.getHostname());
    assertEquals(12345, sn.getPort());
    assertEquals(12345, sn.getStartcode());
  }
}
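The mapping under test resolves requested hostnames against the live servers reported by ClusterMetrics. A simplified standalone sketch of that idea (not the actual RegionHDFSBlockLocationFinder code; the method name is hypothetical and the real finder also handles caching and refresh):

// Illustrative only: match each requested hostname against the live-server list.
private List<ServerName> mapHostNameToServerNameSketch(List<String> hosts,
    Map<ServerName, ServerMetrics> liveServers) {
  List<ServerName> result = new ArrayList<>();
  if (hosts == null || liveServers == null) {
    return result;
  }
  for (String host : hosts) {
    for (ServerName sn : liveServers.keySet()) {
      if (sn.getHostname().equalsIgnoreCase(host)) {
        result.add(sn);
      }
    }
  }
  return result;
}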
Use of org.apache.hadoop.hbase.ClusterMetrics in project hbase by Apache.
Class TestRegionHDFSBlockLocationFinder, method testGetTopBlockLocations.
@Test
public void testGetTopBlockLocations() {
  Map<ServerName, ServerMetrics> serverMetrics = new HashMap<>();
  for (int i = 0; i < 10; i++) {
    ServerName sn = ServerName.valueOf("host-" + i, 12345, 12345);
    serverMetrics.put(sn, null);
  }
  ClusterMetrics metrics = mock(ClusterMetrics.class);
  when(metrics.getLiveServerMetrics()).thenReturn(serverMetrics);
  finder.setClusterMetrics(metrics);
  for (RegionInfo region : REGIONS) {
    List<ServerName> servers = finder.getTopBlockLocations(region);
    long previousWeight = Long.MAX_VALUE;
    HDFSBlocksDistribution hbd = generate(region);
    for (ServerName server : servers) {
      long weight = hbd.getWeight(server.getHostname());
      // Servers must come back in non-increasing order of block weight.
      assertTrue(weight <= previousWeight);
      previousWeight = weight;
    }
  }
}
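The loop above asserts that getTopBlockLocations returns servers ordered by descending HDFS block weight. A hedged sketch of that ranking step (a hypothetical helper; it relies only on HDFSBlocksDistribution#getWeight, which the test itself uses):

// Hypothetical helper: rank live servers by the block weight their host holds for a region.
private List<ServerName> rankByBlockWeight(HDFSBlocksDistribution hbd,
    Collection<ServerName> liveServers) {
  List<ServerName> ranked = new ArrayList<>(liveServers);
  // Sort descending by weight, so the best-localized servers come first.
  ranked.sort((a, b) -> Long.compare(hbd.getWeight(b.getHostname()),
    hbd.getWeight(a.getHostname())));
  return ranked;
}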
Use of org.apache.hadoop.hbase.ClusterMetrics in project hbase by Apache.
Class TestStochasticLoadBalancer, method testKeepRegionLoad.
@Test
public void testKeepRegionLoad() throws Exception {
  ServerName sn = ServerName.valueOf("test:8080", 100);
  int numClusterStatusToAdd = 20000;
  for (int i = 0; i < numClusterStatusToAdd; i++) {
    ServerMetrics sl = mock(ServerMetrics.class);

    RegionMetrics rl = mock(RegionMetrics.class);
    when(rl.getReadRequestCount()).thenReturn(0L);
    when(rl.getCpRequestCount()).thenReturn(0L);
    when(rl.getWriteRequestCount()).thenReturn(0L);
    when(rl.getMemStoreSize()).thenReturn(Size.ZERO);
    when(rl.getStoreFileSize()).thenReturn(new Size(i, Size.Unit.MEGABYTE));

    Map<byte[], RegionMetrics> regionLoadMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    regionLoadMap.put(Bytes.toBytes(REGION_KEY), rl);
    when(sl.getRegionMetrics()).thenReturn(regionLoadMap);

    ClusterMetrics clusterStatus = mock(ClusterMetrics.class);
    Map<ServerName, ServerMetrics> serverMetricsMap = new TreeMap<>();
    serverMetricsMap.put(sn, sl);
    when(clusterStatus.getLiveServerMetrics()).thenReturn(serverMetricsMap);
    loadBalancer.updateClusterMetrics(clusterStatus);
  }

  String regionNameAsString = RegionInfo.getRegionNameAsString(Bytes.toBytes(REGION_KEY));
  assertTrue(loadBalancer.loads.get(regionNameAsString) != null);
  // Only the most recent 15 region loads are kept per region.
  assertTrue(loadBalancer.loads.get(regionNameAsString).size() == 15);

  Queue<BalancerRegionLoad> loads = loadBalancer.loads.get(regionNameAsString);
  int i = 0;
  while (loads.size() > 0) {
    BalancerRegionLoad rl = loads.remove();
    assertEquals(i + (numClusterStatusToAdd - 15), rl.getStorefileSizeMB());
    i++;
  }
}
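The assertions depend on the balancer retaining only the most recent region loads per region (15 in this test). A minimal sketch of that bounded-history idea, with hypothetical field and method names and a limit taken from the assertions above:

// Illustrative sketch only; names are hypothetical, not the balancer's actual fields.
private static final int NUM_LOADS_TO_REMEMBER = 15; // limit assumed from the test
private final Map<String, Deque<BalancerRegionLoad>> loadHistory = new HashMap<>();

private void rememberLoad(String regionName, BalancerRegionLoad load) {
  Deque<BalancerRegionLoad> history =
    loadHistory.computeIfAbsent(regionName, k -> new ArrayDeque<>());
  history.add(load);
  while (history.size() > NUM_LOADS_TO_REMEMBER) {
    history.remove(); // drop the oldest load first
  }
}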
Use of org.apache.hadoop.hbase.ClusterMetrics in project hbase by Apache.
Class TopScreenModel, method refreshRecords.
private void refreshRecords(ClusterMetrics clusterMetrics) {
  List<Record> records = currentMode.getRecords(clusterMetrics, pushDownFilters);
  // Filter and sort
  records = records.stream()
    .filter(r -> filters.stream().allMatch(f -> f.execute(r)))
    .sorted((recordLeft, recordRight) -> {
      FieldValue left = recordLeft.get(currentSortField);
      FieldValue right = recordRight.get(currentSortField);
      return (ascendingSort ? 1 : -1) * left.compareTo(right);
    })
    .collect(Collectors.toList());
  this.records = Collections.unmodifiableList(records);
}
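The (ascendingSort ? 1 : -1) factor flips the comparator between ascending and descending order. A self-contained illustration of that trick with plain integers (the demo class name is arbitrary):

import java.util.ArrayList;
import java.util.List;

public class SortDirectionDemo {
  public static void main(String[] args) {
    boolean ascendingSort = false;
    List<Integer> values = new ArrayList<>(List.of(3, 1, 2));
    // Multiplying the comparison result by -1 reverses the sort order.
    values.sort((left, right) -> (ascendingSort ? 1 : -1) * left.compareTo(right));
    System.out.println(values); // [3, 2, 1]; with ascendingSort = true it prints [1, 2, 3]
  }
}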