Use of org.apache.hadoop.hbase.ServerMetrics in project hbase by apache.
In class TestRegionHDFSBlockLocationFinder, method testGetTopBlockLocations:
@Test
public void testGetTopBlockLocations() {
  // Register ten live servers; the metrics values themselves are not used here.
  Map<ServerName, ServerMetrics> serverMetrics = new HashMap<>();
  for (int i = 0; i < 10; i++) {
    ServerName sn = ServerName.valueOf("host-" + i, 12345, 12345);
    serverMetrics.put(sn, null);
  }
  ClusterMetrics metrics = mock(ClusterMetrics.class);
  when(metrics.getLiveServerMetrics()).thenReturn(serverMetrics);
  finder.setClusterMetrics(metrics);
  for (RegionInfo region : REGIONS) {
    List<ServerName> servers = finder.getTopBlockLocations(region);
    long previousWeight = Long.MAX_VALUE;
    HDFSBlocksDistribution hbd = generate(region);
    // The returned servers must be ordered by non-increasing HDFS block weight.
    for (ServerName server : servers) {
      long weight = hbd.getWeight(server.getHostname());
      assertTrue(weight <= previousWeight);
      previousWeight = weight;
    }
  }
}
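The ordering that the test checks comes from HDFSBlocksDistribution, which accumulates per-host block weights. Below is a minimal sketch, not part of the test, assuming the HDFSBlocksDistribution API used elsewhere in HBase (addHostsAndBlockWeight, getWeight, getTopHosts); the host names and weights are illustrative.

// A minimal sketch showing how HDFSBlocksDistribution accumulates per-host
// weights; host names and block sizes here are made up for illustration.
import java.util.List;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;

public class BlockWeightSketch {
  public static void main(String[] args) {
    HDFSBlocksDistribution hbd = new HDFSBlocksDistribution();
    // Each call adds one block's size to every host that stores a replica.
    hbd.addHostsAndBlockWeight(new String[] { "host-1", "host-2" }, 128L);
    hbd.addHostsAndBlockWeight(new String[] { "host-1" }, 64L);

    System.out.println(hbd.getWeight("host-1")); // 192
    System.out.println(hbd.getWeight("host-2")); // 128

    // Hosts ordered by descending weight: the same ordering the test
    // expects from getTopBlockLocations().
    List<String> topHosts = hbd.getTopHosts();
    System.out.println(topHosts); // [host-1, host-2]
  }
}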
Use of org.apache.hadoop.hbase.ServerMetrics in project hbase by apache.
In class TestStochasticLoadBalancer, method testKeepRegionLoad:
@Test
public void testKeepRegionLoad() throws Exception {
  ServerName sn = ServerName.valueOf("test:8080", 100);
  int numClusterStatusToAdd = 20000;
  for (int i = 0; i < numClusterStatusToAdd; i++) {
    // Each iteration reports a single region whose store file size grows with i.
    ServerMetrics sl = mock(ServerMetrics.class);
    RegionMetrics rl = mock(RegionMetrics.class);
    when(rl.getReadRequestCount()).thenReturn(0L);
    when(rl.getCpRequestCount()).thenReturn(0L);
    when(rl.getWriteRequestCount()).thenReturn(0L);
    when(rl.getMemStoreSize()).thenReturn(Size.ZERO);
    when(rl.getStoreFileSize()).thenReturn(new Size(i, Size.Unit.MEGABYTE));
    Map<byte[], RegionMetrics> regionLoadMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    regionLoadMap.put(Bytes.toBytes(REGION_KEY), rl);
    when(sl.getRegionMetrics()).thenReturn(regionLoadMap);
    ClusterMetrics clusterStatus = mock(ClusterMetrics.class);
    Map<ServerName, ServerMetrics> serverMetricsMap = new TreeMap<>();
    serverMetricsMap.put(sn, sl);
    when(clusterStatus.getLiveServerMetrics()).thenReturn(serverMetricsMap);
    loadBalancer.updateClusterMetrics(clusterStatus);
  }
  String regionNameAsString = RegionInfo.getRegionNameAsString(Bytes.toBytes(REGION_KEY));
  assertTrue(loadBalancer.loads.get(regionNameAsString) != null);
  // The balancer retains only the most recent 15 region loads per region.
  assertTrue(loadBalancer.loads.get(regionNameAsString).size() == 15);
  Queue<BalancerRegionLoad> loads = loadBalancer.loads.get(regionNameAsString);
  int i = 0;
  while (loads.size() > 0) {
    BalancerRegionLoad rl = loads.remove();
    // The queue holds the last 15 store file sizes, in insertion order.
    assertEquals(i + (numClusterStatusToAdd - 15), rl.getStorefileSizeMB());
    i++;
  }
}
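The test builds its ClusterMetrics and ServerMetrics with Mockito. The same shape can also be built with the public metrics builders; the sketch below is illustrative only and assumes the ClusterMetricsBuilder, ServerMetricsBuilder, and RegionMetricsBuilder setters behave as their names suggest.

// A hedged sketch (not part of the test) of building one-server, one-region
// metrics with the builders instead of Mockito mocks; values are illustrative.
import java.util.Collections;
import java.util.Map;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ClusterMetricsBuilder;
import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.RegionMetricsBuilder;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.ServerMetricsBuilder;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.Size;

public class MetricsBuilderSketch {
  public static ClusterMetrics oneServerOneRegion(ServerName sn, byte[] regionName, int sizeMb) {
    RegionMetrics rm = RegionMetricsBuilder.newBuilder(regionName)
        .setStoreFileSize(new Size(sizeMb, Size.Unit.MEGABYTE))
        .build();
    ServerMetrics sm = ServerMetricsBuilder.newBuilder(sn)
        .setRegionMetrics(Collections.singletonList(rm))
        .build();
    Map<ServerName, ServerMetrics> live = Collections.singletonMap(sn, sm);
    return ClusterMetricsBuilder.newBuilder().setLiveServerMetrics(live).build();
  }
}

Either way, each updateClusterMetrics call appends one BalancerRegionLoad per region, and only a bounded history (15 entries in this test) is kept, which is exactly what the assertions verify.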
Use of org.apache.hadoop.hbase.ServerMetrics in project hbase by apache.
In class RegionServerModeStrategy, method getRecords:
@Override
public List<Record> getRecords(ClusterMetrics clusterMetrics, List<RecordFilter> pushDownFilters) {
  // Get records from RegionModeStrategy and add REGION_COUNT field
  List<Record> records = regionModeStrategy.selectModeFieldsAndAddCountField(fieldInfos,
    regionModeStrategy.getRecords(clusterMetrics, pushDownFilters), Field.REGION_COUNT);
  // Aggregation by LONG_REGION_SERVER field
  Map<String, Record> retMap =
    ModeStrategyUtils.aggregateRecords(records, Field.LONG_REGION_SERVER).stream()
      .collect(Collectors.toMap(r -> r.get(Field.LONG_REGION_SERVER).asString(), r -> r));
  // Add USED_HEAP_SIZE field and MAX_HEAP_SIZE field
  for (ServerMetrics sm : clusterMetrics.getLiveServerMetrics().values()) {
    Record record = retMap.get(sm.getServerName().getServerName());
    if (record == null) {
      continue;
    }
    Record newRecord = Record.builder().putAll(record)
      .put(Field.USED_HEAP_SIZE, sm.getUsedHeapSize())
      .put(Field.MAX_HEAP_SIZE, sm.getMaxHeapSize())
      .build();
    retMap.put(sm.getServerName().getServerName(), newRecord);
  }
  return new ArrayList<>(retMap.values());
}
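The heap fields this mode strategy attaches come straight from ServerMetrics. A minimal client-side sketch, assuming a running cluster reachable with default client configuration, of reading the same per-server heap figures via the Admin API:

// Prints used/max heap per live region server from ClusterMetrics.
import java.util.EnumSet;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.Size;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class HeapSizePerServer {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Fetch only the live-server portion of the cluster metrics.
      ClusterMetrics cm = admin.getClusterMetrics(EnumSet.of(ClusterMetrics.Option.LIVE_SERVERS));
      for (Map.Entry<ServerName, ServerMetrics> e : cm.getLiveServerMetrics().entrySet()) {
        ServerMetrics sm = e.getValue();
        System.out.printf("%s used=%.0fMB max=%.0fMB%n",
            e.getKey().getServerName(),
            sm.getUsedHeapSize().get(Size.Unit.MEGABYTE),
            sm.getMaxHeapSize().get(Size.Unit.MEGABYTE));
      }
    }
  }
}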
Use of org.apache.hadoop.hbase.ServerMetrics in project hbase by apache.
In class TestRegionsRecoveryChore, method getClusterMetrics:
private static ClusterMetrics getClusterMetrics(int noOfLiveServer) {
  ClusterMetrics clusterMetrics = new ClusterMetrics() {
    @Nullable
    @Override
    public String getHBaseVersion() {
      return null;
    }
    @Override
    public List<ServerName> getDeadServerNames() {
      return null;
    }
    @Override
    public Map<ServerName, ServerMetrics> getLiveServerMetrics() {
      Map<ServerName, ServerMetrics> liveServerMetrics = new HashMap<>();
      for (int i = 0; i < noOfLiveServer; i++) {
        ServerName serverName = ServerName.valueOf("rs_" + i, 16010, 12345);
        liveServerMetrics.put(serverName, TestRegionsRecoveryChore.getServerMetrics(i + 3));
      }
      return liveServerMetrics;
    }
    @Nullable
    @Override
    public ServerName getMasterName() {
      return null;
    }
    @Override
    public List<ServerName> getBackupMasterNames() {
      return null;
    }
    @Override
    public List<RegionState> getRegionStatesInTransition() {
      return null;
    }
    @Nullable
    @Override
    public String getClusterId() {
      return null;
    }
    @Override
    public List<String> getMasterCoprocessorNames() {
      return null;
    }
    @Nullable
    @Override
    public Boolean getBalancerOn() {
      return null;
    }
    @Override
    public int getMasterInfoPort() {
      return 0;
    }
    @Override
    public List<ServerName> getServersName() {
      return null;
    }
    @Override
    public Map<TableName, RegionStatesCount> getTableRegionStatesCount() {
      return null;
    }
    @Override
    public List<ServerTask> getMasterTasks() {
      return null;
    }
  };
  return clusterMetrics;
}
Use of org.apache.hadoop.hbase.ServerMetrics in project hbase by apache.
In class TestRegionsRecoveryChore, method testRegionReopensWithoutStoreRefConfig:
@Test
public void testRegionReopensWithoutStoreRefConfig() throws Exception {
  regionNo = 0;
  ClusterMetrics clusterMetrics = TestRegionsRecoveryChore.getClusterMetrics(10);
  final Map<ServerName, ServerMetrics> serverMetricsMap = clusterMetrics.getLiveServerMetrics();
  LOG.debug("All Region Names with refCount....");
  for (ServerMetrics serverMetrics : serverMetricsMap.values()) {
    Map<byte[], RegionMetrics> regionMetricsMap = serverMetrics.getRegionMetrics();
    for (RegionMetrics regionMetrics : regionMetricsMap.values()) {
      LOG.debug("name: " + new String(regionMetrics.getRegionName()) + " refCount: "
        + regionMetrics.getStoreRefCount());
    }
  }
  Mockito.when(hMaster.getClusterMetrics()).thenReturn(clusterMetrics);
  Mockito.when(hMaster.getAssignmentManager()).thenReturn(assignmentManager);
  for (byte[] regionName : REGION_NAME_LIST) {
    Mockito.when(assignmentManager.getRegionInfo(regionName))
      .thenReturn(TestRegionsRecoveryChore.getRegionInfo(regionName));
  }
  Stoppable stoppable = new StoppableImplementation();
  Configuration configuration = getCustomConf();
  configuration.unset("hbase.regions.recovery.store.file.ref.count");
  regionsRecoveryChore = new RegionsRecoveryChore(stoppable, configuration, hMaster);
  regionsRecoveryChore.chore();
  // Verify that by default the feature is turned off so no regions should be reopened
  Mockito.verify(hMaster, Mockito.times(0)).reopenRegions(Mockito.any(), Mockito.anyList(),
    Mockito.anyLong(), Mockito.anyLong());
  // default maxCompactedStoreFileRefCount is -1 (no regions to be reopened using AM)
  Mockito.verify(hMaster, Mockito.times(0)).getAssignmentManager();
  Mockito.verify(assignmentManager, Mockito.times(0)).getRegionInfo(Mockito.any());
}
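The test unsets "hbase.regions.recovery.store.file.ref.count" to show that the chore does nothing by default. As a counterpart, a short sketch of the configuration that would enable the feature; per the test's own comments the default is -1 (disabled), and this sketch only sets the key the test manipulates, it is not taken from the test itself.

// Hypothetical helper: enables store-ref-count based region recovery by
// setting the same key the test unsets to a positive threshold.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RecoveryChoreConfigSketch {
  public static Configuration enableStoreRefCountRecovery(int threshold) {
    Configuration conf = HBaseConfiguration.create();
    // A positive value makes RegionsRecoveryChore consider regions whose
    // store file reference count reaches the threshold for reopening.
    conf.setInt("hbase.regions.recovery.store.file.ref.count", threshold);
    return conf;
  }
}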