Use of org.apache.hadoop.hbase.ClusterMetrics in project hbase by apache.
Class: RegionServerModeStrategy, method: getRecords.
@Override
public List<Record> getRecords(ClusterMetrics clusterMetrics,
    List<RecordFilter> pushDownFilters) {
  // Get records from RegionModeStrategy and add REGION_COUNT field
  List<Record> records = regionModeStrategy.selectModeFieldsAndAddCountField(fieldInfos,
    regionModeStrategy.getRecords(clusterMetrics, pushDownFilters), Field.REGION_COUNT);
  // Aggregation by LONG_REGION_SERVER field
  Map<String, Record> retMap =
    ModeStrategyUtils.aggregateRecords(records, Field.LONG_REGION_SERVER).stream()
      .collect(Collectors.toMap(r -> r.get(Field.LONG_REGION_SERVER).asString(), r -> r));
  // Add USED_HEAP_SIZE field and MAX_HEAP_SIZE field
  for (ServerMetrics sm : clusterMetrics.getLiveServerMetrics().values()) {
    Record record = retMap.get(sm.getServerName().getServerName());
    if (record == null) {
      continue;
    }
    Record newRecord = Record.builder().putAll(record)
      .put(Field.USED_HEAP_SIZE, sm.getUsedHeapSize())
      .put(Field.MAX_HEAP_SIZE, sm.getMaxHeapSize())
      .build();
    retMap.put(sm.getServerName().getServerName(), newRecord);
  }
  return new ArrayList<>(retMap.values());
}
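For context, a minimal sketch of how a ClusterMetrics instance carrying live server metrics might be obtained through the public Admin API and consumed the same way the mode strategy does; the class name and the standalone main method are illustrative, not part of the hbtop code above.

  import java.util.EnumSet;
  import java.util.Map;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.ClusterMetrics;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.ServerMetrics;
  import org.apache.hadoop.hbase.ServerName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class LiveServerMetricsExample {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      try (Connection connection = ConnectionFactory.createConnection(conf);
          Admin admin = connection.getAdmin()) {
        // Request only the LIVE_SERVERS portion of the cluster metrics
        ClusterMetrics metrics =
          admin.getClusterMetrics(EnumSet.of(ClusterMetrics.Option.LIVE_SERVERS));
        for (Map.Entry<ServerName, ServerMetrics> entry
            : metrics.getLiveServerMetrics().entrySet()) {
          ServerMetrics sm = entry.getValue();
          // Heap sizes are the same per-server fields the mode strategy reads
          System.out.println(entry.getKey().getServerName()
            + " used=" + sm.getUsedHeapSize() + " max=" + sm.getMaxHeapSize());
        }
      }
    }
  }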
Use of org.apache.hadoop.hbase.ClusterMetrics in project hbase by apache.
Class: TestRegionsRecoveryChore, method: getClusterMetrics.
private static ClusterMetrics getClusterMetrics(int noOfLiveServer) {
  // Stub implementation: only getLiveServerMetrics() returns populated data;
  // every other accessor is left at null/zero.
  ClusterMetrics clusterMetrics = new ClusterMetrics() {
    @Nullable
    @Override
    public String getHBaseVersion() {
      return null;
    }
    @Override
    public List<ServerName> getDeadServerNames() {
      return null;
    }
    @Override
    public Map<ServerName, ServerMetrics> getLiveServerMetrics() {
      Map<ServerName, ServerMetrics> liveServerMetrics = new HashMap<>();
      for (int i = 0; i < noOfLiveServer; i++) {
        ServerName serverName = ServerName.valueOf("rs_" + i, 16010, 12345);
        liveServerMetrics.put(serverName, TestRegionsRecoveryChore.getServerMetrics(i + 3));
      }
      return liveServerMetrics;
    }
    @Nullable
    @Override
    public ServerName getMasterName() {
      return null;
    }
    @Override
    public List<ServerName> getBackupMasterNames() {
      return null;
    }
    @Override
    public List<RegionState> getRegionStatesInTransition() {
      return null;
    }
    @Nullable
    @Override
    public String getClusterId() {
      return null;
    }
    @Override
    public List<String> getMasterCoprocessorNames() {
      return null;
    }
    @Nullable
    @Override
    public Boolean getBalancerOn() {
      return null;
    }
    @Override
    public int getMasterInfoPort() {
      return 0;
    }
    @Override
    public List<ServerName> getServersName() {
      return null;
    }
    @Override
    public Map<TableName, RegionStatesCount> getTableRegionStatesCount() {
      return null;
    }
    @Override
    public List<ServerTask> getMasterTasks() {
      return null;
    }
  };
  return clusterMetrics;
}
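As an aside, the same stub could be produced more compactly with Mockito, which these tests already use elsewhere. This is a hedged sketch of an alternative, not the form the test actually uses; the method name getMockedClusterMetrics is hypothetical.

  // Hypothetical, more compact alternative using Mockito: only the accessor the
  // test exercises is stubbed; the remaining methods return Mockito defaults.
  private static ClusterMetrics getMockedClusterMetrics(int noOfLiveServer) {
    Map<ServerName, ServerMetrics> liveServerMetrics = new HashMap<>();
    for (int i = 0; i < noOfLiveServer; i++) {
      ServerName serverName = ServerName.valueOf("rs_" + i, 16010, 12345);
      liveServerMetrics.put(serverName, TestRegionsRecoveryChore.getServerMetrics(i + 3));
    }
    ClusterMetrics clusterMetrics = Mockito.mock(ClusterMetrics.class);
    Mockito.when(clusterMetrics.getLiveServerMetrics()).thenReturn(liveServerMetrics);
    return clusterMetrics;
  }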
Use of org.apache.hadoop.hbase.ClusterMetrics in project hbase by apache.
Class: TestRegionsRecoveryChore, method: testRegionReopensWithoutStoreRefConfig.
@Test
public void testRegionReopensWithoutStoreRefConfig() throws Exception {
  regionNo = 0;
  ClusterMetrics clusterMetrics = TestRegionsRecoveryChore.getClusterMetrics(10);
  final Map<ServerName, ServerMetrics> serverMetricsMap = clusterMetrics.getLiveServerMetrics();
  LOG.debug("All Region Names with refCount....");
  for (ServerMetrics serverMetrics : serverMetricsMap.values()) {
    Map<byte[], RegionMetrics> regionMetricsMap = serverMetrics.getRegionMetrics();
    for (RegionMetrics regionMetrics : regionMetricsMap.values()) {
      LOG.debug("name: " + new String(regionMetrics.getRegionName()) + " refCount: "
        + regionMetrics.getStoreRefCount());
    }
  }
  Mockito.when(hMaster.getClusterMetrics()).thenReturn(clusterMetrics);
  Mockito.when(hMaster.getAssignmentManager()).thenReturn(assignmentManager);
  for (byte[] regionName : REGION_NAME_LIST) {
    Mockito.when(assignmentManager.getRegionInfo(regionName))
      .thenReturn(TestRegionsRecoveryChore.getRegionInfo(regionName));
  }
  Stoppable stoppable = new StoppableImplementation();
  Configuration configuration = getCustomConf();
  configuration.unset("hbase.regions.recovery.store.file.ref.count");
  regionsRecoveryChore = new RegionsRecoveryChore(stoppable, configuration, hMaster);
  regionsRecoveryChore.chore();
  // Verify that by default the feature is turned off so no regions should be reopened
  Mockito.verify(hMaster, Mockito.times(0)).reopenRegions(Mockito.any(), Mockito.anyList(),
    Mockito.anyLong(), Mockito.anyLong());
  // default maxCompactedStoreFileRefCount is -1 (no regions to be reopened using AM)
  Mockito.verify(hMaster, Mockito.times(0)).getAssignmentManager();
  Mockito.verify(assignmentManager, Mockito.times(0)).getRegionInfo(Mockito.any());
}
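The comment about the -1 default reflects how the chore decides whether to do any work at all: when the ref-count property is unset, the feature is disabled and none of the master mocks are touched. A minimal, purely illustrative sketch of such a guard follows; the method name is hypothetical and not the chore's actual code.

  // Illustrative only: a guard based on the config key used in the test above.
  // The property name and the -1 default come from the test; the method is hypothetical.
  static boolean shouldScanForReopens(Configuration conf) {
    int storeFileRefCountThreshold =
      conf.getInt("hbase.regions.recovery.store.file.ref.count", -1);
    // A non-positive threshold (the default) disables the feature entirely,
    // which is why this test expects zero interactions with the master mocks.
    return storeFileRefCountThreshold > 0;
  }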
Use of org.apache.hadoop.hbase.ClusterMetrics in project hbase by apache.
Class: TestRegionsRecoveryChore, method: testRegionReopensWithLessThreshold.
@Test
public void testRegionReopensWithLessThreshold() throws Exception {
  regionNo = 0;
  ClusterMetrics clusterMetrics = TestRegionsRecoveryChore.getClusterMetrics(4);
  final Map<ServerName, ServerMetrics> serverMetricsMap = clusterMetrics.getLiveServerMetrics();
  LOG.debug("All Region Names with refCount....");
  for (ServerMetrics serverMetrics : serverMetricsMap.values()) {
    Map<byte[], RegionMetrics> regionMetricsMap = serverMetrics.getRegionMetrics();
    for (RegionMetrics regionMetrics : regionMetricsMap.values()) {
      LOG.debug("name: " + new String(regionMetrics.getRegionName()) + " refCount: "
        + regionMetrics.getStoreRefCount());
    }
  }
  Mockito.when(hMaster.getClusterMetrics()).thenReturn(clusterMetrics);
  Mockito.when(hMaster.getAssignmentManager()).thenReturn(assignmentManager);
  for (byte[] regionName : REGION_NAME_LIST) {
    Mockito.when(assignmentManager.getRegionInfo(regionName))
      .thenReturn(TestRegionsRecoveryChore.getRegionInfo(regionName));
  }
  Stoppable stoppable = new StoppableImplementation();
  Configuration configuration = getCustomConf();
  configuration.setInt("hbase.regions.recovery.store.file.ref.count", 400);
  regionsRecoveryChore = new RegionsRecoveryChore(stoppable, configuration, hMaster);
  regionsRecoveryChore.chore();
  // Verify that we need to reopen regions of only 1 table
  Mockito.verify(hMaster, Mockito.times(1)).reopenRegions(Mockito.any(), Mockito.anyList(),
    Mockito.anyLong(), Mockito.anyLong());
  Mockito.verify(hMaster, Mockito.times(1)).getClusterMetrics();
  // Verify that we need to reopen only 1 region with refCount > 400
  Mockito.verify(hMaster, Mockito.times(1)).getAssignmentManager();
  Mockito.verify(assignmentManager, Mockito.times(1)).getRegionInfo(Mockito.any());
}
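The assertion that only one region crosses the 400 threshold comes down to comparing each region's store ref count, as exposed through ClusterMetrics, against the configured limit. A rough sketch of that selection under a hypothetical helper name (the actual chore's internals may differ):

  // Hypothetical helper illustrating the selection this test asserts on: collect
  // the names of regions whose store-file ref count exceeds the threshold.
  static List<byte[]> regionsAboveThreshold(ClusterMetrics clusterMetrics, int threshold) {
    List<byte[]> candidates = new ArrayList<>();
    for (ServerMetrics serverMetrics : clusterMetrics.getLiveServerMetrics().values()) {
      for (RegionMetrics regionMetrics : serverMetrics.getRegionMetrics().values()) {
        if (regionMetrics.getStoreRefCount() > threshold) {
          candidates.add(regionMetrics.getRegionName());
        }
      }
    }
    return candidates;
  }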
Use of org.apache.hadoop.hbase.ClusterMetrics in project hbase by apache.
Class: TestMasterShutdown, method: testMasterShutdown.
/**
 * Simple test of shutdown.
 * <p>
 * Starts with three masters. Tells the active master to shut down the cluster.
 * Verifies that all masters are properly shut down.
 */
@Test
public void testMasterShutdown() throws Exception {
  // Create config to use for this cluster
  Configuration conf = HBaseConfiguration.create();
  // Start the cluster
  try {
    htu = new HBaseTestingUtil(conf);
    StartTestingClusterOption option = StartTestingClusterOption.builder()
      .numMasters(3)
      .numRegionServers(1)
      .numDataNodes(1)
      .build();
    final SingleProcessHBaseCluster cluster = htu.startMiniCluster(option);
    // wait for all master threads to spawn and start their run loop.
    final long thirtySeconds = TimeUnit.SECONDS.toMillis(30);
    final long oneSecond = TimeUnit.SECONDS.toMillis(1);
    assertNotEquals(-1, htu.waitFor(thirtySeconds, oneSecond, () -> {
      final List<MasterThread> masterThreads = cluster.getMasterThreads();
      return masterThreads != null && masterThreads.size() >= 3
        && masterThreads.stream().allMatch(Thread::isAlive);
    }));
    // find the active master
    final HMaster active = cluster.getMaster();
    assertNotNull(active);
    // make sure the other two are backup masters
    ClusterMetrics status = active.getClusterMetrics();
    assertEquals(2, status.getBackupMasterNames().size());
    // tell the active master to shutdown the cluster
    active.shutdown();
    assertNotEquals(-1, htu.waitFor(thirtySeconds, oneSecond,
      () -> CollectionUtils.isEmpty(cluster.getLiveMasterThreads())));
    assertNotEquals(-1, htu.waitFor(thirtySeconds, oneSecond,
      () -> CollectionUtils.isEmpty(cluster.getLiveRegionServerThreads())));
  } finally {
    if (htu != null) {
      htu.shutdownMiniCluster();
      htu = null;
    }
  }
}
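For comparison, the same active/backup-master check can be made from a client connection rather than directly on the HMaster instance. A minimal sketch, assuming a Connection named connection to the mini cluster is already available:

  // Sketch: query master and backup-master information through the Admin API.
  try (Admin admin = connection.getAdmin()) {
    ClusterMetrics metrics = admin.getClusterMetrics(
      EnumSet.of(ClusterMetrics.Option.MASTER, ClusterMetrics.Option.BACKUP_MASTERS));
    ServerName activeMaster = metrics.getMasterName();
    List<ServerName> backupMasters = metrics.getBackupMasterNames();
    assertNotNull(activeMaster);
    assertEquals(2, backupMasters.size());
  }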