Search in sources :

Example 11 with ClusterMetrics

use of org.apache.hadoop.hbase.ClusterMetrics in project hbase by apache.

The following example shows the getRecords method of the RegionServerModeStrategy class.

@Override
public List<Record> getRecords(ClusterMetrics clusterMetrics, List<RecordFilter> pushDownFilters) {
    // Build the region-level records first, then attach a REGION_COUNT field to each.
    List<Record> regionRecords = regionModeStrategy.getRecords(clusterMetrics, pushDownFilters);
    List<Record> records =
        regionModeStrategy.selectModeFieldsAndAddCountField(fieldInfos, regionRecords, Field.REGION_COUNT);
    // Roll the region records up to one record per server, keyed by LONG_REGION_SERVER.
    Map<String, Record> recordsByServer =
        ModeStrategyUtils.aggregateRecords(records, Field.LONG_REGION_SERVER).stream()
            .collect(Collectors.toMap(rec -> rec.get(Field.LONG_REGION_SERVER).asString(), rec -> rec));
    // Enrich each aggregated record with heap usage from the live server metrics.
    for (ServerMetrics serverMetrics : clusterMetrics.getLiveServerMetrics().values()) {
        String serverName = serverMetrics.getServerName().getServerName();
        Record aggregated = recordsByServer.get(serverName);
        if (aggregated != null) {
            Record enriched = Record.builder()
                .putAll(aggregated)
                .put(Field.USED_HEAP_SIZE, serverMetrics.getUsedHeapSize())
                .put(Field.MAX_HEAP_SIZE, serverMetrics.getMaxHeapSize())
                .build();
            recordsByServer.put(serverName, enriched);
        }
    }
    return new ArrayList<>(recordsByServer.values());
}
Also used : RecordFilter(org.apache.hadoop.hbase.hbtop.RecordFilter) Arrays(java.util.Arrays) ServerMetrics(org.apache.hadoop.hbase.ServerMetrics) ClusterMetrics(org.apache.hadoop.hbase.ClusterMetrics) Collectors(java.util.stream.Collectors) ArrayList(java.util.ArrayList) List(java.util.List) InterfaceAudience(org.apache.yetus.audience.InterfaceAudience) Field(org.apache.hadoop.hbase.hbtop.field.Field) FieldInfo(org.apache.hadoop.hbase.hbtop.field.FieldInfo) Map(java.util.Map) Record(org.apache.hadoop.hbase.hbtop.Record) Collections(java.util.Collections) ArrayList(java.util.ArrayList) Record(org.apache.hadoop.hbase.hbtop.Record) ServerMetrics(org.apache.hadoop.hbase.ServerMetrics)

Example 12 with ClusterMetrics

use of org.apache.hadoop.hbase.ClusterMetrics in project hbase by apache.

The following example shows the getClusterMetrics method of the TestRegionsRecoveryChore class.

/**
 * Builds a stub {@link ClusterMetrics} for tests. Only {@code getLiveServerMetrics()}
 * returns real data (one entry per requested live server); every other accessor
 * returns {@code null} or a zero default.
 */
private static ClusterMetrics getClusterMetrics(int noOfLiveServer) {
    return new ClusterMetrics() {

        @Nullable
        @Override
        public String getHBaseVersion() {
            return null;
        }

        @Override
        public List<ServerName> getDeadServerNames() {
            return null;
        }

        @Override
        public Map<ServerName, ServerMetrics> getLiveServerMetrics() {
            // One synthetic server per index; metrics are seeded with (index + 3)
            // so each server produces distinct region metrics.
            Map<ServerName, ServerMetrics> metricsByServer = new HashMap<>();
            for (int serverIdx = 0; serverIdx < noOfLiveServer; serverIdx++) {
                ServerName name = ServerName.valueOf("rs_" + serverIdx, 16010, 12345);
                metricsByServer.put(name, TestRegionsRecoveryChore.getServerMetrics(serverIdx + 3));
            }
            return metricsByServer;
        }

        @Nullable
        @Override
        public ServerName getMasterName() {
            return null;
        }

        @Override
        public List<ServerName> getBackupMasterNames() {
            return null;
        }

        @Override
        public List<RegionState> getRegionStatesInTransition() {
            return null;
        }

        @Nullable
        @Override
        public String getClusterId() {
            return null;
        }

        @Override
        public List<String> getMasterCoprocessorNames() {
            return null;
        }

        @Nullable
        @Override
        public Boolean getBalancerOn() {
            return null;
        }

        @Override
        public int getMasterInfoPort() {
            return 0;
        }

        @Override
        public List<ServerName> getServersName() {
            return null;
        }

        @Override
        public Map<TableName, RegionStatesCount> getTableRegionStatesCount() {
            return null;
        }

        @Override
        public List<ServerTask> getMasterTasks() {
            return null;
        }
    };
}
Also used : TableName(org.apache.hadoop.hbase.TableName) ClusterMetrics(org.apache.hadoop.hbase.ClusterMetrics) ServerTask(org.apache.hadoop.hbase.ServerTask) HashMap(java.util.HashMap) ServerName(org.apache.hadoop.hbase.ServerName) ServerMetrics(org.apache.hadoop.hbase.ServerMetrics) RegionStatesCount(org.apache.hadoop.hbase.client.RegionStatesCount)

Example 13 with ClusterMetrics

use of org.apache.hadoop.hbase.ClusterMetrics in project hbase by apache.

The following example shows the testRegionReopensWithoutStoreRefConfig method of the TestRegionsRecoveryChore class.

@Test
public void testRegionReopensWithoutStoreRefConfig() throws Exception {
    regionNo = 0;
    ClusterMetrics clusterMetrics = TestRegionsRecoveryChore.getClusterMetrics(10);
    final Map<ServerName, ServerMetrics> serverMetricsMap = clusterMetrics.getLiveServerMetrics();
    LOG.debug("All Region Names with refCount....");
    for (ServerMetrics serverMetrics : serverMetricsMap.values()) {
        Map<byte[], RegionMetrics> regionMetricsMap = serverMetrics.getRegionMetrics();
        for (RegionMetrics regionMetrics : regionMetricsMap.values()) {
            LOG.debug("name: " + new String(regionMetrics.getRegionName()) + " refCount: " + regionMetrics.getStoreRefCount());
        }
    }
    Mockito.when(hMaster.getClusterMetrics()).thenReturn(clusterMetrics);
    Mockito.when(hMaster.getAssignmentManager()).thenReturn(assignmentManager);
    for (byte[] regionName : REGION_NAME_LIST) {
        Mockito.when(assignmentManager.getRegionInfo(regionName)).thenReturn(TestRegionsRecoveryChore.getRegionInfo(regionName));
    }
    Stoppable stoppable = new StoppableImplementation();
    Configuration configuration = getCustomConf();
    configuration.unset("hbase.regions.recovery.store.file.ref.count");
    regionsRecoveryChore = new RegionsRecoveryChore(stoppable, configuration, hMaster);
    regionsRecoveryChore.chore();
    // Verify that by default the feature is turned off so no regions
    // should be reopened
    Mockito.verify(hMaster, Mockito.times(0)).reopenRegions(Mockito.any(), Mockito.anyList(), Mockito.anyLong(), Mockito.anyLong());
    // default maxCompactedStoreFileRefCount is -1 (no regions to be reopened using AM)
    Mockito.verify(hMaster, Mockito.times(0)).getAssignmentManager();
    Mockito.verify(assignmentManager, Mockito.times(0)).getRegionInfo(Mockito.any());
}
Also used : ClusterMetrics(org.apache.hadoop.hbase.ClusterMetrics) Configuration(org.apache.hadoop.conf.Configuration) ServerName(org.apache.hadoop.hbase.ServerName) ServerMetrics(org.apache.hadoop.hbase.ServerMetrics) Stoppable(org.apache.hadoop.hbase.Stoppable) RegionMetrics(org.apache.hadoop.hbase.RegionMetrics) Test(org.junit.Test)

Example 14 with ClusterMetrics

use of org.apache.hadoop.hbase.ClusterMetrics in project hbase by apache.

The following example shows the testRegionReopensWithLessThreshold method of the TestRegionsRecoveryChore class.

@Test
public void testRegionReopensWithLessThreshold() throws Exception {
    regionNo = 0;
    ClusterMetrics clusterMetrics = TestRegionsRecoveryChore.getClusterMetrics(4);
    final Map<ServerName, ServerMetrics> serverMetricsMap = clusterMetrics.getLiveServerMetrics();
    LOG.debug("All Region Names with refCount....");
    for (ServerMetrics serverMetrics : serverMetricsMap.values()) {
        Map<byte[], RegionMetrics> regionMetricsMap = serverMetrics.getRegionMetrics();
        for (RegionMetrics regionMetrics : regionMetricsMap.values()) {
            LOG.debug("name: " + new String(regionMetrics.getRegionName()) + " refCount: " + regionMetrics.getStoreRefCount());
        }
    }
    Mockito.when(hMaster.getClusterMetrics()).thenReturn(clusterMetrics);
    Mockito.when(hMaster.getAssignmentManager()).thenReturn(assignmentManager);
    for (byte[] regionName : REGION_NAME_LIST) {
        Mockito.when(assignmentManager.getRegionInfo(regionName)).thenReturn(TestRegionsRecoveryChore.getRegionInfo(regionName));
    }
    Stoppable stoppable = new StoppableImplementation();
    Configuration configuration = getCustomConf();
    configuration.setInt("hbase.regions.recovery.store.file.ref.count", 400);
    regionsRecoveryChore = new RegionsRecoveryChore(stoppable, configuration, hMaster);
    regionsRecoveryChore.chore();
    // Verify that we need to reopen regions of only 1 table
    Mockito.verify(hMaster, Mockito.times(1)).reopenRegions(Mockito.any(), Mockito.anyList(), Mockito.anyLong(), Mockito.anyLong());
    Mockito.verify(hMaster, Mockito.times(1)).getClusterMetrics();
    // Verify that we need to reopen only 1 region with refCount > 400
    Mockito.verify(hMaster, Mockito.times(1)).getAssignmentManager();
    Mockito.verify(assignmentManager, Mockito.times(1)).getRegionInfo(Mockito.any());
}
Also used : ClusterMetrics(org.apache.hadoop.hbase.ClusterMetrics) Configuration(org.apache.hadoop.conf.Configuration) ServerName(org.apache.hadoop.hbase.ServerName) ServerMetrics(org.apache.hadoop.hbase.ServerMetrics) Stoppable(org.apache.hadoop.hbase.Stoppable) RegionMetrics(org.apache.hadoop.hbase.RegionMetrics) Test(org.junit.Test)

Example 15 with ClusterMetrics

use of org.apache.hadoop.hbase.ClusterMetrics in project hbase by apache.

The following example shows the testMasterShutdown method of the TestMasterShutdown class.

/**
 * Simple test of shutdown.
 * <p>
 * Starts with three masters.  Tells the active master to shutdown the cluster.
 * Verifies that all masters are properly shutdown.
 */
@Test
public void testMasterShutdown() throws Exception {
    // Create config to use for this cluster
    Configuration conf = HBaseConfiguration.create();
    // Start the cluster
    try {
        htu = new HBaseTestingUtil(conf);
        StartTestingClusterOption clusterOption = StartTestingClusterOption.builder()
            .numMasters(3)
            .numRegionServers(1)
            .numDataNodes(1)
            .build();
        final SingleProcessHBaseCluster cluster = htu.startMiniCluster(clusterOption);
        // wait for all master thread to spawn and start their run loop.
        final long timeoutMillis = TimeUnit.SECONDS.toMillis(30);
        final long pollMillis = TimeUnit.SECONDS.toMillis(1);
        assertNotEquals(-1, htu.waitFor(timeoutMillis, pollMillis, () -> {
            final List<MasterThread> masterThreads = cluster.getMasterThreads();
            return masterThreads != null
                && masterThreads.size() >= 3
                && masterThreads.stream().allMatch(Thread::isAlive);
        }));
        // find the active master
        final HMaster activeMaster = cluster.getMaster();
        assertNotNull(activeMaster);
        // make sure the other two are backup masters
        ClusterMetrics metrics = activeMaster.getClusterMetrics();
        assertEquals(2, metrics.getBackupMasterNames().size());
        // tell the active master to shutdown the cluster
        activeMaster.shutdown();
        // both master and region-server threads should drain to empty
        assertNotEquals(-1,
            htu.waitFor(timeoutMillis, pollMillis, () -> CollectionUtils.isEmpty(cluster.getLiveMasterThreads())));
        assertNotEquals(-1,
            htu.waitFor(timeoutMillis, pollMillis, () -> CollectionUtils.isEmpty(cluster.getLiveRegionServerThreads())));
    } finally {
        if (htu != null) {
            htu.shutdownMiniCluster();
            htu = null;
        }
    }
}
Also used : SingleProcessHBaseCluster(org.apache.hadoop.hbase.SingleProcessHBaseCluster) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) ClusterMetrics(org.apache.hadoop.hbase.ClusterMetrics) List(java.util.List) HBaseTestingUtil(org.apache.hadoop.hbase.HBaseTestingUtil) StartTestingClusterOption(org.apache.hadoop.hbase.StartTestingClusterOption) MasterThread(org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread) Test(org.junit.Test)

Aggregations

ClusterMetrics (org.apache.hadoop.hbase.ClusterMetrics)39 ServerName (org.apache.hadoop.hbase.ServerName)30 Test (org.junit.Test)19 ServerMetrics (org.apache.hadoop.hbase.ServerMetrics)18 ArrayList (java.util.ArrayList)13 List (java.util.List)12 HashMap (java.util.HashMap)9 RegionMetrics (org.apache.hadoop.hbase.RegionMetrics)8 Admin (org.apache.hadoop.hbase.client.Admin)8 IOException (java.io.IOException)7 Map (java.util.Map)7 TableName (org.apache.hadoop.hbase.TableName)7 RegionInfo (org.apache.hadoop.hbase.client.RegionInfo)7 HashSet (java.util.HashSet)6 Configuration (org.apache.hadoop.conf.Configuration)6 TreeMap (java.util.TreeMap)5 Collections (java.util.Collections)4 LinkedList (java.util.LinkedList)4 Collectors (java.util.stream.Collectors)4 Put (org.apache.hadoop.hbase.client.Put)4