Search in sources:

Example 16 with ClusterMetrics

use of org.apache.hadoop.hbase.ClusterMetrics in project hbase by apache.

The class BaseTestHBaseFsck defines the method getDeployedHRIs.

/**
 * Collect the regions deployed on the local cluster, keyed by region server.
 *
 * @param admin the Admin used to query the cluster
 * @return map from each live region server to the names of the regions it hosts
 * @throws IOException if the cluster metrics or region lists cannot be fetched
 */
Map<ServerName, List<String>> getDeployedHRIs(final Admin admin) throws IOException {
    ClusterMetrics metrics = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS));
    Map<ServerName, List<String>> deployed = new HashMap<>();
    // Ask each live region server for its online regions and record their names.
    for (ServerName server : metrics.getLiveServerMetrics().keySet()) {
        List<String> regionNames = new ArrayList<>();
        for (RegionInfo region : admin.getRegions(server)) {
            regionNames.add(region.getRegionNameAsString());
        }
        deployed.put(server, regionNames);
    }
    return deployed;
}
Also used : ClusterMetrics(org.apache.hadoop.hbase.ClusterMetrics) HashMap(java.util.HashMap) ServerName(org.apache.hadoop.hbase.ServerName) ArrayList(java.util.ArrayList) List(java.util.List) ArrayList(java.util.ArrayList) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo)

Example 17 with ClusterMetrics

use of org.apache.hadoop.hbase.ClusterMetrics in project hbase by apache.

The class TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal defines the method testBalanceCluster.

/**
 * Test HBASE-20791
 */
@Test
public void testBalanceCluster() throws IOException {
    // Lay out a mock cluster: three servers holding three random regions each.
    ServerName serverA = servers.get(0);
    ServerName serverB = servers.get(1);
    ServerName serverC = servers.get(2);
    List<RegionInfo> regionsA = randomRegions(3);
    List<RegionInfo> regionsB = randomRegions(3);
    List<RegionInfo> regionsC = randomRegions(3);
    Map<ServerName, List<RegionInfo>> clusterState = new HashMap<>();
    clusterState.put(serverA, regionsA);
    clusterState.put(serverB, regionsB);
    clusterState.put(serverC, regionsC);
    // First metrics snapshot: zero read requests everywhere. The read-request
    // cost function is rate based, so this establishes the baseline.
    Map<ServerName, ServerMetrics> baselineMetrics = new TreeMap<>();
    baselineMetrics.put(serverA, mockServerMetricsWithReadRequests(serverA, regionsA, 0));
    baselineMetrics.put(serverB, mockServerMetricsWithReadRequests(serverB, regionsB, 0));
    baselineMetrics.put(serverC, mockServerMetricsWithReadRequests(serverC, regionsC, 0));
    ClusterMetrics baselineStatus = mock(ClusterMetrics.class);
    when(baselineStatus.getLiveServerMetrics()).thenReturn(baselineMetrics);
    loadBalancer.updateClusterMetrics(baselineStatus);
    // Second snapshot: each region on serverA now reports 1000 reads
    // (serverA: 1000,1000,1000; serverB: 0,0,0; serverC: 0,0,0), so the
    // balancer should move two regions off serverA, one each to B and C.
    Map<ServerName, ServerMetrics> loadedMetrics = new TreeMap<>();
    loadedMetrics.put(serverA, mockServerMetricsWithReadRequests(serverA, regionsA, 1000));
    loadedMetrics.put(serverB, mockServerMetricsWithReadRequests(serverB, regionsB, 0));
    loadedMetrics.put(serverC, mockServerMetricsWithReadRequests(serverC, regionsC, 0));
    ClusterMetrics loadedStatus = mock(ClusterMetrics.class);
    when(loadedStatus.getLiveServerMetrics()).thenReturn(loadedMetrics);
    loadBalancer.updateClusterMetrics(loadedStatus);
    Map<TableName, Map<ServerName, List<RegionInfo>>> loadOfAllTable =
        (Map) mockClusterServersWithTables(clusterState);
    List<RegionPlan> plans = loadBalancer.balanceCluster(loadOfAllTable);
    Set<RegionInfo> movedFromServerA = new HashSet<>();
    Set<ServerName> destinations = new HashSet<>();
    for (RegionPlan plan : plans) {
        if (plan.getSource().equals(serverA)) {
            movedFromServerA.add(plan.getRegionInfo());
            destinations.add(plan.getDestination());
        }
    }
    // Two regions leave serverA, landing on two distinct destination servers.
    assertEquals(2, movedFromServerA.size());
    assertEquals(2, destinations.size());
    assertTrue(regionsA.containsAll(movedFromServerA));
}
Also used : HashMap(java.util.HashMap) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) TreeMap(java.util.TreeMap) TableName(org.apache.hadoop.hbase.TableName) ClusterMetrics(org.apache.hadoop.hbase.ClusterMetrics) RegionPlan(org.apache.hadoop.hbase.master.RegionPlan) ServerName(org.apache.hadoop.hbase.ServerName) List(java.util.List) ServerMetrics(org.apache.hadoop.hbase.ServerMetrics) HashMap(java.util.HashMap) Map(java.util.Map) TreeMap(java.util.TreeMap) HashSet(java.util.HashSet) Test(org.junit.Test)

Example 18 with ClusterMetrics

use of org.apache.hadoop.hbase.ClusterMetrics in project hbase by apache.

The class TestReplicationStatus defines the method waitOnMetricsReport.

/**
 * Wait until the Master shows a ReplicationLoadSource list for
 * <code>serverName</code> whose size is greater than <code>greaterThan</code>
 * before returning. We want to avoid the case where the RS hasn't yet updated
 * the Master before allowing the test to proceed.
 * @param greaterThan size the replicationLoadSourceList must exceed before we proceed
 * @param serverName the region server whose replication metrics are polled
 * @return the first observed list whose size exceeds {@code greaterThan}
 * @throws IOException if fetching the cluster metrics fails
 */
private List<ReplicationLoadSource> waitOnMetricsReport(int greaterThan, ServerName serverName) throws IOException {
    // BUGFIX: re-fetch the metrics on every iteration. The previous version
    // fetched once and then re-checked the same list in the loop; since the
    // fetched metrics are a point-in-time report, the size could never change
    // and the loop would spin forever whenever the first snapshot was too small.
    while (true) {
        ClusterMetrics metrics = hbaseAdmin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS));
        List<ReplicationLoadSource> list =
            metrics.getLiveServerMetrics().get(serverName).getReplicationLoadSourceList();
        if (list.size() > greaterThan) {
            return list;
        }
        Threads.sleep(1000);
    }
}
Also used : ClusterMetrics(org.apache.hadoop.hbase.ClusterMetrics)

Example 19 with ClusterMetrics

use of org.apache.hadoop.hbase.ClusterMetrics in project hbase by apache.

The class TestReplicationStatusBothNormalAndRecoveryLagging defines the method testReplicationStatusBothNormalAndRecoveryLagging.

@Test
public void testReplicationStatusBothNormalAndRecoveryLagging() throws Exception {
    UTIL2.shutdownMiniHBaseCluster();
    // First batch of writes while the peer cluster is down; after the source
    // cluster restarts these should sit in a recovery queue.
    for (int row = 0; row < NB_ROWS_IN_BATCH; row++) {
        Put put = new Put(Bytes.toBytes("row" + row));
        put.addColumn(famName, Bytes.toBytes("col1"), Bytes.toBytes("val" + row));
        htable1.put(put);
    }
    Thread.sleep(10000);
    restartSourceCluster(1);
    Admin hbaseAdmin = UTIL1.getAdmin();
    ServerName serverName = UTIL1.getHBaseCluster().getRegionServer(0).getServerName();
    Thread.sleep(10000);
    // Second batch after the restart; these should cause the normal queue to lag.
    for (int row = 0; row < NB_ROWS_IN_BATCH; row++) {
        Put put = new Put(Bytes.toBytes("row" + row));
        put.addColumn(famName, Bytes.toBytes("col1"), Bytes.toBytes("val" + row));
        htable1.put(put);
    }
    Thread.sleep(10000);
    ClusterMetrics metrics = hbaseAdmin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS));
    List<ReplicationLoadSource> loadSources =
        metrics.getLiveServerMetrics().get(serverName).getReplicationLoadSourceList();
    assertEquals(2, loadSources.size());
    boolean sawRecovery = false;
    boolean sawNormal = false;
    for (ReplicationLoadSource source : loadSources) {
        // Record which queue kinds were observed; both must show up, and
        // every queue must be lagging with unshipped edits.
        sawRecovery |= source.isRecovered();
        sawNormal |= !source.isRecovered();
        assertTrue(source.hasEditsSinceRestart());
        assertEquals(0, source.getTimestampOfLastShippedOp());
        assertTrue(source.getReplicationLag() > 0);
    }
    assertTrue("No normal queue found.", sawNormal);
    assertTrue("No recovery queue found.", sawRecovery);
}
Also used : ClusterMetrics(org.apache.hadoop.hbase.ClusterMetrics) ServerName(org.apache.hadoop.hbase.ServerName) Admin(org.apache.hadoop.hbase.client.Admin) Put(org.apache.hadoop.hbase.client.Put) Test(org.junit.Test)

Example 20 with ClusterMetrics

use of org.apache.hadoop.hbase.ClusterMetrics in project hbase by apache.

The class TestReplicationStatusSourceStartedTargetStoppedNewOp defines the method testReplicationStatusSourceStartedTargetStoppedNewOp.

@Test
public void testReplicationStatusSourceStartedTargetStoppedNewOp() throws Exception {
    UTIL2.shutdownMiniHBaseCluster();
    restartSourceCluster(1);
    Admin hbaseAdmin = UTIL1.getAdmin();
    // Write a batch of edits to the source; the peer is down, so nothing ships.
    for (int row = 0; row < NB_ROWS_IN_BATCH; row++) {
        Put put = new Put(Bytes.toBytes("row" + row));
        put.addColumn(famName, Bytes.toBytes("col1"), Bytes.toBytes("val" + row));
        htable1.put(put);
    }
    // Give the region server time to report replication metrics to the Master.
    Thread.sleep(10000);
    ServerName serverName = UTIL1.getHBaseCluster().getRegionServer(0).getServerName();
    ClusterMetrics metrics = hbaseAdmin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS));
    List<ReplicationLoadSource> loadSources =
        metrics.getLiveServerMetrics().get(serverName).getReplicationLoadSourceList();
    // Expect a single normal (non-recovered) source that is lagging with
    // unshipped edits and has shipped nothing since the restart.
    assertEquals(1, loadSources.size());
    ReplicationLoadSource source = loadSources.get(0);
    assertTrue(source.hasEditsSinceRestart());
    assertEquals(0, source.getTimestampOfLastShippedOp());
    assertTrue(source.getReplicationLag() > 0);
    assertFalse(source.isRecovered());
}
Also used : ClusterMetrics(org.apache.hadoop.hbase.ClusterMetrics) ServerName(org.apache.hadoop.hbase.ServerName) Admin(org.apache.hadoop.hbase.client.Admin) Put(org.apache.hadoop.hbase.client.Put) Test(org.junit.Test)

Aggregations

ClusterMetrics (org.apache.hadoop.hbase.ClusterMetrics)39 ServerName (org.apache.hadoop.hbase.ServerName)30 Test (org.junit.Test)19 ServerMetrics (org.apache.hadoop.hbase.ServerMetrics)18 ArrayList (java.util.ArrayList)13 List (java.util.List)12 HashMap (java.util.HashMap)9 RegionMetrics (org.apache.hadoop.hbase.RegionMetrics)8 Admin (org.apache.hadoop.hbase.client.Admin)8 IOException (java.io.IOException)7 Map (java.util.Map)7 TableName (org.apache.hadoop.hbase.TableName)7 RegionInfo (org.apache.hadoop.hbase.client.RegionInfo)7 HashSet (java.util.HashSet)6 Configuration (org.apache.hadoop.conf.Configuration)6 TreeMap (java.util.TreeMap)5 Collections (java.util.Collections)4 LinkedList (java.util.LinkedList)4 Collectors (java.util.stream.Collectors)4 Put (org.apache.hadoop.hbase.client.Put)4