Example 31 with ServerMetrics

Use of org.apache.hadoop.hbase.ServerMetrics in project hbase by apache.

From the class TestBaseLoadBalancer, method testRandomAssignment.

private void testRandomAssignment(int numberOfIdleServers) throws Exception {
    assert numberOfIdleServers > 0;
    List<ServerName> idleServers = new ArrayList<>(numberOfIdleServers);
    for (int i = 0; i != numberOfIdleServers; ++i) {
        idleServers.add(ServerName.valueOf("server-" + i, 1000, 1L));
    }
    List<ServerName> allServers = new ArrayList<>(idleServers.size() + 1);
    allServers.add(ServerName.valueOf("server-" + numberOfIdleServers, 1000, 1L));
    allServers.addAll(idleServers);
    LoadBalancer balancer = new MockBalancer();
    Configuration conf = HBaseConfiguration.create();
    conf.setClass("hbase.util.ip.to.rack.determiner", MockMapping.class, DNSToSwitchMapping.class);
    balancer.setClusterInfoProvider(new DummyClusterInfoProvider(conf) {

        @Override
        public List<ServerName> getOnlineServersListWithPredicator(List<ServerName> servers, Predicate<ServerMetrics> filter) {
            return idleServers;
        }
    });
    RegionInfo hri1 = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
        .setStartKey(Bytes.toBytes("key1"))
        .setEndKey(Bytes.toBytes("key2"))
        .setSplit(false)
        .setRegionId(100)
        .build();
    assertNull(balancer.randomAssignment(hri1, Collections.emptyList()));
    assertNull(balancer.randomAssignment(hri1, null));
    for (int i = 0; i != 3; ++i) {
        ServerName sn = balancer.randomAssignment(hri1, allServers);
        assertTrue("actual:" + sn + ", except:" + idleServers, idleServers.contains(sn));
    }
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) ArrayList(java.util.ArrayList) LoadBalancer(org.apache.hadoop.hbase.master.LoadBalancer) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) ServerName(org.apache.hadoop.hbase.ServerName) ServerMetrics(org.apache.hadoop.hbase.ServerMetrics) List(java.util.List)
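
The mock ClusterInfoProvider above ignores the Predicate<ServerMetrics> filter and simply returns the prepared idle list. As a rough illustration only (an assumption, not the balancer's actual definition of idleness), a predicate of that shape could treat a server as idle when its ServerMetrics reports no regions:

import java.util.function.Predicate;

import org.apache.hadoop.hbase.ServerMetrics;

// Hypothetical helper for illustration; "idle" is assumed here to mean
// "currently hosting no regions", which may differ from the real balancer's rule.
public final class IdleServerPredicate {

    private IdleServerPredicate() {
    }

    public static Predicate<ServerMetrics> idle() {
        return metrics -> metrics.getRegionMetrics().isEmpty();
    }
}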

Example 32 with ServerMetrics

Use of org.apache.hadoop.hbase.ServerMetrics in project hbase by apache.

From the class ClientModeStrategy, method createRecords.

List<Record> createRecords(ClusterMetrics clusterMetrics) {
    List<Record> ret = new ArrayList<>();
    for (ServerMetrics serverMetrics : clusterMetrics.getLiveServerMetrics().values()) {
        long lastReportTimestamp = serverMetrics.getLastReportTimestamp();
        serverMetrics.getUserMetrics().values().forEach(um ->
            um.getClientMetrics().values().forEach(clientMetrics ->
                ret.add(createRecord(um.getNameAsString(), clientMetrics, lastReportTimestamp,
                    serverMetrics.getServerName().getServerName()))));
    }
    return ret;
}
Also used : RecordFilter(org.apache.hadoop.hbase.hbtop.RecordFilter) Arrays(java.util.Arrays) UserMetrics(org.apache.hadoop.hbase.UserMetrics) Set(java.util.Set) ServerMetrics(org.apache.hadoop.hbase.ServerMetrics) HashMap(java.util.HashMap) ClusterMetrics(org.apache.hadoop.hbase.ClusterMetrics) Collectors(java.util.stream.Collectors) FieldValueType(org.apache.hadoop.hbase.hbtop.field.FieldValueType) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) List(java.util.List) InterfaceAudience(org.apache.yetus.audience.InterfaceAudience) Field(org.apache.hadoop.hbase.hbtop.field.Field) FieldInfo(org.apache.hadoop.hbase.hbtop.field.FieldInfo) FieldValue(org.apache.hadoop.hbase.hbtop.field.FieldValue) Map(java.util.Map) Record(org.apache.hadoop.hbase.hbtop.Record) Collections(java.util.Collections)
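
Outside of hbtop, the same traversal can be driven against a live cluster. The sketch below is illustrative only: the UserMetricsDump class and the Connection/Admin wiring are assumptions, not part of the original example. It fetches ClusterMetrics through Admin and walks the per-user client metrics the same way createRecords does:

import java.util.EnumSet;

import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hedged sketch: prints, per live server, each user name and how many client
// entries were reported for it. Assumes a reachable cluster configured via
// hbase-site.xml on the classpath.
public class UserMetricsDump {

    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            ClusterMetrics cm = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS));
            for (ServerMetrics sm : cm.getLiveServerMetrics().values()) {
                sm.getUserMetrics().values().forEach(um ->
                    System.out.println(sm.getServerName() + " user=" + um.getNameAsString()
                        + " clients=" + um.getClientMetrics().size()));
            }
        }
    }
}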

Example 33 with ServerMetrics

Use of org.apache.hadoop.hbase.ServerMetrics in project hbase by apache.

From the class TestUtils, method createDummyClusterMetrics.

public static ClusterMetrics createDummyClusterMetrics() {
    Map<ServerName, ServerMetrics> serverMetricsMap = new HashMap<>();
    // host1
    List<RegionMetrics> regionMetricsList = new ArrayList<>();
    List<UserMetrics> userMetricsList = new ArrayList<>();
    userMetricsList.add(createUserMetrics("FOO", 1, 2, 4));
    userMetricsList.add(createUserMetrics("BAR", 2, 3, 3));
    regionMetricsList.add(createRegionMetrics("table1,,1.00000000000000000000000000000000.", 100, 50, 100, new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1, new Size(100, Size.Unit.MEGABYTE), 0.1f, 100, 100, "2019-07-22 00:00:00"));
    regionMetricsList.add(createRegionMetrics("table2,1,2.00000000000000000000000000000001.", 200, 100, 200, new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2, new Size(200, Size.Unit.MEGABYTE), 0.2f, 50, 200, "2019-07-22 00:00:01"));
    regionMetricsList.add(createRegionMetrics("namespace:table3,,3_0001.00000000000000000000000000000002.", 300, 150, 300, new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3, new Size(300, Size.Unit.MEGABYTE), 0.3f, 100, 300, "2019-07-22 00:00:02"));
    ServerName host1 = ServerName.valueOf("host1.apache.com", 1000, 1);
    serverMetricsMap.put(host1, createServerMetrics(host1, 100, new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 100, regionMetricsList, userMetricsList));
    // host2
    regionMetricsList.clear();
    userMetricsList.clear();
    userMetricsList.add(createUserMetrics("FOO", 5, 7, 3));
    userMetricsList.add(createUserMetrics("BAR", 4, 8, 4));
    regionMetricsList.add(createRegionMetrics("table1,1,4.00000000000000000000000000000003.", 100, 50, 100, new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1, new Size(100, Size.Unit.MEGABYTE), 0.4f, 50, 100, "2019-07-22 00:00:03"));
    regionMetricsList.add(createRegionMetrics("table2,,5.00000000000000000000000000000004.", 200, 100, 200, new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2, new Size(200, Size.Unit.MEGABYTE), 0.5f, 150, 200, "2019-07-22 00:00:04"));
    regionMetricsList.add(createRegionMetrics("namespace:table3,,6.00000000000000000000000000000005.", 300, 150, 300, new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3, new Size(300, Size.Unit.MEGABYTE), 0.6f, 200, 300, "2019-07-22 00:00:05"));
    ServerName host2 = ServerName.valueOf("host2.apache.com", 1001, 2);
    serverMetricsMap.put(host2, createServerMetrics(host2, 200, new Size(16, Size.Unit.GIGABYTE), new Size(32, Size.Unit.GIGABYTE), 200, regionMetricsList, userMetricsList));
    ServerName host3 = ServerName.valueOf("host3.apache.com", 1002, 3);
    return ClusterMetricsBuilder.newBuilder()
        .setHBaseVersion("3.0.0-SNAPSHOT")
        .setClusterId("01234567-89ab-cdef-0123-456789abcdef")
        .setLiveServerMetrics(serverMetricsMap)
        .setDeadServerNames(Collections.singletonList(host3))
        .setRegionsInTransition(Collections.singletonList(
            new RegionState(RegionInfoBuilder.newBuilder(TableName.valueOf("table4"))
                .setStartKey(new byte[0])
                .setEndKey(new byte[0])
                .setOffline(true)
                .setReplicaId(0)
                .setRegionId(0)
                .setSplit(false)
                .build(), RegionState.State.OFFLINE, host3)))
        .build();
}
Also used : RegionState(org.apache.hadoop.hbase.master.RegionState) HashMap(java.util.HashMap) Size(org.apache.hadoop.hbase.Size) ServerName(org.apache.hadoop.hbase.ServerName) ArrayList(java.util.ArrayList) ServerMetrics(org.apache.hadoop.hbase.ServerMetrics) UserMetrics(org.apache.hadoop.hbase.UserMetrics) RegionMetrics(org.apache.hadoop.hbase.RegionMetrics)
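
A consumer of this factory method might look like the sketch below. The TestDummyClusterMetrics class is hypothetical and is assumed to sit in the same package as TestUtils; the expected counts simply mirror the two live servers and the single dead server registered above:

import static org.junit.Assert.assertEquals;

import java.util.Map;

import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.ServerName;
import org.junit.Test;

public class TestDummyClusterMetrics {

    @Test
    public void consumesDummyMetrics() {
        ClusterMetrics dummy = TestUtils.createDummyClusterMetrics();
        // host1 and host2 are live, host3 is dead.
        assertEquals(2, dummy.getLiveServerMetrics().size());
        assertEquals(1, dummy.getDeadServerNames().size());
        // Each map key should match the ServerName reported by its ServerMetrics.
        for (Map.Entry<ServerName, ServerMetrics> e : dummy.getLiveServerMetrics().entrySet()) {
            assertEquals(e.getKey(), e.getValue().getServerName());
        }
    }
}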

Example 34 with ServerMetrics

Use of org.apache.hadoop.hbase.ServerMetrics in project hbase by apache.

From the class TestRegionServerReadRequestMetrics, method updateMetricsMap.

private static void updateMetricsMap() throws IOException, InterruptedException {
    for (Metric metric : Metric.values()) {
        requestsMapPrev.put(metric, requestsMap.get(metric));
    }
    ServerMetrics serverMetrics = null;
    RegionMetrics regionMetricsOuter = null;
    boolean metricsUpdated = false;
    // Poll until the read-request counters advance, up to MAX_TRY attempts.
    for (int i = 0; i < MAX_TRY; i++) {
        for (ServerName serverName : serverNames) {
            serverMetrics = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().get(serverName);
            Map<byte[], RegionMetrics> regionMetrics = serverMetrics.getRegionMetrics();
            RegionMetrics regionMetric = regionMetrics.get(regionInfo.getRegionName());
            if (regionMetric != null) {
                regionMetricsOuter = regionMetric;
                for (Metric metric : Metric.values()) {
                    if (getReadRequest(serverMetrics, regionMetric, metric) > requestsMapPrev.get(metric)) {
                        for (Metric metricInner : Metric.values()) {
                            requestsMap.put(metricInner, getReadRequest(serverMetrics, regionMetric, metricInner));
                        }
                        metricsUpdated = true;
                        break;
                    }
                }
            }
        }
        if (metricsUpdated) {
            break;
        }
        Thread.sleep(SLEEP_MS);
    }
    // The counters never advanced; fall back to the last observed metrics.
    if (!metricsUpdated) {
        for (Metric metric : Metric.values()) {
            requestsMap.put(metric, getReadRequest(serverMetrics, regionMetricsOuter, metric));
        }
    }
}
Also used : ServerName(org.apache.hadoop.hbase.ServerName) ServerMetrics(org.apache.hadoop.hbase.ServerMetrics) RegionMetrics(org.apache.hadoop.hbase.RegionMetrics)
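
The getReadRequest helper used above is not shown in this snippet. The stand-in below, including its reduced Metric enum, is a hypothetical illustration of where per-region and per-server read counts can be pulled from the metrics API; the real helper in TestRegionServerReadRequestMetrics differs:

import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.ServerMetrics;

// Hypothetical stand-in for the test's getReadRequest helper.
final class ReadRequestLookup {

    enum Metric {
        REGION_READ, SERVER_READ
    }

    static long getReadRequest(ServerMetrics serverMetrics, RegionMetrics regionMetrics,
            Metric metric) {
        switch (metric) {
            case REGION_READ:
                // Reads served by the single region being watched.
                return regionMetrics.getReadRequestCount();
            case SERVER_READ:
                // Sum of the per-region read counts reported by this server.
                return serverMetrics.getRegionMetrics().values().stream()
                    .mapToLong(RegionMetrics::getReadRequestCount).sum();
            default:
                throw new AssertionError("unhandled metric " + metric);
        }
    }
}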

Example 35 with ServerMetrics

Use of org.apache.hadoop.hbase.ServerMetrics in project hbase by apache.

From the class TestReplicationStatus, method testReplicationStatus.

/**
 * Test for HBASE-9531.
 * <p/>
 * put a few rows into htable1, which should be replicated to htable2 <br/>
 * create a ClusterStatus instance 'status' from HBaseAdmin <br/>
 * test : status.getLoad(server).getReplicationLoadSourceList() <br/>
 * test : status.getLoad(server).getReplicationLoadSink()
 */
@Test
public void testReplicationStatus() throws Exception {
    // This test wants two RS's up. We only run one generally so add one.
    UTIL1.getMiniHBaseCluster().startRegionServer();
    Waiter.waitFor(UTIL1.getConfiguration(), 30000, new Waiter.Predicate<Exception>() {

        @Override
        public boolean evaluate() throws Exception {
            return UTIL1.getMiniHBaseCluster().getLiveRegionServerThreads().size() > 1;
        }
    });
    Admin hbaseAdmin = UTIL1.getAdmin();
    // disable peer <= WHY? I DON'T GET THIS DISABLE BUT TEST FAILS W/O IT.
    hbaseAdmin.disableReplicationPeer(PEER_ID2);
    insertRowsOnSource();
    LOG.info("AFTER PUTS");
    // TODO: Change this wait to a barrier. I tried waiting on replication stats to
    // change but sleeping in main thread seems to mess up background replication.
    // HACK! To address flakiness.
    Threads.sleep(10000);
    ClusterMetrics metrics = hbaseAdmin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS));
    for (JVMClusterUtil.RegionServerThread thread : UTIL1.getHBaseCluster().getRegionServerThreads()) {
        ServerName server = thread.getRegionServer().getServerName();
        assertTrue("" + server, metrics.getLiveServerMetrics().containsKey(server));
        ServerMetrics sm = metrics.getLiveServerMetrics().get(server);
        List<ReplicationLoadSource> rLoadSourceList = sm.getReplicationLoadSourceList();
        ReplicationLoadSink rLoadSink = sm.getReplicationLoadSink();
        // check that SourceList has only one entry, because there is only one peer
        assertEquals("Failed to get ReplicationLoadSourceList " + rLoadSourceList + ", " + server, 1, rLoadSourceList.size());
        assertEquals(PEER_ID2, rLoadSourceList.get(0).getPeerID());
        // check only that the Sink exists, as it is difficult to verify the value on the fly
        assertTrue("failed to get ReplicationLoadSink.AgeOfLastShippedOp ", (rLoadSink.getAgeOfLastAppliedOp() >= 0));
        assertTrue("failed to get ReplicationLoadSink.TimeStampsOfLastAppliedOp ", (rLoadSink.getTimestampsOfLastAppliedOp() >= 0));
    }
    // Stop rs1, then the queue of rs1 will be transferred to rs0
    HRegionServer hrs = UTIL1.getHBaseCluster().getRegionServer(1);
    hrs.stop("Stop RegionServer");
    while (hrs.isAlive()) {
        Threads.sleep(100);
    }
    // To be sure it is dead and references are cleaned up. TODO: Change this to a barrier.
    // I tried waiting on replication stats to change but sleeping in main thread
    // seems to mess up background replication.
    Threads.sleep(10000);
    ServerName server = UTIL1.getHBaseCluster().getRegionServer(0).getServerName();
    List<ReplicationLoadSource> rLoadSourceList = waitOnMetricsReport(1, server);
    // The remaining server should now have two queues -- the original and then the one that was
    // added because of failover. The original should still be PEER_ID2 though.
    assertEquals("Failed ReplicationLoadSourceList " + rLoadSourceList, 2, rLoadSourceList.size());
    assertEquals(PEER_ID2, rLoadSourceList.get(0).getPeerID());
}
Also used : Admin(org.apache.hadoop.hbase.client.Admin) IOException(java.io.IOException) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer) ClusterMetrics(org.apache.hadoop.hbase.ClusterMetrics) JVMClusterUtil(org.apache.hadoop.hbase.util.JVMClusterUtil) ServerName(org.apache.hadoop.hbase.ServerName) ServerMetrics(org.apache.hadoop.hbase.ServerMetrics) Waiter(org.apache.hadoop.hbase.Waiter) Test(org.junit.Test)
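
The same replication metrics the test inspects can be read from a running cluster through Admin. The ReplicationLoadReport class and the connection wiring below are assumptions for the sketch; the ServerMetrics accessors it calls are the ones exercised in the test above:

import java.util.EnumSet;

import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.replication.ReplicationLoadSource;

// Hedged sketch: lists, per live region server, the peers it replicates to and
// the age of the last applied operation on its replication sink.
public class ReplicationLoadReport {

    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            ClusterMetrics metrics = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS));
            for (ServerMetrics sm : metrics.getLiveServerMetrics().values()) {
                ServerName server = sm.getServerName();
                for (ReplicationLoadSource source : sm.getReplicationLoadSourceList()) {
                    System.out.println(server + " replicates to peer " + source.getPeerID());
                }
                System.out.println(server + " sink age of last applied op: "
                    + sm.getReplicationLoadSink().getAgeOfLastAppliedOp());
            }
        }
    }
}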

Aggregations

ServerMetrics (org.apache.hadoop.hbase.ServerMetrics): 37
ServerName (org.apache.hadoop.hbase.ServerName): 27
RegionMetrics (org.apache.hadoop.hbase.RegionMetrics): 19
ClusterMetrics (org.apache.hadoop.hbase.ClusterMetrics): 18
HashMap (java.util.HashMap): 13
List (java.util.List): 11
ArrayList (java.util.ArrayList): 10
Map (java.util.Map): 10
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo): 10
Test (org.junit.Test): 10
IOException (java.io.IOException): 7
TreeMap (java.util.TreeMap): 6
TableName (org.apache.hadoop.hbase.TableName): 6
Configuration (org.apache.hadoop.conf.Configuration): 5
Collections (java.util.Collections): 4
Collectors (java.util.stream.Collectors): 4
InterfaceAudience (org.apache.yetus.audience.InterfaceAudience): 4
InterruptedIOException (java.io.InterruptedIOException): 3
Arrays (java.util.Arrays): 3
HashSet (java.util.HashSet): 3