Search in sources :

Example 1 with ServerLoad

use of org.apache.hadoop.hbase.ServerLoad in project hbase by apache.

From the class StorageClusterStatusResource, method get.

@GET
@Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF })
public Response get(@Context final UriInfo uriInfo) {
    if (LOG.isTraceEnabled()) {
        LOG.trace("GET " + uriInfo.getAbsolutePath());
    }
    servlet.getMetrics().incrementRequests(1);
    try {
        // Snapshot the cluster state and translate it into the REST model.
        ClusterStatus status = servlet.getAdmin().getClusterStatus();
        StorageClusterStatusModel model = new StorageClusterStatusModel();
        model.setRegions(status.getRegionsCount());
        model.setRequests(status.getRequestsCount());
        model.setAverageLoad(status.getAverageLoad());
        // One live-node entry per online region server, populated from its ServerLoad.
        for (ServerName server : status.getServers()) {
            ServerLoad serverLoad = status.getLoad(server);
            StorageClusterStatusModel.Node node = model.addLiveNode(
                server.getHostname() + ":" + Integer.toString(server.getPort()),
                server.getStartcode(), serverLoad.getUsedHeapMB(), serverLoad.getMaxHeapMB());
            node.setRequests(serverLoad.getNumberOfRequests());
            // Attach per-region metrics for every region hosted on this server.
            for (RegionLoad regionLoad : serverLoad.getRegionsLoad().values()) {
                node.addRegion(regionLoad.getName(), regionLoad.getStores(),
                    regionLoad.getStorefiles(), regionLoad.getStorefileSizeMB(),
                    regionLoad.getMemStoreSizeMB(), regionLoad.getStorefileIndexSizeMB(),
                    regionLoad.getReadRequestsCount(), regionLoad.getWriteRequestsCount(),
                    regionLoad.getRootIndexSizeKB(), regionLoad.getTotalStaticIndexSizeKB(),
                    regionLoad.getTotalStaticBloomSizeKB(), regionLoad.getTotalCompactingKVs(),
                    regionLoad.getCurrentCompactedKVs());
            }
        }
        for (ServerName deadServer : status.getDeadServerNames()) {
            model.addDeadNode(deadServer.toString());
        }
        ResponseBuilder response = Response.ok(model);
        response.cacheControl(cacheControl);
        servlet.getMetrics().incrementSucessfulGetRequests(1);
        return response.build();
    } catch (IOException e) {
        // Admin/cluster-status lookup failed; report 503 rather than surfacing the exception.
        servlet.getMetrics().incrementFailedGetRequests(1);
        return Response.status(Response.Status.SERVICE_UNAVAILABLE).type(MIMETYPE_TEXT).entity("Unavailable" + CRLF).build();
    }
}
Also used : ServerLoad(org.apache.hadoop.hbase.ServerLoad) RegionLoad(org.apache.hadoop.hbase.RegionLoad) ServerName(org.apache.hadoop.hbase.ServerName) StorageClusterStatusModel(org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel) IOException(java.io.IOException) ResponseBuilder(javax.ws.rs.core.Response.ResponseBuilder) ClusterStatus(org.apache.hadoop.hbase.ClusterStatus) Produces(javax.ws.rs.Produces) GET(javax.ws.rs.GET)

Example 2 with ServerLoad

use of org.apache.hadoop.hbase.ServerLoad in project hbase by apache.

From the class MergeTableRegionsProcedure, method getRegionLoad.

/**
 * Looks up the current {@code RegionLoad} for the given region on the given server.
 *
 * @param env the master procedure environment providing access to the ServerManager
 * @param sn  the server believed to be hosting the region
 * @param hri the region whose load is wanted
 * @return the region's load, or {@code null} if the server or region load is unknown
 */
private RegionLoad getRegionLoad(final MasterProcedureEnv env, final ServerName sn, final HRegionInfo hri) {
    final ServerManager serverManager = env.getMasterServices().getServerManager();
    final ServerLoad serverLoad = serverManager.getLoad(sn);
    if (serverLoad == null) {
        // Server not known to the master (e.g. not yet reported in).
        return null;
    }
    final Map<byte[], RegionLoad> regionLoads = serverLoad.getRegionsLoad();
    return regionLoads == null ? null : regionLoads.get(hri.getRegionName());
}
Also used : ServerManager(org.apache.hadoop.hbase.master.ServerManager) ServerLoad(org.apache.hadoop.hbase.ServerLoad) RegionLoad(org.apache.hadoop.hbase.RegionLoad)

Example 3 with ServerLoad

use of org.apache.hadoop.hbase.ServerLoad in project hbase by apache.

From the class StochasticLoadBalancer, method updateRegionLoad.

/**
 * Refreshes the per-region rolling window of load samples from the latest
 * cluster status and pushes the result to all load-based cost functions.
 */
private synchronized void updateRegionLoad() {
    // Rebuild into a fresh map so regions that no longer exist drop out,
    // while the previous map seeds each region's historical sample window.
    Map<String, Deque<BalancerRegionLoad>> previousLoads = loads;
    loads = new HashMap<>();
    for (ServerName server : clusterStatus.getServers()) {
        ServerLoad serverLoad = clusterStatus.getLoad(server);
        if (serverLoad == null) {
            continue;
        }
        for (Entry<byte[], RegionLoad> regionEntry : serverLoad.getRegionsLoad().entrySet()) {
            String regionName = Bytes.toString(regionEntry.getKey());
            Deque<BalancerRegionLoad> history = previousLoads.get(regionName);
            if (history == null) {
                // First time we see this region: start an empty window.
                history = new ArrayDeque<>();
            } else if (history.size() >= numRegionLoadsToRemember) {
                // Window is full: evict the oldest sample before appending the newest.
                history.remove();
            }
            history.add(new BalancerRegionLoad(regionEntry.getValue()));
            loads.put(regionName, history);
        }
    }
    for (CostFromRegionLoadFunction costFunction : regionLoadFunctions) {
        costFunction.setLoads(loads);
    }
}
Also used : ServerLoad(org.apache.hadoop.hbase.ServerLoad) RegionLoad(org.apache.hadoop.hbase.RegionLoad) ServerName(org.apache.hadoop.hbase.ServerName) Deque(java.util.Deque) ArrayDeque(java.util.ArrayDeque)

Example 4 with ServerLoad

use of org.apache.hadoop.hbase.ServerLoad in project hbase by apache.

From the class TestStochasticLoadBalancer, method testKeepRegionLoad.

@Test
public void testKeepRegionLoad() throws Exception {
    ServerName serverName = ServerName.valueOf("test:8080", 100);
    int iterations = 20000;
    // Feed the balancer many cluster-status snapshots, each carrying one
    // RegionLoad whose storefile size equals the round number, so the
    // retained samples can be identified by value afterwards.
    for (int round = 0; round < iterations; round++) {
        ServerLoad serverLoad = mock(ServerLoad.class);
        RegionLoad regionLoad = mock(RegionLoad.class);
        when(regionLoad.getStorefileSizeMB()).thenReturn(round);
        Map<byte[], RegionLoad> regionLoads = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        regionLoads.put(Bytes.toBytes(REGION_KEY), regionLoad);
        when(serverLoad.getRegionsLoad()).thenReturn(regionLoads);
        ClusterStatus status = mock(ClusterStatus.class);
        when(status.getServers()).thenReturn(Arrays.asList(serverName));
        when(status.getLoad(serverName)).thenReturn(serverLoad);
        loadBalancer.setClusterStatus(status);
    }
    // Only the most recent 15 samples should have been kept for the region.
    assertTrue(loadBalancer.loads.get(REGION_KEY) != null);
    assertTrue(loadBalancer.loads.get(REGION_KEY).size() == 15);
    Queue<BalancerRegionLoad> retained = loadBalancer.loads.get(REGION_KEY);
    int offset = 0;
    while (retained.size() > 0) {
        // Samples drain oldest-first: values run from (iterations - 15) upward.
        BalancerRegionLoad sample = retained.remove();
        assertEquals(offset + (iterations - 15), sample.getStorefileSizeMB());
        offset++;
    }
}
Also used : ServerLoad(org.apache.hadoop.hbase.ServerLoad) RegionLoad(org.apache.hadoop.hbase.RegionLoad) ServerName(org.apache.hadoop.hbase.ServerName) TreeMap(java.util.TreeMap) ClusterStatus(org.apache.hadoop.hbase.ClusterStatus) Test(org.junit.Test)

Example 5 with ServerLoad

use of org.apache.hadoop.hbase.ServerLoad in project hbase by apache.

From the class TestAssignmentListener, method testAddNewServerThatExistsInDraining.

@Test
public void testAddNewServerThatExistsInDraining() throws Exception {
    // Under certain circumstances, such as when we failover to the Backup
    // HMaster, the DrainingServerTracker is started with existing servers in
    // draining before all of the Region Servers register with the
    // ServerManager as "online".  This test is to ensure that Region Servers
    // are properly added to the ServerManager.drainingServers when they
    // register with the ServerManager under these circumstances.
    Configuration conf = TEST_UTIL.getConfiguration();
    ZooKeeperWatcher zooKeeper = new ZooKeeperWatcher(conf, "zkWatcher-NewServerDrainTest", abortable, true);
    String baseZNode = conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
    String drainingZNode = ZKUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.draining.rs", "draining"));
    HMaster master = Mockito.mock(HMaster.class);
    Mockito.when(master.getConfiguration()).thenReturn(conf);
    ServerName SERVERNAME_A = ServerName.valueOf("mockserverbulk_a.org", 1000, 8000);
    ServerName SERVERNAME_B = ServerName.valueOf("mockserverbulk_b.org", 1001, 8000);
    ServerName SERVERNAME_C = ServerName.valueOf("mockserverbulk_c.org", 1002, 8000);
    // We'll start with 2 servers in draining that existed before the
    // HMaster started.
    ArrayList<ServerName> drainingServers = new ArrayList<>();
    drainingServers.add(SERVERNAME_A);
    drainingServers.add(SERVERNAME_B);
    // We'll have 2 servers that come online AFTER the DrainingServerTracker
    // is started (just as we see when we failover to the Backup HMaster).
    // One of these will already be a draining server.
    HashMap<ServerName, ServerLoad> onlineServers = new HashMap<>();
    onlineServers.put(SERVERNAME_A, ServerLoad.EMPTY_SERVERLOAD);
    onlineServers.put(SERVERNAME_C, ServerLoad.EMPTY_SERVERLOAD);
    // Pre-create the draining znodes in ZK, mimicking the drain requests
    // performed when the previous HMaster was running.
    for (ServerName sn : drainingServers) {
        String znode = ZKUtil.joinZNode(drainingZNode, sn.getServerName());
        ZKUtil.createAndFailSilent(zooKeeper, znode);
    }
    // Now, we follow the same order of steps that the HMaster does to setup
    // the ServerManager, RegionServerTracker, and DrainingServerTracker.
    ServerManager serverManager = new ServerManager(master);
    RegionServerTracker regionServerTracker = new RegionServerTracker(zooKeeper, master, serverManager);
    regionServerTracker.start();
    DrainingServerTracker drainingServerTracker = new DrainingServerTracker(zooKeeper, master, serverManager);
    drainingServerTracker.start();
    // Confirm our ServerManager lists are empty.
    Assert.assertEquals(serverManager.getOnlineServers(), new HashMap<ServerName, ServerLoad>());
    Assert.assertEquals(serverManager.getDrainingServersList(), new ArrayList<ServerName>());
    // checkAndRecordNewServer() is how servers are added to the ServerManager.
    ArrayList<ServerName> onlineDrainingServers = new ArrayList<>();
    for (ServerName sn : onlineServers.keySet()) {
        // Here's the actual test: a server registering while its draining
        // znode already exists must end up in the draining list as well.
        serverManager.checkAndRecordNewServer(sn, onlineServers.get(sn));
        if (drainingServers.contains(sn)) {
            // keeping track for later verification
            onlineDrainingServers.add(sn);
        }
    }
    // Verify the ServerManager lists are correctly updated.
    Assert.assertEquals(serverManager.getOnlineServers(), onlineServers);
    Assert.assertEquals(serverManager.getDrainingServersList(), onlineDrainingServers);
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) HashMap(java.util.HashMap) DrainingServerTracker(org.apache.hadoop.hbase.zookeeper.DrainingServerTracker) ArrayList(java.util.ArrayList) ServerLoad(org.apache.hadoop.hbase.ServerLoad) ZooKeeperWatcher(org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher) ServerName(org.apache.hadoop.hbase.ServerName) RegionServerTracker(org.apache.hadoop.hbase.zookeeper.RegionServerTracker) Test(org.junit.Test)

Aggregations

ServerLoad (org.apache.hadoop.hbase.ServerLoad)15 ServerName (org.apache.hadoop.hbase.ServerName)13 ClusterStatus (org.apache.hadoop.hbase.ClusterStatus)6 RegionLoad (org.apache.hadoop.hbase.RegionLoad)5 IOException (java.io.IOException)4 Test (org.junit.Test)4 ArrayList (java.util.ArrayList)3 HashMap (java.util.HashMap)3 List (java.util.List)3 HRegionInfo (org.apache.hadoop.hbase.HRegionInfo)3 Map (java.util.Map)2 TreeMap (java.util.TreeMap)2 Admin (org.apache.hadoop.hbase.client.Admin)2 ZooKeeperWatcher (org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher)2 InterruptedIOException (java.io.InterruptedIOException)1 ArrayDeque (java.util.ArrayDeque)1 Deque (java.util.Deque)1 LinkedList (java.util.LinkedList)1 GET (javax.ws.rs.GET)1 Produces (javax.ws.rs.Produces)1