Search in sources :

Example 26 with ServerMetrics

use of org.apache.hadoop.hbase.ServerMetrics in project hbase by apache.

From class TestStochasticLoadBalancer, method testCPRequestCost:

@Test
public void testCPRequestCost() {
    // Inflate the cpRequestCost weight so the balancer's needsBalance check is
    // guaranteed to pass and coprocessor-request skew dominates the cost function.
    conf.setFloat("hbase.master.balancer.stochastic.cpRequestCost", 10000f);
    loadBalancer.onConfigurationChange(conf);
    // Mock cluster state: three servers, each hosting three random regions.
    Map<ServerName, List<RegionInfo>> clusterState = new HashMap<>();
    ServerName serverA = randomServer(3).getServerName();
    ServerName serverB = randomServer(3).getServerName();
    ServerName serverC = randomServer(3).getServerName();
    List<RegionInfo> regionsOnServerA = randomRegions(3);
    List<RegionInfo> regionsOnServerB = randomRegions(3);
    List<RegionInfo> regionsOnServerC = randomRegions(3);
    clusterState.put(serverA, regionsOnServerA);
    clusterState.put(serverB, regionsOnServerB);
    clusterState.put(serverC, regionsOnServerC);
    // First metrics snapshot: every region reports zero coprocessor requests.
    Map<ServerName, ServerMetrics> serverMetricsMap = new TreeMap<>();
    serverMetricsMap.put(serverA, mockServerMetricsWithCpRequests(regionsOnServerA, 0));
    serverMetricsMap.put(serverB, mockServerMetricsWithCpRequests(regionsOnServerB, 0));
    serverMetricsMap.put(serverC, mockServerMetricsWithCpRequests(regionsOnServerC, 0));
    ClusterMetrics clusterStatus = mock(ClusterMetrics.class);
    when(clusterStatus.getLiveServerMetrics()).thenReturn(serverMetricsMap);
    loadBalancer.updateClusterMetrics(clusterStatus);
    // The CP request cost function is rate based, so push a second snapshot.
    // This time regions on serverA carry all the coprocessor request load:
    //   serverA : 1000,1000,1000
    //   serverB : 0,0,0
    //   serverC : 0,0,0
    // The balancer should therefore move two regions off serverA, one each to
    // serverB and serverC.
    serverMetricsMap = new TreeMap<>();
    serverMetricsMap.put(serverA, mockServerMetricsWithCpRequests(regionsOnServerA, 1000));
    serverMetricsMap.put(serverB, mockServerMetricsWithCpRequests(regionsOnServerB, 0));
    serverMetricsMap.put(serverC, mockServerMetricsWithCpRequests(regionsOnServerC, 0));
    clusterStatus = mock(ClusterMetrics.class);
    when(clusterStatus.getLiveServerMetrics()).thenReturn(serverMetricsMap);
    loadBalancer.updateClusterMetrics(clusterStatus);
    List<RegionPlan> plans = loadBalancer.balanceTable(HConstants.ENSEMBLE_TABLE_NAME, clusterState);
    Set<RegionInfo> regionsMoveFromServerA = new HashSet<>();
    Set<ServerName> targetServers = new HashSet<>();
    for (RegionPlan plan : plans) {
        if (plan.getSource().equals(serverA)) {
            regionsMoveFromServerA.add(plan.getRegionInfo());
            targetServers.add(plan.getDestination());
        }
    }
    // Two regions leave serverA, landing on two distinct destination servers.
    assertEquals(2, regionsMoveFromServerA.size());
    assertEquals(2, targetServers.size());
    assertTrue(regionsOnServerA.containsAll(regionsMoveFromServerA));
    // Restore the default weight so later tests are unaffected.
    conf.setFloat("hbase.master.balancer.stochastic.cpRequestCost", 5f);
    loadBalancer.onConfigurationChange(conf);
}
Also used : HashMap(java.util.HashMap) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) TreeMap(java.util.TreeMap) ClusterMetrics(org.apache.hadoop.hbase.ClusterMetrics) RegionPlan(org.apache.hadoop.hbase.master.RegionPlan) ServerName(org.apache.hadoop.hbase.ServerName) ArrayList(java.util.ArrayList) List(java.util.List) ServerMetrics(org.apache.hadoop.hbase.ServerMetrics) HashSet(java.util.HashSet) Test(org.junit.Test)

Example 27 with ServerMetrics

use of org.apache.hadoop.hbase.ServerMetrics in project hbase by apache.

From class TestStochasticLoadBalancer, method mockServerMetricsWithCpRequests:

/**
 * Builds a mocked {@code ServerMetrics} for a server hosting the given regions, where
 * every region reports the supplied coprocessor request count and zero read/write
 * requests, memstore size, and store file size.
 *
 * @param regionsOnServer the regions hosted by the mocked server
 * @param cpRequestCount the coprocessor request count to report for each region
 * @return a mock whose {@code getRegionMetrics()} returns the generated per-region map
 */
private ServerMetrics mockServerMetricsWithCpRequests(List<RegionInfo> regionsOnServer, long cpRequestCount) {
    Map<byte[], RegionMetrics> metricsByRegionName = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for (RegionInfo region : regionsOnServer) {
        RegionMetrics regionMetrics = mock(RegionMetrics.class);
        when(regionMetrics.getReadRequestCount()).thenReturn(0L);
        when(regionMetrics.getWriteRequestCount()).thenReturn(0L);
        when(regionMetrics.getCpRequestCount()).thenReturn(cpRequestCount);
        when(regionMetrics.getMemStoreSize()).thenReturn(Size.ZERO);
        when(regionMetrics.getStoreFileSize()).thenReturn(Size.ZERO);
        metricsByRegionName.put(region.getRegionName(), regionMetrics);
    }
    ServerMetrics serverMetrics = mock(ServerMetrics.class);
    when(serverMetrics.getRegionMetrics()).thenReturn(metricsByRegionName);
    return serverMetrics;
}
Also used : ServerMetrics(org.apache.hadoop.hbase.ServerMetrics) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) TreeMap(java.util.TreeMap) RegionMetrics(org.apache.hadoop.hbase.RegionMetrics)

Example 28 with ServerMetrics

use of org.apache.hadoop.hbase.ServerMetrics in project hbase by apache.

From class MasterRpcServices, method regionServerReport:

// Handles a periodic load report from a region server: records the new load in
// ServerManager, informs the AssignmentManager which regions are online there, and
// updates master-side request metrics by the delta since the previous report.
// NOTE(review): statement order matters — oldLoad must be read from ServerManager
// BEFORE regionServerReport() stores newLoad, otherwise every delta would be zero.
@Override
public RegionServerReportResponse regionServerReport(RpcController controller, RegionServerReportRequest request) throws ServiceException {
    try {
        server.checkServiceStarted();
        // Resolve the reporting client's version; fall back to 0 / "0.0.0" when the
        // RPC carries no version info (e.g. very old clients).
        int versionNumber = 0;
        String version = "0.0.0";
        VersionInfo versionInfo = VersionInfoUtil.getCurrentClientVersionInfo();
        if (versionInfo != null) {
            version = versionInfo.getVersion();
            versionNumber = VersionInfoUtil.getVersionNumber(versionInfo);
        }
        ClusterStatusProtos.ServerLoad sl = request.getLoad();
        ServerName serverName = ProtobufUtil.toServerName(request.getServer());
        // Snapshot the previously recorded load before it is overwritten below, so the
        // request-count deltas can be computed against it.
        ServerMetrics oldLoad = server.getServerManager().getLoad(serverName);
        ServerMetrics newLoad = ServerMetricsBuilder.toServerMetrics(serverName, versionNumber, version, sl);
        server.getServerManager().regionServerReport(serverName, newLoad);
        server.getAssignmentManager().reportOnlineRegions(serverName, newLoad.getRegionMetrics().keySet());
        if (sl != null && server.metricsMaster != null) {
            // Up our metrics. Counters in the report are cumulative per server, so only
            // the increase since the last report is added; a first report (oldLoad ==
            // null) contributes its full totals.
            server.metricsMaster.incrementRequests(sl.getTotalNumberOfRequests() - (oldLoad != null ? oldLoad.getRequestCount() : 0));
            server.metricsMaster.incrementReadRequests(sl.getReadRequestsCount() - (oldLoad != null ? oldLoad.getReadRequestsCount() : 0));
            server.metricsMaster.incrementWriteRequests(sl.getWriteRequestsCount() - (oldLoad != null ? oldLoad.getWriteRequestsCount() : 0));
        }
    } catch (IOException ioe) {
        // Wrap all I/O failures in the protobuf-service exception type callers expect.
        throw new ServiceException(ioe);
    }
    return RegionServerReportResponse.newBuilder().build();
}
Also used : VersionInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.VersionInfo) ClusterStatusProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos) ServiceException(org.apache.hbase.thirdparty.com.google.protobuf.ServiceException) ServerName(org.apache.hadoop.hbase.ServerName) ServerMetrics(org.apache.hadoop.hbase.ServerMetrics) ByteString(org.apache.hbase.thirdparty.com.google.protobuf.ByteString) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException)

Example 29 with ServerMetrics

use of org.apache.hadoop.hbase.ServerMetrics in project hbase by apache.

From class RegionServerTracker, method upgrade:

/**
 * Upgrade to active master mode, where besides tracking the changes of region server set, we will
 * also start to add new region servers to ServerManager and schedule a SCP if a region
 * server dies. Starts the tracking of online RegionServers. All RSes will be tracked after this
 * method is called.
 * <p/>
 * In this method, we will also construct the region server sets in {@link ServerManager}. If a
 * region server is dead between the crash of the previous master instance and the start of the
 * current master instance, we will schedule a SCP for it. This is done in
 * {@link ServerManager#findDeadServersAndProcess(Set, Set)}, we call it here under the lock
 * protection to prevent concurrency issues with server expiration operation.
 * @param deadServersFromPE the region servers which already have SCP associated.
 * @param liveServersFromWALDir the live region servers from wal directory.
 * @param splittingServersFromWALDir Servers whose WALs are being actively 'split'.
 */
public void upgrade(Set<ServerName> deadServersFromPE, Set<ServerName> liveServersFromWALDir, Set<ServerName> splittingServersFromWALDir) throws KeeperException, IOException {
    // Fixed missing space between the concatenated literals ("existing" + "Server...")
    // which previously logged "existingServerCrashProcedures".
    LOG.info("Upgrading RegionServerTracker to active master mode; {} have existing "
        + "ServerCrashProcedures, {} possibly 'live' servers, and {} 'splitting'.",
        deadServersFromPE.size(), liveServersFromWALDir.size(), splittingServersFromWALDir.size());
    // deadServersFromPE is made from a list of outstanding ServerCrashProcedures.
    // splittingServersFromWALDir are being actively split -- the directory in the FS ends in
    // '-SPLITTING'. Each splitting server should have a corresponding SCP. Log if not.
    splittingServersFromWALDir.stream().filter(s -> !deadServersFromPE.contains(s)).forEach(s -> LOG.error("{} has no matching ServerCrashProcedure", s));
    // create ServerNode for all possible live servers from wal directory
    liveServersFromWALDir.forEach(sn -> server.getAssignmentManager().getRegionStates().getOrCreateServer(sn));
    ServerManager serverManager = server.getServerManager();
    // Hold the tracker lock while seeding ServerManager so server expiration cannot
    // interleave with the dead-server reconciliation below.
    synchronized (this) {
        Set<ServerName> liveServers = regionServers;
        for (ServerName serverName : liveServers) {
            RegionServerInfo info = getServerInfo(serverName);
            // Prefer version info published in ZK; fall back to a bare ServerMetrics when absent.
            ServerMetrics serverMetrics = info != null ? ServerMetricsBuilder.of(serverName, VersionInfoUtil.getVersionNumber(info.getVersionInfo()), info.getVersionInfo().getVersion()) : ServerMetricsBuilder.of(serverName);
            serverManager.checkAndRecordNewServer(serverName, serverMetrics);
        }
        serverManager.findDeadServersAndProcess(deadServersFromPE, liveServersFromWALDir);
        active = true;
    }
}
Also used : ZKListener(org.apache.hadoop.hbase.zookeeper.ZKListener) Logger(org.slf4j.Logger) ProtobufUtil(org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil) KeeperException(org.apache.zookeeper.KeeperException) ZKUtil(org.apache.hadoop.hbase.zookeeper.ZKUtil) VersionInfoUtil(org.apache.hadoop.hbase.client.VersionInfoUtil) ThreadFactoryBuilder(org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder) LoggerFactory(org.slf4j.LoggerFactory) Set(java.util.Set) ServerMetrics(org.apache.hadoop.hbase.ServerMetrics) Sets(org.apache.hbase.thirdparty.com.google.common.collect.Sets) IOException(java.io.IOException) ZKWatcher(org.apache.hadoop.hbase.zookeeper.ZKWatcher) InterruptedIOException(java.io.InterruptedIOException) Collectors(java.util.stream.Collectors) Executors(java.util.concurrent.Executors) CollectionUtils(org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils) List(java.util.List) InterfaceAudience(org.apache.yetus.audience.InterfaceAudience) RegionServerInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo) ServerMetricsBuilder(org.apache.hadoop.hbase.ServerMetricsBuilder) Collections(java.util.Collections) ExecutorService(java.util.concurrent.ExecutorService) ServerName(org.apache.hadoop.hbase.ServerName) RegionServerInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo) ServerName(org.apache.hadoop.hbase.ServerName) ServerMetrics(org.apache.hadoop.hbase.ServerMetrics)

Example 30 with ServerMetrics

use of org.apache.hadoop.hbase.ServerMetrics in project hbase by apache.

From class ServerManager, method getAverageLoad:

/**
 * Compute the average load across all region servers.
 * Currently, this uses a very naive computation - just uses the number of
 * regions being served, ignoring stats about number of requests.
 * @return the average number of regions per online server, or 0 when no servers are online
 */
public double getAverageLoad() {
    long serverCount = 0;
    long regionCount = 0;
    // Tally servers and their hosted-region counts from the current online set.
    for (ServerMetrics metrics : this.onlineServers.values()) {
        serverCount++;
        regionCount += metrics.getRegionMetrics().size();
    }
    // Guard against division by zero when no region servers are online.
    if (serverCount == 0) {
        return 0;
    }
    return (double) regionCount / (double) serverCount;
}
Also used : ServerMetrics(org.apache.hadoop.hbase.ServerMetrics)

Aggregations

ServerMetrics (org.apache.hadoop.hbase.ServerMetrics)37 ServerName (org.apache.hadoop.hbase.ServerName)27 RegionMetrics (org.apache.hadoop.hbase.RegionMetrics)19 ClusterMetrics (org.apache.hadoop.hbase.ClusterMetrics)18 HashMap (java.util.HashMap)13 List (java.util.List)11 ArrayList (java.util.ArrayList)10 Map (java.util.Map)10 RegionInfo (org.apache.hadoop.hbase.client.RegionInfo)10 Test (org.junit.Test)10 IOException (java.io.IOException)7 TreeMap (java.util.TreeMap)6 TableName (org.apache.hadoop.hbase.TableName)6 Configuration (org.apache.hadoop.conf.Configuration)5 Collections (java.util.Collections)4 Collectors (java.util.stream.Collectors)4 InterfaceAudience (org.apache.yetus.audience.InterfaceAudience)4 InterruptedIOException (java.io.InterruptedIOException)3 Arrays (java.util.Arrays)3 HashSet (java.util.HashSet)3