Search in sources:

Example 16 with ServerMetrics

Use of org.apache.hadoop.hbase.ServerMetrics in the Apache HBase project.

The following snippet is the method getServerMetrics from the class TestRegionsRecoveryChore.

/**
 * Builds a stub {@link ServerMetrics} for the chore under test. Only
 * {@code getRegionMetrics()} carries meaningful data; every other accessor
 * returns a neutral placeholder (0, null, or an empty map).
 *
 * @param noOfRegions number of synthetic regions the stub exposes
 * @return a {@link ServerMetrics} stub backed by generated region metrics
 */
private static ServerMetrics getServerMetrics(int noOfRegions) {
    // Return the anonymous stub directly; no need for a local variable.
    return new ServerMetrics() {

        @Override
        public ServerName getServerName() {
            return null;
        }

        @Override
        public long getRequestCountPerSecond() {
            return 0;
        }

        @Override
        public long getRequestCount() {
            return 0;
        }

        @Override
        public long getReadRequestsCount() {
            return 0;
        }

        @Override
        public long getWriteRequestsCount() {
            return 0;
        }

        @Override
        public Size getUsedHeapSize() {
            return null;
        }

        @Override
        public Size getMaxHeapSize() {
            return null;
        }

        @Override
        public int getInfoServerPort() {
            return 0;
        }

        @Override
        public List<ReplicationLoadSource> getReplicationLoadSourceList() {
            return null;
        }

        @Override
        public Map<String, List<ReplicationLoadSource>> getReplicationLoadSourceMap() {
            return null;
        }

        @Nullable
        @Override
        public ReplicationLoadSink getReplicationLoadSink() {
            return null;
        }

        @Override
        public Map<byte[], RegionMetrics> getRegionMetrics() {
            // Generate noOfRegions entries. regionNo appears to be a shared
            // counter declared on the test class, so region names stay unique
            // across successive stub servers — TODO confirm against the field
            // declaration.
            final Map<byte[], RegionMetrics> metricsByRegion = new HashMap<>();
            for (int i = 0; i < noOfRegions; i++) {
                final byte[] regionName = Bytes.toBytes("region" + regionNo + "_" + i);
                metricsByRegion.put(regionName,
                    TestRegionsRecoveryChore.getRegionMetrics(regionName, 100 * i));
                ++regionNo;
            }
            return metricsByRegion;
        }

        @Override
        public Map<byte[], UserMetrics> getUserMetrics() {
            return new HashMap<>();
        }

        @Override
        public Set<String> getCoprocessorNames() {
            return null;
        }

        @Override
        public long getReportTimestamp() {
            return 0;
        }

        @Override
        public long getLastReportTimestamp() {
            return 0;
        }

        @Override
        public List<ServerTask> getTasks() {
            return null;
        }
    };
}
Also used : ReplicationLoadSource(org.apache.hadoop.hbase.replication.ReplicationLoadSource) ServerTask(org.apache.hadoop.hbase.ServerTask) HashMap(java.util.HashMap) ServerMetrics(org.apache.hadoop.hbase.ServerMetrics) List(java.util.List) UserMetrics(org.apache.hadoop.hbase.UserMetrics) RegionMetrics(org.apache.hadoop.hbase.RegionMetrics)

Example 17 with ServerMetrics

Use of org.apache.hadoop.hbase.ServerMetrics in the Apache HBase project.

The following snippet is the method testRegionReopensWithLessThreshold from the class TestRegionsRecoveryChore.

/**
 * Verifies that when the store-file ref-count threshold is raised to 400, the
 * chore asks the master to reopen regions of exactly one table (only one
 * region exceeds that threshold in the mocked cluster of 4 servers).
 */
@Test
public void testRegionReopensWithLessThreshold() throws Exception {
    regionNo = 0;
    ClusterMetrics clusterMetrics = TestRegionsRecoveryChore.getClusterMetrics(4);
    final Map<ServerName, ServerMetrics> serverMetricsMap = clusterMetrics.getLiveServerMetrics();
    LOG.debug("All Region Names with refCount....");
    for (ServerMetrics serverMetrics : serverMetricsMap.values()) {
        Map<byte[], RegionMetrics> regionMetricsMap = serverMetrics.getRegionMetrics();
        for (RegionMetrics regionMetrics : regionMetricsMap.values()) {
            // Fix: new String(byte[]) used the platform default charset;
            // Bytes.toString decodes as UTF-8. Parameterized logging also
            // avoids building the message when debug logging is disabled.
            LOG.debug("name: {} refCount: {}", Bytes.toString(regionMetrics.getRegionName()),
                regionMetrics.getStoreRefCount());
        }
    }
    Mockito.when(hMaster.getClusterMetrics()).thenReturn(clusterMetrics);
    Mockito.when(hMaster.getAssignmentManager()).thenReturn(assignmentManager);
    for (byte[] regionName : REGION_NAME_LIST) {
        Mockito.when(assignmentManager.getRegionInfo(regionName)).thenReturn(TestRegionsRecoveryChore.getRegionInfo(regionName));
    }
    Stoppable stoppable = new StoppableImplementation();
    Configuration configuration = getCustomConf();
    // Threshold above which a region's store-file ref count triggers a reopen.
    configuration.setInt("hbase.regions.recovery.store.file.ref.count", 400);
    regionsRecoveryChore = new RegionsRecoveryChore(stoppable, configuration, hMaster);
    regionsRecoveryChore.chore();
    // Verify that we need to reopen regions of only 1 table
    Mockito.verify(hMaster, Mockito.times(1)).reopenRegions(Mockito.any(), Mockito.anyList(), Mockito.anyLong(), Mockito.anyLong());
    Mockito.verify(hMaster, Mockito.times(1)).getClusterMetrics();
    // Verify that we need to reopen only 1 region with refCount > 400
    Mockito.verify(hMaster, Mockito.times(1)).getAssignmentManager();
    Mockito.verify(assignmentManager, Mockito.times(1)).getRegionInfo(Mockito.any());
}
Also used : ClusterMetrics(org.apache.hadoop.hbase.ClusterMetrics) Configuration(org.apache.hadoop.conf.Configuration) ServerName(org.apache.hadoop.hbase.ServerName) ServerMetrics(org.apache.hadoop.hbase.ServerMetrics) Stoppable(org.apache.hadoop.hbase.Stoppable) RegionMetrics(org.apache.hadoop.hbase.RegionMetrics) Test(org.junit.Test)

Example 18 with ServerMetrics

Use of org.apache.hadoop.hbase.ServerMetrics in the Apache HBase project.

The following snippet is the method testReadRequests from the class TestRegionServerReadRequestMetrics.

/**
 * Asserts that both the server-level and region-level metrics report the
 * expected read-request count for the given region, on whichever live server
 * currently hosts it.
 *
 * @param regionName           encoded name of the region whose metrics are checked
 * @param expectedReadRequests read-request count the metrics are expected to report
 * @throws Exception if fetching cluster metrics from the admin fails
 */
private void testReadRequests(byte[] regionName, int expectedReadRequests) throws Exception {
    for (ServerName serverName : serverNames) {
        ServerMetrics serverMetrics = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().get(serverName);
        Map<byte[], RegionMetrics> regionMetrics = serverMetrics.getRegionMetrics();
        RegionMetrics regionMetric = regionMetrics.get(regionName);
        // Only the server hosting the region has an entry for it.
        if (regionMetric != null) {
            LOG.debug("server read request is " + serverMetrics.getRegionMetrics().get(regionName).getReadRequestCount() + ", region read request is " + regionMetric.getReadRequestCount());
            // BUG FIX: the original asserted a hard-coded 3 and silently
            // ignored the expectedReadRequests parameter, so callers passing
            // any other value were not actually verified.
            assertEquals(expectedReadRequests, serverMetrics.getRegionMetrics().get(regionName).getReadRequestCount());
            assertEquals(expectedReadRequests, regionMetric.getReadRequestCount());
        }
    }
}
Also used : ServerName(org.apache.hadoop.hbase.ServerName) ServerMetrics(org.apache.hadoop.hbase.ServerMetrics) RegionMetrics(org.apache.hadoop.hbase.RegionMetrics)

Example 19 with ServerMetrics

Use of org.apache.hadoop.hbase.ServerMetrics in the Apache HBase project.

The following snippet is the method setUp from the class TestSyncReplicationReplayWALManager.

/**
 * Wires up a fully mocked master environment, then constructs the
 * {@code SyncReplicationReplayWALManager} under test against it.
 */
@Before
public void setUp() throws IOException, ReplicationException {
    // Fresh per-test state: procedures woken by the scheduler mock, the
    // simulated set of live servers, and listeners registered on the manager.
    wokenProcedures = new ArrayDeque<>();
    onlineServers = new HashSet<>();
    listeners = new ArrayList<>();
    ServerManager serverManager = mock(ServerManager.class);
    // Capture every listener registered with the ServerManager so the test
    // can inspect or drive it later.
    doAnswer(inv -> listeners.add(inv.getArgument(0))).when(serverManager).registerListener(any(ServerListener.class));
    ServerMetrics serverMetrics = mock(ServerMetrics.class);
    // getOnlineServers() reflects the mutable onlineServers set at call time,
    // mapping every server name to the same shared ServerMetrics mock.
    doAnswer(inv -> onlineServers.stream().collect(Collectors.toMap(Function.identity(), k -> serverMetrics))).when(serverManager).getOnlineServers();
    MasterFileSystem mfs = mock(MasterFileSystem.class);
    when(mfs.getFileSystem()).thenReturn(UTIL.getTestFileSystem());
    when(mfs.getWALRootDir()).thenReturn(new Path("/"));
    scheduler = mock(MasterProcedureScheduler.class);
    // When the manager wakes a ProcedureEvent, divert the procedures the event
    // would re-queue into wokenProcedures instead of a real scheduler queue.
    doAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            ProcedureEvent<?> event = ((ProcedureEvent<?>[]) invocation.getArgument(0))[0];
            // wakeInternal needs a scheduler; supply one whose addFront simply
            // records the procedures it is handed.
            event.wakeInternal(new MasterProcedureScheduler(pid -> null) {

                @Override
                public void addFront(Iterator<Procedure> procedureIterator) {
                    procedureIterator.forEachRemaining(wokenProcedures::add);
                }
            });
            return null;
        }
    }).when(scheduler).wakeEvents(any(ProcedureEvent[].class));
    MasterProcedureEnv env = mock(MasterProcedureEnv.class);
    when(env.getProcedureScheduler()).thenReturn(scheduler);
    ProcedureExecutor<MasterProcedureEnv> procExec = mock(ProcedureExecutor.class);
    when(procExec.getEnvironment()).thenReturn(env);
    MasterServices services = mock(MasterServices.class);
    when(services.getServerManager()).thenReturn(serverManager);
    when(services.getMasterFileSystem()).thenReturn(mfs);
    when(services.getMasterProcedureExecutor()).thenReturn(procExec);
    manager = new SyncReplicationReplayWALManager(services);
    // Constructing the manager must register exactly one server listener.
    assertEquals(1, listeners.size());
}
Also used : ServerManager(org.apache.hadoop.hbase.master.ServerManager) MasterFileSystem(org.apache.hadoop.hbase.master.MasterFileSystem) Path(org.apache.hadoop.fs.Path) ProcedureEvent(org.apache.hadoop.hbase.procedure2.ProcedureEvent) MasterProcedureEnv(org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv) MasterProcedureScheduler(org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler) MasterServices(org.apache.hadoop.hbase.master.MasterServices) InvocationOnMock(org.mockito.invocation.InvocationOnMock) Iterator(java.util.Iterator) ServerMetrics(org.apache.hadoop.hbase.ServerMetrics) ServerListener(org.apache.hadoop.hbase.master.ServerListener) Before(org.junit.Before)

Example 20 with ServerMetrics

Use of org.apache.hadoop.hbase.ServerMetrics in the Apache HBase project.

The following snippet is the method testBalanceCluster from the class TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.

/**
 * Test HBASE-20791: the read-request cost function is rate based, so after a
 * second metrics update in which only serverA's regions accumulated reads
 * (1000 each), the balancer should move two of serverA's regions — one to
 * serverB and one to serverC.
 */
@Test
public void testBalanceCluster() throws IOException {
    // mock cluster State
    Map<ServerName, List<RegionInfo>> clusterState = new HashMap<ServerName, List<RegionInfo>>();
    ServerName serverA = servers.get(0);
    ServerName serverB = servers.get(1);
    ServerName serverC = servers.get(2);
    List<RegionInfo> regionsOnServerA = randomRegions(3);
    List<RegionInfo> regionsOnServerB = randomRegions(3);
    List<RegionInfo> regionsOnServerC = randomRegions(3);
    clusterState.put(serverA, regionsOnServerA);
    clusterState.put(serverB, regionsOnServerB);
    clusterState.put(serverC, regionsOnServerC);
    // mock ClusterMetrics: first snapshot has zero reads everywhere, which
    // establishes the baseline for the rate computation.
    Map<ServerName, ServerMetrics> serverMetricsMap = new TreeMap<>();
    serverMetricsMap.put(serverA, mockServerMetricsWithReadRequests(serverA, regionsOnServerA, 0));
    serverMetricsMap.put(serverB, mockServerMetricsWithReadRequests(serverB, regionsOnServerB, 0));
    serverMetricsMap.put(serverC, mockServerMetricsWithReadRequests(serverC, regionsOnServerC, 0));
    ClusterMetrics clusterStatus = mock(ClusterMetrics.class);
    when(clusterStatus.getLiveServerMetrics()).thenReturn(serverMetricsMap);
    loadBalancer.updateClusterMetrics(clusterStatus);
    // ReadRequestCostFunction are Rate based, So doing setClusterMetrics again
    // this time, regions on serverA with more readRequestCount load
    // serverA : 1000,1000,1000
    // serverB : 0,0,0
    // serverC : 0,0,0
    // so should move two regions from serverA to serverB & serverC
    serverMetricsMap = new TreeMap<>();
    serverMetricsMap.put(serverA, mockServerMetricsWithReadRequests(serverA, regionsOnServerA, 1000));
    serverMetricsMap.put(serverB, mockServerMetricsWithReadRequests(serverB, regionsOnServerB, 0));
    serverMetricsMap.put(serverC, mockServerMetricsWithReadRequests(serverC, regionsOnServerC, 0));
    clusterStatus = mock(ClusterMetrics.class);
    when(clusterStatus.getLiveServerMetrics()).thenReturn(serverMetricsMap);
    loadBalancer.updateClusterMetrics(clusterStatus);
    // Renamed from LoadOfAllTable: locals use lowerCamelCase. The raw (Map)
    // cast bridges the helper's return type to the per-table map shape the
    // balancer expects.
    Map<TableName, Map<ServerName, List<RegionInfo>>> loadOfAllTable = (Map) mockClusterServersWithTables(clusterState);
    List<RegionPlan> plans = loadBalancer.balanceCluster(loadOfAllTable);
    Set<RegionInfo> regionsMoveFromServerA = new HashSet<>();
    Set<ServerName> targetServers = new HashSet<>();
    for (RegionPlan plan : plans) {
        if (plan.getSource().equals(serverA)) {
            regionsMoveFromServerA.add(plan.getRegionInfo());
            targetServers.add(plan.getDestination());
        }
    }
    // should move 2 regions from serverA, one moves to serverB, the other moves to serverC
    assertEquals(2, regionsMoveFromServerA.size());
    assertEquals(2, targetServers.size());
    assertTrue(regionsOnServerA.containsAll(regionsMoveFromServerA));
}
Also used : HashMap(java.util.HashMap) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) TreeMap(java.util.TreeMap) TableName(org.apache.hadoop.hbase.TableName) ClusterMetrics(org.apache.hadoop.hbase.ClusterMetrics) RegionPlan(org.apache.hadoop.hbase.master.RegionPlan) ServerName(org.apache.hadoop.hbase.ServerName) List(java.util.List) ServerMetrics(org.apache.hadoop.hbase.ServerMetrics) HashMap(java.util.HashMap) Map(java.util.Map) TreeMap(java.util.TreeMap) HashSet(java.util.HashSet) Test(org.junit.Test)

Aggregations

ServerMetrics (org.apache.hadoop.hbase.ServerMetrics)37 ServerName (org.apache.hadoop.hbase.ServerName)27 RegionMetrics (org.apache.hadoop.hbase.RegionMetrics)19 ClusterMetrics (org.apache.hadoop.hbase.ClusterMetrics)18 HashMap (java.util.HashMap)13 List (java.util.List)11 ArrayList (java.util.ArrayList)10 Map (java.util.Map)10 RegionInfo (org.apache.hadoop.hbase.client.RegionInfo)10 Test (org.junit.Test)10 IOException (java.io.IOException)7 TreeMap (java.util.TreeMap)6 TableName (org.apache.hadoop.hbase.TableName)6 Configuration (org.apache.hadoop.conf.Configuration)5 Collections (java.util.Collections)4 Collectors (java.util.stream.Collectors)4 InterfaceAudience (org.apache.yetus.audience.InterfaceAudience)4 InterruptedIOException (java.io.InterruptedIOException)3 Arrays (java.util.Arrays)3 HashSet (java.util.HashSet)3