Use of org.apache.hadoop.hbase.ServerMetrics in project hbase by apache, in the class TestRegionsRecoveryChore, method getServerMetrics:
private static ServerMetrics getServerMetrics(int noOfRegions) {
  ServerMetrics serverMetrics = new ServerMetrics() {

    @Override
    public ServerName getServerName() {
      return null;
    }

    @Override
    public long getRequestCountPerSecond() {
      return 0;
    }

    @Override
    public long getRequestCount() {
      return 0;
    }

    @Override
    public long getReadRequestsCount() {
      return 0;
    }

    @Override
    public long getWriteRequestsCount() {
      return 0;
    }

    @Override
    public Size getUsedHeapSize() {
      return null;
    }

    @Override
    public Size getMaxHeapSize() {
      return null;
    }

    @Override
    public int getInfoServerPort() {
      return 0;
    }

    @Override
    public List<ReplicationLoadSource> getReplicationLoadSourceList() {
      return null;
    }

    @Override
    public Map<String, List<ReplicationLoadSource>> getReplicationLoadSourceMap() {
      return null;
    }

    @Nullable
    @Override
    public ReplicationLoadSink getReplicationLoadSink() {
      return null;
    }

    @Override
    public Map<byte[], RegionMetrics> getRegionMetrics() {
      Map<byte[], RegionMetrics> regionMetricsMap = new HashMap<>();
      for (int i = 0; i < noOfRegions; i++) {
        byte[] regionName = Bytes.toBytes("region" + regionNo + "_" + i);
        regionMetricsMap.put(regionName,
          TestRegionsRecoveryChore.getRegionMetrics(regionName, 100 * i));
        ++regionNo;
      }
      return regionMetricsMap;
    }

    @Override
    public Map<byte[], UserMetrics> getUserMetrics() {
      return new HashMap<>();
    }

    @Override
    public Set<String> getCoprocessorNames() {
      return null;
    }

    @Override
    public long getReportTimestamp() {
      return 0;
    }

    @Override
    public long getLastReportTimestamp() {
      return 0;
    }

    @Override
    public List<ServerTask> getTasks() {
      return null;
    }
  };
  return serverMetrics;
}
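Hand-implementing every method of ServerMetrics just to drive getRegionMetrics() is verbose. A shorter alternative sketch, assuming the chore under test only reads getRegionMetrics() (Mockito's default answers already supply zeros, nulls, and empty collections for the untouched methods); the method name getServerMetricsMocked is made up for this sketch:

  private static ServerMetrics getServerMetricsMocked(int noOfRegions) {
    ServerMetrics serverMetrics = Mockito.mock(ServerMetrics.class);
    Map<byte[], RegionMetrics> regionMetricsMap = new HashMap<>();
    for (int i = 0; i < noOfRegions; i++) {
      byte[] regionName = Bytes.toBytes("region" + regionNo + "_" + i);
      // Reuses the same getRegionMetrics(byte[], int) helper as the anonymous class above
      regionMetricsMap.put(regionName,
        TestRegionsRecoveryChore.getRegionMetrics(regionName, 100 * i));
      ++regionNo;
    }
    Mockito.when(serverMetrics.getRegionMetrics()).thenReturn(regionMetricsMap);
    return serverMetrics;
  }

One behavioral difference to keep in mind: the anonymous class above builds a fresh map (and advances regionNo) on every getRegionMetrics() call, while this stub computes the map once at creation time.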
Use of org.apache.hadoop.hbase.ServerMetrics in project hbase by apache, in the class TestRegionsRecoveryChore, method testRegionReopensWithLessThreshold:
@Test
public void testRegionReopensWithLessThreshold() throws Exception {
  regionNo = 0;
  ClusterMetrics clusterMetrics = TestRegionsRecoveryChore.getClusterMetrics(4);
  final Map<ServerName, ServerMetrics> serverMetricsMap = clusterMetrics.getLiveServerMetrics();
  LOG.debug("All Region Names with refCount....");
  for (ServerMetrics serverMetrics : serverMetricsMap.values()) {
    Map<byte[], RegionMetrics> regionMetricsMap = serverMetrics.getRegionMetrics();
    for (RegionMetrics regionMetrics : regionMetricsMap.values()) {
      LOG.debug("name: " + new String(regionMetrics.getRegionName()) + " refCount: "
        + regionMetrics.getStoreRefCount());
    }
  }
  Mockito.when(hMaster.getClusterMetrics()).thenReturn(clusterMetrics);
  Mockito.when(hMaster.getAssignmentManager()).thenReturn(assignmentManager);
  for (byte[] regionName : REGION_NAME_LIST) {
    Mockito.when(assignmentManager.getRegionInfo(regionName))
      .thenReturn(TestRegionsRecoveryChore.getRegionInfo(regionName));
  }
  Stoppable stoppable = new StoppableImplementation();
  Configuration configuration = getCustomConf();
  configuration.setInt("hbase.regions.recovery.store.file.ref.count", 400);
  regionsRecoveryChore = new RegionsRecoveryChore(stoppable, configuration, hMaster);
  regionsRecoveryChore.chore();
  // Verify that we need to reopen regions of only 1 table
  Mockito.verify(hMaster, Mockito.times(1)).reopenRegions(Mockito.any(), Mockito.anyList(),
    Mockito.anyLong(), Mockito.anyLong());
  Mockito.verify(hMaster, Mockito.times(1)).getClusterMetrics();
  // Verify that we need to reopen only 1 region with refCount > 400
  Mockito.verify(hMaster, Mockito.times(1)).getAssignmentManager();
  Mockito.verify(assignmentManager, Mockito.times(1)).getRegionInfo(Mockito.any());
}
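What the chore checks reduces to comparing each region's store file reference count against the configured threshold. A simplified sketch of that selection idea (not the chore's actual implementation), using only the metrics API calls already shown above:

  int threshold = 400; // "hbase.regions.recovery.store.file.ref.count" as set in this test
  for (ServerMetrics serverMetrics : clusterMetrics.getLiveServerMetrics().values()) {
    for (RegionMetrics regionMetrics : serverMetrics.getRegionMetrics().values()) {
      if (regionMetrics.getStoreRefCount() > threshold) {
        // Candidate for reopening; the chore resolves it through the AssignmentManager
        LOG.debug("reopen candidate: " + new String(regionMetrics.getRegionName())
          + " refCount: " + regionMetrics.getStoreRefCount());
      }
    }
  }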
Use of org.apache.hadoop.hbase.ServerMetrics in project hbase by apache, in the class TestRegionServerReadRequestMetrics, method testReadRequests:
private void testReadRequests(byte[] regionName, int expectedReadRequests) throws Exception {
  for (ServerName serverName : serverNames) {
    ServerMetrics serverMetrics = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS))
      .getLiveServerMetrics().get(serverName);
    Map<byte[], RegionMetrics> regionMetrics = serverMetrics.getRegionMetrics();
    RegionMetrics regionMetric = regionMetrics.get(regionName);
    if (regionMetric != null) {
      LOG.debug("server read request is "
        + serverMetrics.getRegionMetrics().get(regionName).getReadRequestCount()
        + ", region read request is " + regionMetric.getReadRequestCount());
      assertEquals(expectedReadRequests,
        serverMetrics.getRegionMetrics().get(regionName).getReadRequestCount());
      assertEquals(expectedReadRequests, regionMetric.getReadRequestCount());
    }
  }
}
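The same ClusterMetrics round trip also exposes server-level counters; a small sketch that totals read requests across live servers, using the admin handle from this test:

  long totalReadRequests = 0;
  ClusterMetrics clusterMetrics = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS));
  for (ServerMetrics serverMetrics : clusterMetrics.getLiveServerMetrics().values()) {
    // Server-wide counter, as opposed to the per-region counts asserted above
    totalReadRequests += serverMetrics.getReadRequestsCount();
  }
  LOG.debug("cluster-wide read requests: " + totalReadRequests);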
Use of org.apache.hadoop.hbase.ServerMetrics in project hbase by apache, in the class TestSyncReplicationReplayWALManager, method setUp:
@Before
public void setUp() throws IOException, ReplicationException {
  wokenProcedures = new ArrayDeque<>();
  onlineServers = new HashSet<>();
  listeners = new ArrayList<>();
  ServerManager serverManager = mock(ServerManager.class);
  doAnswer(inv -> listeners.add(inv.getArgument(0))).when(serverManager)
    .registerListener(any(ServerListener.class));
  ServerMetrics serverMetrics = mock(ServerMetrics.class);
  doAnswer(inv -> onlineServers.stream()
    .collect(Collectors.toMap(Function.identity(), k -> serverMetrics)))
      .when(serverManager).getOnlineServers();
  MasterFileSystem mfs = mock(MasterFileSystem.class);
  when(mfs.getFileSystem()).thenReturn(UTIL.getTestFileSystem());
  when(mfs.getWALRootDir()).thenReturn(new Path("/"));
  scheduler = mock(MasterProcedureScheduler.class);
  doAnswer(new Answer<Void>() {

    @Override
    public Void answer(InvocationOnMock invocation) throws Throwable {
      ProcedureEvent<?> event = ((ProcedureEvent<?>[]) invocation.getArgument(0))[0];
      event.wakeInternal(new MasterProcedureScheduler(pid -> null) {

        @Override
        public void addFront(Iterator<Procedure> procedureIterator) {
          procedureIterator.forEachRemaining(wokenProcedures::add);
        }
      });
      return null;
    }
  }).when(scheduler).wakeEvents(any(ProcedureEvent[].class));
  MasterProcedureEnv env = mock(MasterProcedureEnv.class);
  when(env.getProcedureScheduler()).thenReturn(scheduler);
  ProcedureExecutor<MasterProcedureEnv> procExec = mock(ProcedureExecutor.class);
  when(procExec.getEnvironment()).thenReturn(env);
  MasterServices services = mock(MasterServices.class);
  when(services.getServerManager()).thenReturn(serverManager);
  when(services.getMasterFileSystem()).thenReturn(mfs);
  when(services.getMasterProcedureExecutor()).thenReturn(procExec);
  manager = new SyncReplicationReplayWALManager(services);
  assertEquals(1, listeners.size());
}
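Note why getOnlineServers() is stubbed with doAnswer(...) instead of thenReturn(...): the answer recomputes the map from the mutable onlineServers set on every invocation, so servers added or removed after setUp remain visible to the manager. A thenReturn would freeze whatever the set contained at stubbing time, as this comparison sketch shows:

  // thenReturn: the map is built once, here; later changes to onlineServers are invisible.
  when(serverManager.getOnlineServers()).thenReturn(
    onlineServers.stream().collect(Collectors.toMap(Function.identity(), k -> serverMetrics)));
  // doAnswer: the map is rebuilt on every call, tracking the current onlineServers contents.
  doAnswer(inv -> onlineServers.stream()
    .collect(Collectors.toMap(Function.identity(), k -> serverMetrics)))
      .when(serverManager).getOnlineServers();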
Use of org.apache.hadoop.hbase.ServerMetrics in project hbase by apache, in the class TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal, method testBalanceCluster:
/**
 * Test HBASE-20791
 */
@Test
public void testBalanceCluster() throws IOException {
  // Mock cluster state
  Map<ServerName, List<RegionInfo>> clusterState = new HashMap<>();
  ServerName serverA = servers.get(0);
  ServerName serverB = servers.get(1);
  ServerName serverC = servers.get(2);
  List<RegionInfo> regionsOnServerA = randomRegions(3);
  List<RegionInfo> regionsOnServerB = randomRegions(3);
  List<RegionInfo> regionsOnServerC = randomRegions(3);
  clusterState.put(serverA, regionsOnServerA);
  clusterState.put(serverB, regionsOnServerB);
  clusterState.put(serverC, regionsOnServerC);
  // Mock ClusterMetrics
  Map<ServerName, ServerMetrics> serverMetricsMap = new TreeMap<>();
  serverMetricsMap.put(serverA, mockServerMetricsWithReadRequests(serverA, regionsOnServerA, 0));
  serverMetricsMap.put(serverB, mockServerMetricsWithReadRequests(serverB, regionsOnServerB, 0));
  serverMetricsMap.put(serverC, mockServerMetricsWithReadRequests(serverC, regionsOnServerC, 0));
  ClusterMetrics clusterStatus = mock(ClusterMetrics.class);
  when(clusterStatus.getLiveServerMetrics()).thenReturn(serverMetricsMap);
  loadBalancer.updateClusterMetrics(clusterStatus);
  // ReadRequestCostFunction is rate based, so feed the balancer a second round of metrics
  // via updateClusterMetrics. This time the regions on serverA carry a higher
  // readRequestCount load:
  // serverA : 1000,1000,1000
  // serverB : 0,0,0
  // serverC : 0,0,0
  // so the balancer should move two regions from serverA to serverB and serverC.
  serverMetricsMap = new TreeMap<>();
  serverMetricsMap.put(serverA, mockServerMetricsWithReadRequests(serverA, regionsOnServerA, 1000));
  serverMetricsMap.put(serverB, mockServerMetricsWithReadRequests(serverB, regionsOnServerB, 0));
  serverMetricsMap.put(serverC, mockServerMetricsWithReadRequests(serverC, regionsOnServerC, 0));
  clusterStatus = mock(ClusterMetrics.class);
  when(clusterStatus.getLiveServerMetrics()).thenReturn(serverMetricsMap);
  loadBalancer.updateClusterMetrics(clusterStatus);
  Map<TableName, Map<ServerName, List<RegionInfo>>> loadOfAllTable =
    (Map) mockClusterServersWithTables(clusterState);
  List<RegionPlan> plans = loadBalancer.balanceCluster(loadOfAllTable);
  Set<RegionInfo> regionsMoveFromServerA = new HashSet<>();
  Set<ServerName> targetServers = new HashSet<>();
  for (RegionPlan plan : plans) {
    if (plan.getSource().equals(serverA)) {
      regionsMoveFromServerA.add(plan.getRegionInfo());
      targetServers.add(plan.getDestination());
    }
  }
  // Should move 2 regions from serverA: one to serverB, the other to serverC
  assertEquals(2, regionsMoveFromServerA.size());
  assertEquals(2, targetServers.size());
  assertTrue(regionsOnServerA.containsAll(regionsMoveFromServerA));
}
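mockServerMetricsWithReadRequests is a private helper of this test class whose body is not shown in this excerpt. A plausible sketch of its shape, assuming a Mockito-based stub (the exact metrics it fills in are an assumption, not the test's actual code):

  private ServerMetrics mockServerMetricsWithReadRequests(ServerName server,
      List<RegionInfo> regionsOnServer, long readRequestCount) {
    ServerMetrics serverMetrics = mock(ServerMetrics.class);
    Map<byte[], RegionMetrics> regionLoadMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for (RegionInfo info : regionsOnServer) {
      RegionMetrics regionMetrics = mock(RegionMetrics.class);
      // Only the read request count varies; sizes stay at zero (assumed defaults).
      when(regionMetrics.getReadRequestCount()).thenReturn(readRequestCount);
      when(regionMetrics.getMemStoreSize()).thenReturn(new Size(0, Size.Unit.MEGABYTE));
      when(regionMetrics.getStoreFileSize()).thenReturn(new Size(0, Size.Unit.MEGABYTE));
      regionLoadMap.put(info.getRegionName(), regionMetrics);
    }
    when(serverMetrics.getRegionMetrics()).thenReturn(regionLoadMap);
    return serverMetrics;
  }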