Use of org.apache.hadoop.hbase.ServerMetrics in project hbase by apache.
The class TestStochasticLoadBalancer, method testCPRequestCost.
@Test
public void testCPRequestCost() {
  // in order to pass the needsBalance judgement
  conf.setFloat("hbase.master.balancer.stochastic.cpRequestCost", 10000f);
  loadBalancer.onConfigurationChange(conf);
  // mock cluster state
  Map<ServerName, List<RegionInfo>> clusterState = new HashMap<>();
  ServerName serverA = randomServer(3).getServerName();
  ServerName serverB = randomServer(3).getServerName();
  ServerName serverC = randomServer(3).getServerName();
  List<RegionInfo> regionsOnServerA = randomRegions(3);
  List<RegionInfo> regionsOnServerB = randomRegions(3);
  List<RegionInfo> regionsOnServerC = randomRegions(3);
  clusterState.put(serverA, regionsOnServerA);
  clusterState.put(serverB, regionsOnServerB);
  clusterState.put(serverC, regionsOnServerC);
  // mock ClusterMetrics
  Map<ServerName, ServerMetrics> serverMetricsMap = new TreeMap<>();
  serverMetricsMap.put(serverA, mockServerMetricsWithCpRequests(regionsOnServerA, 0));
  serverMetricsMap.put(serverB, mockServerMetricsWithCpRequests(regionsOnServerB, 0));
  serverMetricsMap.put(serverC, mockServerMetricsWithCpRequests(regionsOnServerC, 0));
  ClusterMetrics clusterStatus = mock(ClusterMetrics.class);
  when(clusterStatus.getLiveServerMetrics()).thenReturn(serverMetricsMap);
  loadBalancer.updateClusterMetrics(clusterStatus);
  // CPRequestCostFunction is rate based, so update the cluster metrics again.
  // This time the regions on serverA carry more cpRequestCount load:
  // serverA : 1000,1000,1000
  // serverB : 0,0,0
  // serverC : 0,0,0
  // so the balancer should move two regions from serverA to serverB & serverC
  serverMetricsMap = new TreeMap<>();
  serverMetricsMap.put(serverA, mockServerMetricsWithCpRequests(regionsOnServerA, 1000));
  serverMetricsMap.put(serverB, mockServerMetricsWithCpRequests(regionsOnServerB, 0));
  serverMetricsMap.put(serverC, mockServerMetricsWithCpRequests(regionsOnServerC, 0));
  clusterStatus = mock(ClusterMetrics.class);
  when(clusterStatus.getLiveServerMetrics()).thenReturn(serverMetricsMap);
  loadBalancer.updateClusterMetrics(clusterStatus);
  List<RegionPlan> plans = loadBalancer.balanceTable(HConstants.ENSEMBLE_TABLE_NAME, clusterState);
  Set<RegionInfo> regionsMoveFromServerA = new HashSet<>();
  Set<ServerName> targetServers = new HashSet<>();
  for (RegionPlan plan : plans) {
    if (plan.getSource().equals(serverA)) {
      regionsMoveFromServerA.add(plan.getRegionInfo());
      targetServers.add(plan.getDestination());
    }
  }
  // should move 2 regions from serverA: one to serverB, the other to serverC
  assertEquals(2, regionsMoveFromServerA.size());
  assertEquals(2, targetServers.size());
  assertTrue(regionsOnServerA.containsAll(regionsMoveFromServerA));
  // reset config
  conf.setFloat("hbase.master.balancer.stochastic.cpRequestCost", 5f);
  loadBalancer.onConfigurationChange(conf);
}
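The test works by raising the coprocessor-request cost weight far above its usual value and restoring it afterwards. The minimal sketch below isolates just that configuration step; the key and the 10000f/5f values are the ones used in the test, while the HBaseConfiguration.create() bootstrap and the loadBalancer reference are assumptions about the surrounding context rather than part of the test.

// Sketch only: weight the CP request cost heavily so CPRequestCostFunction dominates
// the stochastic balancer's total cost, as the test above does.
Configuration conf = HBaseConfiguration.create(); // assumed way of obtaining the Configuration
conf.setFloat("hbase.master.balancer.stochastic.cpRequestCost", 10000f);
loadBalancer.onConfigurationChange(conf); // assumes a StochasticLoadBalancer instance named loadBalancer
// ... run the balancer and assertions ...
conf.setFloat("hbase.master.balancer.stochastic.cpRequestCost", 5f); // value the test resets to
loadBalancer.onConfigurationChange(conf);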
Use of org.apache.hadoop.hbase.ServerMetrics in project hbase by apache.
The class TestStochasticLoadBalancer, method mockServerMetricsWithCpRequests.
private ServerMetrics mockServerMetricsWithCpRequests(List<RegionInfo> regionsOnServer, long cpRequestCount) {
  ServerMetrics serverMetrics = mock(ServerMetrics.class);
  Map<byte[], RegionMetrics> regionLoadMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  for (RegionInfo info : regionsOnServer) {
    RegionMetrics rl = mock(RegionMetrics.class);
    when(rl.getReadRequestCount()).thenReturn(0L);
    when(rl.getCpRequestCount()).thenReturn(cpRequestCount);
    when(rl.getWriteRequestCount()).thenReturn(0L);
    when(rl.getMemStoreSize()).thenReturn(Size.ZERO);
    when(rl.getStoreFileSize()).thenReturn(Size.ZERO);
    regionLoadMap.put(info.getRegionName(), rl);
  }
  when(serverMetrics.getRegionMetrics()).thenReturn(regionLoadMap);
  return serverMetrics;
}
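A hypothetical usage of this helper, assuming it sits in the same test class (so randomRegions(...) and the Mockito/JUnit static imports are available); the region count of 3 and the cpRequestCount of 500 are illustrative values only:

// Hypothetical usage: three mocked regions, each reporting 500 coprocessor requests.
List<RegionInfo> regions = randomRegions(3);
ServerMetrics metrics = mockServerMetricsWithCpRequests(regions, 500);
assertEquals(3, metrics.getRegionMetrics().size());
for (RegionMetrics rm : metrics.getRegionMetrics().values()) {
  assertEquals(500L, rm.getCpRequestCount());
}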
Use of org.apache.hadoop.hbase.ServerMetrics in project hbase by apache.
The class MasterRpcServices, method regionServerReport.
@Override
public RegionServerReportResponse regionServerReport(RpcController controller,
    RegionServerReportRequest request) throws ServiceException {
  try {
    server.checkServiceStarted();
    int versionNumber = 0;
    String version = "0.0.0";
    VersionInfo versionInfo = VersionInfoUtil.getCurrentClientVersionInfo();
    if (versionInfo != null) {
      version = versionInfo.getVersion();
      versionNumber = VersionInfoUtil.getVersionNumber(versionInfo);
    }
    ClusterStatusProtos.ServerLoad sl = request.getLoad();
    ServerName serverName = ProtobufUtil.toServerName(request.getServer());
    ServerMetrics oldLoad = server.getServerManager().getLoad(serverName);
    ServerMetrics newLoad = ServerMetricsBuilder.toServerMetrics(serverName, versionNumber, version, sl);
    server.getServerManager().regionServerReport(serverName, newLoad);
    server.getAssignmentManager().reportOnlineRegions(serverName, newLoad.getRegionMetrics().keySet());
    if (sl != null && server.metricsMaster != null) {
      // Up our metrics.
      server.metricsMaster.incrementRequests(
        sl.getTotalNumberOfRequests() - (oldLoad != null ? oldLoad.getRequestCount() : 0));
      server.metricsMaster.incrementReadRequests(
        sl.getReadRequestsCount() - (oldLoad != null ? oldLoad.getReadRequestsCount() : 0));
      server.metricsMaster.incrementWriteRequests(
        sl.getWriteRequestsCount() - (oldLoad != null ? oldLoad.getWriteRequestsCount() : 0));
    }
  } catch (IOException ioe) {
    throw new ServiceException(ioe);
  }
  return RegionServerReportResponse.newBuilder().build();
}
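The ServerMetrics the master builds here from each RegionServerReport is the same per-server view a client can read back through the Admin API. A minimal client-side sketch is shown below; it assumes an already-open Connection named conn, and the printed fields are just the ones this section already relies on (getRegionMetrics() and getRequestCount()).

import java.util.EnumSet;
import java.util.Map;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;

// Sketch: read back the per-server metrics the master aggregated from regionServerReport.
try (Admin admin = conn.getAdmin()) {
  ClusterMetrics cm = admin.getClusterMetrics(EnumSet.of(ClusterMetrics.Option.LIVE_SERVERS));
  for (Map.Entry<ServerName, ServerMetrics> e : cm.getLiveServerMetrics().entrySet()) {
    ServerMetrics sm = e.getValue();
    System.out.println(e.getKey() + " regions=" + sm.getRegionMetrics().size()
        + " requestCount=" + sm.getRequestCount());
  }
}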
Use of org.apache.hadoop.hbase.ServerMetrics in project hbase by apache.
The class RegionServerTracker, method upgrade.
/**
 * Upgrade to active master mode, where besides tracking changes to the region server set, we
 * will also start adding new region servers to ServerManager and schedule an SCP if a region
 * server dies. Starts the tracking of online RegionServers. All RSes will be tracked after this
 * method is called.
 * <p/>
 * In this method, we will also construct the region server sets in {@link ServerManager}. If a
 * region server is dead between the crash of the previous master instance and the start of the
 * current master instance, we will schedule an SCP for it. This is done in
 * {@link ServerManager#findDeadServersAndProcess(Set, Set)}; we call it here under lock
 * protection to prevent concurrency issues with the server expiration operation.
 * @param deadServersFromPE the region servers which already have an SCP associated.
 * @param liveServersFromWALDir the live region servers from the wal directory.
 * @param splittingServersFromWALDir servers whose WALs are being actively 'split'.
 */
public void upgrade(Set<ServerName> deadServersFromPE, Set<ServerName> liveServersFromWALDir,
    Set<ServerName> splittingServersFromWALDir) throws KeeperException, IOException {
  LOG.info(
    "Upgrading RegionServerTracker to active master mode; {} have existing " +
      "ServerCrashProcedures, {} possibly 'live' servers, and {} 'splitting'.",
    deadServersFromPE.size(), liveServersFromWALDir.size(), splittingServersFromWALDir.size());
  // deadServersFromPE is made from a list of outstanding ServerCrashProcedures.
  // splittingServersFromWALDir are being actively split -- the directory in the FS ends in
  // '-SPLITTING'. Each splitting server should have a corresponding SCP. Log if not.
  splittingServersFromWALDir.stream().filter(s -> !deadServersFromPE.contains(s))
    .forEach(s -> LOG.error("{} has no matching ServerCrashProcedure", s));
  // create ServerNode for all possible live servers from wal directory
  liveServersFromWALDir
    .forEach(sn -> server.getAssignmentManager().getRegionStates().getOrCreateServer(sn));
  ServerManager serverManager = server.getServerManager();
  synchronized (this) {
    Set<ServerName> liveServers = regionServers;
    for (ServerName serverName : liveServers) {
      RegionServerInfo info = getServerInfo(serverName);
      ServerMetrics serverMetrics = info != null
        ? ServerMetricsBuilder.of(serverName,
          VersionInfoUtil.getVersionNumber(info.getVersionInfo()), info.getVersionInfo().getVersion())
        : ServerMetricsBuilder.of(serverName);
      serverManager.checkAndRecordNewServer(serverName, serverMetrics);
    }
    serverManager.findDeadServersAndProcess(deadServersFromPE, liveServersFromWALDir);
    active = true;
  }
}
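The two ServerMetricsBuilder.of overloads used above create placeholder ServerMetrics for servers discovered from ZooKeeper, with or without version information. A small illustrative sketch follows; the host name, port, start code and version values are made up, not taken from the source.

import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.ServerMetricsBuilder;
import org.apache.hadoop.hbase.ServerName;

// Illustrative only: all concrete values here are hypothetical.
ServerName sn = ServerName.valueOf("rs1.example.org", 16020, System.currentTimeMillis());
ServerMetrics bare = ServerMetricsBuilder.of(sn);                          // no version info available
ServerMetrics withVersion = ServerMetricsBuilder.of(sn, 131072, "2.5.0");  // encoded version number + version string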
Use of org.apache.hadoop.hbase.ServerMetrics in project hbase by apache.
The class ServerManager, method getAverageLoad.
/**
 * Compute the average load across all region servers.
 * Currently, this uses a very naive computation - just uses the number of
 * regions being served, ignoring stats about number of requests.
 * @return the average load
 */
public double getAverageLoad() {
  int totalLoad = 0;
  int numServers = 0;
  for (ServerMetrics sl : this.onlineServers.values()) {
    numServers++;
    totalLoad += sl.getRegionMetrics().size();
  }
  return numServers == 0 ? 0 : (double) totalLoad / (double) numServers;
}
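For reference, the same average can be computed outside ServerManager from any Map<ServerName, ServerMetrics> snapshot, for example the one returned by ClusterMetrics.getLiveServerMetrics(). This standalone sketch mirrors the loop above; the method name and its enclosing utility class are assumptions for illustration.

import java.util.Map;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.ServerName;

// Standalone equivalent of getAverageLoad(): average region count per live server.
static double averageRegionsPerServer(Map<ServerName, ServerMetrics> liveServers) {
  if (liveServers.isEmpty()) {
    return 0;
  }
  long totalRegions = liveServers.values().stream()
      .mapToLong(sm -> sm.getRegionMetrics().size())
      .sum();
  return (double) totalRegions / liveServers.size();
}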