use of org.apache.hadoop.hbase.ServerLoad in project hbase by apache.
In the class RegionStates, the method getAssignmentsByTable.
/**
* This is an EXPENSIVE clone. Cloning though is the safest thing to do.
* Can't let out original since it can change and at least the load balancer
* wants to iterate this exported list. We need to synchronize on regions
* since all access to this.servers is under a lock on this.regions.
* @param forceByCluster a flag that forces the server load to be aggregated at the cluster level
* @return A clone of current assignments by table.
*/
protected Map<TableName, Map<ServerName, List<HRegionInfo>>> getAssignmentsByTable(boolean forceByCluster) {
  Map<TableName, Map<ServerName, List<HRegionInfo>>> result;
  synchronized (this) {
    result = getTableRSRegionMap(server.getConfiguration().getBoolean(
      HConstants.HBASE_MASTER_LOADBALANCE_BYTABLE, false) && !forceByCluster);
  }
  Map<ServerName, ServerLoad> onlineSvrs = serverManager.getOnlineServers();
  // Take care of servers w/o assignments, and remove servers in draining mode
  List<ServerName> drainingServers = this.serverManager.getDrainingServersList();
  for (Map<ServerName, List<HRegionInfo>> map : result.values()) {
    for (ServerName svr : onlineSvrs.keySet()) {
      if (!map.containsKey(svr)) {
        map.put(svr, new ArrayList<>());
      }
    }
    map.keySet().removeAll(drainingServers);
  }
  return result;
}
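For context, the load balancer is the main consumer of this per-table map. A minimal sketch of how a caller might walk the cloned structure (the regionStates reference and the logging are illustrative, not taken from the method above):

// Illustrative caller: because getAssignmentsByTable returns a clone, the map can be
// iterated safely even while the actual assignments keep changing underneath.
Map<TableName, Map<ServerName, List<HRegionInfo>>> assignments =
    regionStates.getAssignmentsByTable(false);
for (Map.Entry<TableName, Map<ServerName, List<HRegionInfo>>> entry : assignments.entrySet()) {
  TableName table = entry.getKey();
  Map<ServerName, List<HRegionInfo>> serverToRegions = entry.getValue();
  int regionCount = 0;
  for (List<HRegionInfo> regions : serverToRegions.values()) {
    regionCount += regions.size();
  }
  LOG.debug(table + ": " + regionCount + " regions across " + serverToRegions.size() + " servers");
}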
use of org.apache.hadoop.hbase.ServerLoad in project hbase by apache.
In the class ServerManager, the method getAverageLoad.
/**
* Compute the average load across all region servers.
* Currently, this uses a very naive computation - just uses the number of
* regions being served, ignoring stats about number of requests.
* @return the average load
*/
public double getAverageLoad() {
  int totalLoad = 0;
  int numServers = 0;
  for (ServerLoad sl : this.onlineServers.values()) {
    numServers++;
    totalLoad += sl.getNumberOfRegions();
  }
  return numServers == 0 ? 0 : (double) totalLoad / (double) numServers;
}
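As a usage note, this average is typically compared against each server's own region count to decide whether the cluster is out of balance. A hedged sketch of such a check (the slop tolerance and the isBalanced helper are illustrative, not part of ServerManager):

// Illustrative: the cluster is treated as balanced if every online server's region
// count stays within a tolerance ("slop") around the average load computed above.
boolean isBalanced(ServerManager serverManager, float slop) {
  double avg = serverManager.getAverageLoad();
  int floor = (int) Math.floor(avg * (1 - slop));
  int ceiling = (int) Math.ceil(avg * (1 + slop));
  for (ServerLoad sl : serverManager.getOnlineServers().values()) {
    int regions = sl.getNumberOfRegions();
    if (regions < floor || regions > ceiling) {
      return false;
    }
  }
  return true;
}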
use of org.apache.hadoop.hbase.ServerLoad in project hbase by apache.
In the class MasterRpcServices, the method regionServerReport.
@Override
public RegionServerReportResponse regionServerReport(RpcController controller,
    RegionServerReportRequest request) throws ServiceException {
  try {
    master.checkServiceStarted();
    ClusterStatusProtos.ServerLoad sl = request.getLoad();
    ServerName serverName = ProtobufUtil.toServerName(request.getServer());
    ServerLoad oldLoad = master.getServerManager().getLoad(serverName);
    master.getServerManager().regionServerReport(serverName, new ServerLoad(sl));
    if (sl != null && master.metricsMaster != null) {
      // Up our metrics.
      master.metricsMaster.incrementRequests(
        sl.getTotalNumberOfRequests() - (oldLoad != null ? oldLoad.getTotalNumberOfRequests() : 0));
    }
  } catch (IOException ioe) {
    throw new ServiceException(ioe);
  }
  return RegionServerReportResponse.newBuilder().build();
}
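The subtle point here is that getTotalNumberOfRequests() reports a cumulative counter per region server, so the master converts it into a delta since the previous report before incrementing its own metric. A minimal sketch of that delta logic in isolation (plain longs; the requestDelta helper is illustrative):

// Illustrative: turn a cumulative per-server request counter into the increment since
// the previous report; a server with no previous report contributes its full total.
long requestDelta(Long previousTotal, long currentTotal) {
  return currentTotal - (previousTotal != null ? previousTotal : 0L);
}
// e.g. a previous report of 10000 total requests followed by a report of 10750
// should increment the master-level request metric by 750.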
use of org.apache.hadoop.hbase.ServerLoad in project hbase by apache.
In the class TestReplicationStatus, the method testReplicationStatus.
/**
 * Test for HBASE-9531.
 * Put a few rows into htable1, which should be replicated to htable2.
 * Create a ClusterStatus instance 'status' from HBaseAdmin.
 * Test: status.getLoad(server).getReplicationLoadSourceList()
 * Test: status.getLoad(server).getReplicationLoadSink()
 * @throws Exception
 */
@Test(timeout = 300000)
public void testReplicationStatus() throws Exception {
  LOG.info("testReplicationStatus");
  try (Admin hbaseAdmin = utility1.getConnection().getAdmin()) {
    // disable peer
    admin.disablePeer(PEER_ID);
    final byte[] qualName = Bytes.toBytes("q");
    Put p;
    for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
      p = new Put(Bytes.toBytes("row" + i));
      p.addColumn(famName, qualName, Bytes.toBytes("val" + i));
      htable1.put(p);
    }
    ClusterStatus status = hbaseAdmin.getClusterStatus();
    for (JVMClusterUtil.RegionServerThread thread :
        utility1.getHBaseCluster().getRegionServerThreads()) {
      ServerName server = thread.getRegionServer().getServerName();
      ServerLoad sl = status.getLoad(server);
      List<ReplicationLoadSource> rLoadSourceList = sl.getReplicationLoadSourceList();
      ReplicationLoadSink rLoadSink = sl.getReplicationLoadSink();
      // check that the SourceList has only one entry, because there is only one peer
      assertTrue("failed to get ReplicationLoadSourceList", (rLoadSourceList.size() == 1));
      assertEquals(PEER_ID, rLoadSourceList.get(0).getPeerID());
      // check only that the Sink exists, as it is difficult to verify the values on the fly
      assertTrue("failed to get ReplicationLoadSink.AgeOfLastShippedOp ",
        (rLoadSink.getAgeOfLastAppliedOp() >= 0));
      assertTrue("failed to get ReplicationLoadSink.TimeStampsOfLastAppliedOp ",
        (rLoadSink.getTimeStampsOfLastAppliedOp() >= 0));
    }
    // Stop rs1, then the queue of rs1 will be transferred to rs0
    utility1.getHBaseCluster().getRegionServer(1).stop("Stop RegionServer");
    Thread.sleep(10000);
    status = hbaseAdmin.getClusterStatus();
    ServerName server = utility1.getHBaseCluster().getRegionServer(0).getServerName();
    ServerLoad sl = status.getLoad(server);
    List<ReplicationLoadSource> rLoadSourceList = sl.getReplicationLoadSourceList();
    // check that the SourceList still has only one entry
    assertTrue("failed to get ReplicationLoadSourceList", (rLoadSourceList.size() == 1));
    assertEquals(PEER_ID, rLoadSourceList.get(0).getPeerID());
  } finally {
    admin.enablePeer(PEER_ID);
    utility1.getHBaseCluster().getRegionServer(1).start();
  }
}
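Beyond the assertions in this test, the same accessors lend themselves to a quick replication snapshot of the whole cluster. A hedged sketch, assuming ClusterStatus.getServers() is available in this version (the summarizeReplication helper is illustrative):

// Illustrative: log each region server's replication sources and sink using the
// same ServerLoad accessors exercised by the test above.
void summarizeReplication(Admin admin) throws IOException {
  ClusterStatus status = admin.getClusterStatus();
  for (ServerName server : status.getServers()) {
    ServerLoad sl = status.getLoad(server);
    for (ReplicationLoadSource source : sl.getReplicationLoadSourceList()) {
      LOG.info(server + " has a replication source for peer " + source.getPeerID());
    }
    ReplicationLoadSink sink = sl.getReplicationLoadSink();
    LOG.info(server + " sink age of last applied op: " + sink.getAgeOfLastAppliedOp() + " ms");
  }
}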
use of org.apache.hadoop.hbase.ServerLoad in project hbase by apache.
In the class TestMasterReplication, the method testLoopedReplication.
/**
 * Tests the replication scenario 0 -> 0. By default
 * {@link BaseReplicationEndpoint#canReplicateToSameCluster()} returns false, so the
 * ReplicationSource should terminate, and no further logs should get enqueued.
 */
@Test(timeout = 300000)
public void testLoopedReplication() throws Exception {
  LOG.info("testLoopedReplication");
  startMiniClusters(1);
  createTableOnClusters(table);
  addPeer("1", 0, 0);
  Thread.sleep(SLEEP_TIME);
  // wait for source to terminate
  final ServerName rsName = utilities[0].getHBaseCluster().getRegionServer(0).getServerName();
  Waiter.waitFor(baseConfiguration, 10000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      ClusterStatus clusterStatus = utilities[0].getAdmin().getClusterStatus();
      ServerLoad serverLoad = clusterStatus.getLoad(rsName);
      List<ReplicationLoadSource> replicationLoadSourceList =
        serverLoad.getReplicationLoadSourceList();
      return replicationLoadSourceList.isEmpty();
    }
  });
  Table[] htables = getHTablesOnClusters(tableName);
  putAndWait(row, famName, htables[0], htables[0]);
  rollWALAndWait(utilities[0], table.getTableName(), row);
  ZooKeeperWatcher zkw = utilities[0].getZooKeeperWatcher();
  String queuesZnode = ZKUtil.joinZNode(zkw.getZNodePaths().baseZNode,
    ZKUtil.joinZNode("replication", "rs"));
  List<String> listChildrenNoWatch =
    ZKUtil.listChildrenNoWatch(zkw, ZKUtil.joinZNode(queuesZnode, rsName.toString()));
  assertEquals(0, listChildrenNoWatch.size());
}
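For completeness, the loop prevention this test relies on is an overridable check, so a custom endpoint can opt out of it. A hedged sketch, assuming HBaseInterClusterReplicationEndpoint is the concrete default endpoint in this version (the LoopingEndpoint class name is illustrative):

// Illustrative: an endpoint that permits replicating back into the same cluster by
// overriding the check mentioned in the javadoc above; with the default of false,
// the ReplicationSource for a 0 -> 0 peer terminates itself as asserted in the test.
public class LoopingEndpoint extends HBaseInterClusterReplicationEndpoint {
  @Override
  public boolean canReplicateToSameCluster() {
    return true;
  }
}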