Use of org.apache.hadoop.hbase.client.ClusterConnection in project hbase by apache.
Class DistributedHBaseCluster, method getServerHoldingRegion.
@Override
public ServerName getServerHoldingRegion(TableName tn, byte[] regionName) throws IOException {
  HRegionLocation regionLoc = null;
  try (RegionLocator locator = connection.getRegionLocator(tn)) {
    regionLoc = locator.getRegionLocation(regionName, true);
  }
  if (regionLoc == null) {
    LOG.warn("Cannot find region server holding region " + Bytes.toString(regionName)
        + ", start key [" + Bytes.toString(HRegionInfo.getStartKey(regionName)) + "]");
    return null;
  }
  AdminProtos.AdminService.BlockingInterface client =
      ((ClusterConnection) this.connection).getAdmin(regionLoc.getServerName());
  ServerInfo info = ProtobufUtil.getServerInfo(null, client);
  return ProtobufUtil.toServerName(info.getServerName());
}
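The pattern to note is the cast: the public RegionLocator API answers "where is this region", while the ClusterConnection cast exposes the low-level AdminService stub of a single region server. Below is a minimal standalone sketch of that second step. It assumes a pre-shaded HBase classpath (the ProtobufUtil and AdminProtos import paths moved in later releases), and the server address is hypothetical; real code would discover the ServerName the way the method above does.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;

public class AdminStubSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // Hypothetical region server address; the startcode is normally discovered, not invented.
      ServerName server = ServerName.valueOf("rs1.example.com", 16020, 1L);
      AdminProtos.AdminService.BlockingInterface admin =
          ((ClusterConnection) conn).getAdmin(server);
      // Ask the server to identify itself, exactly as getServerHoldingRegion does above.
      System.out.println(ProtobufUtil.toServerName(
          ProtobufUtil.getServerInfo(null, admin).getServerName()));
    }
  }
}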
Use of org.apache.hadoop.hbase.client.ClusterConnection in project hbase by apache.
Class TestRSGroups, method testNamespaceCreateAndAssign.
@Test
public void testNamespaceCreateAndAssign() throws Exception {
  LOG.info("testNamespaceCreateAndAssign");
  String nsName = tablePrefix + "_foo";
  final TableName tableName = TableName.valueOf(nsName, tablePrefix + "_testCreateAndAssign");
  RSGroupInfo appInfo = addGroup("appInfo", 1);
  admin.createNamespace(NamespaceDescriptor.create(nsName)
      .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, "appInfo").build());
  final HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor("f"));
  admin.createTable(desc);
  // wait for the created table to be assigned
  TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return getTableRegionMap().get(desc.getTableName()) != null;
    }
  });
  ServerName targetServer =
      ServerName.parseServerName(appInfo.getServers().iterator().next().toString());
  AdminProtos.AdminService.BlockingInterface rs =
      ((ClusterConnection) admin.getConnection()).getAdmin(targetServer);
  // verify the region was assigned to the right group
  Assert.assertEquals(1, ProtobufUtil.getOnlineRegions(rs).size());
}
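The assertion at the end reduces to "how many regions does this server host right now". Here is a small helper distilled from that check, a sketch under the same pre-shaded-classpath assumption rather than a utility from the HBase source; the class and method names are hypothetical.

import java.io.IOException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;

final class RegionCounts {
  // Count the regions currently online on one region server, using the same
  // ClusterConnection cast and ProtobufUtil.getOnlineRegions call as the test above.
  static int onlineRegionCount(Connection conn, ServerName server) throws IOException {
    AdminProtos.AdminService.BlockingInterface rs =
        ((ClusterConnection) conn).getAdmin(server);
    return ProtobufUtil.getOnlineRegions(rs).size();
  }
}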
Use of org.apache.hadoop.hbase.client.ClusterConnection in project hbase by apache.
Class TestRSGroupsBase, method testKillRS.
@Test
public void testKillRS() throws Exception {
  RSGroupInfo appInfo = addGroup("appInfo", 1);
  final TableName tableName = TableName.valueOf(tablePrefix + "_ns", name.getMethodName());
  admin.createNamespace(NamespaceDescriptor.create(tableName.getNamespaceAsString())
      .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, appInfo.getName()).build());
  final HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor("f"));
  admin.createTable(desc);
  // wait for the created table to be assigned
  TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return getTableRegionMap().get(desc.getTableName()) != null;
    }
  });
  ServerName targetServer =
      ServerName.parseServerName(appInfo.getServers().iterator().next().toString());
  AdminProtos.AdminService.BlockingInterface targetRS =
      ((ClusterConnection) admin.getConnection()).getAdmin(targetServer);
  HRegionInfo targetRegion = ProtobufUtil.getOnlineRegions(targetRS).get(0);
  Assert.assertEquals(1, ProtobufUtil.getOnlineRegions(targetRS).size());
  try {
    // stopping may cause an exception due to the connection loss
    targetRS.stopServer(null, AdminProtos.StopServerRequest.newBuilder().setReason("Die").build());
  } catch (Exception e) {
    // expected: the server dies mid-call
  }
  assertFalse(cluster.getClusterStatus().getServers().contains(targetServer));
  // wait for regions in transition to settle after the kill
  TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return cluster.getClusterStatus().getRegionsInTransition().isEmpty();
    }
  });
  Set<Address> newServers = Sets.newHashSet();
  newServers.add(
      rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getServers().iterator().next());
  rsGroupAdmin.moveServers(newServers, appInfo.getName());
  // Make sure all the table's regions get reassigned.
  // Disabling the table guarantees no conflicting assign/unassign (i.e. SSH) happens.
  admin.disableTable(tableName);
  admin.enableTable(tableName);
  // wait for the regions to be assigned
  TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return cluster.getClusterStatus().getRegionsInTransition().isEmpty();
    }
  });
  targetServer = ServerName.parseServerName(newServers.iterator().next().toString());
  targetRS = ((ClusterConnection) admin.getConnection()).getAdmin(targetServer);
  Assert.assertEquals(1, ProtobufUtil.getOnlineRegions(targetRS).size());
  Assert.assertEquals(tableName, ProtobufUtil.getOnlineRegions(targetRS).get(0).getTable());
}
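The kill step is worth isolating: the test bypasses Admin entirely and sends a StopServerRequest straight to the target server's admin stub, tolerating an RPC failure because the server dies mid-call. A hedged sketch of that step alone, under the same classpath assumption, with hypothetical class and method names:

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;

final class StopServers {
  // Ask one region server to shut down. The RPC often fails with a
  // connection-loss exception because the server dies while answering,
  // so the exception is deliberately swallowed, mirroring the test above.
  static void stopQuietly(Connection conn, ServerName server, String reason) {
    try {
      AdminProtos.AdminService.BlockingInterface rs =
          ((ClusterConnection) conn).getAdmin(server);
      rs.stopServer(null, AdminProtos.StopServerRequest.newBuilder().setReason(reason).build());
    } catch (Exception e) {
      // expected on connection loss
    }
  }
}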
Use of org.apache.hadoop.hbase.client.ClusterConnection in project hbase by apache.
Class RegionPlacementMaintainer, method updateAssignmentPlanToRegionServers.
/**
 * Update the assignment plan on all the region servers.
 * @param plan the new favored nodes assignment plan to push out
 * @throws IOException if the current region assignment snapshot cannot be obtained
 */
private void updateAssignmentPlanToRegionServers(FavoredNodesPlan plan) throws IOException {
  LOG.info("Start to update the region servers with the new assignment plan");
  // Get the region server to region map
  Map<ServerName, List<HRegionInfo>> currentAssignment =
      this.getRegionAssignmentSnapshot().getRegionServerToRegionMap();
  // Keep track of the failed and succeeded updates
  int succeededNum = 0;
  Map<ServerName, Exception> failedUpdateMap = new HashMap<>();
  for (Map.Entry<ServerName, List<HRegionInfo>> entry : currentAssignment.entrySet()) {
    List<Pair<HRegionInfo, List<ServerName>>> regionUpdateInfos = new ArrayList<>();
    try {
      // Keep track of the favored node updates for the current region server
      FavoredNodesPlan singleServerPlan = null;
      // Find all the updates for the current region server
      for (HRegionInfo region : entry.getValue()) {
        List<ServerName> favoredServerList = plan.getFavoredNodes(region);
        if (favoredServerList != null
            && favoredServerList.size() == FavoredNodeAssignmentHelper.FAVORED_NODES_NUM) {
          // Create the single server plan if necessary
          if (singleServerPlan == null) {
            singleServerPlan = new FavoredNodesPlan();
          }
          // Record the update for this region
          singleServerPlan.updateFavoredNodesMap(region, favoredServerList);
          regionUpdateInfos.add(new Pair<>(region, favoredServerList));
        }
      }
      if (singleServerPlan != null) {
        // Update the current region server with its new favored nodes
        BlockingInterface currentRegionServer =
            ((ClusterConnection) this.connection).getAdmin(entry.getKey());
        UpdateFavoredNodesRequest request =
            RequestConverter.buildUpdateFavoredNodesRequest(regionUpdateInfos);
        UpdateFavoredNodesResponse updateFavoredNodesResponse =
            currentRegionServer.updateFavoredNodes(null, request);
        LOG.info("Region server "
            + ProtobufUtil.getServerInfo(null, currentRegionServer).getServerName()
            + " has updated " + updateFavoredNodesResponse.getResponse() + " / "
            + singleServerPlan.getAssignmentMap().size() + " regions with the assignment plan");
        succeededNum++;
      }
    } catch (Exception e) {
      failedUpdateMap.put(entry.getKey(), e);
    }
  }
  // Log the succeeded updates
  LOG.info("Updated " + succeededNum + " region servers with the new assignment plan");
  // Log the failed updates
  int failedNum = failedUpdateMap.size();
  if (failedNum != 0) {
    LOG.error("Failed to update the following " + failedNum
        + " region servers with their corresponding favored nodes");
    for (Map.Entry<ServerName, Exception> entry : failedUpdateMap.entrySet()) {
      LOG.error("Failed to update " + entry.getKey().getHostAndPort() + " because of "
          + entry.getValue().getMessage());
    }
  }
}
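The per-server push reduces to converting (region, favored nodes) pairs into an UpdateFavoredNodesRequest and invoking the stub. A sketch of a single-region push, assuming the same pre-shaded classpath and that UpdateFavoredNodesResponse.getResponse() reports the number of updated regions (which is how the log message above reads it); the helper name is hypothetical.

import java.util.Collections;
import java.util.List;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.protobuf.RequestConverter;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
import org.apache.hadoop.hbase.util.Pair;

final class FavoredNodesPush {
  // Push one region's favored-node list to the server hosting it and
  // return how many regions the server reports as updated.
  static int pushFavoredNodes(Connection conn, ServerName host, HRegionInfo region,
      List<ServerName> favoredNodes) throws Exception {
    AdminProtos.AdminService.BlockingInterface rs =
        ((ClusterConnection) conn).getAdmin(host);
    AdminProtos.UpdateFavoredNodesRequest request = RequestConverter
        .buildUpdateFavoredNodesRequest(
            Collections.singletonList(new Pair<>(region, favoredNodes)));
    return rs.updateFavoredNodes(null, request).getResponse();
  }
}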
Use of org.apache.hadoop.hbase.client.ClusterConnection in project hbase by apache.
Class DumpReplicationQueues, method dumpReplicationQueues.
private int dumpReplicationQueues(DumpOptions opts) throws Exception {
  Configuration conf = getConf();
  HBaseAdmin.available(conf);
  ClusterConnection connection = (ClusterConnection) ConnectionFactory.createConnection(conf);
  Admin admin = connection.getAdmin();
  ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf,
      "DumpReplicationQueues" + System.currentTimeMillis(), new WarnOnlyAbortable(), true);
  try {
    // Our zk watcher
    LOG.info("Our Quorum: " + zkw.getQuorum());
    List<TableCFs> replicatedTableCFs = admin.listReplicatedTableCFs();
    if (replicatedTableCFs.isEmpty()) {
      LOG.info("No tables with a configured replication peer were found.");
      return (0);
    } else {
      LOG.info("Replicated Tables: " + replicatedTableCFs);
    }
    List<ReplicationPeerDescription> peers = admin.listReplicationPeers();
    if (peers.isEmpty()) {
      LOG.info("Replication is enabled but no peer configuration was found.");
    }
    System.out.println("Dumping replication peers and configurations:");
    System.out.println(dumpPeersState(peers));
    if (opts.isDistributed()) {
      LOG.info("Found [--distributed], will poll each RegionServer.");
      Set<String> peerIds =
          peers.stream().map((peer) -> peer.getPeerId()).collect(Collectors.toSet());
      System.out.println(dumpQueues(connection, zkw, peerIds, opts.isHdfs()));
      System.out.println(dumpReplicationSummary());
    } else {
      // use ZK instead
      System.out.print("Dumping replication znodes via ZooKeeper:");
      System.out.println(ZKUtil.getReplicationZnodesDump(zkw));
    }
    return (0);
  } catch (IOException e) {
    return (-1);
  } finally {
    zkw.close();
  }
}
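One setup detail worth copying carefully: the tool casts to ClusterConnection at creation time because the distributed path later needs per-server admin stubs via dumpQueues, while plain Admin covers the peer and table listings. Note also that the method above never closes the connection or the Admin it opens. A sketch of the same setup with explicit cleanup, under the same classpath assumption; the class and method names are hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

final class ReplicationDumpSetup {
  // Same setup as dumpReplicationQueues, with explicit cleanup: cast once at
  // creation so the ClusterConnection is available for per-server stubs, and
  // use plain Admin for everything that does not need them.
  static void withClusterConnection(Configuration conf) throws Exception {
    ClusterConnection connection = (ClusterConnection) ConnectionFactory.createConnection(conf);
    try (Admin admin = connection.getAdmin()) {
      System.out.println("Replication peers: " + admin.listReplicationPeers());
    } finally {
      connection.close();
    }
  }
}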