
Example 1 with ClusterConnection

Use of org.apache.hadoop.hbase.client.ClusterConnection in project hbase by apache.

From class DistributedHBaseCluster, method getServerHoldingRegion:

@Override
public ServerName getServerHoldingRegion(TableName tn, byte[] regionName) throws IOException {
    HRegionLocation regionLoc = null;
    try (RegionLocator locator = connection.getRegionLocator(tn)) {
        // reload=true bypasses the client's region location cache
        regionLoc = locator.getRegionLocation(regionName, true);
    }
    if (regionLoc == null) {
        LOG.warn("Cannot find region server holding region " + Bytes.toString(regionName) + ", start key [" + Bytes.toString(HRegionInfo.getStartKey(regionName)) + "]");
        return null;
    }
    // The cast to ClusterConnection exposes the internal per-server admin stub.
    AdminProtos.AdminService.BlockingInterface client = ((ClusterConnection) this.connection).getAdmin(regionLoc.getServerName());
    ServerInfo info = ProtobufUtil.getServerInfo(null, client);
    return ProtobufUtil.toServerName(info.getServerName());
}
Also used: RegionLocator (org.apache.hadoop.hbase.client.RegionLocator), ClusterConnection (org.apache.hadoop.hbase.client.ClusterConnection), ServerInfo (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ServerInfo)
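
The cast in this example works because the default Connection implementation also implements the internal ClusterConnection interface. Below is a minimal standalone sketch of the same lookup against a 2.0-era client; the ZooKeeper quorum host and the region server name are placeholders, not values from the example:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;

public class GetAdminSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // placeholder quorum; replace with a real ZooKeeper host
        conf.set("hbase.zookeeper.quorum", "zk1.example.com");
        try (Connection connection = ConnectionFactory.createConnection(conf)) {
            // the default Connection implementation also implements ClusterConnection
            ClusterConnection cc = (ClusterConnection) connection;
            // placeholder server name in host,port,startcode form
            ServerName server = ServerName.valueOf("rs1.example.com,16020,1500000000000");
            AdminProtos.AdminService.BlockingInterface admin = cc.getAdmin(server);
            // round-trip through the admin stub, as in the example above
            System.out.println(ProtobufUtil.toServerName(
                ProtobufUtil.getServerInfo(null, admin).getServerName()));
        }
    }
}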

Example 2 with ClusterConnection

Use of org.apache.hadoop.hbase.client.ClusterConnection in project hbase by apache.

From class TestRSGroups, method testNamespaceCreateAndAssign:

@Test
public void testNamespaceCreateAndAssign() throws Exception {
    LOG.info("testNamespaceCreateAndAssign");
    String nsName = tablePrefix + "_foo";
    final TableName tableName = TableName.valueOf(nsName, tablePrefix + "_testCreateAndAssign");
    RSGroupInfo appInfo = addGroup("appInfo", 1);
    admin.createNamespace(NamespaceDescriptor.create(nsName).addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, "appInfo").build());
    final HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(new HColumnDescriptor("f"));
    admin.createTable(desc);
    // wait for the created table to be assigned
    TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {

        @Override
        public boolean evaluate() throws Exception {
            return getTableRegionMap().get(desc.getTableName()) != null;
        }
    });
    ServerName targetServer = ServerName.parseServerName(appInfo.getServers().iterator().next().toString());
    AdminProtos.AdminService.BlockingInterface rs = ((ClusterConnection) admin.getConnection()).getAdmin(targetServer);
    // verify it was assigned to the right group
    Assert.assertEquals(1, ProtobufUtil.getOnlineRegions(rs).size());
}
Also used: HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor), IOException (java.io.IOException), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor), TableName (org.apache.hadoop.hbase.TableName), ClusterConnection (org.apache.hadoop.hbase.client.ClusterConnection), ServerName (org.apache.hadoop.hbase.ServerName), Waiter (org.apache.hadoop.hbase.Waiter), Test (org.junit.Test)
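
The anonymous Waiter.Predicate above polls until the new table has assigned regions. Since Predicate declares a single evaluate() method, the same wait can be written as a lambda. A small sketch under that assumption; the HBaseTestingUtility and Admin handles are parameters standing in for the test's TEST_UTIL and admin fields:

import java.util.List;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class WaitForAssignmentSketch {
    // Polls until the table reports at least one region or the timeout elapses.
    // waitFor accepts a lambda because Waiter.Predicate has a single evaluate() method.
    static void waitForAssignment(HBaseTestingUtility util, Admin admin,
            TableName table, long timeoutMs) throws Exception {
        util.waitFor(timeoutMs, () -> {
            List<HRegionInfo> regions = admin.getTableRegions(table);
            return regions != null && !regions.isEmpty();
        });
    }
}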

Example 3 with ClusterConnection

Use of org.apache.hadoop.hbase.client.ClusterConnection in project hbase by apache.

From class TestRSGroupsBase, method testKillRS:

@Test
public void testKillRS() throws Exception {
    RSGroupInfo appInfo = addGroup("appInfo", 1);
    final TableName tableName = TableName.valueOf(tablePrefix + "_ns", name.getMethodName());
    admin.createNamespace(NamespaceDescriptor.create(tableName.getNamespaceAsString()).addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, appInfo.getName()).build());
    final HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(new HColumnDescriptor("f"));
    admin.createTable(desc);
    // wait for the created table to be assigned
    TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {

        @Override
        public boolean evaluate() throws Exception {
            return getTableRegionMap().get(desc.getTableName()) != null;
        }
    });
    ServerName targetServer = ServerName.parseServerName(appInfo.getServers().iterator().next().toString());
    AdminProtos.AdminService.BlockingInterface targetRS = ((ClusterConnection) admin.getConnection()).getAdmin(targetServer);
    HRegionInfo targetRegion = ProtobufUtil.getOnlineRegions(targetRS).get(0);
    Assert.assertEquals(1, ProtobufUtil.getOnlineRegions(targetRS).size());
    try {
        // stopping the server may throw because the connection is lost mid-call
        targetRS.stopServer(null, AdminProtos.StopServerRequest.newBuilder().setReason("Die").build());
    } catch (Exception e) {
        // expected once the server begins shutting down; safe to ignore
    }
    assertFalse(cluster.getClusterStatus().getServers().contains(targetServer));
    // wait for regions in transition to clear after the kill
    TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {

        @Override
        public boolean evaluate() throws Exception {
            return cluster.getClusterStatus().getRegionsInTransition().isEmpty();
        }
    });
    Set<Address> newServers = Sets.newHashSet();
    newServers.add(rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getServers().iterator().next());
    rsGroupAdmin.moveServers(newServers, appInfo.getName());
    // Make sure all the table's regions get reassigned: disabling and re-enabling
    // the table guarantees no conflicting assign/unassign (i.e. SSH) happens
    admin.disableTable(tableName);
    admin.enableTable(tableName);
    // wait for the regions to finish moving
    TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {

        @Override
        public boolean evaluate() throws Exception {
            return cluster.getClusterStatus().getRegionsInTransition().isEmpty();
        }
    });
    targetServer = ServerName.parseServerName(newServers.iterator().next().toString());
    targetRS = ((ClusterConnection) admin.getConnection()).getAdmin(targetServer);
    Assert.assertEquals(1, ProtobufUtil.getOnlineRegions(targetRS).size());
    Assert.assertEquals(tableName, ProtobufUtil.getOnlineRegions(targetRS).get(0).getTable());
}
Also used: Address (org.apache.hadoop.hbase.net.Address), HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor), IOException (java.io.IOException), ConstraintException (org.apache.hadoop.hbase.constraint.ConstraintException), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor), HRegionInfo (org.apache.hadoop.hbase.HRegionInfo), TableName (org.apache.hadoop.hbase.TableName), ClusterConnection (org.apache.hadoop.hbase.client.ClusterConnection), ServerName (org.apache.hadoop.hbase.ServerName), Waiter (org.apache.hadoop.hbase.Waiter), Test (org.junit.Test)
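
The recovery step in this test moves a server from the default group into appInfo using Address keys (host:port) rather than full ServerNames. A minimal sketch of that call in isolation, assuming an already-initialized RSGroupAdmin client; the host, port, and group name arguments are placeholders:

import java.io.IOException;
import java.util.Collections;
import java.util.Set;
import org.apache.hadoop.hbase.net.Address;
import org.apache.hadoop.hbase.rsgroup.RSGroupAdmin;

public class MoveServerSketch {
    // Moves a single region server, identified by host:port, into targetGroup.
    // Address deliberately omits the start code, so a group membership survives
    // a region server restart on the same host and port.
    static void moveOneServer(RSGroupAdmin rsGroupAdmin, String host, int port,
            String targetGroup) throws IOException {
        Set<Address> servers = Collections.singleton(Address.fromParts(host, port));
        rsGroupAdmin.moveServers(servers, targetGroup);
    }
}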

Example 4 with ClusterConnection

Use of org.apache.hadoop.hbase.client.ClusterConnection in project hbase by apache.

From class RegionPlacementMaintainer, method updateAssignmentPlanToRegionServers:

/**
   * Pushes the new assignment plan out to all the region servers.
   * @param plan the favored-nodes assignment plan to distribute
   * @throws IOException if the current region assignment snapshot cannot be read
   */
private void updateAssignmentPlanToRegionServers(FavoredNodesPlan plan) throws IOException {
    LOG.info("Start to update the region servers with the new assignment plan");
    // Get the region to region server map
    Map<ServerName, List<HRegionInfo>> currentAssignment = this.getRegionAssignmentSnapshot().getRegionServerToRegionMap();
    // Keep track of the failed and succeeded updates
    int succeededNum = 0;
    Map<ServerName, Exception> failedUpdateMap = new HashMap<>();
    for (Map.Entry<ServerName, List<HRegionInfo>> entry : currentAssignment.entrySet()) {
        List<Pair<HRegionInfo, List<ServerName>>> regionUpdateInfos = new ArrayList<>();
        try {
            // Keep track of the favored updates for the current region server
            FavoredNodesPlan singleServerPlan = null;
            // Find out all the updates for the current region server
            for (HRegionInfo region : entry.getValue()) {
                List<ServerName> favoredServerList = plan.getFavoredNodes(region);
                if (favoredServerList != null && favoredServerList.size() == FavoredNodeAssignmentHelper.FAVORED_NODES_NUM) {
                    // Create the single server plan if necessary
                    if (singleServerPlan == null) {
                        singleServerPlan = new FavoredNodesPlan();
                    }
                    // Update the single server update
                    singleServerPlan.updateFavoredNodesMap(region, favoredServerList);
                    regionUpdateInfos.add(new Pair<>(region, favoredServerList));
                }
            }
            if (singleServerPlan != null) {
                // Update the current region server with its updated favored nodes
                BlockingInterface currentRegionServer = ((ClusterConnection) this.connection).getAdmin(entry.getKey());
                UpdateFavoredNodesRequest request = RequestConverter.buildUpdateFavoredNodesRequest(regionUpdateInfos);
                UpdateFavoredNodesResponse updateFavoredNodesResponse = currentRegionServer.updateFavoredNodes(null, request);
                LOG.info("Region server " + ProtobufUtil.getServerInfo(null, currentRegionServer).getServerName() + " has updated " + updateFavoredNodesResponse.getResponse() + " / " + singleServerPlan.getAssignmentMap().size() + " regions with the assignment plan");
                succeededNum++;
            }
        } catch (Exception e) {
            failedUpdateMap.put(entry.getKey(), e);
        }
    }
    // log the succeeded updates
    LOG.info("Updated " + succeededNum + " region servers with " + "the new assignment plan");
    // log the failed updates
    int failedNum = failedUpdateMap.size();
    if (failedNum != 0) {
        LOG.error("Failed to update the following + " + failedNum + " region servers with its corresponding favored nodes");
        for (Map.Entry<ServerName, Exception> entry : failedUpdateMap.entrySet()) {
            LOG.error("Failed to update " + entry.getKey().getHostAndPort() + " because of " + entry.getValue().getMessage());
        }
    }
}
Also used: HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), FavoredNodesPlan (org.apache.hadoop.hbase.favored.FavoredNodesPlan), IOException (java.io.IOException), ParseException (org.apache.commons.cli.ParseException), UpdateFavoredNodesResponse (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse), HRegionInfo (org.apache.hadoop.hbase.HRegionInfo), UpdateFavoredNodesRequest (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest), BlockingInterface (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface), ClusterConnection (org.apache.hadoop.hbase.client.ClusterConnection), ServerName (org.apache.hadoop.hbase.ServerName), List (java.util.List), Map (java.util.Map), TreeMap (java.util.TreeMap), Pair (org.apache.hadoop.hbase.util.Pair)
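
The inner loop above collects one (region, favored nodes) pair per region and then ships all of a server's updates in a single RPC. A sketch of just the request-building step, assuming the same 2.0-era shaded RequestConverter used above; the table name and server names are placeholders:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest;
import org.apache.hadoop.hbase.util.Pair;

public class FavoredNodesRequestSketch {
    // Builds the protobuf request carrying one region and its three favored nodes,
    // mirroring the regionUpdateInfos list assembled per server in the method above.
    static UpdateFavoredNodesRequest buildSingleRegionRequest() {
        HRegionInfo region = new HRegionInfo(TableName.valueOf("t1"));
        List<ServerName> favored = Arrays.asList(
                ServerName.valueOf("rs1.example.com,16020,1"),
                ServerName.valueOf("rs2.example.com,16020,1"),
                ServerName.valueOf("rs3.example.com,16020,1"));
        List<Pair<HRegionInfo, List<ServerName>>> updates = new ArrayList<>();
        updates.add(new Pair<>(region, favored));
        return RequestConverter.buildUpdateFavoredNodesRequest(updates);
    }
}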

Example 5 with ClusterConnection

Use of org.apache.hadoop.hbase.client.ClusterConnection in project hbase by apache.

From class DumpReplicationQueues, method dumpReplicationQueues:

private int dumpReplicationQueues(DumpOptions opts) throws Exception {
    Configuration conf = getConf();
    HBaseAdmin.available(conf);
    ClusterConnection connection = (ClusterConnection) ConnectionFactory.createConnection(conf);
    Admin admin = connection.getAdmin();
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "DumpReplicationQueues" + System.currentTimeMillis(), new WarnOnlyAbortable(), true);
    try {
        // Our zk watcher
        LOG.info("Our Quorum: " + zkw.getQuorum());
        List<TableCFs> replicatedTableCFs = admin.listReplicatedTableCFs();
        if (replicatedTableCFs.isEmpty()) {
            LOG.info("No tables with a configured replication peer were found.");
            return (0);
        } else {
            LOG.info("Replicated Tables: " + replicatedTableCFs);
        }
        List<ReplicationPeerDescription> peers = admin.listReplicationPeers();
        if (peers.isEmpty()) {
            LOG.info("Replication is enabled but no peer configuration was found.");
        }
        System.out.println("Dumping replication peers and configurations:");
        System.out.println(dumpPeersState(peers));
        if (opts.isDistributed()) {
            LOG.info("Found [--distributed], will poll each RegionServer.");
            Set<String> peerIds = peers.stream().map((peer) -> peer.getPeerId()).collect(Collectors.toSet());
            System.out.println(dumpQueues(connection, zkw, peerIds, opts.isHdfs()));
            System.out.println(dumpReplicationSummary());
        } else {
            // use ZK instead
            System.out.print("Dumping replication znodes via ZooKeeper:");
            System.out.println(ZKUtil.getReplicationZnodesDump(zkw));
        }
        return (0);
    } catch (IOException e) {
        // surface the failure through the exit code
        return (-1);
    } finally {
        zkw.close();
        connection.close();
    }
}
Also used: StringUtils (org.apache.hadoop.hbase.procedure2.util.StringUtils), ReplicationTracker (org.apache.hadoop.hbase.replication.ReplicationTracker), Arrays (java.util.Arrays), FileSystem (org.apache.hadoop.fs.FileSystem), ReplicationFactory (org.apache.hadoop.hbase.replication.ReplicationFactory), FileStatus (org.apache.hadoop.fs.FileStatus), HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin), ArrayList (java.util.ArrayList), WALLink (org.apache.hadoop.hbase.io.WALLink), ReplicationQueueInfo (org.apache.hadoop.hbase.replication.ReplicationQueueInfo), Configured (org.apache.hadoop.conf.Configured), Map (java.util.Map), Configuration (org.apache.hadoop.conf.Configuration), TableCFs (org.apache.hadoop.hbase.client.replication.TableCFs), AtomicLongMap (com.google.common.util.concurrent.AtomicLongMap), LinkedList (java.util.LinkedList), KeeperException (org.apache.zookeeper.KeeperException), ZKUtil (org.apache.hadoop.hbase.zookeeper.ZKUtil), Abortable (org.apache.hadoop.hbase.Abortable), ReplicationQueuesClientArguments (org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments), ToolRunner (org.apache.hadoop.util.ToolRunner), Set (java.util.Set), ReplicationPeerConfig (org.apache.hadoop.hbase.replication.ReplicationPeerConfig), IOException (java.io.IOException), ReplicationPeers (org.apache.hadoop.hbase.replication.ReplicationPeers), ZooKeeperWatcher (org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher), Collectors (java.util.stream.Collectors), Stoppable (org.apache.hadoop.hbase.Stoppable), FileNotFoundException (java.io.FileNotFoundException), ConnectionFactory (org.apache.hadoop.hbase.client.ConnectionFactory), Tool (org.apache.hadoop.util.Tool), List (java.util.List), ReplicationQueuesClient (org.apache.hadoop.hbase.replication.ReplicationQueuesClient), Admin (org.apache.hadoop.hbase.client.Admin), HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration), ClusterConnection (org.apache.hadoop.hbase.client.ClusterConnection), ReplicationPeerDescription (org.apache.hadoop.hbase.replication.ReplicationPeerDescription), Log (org.apache.commons.logging.Log), Queue (java.util.Queue), LogFactory (org.apache.commons.logging.LogFactory), ReplicationQueues (org.apache.hadoop.hbase.replication.ReplicationQueues)
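
This tool casts the Connection from ConnectionFactory up front so it can later hand the ClusterConnection to dumpQueues for per-RegionServer polling. A minimal sketch of the peer-listing portion on its own, assuming an hbase-site.xml with valid client configuration is on the classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;

public class ListPeersSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // same cast as the tool: the returned Connection is also a ClusterConnection
        ClusterConnection connection = (ClusterConnection) ConnectionFactory.createConnection(conf);
        try (Admin admin = connection.getAdmin()) {
            // each descriptor pairs a peer id with its ReplicationPeerConfig
            for (ReplicationPeerDescription peer : admin.listReplicationPeers()) {
                System.out.println(peer.getPeerId() + " -> " + peer.getPeerConfig().getClusterKey());
            }
        } finally {
            connection.close();
        }
    }
}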

Aggregations

ClusterConnection (org.apache.hadoop.hbase.client.ClusterConnection): 23 usages
Test (org.junit.Test): 12 usages
ServerName (org.apache.hadoop.hbase.ServerName): 8 usages
IOException (java.io.IOException): 6 usages
Configuration (org.apache.hadoop.conf.Configuration): 4 usages
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 4 usages
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 4 usages
TableName (org.apache.hadoop.hbase.TableName): 4 usages
ServiceException (org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException): 4 usages
ArrayList (java.util.ArrayList): 3 usages
List (java.util.List): 3 usages
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 3 usages
Waiter (org.apache.hadoop.hbase.Waiter): 3 usages
RegionLocator (org.apache.hadoop.hbase.client.RegionLocator): 3 usages
Result (org.apache.hadoop.hbase.client.Result): 3 usages
Table (org.apache.hadoop.hbase.client.Table): 3 usages
LinkedList (java.util.LinkedList): 2 usages
Map (java.util.Map): 2 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 2 usages
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 2 usages