
Example 1 with ReplicationTracker

Use of org.apache.hadoop.hbase.replication.ReplicationTracker in the Apache HBase project.

From the class DumpReplicationQueues, method dumpQueues:

public String dumpQueues(ClusterConnection connection, ZooKeeperWatcher zkw, Set<String> peerIds, boolean hdfs) throws Exception {
    ReplicationQueuesClient queuesClient;
    ReplicationPeers replicationPeers;
    ReplicationQueues replicationQueues;
    ReplicationTracker replicationTracker;
    ReplicationQueuesClientArguments replicationArgs = new ReplicationQueuesClientArguments(getConf(), new WarnOnlyAbortable(), zkw);
    StringBuilder sb = new StringBuilder();
    // Set up read access to the replication queues, peers and tracker state kept in ZooKeeper.
    queuesClient = ReplicationFactory.getReplicationQueuesClient(replicationArgs);
    queuesClient.init();
    replicationQueues = ReplicationFactory.getReplicationQueues(replicationArgs);
    replicationPeers = ReplicationFactory.getReplicationPeers(zkw, getConf(), queuesClient, connection);
    replicationTracker = ReplicationFactory.getReplicationTracker(zkw, replicationPeers, getConf(), new WarnOnlyAbortable(), new WarnOnlyStoppable());
    // The tracker reports which region servers are currently live.
    List<String> liveRegionServers = replicationTracker.getListOfRegionServers();
    // Loop over every queue of every replicator (region server) and dump its WALs.
    try {
        List<String> regionservers = queuesClient.getListOfReplicators();
        for (String regionserver : regionservers) {
            List<String> queueIds = queuesClient.getAllQueues(regionserver);
            replicationQueues.init(regionserver);
            // A replicator that is not among the live region servers is dead;
            // record it so it appears in the summary.
            if (!liveRegionServers.contains(regionserver)) {
                deadRegionServers.add(regionserver);
            }
            for (String queueId : queueIds) {
                ReplicationQueueInfo queueInfo = new ReplicationQueueInfo(queueId);
                List<String> wals = queuesClient.getLogsInQueue(regionserver, queueId);
                if (!peerIds.contains(queueInfo.getPeerId())) {
                    // The queue's peer no longer exists; report it as a leftover (deleted) queue.
                    deletedQueues.add(regionserver + "/" + queueId);
                    sb.append(formatQueue(regionserver, replicationQueues, queueInfo, queueId, wals, true, hdfs));
                } else {
                    sb.append(formatQueue(regionserver, replicationQueues, queueInfo, queueId, wals, false, hdfs));
                }
            }
        }
    } catch (KeeperException ke) {
        throw new IOException(ke);
    }
    return sb.toString();
}
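The WarnOnlyAbortable and WarnOnlyStoppable helpers referenced above are defined elsewhere in DumpReplicationQueues and are not shown on this page. A minimal sketch of what they can look like, assuming they simply ignore (and at most log) abort/stop requests, since this tool only reads replication state:

import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.Stoppable;

// Sketch only: an Abortable that never actually aborts, so a ZooKeeper
// hiccup does not tear down the read-only dump.
class WarnOnlyAbortable implements Abortable {
    @Override
    public void abort(String why, Throwable e) {
        System.err.println("Ignoring abort request: " + why);
    }

    @Override
    public boolean isAborted() {
        return false;
    }
}

// Sketch only: a Stoppable that never reports being stopped.
class WarnOnlyStoppable implements Stoppable {
    @Override
    public void stop(String why) {
        System.err.println("Ignoring stop request: " + why);
    }

    @Override
    public boolean isStopped() {
        return false;
    }
}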
Also used:

ReplicationQueueInfo (org.apache.hadoop.hbase.replication.ReplicationQueueInfo)
IOException (java.io.IOException)
ReplicationQueues (org.apache.hadoop.hbase.replication.ReplicationQueues)
ReplicationTracker (org.apache.hadoop.hbase.replication.ReplicationTracker)
ReplicationPeers (org.apache.hadoop.hbase.replication.ReplicationPeers)
ReplicationQueuesClientArguments (org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments)
KeeperException (org.apache.zookeeper.KeeperException)
ReplicationQueuesClient (org.apache.hadoop.hbase.replication.ReplicationQueuesClient)
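For context, a hedged sketch of how dumpQueues might be driven from caller code. The class name DumpQueuesExample, the peer id "1", and the assumption that DumpReplicationQueues exposes setConf (the getConf() call in the snippet suggests it is Configurable) are illustrative, not taken from the HBase source; the HBase APIs used (HBaseConfiguration, ConnectionFactory, ZooKeeperWatcher) are the pre-2.0 ones this code targets.

import java.util.Collections;
import java.util.Set;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;

public class DumpQueuesExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();

        DumpReplicationQueues dumper = new DumpReplicationQueues();
        // Assumption: DumpReplicationQueues is Configurable (it calls getConf() above).
        dumper.setConf(conf);

        // Peer ids to report on; "1" is only a placeholder for this sketch.
        Set<String> peerIds = Collections.singleton("1");

        // A do-nothing Abortable for the ZooKeeper watcher, in the spirit of
        // the WarnOnlyAbortable used inside dumpQueues.
        Abortable doNothing = new Abortable() {
            @Override
            public void abort(String why, Throwable e) {
            }

            @Override
            public boolean isAborted() {
                return false;
            }
        };

        try (ClusterConnection connection =
                 (ClusterConnection) ConnectionFactory.createConnection(conf)) {
            ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "DumpQueuesExample", doNothing);
            try {
                // hdfs = false: report queue metadata only, without sizing WALs on HDFS.
                System.out.println(dumper.dumpQueues(connection, zkw, peerIds, false));
            } finally {
                zkw.close();
            }
        }
    }
}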

Aggregations

IOException (java.io.IOException) 1
ReplicationPeers (org.apache.hadoop.hbase.replication.ReplicationPeers) 1
ReplicationQueueInfo (org.apache.hadoop.hbase.replication.ReplicationQueueInfo) 1
ReplicationQueues (org.apache.hadoop.hbase.replication.ReplicationQueues) 1
ReplicationQueuesClient (org.apache.hadoop.hbase.replication.ReplicationQueuesClient) 1
ReplicationQueuesClientArguments (org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments) 1
ReplicationTracker (org.apache.hadoop.hbase.replication.ReplicationTracker) 1
KeeperException (org.apache.zookeeper.KeeperException) 1