
Example 6 with ReplicationQueues

Use of org.apache.hadoop.hbase.replication.ReplicationQueues in project hbase by apache.

From the class TestReplicationSourceManager, method testCleanupFailoverQueues.

@Test
public void testCleanupFailoverQueues() throws Exception {
    final Server server = new DummyServer("hostname1.example.org");
    ReplicationQueues rq = ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(server.getConfiguration(), server, server.getZooKeeper()));
    rq.init(server.getServerName().toString());
    // populate some znodes in the peer znode
    SortedSet<String> files = new TreeSet<>();
    String group = "testgroup";
    String file1 = group + ".log1";
    String file2 = group + ".log2";
    files.add(file1);
    files.add(file2);
    for (String file : files) {
        rq.addLog("1", file);
    }
    Server s1 = new DummyServer("dummyserver1.example.org");
    ReplicationQueues rq1 = ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(s1.getConfiguration(), s1, s1.getZooKeeper()));
    rq1.init(s1.getServerName().toString());
    ReplicationPeers rp1 = ReplicationFactory.getReplicationPeers(s1.getZooKeeper(), s1.getConfiguration(), s1);
    rp1.init();
    NodeFailoverWorker w1 = manager.new NodeFailoverWorker(server.getServerName().getServerName(), rq1, rp1, new UUID(new Long(1), new Long(2)));
    w1.run();
    assertEquals(1, manager.getWalsByIdRecoveredQueues().size());
    String id = "1-" + server.getServerName().getServerName();
    assertEquals(files, manager.getWalsByIdRecoveredQueues().get(id).get(group));
    manager.cleanOldLogs(file2, id, true);
    // log1 should be deleted
    assertEquals(Sets.newHashSet(file2), manager.getWalsByIdRecoveredQueues().get(id).get(group));
}
Also used: ReplicationQueues (org.apache.hadoop.hbase.replication.ReplicationQueues), ReplicationQueuesArguments (org.apache.hadoop.hbase.replication.ReplicationQueuesArguments), NodeFailoverWorker (org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.NodeFailoverWorker), Server (org.apache.hadoop.hbase.Server), TreeSet (java.util.TreeSet), UUID (java.util.UUID), ReplicationPeers (org.apache.hadoop.hbase.replication.ReplicationPeers), Test (org.junit.Test)
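
For context, the recovered-queue id asserted above ("1-" followed by the dead server's name) is the naming convention used for adopted queues: the peer id, then the server the queue was claimed from. A minimal sketch of that convention (the helper is ours, not part of HBase):

// Hypothetical helper (not in HBase): builds the id given to a queue adopted from
// a dead server, matching the "1-" + server name assertion in the test above.
static String recoveredQueueId(String peerId, String deadServerName) {
    return peerId + "-" + deadServerName;
}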

Example 7 with ReplicationQueues

Use of org.apache.hadoop.hbase.replication.ReplicationQueues in project hbase by apache.

From the class TestReplicationSourceManager, method testCleanupUnknownPeerZNode.

@Test
public void testCleanupUnknownPeerZNode() throws Exception {
    final Server server = new DummyServer("hostname2.example.org");
    ReplicationQueues rq = ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(server.getConfiguration(), server, server.getZooKeeper()));
    rq.init(server.getServerName().toString());
    // populate some znodes in the peer znode
    // add log to an unknown peer
    String group = "testgroup";
    rq.addLog("2", group + ".log1");
    rq.addLog("2", group + ".log2");
    NodeFailoverWorker w1 = manager.new NodeFailoverWorker(server.getServerName().getServerName());
    w1.run();
    // The log of the unknown peer should be removed from zk
    for (String peer : manager.getAllQueues()) {
        assertTrue(peer.startsWith("1"));
    }
}
Also used: ReplicationQueues (org.apache.hadoop.hbase.replication.ReplicationQueues), ReplicationQueuesArguments (org.apache.hadoop.hbase.replication.ReplicationQueuesArguments), NodeFailoverWorker (org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.NodeFailoverWorker), Server (org.apache.hadoop.hbase.Server), Test (org.junit.Test)
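
The same check can be written against the parsed peer id rather than a string prefix, using ReplicationQueueInfo as in the later examples (this variant is ours, not part of the original test):

// Sketch: after failover, no surviving queue should reference the unknown peer
// "2"; every remaining queue belongs to the configured peer "1".
for (String queueId : manager.getAllQueues()) {
    assertEquals("1", new ReplicationQueueInfo(queueId).getPeerId());
}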

Example 8 with ReplicationQueues

Use of org.apache.hadoop.hbase.replication.ReplicationQueues in project hbase by apache.

From the class TestReplicationSourceManagerZkImpl, method testFailoverDeadServerCversionChange.

@Test
public void testFailoverDeadServerCversionChange() throws Exception {
    final Server s0 = new DummyServer("cversion-change0.example.org");
    ReplicationQueues repQueues = ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(conf, s0, s0.getZooKeeper()));
    repQueues.init(s0.getServerName().toString());
    // populate some znodes in the peer znode
    files.add("log1");
    files.add("log2");
    for (String file : files) {
        repQueues.addLog("1", file);
    }
    // simulate queue transfer
    Server s1 = new DummyServer("cversion-change1.example.org");
    ReplicationQueues rq1 = ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(s1.getConfiguration(), s1, s1.getZooKeeper()));
    rq1.init(s1.getServerName().toString());
    ReplicationQueuesClientZKImpl client = (ReplicationQueuesClientZKImpl) ReplicationFactory.getReplicationQueuesClient(new ReplicationQueuesClientArguments(s1.getConfiguration(), s1, s1.getZooKeeper()));
    int v0 = client.getQueuesZNodeCversion();
    List<String> queues = rq1.getUnClaimedQueueIds(s0.getServerName().getServerName());
    for (String queue : queues) {
        rq1.claimQueue(s0.getServerName().getServerName(), queue);
    }
    rq1.removeReplicatorIfQueueIsEmpty(s0.getServerName().getServerName());
    int v1 = client.getQueuesZNodeCversion();
    // cversion should increase by 1 since a child node is deleted
    assertEquals(v0 + 1, v1);
    s0.stop("");
}
Also used: ReplicationQueues (org.apache.hadoop.hbase.replication.ReplicationQueues), ReplicationQueuesArguments (org.apache.hadoop.hbase.replication.ReplicationQueuesArguments), Server (org.apache.hadoop.hbase.Server), ReplicationQueuesClientZKImpl (org.apache.hadoop.hbase.replication.ReplicationQueuesClientZKImpl), ReplicationQueuesClientArguments (org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments), Test (org.junit.Test)
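
The value read via getQueuesZNodeCversion() is the ZooKeeper child version of the queues znode, so it moves whenever a replicator znode is added or removed beneath it. That makes it a cheap consistency check when scanning the queue layout; a hedged sketch of that pattern, reusing only the calls shown above (the method name is ours):

// Hypothetical helper (ours): re-read the replicator list until the queues znode
// cversion is unchanged across the read, i.e. no concurrent failover removed or
// added a replicator while we were listing them.
List<String> stableListOfReplicators(ReplicationQueuesClientZKImpl client) throws KeeperException {
    while (true) {
        int before = client.getQueuesZNodeCversion();
        List<String> replicators = client.getListOfReplicators();
        if (before == client.getQueuesZNodeCversion()) {
            return replicators;
        }
    }
}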

Example 9 with ReplicationQueues

Use of org.apache.hadoop.hbase.replication.ReplicationQueues in project hbase by apache.

From the class TestReplicationSourceManagerZkImpl, method testNodeFailoverDeadServerParsing.

// Tests the naming convention of adopted queues for ReplicationQueuesZkImpl
@Test
public void testNodeFailoverDeadServerParsing() throws Exception {
    final Server server = new DummyServer("ec2-54-234-230-108.compute-1.amazonaws.com");
    ReplicationQueues repQueues = ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(conf, server, server.getZooKeeper()));
    repQueues.init(server.getServerName().toString());
    // populate some znodes in the peer znode
    files.add("log1");
    files.add("log2");
    for (String file : files) {
        repQueues.addLog("1", file);
    }
    // create 3 DummyServers
    Server s1 = new DummyServer("ip-10-8-101-114.ec2.internal");
    Server s2 = new DummyServer("ec2-107-20-52-47.compute-1.amazonaws.com");
    Server s3 = new DummyServer("ec2-23-20-187-167.compute-1.amazonaws.com");
    // simulate three servers fail sequentially
    ReplicationQueues rq1 = ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(s1.getConfiguration(), s1, s1.getZooKeeper()));
    rq1.init(s1.getServerName().toString());
    String serverName = server.getServerName().getServerName();
    List<String> unclaimed = rq1.getUnClaimedQueueIds(serverName);
    rq1.claimQueue(serverName, unclaimed.get(0)).getSecond();
    rq1.removeReplicatorIfQueueIsEmpty(unclaimed.get(0));
    ReplicationQueues rq2 = ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(s2.getConfiguration(), s2, s2.getZooKeeper()));
    rq2.init(s2.getServerName().toString());
    serverName = s1.getServerName().getServerName();
    unclaimed = rq2.getUnClaimedQueueIds(serverName);
    rq2.claimQueue(serverName, unclaimed.get(0)).getSecond();
    rq2.removeReplicatorIfQueueIsEmpty(unclaimed.get(0));
    ReplicationQueues rq3 = ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(s3.getConfiguration(), s3, s3.getZooKeeper()));
    rq3.init(s3.getServerName().toString());
    serverName = s2.getServerName().getServerName();
    unclaimed = rq3.getUnClaimedQueueIds(serverName);
    String queue3 = rq3.claimQueue(serverName, unclaimed.get(0)).getFirst();
    rq3.removeReplicatorIfQueueIsEmpty(unclaimed.get(0));
    ReplicationQueueInfo replicationQueueInfo = new ReplicationQueueInfo(queue3);
    List<String> result = replicationQueueInfo.getDeadRegionServers();
    // verify
    assertTrue(result.contains(server.getServerName().getServerName()));
    assertTrue(result.contains(s1.getServerName().getServerName()));
    assertTrue(result.contains(s2.getServerName().getServerName()));
    server.stop("");
}
Also used: ReplicationQueues (org.apache.hadoop.hbase.replication.ReplicationQueues), ReplicationQueuesArguments (org.apache.hadoop.hbase.replication.ReplicationQueuesArguments), Server (org.apache.hadoop.hbase.Server), ReplicationQueueInfo (org.apache.hadoop.hbase.replication.ReplicationQueueInfo), Test (org.junit.Test)
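
ReplicationQueueInfo also recovers the original peer id from an adopted queue id, even though the chained dead-server names themselves contain '-' characters. A small hedged addition to the same scenario (these extra assertions are ours, not in the original test):

// Sketch: queue3 has been adopted three times, so its id chains three dead
// servers, yet the original peer id ("1") is still parsed out of it.
ReplicationQueueInfo info = new ReplicationQueueInfo(queue3);
assertTrue(info.isQueueRecovered());
assertEquals("1", info.getPeerId());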

Example 10 with ReplicationQueues

Use of org.apache.hadoop.hbase.replication.ReplicationQueues in project hbase by apache.

From the class DumpReplicationQueues, method dumpQueues.

public String dumpQueues(ClusterConnection connection, ZooKeeperWatcher zkw, Set<String> peerIds, boolean hdfs) throws Exception {
    ReplicationQueuesClient queuesClient;
    ReplicationPeers replicationPeers;
    ReplicationQueues replicationQueues;
    ReplicationTracker replicationTracker;
    ReplicationQueuesClientArguments replicationArgs = new ReplicationQueuesClientArguments(getConf(), new WarnOnlyAbortable(), zkw);
    StringBuilder sb = new StringBuilder();
    queuesClient = ReplicationFactory.getReplicationQueuesClient(replicationArgs);
    queuesClient.init();
    replicationQueues = ReplicationFactory.getReplicationQueues(replicationArgs);
    replicationPeers = ReplicationFactory.getReplicationPeers(zkw, getConf(), queuesClient, connection);
    replicationTracker = ReplicationFactory.getReplicationTracker(zkw, replicationPeers, getConf(), new WarnOnlyAbortable(), new WarnOnlyStoppable());
    List<String> liveRegionServers = replicationTracker.getListOfRegionServers();
    // Loops each peer on each RS and dumps the queues
    try {
        List<String> regionservers = queuesClient.getListOfReplicators();
        for (String regionserver : regionservers) {
            List<String> queueIds = queuesClient.getAllQueues(regionserver);
            replicationQueues.init(regionserver);
            if (!liveRegionServers.contains(regionserver)) {
                deadRegionServers.add(regionserver);
            }
            for (String queueId : queueIds) {
                ReplicationQueueInfo queueInfo = new ReplicationQueueInfo(queueId);
                List<String> wals = queuesClient.getLogsInQueue(regionserver, queueId);
                if (!peerIds.contains(queueInfo.getPeerId())) {
                    deletedQueues.add(regionserver + "/" + queueId);
                    sb.append(formatQueue(regionserver, replicationQueues, queueInfo, queueId, wals, true, hdfs));
                } else {
                    sb.append(formatQueue(regionserver, replicationQueues, queueInfo, queueId, wals, false, hdfs));
                }
            }
        }
    } catch (KeeperException ke) {
        throw new IOException(ke);
    }
    return sb.toString();
}
Also used: ReplicationQueueInfo (org.apache.hadoop.hbase.replication.ReplicationQueueInfo), IOException (java.io.IOException), ReplicationQueues (org.apache.hadoop.hbase.replication.ReplicationQueues), ReplicationTracker (org.apache.hadoop.hbase.replication.ReplicationTracker), ReplicationPeers (org.apache.hadoop.hbase.replication.ReplicationPeers), ReplicationQueuesClientArguments (org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments), KeeperException (org.apache.zookeeper.KeeperException), ReplicationQueuesClient (org.apache.hadoop.hbase.replication.ReplicationQueuesClient)
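
Stripped of the tool's own bookkeeping (formatQueue, the dead/deleted-queue lists, the abortable), the traversal reduces to three client calls. A minimal standalone sketch using only the calls already shown above (the method name is ours):

// Hypothetical helper (ours, not part of DumpReplicationQueues): print each queue
// and its WAL count for every region server known to the queues client.
void printQueues(ReplicationQueuesClient queuesClient) throws KeeperException {
    for (String regionserver : queuesClient.getListOfReplicators()) {
        for (String queueId : queuesClient.getAllQueues(regionserver)) {
            List<String> wals = queuesClient.getLogsInQueue(regionserver, queueId);
            System.out.println(regionserver + "/" + queueId + " -> "
                + (wals == null ? 0 : wals.size()) + " WAL(s)");
        }
    }
}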

Aggregations

ReplicationQueues (org.apache.hadoop.hbase.replication.ReplicationQueues): 10 usages
ReplicationQueuesArguments (org.apache.hadoop.hbase.replication.ReplicationQueuesArguments): 9 usages
Test (org.junit.Test): 9 usages
Server (org.apache.hadoop.hbase.Server): 6 usages
IOException (java.io.IOException): 4 usages
ReplicationPeerConfig (org.apache.hadoop.hbase.replication.ReplicationPeerConfig): 3 usages
ReplicationPeers (org.apache.hadoop.hbase.replication.ReplicationPeers): 3 usages
Configuration (org.apache.hadoop.conf.Configuration): 2 usages
Waiter (org.apache.hadoop.hbase.Waiter): 2 usages
ReplicationQueueInfo (org.apache.hadoop.hbase.replication.ReplicationQueueInfo): 2 usages
ReplicationQueuesClientArguments (org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments): 2 usages
NodeFailoverWorker (org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.NodeFailoverWorker): 2 usages
ZooKeeperWatcher (org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher): 2 usages
KeeperException (org.apache.zookeeper.KeeperException): 2 usages
ArrayList (java.util.ArrayList): 1 usage
List (java.util.List): 1 usage
TreeSet (java.util.TreeSet): 1 usage
UUID (java.util.UUID): 1 usage
CountDownLatch (java.util.concurrent.CountDownLatch): 1 usage
FileStatus (org.apache.hadoop.fs.FileStatus): 1 usage