Search in sources:

Example 1 with ReplicationQueuesArguments

Use of org.apache.hadoop.hbase.replication.ReplicationQueuesArguments in project hbase by apache.

From class TestLogsCleaner, method testLogCleaning.

@Test
public void testLogCleaning() throws Exception {
    Configuration conf = TEST_UTIL.getConfiguration();
    // set TTL
    long ttl = 10000;
    conf.setLong("hbase.master.logcleaner.ttl", ttl);
    Replication.decorateMasterConfiguration(conf);
    Server server = new DummyServer();
    ReplicationQueues repQueues = ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(conf, server, server.getZooKeeper()));
    repQueues.init(server.getServerName().toString());
    final Path oldLogDir = new Path(TEST_UTIL.getDataTestDir(), HConstants.HREGION_OLDLOGDIR_NAME);
    String fakeMachineName = URLEncoder.encode(server.getServerName().toString(), "UTF8");
    final FileSystem fs = FileSystem.get(conf);
    // Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files
    long now = System.currentTimeMillis();
    fs.delete(oldLogDir, true);
    fs.mkdirs(oldLogDir);
    // Case 1: 2 invalid files, which would be deleted directly
    fs.createNewFile(new Path(oldLogDir, "a"));
    fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + "a"));
    System.out.println("Now is: " + now);
    for (int i = 1; i < 31; i++) {
        // Case 3: old files which would be deletable for the first log cleaner
        // (TimeToLiveLogCleaner), and also for the second (ReplicationLogCleaner)
        Path fileName = new Path(oldLogDir, fakeMachineName + "." + (now - i));
        fs.createNewFile(fileName);
        // Case 4: put 3 of the old log files in the replication queue so they
        // pass the first log cleaner (TimeToLiveLogCleaner) but are rejected by
        // the second (ReplicationLogCleaner)
        if (i % (30 / 3) == 1) {
            repQueues.addLog(fakeMachineName, fileName.getName());
            System.out.println("Replication log file: " + fileName);
        }
    }
    // Sleep so the files created below get a newer modification time
    Thread.sleep(ttl);
    // Case 2: 1 "recent" file and 1 very new file, neither deletable by the first
    // log cleaner (TimeToLiveLogCleaner), so we never go down the chain for them
    fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + now));
    fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + (now + 10000)));
    for (FileStatus stat : fs.listStatus(oldLogDir)) {
        System.out.println(stat.getPath().toString());
    }
    assertEquals(34, fs.listStatus(oldLogDir).length);
    LogCleaner cleaner = new LogCleaner(1000, server, conf, fs, oldLogDir);
    cleaner.chore();
    // We end up with the current log file, a newer one and the 3 old log
    // files which are scheduled for replication
    TEST_UTIL.waitFor(1000, new Waiter.Predicate<Exception>() {

        @Override
        public boolean evaluate() throws Exception {
            return 5 == fs.listStatus(oldLogDir).length;
        }
    });
    for (FileStatus file : fs.listStatus(oldLogDir)) {
        System.out.println("Kept log files: " + file.getPath().getName());
    }
}
Also used: Path (org.apache.hadoop.fs.Path), FileStatus (org.apache.hadoop.fs.FileStatus), Configuration (org.apache.hadoop.conf.Configuration), Server (org.apache.hadoop.hbase.Server), ReplicationLogCleaner (org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner), ZooKeeperConnectionException (org.apache.hadoop.hbase.ZooKeeperConnectionException), KeeperException (org.apache.zookeeper.KeeperException), IOException (java.io.IOException), ReplicationQueues (org.apache.hadoop.hbase.replication.ReplicationQueues), ReplicationQueuesArguments (org.apache.hadoop.hbase.replication.ReplicationQueuesArguments), FileSystem (org.apache.hadoop.fs.FileSystem), Waiter (org.apache.hadoop.hbase.Waiter), Test (org.junit.Test)
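All five examples in this listing bootstrap the queue store the same way. A minimal sketch of that shared pattern, distilled from the tests above (conf, abortable, zkWatcher, and serverName stand in for whatever Configuration, Abortable, ZooKeeperWatcher, and server name the caller already has; Example 4 shows the Abortable may be null):

// Build the argument holder, ask the factory for a ReplicationQueues
// implementation, and register this replicator's znode before any queue calls.
ReplicationQueues repQueues = ReplicationFactory.getReplicationQueues(
    new ReplicationQueuesArguments(conf, abortable, zkWatcher));
repQueues.init(serverName);      // e.g. server.getServerName().toString()
repQueues.addLog("1", "file1");  // enqueue a WAL file name for peer "1"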

Example 2 with ReplicationQueuesArguments

Use of org.apache.hadoop.hbase.replication.ReplicationQueuesArguments in project hbase by apache.

From class TestHBaseFsckOneRS, method testCheckReplication.

@Test(timeout = 180000)
public void testCheckReplication() throws Exception {
    // check no errors
    HBaseFsck hbck = doFsck(conf, false);
    assertNoErrors(hbck);
    // create peer
    ReplicationAdmin replicationAdmin = new ReplicationAdmin(conf);
    Assert.assertEquals(0, replicationAdmin.getPeersCount());
    int zkPort = conf.getInt(HConstants.ZOOKEEPER_CLIENT_PORT, HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT);
    ReplicationPeerConfig rpc = new ReplicationPeerConfig();
    rpc.setClusterKey("127.0.0.1:" + zkPort + ":/hbase");
    replicationAdmin.addPeer("1", rpc, null);
    Assert.assertEquals(1, replicationAdmin.getPeersCount());
    // create replicator
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "Test Hbase Fsck", connection);
    ReplicationQueues repQueues = ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(conf, connection, zkw));
    repQueues.init("server1");
    // queues for current peer, no errors
    repQueues.addLog("1", "file1");
    repQueues.addLog("1-server2", "file1");
    Assert.assertEquals(2, repQueues.getAllQueues().size());
    hbck = doFsck(conf, false);
    assertNoErrors(hbck);
    // queues for removed peer
    repQueues.addLog("2", "file1");
    repQueues.addLog("2-server2", "file1");
    Assert.assertEquals(4, repQueues.getAllQueues().size());
    hbck = doFsck(conf, false);
    assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] { HBaseFsck.ErrorReporter.ERROR_CODE.UNDELETED_REPLICATION_QUEUE, HBaseFsck.ErrorReporter.ERROR_CODE.UNDELETED_REPLICATION_QUEUE });
    // fix the case
    hbck = doFsck(conf, true);
    hbck = doFsck(conf, false);
    assertNoErrors(hbck);
    // ensure only "2" is deleted
    Assert.assertEquals(2, repQueues.getAllQueues().size());
    Assert.assertNull(repQueues.getLogsInQueue("2"));
    Assert.assertNull(repQueues.getLogsInQueue("2-server2"));
    replicationAdmin.removePeer("1");
    repQueues.removeAllQueues();
    zkw.close();
    replicationAdmin.close();
}
Also used: ReplicationQueues (org.apache.hadoop.hbase.replication.ReplicationQueues), ReplicationQueuesArguments (org.apache.hadoop.hbase.replication.ReplicationQueuesArguments), ReplicationPeerConfig (org.apache.hadoop.hbase.replication.ReplicationPeerConfig), ZooKeeperWatcher (org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher), ReplicationAdmin (org.apache.hadoop.hbase.client.replication.ReplicationAdmin), Test (org.junit.Test)
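A note on the queue ids above: a bare peer id ("1", "2") names a queue owned by the live replicator, while "peerId-serverName" ("1-server2", "2-server2") names a recovered queue claimed from a dead server. hbck flags either form with UNDELETED_REPLICATION_QUEUE once no peer with that id exists, which is exactly what the "2" queues provoke. A sketch of the two forms, using only the calls from the test:

// Queue owned by the live replicator, replicating to peer "1".
repQueues.addLog("1", "file1");
// Recovered queue: peer "1" WALs claimed from the dead server "server2".
repQueues.addLog("1-server2", "file1");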

Example 3 with ReplicationQueuesArguments

Use of org.apache.hadoop.hbase.replication.ReplicationQueuesArguments in project hbase by apache.

From class TestReplicationSourceManager, method testPeerRemovalCleanup.

/**
   * Test that calling removePeer() on a ReplicationSourceManager that failed to initialize the
   * corresponding ReplicationSourceInterface correctly cleans up the associated replication
   * queue and ReplicationPeer.
   * See HBASE-16096.
   */
@Test
public void testPeerRemovalCleanup() throws Exception {
    String replicationSourceImplName = conf.get("replication.replicationsource.implementation");
    try {
        DummyServer server = new DummyServer();
        final ReplicationQueues rq = ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(server.getConfiguration(), server, server.getZooKeeper()));
        rq.init(server.getServerName().toString());
        // Purposely fail ReplicationSourceManager.addSource() by causing ReplicationSourceInterface
        // initialization to throw an exception.
        conf.set("replication.replicationsource.implementation", FailInitializeDummyReplicationSource.class.getName());
        final ReplicationPeers rp = manager.getReplicationPeers();
        // Set up the znode and ReplicationPeer for the fake peer
        rp.registerPeer("FakePeer", new ReplicationPeerConfig().setClusterKey("localhost:1:/hbase"));
        // Wait for the peer to get created and connected
        Waiter.waitFor(conf, 20000, new Waiter.Predicate<Exception>() {

            @Override
            public boolean evaluate() throws Exception {
                return (rp.getConnectedPeer("FakePeer") != null);
            }
        });
        // Make sure that the replication source was not initialized
        List<ReplicationSourceInterface> sources = manager.getSources();
        for (ReplicationSourceInterface source : sources) {
            assertNotEquals("FakePeer", source.getPeerClusterId());
        }
        // Create a replication queue for the fake peer
        rq.addLog("FakePeer", "FakeFile");
        // Unregister peer, this should remove the peer and clear all queues associated with it
        // Need to wait for the ReplicationTracker to pick up the changes and notify listeners.
        rp.unregisterPeer("FakePeer");
        Waiter.waitFor(conf, 20000, new Waiter.Predicate<Exception>() {

            @Override
            public boolean evaluate() throws Exception {
                List<String> peers = rp.getAllPeerIds();
                return (!rq.getAllQueues().contains("FakePeer")) && (rp.getConnectedPeer("FakePeer") == null) && (!peers.contains("FakePeer"));
            }
        });
    } finally {
        conf.set("replication.replicationsource.implementation", replicationSourceImplName);
    }
}
Also used: IOException (java.io.IOException), ReplicationQueues (org.apache.hadoop.hbase.replication.ReplicationQueues), ReplicationQueuesArguments (org.apache.hadoop.hbase.replication.ReplicationQueuesArguments), ReplicationPeerConfig (org.apache.hadoop.hbase.replication.ReplicationPeerConfig), List (java.util.List), ArrayList (java.util.ArrayList), Waiter (org.apache.hadoop.hbase.Waiter), ReplicationPeers (org.apache.hadoop.hbase.replication.ReplicationPeers), Test (org.junit.Test)
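Both waits in this test lean on the same polling idiom: Waiter.waitFor(conf, timeoutMs, predicate) re-evaluates the predicate until it returns true or the timeout expires, which is what makes the watcher-driven, asynchronous cleanup assertable. A minimal sketch of the idiom in isolation; conditionHolds() is a placeholder for whatever asynchronous check is needed:

Waiter.waitFor(conf, 20000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
        // Placeholder: return true once the asynchronous state change is visible.
        return conditionHolds();
    }
});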

Example 4 with ReplicationQueuesArguments

Use of org.apache.hadoop.hbase.replication.ReplicationQueuesArguments in project hbase by apache.

From class TestReplicationAdmin, method testAddPeerWithUnDeletedQueues.

@Test
public void testAddPeerWithUnDeletedQueues() throws Exception {
    ReplicationPeerConfig rpc1 = new ReplicationPeerConfig();
    rpc1.setClusterKey(KEY_ONE);
    ReplicationPeerConfig rpc2 = new ReplicationPeerConfig();
    rpc2.setClusterKey(KEY_SECOND);
    Configuration conf = TEST_UTIL.getConfiguration();
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "Test HBaseAdmin", null);
    ReplicationQueues repQueues = ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(conf, null, zkw));
    repQueues.init("server1");
    // add queue for ID_ONE
    repQueues.addLog(ID_ONE, "file1");
    try {
        admin.addPeer(ID_ONE, rpc1, null);
        fail();
    } catch (Exception e) {
        // expected: a queue for ID_ONE still exists, so the peer cannot be added
    }
    repQueues.removeQueue(ID_ONE);
    assertEquals(0, repQueues.getAllQueues().size());
    // add recovered queue for ID_ONE
    repQueues.addLog(ID_ONE + "-server2", "file1");
    try {
        admin.addPeer(ID_ONE, rpc2, null);
        fail();
    } catch (Exception e) {
        // expected: a recovered queue for ID_ONE still exists, so the peer cannot be added
    }
    repQueues.removeAllQueues();
    zkw.close();
}
Also used: ReplicationQueues (org.apache.hadoop.hbase.replication.ReplicationQueues), ReplicationQueuesArguments (org.apache.hadoop.hbase.replication.ReplicationQueuesArguments), ReplicationPeerConfig (org.apache.hadoop.hbase.replication.ReplicationPeerConfig), Configuration (org.apache.hadoop.conf.Configuration), ZooKeeperWatcher (org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher), ReplicationPeerNotFoundException (org.apache.hadoop.hbase.ReplicationPeerNotFoundException), ReplicationException (org.apache.hadoop.hbase.replication.ReplicationException), IOException (java.io.IOException), RetriesExhaustedException (org.apache.hadoop.hbase.client.RetriesExhaustedException), Test (org.junit.Test)
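The failure mode this test provokes suggests a defensive pre-check before addPeer(). A sketch of such a check, assembled only from the ReplicationQueues calls shown above; the helper is illustrative, not part of ReplicationAdmin's API:

// Hypothetical helper: true if any queue in ZooKeeper still belongs to peerId,
// in either the live form ("1") or a recovered form ("1-server2").
static boolean hasLeftoverQueues(ReplicationQueues repQueues, String peerId) {
    for (String queueId : repQueues.getAllQueues()) {
        if (queueId.equals(peerId) || queueId.startsWith(peerId + "-")) {
            return true;
        }
    }
    return false;
}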

Example 5 with ReplicationQueuesArguments

Use of org.apache.hadoop.hbase.replication.ReplicationQueuesArguments in project hbase by apache.

From class TestReplicationSourceManager, method testClaimQueues.

@Test
public void testClaimQueues() throws Exception {
    final Server server = new DummyServer("hostname0.example.org");
    ReplicationQueues rq = ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(server.getConfiguration(), server, server.getZooKeeper()));
    rq.init(server.getServerName().toString());
    // populate some znodes in the peer znode
    files.add("log1");
    files.add("log2");
    for (String file : files) {
        rq.addLog("1", file);
    }
    // create 3 DummyServers
    Server s1 = new DummyServer("dummyserver1.example.org");
    Server s2 = new DummyServer("dummyserver2.example.org");
    Server s3 = new DummyServer("dummyserver3.example.org");
    // create 3 DummyNodeFailoverWorkers
    DummyNodeFailoverWorker w1 = new DummyNodeFailoverWorker(server.getServerName().getServerName(), s1);
    DummyNodeFailoverWorker w2 = new DummyNodeFailoverWorker(server.getServerName().getServerName(), s2);
    DummyNodeFailoverWorker w3 = new DummyNodeFailoverWorker(server.getServerName().getServerName(), s3);
    latch = new CountDownLatch(3);
    // start the threads
    w1.start();
    w2.start();
    w3.start();
    // make sure only one is successful
    int populatedMap = 0;
    // wait until all the workers are done
    latch.await();
    populatedMap += w1.isLogZnodesMapPopulated() + w2.isLogZnodesMapPopulated() + w3.isLogZnodesMapPopulated();
    assertEquals(1, populatedMap);
    server.abort("", null);
}
Also used: ReplicationQueues (org.apache.hadoop.hbase.replication.ReplicationQueues), ReplicationQueuesArguments (org.apache.hadoop.hbase.replication.ReplicationQueuesArguments), Server (org.apache.hadoop.hbase.Server), CountDownLatch (java.util.concurrent.CountDownLatch), ReplicationEndpoint (org.apache.hadoop.hbase.replication.ReplicationEndpoint), Test (org.junit.Test)

Aggregations

ReplicationQueuesArguments (org.apache.hadoop.hbase.replication.ReplicationQueuesArguments): 10
ReplicationQueues (org.apache.hadoop.hbase.replication.ReplicationQueues): 9
Test (org.junit.Test): 9
Server (org.apache.hadoop.hbase.Server): 6
IOException (java.io.IOException): 4
ReplicationPeerConfig (org.apache.hadoop.hbase.replication.ReplicationPeerConfig): 3
UUID (java.util.UUID): 2
Configuration (org.apache.hadoop.conf.Configuration): 2
Waiter (org.apache.hadoop.hbase.Waiter): 2
ReplicationException (org.apache.hadoop.hbase.replication.ReplicationException): 2
ReplicationPeers (org.apache.hadoop.hbase.replication.ReplicationPeers): 2
NodeFailoverWorker (org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.NodeFailoverWorker): 2
ZooKeeperWatcher (org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher): 2
KeeperException (org.apache.zookeeper.KeeperException): 2
ThreadFactoryBuilder (com.google.common.util.concurrent.ThreadFactoryBuilder): 1
ArrayList (java.util.ArrayList): 1
List (java.util.List): 1
TreeSet (java.util.TreeSet): 1
CountDownLatch (java.util.concurrent.CountDownLatch): 1
FileStatus (org.apache.hadoop.fs.FileStatus): 1