Search in sources :

Example 16 with ReplicationQueueStorage

Usage of org.apache.hadoop.hbase.replication.ReplicationQueueStorage in the Apache HBase project.

From the class TestLogsCleaner, the method testZooKeeperRecoveryDuringGetListOfReplicators:

@Test
public void testZooKeeperRecoveryDuringGetListOfReplicators() throws Exception {
    // Cleaner under test: while ZooKeeper is faulty it must keep every WAL,
    // and after ZK recovers it must report all candidates as deletable.
    ReplicationLogCleaner cleaner = new ReplicationLogCleaner();
    List<FileStatus> candidateLogs = Arrays.asList(
        new FileStatus(100, false, 3, 100, EnvironmentEdgeManager.currentTime(), new Path("log1")),
        new FileStatus(100, false, 3, 100, EnvironmentEdgeManager.currentTime(), new Path("log2")));
    FaultyZooKeeperWatcher faultyZK =
        new FaultyZooKeeperWatcher(conf, "testZooKeeperAbort-faulty", null);
    final AtomicBoolean getListOfReplicatorsFailed = new AtomicBoolean(false);
    try {
        // Start with a broken ZK connection.
        faultyZK.init(false);
        ReplicationQueueStorage queueStorage =
            spy(ReplicationStorageFactory.getReplicationQueueStorage(faultyZK, conf));
        // Record whether getAllWALs() fails with a ReplicationException, then rethrow
        // so the cleaner still sees the original failure.
        doAnswer(invocation -> {
            try {
                return invocation.callRealMethod();
            } catch (ReplicationException e) {
                LOG.debug("Caught Exception", e);
                getListOfReplicatorsFailed.set(true);
                throw e;
            }
        }).when(queueStorage).getAllWALs();
        cleaner.setConf(conf, faultyZK, queueStorage);
        // should keep all files due to a ConnectionLossException getting the queues znodes
        cleaner.preClean();
        Iterable<FileStatus> keptDuringFault = cleaner.getDeletableFiles(candidateLogs);
        assertTrue(getListOfReplicatorsFailed.get());
        assertFalse(keptDuringFault.iterator().hasNext());
        assertFalse(cleaner.isStopped());
        // zk recovery.
        faultyZK.init(true);
        cleaner.preClean();
        Iterator<FileStatus> deletable = cleaner.getDeletableFiles(candidateLogs).iterator();
        // Both logs become deletable once ZK is healthy again, in order.
        assertTrue(deletable.hasNext());
        assertEquals(new Path("log1"), deletable.next().getPath());
        assertTrue(deletable.hasNext());
        assertEquals(new Path("log2"), deletable.next().getPath());
        assertFalse(deletable.hasNext());
    } finally {
        faultyZK.close();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) ReplicationLogCleaner(org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner) InvocationOnMock(org.mockito.invocation.InvocationOnMock) ReplicationException(org.apache.hadoop.hbase.replication.ReplicationException) ReplicationQueueStorage(org.apache.hadoop.hbase.replication.ReplicationQueueStorage) Test(org.junit.Test)

Example 17 with ReplicationQueueStorage

Usage of org.apache.hadoop.hbase.replication.ReplicationQueueStorage in the Apache HBase project.

From the class TestReplicationBarrierCleaner, the method testDeleteRowForDeletedRegion:

@Test
public void testDeleteRowForDeletedRegion() throws IOException, ReplicationException {
    // Build a region with three replication barriers and a populated catalog family.
    TableName tableName = TableName.valueOf(name.getMethodName());
    RegionInfo region = RegionInfoBuilder.newBuilder(tableName).build();
    addBarrier(region, 40, 50, 60);
    fillCatalogFamily(region);
    String peerId = "1";
    ReplicationQueueStorage queueStorage = create(59L);
    @SuppressWarnings("unchecked")
    ReplicationPeerManager peerManager = create(queueStorage, Lists.newArrayList(peerId));
    ReplicationBarrierCleaner cleaner = create(peerManager);
    // we have something in catalog family, so only delete 40
    cleaner.chore();
    long[] remainingBarriers = ReplicationBarrierFamilyFormat
        .getReplicationBarriers(UTIL.getConnection(), region.getRegionName());
    assertArrayEquals(new long[] { 50, 60 }, remainingBarriers);
    verify(queueStorage, never()).removeLastSequenceIds(anyString(), anyList());
    // No catalog family, then we should remove the whole row
    clearCatalogFamily(region);
    cleaner.chore();
    try (Table meta = UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) {
        Get barrierGet = new Get(region.getRegionName())
            .addFamily(HConstants.REPLICATION_BARRIER_FAMILY);
        assertFalse(meta.exists(barrierGet));
    }
    verify(queueStorage, times(1))
        .removeLastSequenceIds(peerId, Arrays.asList(region.getEncodedName()));
}
Also used : TableName(org.apache.hadoop.hbase.TableName) Table(org.apache.hadoop.hbase.client.Table) ReplicationPeerManager(org.apache.hadoop.hbase.master.replication.ReplicationPeerManager) Get(org.apache.hadoop.hbase.client.Get) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) ReplicationQueueStorage(org.apache.hadoop.hbase.replication.ReplicationQueueStorage) Test(org.junit.Test)

Aggregations

ReplicationQueueStorage (org.apache.hadoop.hbase.replication.ReplicationQueueStorage)17 Test (org.junit.Test)11 ServerName (org.apache.hadoop.hbase.ServerName)6 Server (org.apache.hadoop.hbase.Server)5 TableName (org.apache.hadoop.hbase.TableName)5 ReplicationException (org.apache.hadoop.hbase.replication.ReplicationException)5 Path (org.apache.hadoop.fs.Path)4 IOException (java.io.IOException)3 FileStatus (org.apache.hadoop.fs.FileStatus)3 Connection (org.apache.hadoop.hbase.client.Connection)3 Get (org.apache.hadoop.hbase.client.Get)3 RegionInfo (org.apache.hadoop.hbase.client.RegionInfo)3 Table (org.apache.hadoop.hbase.client.Table)3 MockServer (org.apache.hadoop.hbase.util.MockServer)3 ArrayList (java.util.ArrayList)2 HashMap (java.util.HashMap)2 FileSystem (org.apache.hadoop.fs.FileSystem)2 Cell (org.apache.hadoop.hbase.Cell)2 ZooKeeperConnectionException (org.apache.hadoop.hbase.ZooKeeperConnectionException)2 Delete (org.apache.hadoop.hbase.client.Delete)2