Use of org.apache.hadoop.hbase.replication.ReplicationQueueStorage in project hbase by apache.
From the class TestLogsCleaner, method testZooKeeperRecoveryDuringGetListOfReplicators:
@Test
public void testZooKeeperRecoveryDuringGetListOfReplicators() throws Exception {
  ReplicationLogCleaner cleaner = new ReplicationLogCleaner();
  List<FileStatus> dummyFiles = Arrays.asList(
    new FileStatus(100, false, 3, 100, EnvironmentEdgeManager.currentTime(), new Path("log1")),
    new FileStatus(100, false, 3, 100, EnvironmentEdgeManager.currentTime(), new Path("log2")));
  FaultyZooKeeperWatcher faultyZK =
    new FaultyZooKeeperWatcher(conf, "testZooKeeperAbort-faulty", null);
  final AtomicBoolean getListOfReplicatorsFailed = new AtomicBoolean(false);
  try {
    faultyZK.init(false);
    ReplicationQueueStorage queueStorage =
      spy(ReplicationStorageFactory.getReplicationQueueStorage(faultyZK, conf));
    doAnswer(new Answer<Object>() {

      @Override
      public Object answer(InvocationOnMock invocation) throws Throwable {
        try {
          return invocation.callRealMethod();
        } catch (ReplicationException e) {
          LOG.debug("Caught Exception", e);
          getListOfReplicatorsFailed.set(true);
          throw e;
        }
      }
    }).when(queueStorage).getAllWALs();
    cleaner.setConf(conf, faultyZK, queueStorage);
    // should keep all files due to a ConnectionLossException getting the queues znodes
    cleaner.preClean();
    Iterable<FileStatus> toDelete = cleaner.getDeletableFiles(dummyFiles);
    assertTrue(getListOfReplicatorsFailed.get());
    assertFalse(toDelete.iterator().hasNext());
    assertFalse(cleaner.isStopped());
    // zk recovery
    faultyZK.init(true);
    cleaner.preClean();
    Iterable<FileStatus> filesToDelete = cleaner.getDeletableFiles(dummyFiles);
    Iterator<FileStatus> iter = filesToDelete.iterator();
    assertTrue(iter.hasNext());
    assertEquals(new Path("log1"), iter.next().getPath());
    assertTrue(iter.hasNext());
    assertEquals(new Path("log2"), iter.next().getPath());
    assertFalse(iter.hasNext());
  } finally {
    faultyZK.close();
  }
}
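
The FaultyZooKeeperWatcher used above is a test-local helper that is not shown on this page. A minimal sketch of how such a watcher could be written, assuming it extends ZKWatcher and wraps the underlying RecoverableZooKeeper in a Mockito spy so that listing the replication znode fails with a ConnectionLossException until init(true) is called; the znode path below is an assumption for illustration only.

import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.spy;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.zookeeper.KeeperException;

// Sketch of a faulty watcher: until init(true) is called, reading the replication
// rs znode throws a ConnectionLossException, which is what the cleaner test needs
// in order to exercise its "keep everything on ZK error" path.
private static class FaultyZooKeeperWatcher extends ZKWatcher {
  private RecoverableZooKeeper zk;

  public FaultyZooKeeperWatcher(Configuration conf, String identifier, Abortable abortable)
      throws ZooKeeperConnectionException, IOException {
    super(conf, identifier, abortable);
  }

  public void init(boolean autoRecovery) throws Exception {
    this.zk = spy(super.getRecoverableZooKeeper());
    if (!autoRecovery) {
      // "/hbase/replication/rs" is an assumed znode path for illustration only.
      doThrow(new KeeperException.ConnectionLossException())
        .when(zk).getChildren("/hbase/replication/rs", null);
    }
  }

  @Override
  public RecoverableZooKeeper getRecoverableZooKeeper() {
    return zk;
  }
}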
Another use of ReplicationQueueStorage in project hbase by apache, from the class TestReplicationBarrierCleaner, method testDeleteRowForDeletedRegion:
@Test
public void testDeleteRowForDeletedRegion() throws IOException, ReplicationException {
  TableName tableName = TableName.valueOf(name.getMethodName());
  RegionInfo region = RegionInfoBuilder.newBuilder(tableName).build();
  addBarrier(region, 40, 50, 60);
  fillCatalogFamily(region);
  String peerId = "1";
  ReplicationQueueStorage queueStorage = create(59L);
  @SuppressWarnings("unchecked")
  ReplicationPeerManager peerManager = create(queueStorage, Lists.newArrayList(peerId));
  ReplicationBarrierCleaner cleaner = create(peerManager);
  // we have something in catalog family, so only delete 40
  cleaner.chore();
  assertArrayEquals(new long[] { 50, 60 }, ReplicationBarrierFamilyFormat
    .getReplicationBarriers(UTIL.getConnection(), region.getRegionName()));
  verify(queueStorage, never()).removeLastSequenceIds(anyString(), anyList());
  // No catalog family, then we should remove the whole row
  clearCatalogFamily(region);
  cleaner.chore();
  try (Table table = UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) {
    assertFalse(table.exists(
      new Get(region.getRegionName()).addFamily(HConstants.REPLICATION_BARRIER_FAMILY)));
  }
  verify(queueStorage, times(1)).removeLastSequenceIds(peerId,
    Arrays.asList(region.getEncodedName()));
}
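
The addBarrier, fillCatalogFamily, clearCatalogFamily and create(...) overloads are test-local helpers that are not shown here. A minimal sketch of what the three create(...) helpers might look like, assuming Mockito mocks, a ReplicationBarrierCleaner constructor taking a Configuration, a Stoppable, a Connection and a ReplicationPeerManager, and UTIL being the test's HBaseTestingUtility; method names such as getSerialPeerIdsBelongsTo should be verified against the HBase version in use.

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.TableName;

// Returns a mocked queue storage whose last pushed sequence id is fixed for every
// region/peer combination (59 in the test above, so barrier 40 is safe to delete).
private ReplicationQueueStorage create(Long lastPushedSeqId) throws ReplicationException {
  ReplicationQueueStorage queueStorage = mock(ReplicationQueueStorage.class);
  when(queueStorage.getLastSequenceId(anyString(), anyString())).thenReturn(lastPushedSeqId);
  return queueStorage;
}

// Returns a mocked peer manager that exposes the given queue storage and reports the
// given peer ids as serial peers for any table. The real helper is variadic, which is
// why the caller carries @SuppressWarnings("unchecked"); this sketch is simplified.
private ReplicationPeerManager create(ReplicationQueueStorage queueStorage, List<String> peerIds) {
  ReplicationPeerManager peerManager = mock(ReplicationPeerManager.class);
  if (queueStorage != null) {
    when(peerManager.getQueueStorage()).thenReturn(queueStorage);
  }
  when(peerManager.getSerialPeerIdsBelongsTo(any(TableName.class))).thenReturn(peerIds);
  return peerManager;
}

// Builds the cleaner under test against the mini-cluster connection.
private ReplicationBarrierCleaner create(ReplicationPeerManager peerManager) throws IOException {
  Stoppable stoppable = new Stoppable() {
    private volatile boolean stopped = false;
    @Override public void stop(String why) { stopped = true; }
    @Override public boolean isStopped() { return stopped; }
  };
  return new ReplicationBarrierCleaner(UTIL.getConfiguration(), stoppable, UTIL.getConnection(),
    peerManager);
}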