Use of org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner in project hbase by apache.
The class TestReplicationHFileCleaner, method testZooKeeperAbort.
/**
* ReplicationHFileCleaner should be able to ride over ZooKeeper errors without aborting.
*/
@Test
public void testZooKeeperAbort() throws Exception {
  ReplicationHFileCleaner cleaner = new ReplicationHFileCleaner();
  List<FileStatus> dummyFiles = Lists.newArrayList(
      new FileStatus(100, false, 3, 100, System.currentTimeMillis(), new Path("hfile1")),
      new FileStatus(100, false, 3, 100, System.currentTimeMillis(), new Path("hfile2")));
  FaultyZooKeeperWatcher faultyZK =
      new FaultyZooKeeperWatcher(conf, "testZooKeeperAbort-faulty", null);
  try {
    faultyZK.init();
    cleaner.setConf(conf, faultyZK);
    // should keep all files due to a ConnectionLossException getting the queues znodes
    Iterable<FileStatus> toDelete = cleaner.getDeletableFiles(dummyFiles);
    assertFalse(toDelete.iterator().hasNext());
    assertFalse(cleaner.isStopped());
  } finally {
    faultyZK.close();
  }
  // when zk is working, both files should be returned
  cleaner = new ReplicationHFileCleaner();
  ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "testZooKeeperAbort-normal", null);
  try {
    cleaner.setConf(conf, zkw);
    Iterable<FileStatus> filesToDelete = cleaner.getDeletableFiles(dummyFiles);
    Iterator<FileStatus> iter = filesToDelete.iterator();
    assertTrue(iter.hasNext());
    assertEquals(new Path("hfile1"), iter.next().getPath());
    assertTrue(iter.hasNext());
    assertEquals(new Path("hfile2"), iter.next().getPath());
    assertFalse(iter.hasNext());
  } finally {
    zkw.close();
  }
}
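The FaultyZooKeeperWatcher helper used above is an inner class of the test that this page does not show. A minimal sketch of what it plausibly looks like, assuming a Mockito spy that forces a ConnectionLossException when the cleaner reads the hfile-refs znode (the znode path and stubbed method are assumptions inferred from the test's behavior, not verbatim HBase code):

static class FaultyZooKeeperWatcher extends ZooKeeperWatcher {
  private RecoverableZooKeeper zk;

  public FaultyZooKeeperWatcher(Configuration conf, String identifier, Abortable abortable)
      throws ZooKeeperConnectionException, IOException {
    super(conf, identifier, abortable);
  }

  public void init() throws Exception {
    // Spy on the real ZooKeeper handle and make getData() fail with a
    // recoverable connection-loss error instead of returning queue data.
    this.zk = Mockito.spy(super.getRecoverableZooKeeper());
    Mockito.doThrow(new KeeperException.ConnectionLossException())
        .when(zk).getData("/hbase/replication/hfile-refs", null, new Stat());
  }

  @Override
  public RecoverableZooKeeper getRecoverableZooKeeper() {
    return zk;
  }
}

Because getRecoverableZooKeeper() hands the cleaner the faulty spy, every queue read fails, and the assertions verify that the cleaner conservatively keeps all files instead of aborting.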
Use of org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner in project hbase by apache.
The class TestReplicationHFileCleaner, method testForDifferntHFileRefsZnodeVersion.
/*
 * Test for HBASE-14621. This test does not assert anything directly; without the fix it
 * ends up in an infinite loop, so it times out.
 */
@Test(timeout = 15000)
public void testForDifferntHFileRefsZnodeVersion() throws Exception {
  // 1. Create a file
  Path file = new Path(root, "testForDifferntHFileRefsZnodeVersion");
  fs.createNewFile(file);
  // 2. Assert file is successfully created
  assertTrue("Test file not created!", fs.exists(file));
  ReplicationHFileCleaner cleaner = new ReplicationHFileCleaner();
  cleaner.setConf(conf);
  ReplicationQueuesClient replicationQueuesClient = Mockito.mock(ReplicationQueuesClient.class);
  // Return a different znode version on each call
  Mockito.when(replicationQueuesClient.getHFileRefsNodeChangeVersion()).thenReturn(1, 2);
  // Inject the mock into the cleaner's private "rqc" field via reflection
  Class<? extends ReplicationHFileCleaner> cleanerClass = cleaner.getClass();
  Field rqc = cleanerClass.getDeclaredField("rqc");
  rqc.setAccessible(true);
  rqc.set(cleaner, replicationQueuesClient);
  cleaner.isFileDeletable(fs.getFileStatus(file));
}
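The mock is stubbed to return znode version 1 on the first call and 2 on every call after that. That stub shape targets a classic failure mode in optimistic read loops: if the baseline version is captured once and never refreshed on retry, the equality check can never succeed. A hedged sketch of the pattern the fixed code follows (loadHFileRefsSnapshot and readAllHFileRefs are illustrative names, not the actual HBase implementation):

// Illustrative optimistic-snapshot loop. Re-reading the baseline version v0
// on every attempt is what lets this terminate against the mock above:
// attempt 1 sees v0=1, v1=2 (retry); attempt 2 sees v0=2, v1=2 (done).
// A pre-fix shape that captured v0 once outside the loop would spin forever.
Set<String> loadHFileRefsSnapshot(ReplicationQueuesClient rqc) throws KeeperException {
  while (true) {
    int v0 = rqc.getHFileRefsNodeChangeVersion();
    Set<String> refs = readAllHFileRefs(rqc); // hypothetical helper
    int v1 = rqc.getHFileRefsNodeChangeVersion();
    if (v0 == v1) {
      return refs; // versions match: consistent snapshot
    }
    // version changed mid-read: loop and try again
  }
}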
Use of org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner in project hbase by apache.
The class TestReplicationHFileCleaner, method testIsFileDeletable.
@Test
public void testIsFileDeletable() throws IOException, ReplicationException {
  // 1. Create a file
  Path file = new Path(root, "testIsFileDeletableWithNoHFileRefs");
  fs.createNewFile(file);
  // 2. Assert file is successfully created
  assertTrue("Test file not created!", fs.exists(file));
  ReplicationHFileCleaner cleaner = new ReplicationHFileCleaner();
  cleaner.setConf(conf);
  // 3. Assert that the file as-is is deletable
  assertTrue("Cleaner should allow deleting this file as there is no hfile reference node "
      + "for it in the queue.", cleaner.isFileDeletable(fs.getFileStatus(file)));
  List<Pair<Path, Path>> files = new ArrayList<>(1);
  files.add(new Pair<>(null, file));
  // 4. Add the file to the hfile-refs queue
  rq.addHFileRefs(peerId, files);
  // 5. Assert the file is no longer deletable
  assertFalse("Cleaner should not allow deleting this file as there is a hfile reference node "
      + "for it in the queue.", cleaner.isFileDeletable(fs.getFileStatus(file)));
}
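The fields conf, fs, root, rq, and peerId used throughout these methods come from class-level fixture setup that this page does not include. A rough sketch of what such a fixture might look like, assuming an HBaseTestingUtility mini cluster with bulk-load replication enabled (the queue registration details are elided because the exact factory calls are not shown here):

private static Configuration conf;
private static FileSystem fs;
private static Path root;
private static ReplicationQueues rq;
private static String peerId = "TestReplicationHFileCleaner";
private static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

@BeforeClass
public static void setUpBeforeClass() throws Exception {
  conf = TEST_UTIL.getConfiguration();
  // hfile-refs znodes are only maintained when bulk-load replication is on
  conf.setBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, true);
  TEST_UTIL.startMiniZKCluster();
  TEST_UTIL.startMiniDFSCluster(1);
  fs = FileSystem.get(conf);
  root = TEST_UTIL.getDataTestDirOnTestFS();
  // rq would be obtained from the replication factory against the mini ZK
  // cluster and the peer registered under peerId; those calls are omitted.
}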
Use of org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner in project hbase by apache.
The class TestReplicationHFileCleaner, method testGetDeletableFiles.
@Test
public void testGetDeletableFiles() throws Exception {
  // 1. Create two files and assert that they exist
  Path notDeletablefile = new Path(root, "testGetDeletableFiles_1");
  fs.createNewFile(notDeletablefile);
  assertTrue("Test file not created!", fs.exists(notDeletablefile));
  Path deletablefile = new Path(root, "testGetDeletableFiles_2");
  fs.createNewFile(deletablefile);
  assertTrue("Test file not created!", fs.exists(deletablefile));
  List<FileStatus> files = new ArrayList<>(2);
  FileStatus f = new FileStatus();
  f.setPath(deletablefile);
  files.add(f);
  f = new FileStatus();
  f.setPath(notDeletablefile);
  files.add(f);
  // 2. Add one file to the hfile-refs queue
  List<Pair<Path, Path>> hfiles = new ArrayList<>(1);
  hfiles.add(new Pair<>(null, notDeletablefile));
  rq.addHFileRefs(peerId, hfiles);
  ReplicationHFileCleaner cleaner = new ReplicationHFileCleaner();
  cleaner.setConf(conf);
  // 3. Assert that only the file without a hfile reference node is returned as deletable
  Iterator<FileStatus> deletableFilesIterator = cleaner.getDeletableFiles(files).iterator();
  assertTrue(deletableFilesIterator.hasNext());
  assertEquals(deletablefile, deletableFilesIterator.next().getPath());
  assertFalse("File " + notDeletablefile + " should not be deletable as its hfile reference "
      + "node is present in the queue.", deletableFilesIterator.hasNext());
}
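In production, ReplicationHFileCleaner is not constructed by hand as in these tests; the master's HFileCleaner chore instantiates it from configuration and feeds it candidate files from the archive directory. A short sketch of that wiring (hbase.master.hfilecleaner.plugins is the standard HBase property; the loop below just mirrors the calls the tests make directly, and archivedFiles stands in for the chore's archive-directory listing):

Configuration conf = HBaseConfiguration.create();
// Register the delegate with the master's archive cleaner chore.
conf.set("hbase.master.hfilecleaner.plugins",
    "org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner");

// The chore then does the equivalent of the manual calls in the tests:
ReplicationHFileCleaner cleaner = new ReplicationHFileCleaner();
cleaner.setConf(conf);
for (FileStatus candidate : cleaner.getDeletableFiles(archivedFiles)) {
  // only files with no hfile reference node in the replication queues
  // come back as deletable; archivedFiles is a placeholder for the
  // archive-directory listing the chore would supply
}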