
Example 1 with ReplicationHFileCleaner

use of org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner in project hbase by apache.

the class TestReplicationHFileCleaner method testZooKeeperAbort.

/**
   * ReplicationHFileCleaner should be able to ride over ZooKeeper errors without aborting.
   */
@Test
public void testZooKeeperAbort() throws Exception {
    ReplicationHFileCleaner cleaner = new ReplicationHFileCleaner();
    List<FileStatus> dummyFiles = Lists.newArrayList(
        new FileStatus(100, false, 3, 100, System.currentTimeMillis(), new Path("hfile1")),
        new FileStatus(100, false, 3, 100, System.currentTimeMillis(), new Path("hfile2")));
    FaultyZooKeeperWatcher faultyZK = new FaultyZooKeeperWatcher(conf, "testZooKeeperAbort-faulty", null);
    try {
        faultyZK.init();
        cleaner.setConf(conf, faultyZK);
        // should keep all files, due to a ConnectionLossException while reading the replication znodes
        Iterable<FileStatus> toDelete = cleaner.getDeletableFiles(dummyFiles);
        assertFalse(toDelete.iterator().hasNext());
        assertFalse(cleaner.isStopped());
    } finally {
        faultyZK.close();
    }
    // when zk is working both files should be returned
    cleaner = new ReplicationHFileCleaner();
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "testZooKeeperAbort-normal", null);
    try {
        cleaner.setConf(conf, zkw);
        Iterable<FileStatus> filesToDelete = cleaner.getDeletableFiles(dummyFiles);
        Iterator<FileStatus> iter = filesToDelete.iterator();
        assertTrue(iter.hasNext());
        assertEquals(new Path("hfile1"), iter.next().getPath());
        assertTrue(iter.hasNext());
        assertEquals(new Path("hfile2"), iter.next().getPath());
        assertFalse(iter.hasNext());
    } finally {
        zkw.close();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) ReplicationHFileCleaner(org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner) FileStatus(org.apache.hadoop.fs.FileStatus) ZooKeeperWatcher(org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher) Test(org.junit.Test)
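
The test above relies on a FaultyZooKeeperWatcher helper that is not shown in the snippet. In the HBase test class it is a small inner class; a minimal sketch of what it might look like follows, assuming Mockito spies and the hfile-refs znode path (both are assumptions here, not verbatim HBase source).

// Sketch of the FaultyZooKeeperWatcher helper assumed by testZooKeeperAbort.
// It wraps the real RecoverableZooKeeper in a Mockito spy and makes reads of
// the replication hfile-refs znode fail with a ConnectionLossException.
// Requires org.mockito.Mockito, org.apache.zookeeper.KeeperException, and
// org.apache.zookeeper.data.Stat; the znode path is an assumption.
static class FaultyZooKeeperWatcher extends ZooKeeperWatcher {
    private RecoverableZooKeeper zk;

    public FaultyZooKeeperWatcher(Configuration conf, String identifier, Abortable abortable)
            throws ZooKeeperConnectionException, IOException {
        super(conf, identifier, abortable);
    }

    public void init() throws Exception {
        // spy the real client and inject a connection-loss error on getData()
        this.zk = Mockito.spy(super.getRecoverableZooKeeper());
        Mockito.doThrow(new KeeperException.ConnectionLossException())
            .when(zk).getData("/hbase/replication/hfile-refs", null, new Stat());
    }

    @Override
    public RecoverableZooKeeper getRecoverableZooKeeper() {
        return zk;
    }
}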

Example 2 with ReplicationHFileCleaner

use of org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner in project hbase by apache.

the class TestReplicationHFileCleaner method testForDifferntHFileRefsZnodeVersion.

/*
   * Test for HBASE-14621. This test does not assert anything directly. Without the fix it
   * ends up in an infinite loop, so it times out.
   */
@Test(timeout = 15000)
public void testForDifferntHFileRefsZnodeVersion() throws Exception {
    // 1. Create a file
    Path file = new Path(root, "testForDifferntHFileRefsZnodeVersion");
    fs.createNewFile(file);
    // 2. Assert file is successfully created
    assertTrue("Test file not created!", fs.exists(file));
    ReplicationHFileCleaner cleaner = new ReplicationHFileCleaner();
    cleaner.setConf(conf);
    ReplicationQueuesClient replicationQueuesClient = Mockito.mock(ReplicationQueuesClient.class);
    // Return a different znode version for each call
    Mockito.when(replicationQueuesClient.getHFileRefsNodeChangeVersion()).thenReturn(1, 2);
    Class<? extends ReplicationHFileCleaner> cleanerClass = cleaner.getClass();
    Field rqc = cleanerClass.getDeclaredField("rqc");
    rqc.setAccessible(true);
    rqc.set(cleaner, replicationQueuesClient);
    cleaner.isFileDeletable(fs.getFileStatus(file));
}
Also used : Path(org.apache.hadoop.fs.Path) Field(java.lang.reflect.Field) ReplicationHFileCleaner(org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner) ReplicationQueuesClient(org.apache.hadoop.hbase.replication.ReplicationQueuesClient) Test(org.junit.Test)
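
For context on why returning two different versions exercises HBASE-14621: the cleaner reads the hfile-refs znode's change version before and after loading the references, and retries while the two reads disagree. A minimal sketch of that retry pattern follows; helper names such as loadRefsOnce() are hypothetical, and only getHFileRefsNodeChangeVersion() matches the API mocked in the test above.

// Sketch of the version-check retry loop that HBASE-14621 guards. The
// loadRefsOnce() helper is hypothetical; it stands in for one bulk read of
// all hfile references from ZooKeeper.
private Set<String> loadHFileRefs() throws KeeperException {
    while (true) {
        int before = rqc.getHFileRefsNodeChangeVersion();
        Set<String> refs = loadRefsOnce();  // hypothetical bulk read
        int after = rqc.getHFileRefsNodeChangeVersion();
        if (before == after) {
            // znode unchanged while reading: the snapshot is consistent
            return refs;
        }
        // version moved during the read; retry with the fresh version
    }
}

With the Mockito stub returning 1 on the first call and 2 on every later call, a correct loop terminates on its second pass (2 == 2); code that keeps comparing the first stale version against fresh reads would spin forever, which is exactly what the 15-second timeout catches.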

Example 3 with ReplicationHFileCleaner

use of org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner in project hbase by apache.

the class TestReplicationHFileCleaner method testIsFileDeletable.

@Test
public void testIsFileDeletable() throws IOException, ReplicationException {
    // 1. Create a file
    Path file = new Path(root, "testIsFileDeletableWithNoHFileRefs");
    fs.createNewFile(file);
    // 2. Assert file is successfully created
    assertTrue("Test file not created!", fs.exists(file));
    ReplicationHFileCleaner cleaner = new ReplicationHFileCleaner();
    cleaner.setConf(conf);
    // 3. Assert that file as is should be deletable
    assertTrue("Cleaner should allow to delete this file as there is no hfile reference node " + "for it in the queue.", cleaner.isFileDeletable(fs.getFileStatus(file)));
    List<Pair<Path, Path>> files = new ArrayList<>(1);
    files.add(new Pair<>(null, file));
    // 4. Add the file to hfile-refs queue
    rq.addHFileRefs(peerId, files);
    // 5. Assert file should not be deletable
    assertFalse("Cleaner should not allow to delete this file as there is a hfile reference node " + "for it in the queue.", cleaner.isFileDeletable(fs.getFileStatus(file)));
}
Also used : Path(org.apache.hadoop.fs.Path) ReplicationHFileCleaner(org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner) ArrayList(java.util.ArrayList) Pair(org.apache.hadoop.hbase.util.Pair) Test(org.junit.Test)
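
These tests construct the cleaner directly, but in a deployment ReplicationHFileCleaner runs inside the master's HFile cleaner chore, registered through the hbase.master.hfilecleaner.plugins property. A minimal sketch (the property name follows the HBase docs; treat the rest as illustrative rather than a drop-in recipe):

// Sketch: registering ReplicationHFileCleaner as an HFile cleaner plugin
// so the master consults replication hfile references before deleting files.
Configuration conf = HBaseConfiguration.create();
conf.set("hbase.master.hfilecleaner.plugins",
    "org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner");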

Example 4 with ReplicationHFileCleaner

use of org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner in project hbase by apache.

the class TestReplicationHFileCleaner method testGetDeletableFiles.

@Test
public void testGetDeletableFiles() throws Exception {
    // 1. Create two files and assert that they exist
    Path notDeletablefile = new Path(root, "testGetDeletableFiles_1");
    fs.createNewFile(notDeletablefile);
    assertTrue("Test file not created!", fs.exists(notDeletablefile));
    Path deletablefile = new Path(root, "testGetDeletableFiles_2");
    fs.createNewFile(deletablefile);
    assertTrue("Test file not created!", fs.exists(deletablefile));
    List<FileStatus> files = new ArrayList<>(2);
    FileStatus f = new FileStatus();
    f.setPath(deletablefile);
    files.add(f);
    f = new FileStatus();
    f.setPath(notDeletablefile);
    files.add(f);
    List<Pair<Path, Path>> hfiles = new ArrayList<>(1);
    hfiles.add(new Pair<>(null, notDeletablefile));
    // 2. Add one file to hfile-refs queue
    rq.addHFileRefs(peerId, hfiles);
    ReplicationHFileCleaner cleaner = new ReplicationHFileCleaner();
    cleaner.setConf(conf);
    // 3. Ask the cleaner which of the two files may be deleted
    Iterator<FileStatus> deletableFilesIterator = cleaner.getDeletableFiles(files).iterator();
    // 4. Assert that only the file without an hfile reference node is returned
    assertTrue("Cleaner should return one deletable file", deletableFilesIterator.hasNext());
    assertEquals("File " + deletablefile + " should be deletable as it has no hfile reference node.",
        deletablefile, deletableFilesIterator.next().getPath());
    assertFalse("File " + notDeletablefile
        + " should not be deletable as its hfile reference node is present in the queue.",
        deletableFilesIterator.hasNext());
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) ReplicationHFileCleaner(org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner) ArrayList(java.util.ArrayList) Pair(org.apache.hadoop.hbase.util.Pair) Test(org.junit.Test)
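
A note on the design these examples suggest: isFileDeletable() answers for a single file, while getDeletableFiles() lets the cleaner load the hfile-refs set once and filter the whole batch in memory, avoiding a ZooKeeper round trip per file. A hedged sketch of that batch-filtering shape (helper names are assumptions; loadHFileRefs() is the hypothetical bulk read sketched earlier):

// Sketch of batch filtering: load referenced file names once, then keep only
// unreferenced files. On ZooKeeper errors nothing is returned as deletable,
// matching the behavior verified by testZooKeeperAbort above.
@Override
public Iterable<FileStatus> getDeletableFiles(Iterable<FileStatus> files) {
    Set<String> hfileRefs;
    try {
        hfileRefs = loadHFileRefs();  // hypothetical single bulk ZK read
    } catch (KeeperException e) {
        return Collections.emptyList();  // fail safe: delete nothing
    }
    return Iterables.filter(files,
        file -> !hfileRefs.contains(file.getPath().getName()));
}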

Aggregations

Path (org.apache.hadoop.fs.Path): 4
ReplicationHFileCleaner (org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner): 4
Test (org.junit.Test): 4
ArrayList (java.util.ArrayList): 2
FileStatus (org.apache.hadoop.fs.FileStatus): 2
Pair (org.apache.hadoop.hbase.util.Pair): 2
Field (java.lang.reflect.Field): 1
ReplicationQueuesClient (org.apache.hadoop.hbase.replication.ReplicationQueuesClient): 1
ZooKeeperWatcher (org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher): 1