Search in sources:

Example 1 with BaseHFileCleanerDelegate

Use of org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate in project hbase by apache.

From the class TestZooKeeperTableArchiveClient, method testArchivingOnSingleTable:

@Test
public void testArchivingOnSingleTable() throws Exception {
    createArchiveDirectory();
    FileSystem fs = UTIL.getTestFileSystem();
    Path archiveDir = getArchiveDir();
    Path tableDir = getTableDir(STRING_TABLE_NAME);
    toCleanup.add(archiveDir);
    toCleanup.add(tableDir);
    Configuration conf = UTIL.getConfiguration();
    // setup the delegate
    Stoppable stop = new StoppableImplementation();
    HFileCleaner cleaner = setupAndCreateCleaner(conf, fs, archiveDir, stop);
    List<BaseHFileCleanerDelegate> cleaners = turnOnArchiving(STRING_TABLE_NAME, cleaner);
    final LongTermArchivingHFileCleaner delegate = (LongTermArchivingHFileCleaner) cleaners.get(0);
    // create the region
    ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.of(TEST_FAM);
    HRegion region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd);
    List<HRegion> regions = new ArrayList<>();
    regions.add(region);
    Mockito.doReturn(regions).when(rss).getRegions();
    final CompactedHFilesDischarger compactionCleaner = new CompactedHFilesDischarger(100, stop, rss, false);
    loadFlushAndCompact(region, TEST_FAM);
    compactionCleaner.chore();
    // get the current hfiles in the archive directory
    List<Path> files = getAllFiles(fs, archiveDir);
    if (files == null) {
        CommonFSUtils.logFileSystemState(fs, UTIL.getDataTestDir(), LOG);
        throw new RuntimeException("Didn't archive any files!");
    }
    CountDownLatch finished = setupCleanerWatching(delegate, cleaners, files.size());
    runCleaner(cleaner, finished, stop);
    // we know the cleaner ran, so now check all the files again to make sure they are still there
    List<Path> archivedFiles = getAllFiles(fs, archiveDir);
    assertEquals("Archived files changed after running archive cleaner.", files, archivedFiles);
    // but we still have the archive directory
    assertTrue(fs.exists(HFileArchiveUtil.getArchivePath(UTIL.getConfiguration())));
}
Also used : Path(org.apache.hadoop.fs.Path) BaseHFileCleanerDelegate(org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate) Configuration(org.apache.hadoop.conf.Configuration) StoppableImplementation(org.apache.hadoop.hbase.util.StoppableImplementation) ArrayList(java.util.ArrayList) Stoppable(org.apache.hadoop.hbase.Stoppable) HFileCleaner(org.apache.hadoop.hbase.master.cleaner.HFileCleaner) ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) CountDownLatch(java.util.concurrent.CountDownLatch) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) CompactedHFilesDischarger(org.apache.hadoop.hbase.regionserver.CompactedHFilesDischarger) FileSystem(org.apache.hadoop.fs.FileSystem) Test(org.junit.Test)
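
The tests above exercise LongTermArchivingHFileCleaner, one concrete delegate shipped with HBase. For orientation, here is a minimal sketch of what a custom delegate could look like; the class name and the one-hour threshold are hypothetical, while BaseHFileCleanerDelegate and the per-file isFileDeletable(FileStatus) hook are the framework pieces these tests rely on.

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate;

/**
 * Hypothetical delegate: keep archived HFiles for at least one hour.
 * The HFileCleaner chore asks each configured delegate whether a candidate
 * file under the archive directory may be removed; returning false retains it.
 */
public class KeepForOneHourHFileCleaner extends BaseHFileCleanerDelegate {

    private static final long TTL_MS = 60L * 60L * 1000L;

    @Override
    public boolean isFileDeletable(FileStatus fStat) {
        long age = System.currentTimeMillis() - fStat.getModificationTime();
        // delete only files older than the (made-up) retention window
        return age > TTL_MS;
    }
}

To be picked up by the master's HFileCleaner, a delegate like this is normally listed in the hbase.master.hfilecleaner.plugins configuration property (verify the key against the HBase version in use); setupCleanerWatching in Example 2 shows how the tests then swap the configured delegate for a Mockito spy at position 0 of the cleaner's delegate list.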

Example 2 with BaseHFileCleanerDelegate

Use of org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate in project hbase by apache.

From the class TestZooKeeperTableArchiveClient, method setupCleanerWatching:

/**
 * Spy on the {@link LongTermArchivingHFileCleaner} to ensure we can catch when the cleaner has
 * seen all the files
 * @return a {@link CountDownLatch} to wait on that releases when the cleaner has been called at
 *         least the expected number of times.
 */
private CountDownLatch setupCleanerWatching(LongTermArchivingHFileCleaner cleaner, List<BaseHFileCleanerDelegate> cleaners, final int expected) {
    // replace the cleaner with one that we can check
    BaseHFileCleanerDelegate delegateSpy = Mockito.spy(cleaner);
    final int[] counter = new int[] { 0 };
    final CountDownLatch finished = new CountDownLatch(1);
    Mockito.doAnswer(new Answer<Iterable<FileStatus>>() {

        @Override
        public Iterable<FileStatus> answer(InvocationOnMock invocation) throws Throwable {
            counter[0]++;
            LOG.debug(counter[0] + "/ " + expected + ") Wrapping call to getDeletableFiles for files: " + invocation.getArgument(0));
            @SuppressWarnings("unchecked") Iterable<FileStatus> ret = (Iterable<FileStatus>) invocation.callRealMethod();
            if (counter[0] >= expected) {
                finished.countDown();
            }
            return ret;
        }
    }).when(delegateSpy).getDeletableFiles(Mockito.anyList());
    cleaners.set(0, delegateSpy);
    return finished;
}
Also used : BaseHFileCleanerDelegate(org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate) FileStatus(org.apache.hadoop.fs.FileStatus) InvocationOnMock(org.mockito.invocation.InvocationOnMock) CountDownLatch(java.util.concurrent.CountDownLatch)
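
The pattern in setupCleanerWatching — a Mockito spy that still calls the real method, with doAnswer used only to count invocations and release a CountDownLatch — is a general way to synchronize a test with work done in the background. Below is a stripped-down, self-contained sketch of the same idea; the Worker class and every name in it are invented for illustration.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;

import org.mockito.Mockito;

public class SpyLatchSketch {

    /** Plain collaborator whose calls we want to observe without changing behavior. */
    static class Worker {
        public int process(int value) {
            return value * 2;
        }
    }

    public static void main(String[] args) throws InterruptedException {
        Worker spy = Mockito.spy(new Worker());
        AtomicInteger calls = new AtomicInteger();
        CountDownLatch seenTwice = new CountDownLatch(1);

        // Delegate to the real method, but count invocations and release the latch
        // once the expected number of calls has been observed.
        Mockito.doAnswer(invocation -> {
            Object result = invocation.callRealMethod();
            if (calls.incrementAndGet() >= 2) {
                seenTwice.countDown();
            }
            return result;
        }).when(spy).process(Mockito.anyInt());

        // Something in the background (here: a plain thread) drives the spy.
        new Thread(() -> {
            spy.process(1);
            spy.process(2);
        }).start();

        // The test blocks here until the spy has been exercised enough times.
        seenTwice.await();
        System.out.println("spy called " + calls.get() + " times");
    }
}

Using doAnswer(...).when(spy) rather than when(spy.process(...)) matters for spies: the stubbing call itself is intercepted instead of executing the real method, which is the same reason the HBase test wires its spy that way.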

Example 3 with BaseHFileCleanerDelegate

Use of org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate in project hbase by apache.

From the class TestZooKeeperTableArchiveClient, method testMultipleTables:

/**
 * Test archiving/cleaning across multiple tables, where some are retained, and others aren't
 * @throws Exception on failure
 */
@Test
public void testMultipleTables() throws Exception {
    createArchiveDirectory();
    String otherTable = "otherTable";
    FileSystem fs = UTIL.getTestFileSystem();
    Path archiveDir = getArchiveDir();
    Path tableDir = getTableDir(STRING_TABLE_NAME);
    Path otherTableDir = getTableDir(otherTable);
    // register cleanup for the created directories
    toCleanup.add(archiveDir);
    toCleanup.add(tableDir);
    toCleanup.add(otherTableDir);
    Configuration conf = UTIL.getConfiguration();
    // setup the delegate
    Stoppable stop = new StoppableImplementation();
    final ChoreService choreService = new ChoreService("TEST_SERVER_NAME");
    HFileCleaner cleaner = setupAndCreateCleaner(conf, fs, archiveDir, stop);
    List<BaseHFileCleanerDelegate> cleaners = turnOnArchiving(STRING_TABLE_NAME, cleaner);
    final LongTermArchivingHFileCleaner delegate = (LongTermArchivingHFileCleaner) cleaners.get(0);
    // create the region
    ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.of(TEST_FAM);
    HRegion region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd);
    List<HRegion> regions = new ArrayList<>();
    regions.add(region);
    Mockito.doReturn(regions).when(rss).getRegions();
    final CompactedHFilesDischarger compactionCleaner = new CompactedHFilesDischarger(100, stop, rss, false);
    loadFlushAndCompact(region, TEST_FAM);
    compactionCleaner.chore();
    // create another table that we don't archive
    hcd = ColumnFamilyDescriptorBuilder.of(TEST_FAM);
    HRegion otherRegion = UTIL.createTestRegion(otherTable, hcd);
    regions = new ArrayList<>();
    regions.add(otherRegion);
    Mockito.doReturn(regions).when(rss).getRegions();
    final CompactedHFilesDischarger compactionCleaner1 = new CompactedHFilesDischarger(100, stop, rss, false);
    loadFlushAndCompact(otherRegion, TEST_FAM);
    compactionCleaner1.chore();
    // get the current hfiles in the archive directory
    // Should be archived
    List<Path> files = getAllFiles(fs, archiveDir);
    if (files == null) {
        CommonFSUtils.logFileSystemState(fs, archiveDir, LOG);
        throw new RuntimeException("Didn't archive any files!");
    }
    // make sure we have files from both tables
    int initialCountForPrimary = 0;
    int initialCountForOtherTable = 0;
    for (Path file : files) {
        String tableName = file.getParent().getParent().getParent().getName();
        // check to which table this file belongs
        if (tableName.equals(otherTable)) {
            initialCountForOtherTable++;
        } else if (tableName.equals(STRING_TABLE_NAME)) {
            initialCountForPrimary++;
        }
    }
    assertTrue("Didn't archive files for:" + STRING_TABLE_NAME, initialCountForPrimary > 0);
    assertTrue("Didn't archive files for:" + otherTable, initialCountForOtherTable > 0);
    // run the cleaner, checking each of the directories and files in 'otherTable' (both should be
    // deleted) and the files in the primary table (which should be retained)
    CountDownLatch finished = setupCleanerWatching(delegate, cleaners, files.size() + 3);
    // run the cleaner
    choreService.scheduleChore(cleaner);
    // wait for the cleaner to check all the files
    finished.await();
    // stop the cleaner
    stop.stop("");
    // we know the cleaner ran, so now check all the files again to make sure they are still there
    List<Path> archivedFiles = getAllFiles(fs, archiveDir);
    int archivedForPrimary = 0;
    for (Path file : archivedFiles) {
        String tableName = file.getParent().getParent().getParent().getName();
        // ensure we don't have files from the non-archived table
        assertFalse("Have a file from the non-archived table: " + file, tableName.equals(otherTable));
        if (tableName.equals(STRING_TABLE_NAME)) {
            archivedForPrimary++;
        }
    }
    assertEquals("Not all archived files for the primary table were retained.", initialCountForPrimary, archivedForPrimary);
    // but we still have the archive directory
    assertTrue("Archive directory was deleted via archiver", fs.exists(archiveDir));
}
Also used : Path(org.apache.hadoop.fs.Path) BaseHFileCleanerDelegate(org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate) Configuration(org.apache.hadoop.conf.Configuration) StoppableImplementation(org.apache.hadoop.hbase.util.StoppableImplementation) ArrayList(java.util.ArrayList) Stoppable(org.apache.hadoop.hbase.Stoppable) HFileCleaner(org.apache.hadoop.hbase.master.cleaner.HFileCleaner) ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) CountDownLatch(java.util.concurrent.CountDownLatch) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) ChoreService(org.apache.hadoop.hbase.ChoreService) CompactedHFilesDischarger(org.apache.hadoop.hbase.regionserver.CompactedHFilesDischarger) FileSystem(org.apache.hadoop.fs.FileSystem) Test(org.junit.Test)
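
testMultipleTables drives the cleaner through a ChoreService instead of invoking chore() by hand, so the delegate runs on the same periodic scheduling path the master uses. The sketch below shows that mechanism with a trivial hypothetical chore; the chore name, period, and printed message are invented, while ChoreService, ScheduledChore, scheduleChore, and the StoppableImplementation test utility are the pieces used in the test above.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.util.StoppableImplementation;

public class ChoreSketch {
    public static void main(String[] args) throws InterruptedException {
        Stoppable stop = new StoppableImplementation();
        ChoreService choreService = new ChoreService("SKETCH");

        // A chore is a named task run on a fixed period until its Stoppable is
        // stopped; the HFileCleaner scheduled in testMultipleTables is one such chore.
        ScheduledChore heartbeat = new ScheduledChore("heartbeat", stop, 100) {
            @Override
            protected void chore() {
                System.out.println("chore tick");
            }
        };
        choreService.scheduleChore(heartbeat);

        Thread.sleep(500);

        // Stopping through the Stoppable and shutting the service down ends the
        // chore, mirroring stop.stop("") at the end of the test.
        stop.stop("done");
        choreService.shutdown();
    }
}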

Aggregations

CountDownLatch (java.util.concurrent.CountDownLatch) 3
BaseHFileCleanerDelegate (org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate) 3
ArrayList (java.util.ArrayList) 2
Configuration (org.apache.hadoop.conf.Configuration) 2
FileSystem (org.apache.hadoop.fs.FileSystem) 2
Path (org.apache.hadoop.fs.Path) 2
Stoppable (org.apache.hadoop.hbase.Stoppable) 2
ColumnFamilyDescriptor (org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) 2
HFileCleaner (org.apache.hadoop.hbase.master.cleaner.HFileCleaner) 2
CompactedHFilesDischarger (org.apache.hadoop.hbase.regionserver.CompactedHFilesDischarger) 2
HRegion (org.apache.hadoop.hbase.regionserver.HRegion) 2
StoppableImplementation (org.apache.hadoop.hbase.util.StoppableImplementation) 2
Test (org.junit.Test) 2
FileStatus (org.apache.hadoop.fs.FileStatus) 1
ChoreService (org.apache.hadoop.hbase.ChoreService) 1
InvocationOnMock (org.mockito.invocation.InvocationOnMock) 1