Example 1 with HFileCleaner

use of org.apache.hadoop.hbase.master.cleaner.HFileCleaner in project hbase by apache.

the class TestMasterRegionCompaction method postSetUp.

@Override
protected void postSetUp() throws IOException {
    Configuration conf = htu.getConfiguration();
    // Archived hfiles of the master local store become deletable after a 5 s TTL.
    conf.setLong(TimeToLiveMasterLocalStoreHFileCleaner.TTL_CONF_KEY, 5000);
    Path testDir = htu.getDataTestDir();
    FileSystem fs = testDir.getFileSystem(conf);
    Path globalArchivePath = HFileArchiveUtil.getArchivePath(conf);
    // Run the cleaner chore every 500 ms against the global archive directory.
    hfileCleaner = new HFileCleaner(500, new Stoppable() {

        private volatile boolean stopped = false;

        @Override
        public void stop(String why) {
            stopped = true;
        }

        @Override
        public boolean isStopped() {
            return stopped;
        }
    }, conf, fs, globalArchivePath, hfileCleanerPool);
    choreService.scheduleChore(hfileCleaner);
}
Also used: Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) FileSystem(org.apache.hadoop.fs.FileSystem) Stoppable(org.apache.hadoop.hbase.Stoppable) HFileCleaner(org.apache.hadoop.hbase.master.cleaner.HFileCleaner) TimeToLiveMasterLocalStoreHFileCleaner(org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner)
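
The anonymous Stoppable above is boilerplate that recurs whenever a cleaner chore is wired up by hand. Below is a minimal sketch of the same contract factored into a standalone class; the class name is hypothetical, and the later examples use org.apache.hadoop.hbase.util.StoppableImplementation, which plays the same role.

import org.apache.hadoop.hbase.Stoppable;

// Hypothetical reusable stand-in for the anonymous Stoppable above.
public class SimpleStoppable implements Stoppable {

    private volatile boolean stopped = false;

    @Override
    public void stop(String why) {
        // Record the stop request; the cleaner chore polls isStopped() between runs.
        stopped = true;
    }

    @Override
    public boolean isStopped() {
        return stopped;
    }
}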

Example 2 with HFileCleaner

use of org.apache.hadoop.hbase.master.cleaner.HFileCleaner in project hbase by apache.

the class TestZooKeeperTableArchiveClient method testArchivingOnSingleTable.

@Test
public void testArchivingOnSingleTable() throws Exception {
    createArchiveDirectory();
    FileSystem fs = UTIL.getTestFileSystem();
    Path archiveDir = getArchiveDir();
    Path tableDir = getTableDir(STRING_TABLE_NAME);
    toCleanup.add(archiveDir);
    toCleanup.add(tableDir);
    Configuration conf = UTIL.getConfiguration();
    // set up the delegate
    Stoppable stop = new StoppableImplementation();
    HFileCleaner cleaner = setupAndCreateCleaner(conf, fs, archiveDir, stop);
    List<BaseHFileCleanerDelegate> cleaners = turnOnArchiving(STRING_TABLE_NAME, cleaner);
    final LongTermArchivingHFileCleaner delegate = (LongTermArchivingHFileCleaner) cleaners.get(0);
    // create the region
    ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.of(TEST_FAM);
    HRegion region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd);
    List<HRegion> regions = new ArrayList<>();
    regions.add(region);
    Mockito.doReturn(regions).when(rss).getRegions();
    final CompactedHFilesDischarger compactionCleaner = new CompactedHFilesDischarger(100, stop, rss, false);
    loadFlushAndCompact(region, TEST_FAM);
    compactionCleaner.chore();
    // get the current hfiles in the archive directory
    List<Path> files = getAllFiles(fs, archiveDir);
    if (files == null) {
        CommonFSUtils.logFileSystemState(fs, UTIL.getDataTestDir(), LOG);
        throw new RuntimeException("Didn't archive any files!");
    }
    CountDownLatch finished = setupCleanerWatching(delegate, cleaners, files.size());
    runCleaner(cleaner, finished, stop);
    // we know the cleaner ran, so now check all the files again to make sure they are still there
    List<Path> archivedFiles = getAllFiles(fs, archiveDir);
    assertEquals("Archived files changed after running archive cleaner.", files, archivedFiles);
    // but we still have the archive directory
    assertTrue(fs.exists(HFileArchiveUtil.getArchivePath(UTIL.getConfiguration())));
}
Also used: Path(org.apache.hadoop.fs.Path) BaseHFileCleanerDelegate(org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate) Configuration(org.apache.hadoop.conf.Configuration) StoppableImplementation(org.apache.hadoop.hbase.util.StoppableImplementation) ArrayList(java.util.ArrayList) Stoppable(org.apache.hadoop.hbase.Stoppable) HFileCleaner(org.apache.hadoop.hbase.master.cleaner.HFileCleaner) ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) CountDownLatch(java.util.concurrent.CountDownLatch) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) CompactedHFilesDischarger(org.apache.hadoop.hbase.regionserver.CompactedHFilesDischarger) FileSystem(org.apache.hadoop.fs.FileSystem) Test(org.junit.Test)
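
setupCleanerWatching returns a CountDownLatch that trips once the delegate has examined every archived file, and runCleaner blocks on it. Below is a minimal sketch of that latch pattern in plain Java, independent of the HBase test helpers; all names here are illustrative.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class LatchWatchDemo {

    public static void main(String[] args) throws InterruptedException {
        // One latch count per file the background cleaner must visit.
        final int fileCount = 3;
        final CountDownLatch finished = new CountDownLatch(fileCount);

        // Simulated background cleaner: counts down once per file it checks.
        Thread cleaner = new Thread(() -> {
            for (int i = 0; i < fileCount; i++) {
                // ... examine file i, decide whether it is deletable ...
                finished.countDown();
            }
        });
        cleaner.start();

        // The test thread blocks until every file has been checked, with a timeout
        // so a wedged cleaner fails the test instead of hanging it.
        if (!finished.await(30, TimeUnit.SECONDS)) {
            throw new IllegalStateException("cleaner did not finish in time");
        }
        cleaner.join();
    }
}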

Example 3 with HFileCleaner

use of org.apache.hadoop.hbase.master.cleaner.HFileCleaner in project hbase by apache.

the class TestHFileArchiving method getHFileCleaner.

// Avoid passing a null master to CleanerChore, see HBASE-21175
private HFileCleaner getHFileCleaner(Stoppable stoppable, Configuration conf, FileSystem fs, Path archiveDir) throws IOException {
    // The params map carries the live master so the CleanerChore never sees a null master.
    Map<String, Object> params = new HashMap<>();
    params.put(HMaster.MASTER, UTIL.getMiniHBaseCluster().getMaster());
    HFileCleaner cleaner = new HFileCleaner(1, stoppable, conf, fs, archiveDir, POOL, params);
    return Objects.requireNonNull(cleaner);
}
Also used: HashMap(java.util.HashMap) HFileCleaner(org.apache.hadoop.hbase.master.cleaner.HFileCleaner)
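
The params map exists so that cleaner delegates can see the live master (HBASE-21175). On the receiving side, a delegate can pick the master back out in its init hook. The sketch below is illustrative only, assuming the FileCleanerDelegate#init(Map) contract and the HMaster.MASTER key used above; it is not how the shipped delegates are implemented.

import java.util.Map;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate;

// Illustrative delegate that reads the master handed over via the params map.
public class MasterAwareCleanerDelegate extends BaseHFileCleanerDelegate {

    private HMaster master;

    @Override
    public void init(Map<String, Object> params) {
        if (params != null) {
            // Same key the test puts into the map above.
            master = (HMaster) params.get(HMaster.MASTER);
        }
    }

    @Override
    protected boolean isFileDeletable(FileStatus fStat) {
        // Refuse to delete anything until a master reference is available
        // (placeholder logic, purely for illustration).
        return master != null;
    }
}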

Example 4 with HFileCleaner

use of org.apache.hadoop.hbase.master.cleaner.HFileCleaner in project hbase by apache.

the class TestHFileArchiving method testCleaningRace.

/**
 * Test HFileArchiver.resolveAndArchive() race condition HBASE-7643
 */
@Test
public void testCleaningRace() throws Exception {
    final long TEST_TIME = 20 * 1000;
    final ChoreService choreService = new ChoreService("TEST_SERVER_NAME");
    Configuration conf = UTIL.getMiniHBaseCluster().getMaster().getConfiguration();
    Path rootDir = UTIL.getDataTestDirOnTestFS("testCleaningRace");
    FileSystem fs = UTIL.getTestFileSystem();
    Path archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
    Path regionDir = new Path(CommonFSUtils.getTableDir(new Path("./"), TableName.valueOf(name.getMethodName())), "abcdef");
    Path familyDir = new Path(regionDir, "cf");
    Path sourceRegionDir = new Path(rootDir, regionDir);
    fs.mkdirs(sourceRegionDir);
    Stoppable stoppable = new StoppableImplementation();
    // The cleaner should be looping without long pauses to reproduce the race condition.
    HFileCleaner cleaner = getHFileCleaner(stoppable, conf, fs, archiveDir);
    assertNotNull("cleaner should not be null", cleaner);
    try {
        choreService.scheduleChore(cleaner);
        // Keep creating/archiving new files while the cleaner is running in the other thread
        long startTime = EnvironmentEdgeManager.currentTime();
        for (long fid = 0; (EnvironmentEdgeManager.currentTime() - startTime) < TEST_TIME; ++fid) {
            Path file = new Path(familyDir, String.valueOf(fid));
            Path sourceFile = new Path(rootDir, file);
            Path archiveFile = new Path(archiveDir, file);
            fs.createNewFile(sourceFile);
            try {
                // Try to archive the file
                HFileArchiver.archiveRegion(fs, rootDir, sourceRegionDir.getParent(), sourceRegionDir);
                // The archiver succeeded: the file is no longer in the original location
                // but is now in the archive location.
                LOG.debug("hfile=" + fid + " should be in the archive");
                assertTrue(fs.exists(archiveFile));
                assertFalse(fs.exists(sourceFile));
            } catch (IOException e) {
                // The archiver was unable to archive the file, probably due to the
                // HBASE-7643 race condition. In that case the file should not be in the
                // archive and should still be in the original location.
                LOG.debug("hfile=" + fid + " should be in the source location");
                assertFalse(fs.exists(archiveFile));
                assertTrue(fs.exists(sourceFile));
                // Delete the file so it is not picked up again in the next iteration
                fs.delete(sourceFile, false);
            }
        }
    } finally {
        stoppable.stop("test end");
        cleaner.cancel(true);
        choreService.shutdown();
        fs.delete(rootDir, true);
    }
}
Also used: Path(org.apache.hadoop.fs.Path) ChoreService(org.apache.hadoop.hbase.ChoreService) Configuration(org.apache.hadoop.conf.Configuration) StoppableImplementation(org.apache.hadoop.hbase.util.StoppableImplementation) FileSystem(org.apache.hadoop.fs.FileSystem) Stoppable(org.apache.hadoop.hbase.Stoppable) IOException(java.io.IOException) HFileCleaner(org.apache.hadoop.hbase.master.cleaner.HFileCleaner) Test(org.junit.Test)
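
Two details of this test are worth isolating: the loop runs for a fixed wall-clock budget via EnvironmentEdgeManager.currentTime(), HBase's injectable clock, and each file is addressed by one table-relative path resolved under both roots, so the source and archive locations always mirror each other. Below is a minimal sketch of that path mirroring using only the standard org.apache.hadoop.fs.Path constructors; the directory names are illustrative.

import org.apache.hadoop.fs.Path;

public class ArchivePathDemo {

    public static void main(String[] args) {
        // One table-relative path, resolved under both the root and the archive root.
        Path rootDir = new Path("/hbase");
        Path archiveDir = new Path(rootDir, "archive");
        Path file = new Path("data/default/t1/abcdef/cf/0");

        Path sourceFile = new Path(rootDir, file);     // /hbase/data/default/t1/abcdef/cf/0
        Path archiveFile = new Path(archiveDir, file); // /hbase/archive/data/default/t1/abcdef/cf/0

        // Exactly one of the two should exist after an archive attempt, which is
        // what the assertions in the try/catch above verify.
        System.out.println(sourceFile + " <-> " + archiveFile);
    }
}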

Example 5 with HFileCleaner

use of org.apache.hadoop.hbase.master.cleaner.HFileCleaner in project hbase by apache.

the class TestZooKeeperTableArchiveClient method testMultipleTables.

/**
 * Test archiving/cleaning across multiple tables, where some are retained, and others aren't
 * @throws Exception on failure
 */
@Test
public void testMultipleTables() throws Exception {
    createArchiveDirectory();
    String otherTable = "otherTable";
    FileSystem fs = UTIL.getTestFileSystem();
    Path archiveDir = getArchiveDir();
    Path tableDir = getTableDir(STRING_TABLE_NAME);
    Path otherTableDir = getTableDir(otherTable);
    // register cleanup for the created directories
    toCleanup.add(archiveDir);
    toCleanup.add(tableDir);
    toCleanup.add(otherTableDir);
    Configuration conf = UTIL.getConfiguration();
    // set up the delegate
    Stoppable stop = new StoppableImplementation();
    final ChoreService choreService = new ChoreService("TEST_SERVER_NAME");
    HFileCleaner cleaner = setupAndCreateCleaner(conf, fs, archiveDir, stop);
    List<BaseHFileCleanerDelegate> cleaners = turnOnArchiving(STRING_TABLE_NAME, cleaner);
    final LongTermArchivingHFileCleaner delegate = (LongTermArchivingHFileCleaner) cleaners.get(0);
    // create the region
    ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.of(TEST_FAM);
    HRegion region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd);
    List<HRegion> regions = new ArrayList<>();
    regions.add(region);
    Mockito.doReturn(regions).when(rss).getRegions();
    final CompactedHFilesDischarger compactionCleaner = new CompactedHFilesDischarger(100, stop, rss, false);
    loadFlushAndCompact(region, TEST_FAM);
    compactionCleaner.chore();
    // create another table that we don't archive
    hcd = ColumnFamilyDescriptorBuilder.of(TEST_FAM);
    HRegion otherRegion = UTIL.createTestRegion(otherTable, hcd);
    regions = new ArrayList<>();
    regions.add(otherRegion);
    Mockito.doReturn(regions).when(rss).getRegions();
    final CompactedHFilesDischarger compactionCleaner1 = new CompactedHFilesDischarger(100, stop, rss, false);
    loadFlushAndCompact(otherRegion, TEST_FAM);
    compactionCleaner1.chore();
    // get the current hfiles in the archive directory; they should all have been archived
    List<Path> files = getAllFiles(fs, archiveDir);
    if (files == null) {
        CommonFSUtils.logFileSystemState(fs, archiveDir, LOG);
        throw new RuntimeException("Didn't load archive any files!");
    }
    // make sure we have files from both tables
    int initialCountForPrimary = 0;
    int initialCountForOtherTable = 0;
    for (Path file : files) {
        String tableName = file.getParent().getParent().getParent().getName();
        // check to which table this file belongs
        if (tableName.equals(otherTable)) {
            initialCountForOtherTable++;
        } else if (tableName.equals(STRING_TABLE_NAME)) {
            initialCountForPrimary++;
        }
    }
    assertTrue("Didn't archive files for:" + STRING_TABLE_NAME, initialCountForPrimary > 0);
    assertTrue("Didn't archive files for:" + otherTable, initialCountForOtherTable > 0);
    // run the cleaner; the latch must count every archived file plus the three 'otherTable'
    // directories, since the cleaner checks (and deletes) that table's directories and files
    // while retaining the files of the primary table
    CountDownLatch finished = setupCleanerWatching(delegate, cleaners, files.size() + 3);
    // run the cleaner
    choreService.scheduleChore(cleaner);
    // wait for the cleaner to check all the files
    finished.await();
    // stop the cleaner
    stop.stop("");
    // we know the cleaner ran, so now check all the files again to make sure they are still there
    List<Path> archivedFiles = getAllFiles(fs, archiveDir);
    int archivedForPrimary = 0;
    for (Path file : archivedFiles) {
        String tableName = file.getParent().getParent().getParent().getName();
        // ensure we don't have files from the non-archived table
        assertFalse("Have a file from the non-archived table: " + file, tableName.equals(otherTable));
        if (tableName.equals(STRING_TABLE_NAME)) {
            archivedForPrimary++;
        }
    }
    assertEquals("Not all archived files for the primary table were retained.", initialCountForPrimary, archivedForPrimary);
    // but we still have the archive directory
    assertTrue("Archive directory was deleted via archiver", fs.exists(archiveDir));
}
Also used: Path(org.apache.hadoop.fs.Path) BaseHFileCleanerDelegate(org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate) Configuration(org.apache.hadoop.conf.Configuration) StoppableImplementation(org.apache.hadoop.hbase.util.StoppableImplementation) ArrayList(java.util.ArrayList) Stoppable(org.apache.hadoop.hbase.Stoppable) HFileCleaner(org.apache.hadoop.hbase.master.cleaner.HFileCleaner) ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) CountDownLatch(java.util.concurrent.CountDownLatch) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) ChoreService(org.apache.hadoop.hbase.ChoreService) CompactedHFilesDischarger(org.apache.hadoop.hbase.regionserver.CompactedHFilesDischarger) FileSystem(org.apache.hadoop.fs.FileSystem) Test(org.junit.Test)
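
The table name is recovered from each archived file with three getParent() hops, which works because the archive layout assumed by this test is <archiveDir>/<table>/<region>/<family>/<hfile>. Below is a small sketch of that walk; the path components are illustrative.

import org.apache.hadoop.fs.Path;

public class ArchiveLayoutDemo {

    public static void main(String[] args) {
        // Layout assumed by the test: <archiveDir>/<table>/<region>/<family>/<hfile>.
        Path archived = new Path("/archive/otherTable/region1/cf/hfile0");

        // Walk back up: family -> region -> table.
        String family = archived.getParent().getName();                        // "cf"
        String region = archived.getParent().getParent().getName();            // "region1"
        String table = archived.getParent().getParent().getParent().getName(); // "otherTable"

        System.out.println(table + "/" + region + "/" + family);
    }
}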

Aggregations

HFileCleaner (org.apache.hadoop.hbase.master.cleaner.HFileCleaner): 8
Path (org.apache.hadoop.fs.Path): 6
Test (org.junit.Test): 5
Configuration (org.apache.hadoop.conf.Configuration): 4
FileSystem (org.apache.hadoop.fs.FileSystem): 4
Stoppable (org.apache.hadoop.hbase.Stoppable): 4
StoppableImplementation (org.apache.hadoop.hbase.util.StoppableImplementation): 3
ArrayList (java.util.ArrayList): 2
HashMap (java.util.HashMap): 2
CountDownLatch (java.util.concurrent.CountDownLatch): 2
ChoreService (org.apache.hadoop.hbase.ChoreService): 2
ColumnFamilyDescriptor (org.apache.hadoop.hbase.client.ColumnFamilyDescriptor): 2
BaseHFileCleanerDelegate (org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate): 2
LogCleaner (org.apache.hadoop.hbase.master.cleaner.LogCleaner): 2
ReplicationBarrierCleaner (org.apache.hadoop.hbase.master.cleaner.ReplicationBarrierCleaner): 2
CompactedHFilesDischarger (org.apache.hadoop.hbase.regionserver.CompactedHFilesDischarger): 2
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 2
IOException (java.io.IOException): 1
ScheduledChore (org.apache.hadoop.hbase.ScheduledChore): 1
TableName (org.apache.hadoop.hbase.TableName): 1