Example 6 with HFileCleaner

use of org.apache.hadoop.hbase.master.cleaner.HFileCleaner in project hbase by apache.

From class TestZooKeeperTableArchiveClient, method testMultipleTables.

/**
 * Test archiving/cleaning across multiple tables, where some are retained, and others aren't
 * @throws Exception on failure
 */
@Test
public void testMultipleTables() throws Exception {
    createArchiveDirectory();
    String otherTable = "otherTable";
    FileSystem fs = UTIL.getTestFileSystem();
    Path archiveDir = getArchiveDir();
    Path tableDir = getTableDir(STRING_TABLE_NAME);
    Path otherTableDir = getTableDir(otherTable);
    // register cleanup for the created directories
    toCleanup.add(archiveDir);
    toCleanup.add(tableDir);
    toCleanup.add(otherTableDir);
    Configuration conf = UTIL.getConfiguration();
    // setup the delegate
    Stoppable stop = new StoppableImplementation();
    final ChoreService choreService = new ChoreService("TEST_SERVER_NAME");
    HFileCleaner cleaner = setupAndCreateCleaner(conf, fs, archiveDir, stop);
    List<BaseHFileCleanerDelegate> cleaners = turnOnArchiving(STRING_TABLE_NAME, cleaner);
    final LongTermArchivingHFileCleaner delegate = (LongTermArchivingHFileCleaner) cleaners.get(0);
    // create the region
    ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.of(TEST_FAM);
    HRegion region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd);
    List<HRegion> regions = new ArrayList<>();
    regions.add(region);
    Mockito.doReturn(regions).when(rss).getRegions();
    final CompactedHFilesDischarger compactionCleaner = new CompactedHFilesDischarger(100, stop, rss, false);
    loadFlushAndCompact(region, TEST_FAM);
    compactionCleaner.chore();
    // create another table that we don't archive
    hcd = ColumnFamilyDescriptorBuilder.of(TEST_FAM);
    HRegion otherRegion = UTIL.createTestRegion(otherTable, hcd);
    regions = new ArrayList<>();
    regions.add(otherRegion);
    Mockito.doReturn(regions).when(rss).getRegions();
    final CompactedHFilesDischarger compactionCleaner1 = new CompactedHFilesDischarger(100, stop, rss, false);
    loadFlushAndCompact(otherRegion, TEST_FAM);
    compactionCleaner1.chore();
    // get the current hfiles in the archive directory
    // Should be archived
    List<Path> files = getAllFiles(fs, archiveDir);
    if (files == null) {
        CommonFSUtils.logFileSystemState(fs, archiveDir, LOG);
        throw new RuntimeException("Didn't archive any files!");
    }
    // make sure we have files from both tables
    int initialCountForPrimary = 0;
    int initialCountForOtherTable = 0;
    for (Path file : files) {
        String tableName = file.getParent().getParent().getParent().getName();
        // check to which table this file belongs
        if (tableName.equals(otherTable)) {
            initialCountForOtherTable++;
        } else if (tableName.equals(STRING_TABLE_NAME)) {
            initialCountForPrimary++;
        }
    }
    assertTrue("Didn't archive files for:" + STRING_TABLE_NAME, initialCountForPrimary > 0);
    assertTrue("Didn't archive files for:" + otherTable, initialCountForOtherTable > 0);
    // run the cleaners, checking for each of the directories + files (both should be deleted and
    // need to be checked) in 'otherTable' and the files (which should be retained) in the 'table'
    CountDownLatch finished = setupCleanerWatching(delegate, cleaners, files.size() + 3);
    // run the cleaner
    choreService.scheduleChore(cleaner);
    // wait for the cleaner to check all the files
    finished.await();
    // stop the cleaner
    stop.stop("");
    // we know the cleaner ran, so now check all the files again to make sure they are still there
    List<Path> archivedFiles = getAllFiles(fs, archiveDir);
    int archivedForPrimary = 0;
    for (Path file : archivedFiles) {
        String tableName = file.getParent().getParent().getParent().getName();
        // ensure we don't have files from the non-archived table
        assertFalse("Have a file from the non-archived table: " + file, tableName.equals(otherTable));
        if (tableName.equals(STRING_TABLE_NAME)) {
            archivedForPrimary++;
        }
    }
    assertEquals("Not all archived files for the primary table were retained.", initialCountForPrimary, archivedForPrimary);
    // but we still have the archive directory
    assertTrue("Archive directory was deleted via archiver", fs.exists(archiveDir));
}
Also used: Path (org.apache.hadoop.fs.Path), BaseHFileCleanerDelegate (org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate), Configuration (org.apache.hadoop.conf.Configuration), StoppableImplementation (org.apache.hadoop.hbase.util.StoppableImplementation), ArrayList (java.util.ArrayList), Stoppable (org.apache.hadoop.hbase.Stoppable), HFileCleaner (org.apache.hadoop.hbase.master.cleaner.HFileCleaner), ColumnFamilyDescriptor (org.apache.hadoop.hbase.client.ColumnFamilyDescriptor), CountDownLatch (java.util.concurrent.CountDownLatch), HRegion (org.apache.hadoop.hbase.regionserver.HRegion), ChoreService (org.apache.hadoop.hbase.ChoreService), CompactedHFilesDischarger (org.apache.hadoop.hbase.regionserver.CompactedHFilesDischarger), FileSystem (org.apache.hadoop.fs.FileSystem), Test (org.junit.Test)
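
The setupAndCreateCleaner helper is referenced but not shown above. Below is a minimal sketch of what it plausibly does, assuming the HFileCleaner(period, stopper, conf, fs, directory) constructor and the plugin key exposed as HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS; the class name CleanerSetupSketch is invented for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.backup.example.LongTermArchivingHFileCleaner;
import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;

public class CleanerSetupSketch {
    // Hypothetical reconstruction of the test's setupAndCreateCleaner helper.
    static HFileCleaner setupAndCreateCleaner(Configuration conf, FileSystem fs,
            Path archiveDir, Stoppable stop) {
        // Register the archiving delegate so the cleaner consults it before
        // deleting anything under the archive directory.
        conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
                LongTermArchivingHFileCleaner.class.getCanonicalName());
        // Run the chore on a short period so the test finishes quickly.
        return new HFileCleaner(1000, stop, conf, fs, archiveDir);
    }
}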

Example 7 with HFileCleaner

use of org.apache.hadoop.hbase.master.cleaner.HFileCleaner in project hbase by apache.

From class TestMasterChoreScheduled, method testDefaultScheduledChores.

@Test
public void testDefaultScheduledChores() {
    // test if logCleaner chore is scheduled by default in HMaster init
    TestChoreField<LogCleaner> logCleanerTestChoreField = new TestChoreField<>();
    LogCleaner logCleaner = logCleanerTestChoreField.getChoreObj("logCleaner");
    logCleanerTestChoreField.testIfChoreScheduled(logCleaner);
    // test if hfileCleaner chore is scheduled by default in HMaster init
    TestChoreField<HFileCleaner> hFileCleanerTestChoreField = new TestChoreField<>();
    HFileCleaner hFileCleaner = hFileCleanerTestChoreField.getChoreObj("hfileCleaner");
    hFileCleanerTestChoreField.testIfChoreScheduled(hFileCleaner);
    // test if replicationBarrierCleaner chore is scheduled by default in HMaster init
    TestChoreField<ReplicationBarrierCleaner> replicationBarrierCleanerTestChoreField = new TestChoreField<>();
    ReplicationBarrierCleaner replicationBarrierCleaner = replicationBarrierCleanerTestChoreField.getChoreObj("replicationBarrierCleaner");
    replicationBarrierCleanerTestChoreField.testIfChoreScheduled(replicationBarrierCleaner);
    // test if clusterStatusChore chore is scheduled by default in HMaster init
    TestChoreField<ClusterStatusChore> clusterStatusChoreTestChoreField = new TestChoreField<>();
    ClusterStatusChore clusterStatusChore = clusterStatusChoreTestChoreField.getChoreObj("clusterStatusChore");
    clusterStatusChoreTestChoreField.testIfChoreScheduled(clusterStatusChore);
    // test if balancerChore chore is scheduled by default in HMaster init
    TestChoreField<BalancerChore> balancerChoreTestChoreField = new TestChoreField<>();
    BalancerChore balancerChore = balancerChoreTestChoreField.getChoreObj("balancerChore");
    balancerChoreTestChoreField.testIfChoreScheduled(balancerChore);
    // test if normalizerChore chore is scheduled by default in HMaster init
    ScheduledChore regionNormalizerChore = hMaster.getRegionNormalizerManager().getRegionNormalizerChore();
    TestChoreField<ScheduledChore> regionNormalizerChoreTestChoreField = new TestChoreField<>();
    regionNormalizerChoreTestChoreField.testIfChoreScheduled(regionNormalizerChore);
    // test if catalogJanitorChore chore is scheduled by default in HMaster init
    TestChoreField<CatalogJanitor> catalogJanitorTestChoreField = new TestChoreField<>();
    CatalogJanitor catalogJanitor = catalogJanitorTestChoreField.getChoreObj("catalogJanitorChore");
    catalogJanitorTestChoreField.testIfChoreScheduled(catalogJanitor);
    // test if hbckChore chore is scheduled by default in HMaster init
    TestChoreField<HbckChore> hbckChoreTestChoreField = new TestChoreField<>();
    HbckChore hbckChore = hbckChoreTestChoreField.getChoreObj("hbckChore");
    hbckChoreTestChoreField.testIfChoreScheduled(hbckChore);
}
Also used: BalancerChore (org.apache.hadoop.hbase.master.balancer.BalancerChore), CatalogJanitor (org.apache.hadoop.hbase.master.janitor.CatalogJanitor), ClusterStatusChore (org.apache.hadoop.hbase.master.balancer.ClusterStatusChore), HFileCleaner (org.apache.hadoop.hbase.master.cleaner.HFileCleaner), LogCleaner (org.apache.hadoop.hbase.master.cleaner.LogCleaner), ReplicationBarrierCleaner (org.apache.hadoop.hbase.master.cleaner.ReplicationBarrierCleaner), ScheduledChore (org.apache.hadoop.hbase.ScheduledChore), Test (org.junit.Test)
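
The TestChoreField helper is not shown above. A minimal sketch of how it plausibly works: it reads a private chore field off the running HMaster by reflection and asserts that the master's ChoreService actually scheduled it. The field names passed to getChoreObj ("hfileCleaner", etc.) are taken from the test; the class name TestChoreFieldSketch and the constructor taking an HMaster are invented for this sketch.

import java.lang.reflect.Field;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.master.HMaster;
import org.junit.Assert;

class TestChoreFieldSketch<E extends ScheduledChore> {
    private final HMaster hMaster;

    TestChoreFieldSketch(HMaster hMaster) {
        this.hMaster = hMaster;
    }

    @SuppressWarnings("unchecked")
    E getChoreObj(String fieldName) throws Exception {
        // Read the private chore field directly off the live master instance.
        Field masterField = HMaster.class.getDeclaredField(fieldName);
        masterField.setAccessible(true);
        return (E) masterField.get(hMaster);
    }

    void testIfChoreScheduled(E choreObj) {
        Assert.assertNotNull(choreObj);
        // ChoreService.isChoreScheduled reports whether the chore was scheduled.
        Assert.assertTrue(hMaster.getChoreService().isChoreScheduled(choreObj));
    }
}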

Example 8 with HFileCleaner

use of org.apache.hadoop.hbase.master.cleaner.HFileCleaner in project hbase by apache.

From class TestSnapshotScannerHDFSAclController, method testCleanArchiveTableDir.

@Test
public void testCleanArchiveTableDir() throws Exception {
    final String grantUserName = name.getMethodName();
    User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {});
    String namespace = name.getMethodName();
    TableName table = TableName.valueOf(namespace, name.getMethodName());
    String snapshot = namespace + "t1";
    TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table);
    snapshotAndWait(snapshot, table);
    TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName, table, READ);
    TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot, 6);
    // HFileCleaner will not delete the archive table directory even if it's an empty directory
    HFileCleaner cleaner = TEST_UTIL.getHBaseCluster().getMaster().getHFileCleaner();
    cleaner.choreForTesting();
    Path archiveTableDir = HFileArchiveUtil.getTableArchivePath(rootDir, table);
    assertTrue(FS.exists(archiveTableDir));
    checkUserAclEntry(FS, helper.getTableRootPaths(table, false), grantUserName, true, true);
    // Check SnapshotScannerHDFSAclCleaner method
    assertTrue(SnapshotScannerHDFSAclCleaner.isArchiveTableDir(archiveTableDir));
    assertTrue(SnapshotScannerHDFSAclCleaner.isArchiveNamespaceDir(archiveTableDir.getParent()));
    assertTrue(SnapshotScannerHDFSAclCleaner.isArchiveDataDir(archiveTableDir.getParent().getParent()));
    assertFalse(SnapshotScannerHDFSAclCleaner.isArchiveDataDir(archiveTableDir.getParent().getParent().getParent()));
    deleteTable(table);
}
Also used: Path (org.apache.hadoop.fs.Path), TableName (org.apache.hadoop.hbase.TableName), User (org.apache.hadoop.hbase.security.User), HFileCleaner (org.apache.hadoop.hbase.master.cleaner.HFileCleaner), Test (org.junit.Test)
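
The directory checks at the end of this test lean on the standard archive layout, where getTableArchivePath resolves to <rootDir>/archive/data/<namespace>/<table>; that is why the parent of the table dir is the namespace dir and the grandparent is the archive data dir. A minimal sketch under that assumption (ArchiveLayoutSketch and the "ns"/"t1" names are invented for illustration):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;

public class ArchiveLayoutSketch {
    public static void main(String[] args) {
        Path rootDir = new Path("/hbase");
        TableName table = TableName.valueOf("ns", "t1");
        // Resolves to <rootDir>/archive/data/<namespace>/<table>
        Path archiveTableDir = HFileArchiveUtil.getTableArchivePath(rootDir, table);
        System.out.println(archiveTableDir);                           // /hbase/archive/data/ns/t1
        System.out.println(archiveTableDir.getParent());               // namespace dir: /hbase/archive/data/ns
        System.out.println(archiveTableDir.getParent().getParent());   // archive data dir: /hbase/archive/data
    }
}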

Aggregations

HFileCleaner (org.apache.hadoop.hbase.master.cleaner.HFileCleaner): 8
Path (org.apache.hadoop.fs.Path): 6
Test (org.junit.Test): 5
Configuration (org.apache.hadoop.conf.Configuration): 4
FileSystem (org.apache.hadoop.fs.FileSystem): 4
Stoppable (org.apache.hadoop.hbase.Stoppable): 4
StoppableImplementation (org.apache.hadoop.hbase.util.StoppableImplementation): 3
ArrayList (java.util.ArrayList): 2
HashMap (java.util.HashMap): 2
CountDownLatch (java.util.concurrent.CountDownLatch): 2
ChoreService (org.apache.hadoop.hbase.ChoreService): 2
ColumnFamilyDescriptor (org.apache.hadoop.hbase.client.ColumnFamilyDescriptor): 2
BaseHFileCleanerDelegate (org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate): 2
LogCleaner (org.apache.hadoop.hbase.master.cleaner.LogCleaner): 2
ReplicationBarrierCleaner (org.apache.hadoop.hbase.master.cleaner.ReplicationBarrierCleaner): 2
CompactedHFilesDischarger (org.apache.hadoop.hbase.regionserver.CompactedHFilesDischarger): 2
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 2
IOException (java.io.IOException): 1
ScheduledChore (org.apache.hadoop.hbase.ScheduledChore): 1
TableName (org.apache.hadoop.hbase.TableName): 1