
Example 26 with Stoppable

Use of org.apache.hadoop.hbase.Stoppable in the Apache Phoenix project.

From class TestParalleIndexWriter, method testCorrectlyCleansUpResources.

@Test
public void testCorrectlyCleansUpResources() throws Exception {
    ExecutorService exec = Executors.newFixedThreadPool(1);
    RegionCoprocessorEnvironment e = Mockito.mock(RegionCoprocessorEnvironment.class);
    Configuration conf = new Configuration();
    Mockito.when(e.getConfiguration()).thenReturn(conf);
    Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap<String, Object>());
    FakeTableFactory factory = new FakeTableFactory(Collections.<ImmutableBytesPtr, HTableInterface>emptyMap());
    TrackingParallelWriterIndexCommitter writer = new TrackingParallelWriterIndexCommitter(VersionInfo.getVersion());
    Abortable mockAbort = Mockito.mock(Abortable.class);
    Stoppable mockStop = Mockito.mock(Stoppable.class);
    // create a simple writer
    writer.setup(factory, exec, mockAbort, mockStop, e);
    // stop the writer
    writer.stop(this.test.getTableNameString() + " finished");
    assertTrue("Factory didn't get shutdown after writer#stop!", factory.shutdown);
    assertTrue("ExectorService isn't terminated after writer#stop!", exec.isShutdown());
    Mockito.verifyZeroInteractions(mockAbort, mockStop);
}
Also used : RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) Configuration(org.apache.hadoop.conf.Configuration) Abortable(org.apache.hadoop.hbase.Abortable) StubAbortable(org.apache.phoenix.hbase.index.StubAbortable) ExecutorService(java.util.concurrent.ExecutorService) Stoppable(org.apache.hadoop.hbase.Stoppable) Test(org.junit.Test)
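
For readers new to the interface, the Stoppable contract that the mock stands in for is tiny: a stop request plus a query for whether one has happened. A minimal sketch follows; the SimpleStoppable class is illustrative only and not part of the Phoenix test above.

public interface Stoppable {

    // Ask the implementer to shut down; 'why' is a human-readable reason,
    // typically logged by implementations.
    void stop(String why);

    // Reports whether stop(String) has already been called.
    boolean isStopped();
}

// Hypothetical implementation, for this sketch only.
class SimpleStoppable implements Stoppable {

    private volatile boolean stopped = false;

    @Override
    public void stop(String why) {
        this.stopped = true;
    }

    @Override
    public boolean isStopped() {
        return this.stopped;
    }
}

The test mocks this contract because it only needs to prove a negative: stopping the writer must not touch its collaborators, which is exactly what Mockito.verifyZeroInteractions(mockAbort, mockStop) asserts.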

Example 27 with Stoppable

Use of org.apache.hadoop.hbase.Stoppable in the Apache Phoenix project.

From class TestParalleWriterIndexCommitter, method testSynchronouslyCompletesAllWrites.

@SuppressWarnings({ "unchecked", "deprecation" })
@Test
public void testSynchronouslyCompletesAllWrites() throws Exception {
    LOG.info("Starting " + test.getTableNameString());
    LOG.info("Current thread is interrupted: " + Thread.interrupted());
    Abortable abort = new StubAbortable();
    RegionCoprocessorEnvironment e = Mockito.mock(RegionCoprocessorEnvironment.class);
    Configuration conf = new Configuration();
    Mockito.when(e.getConfiguration()).thenReturn(conf);
    Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap<String, Object>());
    Region mockRegion = Mockito.mock(Region.class);
    Mockito.when(e.getRegion()).thenReturn(mockRegion);
    HTableDescriptor mockTableDesc = Mockito.mock(HTableDescriptor.class);
    Mockito.when(mockRegion.getTableDesc()).thenReturn(mockTableDesc);
    Stoppable stop = Mockito.mock(Stoppable.class);
    ExecutorService exec = Executors.newFixedThreadPool(1);
    Map<ImmutableBytesPtr, HTableInterface> tables = new LinkedHashMap<ImmutableBytesPtr, HTableInterface>();
    FakeTableFactory factory = new FakeTableFactory(tables);
    ImmutableBytesPtr tableName = new ImmutableBytesPtr(this.test.getTableName());
    Put m = new Put(row);
    m.add(Bytes.toBytes("family"), Bytes.toBytes("qual"), null);
    Multimap<HTableInterfaceReference, Mutation> indexUpdates = ArrayListMultimap.<HTableInterfaceReference, Mutation>create();
    indexUpdates.put(new HTableInterfaceReference(tableName), m);
    HTableInterface table = Mockito.mock(HTableInterface.class);
    final boolean[] completed = new boolean[] { false };
    Mockito.when(table.batch(Mockito.anyList())).thenAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            // just keep track that it was called
            completed[0] = true;
            return null;
        }
    });
    Mockito.when(table.getTableName()).thenReturn(test.getTableName());
    // add the table to the set of tables, so it's returned to the writer
    tables.put(tableName, table);
    // setup the writer and failure policy
    TrackingParallelWriterIndexCommitter writer = new TrackingParallelWriterIndexCommitter(VersionInfo.getVersion());
    writer.setup(factory, exec, abort, stop, e);
    writer.write(indexUpdates, true);
    assertTrue("Writer returned before the table batch completed! Likely a race condition tripped", completed[0]);
    writer.stop(this.test.getTableNameString() + " finished");
    assertTrue("Factory didn't get shutdown after writer#stop!", factory.shutdown);
    assertTrue("ExectorService isn't terminated after writer#stop!", exec.isShutdown());
}
Also used : StubAbortable(org.apache.phoenix.hbase.index.StubAbortable) Configuration(org.apache.hadoop.conf.Configuration) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) LinkedHashMap(java.util.LinkedHashMap) RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) Abortable(org.apache.hadoop.hbase.Abortable) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) Stoppable(org.apache.hadoop.hbase.Stoppable) Put(org.apache.hadoop.hbase.client.Put) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) InvocationOnMock(org.mockito.invocation.InvocationOnMock) HTableInterfaceReference(org.apache.phoenix.hbase.index.table.HTableInterfaceReference) ExecutorService(java.util.concurrent.ExecutorService) Region(org.apache.hadoop.hbase.regionserver.Region) Mutation(org.apache.hadoop.hbase.client.Mutation) Test(org.junit.Test)
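
If a test also needs to inspect which mutations actually reached the table, the completion-flag stub above pairs naturally with org.mockito.ArgumentCaptor. A hedged sketch of that variation, reusing the table variable from the test and meant to run inside a method declaring throws Exception:

    // Capture the argument handed to HTableInterface#batch rather than only
    // flagging that the call happened.
    ArgumentCaptor<List> mutationCaptor = ArgumentCaptor.forClass(List.class);
    Mockito.verify(table).batch(mutationCaptor.capture());
    List<?> sent = mutationCaptor.getValue();
    assertEquals("Expected exactly one index mutation to be written", 1, sent.size());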

Example 28 with Stoppable

Use of org.apache.hadoop.hbase.Stoppable in the Apache HBase project.

From class TestZooKeeperTableArchiveClient, method testMultipleTables.

/**
 * Test archiving/cleaning across multiple tables, where some are retained, and others aren't
 * @throws Exception on failure
 */
@Test
public void testMultipleTables() throws Exception {
    createArchiveDirectory();
    String otherTable = "otherTable";
    FileSystem fs = UTIL.getTestFileSystem();
    Path archiveDir = getArchiveDir();
    Path tableDir = getTableDir(STRING_TABLE_NAME);
    Path otherTableDir = getTableDir(otherTable);
    // register cleanup for the created directories
    toCleanup.add(archiveDir);
    toCleanup.add(tableDir);
    toCleanup.add(otherTableDir);
    Configuration conf = UTIL.getConfiguration();
    // setup the delegate
    Stoppable stop = new StoppableImplementation();
    final ChoreService choreService = new ChoreService("TEST_SERVER_NAME");
    HFileCleaner cleaner = setupAndCreateCleaner(conf, fs, archiveDir, stop);
    List<BaseHFileCleanerDelegate> cleaners = turnOnArchiving(STRING_TABLE_NAME, cleaner);
    final LongTermArchivingHFileCleaner delegate = (LongTermArchivingHFileCleaner) cleaners.get(0);
    // create the region
    ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.of(TEST_FAM);
    HRegion region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd);
    List<HRegion> regions = new ArrayList<>();
    regions.add(region);
    Mockito.doReturn(regions).when(rss).getRegions();
    final CompactedHFilesDischarger compactionCleaner = new CompactedHFilesDischarger(100, stop, rss, false);
    loadFlushAndCompact(region, TEST_FAM);
    compactionCleaner.chore();
    // create another table that we don't archive
    hcd = ColumnFamilyDescriptorBuilder.of(TEST_FAM);
    HRegion otherRegion = UTIL.createTestRegion(otherTable, hcd);
    regions = new ArrayList<>();
    regions.add(otherRegion);
    Mockito.doReturn(regions).when(rss).getRegions();
    final CompactedHFilesDischarger compactionCleaner1 = new CompactedHFilesDischarger(100, stop, rss, false);
    loadFlushAndCompact(otherRegion, TEST_FAM);
    compactionCleaner1.chore();
    // get the current hfiles in the archive directory
    // Should be archived
    List<Path> files = getAllFiles(fs, archiveDir);
    if (files == null) {
        CommonFSUtils.logFileSystemState(fs, archiveDir, LOG);
        throw new RuntimeException("Didn't archive any files!");
    }
    // make sure we have files from both tables
    int initialCountForPrimary = 0;
    int initialCountForOtherTable = 0;
    for (Path file : files) {
        String tableName = file.getParent().getParent().getParent().getName();
        // check to which table this file belongs
        if (tableName.equals(otherTable)) {
            initialCountForOtherTable++;
        } else if (tableName.equals(STRING_TABLE_NAME)) {
            initialCountForPrimary++;
        }
    }
    assertTrue("Didn't archive files for:" + STRING_TABLE_NAME, initialCountForPrimary > 0);
    assertTrue("Didn't archive files for:" + otherTable, initialCountForOtherTable > 0);
    // run the cleaners, checking for each of the directories + files (both should be deleted and
    // need to be checked) in 'otherTable' and the files (which should be retained) in the 'table'
    CountDownLatch finished = setupCleanerWatching(delegate, cleaners, files.size() + 3);
    // run the cleaner
    choreService.scheduleChore(cleaner);
    // wait for the cleaner to check all the files
    finished.await();
    // stop the cleaner
    stop.stop("");
    // we know the cleaner ran, so now check all the files again to make sure they are still there
    List<Path> archivedFiles = getAllFiles(fs, archiveDir);
    int archivedForPrimary = 0;
    for (Path file : archivedFiles) {
        String tableName = file.getParent().getParent().getParent().getName();
        // ensure we don't have files from the non-archived table
        assertFalse("Have a file from the non-archived table: " + file, tableName.equals(otherTable));
        if (tableName.equals(STRING_TABLE_NAME)) {
            archivedForPrimary++;
        }
    }
    assertEquals("Not all archived files for the primary table were retained.", initialCountForPrimary, archivedForPrimary);
    // but we still have the archive directory
    assertTrue("Archive directory was deleted via archiver", fs.exists(archiveDir));
}
Also used : Path(org.apache.hadoop.fs.Path) BaseHFileCleanerDelegate(org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate) Configuration(org.apache.hadoop.conf.Configuration) StoppableImplementation(org.apache.hadoop.hbase.util.StoppableImplementation) ArrayList(java.util.ArrayList) Stoppable(org.apache.hadoop.hbase.Stoppable) HFileCleaner(org.apache.hadoop.hbase.master.cleaner.HFileCleaner) ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) CountDownLatch(java.util.concurrent.CountDownLatch) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) ChoreService(org.apache.hadoop.hbase.ChoreService) CompactedHFilesDischarger(org.apache.hadoop.hbase.regionserver.CompactedHFilesDischarger) FileSystem(org.apache.hadoop.fs.FileSystem) Test(org.junit.Test)
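
The ChoreService machinery the test drives with choreService.scheduleChore(cleaner) follows a simple pattern: a ScheduledChore's chore() body runs periodically until its Stoppable stopper reports stopped. A hedged sketch of that pattern follows; DemoChore, the period, and the name strings are illustrative, not taken from the test.

class DemoChore extends ScheduledChore {

    DemoChore(Stoppable stopper) {
        // name, stopper, period in milliseconds
        super("demo-chore", stopper, 100);
    }

    @Override
    protected void chore() {
        // periodic work goes here; the service stops rescheduling once the
        // stopper reports isStopped() == true
    }
}

// Typical usage, mirroring the test's schedule-then-stop sequence:
ChoreService service = new ChoreService("DEMO_SERVER_NAME");
Stoppable stopper = new StoppableImplementation();
service.scheduleChore(new DemoChore(stopper));
// ... later
stopper.stop("demo finished");
service.shutdown();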

Example 29 with Stoppable

Use of org.apache.hadoop.hbase.Stoppable in the Apache HBase project.

From class TestRegionsRecoveryChore, method testRegionReopensWithStoreRefConfig.

@Test
public void testRegionReopensWithStoreRefConfig() throws Exception {
    regionNo = 0;
    ClusterMetrics clusterMetrics = TestRegionsRecoveryChore.getClusterMetrics(4);
    final Map<ServerName, ServerMetrics> serverMetricsMap = clusterMetrics.getLiveServerMetrics();
    LOG.debug("All Region Names with refCount....");
    for (ServerMetrics serverMetrics : serverMetricsMap.values()) {
        Map<byte[], RegionMetrics> regionMetricsMap = serverMetrics.getRegionMetrics();
        for (RegionMetrics regionMetrics : regionMetricsMap.values()) {
            LOG.debug("name: " + new String(regionMetrics.getRegionName()) + " refCount: " + regionMetrics.getStoreRefCount());
        }
    }
    Mockito.when(hMaster.getClusterMetrics()).thenReturn(clusterMetrics);
    Mockito.when(hMaster.getAssignmentManager()).thenReturn(assignmentManager);
    for (byte[] regionName : REGION_NAME_LIST) {
        Mockito.when(assignmentManager.getRegionInfo(regionName)).thenReturn(TestRegionsRecoveryChore.getRegionInfo(regionName));
    }
    Stoppable stoppable = new StoppableImplementation();
    Configuration configuration = getCustomConf();
    configuration.setInt("hbase.regions.recovery.store.file.ref.count", 300);
    regionsRecoveryChore = new RegionsRecoveryChore(stoppable, configuration, hMaster);
    regionsRecoveryChore.chore();
    // Verify that we need to reopen regions of 2 tables
    Mockito.verify(hMaster, Mockito.times(2)).reopenRegions(Mockito.any(), Mockito.anyList(), Mockito.anyLong(), Mockito.anyLong());
    Mockito.verify(hMaster, Mockito.times(1)).getClusterMetrics();
    // Verify that we need to reopen total 3 regions that have refCount > 300
    Mockito.verify(hMaster, Mockito.times(3)).getAssignmentManager();
    Mockito.verify(assignmentManager, Mockito.times(3)).getRegionInfo(Mockito.any());
}
Also used : ClusterMetrics(org.apache.hadoop.hbase.ClusterMetrics) Configuration(org.apache.hadoop.conf.Configuration) ServerName(org.apache.hadoop.hbase.ServerName) ServerMetrics(org.apache.hadoop.hbase.ServerMetrics) Stoppable(org.apache.hadoop.hbase.Stoppable) RegionMetrics(org.apache.hadoop.hbase.RegionMetrics) Test(org.junit.Test)
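
Conceptually, the chore's pass over the mocked cluster reduces to the same metrics walk the test uses for its debug logging: ClusterMetrics down to ServerMetrics down to per-region store ref counts. A hedged sketch of that walk, illustrative rather than the chore's actual implementation:

    // Collect region names whose store file reference count exceeds the
    // threshold the test configures (300).
    int threshold = 300;
    List<byte[]> candidates = new ArrayList<>();
    for (ServerMetrics serverMetrics : clusterMetrics.getLiveServerMetrics().values()) {
        for (RegionMetrics regionMetrics : serverMetrics.getRegionMetrics().values()) {
            if (regionMetrics.getStoreRefCount() > threshold) {
                candidates.add(regionMetrics.getRegionName());
            }
        }
    }
    // 'candidates' now holds the regions a recovery pass would ask the master to reopen.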

Example 30 with Stoppable

Use of org.apache.hadoop.hbase.Stoppable in the Apache HBase project.

From class TestCleanerChore, method testNoExceptionFromDirectoryWithRacyChildren.

/**
 * The cleaner runs in a loop, where it first checks whether all the files under a directory can
 * be deleted. If they all can, then we try to delete the directory itself. However, a file may be
 * added to that directory after the original check. This test ensures that we don't accidentally
 * delete the directory in that case and don't get spurious IOExceptions.
 * <p>
 * This scenario is from HBASE-7465.
 * @throws Exception on failure
 */
@Test
public void testNoExceptionFromDirectoryWithRacyChildren() throws Exception {
    UTIL.cleanupTestDir();
    Stoppable stop = new StoppableImplementation();
    // need to use a local util so we don't break the rest of the tests, which run on the local FS
    // and get hosed when we start to use a minicluster
    HBaseTestingUtil localUtil = new HBaseTestingUtil();
    Configuration conf = localUtil.getConfiguration();
    final Path testDir = UTIL.getDataTestDir();
    final FileSystem fs = UTIL.getTestFileSystem();
    LOG.debug("Writing test data to: " + testDir);
    String confKey = "hbase.test.cleaner.delegates";
    conf.set(confKey, AlwaysDelete.class.getName());
    AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey, POOL);
    // spy on the delegate so we can hook its file-deletability check below
    AlwaysDelete delegate = (AlwaysDelete) chore.cleanersChain.get(0);
    AlwaysDelete spy = Mockito.spy(delegate);
    chore.cleanersChain.set(0, spy);
    // create the directory layout in the directory to clean
    final Path parent = new Path(testDir, "parent");
    Path file = new Path(parent, "someFile");
    fs.mkdirs(parent);
    // touch a new file
    fs.create(file).close();
    assertTrue("Test file didn't get created.", fs.exists(file));
    final Path racyFile = new Path(parent, "addedFile");
    // when we attempt to delete the original file, add another file in the same directory
    Mockito.doAnswer(new Answer<Boolean>() {

        @Override
        public Boolean answer(InvocationOnMock invocation) throws Throwable {
            fs.create(racyFile).close();
            CommonFSUtils.logFileSystemState(fs, testDir, LOG);
            return (Boolean) invocation.callRealMethod();
        }
    }).when(spy).isFileDeletable(Mockito.any());
    // run the chore
    chore.chore();
    // make sure all the directories + added file exist, but the original file is deleted
    assertTrue("Added file unexpectedly deleted", fs.exists(racyFile));
    assertTrue("Parent directory deleted unexpectedly", fs.exists(parent));
    assertFalse("Original file unexpectedly retained", fs.exists(file));
    Mockito.verify(spy, Mockito.times(1)).isFileDeletable(Mockito.any());
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) StoppableImplementation(org.apache.hadoop.hbase.util.StoppableImplementation) Stoppable(org.apache.hadoop.hbase.Stoppable) HBaseTestingUtil(org.apache.hadoop.hbase.HBaseTestingUtil) InvocationOnMock(org.mockito.invocation.InvocationOnMock) FileSystem(org.apache.hadoop.fs.FileSystem) FilterFileSystem(org.apache.hadoop.fs.FilterFileSystem) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Test(org.junit.Test)
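
The load-bearing trick above is Mockito.spy combined with invocation.callRealMethod(): the real deletability check still runs, so the chore exercises its genuine decision logic while the test injects the racy file creation just before it. A hedged, generic sketch of the pattern; RealDelegate, checkSomething, and injectSideEffect are placeholders, not names from the test.

RealDelegate real = new RealDelegate();
RealDelegate spy = Mockito.spy(real);
Mockito.doAnswer(invocation -> {
    injectSideEffect();                 // stands in for fs.create(racyFile).close()
    return invocation.callRealMethod(); // preserve the real return value
}).when(spy).checkSomething(Mockito.any());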

Aggregations

Stoppable (org.apache.hadoop.hbase.Stoppable): 34 usages
Configuration (org.apache.hadoop.conf.Configuration): 31 usages
Test (org.junit.Test): 31 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 16 usages
Path (org.apache.hadoop.fs.Path): 16 usages
StoppableImplementation (org.apache.hadoop.hbase.util.StoppableImplementation): 15 usages
FilterFileSystem (org.apache.hadoop.fs.FilterFileSystem): 11 usages
InvocationOnMock (org.mockito.invocation.InvocationOnMock): 8 usages
ArrayList (java.util.ArrayList): 7 usages
ExecutorService (java.util.concurrent.ExecutorService): 7 usages
Abortable (org.apache.hadoop.hbase.Abortable): 7 usages
RegionCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment): 7 usages
StubAbortable (org.apache.phoenix.hbase.index.StubAbortable): 7 usages
HTableInterface (org.apache.hadoop.hbase.client.HTableInterface): 5 usages
Mutation (org.apache.hadoop.hbase.client.Mutation): 5 usages
Put (org.apache.hadoop.hbase.client.Put): 5 usages
SnapshotManager (org.apache.hadoop.hbase.master.snapshot.SnapshotManager): 5 usages
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 5 usages
CountDownLatch (java.util.concurrent.CountDownLatch): 4 usages
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 4 usages