Use of org.apache.hadoop.hbase.Stoppable in project hbase by apache.
From class TestCleanerChore, method testNoExceptionFromDirectoryWithRacyChildren.
/**
 * The cleaner runs in a loop, where it first checks to see if all the files under a directory can
 * be deleted. If they all can, then we try to delete the directory. However, a file may be added
 * to that directory after the original check. This test ensures that we don't accidentally delete
 * that directory and don't get spurious IOExceptions.
 * <p>
 * This was from HBASE-7465.
 * @throws Exception on failure
 */
@Test
public void testNoExceptionFromDirectoryWithRacyChildren() throws Exception {
  Stoppable stop = new StoppableImplementation();
  // need to use a local utility to avoid breaking the rest of the tests that run on the local FS,
  // which gets hosed when we start to use a minicluster.
  HBaseTestingUtility localUtil = new HBaseTestingUtility();
  Configuration conf = localUtil.getConfiguration();
  final Path testDir = UTIL.getDataTestDir();
  final FileSystem fs = UTIL.getTestFileSystem();
  LOG.debug("Writing test data to: " + testDir);
  String confKey = "hbase.test.cleaner.delegates";
  conf.set(confKey, AlwaysDelete.class.getName());
  AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey);
  // spy on the delegate to ensure that we don't check for directories
  AlwaysDelete delegate = (AlwaysDelete) chore.cleanersChain.get(0);
  AlwaysDelete spy = Mockito.spy(delegate);
  chore.cleanersChain.set(0, spy);
  // create the directory layout in the directory to clean
  final Path parent = new Path(testDir, "parent");
  Path file = new Path(parent, "someFile");
  fs.mkdirs(parent);
  // touch a new file
  fs.create(file).close();
  assertTrue("Test file didn't get created.", fs.exists(file));
  final Path racyFile = new Path(parent, "addedFile");
  // when we attempt to delete the original file, add another file in the same directory
  Mockito.doAnswer(new Answer<Boolean>() {

    @Override
    public Boolean answer(InvocationOnMock invocation) throws Throwable {
      fs.create(racyFile).close();
      FSUtils.logFileSystemState(fs, testDir, LOG);
      return (Boolean) invocation.callRealMethod();
    }
  }).when(spy).isFileDeletable(Mockito.any(FileStatus.class));
  // attempt to delete the directory; this should fail because a file was added mid-check
  if (chore.checkAndDeleteDirectory(parent)) {
    throw new Exception("Reported success deleting directory, should have failed when adding file mid-iteration");
  }
  // make sure the directory and the added file still exist, but the original file is deleted
  assertTrue("Added file unexpectedly deleted", fs.exists(racyFile));
  assertTrue("Parent directory deleted unexpectedly", fs.exists(parent));
  assertFalse("Original file unexpectedly retained", fs.exists(file));
  Mockito.verify(spy, Mockito.times(1)).isFileDeletable(Mockito.any(FileStatus.class));
}
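These tests drive the chore with a StoppableImplementation, whose definition is not shown on this page. A minimal sketch, assuming only the two methods the HBase Stoppable interface declares (stop and isStopped):

class StoppableImplementation implements Stoppable {

  private volatile boolean stopped = false;

  @Override
  public void stop(String why) {
    // the reason is ignored here; the tests only need the flag
    this.stopped = true;
  }

  @Override
  public boolean isStopped() {
    return this.stopped;
  }
}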
Use of org.apache.hadoop.hbase.Stoppable in project hbase by apache.
From class TestZooKeeperTableArchiveClient, method testMultipleTables.
/**
 * Test archiving/cleaning across multiple tables, where some are retained and others aren't.
 * @throws Exception on failure
 */
@Test(timeout = 300000)
public void testMultipleTables() throws Exception {
  createArchiveDirectory();
  String otherTable = "otherTable";
  FileSystem fs = UTIL.getTestFileSystem();
  Path archiveDir = getArchiveDir();
  Path tableDir = getTableDir(STRING_TABLE_NAME);
  Path otherTableDir = getTableDir(otherTable);
  // register cleanup for the created directories
  toCleanup.add(archiveDir);
  toCleanup.add(tableDir);
  toCleanup.add(otherTableDir);
  Configuration conf = UTIL.getConfiguration();
  // setup the delegate
  Stoppable stop = new StoppableImplementation();
  final ChoreService choreService = new ChoreService("TEST_SERVER_NAME");
  HFileCleaner cleaner = setupAndCreateCleaner(conf, fs, archiveDir, stop);
  List<BaseHFileCleanerDelegate> cleaners = turnOnArchiving(STRING_TABLE_NAME, cleaner);
  final LongTermArchivingHFileCleaner delegate = (LongTermArchivingHFileCleaner) cleaners.get(0);
  // create the region
  HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAM);
  HRegion region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd);
  List<Region> regions = new ArrayList<>();
  regions.add(region);
  when(rss.getOnlineRegions()).thenReturn(regions);
  final CompactedHFilesDischarger compactionCleaner = new CompactedHFilesDischarger(100, stop, rss, false);
  loadFlushAndCompact(region, TEST_FAM);
  compactionCleaner.chore();
  // create another table that we don't archive
  hcd = new HColumnDescriptor(TEST_FAM);
  HRegion otherRegion = UTIL.createTestRegion(otherTable, hcd);
  regions = new ArrayList<>();
  regions.add(otherRegion);
  when(rss.getOnlineRegions()).thenReturn(regions);
  final CompactedHFilesDischarger compactionCleaner1 = new CompactedHFilesDischarger(100, stop, rss, false);
  loadFlushAndCompact(otherRegion, TEST_FAM);
  compactionCleaner1.chore();
  // get the current hfiles in the archive directory; they should have been archived
  List<Path> files = getAllFiles(fs, archiveDir);
  if (files == null) {
    FSUtils.logFileSystemState(fs, archiveDir, LOG);
    throw new RuntimeException("Didn't archive any files!");
  }
  // make sure we have files from both tables
  int initialCountForPrimary = 0;
  int initialCountForOtherTable = 0;
  for (Path file : files) {
    String tableName = file.getParent().getParent().getParent().getName();
    // check which table this file belongs to
    if (tableName.equals(otherTable)) {
      initialCountForOtherTable++;
    } else if (tableName.equals(STRING_TABLE_NAME)) {
      initialCountForPrimary++;
    }
  }
  assertTrue("Didn't archive files for:" + STRING_TABLE_NAME, initialCountForPrimary > 0);
  assertTrue("Didn't archive files for:" + otherTable, initialCountForOtherTable > 0);
  // run the cleaner, checking each of the directories and files: in 'otherTable' both should be
  // deleted (and need to be checked), while the files in the primary table should be retained
  CountDownLatch finished = setupCleanerWatching(delegate, cleaners, files.size() + 3);
  // run the cleaner
  choreService.scheduleChore(cleaner);
  // wait for the cleaner to check all the files
  finished.await();
  // stop the cleaner
  stop.stop("");
  // now that we know the cleaner ran, check all the files again to make sure they are still there
  List<Path> archivedFiles = getAllFiles(fs, archiveDir);
  int archivedForPrimary = 0;
  for (Path file : archivedFiles) {
    String tableName = file.getParent().getParent().getParent().getName();
    // ensure we don't have files from the non-archived table
    assertFalse("Have a file from the non-archived table: " + file, tableName.equals(otherTable));
    if (tableName.equals(STRING_TABLE_NAME)) {
      archivedForPrimary++;
    }
  }
  assertEquals("Not all archived files for the primary table were retained.", initialCountForPrimary, archivedForPrimary);
  // but we still have the archive directory
  assertTrue("Archive directory was deleted via archiver", fs.exists(archiveDir));
}
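The latch returned by setupCleanerWatching gates the test until the cleaner has examined every file; the helper's body is not shown on this page. A plausible sketch of the pattern, reusing the spy-and-doAnswer technique from the TestCleanerChore example above (wiring the spy in via cleaners.set(0, spy) is an assumption about the helper, not confirmed by the source):

private CountDownLatch setupCleanerWatching(LongTermArchivingHFileCleaner delegate,
    List<BaseHFileCleanerDelegate> cleaners, int expectedChecks) {
  final CountDownLatch finished = new CountDownLatch(expectedChecks);
  LongTermArchivingHFileCleaner spy = Mockito.spy(delegate);
  Mockito.doAnswer(new Answer<Boolean>() {

    @Override
    public Boolean answer(InvocationOnMock invocation) throws Throwable {
      Boolean deletable = (Boolean) invocation.callRealMethod();
      // count every check so the test can await files.size() + 3 of them
      finished.countDown();
      return deletable;
    }
  }).when(spy).isFileDeletable(Mockito.any(FileStatus.class));
  cleaners.set(0, spy);
  return finished;
}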
Use of org.apache.hadoop.hbase.Stoppable in project phoenix by apache.
From class TestIndexWriter, method testSynchronouslyCompletesAllWrites.
/**
* With the move to using a pool of threads to write, we need to ensure that we still block until
* all index writes for a mutation/batch are completed.
* @throws Exception on failure
*/
@SuppressWarnings({ "unchecked", "deprecation" })
@Test
public void testSynchronouslyCompletesAllWrites() throws Exception {
LOG.info("Starting " + testName.getTableNameString());
LOG.info("Current thread is interrupted: " + Thread.interrupted());
Abortable abort = new StubAbortable();
Stoppable stop = Mockito.mock(Stoppable.class);
RegionCoprocessorEnvironment e = Mockito.mock(RegionCoprocessorEnvironment.class);
Configuration conf = new Configuration();
Mockito.when(e.getConfiguration()).thenReturn(conf);
Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap<String, Object>());
ExecutorService exec = Executors.newFixedThreadPool(1);
Map<ImmutableBytesPtr, HTableInterface> tables = new HashMap<ImmutableBytesPtr, HTableInterface>();
FakeTableFactory factory = new FakeTableFactory(tables);
byte[] tableName = this.testName.getTableName();
Put m = new Put(row);
m.add(Bytes.toBytes("family"), Bytes.toBytes("qual"), null);
Collection<Pair<Mutation, byte[]>> indexUpdates = Arrays.asList(new Pair<Mutation, byte[]>(m, tableName));
HTableInterface table = Mockito.mock(HTableInterface.class);
final boolean[] completed = new boolean[] { false };
Mockito.when(table.batch(Mockito.anyList())).thenAnswer(new Answer<Void>() {
@Override
public Void answer(InvocationOnMock invocation) throws Throwable {
// just keep track that it was called
completed[0] = true;
return null;
}
});
Mockito.when(table.getTableName()).thenReturn(testName.getTableName());
// add the table to the set of tables, so its returned to the writer
tables.put(new ImmutableBytesPtr(tableName), table);
// setup the writer and failure policy
ParallelWriterIndexCommitter committer = new ParallelWriterIndexCommitter(VersionInfo.getVersion());
committer.setup(factory, exec, abort, stop, 2, e);
KillServerOnFailurePolicy policy = new KillServerOnFailurePolicy();
policy.setup(stop, abort);
IndexWriter writer = new IndexWriter(committer, policy);
writer.write(indexUpdates);
assertTrue("Writer returned before the table batch completed! Likely a race condition tripped", completed[0]);
writer.stop(this.testName.getTableNameString() + " finished");
assertTrue("Factory didn't get shutdown after writer#stop!", factory.shutdown);
assertTrue("ExectorService isn't terminated after writer#stop!", exec.isShutdown());
}
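The one-element completed array is the usual workaround for anonymous inner classes only capturing (effectively) final locals: the array reference stays final while its slot is mutable. A java.util.concurrent.atomic.AtomicBoolean expresses the same intent more directly; a hedged equivalent of the stub above:

final AtomicBoolean completed = new AtomicBoolean(false);
Mockito.when(table.batch(Mockito.anyList())).thenAnswer(new Answer<Void>() {

  @Override
  public Void answer(InvocationOnMock invocation) throws Throwable {
    // just keep track that it was called
    completed.set(true);
    return null;
  }
});

Either version relies on the Answer running before writer.write(...) returns, which is exactly what the final assertion on the flag verifies.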
Use of org.apache.hadoop.hbase.Stoppable in project phoenix by apache.
From class TestParalleIndexWriter, method testCorrectlyCleansUpResources.
@Test
public void testCorrectlyCleansUpResources() throws Exception {
  ExecutorService exec = Executors.newFixedThreadPool(1);
  RegionCoprocessorEnvironment e = Mockito.mock(RegionCoprocessorEnvironment.class);
  Configuration conf = new Configuration();
  Mockito.when(e.getConfiguration()).thenReturn(conf);
  Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap<String, Object>());
  FakeTableFactory factory = new FakeTableFactory(Collections.<ImmutableBytesPtr, HTableInterface>emptyMap());
  ParallelWriterIndexCommitter writer = new ParallelWriterIndexCommitter(VersionInfo.getVersion());
  Abortable mockAbort = Mockito.mock(Abortable.class);
  Stoppable mockStop = Mockito.mock(Stoppable.class);
  // create a simple writer
  writer.setup(factory, exec, mockAbort, mockStop, 1, e);
  // stop the writer
  writer.stop(this.test.getTableNameString() + " finished");
  assertTrue("Factory didn't get shut down after writer#stop!", factory.shutdown);
  assertTrue("ExecutorService isn't terminated after writer#stop!", exec.isShutdown());
  Mockito.verifyZeroInteractions(mockAbort, mockStop);
}
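Both Phoenix tests assert on factory.shutdown, a package-visible flag on the FakeTableFactory test double, which is not defined on this page. A minimal sketch, assuming Phoenix's HTableFactory contract is just getTable plus shutdown (the exact interface is an assumption):

class FakeTableFactory implements HTableFactory {

  boolean shutdown = false;
  private final Map<ImmutableBytesPtr, HTableInterface> tables;

  FakeTableFactory(Map<ImmutableBytesPtr, HTableInterface> tables) {
    this.tables = tables;
  }

  @Override
  public HTableInterface getTable(ImmutableBytesPtr tablename) throws IOException {
    // hand back the pre-registered mock for this table, if any
    return this.tables.get(tablename);
  }

  @Override
  public void shutdown() {
    // record the call so tests can assert on it after writer#stop
    this.shutdown = true;
  }
}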
Use of org.apache.hadoop.hbase.Stoppable in project phoenix by apache.
From class TestParalleWriterIndexCommitter, method testSynchronouslyCompletesAllWrites.
@SuppressWarnings({ "unchecked", "deprecation" })
@Test
public void testSynchronouslyCompletesAllWrites() throws Exception {
LOG.info("Starting " + test.getTableNameString());
LOG.info("Current thread is interrupted: " + Thread.interrupted());
Abortable abort = new StubAbortable();
RegionCoprocessorEnvironment e = Mockito.mock(RegionCoprocessorEnvironment.class);
Configuration conf = new Configuration();
Mockito.when(e.getConfiguration()).thenReturn(conf);
Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap<String, Object>());
Stoppable stop = Mockito.mock(Stoppable.class);
ExecutorService exec = Executors.newFixedThreadPool(1);
Map<ImmutableBytesPtr, HTableInterface> tables = new HashMap<ImmutableBytesPtr, HTableInterface>();
FakeTableFactory factory = new FakeTableFactory(tables);
ImmutableBytesPtr tableName = new ImmutableBytesPtr(this.test.getTableName());
Put m = new Put(row);
m.add(Bytes.toBytes("family"), Bytes.toBytes("qual"), null);
Multimap<HTableInterfaceReference, Mutation> indexUpdates = ArrayListMultimap.<HTableInterfaceReference, Mutation>create();
indexUpdates.put(new HTableInterfaceReference(tableName), m);
HTableInterface table = Mockito.mock(HTableInterface.class);
final boolean[] completed = new boolean[] { false };
Mockito.when(table.batch(Mockito.anyList())).thenAnswer(new Answer<Void>() {
@Override
public Void answer(InvocationOnMock invocation) throws Throwable {
// just keep track that it was called
completed[0] = true;
return null;
}
});
Mockito.when(table.getTableName()).thenReturn(test.getTableName());
// add the table to the set of tables, so its returned to the writer
tables.put(tableName, table);
// setup the writer and failure policy
ParallelWriterIndexCommitter writer = new ParallelWriterIndexCommitter(VersionInfo.getVersion());
writer.setup(factory, exec, abort, stop, 1, e);
writer.write(indexUpdates, true);
assertTrue("Writer returned before the table batch completed! Likely a race condition tripped", completed[0]);
writer.stop(this.test.getTableNameString() + " finished");
assertTrue("Factory didn't get shutdown after writer#stop!", factory.shutdown);
assertTrue("ExectorService isn't terminated after writer#stop!", exec.isShutdown());
}
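Unlike TestIndexWriter above, which passes a flat collection of (mutation, table-name) pairs, this test groups mutations per target table with Guava's ArrayListMultimap, so a committer can submit one batch per index table. A hedged sketch of that dispatch, where resolveTable is a hypothetical lookup against the table factory (not Phoenix's actual internals):

for (HTableInterfaceReference ref : indexUpdates.keySet()) {
  List<Mutation> batch = new ArrayList<Mutation>(indexUpdates.get(ref));
  // resolveTable(...) is hypothetical; the real committer resolves tables
  // through its HTableFactory
  HTableInterface table = resolveTable(ref);
  table.batch(batch);
}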