Use of org.apache.hadoop.hbase.util.StoppableImplementation in project hbase by apache.
From the class TestEndToEndSplitTransaction, method testFromClientSideWhileSplitting.
/**
 * Tests that the client sees meta table changes as atomic during splits
 */
@Test
public void testFromClientSideWhileSplitting() throws Throwable {
  LOG.info("Starting testFromClientSideWhileSplitting");
  final TableName tableName = TableName.valueOf(name.getMethodName());
  final byte[] FAMILY = Bytes.toBytes("family");

  // SplitTransaction will update the meta table by offlining the parent region, and adding info
  // for daughters.
  Table table = TEST_UTIL.createTable(tableName, FAMILY);

  Stoppable stopper = new StoppableImplementation();
  RegionSplitter regionSplitter = new RegionSplitter(table);
  RegionChecker regionChecker = new RegionChecker(CONF, stopper, tableName);
  final ChoreService choreService = new ChoreService("TEST_SERVER");

  choreService.scheduleChore(regionChecker);
  regionSplitter.start();

  // wait until the splitter is finished
  regionSplitter.join();
  stopper.stop(null);

  if (regionChecker.ex != null) {
    throw new AssertionError("regionChecker", regionChecker.ex);
  }
  if (regionSplitter.ex != null) {
    throw new AssertionError("regionSplitter", regionSplitter.ex);
  }

  // one final check
  regionChecker.verify();
}
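Every example on this page hands a StoppableImplementation to code that expects an org.apache.hadoop.hbase.Stoppable. The utility is essentially a boolean flag behind that interface; a minimal sketch of its behavior (an approximation, not a copy of the HBase source) looks like this:

import org.apache.hadoop.hbase.Stoppable;

// Sketch: a Stoppable backed by a volatile flag. stop(why) flips the flag,
// isStopped() reports it, so chores and worker loops can poll for shutdown.
public class StoppableImplementation implements Stoppable {
  private volatile boolean stopped = false;

  @Override
  public void stop(String why) {
    this.stopped = true;
  }

  @Override
  public boolean isStopped() {
    return this.stopped;
  }
}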
Use of org.apache.hadoop.hbase.util.StoppableImplementation in project hbase by apache.
From the class TestZooKeeperTableArchiveClient, method testArchivingOnSingleTable.
@Test
public void testArchivingOnSingleTable() throws Exception {
  createArchiveDirectory();
  FileSystem fs = UTIL.getTestFileSystem();
  Path archiveDir = getArchiveDir();
  Path tableDir = getTableDir(STRING_TABLE_NAME);
  toCleanup.add(archiveDir);
  toCleanup.add(tableDir);

  Configuration conf = UTIL.getConfiguration();
  // setup the delegate
  Stoppable stop = new StoppableImplementation();
  HFileCleaner cleaner = setupAndCreateCleaner(conf, fs, archiveDir, stop);
  List<BaseHFileCleanerDelegate> cleaners = turnOnArchiving(STRING_TABLE_NAME, cleaner);
  final LongTermArchivingHFileCleaner delegate = (LongTermArchivingHFileCleaner) cleaners.get(0);

  // create the region
  ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.of(TEST_FAM);
  HRegion region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd);
  List<HRegion> regions = new ArrayList<>();
  regions.add(region);
  Mockito.doReturn(regions).when(rss).getRegions();
  final CompactedHFilesDischarger compactionCleaner =
    new CompactedHFilesDischarger(100, stop, rss, false);
  loadFlushAndCompact(region, TEST_FAM);
  compactionCleaner.chore();

  // get the current hfiles in the archive directory
  List<Path> files = getAllFiles(fs, archiveDir);
  if (files == null) {
    CommonFSUtils.logFileSystemState(fs, UTIL.getDataTestDir(), LOG);
    throw new RuntimeException("Didn't archive any files!");
  }

  CountDownLatch finished = setupCleanerWatching(delegate, cleaners, files.size());
  runCleaner(cleaner, finished, stop);

  // the cleaner has run, so check all the files again to make sure they are still there
  List<Path> archivedFiles = getAllFiles(fs, archiveDir);
  assertEquals("Archived files changed after running archive cleaner.", files, archivedFiles);

  // but we still have the archive directory
  assertTrue(fs.exists(HFileArchiveUtil.getArchivePath(UTIL.getConfiguration())));
}
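The Stoppable also doubles as the shutdown signal for any ScheduledChore the tests start: a chore scheduled on a ChoreService keeps firing on its period until its stopper reports stopped. A minimal sketch of that wiring (names are illustrative, not taken from the tests above):

ChoreService choreService = new ChoreService("EXAMPLE_SERVER");
Stoppable stopper = new StoppableImplementation();

// A chore that runs every 100 ms until the stopper is flipped.
ScheduledChore chore = new ScheduledChore("example-chore", stopper, 100) {
  @Override
  protected void chore() {
    // periodic work goes here
  }
};
choreService.scheduleChore(chore);

// ... exercise the system under test ...

stopper.stop("test done"); // the chore cancels itself once it sees isStopped()
choreService.shutdown();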
Use of org.apache.hadoop.hbase.util.StoppableImplementation in project hbase by apache.
From the class TestHFileArchiving, method testCleaningRace.
/**
 * Test HFileArchiver.resolveAndArchive() race condition HBASE-7643
 */
@Test
public void testCleaningRace() throws Exception {
  final long TEST_TIME = 20 * 1000;
  final ChoreService choreService = new ChoreService("TEST_SERVER_NAME");

  Configuration conf = UTIL.getMiniHBaseCluster().getMaster().getConfiguration();
  Path rootDir = UTIL.getDataTestDirOnTestFS("testCleaningRace");
  FileSystem fs = UTIL.getTestFileSystem();

  Path archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
  Path regionDir = new Path(
    CommonFSUtils.getTableDir(new Path("./"), TableName.valueOf(name.getMethodName())), "abcdef");
  Path familyDir = new Path(regionDir, "cf");

  Path sourceRegionDir = new Path(rootDir, regionDir);
  fs.mkdirs(sourceRegionDir);

  Stoppable stoppable = new StoppableImplementation();

  // The cleaner should be looping without long pauses to reproduce the race condition.
  HFileCleaner cleaner = getHFileCleaner(stoppable, conf, fs, archiveDir);
  assertNotNull("cleaner should not be null", cleaner);
  try {
    choreService.scheduleChore(cleaner);

    // Keep creating/archiving new files while the cleaner is running in the other thread
    long startTime = EnvironmentEdgeManager.currentTime();
    for (long fid = 0; (EnvironmentEdgeManager.currentTime() - startTime) < TEST_TIME; ++fid) {
      Path file = new Path(familyDir, String.valueOf(fid));
      Path sourceFile = new Path(rootDir, file);
      Path archiveFile = new Path(archiveDir, file);

      fs.createNewFile(sourceFile);

      try {
        // Try to archive the file
        HFileArchiver.archiveRegion(fs, rootDir, sourceRegionDir.getParent(), sourceRegionDir);

        // The archiver succeeded: the file is no longer in the original location,
        // but it is in the archive location.
        LOG.debug("hfile=" + fid + " should be in the archive");
        assertTrue(fs.exists(archiveFile));
        assertFalse(fs.exists(sourceFile));
      } catch (IOException e) {
        // The archiver was unable to archive the file, probably because of the
        // HBASE-7643 race condition. In this case the file should not be archived,
        // and should still be in the original location.
        LOG.debug("hfile=" + fid + " should be in the source location");
        assertFalse(fs.exists(archiveFile));
        assertTrue(fs.exists(sourceFile));

        // Avoid having this file around in the next run
        fs.delete(sourceFile, false);
      }
    }
  } finally {
    stoppable.stop("test end");
    cleaner.cancel(true);
    choreService.shutdown();
    fs.delete(rootDir, true);
  }
}
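getHFileCleaner(...) is a private helper of the test class whose body is not shown here; going by the comment above its call site, it presumably builds an HFileCleaner with a very short period so the chore loops almost continuously. A hypothetical equivalent (the exact HFileCleaner constructor varies across HBase versions, and recent ones also take a DirScanPool argument):

// Hypothetical stand-in for the test's getHFileCleaner(...) helper: a cleaner
// chore with a 1 ms period, so the archive is re-scanned nearly continuously,
// which is what makes the HBASE-7643 race reproducible.
private HFileCleaner getHFileCleaner(Stoppable stoppable, Configuration conf,
    FileSystem fs, Path archiveDir) {
  return new HFileCleaner(1, stoppable, conf, fs, archiveDir);
}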
Use of org.apache.hadoop.hbase.util.StoppableImplementation in project hbase by apache.
From the class TestCleanerChore, method retriesIOExceptionInStatus.
@Test
public void retriesIOExceptionInStatus() throws Exception {
  Stoppable stop = new StoppableImplementation();
  Configuration conf = UTIL.getConfiguration();
  Path testDir = UTIL.getDataTestDir();
  FileSystem fs = UTIL.getTestFileSystem();
  String confKey = "hbase.test.cleaner.delegates";

  Path child = new Path(testDir, "child");
  Path file = new Path(child, "file");
  fs.mkdirs(child);
  fs.create(file).close();
  assertTrue("test file didn't get created.", fs.exists(file));

  final AtomicBoolean fails = new AtomicBoolean(true);
  FilterFileSystem filtered = new FilterFileSystem(fs) {
    @Override
    public FileStatus[] listStatus(Path f) throws IOException {
      if (fails.get()) {
        throw new IOException("whomp whomp.");
      }
      return fs.listStatus(f);
    }
  };

  AllValidPaths chore =
    new AllValidPaths("test-retry-ioe", stop, conf, filtered, testDir, confKey, POOL);

  // trouble talking to the filesystem
  Boolean result = chore.runCleaner();
  // verify that it couldn't clean the files.
  assertTrue("test rig failed to inject failure.", fs.exists(file));
  assertTrue("test rig failed to inject failure.", fs.exists(child));
  // and verify that it accurately reported the failure.
  assertFalse("chore should report that it failed.", result);

  // filesystem is back
  fails.set(false);
  result = chore.runCleaner();
  // verify everything is gone.
  assertFalse("file should have been destroyed.", fs.exists(file));
  assertFalse("directory should have been destroyed.", fs.exists(child));
  // and verify that it accurately reported success.
  assertTrue("chore should claim it succeeded.", result);
}
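AllValidPaths is a small CleanerChore subclass defined inside TestCleanerChore and not shown in the snippet; its job is to treat every path under the watched directory as valid so nothing is skipped during a scan. A sketch of such a subclass (the super-constructor arguments approximate the real test class and may differ by HBase version):

// Sketch: a CleanerChore whose validate() accepts every path, so the chore
// descends into everything under the watched directory.
private static class AllValidPaths extends CleanerChore<BaseHFileCleanerDelegate> {
  AllValidPaths(String name, Stoppable s, Configuration conf, FileSystem fs,
      Path oldFileDir, String confKey, DirScanPool pool) {
    super(name, Integer.MAX_VALUE, s, conf, fs, oldFileDir, confKey, pool);
  }

  @Override
  protected boolean validate(Path file) {
    return true; // every path is valid for cleaning in the test rig
  }
}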
Use of org.apache.hadoop.hbase.util.StoppableImplementation in project hbase by apache.
From the class TestCleanerChore, method testCleanerDoesNotDeleteDirectoryWithLateAddedFiles.
/**
 * While cleaning a directory, all the files in the directory may be deleted, but there may be
 * another file added, in which case the directory shouldn't be deleted.
 * @throws IOException on failure
 */
@Test
public void testCleanerDoesNotDeleteDirectoryWithLateAddedFiles() throws IOException {
  Stoppable stop = new StoppableImplementation();
  Configuration conf = UTIL.getConfiguration();
  final Path testDir = UTIL.getDataTestDir();
  final FileSystem fs = UTIL.getTestFileSystem();
  String confKey = "hbase.test.cleaner.delegates";
  conf.set(confKey, AlwaysDelete.class.getName());

  AllValidPaths chore =
    new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey, POOL);
  // spy on the delegate to ensure that we don't check for directories
  AlwaysDelete delegate = (AlwaysDelete) chore.cleanersChain.get(0);
  AlwaysDelete spy = Mockito.spy(delegate);
  chore.cleanersChain.set(0, spy);

  // create the directory layout in the directory to clean
  final Path parent = new Path(testDir, "parent");
  Path file = new Path(parent, "someFile");
  fs.mkdirs(parent);
  // touch a new file
  fs.create(file).close();
  assertTrue("Test file didn't get created.", fs.exists(file));

  final Path addedFile = new Path(parent, "addedFile");

  // when we attempt to delete the original file, add another file in the same directory
  Mockito.doAnswer(new Answer<Boolean>() {
    @Override
    public Boolean answer(InvocationOnMock invocation) throws Throwable {
      fs.create(addedFile).close();
      CommonFSUtils.logFileSystemState(fs, testDir, LOG);
      return (Boolean) invocation.callRealMethod();
    }
  }).when(spy).isFileDeletable(Mockito.any());

  // run the chore
  chore.chore();

  // make sure all the directories + the added file exist, but the original file is deleted
  assertTrue("Added file unexpectedly deleted", fs.exists(addedFile));
  assertTrue("Parent directory deleted unexpectedly", fs.exists(parent));
  assertFalse("Original file unexpectedly retained", fs.exists(file));
  Mockito.verify(spy, Mockito.times(1)).isFileDeletable(Mockito.any());
  Mockito.reset(spy);
}
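AlwaysDelete, the delegate the test spies on, is likewise defined inside TestCleanerChore. Its contract is simply to approve every file for deletion, which is what lets the spy's doAnswer hook interleave filesystem activity with the delete decision. A sketch:

// Sketch: a cleaner delegate that approves every file for deletion.
public static class AlwaysDelete extends BaseHFileCleanerDelegate {
  @Override
  public boolean isFileDeletable(FileStatus fStat) {
    return true;
  }
}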