use of org.apache.hadoop.fs.Path in project hbase by apache.
the class TestCatalogJanitor method parentWithSpecifiedEndKeyCleanedEvenIfDaughterGoneFirst.
/**
* Make sure a parent with a specified end key gets cleaned up even if a daughter is cleaned up before it.
*
* @param rootDir the test case name, used as the HBase testing utility root
* @param lastEndKey the end key of the split parent
* @throws IOException
* @throws InterruptedException
*/
private void parentWithSpecifiedEndKeyCleanedEvenIfDaughterGoneFirst(final String rootDir, final byte[] lastEndKey) throws IOException, InterruptedException {
HBaseTestingUtility htu = new HBaseTestingUtility();
setRootDirAndCleanIt(htu, rootDir);
MasterServices services = new MockMasterServices(htu);
CatalogJanitor janitor = new CatalogJanitor(services);
final HTableDescriptor htd = createHTableDescriptor();
// Create regions: aaa->{lastEndKey}, aaa->ccc, aaa->bbb, bbb->ccc, etc.
// Parent
HRegionInfo parent = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), lastEndKey);
// Sleep a second, else the encoded name on these regions comes out the
// same for all regions with the same start key created in the same second.
Thread.sleep(1001);
// Daughter a
HRegionInfo splita = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
Thread.sleep(1001);
// Make daughters of daughter a; splitaa and splitab.
HRegionInfo splitaa = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("bbb"));
HRegionInfo splitab = new HRegionInfo(htd.getTableName(), Bytes.toBytes("bbb"), Bytes.toBytes("ccc"));
// Daughter b
HRegionInfo splitb = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"), lastEndKey);
Thread.sleep(1001);
// Make daughters of daughter b; splitba and splitbb.
HRegionInfo splitba = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"), Bytes.toBytes("ddd"));
HRegionInfo splitbb = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ddd"), lastEndKey);
// First test that our Comparator works right up in CatalogJanitor.
// Just for kicks.
SortedMap<HRegionInfo, Result> regions = new TreeMap<>(new CatalogJanitor.SplitParentFirstComparator());
// Now make sure that this regions map sorts as we expect it to.
regions.put(parent, createResult(parent, splita, splitb));
regions.put(splitb, createResult(splitb, splitba, splitbb));
regions.put(splita, createResult(splita, splitaa, splitab));
// Assert it's properly sorted.
int index = 0;
for (Map.Entry<HRegionInfo, Result> e : regions.entrySet()) {
if (index == 0) {
assertTrue(e.getKey().getEncodedName().equals(parent.getEncodedName()));
} else if (index == 1) {
assertTrue(e.getKey().getEncodedName().equals(splita.getEncodedName()));
} else if (index == 2) {
assertTrue(e.getKey().getEncodedName().equals(splitb.getEncodedName()));
}
index++;
}
// Now play around with the cleanParent function. Create a ref from splita
// up to the parent.
Path splitaRef = createReferences(services, htd, parent, splita, Bytes.toBytes("ccc"), false);
// Make sure actual super parent sticks around because splita has a ref.
assertFalse(janitor.cleanParent(parent, regions.get(parent)));
// splitba and splitbb do not have dirs in fs. That means that if
// we test splitb, it should get cleaned up.
assertTrue(janitor.cleanParent(splitb, regions.get(splitb)));
// Now remove ref from splita to parent... so parent can be let go and so
// the daughter splita can be split (can't split if still references).
// BUT make the timing such that the daughter gets cleaned up before we
// can get a chance to let go of the parent.
FileSystem fs = FileSystem.get(htu.getConfiguration());
assertTrue(fs.delete(splitaRef, true));
// Create the refs from daughters of splita.
Path splitaaRef = createReferences(services, htd, splita, splitaa, Bytes.toBytes("bbb"), false);
Path splitabRef = createReferences(services, htd, splita, splitab, Bytes.toBytes("bbb"), true);
// Test splita. It should stick around because of the references from its daughters splitaa and splitab.
assertFalse(janitor.cleanParent(splita, regions.get(splita)));
// Now clean up the daughter splita before the parent: remove the references from its daughters.
assertTrue(fs.delete(splitaaRef, true));
assertTrue(fs.delete(splitabRef, true));
assertTrue(janitor.cleanParent(splita, regions.get(splita)));
// Super parent should get cleaned up now that both splita and splitb are gone.
assertTrue(janitor.cleanParent(parent, regions.get(parent)));
services.stop("test finished");
janitor.cancel(true);
}
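The ordering asserted above comes from CatalogJanitor.SplitParentFirstComparator, which sorts a split parent ahead of its daughters. As a rough illustration only, here is a hedged, standalone sketch of that ordering idea; it is not the actual HBase comparator and it ignores details such as per-table grouping and null handling:
import java.util.Comparator;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.util.Bytes;
// Sketch: sort by start key ascending, then by end key descending, treating an
// empty end key ("to the end of the table") as the largest value, so a split
// parent that spans its daughters sorts before them.
class SplitParentFirstSketch implements Comparator<HRegionInfo> {
  @Override
  public int compare(HRegionInfo left, HRegionInfo right) {
    int result = Bytes.compareTo(left.getStartKey(), right.getStartKey());
    if (result != 0) {
      return result;
    }
    byte[] leftEnd = left.getEndKey();
    byte[] rightEnd = right.getEndKey();
    if (leftEnd.length == 0 && rightEnd.length == 0) {
      return 0;
    }
    if (leftEnd.length == 0) {
      return -1; // left spans to the table end, so it sorts first
    }
    if (rightEnd.length == 0) {
      return 1;
    }
    return -Bytes.compareTo(leftEnd, rightEnd); // wider region first
  }
}
With this ordering, parent (aaa -> lastEndKey) sorts before splita (aaa -> ccc), which sorts before splitb (ccc -> lastEndKey), matching the assertions above.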
use of org.apache.hadoop.fs.Path in project hbase by apache.
the class TestCatalogJanitor method addMockStoreFiles.
private FileStatus[] addMockStoreFiles(int count, MasterServices services, Path storedir) throws IOException {
// get the filesystem and make sure the store directory exists
FileSystem fs = services.getMasterFileSystem().getFileSystem();
fs.mkdirs(storedir);
// create the store files in the parent
for (int i = 0; i < count; i++) {
Path storeFile = new Path(storedir, "_store" + i);
FSDataOutputStream dos = fs.create(storeFile, true);
dos.writeBytes("Some data: " + i);
dos.close();
}
LOG.debug("Adding " + count + " store files to the storedir:" + storedir);
// make sure the mock store files are there
FileStatus[] storeFiles = fs.listStatus(storedir);
assertEquals("Didn't have expected store files", count, storeFiles.length);
return storeFiles;
}
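For context, a short usage sketch of this helper; it simply mirrors how the tests below derive the store directory before calling it, and assumes parent, htd and services exist as in those tests:
// Derive the parent region's store directory, then drop two mock files into it.
Path rootdir = services.getMasterFileSystem().getRootDir();
Path tabledir = FSUtils.getTableDir(rootdir, htd.getTableName());
Path storedir = HStore.getStoreHomedir(tabledir, parent, htd.getColumnFamilies()[0].getName());
FileStatus[] mockFiles = addMockStoreFiles(2, services, storedir);
assertEquals(2, mockFiles.length);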
use of org.apache.hadoop.fs.Path in project hbase by apache.
the class TestCatalogJanitor method testCleanParent.
@Test
public void testCleanParent() throws IOException, InterruptedException {
HBaseTestingUtility htu = new HBaseTestingUtility();
setRootDirAndCleanIt(htu, "testCleanParent");
MasterServices services = new MockMasterServices(htu);
try {
CatalogJanitor janitor = new CatalogJanitor(services);
// Create regions.
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
htd.addFamily(new HColumnDescriptor("f"));
HRegionInfo parent = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
HRegionInfo splita = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
HRegionInfo splitb = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"), Bytes.toBytes("eee"));
// Test that when both daughter regions are in place, we do not
// remove the parent.
Result r = createResult(parent, splita, splitb);
// Add a reference under splita's directory so we don't clear out the parent.
Path rootdir = services.getMasterFileSystem().getRootDir();
Path tabledir = FSUtils.getTableDir(rootdir, htd.getTableName());
Path storedir = HStore.getStoreHomedir(tabledir, splita, htd.getColumnFamilies()[0].getName());
Reference ref = Reference.createTopReference(Bytes.toBytes("ccc"));
long now = System.currentTimeMillis();
// Reference name has this format: StoreFile#REF_NAME_PARSER
Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
FileSystem fs = services.getMasterFileSystem().getFileSystem();
Path path = ref.write(fs, p);
assertTrue(fs.exists(path));
assertFalse(janitor.cleanParent(parent, r));
// Remove the reference file and try again.
assertTrue(fs.delete(p, true));
assertTrue(janitor.cleanParent(parent, r));
} finally {
services.stop("shutdown");
}
}
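The reference file written above uses the <storeFileName>.<parentEncodedName> naming that the janitor keys on. A hedged sketch of building and splitting such a name with plain string handling (it does not use HBase's own StoreFile#REF_NAME_PARSER, and assumes a parent HRegionInfo as in the test):
// Build a reference-file name like the one in testCleanParent and take it apart.
long now = System.currentTimeMillis();
String parentEncodedName = parent.getEncodedName();
String refName = Long.toString(now) + "." + parentEncodedName;
int dot = refName.indexOf('.');
String storeFilePart = refName.substring(0, dot);      // "<timestamp>" in this test
String referencedRegion = refName.substring(dot + 1);  // the parent's encoded name
assertEquals(parentEncodedName, referencedRegion);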
use of org.apache.hadoop.fs.Path in project hbase by apache.
the class TestCatalogJanitor method testDuplicateHFileResolution.
/**
* Test that if a store file with the same name as one already archived is added again, the
* previously archived file gets moved aside as a timestamped backup.
*/
@Test
public void testDuplicateHFileResolution() throws Exception {
HBaseTestingUtility htu = new HBaseTestingUtility();
setRootDirAndCleanIt(htu, "testCleanParent");
MasterServices services = new MockMasterServices(htu);
// create the janitor
CatalogJanitor janitor = new CatalogJanitor(services);
// Create regions.
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
htd.addFamily(new HColumnDescriptor("f"));
HRegionInfo parent = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
HRegionInfo splita = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
HRegionInfo splitb = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"), Bytes.toBytes("eee"));
// Test that when both daughter regions are in place, we do not
// remove the parent.
Result r = createResult(parent, splita, splitb);
FileSystem fs = FileSystem.get(htu.getConfiguration());
Path rootdir = services.getMasterFileSystem().getRootDir();
// have to set the root directory since we use it in HFileDisposer to figure out how to get to the
// archive directory. Otherwise, it just seems to pick the first root directory it can find (so
// the single test passes, but when the full suite is run, things get borked).
FSUtils.setRootDir(fs.getConf(), rootdir);
Path tabledir = FSUtils.getTableDir(rootdir, parent.getTable());
Path storedir = HStore.getStoreHomedir(tabledir, parent, htd.getColumnFamilies()[0].getName());
System.out.println("Old root:" + rootdir);
System.out.println("Old table:" + tabledir);
System.out.println("Old store:" + storedir);
Path storeArchive = HFileArchiveUtil.getStoreArchivePath(services.getConfiguration(), parent, tabledir, htd.getColumnFamilies()[0].getName());
System.out.println("Old archive:" + storeArchive);
// enable archiving, make sure that files get archived
addMockStoreFiles(2, services, storedir);
// get the current store files for comparison
FileStatus[] storeFiles = fs.listStatus(storedir);
// do the cleaning of the parent
assertTrue(janitor.cleanParent(parent, r));
// and now check to make sure that the files have actually been archived
FileStatus[] archivedStoreFiles = fs.listStatus(storeArchive);
assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs);
// now add store files with the same names as before to check that the already
// archived copies get a timestamped backup when archiving runs again
addMockStoreFiles(2, services, storedir);
// do the cleaning of the parent
assertTrue(janitor.cleanParent(parent, r));
// and now check to make sure that the files have actually been archived
archivedStoreFiles = fs.listStatus(storeArchive);
assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs, true);
// cleanup
services.stop("Test finished");
janitor.cancel(true);
}
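assertArchiveEqualToOriginal is a helper from the HBase test utilities; roughly, it checks that every original store file has a counterpart in the archive and, with the boolean flag, that duplicates were kept as timestamped backups. A hedged sketch of that kind of check follows; it is an assumption about the idea, not the real helper, and it only prefix-matches backup names rather than asserting their exact format:
import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.fs.FileStatus;
import static org.junit.Assert.assertTrue;
// Sketch: verify each original file name shows up in the archive, optionally
// accepting names that merely start with the original (timestamped backups).
static void assertArchivedSketch(FileStatus[] original, FileStatus[] archived,
    boolean allowTimestampedBackup) {
  Set<String> archivedNames = new HashSet<>();
  for (FileStatus f : archived) {
    archivedNames.add(f.getPath().getName());
  }
  for (FileStatus f : original) {
    String name = f.getPath().getName();
    boolean found = archivedNames.contains(name);
    if (!found && allowTimestampedBackup) {
      for (String candidate : archivedNames) {
        if (candidate.startsWith(name)) { // backup copies carry an extra suffix
          found = true;
          break;
        }
      }
    }
    assertTrue("No archived copy found for " + name, found);
  }
}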
use of org.apache.hadoop.fs.Path in project hbase by apache.
the class TestDistributedLogSplitting method testLogReplayForDisablingTable.
@Ignore("DLR is broken by HBASE-12751")
@Test(timeout = 300000)
public void testLogReplayForDisablingTable() throws Exception {
LOG.info("testLogReplayForDisablingTable");
conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, true);
startCluster(NUM_RS);
final int NUM_REGIONS_TO_CREATE = 40;
final int NUM_LOG_LINES = 1000;
List<RegionServerThread> rsts = cluster.getLiveRegionServerThreads();
final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null);
Table disablingHT = installTable(zkw, "disableTable", "family", NUM_REGIONS_TO_CREATE);
Table ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE, NUM_REGIONS_TO_CREATE);
try {
// turn off load balancing to prevent regions from moving around; otherwise
// they will consume recovered.edits
master.balanceSwitch(false);
List<HRegionInfo> regions = null;
HRegionServer hrs = null;
boolean hasRegionsForBothTables = false;
String tableName = null;
for (int i = 0; i < NUM_RS; i++) {
tableName = null;
hasRegionsForBothTables = false;
boolean isCarryingSystem = false;
hrs = rsts.get(i).getRegionServer();
regions = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
for (HRegionInfo region : regions) {
if (region.getTable().isSystemTable()) {
isCarryingSystem = true;
break;
}
if (tableName != null && !tableName.equalsIgnoreCase(region.getTable().getNameAsString())) {
// make sure that we find a RS that has online regions for both "table" and "disableTable"
hasRegionsForBothTables = true;
break;
} else if (tableName == null) {
tableName = region.getTable().getNameAsString();
}
}
if (isCarryingSystem) {
continue;
}
if (hasRegionsForBothTables) {
break;
}
}
// make sure we found a good RS
Assert.assertTrue(hasRegionsForBothTables);
LOG.info("#regions = " + regions.size());
Iterator<HRegionInfo> it = regions.iterator();
while (it.hasNext()) {
HRegionInfo region = it.next();
if (region.isMetaTable()) {
it.remove();
}
}
makeWAL(hrs, regions, "disableTable", "family", NUM_LOG_LINES, 100, false);
makeWAL(hrs, regions, "table", "family", NUM_LOG_LINES, 100);
LOG.info("Disabling table\n");
TEST_UTIL.getAdmin().disableTable(TableName.valueOf(name.getMethodName()));
TEST_UTIL.waitTableDisabled(TableName.valueOf(name.getMethodName()).getName());
// abort RS
LOG.info("Aborting region server: " + hrs.getServerName());
hrs.abort("testing");
// wait for the abort to complete
TEST_UTIL.waitFor(120000, 200, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
return (cluster.getLiveRegionServerThreads().size() <= (NUM_RS - 1));
}
});
// wait for regions to come online
TEST_UTIL.waitFor(180000, 200, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
return (HBaseTestingUtility.getAllOnlineRegions(cluster).size() >= (NUM_REGIONS_TO_CREATE + 1));
}
});
// wait until all regions are fully recovered
TEST_UTIL.waitFor(180000, 200, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
List<String> recoveringRegions = zkw.getRecoverableZooKeeper().getChildren(zkw.znodePaths.recoveringRegionsZNode, false);
ServerManager serverManager = master.getServerManager();
return (!serverManager.areDeadServersInProgress() && recoveringRegions != null && recoveringRegions.isEmpty());
}
});
int count = 0;
FileSystem fs = master.getMasterFileSystem().getFileSystem();
Path rootdir = FSUtils.getRootDir(conf);
Path tdir = FSUtils.getTableDir(rootdir, TableName.valueOf(name.getMethodName()));
for (HRegionInfo hri : regions) {
Path editsdir = WALSplitter.getRegionDirRecoveredEditsDir(HRegion.getRegionDir(tdir, hri.getEncodedName()));
LOG.debug("checking edits dir " + editsdir);
if (!fs.exists(editsdir))
continue;
FileStatus[] files = fs.listStatus(editsdir, new PathFilter() {
@Override
public boolean accept(Path p) {
if (WALSplitter.isSequenceIdFile(p)) {
return false;
}
return true;
}
});
if (files != null) {
for (FileStatus file : files) {
int c = countWAL(file.getPath(), fs, conf);
count += c;
LOG.info(c + " edits in " + file.getPath());
}
}
}
LOG.info("Verify edits in recovered.edits files");
assertEquals(NUM_LOG_LINES, count);
LOG.info("Verify replayed edits");
assertEquals(NUM_LOG_LINES, TEST_UTIL.countRows(ht));
// clean up
for (HRegionInfo hri : regions) {
Path editsdir = WALSplitter.getRegionDirRecoveredEditsDir(HRegion.getRegionDir(tdir, hri.getEncodedName()));
fs.delete(editsdir, true);
}
disablingHT.close();
} finally {
if (ht != null)
ht.close();
if (zkw != null)
zkw.close();
}
}
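countWAL is another helper of this test class; below is a minimal sketch of how such a counter could be written against the WAL reader API. This is an assumption about its shape, not the actual helper, which may additionally skip marker entries:
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;
// Sketch: open the log with a WAL reader and count the entries it returns.
private int countWALSketch(Path log, FileSystem fs, Configuration conf) throws IOException {
  int count = 0;
  WAL.Reader reader = WALFactory.createReader(fs, log, conf);
  try {
    while (reader.next() != null) {
      count++;
    }
  } finally {
    reader.close();
  }
  return count;
}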