Use of org.apache.hadoop.hbase.HRegionInfo in project hbase by apache:
class TestAssignmentManagerOnCluster, method testOpenCloseRacing.
/**
 * This tests region close racing with open
 */
@Test(timeout = 60000)
public void testOpenCloseRacing() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  try {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(new HColumnDescriptor(FAMILY));
    admin.createTable(desc);
    Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
    HRegionInfo hri = new HRegionInfo(desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
    MetaTableAccessor.addRegionToMeta(meta, hri);
    meta.close();
    MyRegionObserver.postOpenEnabled.set(true);
    MyRegionObserver.postOpenCalled = false;
    HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
    AssignmentManager am = master.getAssignmentManager();
    // Region will be opened, but it won't complete
    am.assign(hri);
    long end = EnvironmentEdgeManager.currentTime() + 20000;
    // Wait till postOpen is called
    while (!MyRegionObserver.postOpenCalled) {
      assertFalse("Timed out waiting for postOpen to be called", EnvironmentEdgeManager.currentTime() > end);
      Thread.sleep(300);
    }
    // Now let's unassign it, it should do nothing
    am.unassign(hri);
    RegionState state = am.getRegionStates().getRegionState(hri);
    ServerName oldServerName = state.getServerName();
    assertTrue(state.isOpening() && oldServerName != null);
    // Now the region is stuck in opening
    // Let's forcefully re-assign it to trigger closing/opening
    // racing. This test is to make sure this scenario
    // is handled properly.
    MyRegionObserver.postOpenEnabled.set(false);
    ServerName destServerName = null;
    int numRS = TEST_UTIL.getHBaseCluster().getLiveRegionServerThreads().size();
    for (int i = 0; i < numRS; i++) {
      HRegionServer destServer = TEST_UTIL.getHBaseCluster().getRegionServer(i);
      if (!destServer.getServerName().equals(oldServerName)) {
        destServerName = destServer.getServerName();
        break;
      }
    }
    assertNotNull(destServerName);
    assertFalse("Region should be assigned on a new region server", oldServerName.equals(destServerName));
    List<HRegionInfo> regions = new ArrayList<>();
    regions.add(hri);
    am.assign(destServerName, regions);
    // let's check if it's assigned after it's out of transition
    am.waitOnRegionToClearRegionsInTransition(hri);
    assertTrue(am.waitForAssignment(hri));
    ServerName serverName = master.getAssignmentManager().getRegionStates().getRegionServerOfRegion(hri);
    TEST_UTIL.assertRegionOnlyOnServer(hri, serverName, 6000);
  } finally {
    MyRegionObserver.postOpenEnabled.set(false);
    TEST_UTIL.deleteTable(tableName);
  }
}
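The race above is driven through a coprocessor hook; the MyRegionObserver class itself is not part of the snippet. Below is a minimal sketch of what such an observer could look like, assuming HBase's BaseRegionObserver and a busy-wait on the flag; the class shape, sleep interval, and registration details are assumptions, not the project's actual inner class.

// Hypothetical sketch only; the real MyRegionObserver is an inner class of
// TestAssignmentManagerOnCluster and may differ in detail.
// Needs: org.apache.hadoop.hbase.coprocessor.{BaseRegionObserver, ObserverContext,
// RegionCoprocessorEnvironment} and java.util.concurrent.atomic.AtomicBoolean.
public static class MyRegionObserver extends BaseRegionObserver {
  // Flipped by tests to make region opens stall inside postOpen.
  static final AtomicBoolean postOpenEnabled = new AtomicBoolean(false);
  // Set once postOpen has been entered, so tests can wait on it.
  static volatile boolean postOpenCalled = false;

  @Override
  public void postOpen(ObserverContext<RegionCoprocessorEnvironment> c) {
    postOpenCalled = true;
    // Keep the open handler busy while the flag is set, leaving the region in OPENING.
    while (postOpenEnabled.get()) {
      try {
        Thread.sleep(100);
      } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
        return;
      }
    }
  }
}

For the hook to fire, such an observer would have to be registered on the mini cluster, typically via the hbase.coprocessor.region.classes configuration key before the cluster starts.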
Use of org.apache.hadoop.hbase.HRegionInfo in project hbase by apache:
class TestAssignmentManagerOnCluster, method testOfflineRegion.
/**
 * This tests offlining a region
 */
@Test(timeout = 60000)
public void testOfflineRegion() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  try {
    HRegionInfo hri = createTableAndGetOneRegion(tableName);
    RegionStates regionStates = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
    ServerName serverName = regionStates.getRegionServerOfRegion(hri);
    TEST_UTIL.assertRegionOnServer(hri, serverName, 200);
    admin.offline(hri.getRegionName());
    long timeoutTime = System.currentTimeMillis() + 800;
    while (true) {
      if (regionStates.getRegionByStateOfTable(tableName).get(RegionState.State.OFFLINE).contains(hri)) {
        break;
      }
      long now = System.currentTimeMillis();
      if (now > timeoutTime) {
        fail("Failed to offline the region in time");
        break;
      }
      Thread.sleep(10);
    }
    RegionState regionState = regionStates.getRegionState(hri);
    assertTrue(regionState.isOffline());
  } finally {
    TEST_UTIL.deleteTable(tableName);
  }
}
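testOfflineRegion relies on the helper createTableAndGetOneRegion, which the snippet does not show. A minimal sketch of such a helper follows, assuming the same TEST_UTIL, admin, and FAMILY fields as above; the split keys and region count are illustrative, not the real helper's values.

// Hypothetical helper sketch; the actual implementation in
// TestAssignmentManagerOnCluster may create the table differently.
HRegionInfo createTableAndGetOneRegion(final TableName tableName)
    throws IOException, InterruptedException {
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY));
  // Pre-split so the table has several regions to pick from.
  admin.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), 5);
  TEST_UTIL.waitUntilAllRegionsAssigned(tableName);
  List<HRegionInfo> regions = admin.getTableRegions(tableName);
  return regions.get(0);
}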
Use of org.apache.hadoop.hbase.HRegionInfo in project hbase by apache:
class TestAssignmentManagerOnCluster, method testAssignDisabledRegionBySSH.
/**
 * Test that a region of a disabled table is ignored by SSH (ServerShutdownHandler)
 */
@Test(timeout = 60000)
public void testAssignDisabledRegionBySSH() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  MyMaster master;
  try {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(new HColumnDescriptor(FAMILY));
    admin.createTable(desc);
    Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
    HRegionInfo hri = new HRegionInfo(desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
    MetaTableAccessor.addRegionToMeta(meta, hri);
    // Assign the region
    master = (MyMaster) cluster.getMaster();
    AssignmentManager am = master.getAssignmentManager();
    am.assign(hri);
    RegionStates regionStates = am.getRegionStates();
    ServerName metaServer = regionStates.getRegionServerOfRegion(HRegionInfo.FIRST_META_REGIONINFO);
    ServerName oldServerName = null;
    while (true) {
      assertTrue(am.waitForAssignment(hri));
      RegionState state = regionStates.getRegionState(hri);
      oldServerName = state.getServerName();
      if (!ServerName.isSameHostnameAndPort(oldServerName, metaServer)) {
        // Mark the hosting server aborted, but don't actually kill it.
        // It doesn't have meta on it.
        MyRegionServer.abortedServer = oldServerName;
        break;
      }
      int i = cluster.getServerWithMeta();
      HRegionServer rs = cluster.getRegionServer(i == 0 ? 1 : 0);
      oldServerName = rs.getServerName();
      master.move(hri.getEncodedNameAsBytes(), Bytes.toBytes(oldServerName.getServerName()));
    }
    // Make sure the region is assigned on the to-be-killed server
    assertTrue(regionStates.isRegionOnline(hri));
    assertEquals(oldServerName, regionStates.getRegionServerOfRegion(hri));
    // Disable the table now.
    master.disableTable(hri.getTable(), HConstants.NO_NONCE, HConstants.NO_NONCE);
    // Kill the hosting server, which doesn't have meta on it.
    cluster.killRegionServer(oldServerName);
    cluster.waitForRegionServerToStop(oldServerName, -1);
    ServerManager serverManager = master.getServerManager();
    while (!serverManager.isServerDead(oldServerName) || serverManager.getDeadServers().areDeadServersInProgress()) {
      Thread.sleep(100);
    }
    // Wait till no more RIT, the region should be offline.
    TEST_UTIL.waitUntilNoRegionsInTransition(60000);
    assertTrue(regionStates.isRegionOffline(hri));
  } finally {
    MyRegionServer.abortedServer = null;
    TEST_UTIL.deleteTable(tableName);
    cluster.startRegionServer();
  }
}
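The manual sleep loop that waits for the dead server to be fully processed could also be written with the Waiter support in HBaseTestingUtility. A sketch follows, assuming a final copy of the killed server's name; the variable names and timeout are illustrative.

// Sketch of the same wait expressed as a predicate; serverManager and
// killedServer are assumed to be (effectively) final locals.
final ServerName killedServer = oldServerName;
TEST_UTIL.waitFor(60000, new Waiter.Predicate<Exception>() {
  @Override
  public boolean evaluate() throws Exception {
    return serverManager.isServerDead(killedServer)
      && !serverManager.getDeadServers().areDeadServersInProgress();
  }
});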
Use of org.apache.hadoop.hbase.HRegionInfo in project hbase by apache:
class TestAssignmentManagerOnCluster, method testAssignDisabledRegion.
/**
 * Test force unassign/assign a region of a disabled table
 */
@Test(timeout = 60000)
public void testAssignDisabledRegion() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  MyMaster master = null;
  try {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(new HColumnDescriptor(FAMILY));
    admin.createTable(desc);
    Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
    HRegionInfo hri = new HRegionInfo(desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
    MetaTableAccessor.addRegionToMeta(meta, hri);
    // Assign the region
    master = (MyMaster) cluster.getMaster();
    AssignmentManager am = master.getAssignmentManager();
    RegionStates regionStates = am.getRegionStates();
    assertTrue(TEST_UTIL.assignRegion(hri));
    // Disable the table
    admin.disableTable(tableName);
    assertTrue(regionStates.isRegionOffline(hri));
    // You can't assign a disabled region
    am.assign(hri, true);
    assertTrue(regionStates.isRegionOffline(hri));
    // You can't unassign a disabled region either
    am.unassign(hri);
    assertTrue(regionStates.isRegionOffline(hri));
  } finally {
    TEST_UTIL.deleteTable(tableName);
  }
}
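All four snippets above reference TEST_UTIL, admin, FAMILY, and the JUnit name rule without showing their declarations, and the SSH test additionally assumes custom MyMaster/MyRegionServer classes plugged into the mini cluster. The following sketch shows the basic scaffolding those fields imply, assuming JUnit 4 and a three-node mini cluster; the family name and cluster size are assumptions, and the custom master/region-server wiring is omitted.

// Hypothetical scaffolding sketch; the real fields live in TestAssignmentManagerOnCluster.
private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private static final byte[] FAMILY = Bytes.toBytes("fam1");  // assumed family name
private static Admin admin;

@Rule
public TestName name = new TestName();  // supplies name.getMethodName()

@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.startMiniCluster(3);  // assumed cluster size
  admin = TEST_UTIL.getConnection().getAdmin();
}

@AfterClass
public static void tearDownAfterClass() throws Exception {
  TEST_UTIL.shutdownMiniCluster();
}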
Use of org.apache.hadoop.hbase.HRegionInfo in project hbase by apache:
class TestCatalogJanitor, method parentWithSpecifiedEndKeyCleanedEvenIfDaughterGoneFirst.
/**
 * Make sure a parent with the specified end key gets cleaned up even if its daughter is cleaned up before it.
 *
 * @param rootDir the test case name, used as the HBase testing utility root
 * @param lastEndKey the end key of the split parent
 * @throws IOException
 * @throws InterruptedException
 */
private void parentWithSpecifiedEndKeyCleanedEvenIfDaughterGoneFirst(final String rootDir, final byte[] lastEndKey) throws IOException, InterruptedException {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  setRootDirAndCleanIt(htu, rootDir);
  MasterServices services = new MockMasterServices(htu);
  CatalogJanitor janitor = new CatalogJanitor(services);
  final HTableDescriptor htd = createHTableDescriptor();
  // Create regions: aaa->{lastEndKey}, aaa->ccc, aaa->bbb, bbb->ccc, etc.
  // Parent
  HRegionInfo parent = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), lastEndKey);
  // Sleep a second, else the encoded name on these regions comes out the
  // same for all regions with the same start key created in the same second.
  Thread.sleep(1001);
  // Daughter a
  HRegionInfo splita = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
  Thread.sleep(1001);
  // Make daughters of daughter a; splitaa and splitab.
  HRegionInfo splitaa = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("bbb"));
  HRegionInfo splitab = new HRegionInfo(htd.getTableName(), Bytes.toBytes("bbb"), Bytes.toBytes("ccc"));
  // Daughter b
  HRegionInfo splitb = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"), lastEndKey);
  Thread.sleep(1001);
  // Make daughters of daughter b; splitba and splitbb.
  HRegionInfo splitba = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"), Bytes.toBytes("ddd"));
  HRegionInfo splitbb = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ddd"), lastEndKey);
  // First test that our Comparator works right up in CatalogJanitor.
  // Just for kicks.
  SortedMap<HRegionInfo, Result> regions = new TreeMap<>(new CatalogJanitor.SplitParentFirstComparator());
  // Now make sure that this regions map sorts as we expect it to.
  regions.put(parent, createResult(parent, splita, splitb));
  regions.put(splitb, createResult(splitb, splitba, splitbb));
  regions.put(splita, createResult(splita, splitaa, splitab));
  // Assert it's properly sorted.
  int index = 0;
  for (Map.Entry<HRegionInfo, Result> e : regions.entrySet()) {
    if (index == 0) {
      assertTrue(e.getKey().getEncodedName().equals(parent.getEncodedName()));
    } else if (index == 1) {
      assertTrue(e.getKey().getEncodedName().equals(splita.getEncodedName()));
    } else if (index == 2) {
      assertTrue(e.getKey().getEncodedName().equals(splitb.getEncodedName()));
    }
    index++;
  }
  // Now play around with the cleanParent function. Create a ref from splita
  // up to the parent.
  Path splitaRef = createReferences(services, htd, parent, splita, Bytes.toBytes("ccc"), false);
  // Make sure the actual super parent sticks around because splita has a ref.
  assertFalse(janitor.cleanParent(parent, regions.get(parent)));
  // splitba and splitbb do not have dirs in fs. That means that if
  // we test splitb, it should get cleaned up.
  assertTrue(janitor.cleanParent(splitb, regions.get(splitb)));
  // Now remove ref from splita to parent... so parent can be let go and so
  // the daughter splita can be split (can't split if still references).
  // BUT make the timing such that the daughter gets cleaned up before we
  // can get a chance to let go of the parent.
  FileSystem fs = FileSystem.get(htu.getConfiguration());
  assertTrue(fs.delete(splitaRef, true));
  // Create the refs from daughters of splita.
  Path splitaaRef = createReferences(services, htd, splita, splitaa, Bytes.toBytes("bbb"), false);
  Path splitabRef = createReferences(services, htd, splita, splitab, Bytes.toBytes("bbb"), true);
  // Test splita. It should stick around because of the references from splitaa and splitab.
  assertFalse(janitor.cleanParent(splita, regions.get(splita)));
  // Now clean up the daughter splita first: remove the references from its own daughters.
  assertTrue(fs.delete(splitaaRef, true));
  assertTrue(fs.delete(splitabRef, true));
  assertTrue(janitor.cleanParent(splita, regions.get(splita)));
  // The super parent should get cleaned up now that both splita and splitb are gone.
  assertTrue(janitor.cleanParent(parent, regions.get(parent)));
  services.stop("test finished");
  janitor.cancel(true);
}
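The CatalogJanitor test leans on two helpers that the snippet does not include: createResult, which builds a synthetic hbase:meta Result for the parent row (not sketched here), and createReferences. Below is a sketch of what a createReferences-style helper could look like, assuming it writes a top or bottom Reference file into the daughter's store directory so cleanParent still sees the parent as referenced; the path construction and file naming are illustrative.

// Hypothetical sketch; the real helper in TestCatalogJanitor may build the
// reference path differently.
private Path createReferences(final MasterServices services, final HTableDescriptor htd,
    final HRegionInfo parent, final HRegionInfo daughter, final byte[] midkey, final boolean top)
    throws IOException {
  Path rootdir = services.getMasterFileSystem().getRootDir();
  Path tabledir = FSUtils.getTableDir(rootdir, parent.getTable());
  Path storedir = HStore.getStoreHomedir(tabledir, daughter, htd.getColumnFamilies()[0].getName());
  // A "top" reference points at the upper half of the parent's key range,
  // a "bottom" reference at the lower half.
  Reference ref = top ? Reference.createTopReference(midkey) : Reference.createBottomReference(midkey);
  long now = System.currentTimeMillis();
  // Reference file name: <timestamp>.<encoded name of the referenced parent region>
  Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
  FileSystem fs = services.getMasterFileSystem().getFileSystem();
  ref.write(fs, p);
  return p;
}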