Use of org.apache.hadoop.hbase.regionserver.HRegionServer in project hbase by apache.
From class TestLoadAndSwitchEncodeOnDisk, method loadTest:
@Test(timeout = TIMEOUT_MS)
public void loadTest() throws Exception {
  Admin admin = TEST_UTIL.getAdmin();

  // used for table setup
  compression = Compression.Algorithm.GZ;
  super.loadTest();

  HColumnDescriptor hcd = getColumnDesc(admin);
  System.err.println("\nDisabling encode-on-disk. Old column descriptor: " + hcd + "\n");
  Table t = TEST_UTIL.getConnection().getTable(TABLE);
  assertAllOnLine(t);

  admin.disableTable(TABLE);
  admin.modifyColumnFamily(TABLE, hcd);

  System.err.println("\nRe-enabling table\n");
  admin.enableTable(TABLE);
  System.err.println("\nNew column descriptor: " + getColumnDesc(admin) + "\n");

  // The table may not have all regions on line yet. Assert online before
  // moving to major compact.
  assertAllOnLine(t);

  System.err.println("\nCompacting the table\n");
  admin.majorCompact(TABLE);

  // Wait until compaction completes
  Threads.sleepWithoutInterrupt(5000);
  HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
  while (rs.compactSplitThread.getCompactionQueueSize() > 0) {
    Threads.sleep(50);
  }

  System.err.println("\nDone with the test, shutting down the cluster\n");
}
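The test above simply re-applies the existing column descriptor after disabling the table. If the goal were to actually switch the column family's on-disk data block encoding, the change would be made on the descriptor before the modifyColumnFamily call. A minimal, hypothetical sketch of that step, using the same Admin API as the test plus the standard DataBlockEncoding enum (this helper is not part of TestLoadAndSwitchEncodeOnDisk):

// Hypothetical helper, not from the test above: switch the column family's
// data block encoding and re-apply the descriptor through the Admin API.
// DataBlockEncoding comes from org.apache.hadoop.hbase.io.encoding.
private static void switchEncoding(Admin admin, TableName table, HColumnDescriptor hcd,
    DataBlockEncoding encoding) throws IOException {
  hcd.setDataBlockEncoding(encoding);  // e.g. DataBlockEncoding.NONE or DataBlockEncoding.PREFIX
  admin.disableTable(table);
  admin.modifyColumnFamily(table, hcd);
  admin.enableTable(table);
}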
Use of org.apache.hadoop.hbase.regionserver.HRegionServer in project hbase by apache.
From class TestChangingEncoding, method compactAndWait:
private void compactAndWait() throws IOException, InterruptedException {
  LOG.debug("Compacting table " + tableName);
  HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
  Admin admin = TEST_UTIL.getAdmin();
  admin.majorCompact(tableName);

  // Waiting for the compaction to start, at least .5s.
  final long maxWaitime = System.currentTimeMillis() + 500;
  boolean cont;
  do {
    cont = rs.compactSplitThread.getCompactionQueueSize() == 0;
    Threads.sleep(1);
  } while (cont && System.currentTimeMillis() < maxWaitime);

  while (rs.compactSplitThread.getCompactionQueueSize() > 0) {
    Threads.sleep(1);
  }
  LOG.debug("Compaction queue size reached 0, continuing");
}
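Both snippets above poll compactSplitThread.getCompactionQueueSize() to detect when compactions finish, one with a fixed sleep beforehand and one with a start/drain double loop. A minimal standalone sketch of the same polling pattern with a hard deadline, assuming the caller has the same package-level access to compactSplitThread that these regionserver tests have (a hypothetical helper, not from the hbase code base):

// Hypothetical helper: block until the region server's compaction queue is
// empty, or until timeoutMs elapses. Uses the same HRegionServer.compactSplitThread
// field and org.apache.hadoop.hbase.util.Threads utility as the tests above.
private static boolean waitForCompactionQueueToDrain(HRegionServer rs, long timeoutMs) {
  final long deadline = System.currentTimeMillis() + timeoutMs;
  while (rs.compactSplitThread.getCompactionQueueSize() > 0) {
    if (System.currentTimeMillis() >= deadline) {
      return false;  // timed out while compactions were still queued
    }
    Threads.sleep(50);
  }
  return true;  // queue drained within the deadline
}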
Use of org.apache.hadoop.hbase.regionserver.HRegionServer in project hbase by apache.
From class TestRegionPlacement, method verifyRegionServerUpdated:
/**
 * Verify all the online region servers have been updated to the
 * latest assignment plan.
 * @param plan the assignment plan to verify against
 * @throws IOException
 */
private void verifyRegionServerUpdated(FavoredNodesPlan plan) throws IOException {
  // Verify all region servers contain the correct favored nodes information
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  for (int i = 0; i < SLAVES; i++) {
    HRegionServer rs = cluster.getRegionServer(i);
    for (Region region : rs.getOnlineRegions(TableName.valueOf("testRegionAssignment"))) {
      InetSocketAddress[] favoredSocketAddress =
          rs.getFavoredNodesForRegion(region.getRegionInfo().getEncodedName());
      List<ServerName> favoredServerList = plan.getAssignmentMap().get(region.getRegionInfo());

      // except for hbase:meta and ROOT
      if (favoredServerList == null) {
        HTableDescriptor desc = region.getTableDesc();
        // Verify they are ROOT and hbase:meta regions since no favored nodes
        assertNull(favoredSocketAddress);
        assertTrue("User region " + region.getTableDesc().getTableName()
            + " should have favored nodes",
            (desc.isRootRegion() || desc.isMetaRegion()));
      } else {
        // For user region, the favored nodes in the region server should be
        // identical to favored nodes in the assignmentPlan
        assertTrue(favoredSocketAddress.length == favoredServerList.size());
        assertTrue(favoredServerList.size() > 0);
        for (int j = 0; j < favoredServerList.size(); j++) {
          InetSocketAddress addrFromRS = favoredSocketAddress[j];
          InetSocketAddress addrFromPlan = InetSocketAddress.createUnresolved(
              favoredServerList.get(j).getHostname(), favoredServerList.get(j).getPort());
          assertNotNull(addrFromRS);
          assertNotNull(addrFromPlan);
          assertTrue("Region server " + rs.getServerName().getHostAndPort() + " has the "
              + positions[j] + " for region " + region.getRegionInfo().getRegionNameAsString()
              + " is " + addrFromRS + " which is inconsistent with the plan " + addrFromPlan,
              addrFromRS.equals(addrFromPlan));
        }
      }
    }
  }
}
Use of org.apache.hadoop.hbase.regionserver.HRegionServer in project hbase by apache.
From class TestWarmupRegion, method testWarmup:
/**
 * Basic client side validation of HBASE-4536
 */
@Test
public void testWarmup() throws Exception {
  int serverid = 0;
  HRegion region = TEST_UTIL.getMiniHBaseCluster().getRegions(TABLENAME).get(0);
  HRegionInfo info = region.getRegionInfo();
  runwarmup();
  for (int i = 0; i < 10; i++) {
    HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(serverid);
    byte[] destName = Bytes.toBytes(rs.getServerName().toString());
    TEST_UTIL.getMiniHBaseCluster().getMaster().move(info.getEncodedNameAsBytes(), destName);
    serverid = (serverid + 1) % 2;
  }
}
Use of org.apache.hadoop.hbase.regionserver.HRegionServer in project hbase by apache.
From class TestAsyncRegionAdminApi, method testCloseRegion:
@Test
public void testCloseRegion() throws Exception {
  TableName TABLENAME = TableName.valueOf("TestHBACloseRegion");
  createTableWithDefaultConf(TABLENAME);

  HRegionInfo info = null;
  HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TABLENAME);
  List<HRegionInfo> onlineRegions = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices());
  for (HRegionInfo regionInfo : onlineRegions) {
    if (!regionInfo.getTable().isSystemTable()) {
      info = regionInfo;
      boolean closed = admin.closeRegionWithEncodedRegionName(
          regionInfo.getEncodedName(), rs.getServerName().getServerName()).get();
      assertTrue(closed);
    }
  }

  boolean isInList = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices()).contains(info);
  long timeout = System.currentTimeMillis() + 10000;
  while ((System.currentTimeMillis() < timeout) && (isInList)) {
    Thread.sleep(100);
    isInList = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices()).contains(info);
  }
  assertFalse("The region should not be present in online regions list.", isInList);
}
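The tail of this test is another deadline-polling loop, this time over the server's online region list. The same idea extracted into a small hypothetical helper around the ProtobufUtil.getOnlineRegions call used above (an illustrative sketch, not part of TestAsyncRegionAdminApi):

// Hypothetical helper, not from the test above: poll the region server's online
// region list until the given region disappears or the deadline passes.
private static boolean waitUntilRegionOffline(HRegionServer rs, HRegionInfo info,
    long timeoutMs) throws IOException, InterruptedException {
  long deadline = System.currentTimeMillis() + timeoutMs;
  while (System.currentTimeMillis() < deadline) {
    if (!ProtobufUtil.getOnlineRegions(rs.getRSRpcServices()).contains(info)) {
      return true;  // region is no longer online on this server
    }
    Thread.sleep(100);
  }
  return false;  // region still listed when the deadline expired
}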