Use of org.apache.hadoop.hbase.regionserver.HRegionServer in project hbase by apache.
Class HBaseTestingUtil, method assertRegionOnlyOnServer.
/**
* Check to make sure the region is open on the specified region server, but not on any other one.
*/
public void assertRegionOnlyOnServer(final RegionInfo hri, final ServerName server,
    final long timeout) throws IOException, InterruptedException {
  long timeoutTime = EnvironmentEdgeManager.currentTime() + timeout;
  while (true) {
    List<RegionInfo> regions = getAdmin().getRegions(server);
    if (regions.stream().anyMatch(r -> RegionInfo.COMPARATOR.compare(r, hri) == 0)) {
      List<JVMClusterUtil.RegionServerThread> rsThreads =
        getHBaseCluster().getLiveRegionServerThreads();
      for (JVMClusterUtil.RegionServerThread rsThread : rsThreads) {
        HRegionServer rs = rsThread.getRegionServer();
        if (server.equals(rs.getServerName())) {
          continue;
        }
        Collection<HRegion> hrs = rs.getOnlineRegionsLocalContext();
        for (HRegion r : hrs) {
          assertTrue("Region should not be double assigned",
            r.getRegionInfo().getRegionId() != hri.getRegionId());
        }
      }
      // good, we are happy
      return;
    }
    long now = EnvironmentEdgeManager.currentTime();
    if (now > timeoutTime) {
      break;
    }
    Thread.sleep(10);
  }
  fail("Could not find region " + hri.getRegionNameAsString() + " on server " + server);
}
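A hedged usage sketch, not taken from the listing above: the table name, destination server index, and timeout are illustrative, and it assumes the Admin.move(byte[], ServerName) overload available in recent HBase releases. The idea is to move a region and then use the helper above to confirm it is assigned to exactly one server.

// Illustrative caller of assertRegionOnlyOnServer (values are hypothetical).
HBaseTestingUtil util = TEST_UTIL;
RegionInfo hri = util.getAdmin().getRegions(TableName.valueOf("testTable")).get(0);
ServerName dest = util.getHBaseCluster().getRegionServer(1).getServerName();
util.getAdmin().move(hri.getEncodedNameAsBytes(), dest);
// Fails the test if the region is missing on dest or double assigned elsewhere.
util.assertRegionOnlyOnServer(hri, dest, 60000);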
Use of org.apache.hadoop.hbase.regionserver.HRegionServer in project hbase by apache.
Class TestChangingEncoding, method compactAndWait.
private void compactAndWait() throws IOException, InterruptedException {
  LOG.debug("Compacting table " + tableName);
  HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
  Admin admin = TEST_UTIL.getAdmin();
  admin.majorCompact(tableName);
  // Waiting for the compaction to start, at least .5s.
  final long maxWaitime = EnvironmentEdgeManager.currentTime() + 500;
  boolean cont;
  do {
    cont = rs.getCompactSplitThread().getCompactionQueueSize() == 0;
    Threads.sleep(1);
  } while (cont && EnvironmentEdgeManager.currentTime() < maxWaitime);
  while (rs.getCompactSplitThread().getCompactionQueueSize() > 0) {
    Threads.sleep(1);
  }
  LOG.debug("Compaction queue size reached 0, continuing");
}
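The same drain-the-compaction-queue polling recurs in other examples in this listing. A hedged sketch of a reusable helper (the method name and timeout parameter are hypothetical, not part of HBase) that uses only the calls shown above:

// Hypothetical helper: poll the region server's compaction queue until it is
// empty or the timeout elapses.
private static void waitForCompactionQueueToDrain(HRegionServer rs, long timeoutMs)
    throws InterruptedException {
  long deadline = EnvironmentEdgeManager.currentTime() + timeoutMs;
  while (rs.getCompactSplitThread().getCompactionQueueSize() > 0) {
    if (EnvironmentEdgeManager.currentTime() > deadline) {
      throw new AssertionError("Compaction queue did not drain within " + timeoutMs + " ms");
    }
    Threads.sleep(50);
  }
}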
Use of org.apache.hadoop.hbase.regionserver.HRegionServer in project hbase by apache.
Class TestLoadAndSwitchEncodeOnDisk, method loadTest.
@Override
@Test
public void loadTest() throws Exception {
  Admin admin = TEST_UTIL.getAdmin();
  // used for table setup
  compression = Compression.Algorithm.GZ;
  super.loadTest();
  ColumnFamilyDescriptor hcd = getColumnDesc(admin);
  System.err.println("\nDisabling encode-on-disk. Old column descriptor: " + hcd + "\n");
  Table t = TEST_UTIL.getConnection().getTable(TABLE);
  assertAllOnLine(t);
  admin.disableTable(TABLE);
  admin.modifyColumnFamily(TABLE, hcd);
  System.err.println("\nRe-enabling table\n");
  admin.enableTable(TABLE);
  System.err.println("\nNew column descriptor: " + getColumnDesc(admin) + "\n");
  // The table may not have all regions on line yet. Assert online before
  // moving to major compact.
  assertAllOnLine(t);
  System.err.println("\nCompacting the table\n");
  admin.majorCompact(TABLE);
  // Wait until compaction completes
  Threads.sleepWithoutInterrupt(5000);
  HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
  while (rs.getCompactSplitThread().getCompactionQueueSize() > 0) {
    Threads.sleep(50);
  }
  System.err.println("\nDone with the test, shutting down the cluster\n");
}
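The test above reapplies the existing descriptor via admin.modifyColumnFamily(TABLE, hcd). When the goal is to actually switch the on-disk encoding of the column family, a modified descriptor is typically built first. A hedged sketch, not what this test does; the chosen encoding is illustrative:

// Illustrative variation: derive a descriptor with a different data block
// encoding and apply it, assuming the table is already disabled as above.
ColumnFamilyDescriptor modified = ColumnFamilyDescriptorBuilder.newBuilder(hcd)
  .setDataBlockEncoding(DataBlockEncoding.NONE)
  .build();
admin.modifyColumnFamily(TABLE, modified);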
Use of org.apache.hadoop.hbase.regionserver.HRegionServer in project hbase by apache.
Class TestSlowLogAccessor, method setUp.
@Before
public void setUp() throws Exception {
  HRegionServer hRegionServer = HBASE_TESTING_UTILITY.getMiniHBaseCluster().getRegionServer(0);
  this.namedQueueRecorder = hRegionServer.getNamedQueueRecorder();
}
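The setup above grabs the recorder from region server 0 only. A hedged sketch of an illustrative variation (not from the hbase source) for a mini cluster with several region servers, built from the same calls used elsewhere in this listing:

// Illustrative variation: each live region server carries its own recorder.
for (JVMClusterUtil.RegionServerThread rsThread :
    HBASE_TESTING_UTILITY.getMiniHBaseCluster().getLiveRegionServerThreads()) {
  HRegionServer rs = rsThread.getRegionServer();
  NamedQueueRecorder recorder = rs.getNamedQueueRecorder();
  // ... run per-server slow-log assertions here ...
}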
Use of org.apache.hadoop.hbase.regionserver.HRegionServer in project hbase by apache.
Class TestAsyncRegionAdminApi, method testFlushTableAndRegion.
@Test
public void testFlushTableAndRegion() throws Exception {
  RegionInfo hri = createTableAndGetOneRegion(tableName);
  ServerName serverName = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
    .getRegionStates().getRegionServerOfRegion(hri);
  HRegionServer regionServer = TEST_UTIL.getHBaseCluster().getLiveRegionServerThreads().stream()
    .map(rsThread -> rsThread.getRegionServer())
    .filter(rs -> rs.getServerName().equals(serverName)).findFirst().get();
  // write a put into the specific region
  ASYNC_CONN.getTable(tableName)
    .put(new Put(hri.getStartKey()).addColumn(FAMILY, FAMILY_0, Bytes.toBytes("value-1"))).join();
  assertTrue(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreDataSize() > 0);
  // flush the region and wait until the flush operation has finished
  LOG.info("flushing region: " + Bytes.toStringBinary(hri.getRegionName()));
  admin.flushRegion(hri.getRegionName()).get();
  LOG.info("blocking until flush is complete: " + Bytes.toStringBinary(hri.getRegionName()));
  Threads.sleepWithoutInterrupt(500);
  while (regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreDataSize() > 0) {
    Threads.sleep(50);
  }
  // check the memstore.
  assertEquals(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreDataSize(), 0);
  // write another put into the specific region
  ASYNC_CONN.getTable(tableName)
    .put(new Put(hri.getStartKey()).addColumn(FAMILY, FAMILY_0, Bytes.toBytes("value-2"))).join();
  assertTrue(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreDataSize() > 0);
  admin.flush(tableName).get();
  Threads.sleepWithoutInterrupt(500);
  while (regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreDataSize() > 0) {
    Threads.sleep(50);
  }
  // check the memstore.
  assertEquals(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreDataSize(), 0);
}
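The wait-for-flush polling appears twice in the test above. A hedged sketch of a small helper that factors it out (the method name and timeout are hypothetical, not part of HBase), using only the calls already shown:

// Hypothetical helper: wait until the region's memstore is empty, failing if
// the flush does not complete within the timeout.
private static void waitForMemStoreToEmpty(HRegionServer rs, byte[] regionName, long timeoutMs) {
  long deadline = EnvironmentEdgeManager.currentTime() + timeoutMs;
  while (rs.getOnlineRegion(regionName).getMemStoreDataSize() > 0) {
    if (EnvironmentEdgeManager.currentTime() > deadline) {
      throw new AssertionError("Flush did not complete within " + timeoutMs + " ms");
    }
    Threads.sleep(50);
  }
}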