Use of org.apache.hadoop.hbase.regionserver.HRegionServer in project hbase by apache.
The class TestAsyncClusterAdminApi, method testRollWALWALWriter.
@Test
public void testRollWALWALWriter() throws Exception {
  setUpforLogRolling();
  String className = this.getClass().getName();
  StringBuilder v = new StringBuilder(className);
  while (v.length() < 1000) {
    v.append(className);
  }
  byte[] value = Bytes.toBytes(v.toString());
  HRegionServer regionServer = startAndWriteData(tableName, value);
  LOG.info("after writing there are " +
    AbstractFSWALProvider.getNumRolledLogFiles(regionServer.getWAL(null)) + " log files");
  // flush all regions
  for (HRegion r : regionServer.getOnlineRegionsLocalContext()) {
    r.flush(true);
  }
  admin.rollWALWriter(regionServer.getServerName()).join();
  int count = AbstractFSWALProvider.getNumRolledLogFiles(regionServer.getWAL(null));
  LOG.info("after flushing all regions and rolling logs there are " + count + " log files");
  assertTrue(("actual count: " + count), count <= 2);
}
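The helpers setUpforLogRolling() and startAndWriteData() belong to the surrounding test class and are not shown above. The sketch below illustrates what startAndWriteData() might do, assuming setUpforLogRolling() has already configured a small WAL block size and that the class provides a createTableWithDefaultConf() helper; it is not the actual HBase implementation.

// Hypothetical sketch, not the real HBase helper: write enough rows that the
// small WAL block size configured by setUpforLogRolling() forces several log
// rolls, then return the region server hosting the table. Row count, qualifier
// name, and sleep intervals are illustrative assumptions.
private HRegionServer startAndWriteData(TableName tableName, byte[] value) throws Exception {
  createTableWithDefaultConf(tableName);  // assumed helper of the test class
  HRegionServer regionServer = TEST_UTIL.getRSForFirstRegionInTable(tableName);
  try (Table table = TEST_UTIL.getConnection().getTable(tableName)) {
    for (int i = 1; i <= 256; i++) {
      Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
      put.addColumn(HConstants.CATALOG_FAMILY, Bytes.toBytes("col"), value);
      table.put(put);
      if (i % 32 == 0) {
        // Give the WAL a chance to roll between batches.
        Thread.sleep(1000);
      }
    }
  }
  return regionServer;
}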
Use of org.apache.hadoop.hbase.regionserver.HRegionServer in project hbase by apache.
The class TestLogRollAbort, method testRSAbortWithUnflushedEdits.
/**
 * Tests that RegionServer aborts if we hit an error closing the WAL when
 * there are unsynced WAL edits. See HBASE-4282.
 */
@Test
public void testRSAbortWithUnflushedEdits() throws Exception {
  LOG.info("Starting testRSAbortWithUnflushedEdits()");
  // When the hbase:meta table can be opened, the region servers are running
  TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME).close();
  // Create the test table and open it
  TableName tableName = TableName.valueOf(this.getClass().getSimpleName());
  TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build();
  admin.createTable(desc);
  Table table = TEST_UTIL.getConnection().getTable(tableName);
  try {
    HRegionServer server = TEST_UTIL.getRSForFirstRegionInTable(tableName);
    WAL log = server.getWAL(null);
    Put p = new Put(Bytes.toBytes("row2001"));
    p.addColumn(HConstants.CATALOG_FAMILY, Bytes.toBytes("col"), Bytes.toBytes(2001));
    table.put(p);
    log.sync();
    p = new Put(Bytes.toBytes("row2002"));
    p.addColumn(HConstants.CATALOG_FAMILY, Bytes.toBytes("col"), Bytes.toBytes(2002));
    table.put(p);
    dfsCluster.restartDataNodes();
    LOG.info("Restarted datanodes");
    try {
      log.rollWriter(true);
    } catch (FailedLogCloseException flce) {
      // Expected exception. We used to expect that there would be unsynced appends, but this
      // is not reliable now that sync plays a role in WAL rolling. The above puts also now
      // call sync.
    } catch (Throwable t) {
      LOG.error(HBaseMarkers.FATAL, "FAILED TEST: Got wrong exception", t);
    }
  } finally {
    table.close();
  }
}
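The catch block above only tolerates the expected FailedLogCloseException; it does not itself assert that the region server aborted. A test that wanted to check the abort explicitly could wait on the server state, for example (a sketch added here for illustration, not part of the original test; the timeout is arbitrary):

// Illustrative only: wait up to 10 seconds for the region server to report
// that it has aborted after the failed WAL close, then assert it.
TEST_UTIL.waitFor(10000, () -> server.isAborted());
assertTrue("region server should abort after failing to close the WAL", server.isAborted());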
Use of org.apache.hadoop.hbase.regionserver.HRegionServer in project hbase by apache.
The class AbstractTestLogRollPeriod, method testNoEdits.
/**
 * Tests that the LogRoller performs the roll even if there are no edits
 */
@Test
public void testNoEdits() throws Exception {
  TableName tableName = TableName.valueOf("TestLogRollPeriodNoEdits");
  TEST_UTIL.createTable(tableName, "cf");
  try {
    Table table = TEST_UTIL.getConnection().getTable(tableName);
    try {
      HRegionServer server = TEST_UTIL.getRSForFirstRegionInTable(tableName);
      WAL log = server.getWAL(null);
      checkMinLogRolls(log, 5);
    } finally {
      table.close();
    }
  } finally {
    TEST_UTIL.deleteTable(tableName);
  }
}
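checkMinLogRolls(log, 5) is a helper of AbstractTestLogRollPeriod that is not reproduced on this page. The sketch below conveys the idea, assuming a WALActionsListener that counts rolls; the waiting strategy and exact names are illustrative rather than the verbatim HBase code.

// Sketch of the idea behind checkMinLogRolls: register a listener that records
// each WAL roll, wait long enough for the periodic LogRoller to fire at least
// `minRolls` times, then verify the count. Timings are illustrative.
private void checkMinLogRolls(final WAL log, final int minRolls) throws Exception {
  final List<Path> paths = new ArrayList<>();
  WALActionsListener listener = new WALActionsListener() {
    @Override
    public void postLogRoll(Path oldFile, Path newFile) {
      paths.add(newFile);
    }
  };
  log.registerWALActionsListener(listener);
  // Wait a little longer than minRolls roll periods before checking.
  Thread.sleep((minRolls + 1) * LOG_ROLL_PERIOD);
  log.unregisterWALActionsListener(listener);
  assertTrue("expected at least " + minRolls + " rolls, got " + paths.size(),
    paths.size() >= minRolls);
}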
Use of org.apache.hadoop.hbase.regionserver.HRegionServer in project hbase by apache.
The class AbstractTestLogRollPeriod, method testWithEdits.
/**
 * Tests that the LogRoller performs the roll with some data in the log
 */
@Test
public void testWithEdits() throws Exception {
  final TableName tableName = TableName.valueOf("TestLogRollPeriodWithEdits");
  final String family = "cf";
  TEST_UTIL.createTable(tableName, family);
  try {
    HRegionServer server = TEST_UTIL.getRSForFirstRegionInTable(tableName);
    WAL log = server.getWAL(null);
    final Table table = TEST_UTIL.getConnection().getTable(tableName);
    Thread writerThread = new Thread("writer") {
      @Override
      public void run() {
        try {
          long row = 0;
          while (!interrupted()) {
            Put p = new Put(Bytes.toBytes(String.format("row%d", row)));
            p.addColumn(Bytes.toBytes(family), Bytes.toBytes("col"), Bytes.toBytes(row));
            table.put(p);
            row++;
            Thread.sleep(LOG_ROLL_PERIOD / 16);
          }
        } catch (Exception e) {
          LOG.warn(e.toString(), e);
        }
      }
    };
    try {
      writerThread.start();
      checkMinLogRolls(log, 5);
    } finally {
      writerThread.interrupt();
      writerThread.join();
      table.close();
    }
  } finally {
    TEST_UTIL.deleteTable(tableName);
  }
}
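Both tests rely on a LOG_ROLL_PERIOD constant and on the mini cluster being started with a short periodic roll interval. A sketch of that setup, assuming the standard hbase.regionserver.logroll.period property and an arbitrary 4-second period, might look like:

// Sketch of the class setup assumed by the tests above: start the mini cluster
// with a short periodic WAL roll interval so the tests finish quickly. The
// period value here is chosen for illustration.
private static final long LOG_ROLL_PERIOD = 4000;

@BeforeClass
public static void setUpBeforeClass() throws Exception {
  // Interval, in milliseconds, at which the LogRoller rolls the WAL even when
  // it contains no edits.
  TEST_UTIL.getConfiguration().setLong("hbase.regionserver.logroll.period", LOG_ROLL_PERIOD);
  TEST_UTIL.startMiniCluster();
}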
Use of org.apache.hadoop.hbase.regionserver.HRegionServer in project hbase by apache.
The class TestFlushWithThroughputController, method getStoreWithName.
private HStore getStoreWithName(TableName tableName) {
  SingleProcessHBaseCluster cluster = hbtu.getMiniHBaseCluster();
  List<JVMClusterUtil.RegionServerThread> rsts = cluster.getRegionServerThreads();
  for (int i = 0; i < cluster.getRegionServerThreads().size(); i++) {
    HRegionServer hrs = rsts.get(i).getRegionServer();
    for (Region region : hrs.getRegions(tableName)) {
      return ((HRegion) region).getStores().iterator().next();
    }
  }
  return null;
}
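As a usage illustration (not a verbatim excerpt from TestFlushWithThroughputController), the helper can be used to grab the single store of the test table and check that a flush drains its memstore; the calls below are assumptions about how one might use it:

// Usage sketch only: locate the store for the test table, record its memstore
// data size, flush the table, and verify the memstore did not grow.
HStore store = getStoreWithName(tableName);
assertNotNull("table should be online on some region server", store);
long before = store.getMemStoreSize().getDataSize();
hbtu.getAdmin().flush(tableName);
// Once the flush has gone through, the memstore data size should not have grown.
assertTrue(store.getMemStoreSize().getDataSize() <= before);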