Use of org.apache.hadoop.hbase.regionserver.HRegion in project hbase by apache: the class AbstractTestWALReplay, method testReplayEditsAfterAbortingFlush.
/**
 * Test that we can recover the data correctly after aborting a flush. In the
 * test, we first abort a flush after writing some data, then write more data
 * and flush again, and finally verify the data.
 * @throws IOException
 */
@Test
public void testReplayEditsAfterAbortingFlush() throws IOException {
  final TableName tableName = TableName.valueOf("testReplayEditsAfterAbortingFlush");
  final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
  final Path basedir = FSUtils.getTableDir(this.hbaseRootDir, tableName);
  deleteDir(basedir);
  final HTableDescriptor htd = createBasic3FamilyHTD(tableName);
  HRegion region3 = HBaseTestingUtility.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd);
  HBaseTestingUtility.closeRegionAndWAL(region3);
  // Write countPerFamily edits into the three families. Do a flush on one
  // of the families during the load of edits so its seqid is not the same as
  // the others', to test that we do the right thing when seqids differ.
  WAL wal = createWAL(this.conf, hbaseRootDir, logName);
  RegionServerServices rsServices = Mockito.mock(RegionServerServices.class);
  Mockito.doReturn(false).when(rsServices).isAborted();
  when(rsServices.getServerName()).thenReturn(ServerName.valueOf("foo", 10, 10));
  Configuration customConf = new Configuration(this.conf);
  customConf.set(DefaultStoreEngine.DEFAULT_STORE_FLUSHER_CLASS_KEY,
      CustomStoreFlusher.class.getName());
  HRegion region = HRegion.openHRegion(this.hbaseRootDir, hri, htd, wal, customConf, rsServices, null);
  int writtenRowCount = 10;
  List<HColumnDescriptor> families = new ArrayList<>(htd.getFamilies());
  for (int i = 0; i < writtenRowCount; i++) {
    Put put = new Put(Bytes.toBytes(tableName + Integer.toString(i)));
    put.addColumn(families.get(i % families.size()).getName(), Bytes.toBytes("q"),
        Bytes.toBytes("val"));
    region.put(put);
  }
  // Now assert edits made it in.
  RegionScanner scanner = region.getScanner(new Scan());
  assertEquals(writtenRowCount, getScannedCount(scanner));
  // Let us flush the region
  CustomStoreFlusher.throwExceptionWhenFlushing.set(true);
  try {
    region.flush(true);
    fail("Injected exception hasn't been thrown");
  } catch (Throwable t) {
    LOG.info("Expected simulated exception when flushing region," + t.getMessage());
    // simulated to abort server
    Mockito.doReturn(true).when(rsServices).isAborted();
    // region normally does not accept writes after
    // DroppedSnapshotException. We mock around it for this test.
    region.setClosing(false);
  }
  // writing more data
  int moreRow = 10;
  for (int i = writtenRowCount; i < writtenRowCount + moreRow; i++) {
    Put put = new Put(Bytes.toBytes(tableName + Integer.toString(i)));
    put.addColumn(families.get(i % families.size()).getName(), Bytes.toBytes("q"),
        Bytes.toBytes("val"));
    region.put(put);
  }
  writtenRowCount += moreRow;
  // call flush again
  CustomStoreFlusher.throwExceptionWhenFlushing.set(false);
  try {
    region.flush(true);
  } catch (IOException t) {
    LOG.info("Expected exception when flushing region because server is stopped,"
        + t.getMessage());
  }
  region.close(true);
  wal.shutdown();
  // Let us try to split and recover
  runWALSplit(this.conf);
  WAL wal2 = createWAL(this.conf, hbaseRootDir, logName);
  Mockito.doReturn(false).when(rsServices).isAborted();
  HRegion region2 = HRegion.openHRegion(this.hbaseRootDir, hri, htd, wal2, this.conf, rsServices, null);
  scanner = region2.getScanner(new Scan());
  assertEquals(writtenRowCount, getScannedCount(scanner));
}
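The test plugs CustomStoreFlusher into the store engine via DEFAULT_STORE_FLUSHER_CLASS_KEY so a flush can be made to fail on demand. The class is defined elsewhere in AbstractTestWALReplay; below is a minimal sketch consistent with how it is used above, assuming it extends DefaultStoreFlusher (only throwExceptionWhenFlushing is a name taken from the snippet; the constructor and the exact flushSnapshot signature are assumptions and vary across HBase versions).

// Sketch only: behaves like the default flusher unless the test flips the
// flag, in which case it simulates a flush failure.
static class CustomStoreFlusher extends DefaultStoreFlusher {
  // Toggled by the test to inject a failure during flush.
  static final AtomicBoolean throwExceptionWhenFlushing = new AtomicBoolean(false);

  public CustomStoreFlusher(Configuration conf, Store store) {
    super(conf, store);
  }

  @Override
  public List<Path> flushSnapshot(MemStoreSnapshot snapshot, long cacheFlushId,
      MonitoredTask status) throws IOException {
    if (throwExceptionWhenFlushing.get()) {
      // Simulate the failure the test expects while the flag is set.
      throw new IOException("Simulated exception by tests");
    }
    return super.flushSnapshot(snapshot, cacheFlushId, status);
  }
}

Because the flag is a static AtomicBoolean, the test can flip it on and off between flushes without holding a reference to the flusher instance the region created.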
Use of org.apache.hadoop.hbase.regionserver.HRegion in project hbase by apache: the class TestReplicationEndpoint, method testInterClusterReplication.
@Test(timeout = 120000)
public void testInterClusterReplication() throws Exception {
  final String id = "testInterClusterReplication";
  List<HRegion> regions = utility1.getHBaseCluster().getRegions(tableName);
  int totEdits = 0;
  // Make sure edits are spread across regions, because we do region-based
  // batching before shipping edits.
  for (HRegion region : regions) {
    HRegionInfo hri = region.getRegionInfo();
    byte[] row = hri.getStartKey();
    for (int i = 0; i < 100; i++) {
      if (row.length > 0) {
        Put put = new Put(row);
        put.addColumn(famName, row, row);
        region.put(put);
        totEdits++;
      }
    }
  }
  admin.addPeer(id,
      new ReplicationPeerConfig().setClusterKey(ZKConfig.getZooKeeperClusterKey(conf2))
          .setReplicationEndpointImpl(InterClusterReplicationEndpointForTest.class.getName()),
      null);
  final int numEdits = totEdits;
  Waiter.waitFor(conf1, 30000, new Waiter.ExplainingPredicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return InterClusterReplicationEndpointForTest.replicateCount.get() == numEdits;
    }

    @Override
    public String explainFailure() throws Exception {
      return "Failed to replicate all edits, expected = " + numEdits
          + " replicated = " + InterClusterReplicationEndpointForTest.replicateCount.get();
    }
  });
  admin.removePeer("testInterClusterReplication");
  utility1.deleteTableData(tableName);
}
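InterClusterReplicationEndpointForTest is defined elsewhere in TestReplicationEndpoint and exposes the static replicateCount that the predicate above polls. A plausible sketch, assuming it wraps the stock inter-cluster endpoint (only replicateCount is taken from the snippet; the base class and override are assumptions):

// Sketch: delegate real shipping to the parent endpoint and tally the
// entries of each successfully replicated batch.
public static class InterClusterReplicationEndpointForTest
    extends HBaseInterClusterReplicationEndpoint {
  static final AtomicInteger replicateCount = new AtomicInteger();

  @Override
  public boolean replicate(ReplicateContext replicateContext) {
    boolean success = super.replicate(replicateContext);
    if (success) {
      // Count entries only once the batch has actually shipped.
      replicateCount.addAndGet(replicateContext.getEntries().size());
    }
    return success;
  }
}

Counting only on success keeps the waiting predicate from passing early when a batch has to be retried.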
Use of org.apache.hadoop.hbase.regionserver.HRegion in project hbase by apache: the class TestMasterReplication, method rollWALAndWait.
private void rollWALAndWait(final HBaseTestingUtility utility, final TableName table,
    final byte[] row) throws IOException {
  final Admin admin = utility.getAdmin();
  final MiniHBaseCluster cluster = utility.getMiniHBaseCluster();
  // find the region that corresponds to the given row.
  HRegion region = null;
  for (HRegion candidate : cluster.getRegions(table)) {
    if (HRegion.rowIsInRange(candidate.getRegionInfo(), row)) {
      region = candidate;
      break;
    }
  }
  assertNotNull("Couldn't find the region for row '" + Arrays.toString(row) + "'", region);
  final CountDownLatch latch = new CountDownLatch(1);
  // listen for successful log rolls
  final WALActionsListener listener = new WALActionsListener.Base() {
    @Override
    public void postLogRoll(final Path oldPath, final Path newPath) throws IOException {
      latch.countDown();
    }
  };
  region.getWAL().registerWALActionsListener(listener);
  // request a roll
  admin.rollWALWriter(cluster.getServerHoldingRegion(region.getTableDesc().getTableName(),
      region.getRegionInfo().getRegionName()));
  // wait
  try {
    latch.await();
  } catch (InterruptedException exception) {
    LOG.warn("Interrupted while waiting for the wal of '" + region + "' to roll. If later "
        + "replication tests fail, it's probably because we should still be waiting.");
    Thread.currentThread().interrupt();
  }
  region.getWAL().unregisterWALActionsListener(listener);
}
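A hypothetical call site (utility1, tableName, and row are stand-ins for the test's own fields, not names from the snippet):

// Roll the WAL serving `row` before making assertions, so the edits that
// follow land in a fresh log file.
rollWALAndWait(utility1, tableName, row);

Blocking on latch.await() with no timeout is a deliberate choice here: the test-level timeout acts as the backstop, and the warning log explains any downstream failures if the interrupt path is taken instead.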
Use of org.apache.hadoop.hbase.regionserver.HRegion in project hbase by apache: the class TestMultiSlaveReplication, method rollWALAndWait.
private void rollWALAndWait(final HBaseTestingUtility utility, final TableName table,
    final byte[] row) throws IOException {
  final Admin admin = utility.getAdmin();
  final MiniHBaseCluster cluster = utility.getMiniHBaseCluster();
  // find the region that corresponds to the given row.
  HRegion region = null;
  for (HRegion candidate : cluster.getRegions(table)) {
    if (HRegion.rowIsInRange(candidate.getRegionInfo(), row)) {
      region = candidate;
      break;
    }
  }
  assertNotNull("Couldn't find the region for row '" + Arrays.toString(row) + "'", region);
  final CountDownLatch latch = new CountDownLatch(1);
  // listen for successful log rolls
  final WALActionsListener listener = new WALActionsListener.Base() {
    @Override
    public void postLogRoll(final Path oldPath, final Path newPath) throws IOException {
      latch.countDown();
    }
  };
  region.getWAL().registerWALActionsListener(listener);
  // request a roll
  admin.rollWALWriter(cluster.getServerHoldingRegion(region.getTableDesc().getTableName(),
      region.getRegionInfo().getRegionName()));
  // wait
  try {
    latch.await();
  } catch (InterruptedException exception) {
    LOG.warn("Interrupted while waiting for the wal of '" + region + "' to roll. If later "
        + "replication tests fail, it's probably because we should still be waiting.");
    Thread.currentThread().interrupt();
  }
  region.getWAL().unregisterWALActionsListener(listener);
}
Use of org.apache.hadoop.hbase.regionserver.HRegion in project hbase by apache: the class TestDurability, method testIncrement.
@Test
public void testIncrement() throws Exception {
  byte[] row1 = Bytes.toBytes("row1");
  byte[] col1 = Bytes.toBytes("col1");
  byte[] col2 = Bytes.toBytes("col2");
  byte[] col3 = Bytes.toBytes("col3");
  // Setting up region
  final WALFactory wals = new WALFactory(CONF, null,
      ServerName.valueOf("TestIncrement", 16010, System.currentTimeMillis()).toString());
  byte[] tableName = Bytes.toBytes("TestIncrement");
  final WAL wal = wals.getWAL(tableName, null);
  HRegion region = createHRegion(tableName, "increment", wal, Durability.USE_DEFAULT);
  // col1: amount = 0, 1 write back to WAL
  Increment inc1 = new Increment(row1);
  inc1.addColumn(FAMILY, col1, 0);
  Result res = region.increment(inc1);
  assertEquals(1, res.size());
  assertEquals(0, Bytes.toLong(res.getValue(FAMILY, col1)));
  verifyWALCount(wals, wal, 1);
  // col1: amount = 1, 1 write back to WAL
  inc1 = new Increment(row1);
  inc1.addColumn(FAMILY, col1, 1);
  res = region.increment(inc1);
  assertEquals(1, res.size());
  assertEquals(1, Bytes.toLong(res.getValue(FAMILY, col1)));
  verifyWALCount(wals, wal, 2);
  // col1: amount = 0, 0 writes back to WAL
  inc1 = new Increment(row1);
  inc1.addColumn(FAMILY, col1, 0);
  res = region.increment(inc1);
  assertEquals(1, res.size());
  assertEquals(1, Bytes.toLong(res.getValue(FAMILY, col1)));
  verifyWALCount(wals, wal, 2);
  // col1: amount = 0, col2: amount = 0, col3: amount = 0
  // 1 write back to WAL
  inc1 = new Increment(row1);
  inc1.addColumn(FAMILY, col1, 0);
  inc1.addColumn(FAMILY, col2, 0);
  inc1.addColumn(FAMILY, col3, 0);
  res = region.increment(inc1);
  assertEquals(3, res.size());
  assertEquals(1, Bytes.toLong(res.getValue(FAMILY, col1)));
  assertEquals(0, Bytes.toLong(res.getValue(FAMILY, col2)));
  assertEquals(0, Bytes.toLong(res.getValue(FAMILY, col3)));
  verifyWALCount(wals, wal, 3);
  // col1: amount = 5, col2: amount = 4, col3: amount = 3
  // 1 write back to WAL
  inc1 = new Increment(row1);
  inc1.addColumn(FAMILY, col1, 5);
  inc1.addColumn(FAMILY, col2, 4);
  inc1.addColumn(FAMILY, col3, 3);
  res = region.increment(inc1);
  assertEquals(3, res.size());
  assertEquals(6, Bytes.toLong(res.getValue(FAMILY, col1)));
  assertEquals(4, Bytes.toLong(res.getValue(FAMILY, col2)));
  assertEquals(3, Bytes.toLong(res.getValue(FAMILY, col3)));
  verifyWALCount(wals, wal, 4);
}
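verifyWALCount is a helper from the same test class. A minimal sketch of what it checks, assuming it opens the WAL's current file and counts entries (the provider class and the FS field are assumptions, and the reader API moved between HBase versions):

// Sketch: read back the active WAL file and assert on the number of
// appended entries.
private void verifyWALCount(WALFactory wals, WAL log, int expected) throws Exception {
  Path walPath = AbstractFSWALProvider.getCurrentFileName(log);
  WAL.Reader reader = wals.createReader(FS, walPath); // FS: the test's filesystem field
  int count = 0;
  try {
    while (reader.next() != null) {
      count++;
    }
  } finally {
    reader.close();
  }
  assertEquals(expected, count);
}

This is why a zero increment of an existing cell leaves the count unchanged in the third step above: no edit is appended to the WAL, so the count stays at 2.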