Use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor in project hbase by apache.
From class TestHRegionReplayEvents, method testReplayFlushCommitMarkerWithoutFlushStartMarker.
/**
* Tests the case where we receive a flush commit before receiving any flush prepare markers
*/
public void testReplayFlushCommitMarkerWithoutFlushStartMarker(boolean droppableMemstore) throws IOException {
  // load some data to primary and flush. 1 flush and some more unflushed data.
  // write more data after the flush depending on droppableMemstore
  putDataWithFlushes(primaryRegion, 100, 100, droppableMemstore ? 0 : 100);
  int numRows = droppableMemstore ? 100 : 200;

  // now replay the edits and the flush marker
  reader = createWALReaderForPrimary();

  LOG.info("-- Replaying edits and flush events in secondary");
  FlushDescriptor commitFlushDesc = null;

  int lastReplayed = 0;
  while (true) {
    WAL.Entry entry = reader.next();
    if (entry == null) {
      break;
    }
    FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
    if (flushDesc != null) {
      if (flushDesc.getAction() == FlushAction.START_FLUSH) {
        // do not replay the flush start marker
      } else if (flushDesc.getAction() == FlushAction.COMMIT_FLUSH) {
        // hold on to the flush commit marker
        commitFlushDesc = flushDesc;
      }
      // after replay verify that everything is still visible
      verifyData(secondaryRegion, 0, lastReplayed + 1, cq, families);
    } else {
      lastReplayed = replayEdit(secondaryRegion, entry);
    }
  }

  // at this point, there should be some data (rows 0-100) in the memstore without a snapshot,
  // and, unless the memstore is droppable, some more unflushed data (rows 100-200)
  verifyData(secondaryRegion, 0, numRows, cq, families);

  // no store files in the region
  int expectedStoreFileCount = 0;
  for (HStore s : secondaryRegion.getStores()) {
    assertEquals(expectedStoreFileCount, s.getStorefilesCount());
  }
  long regionMemstoreSize = secondaryRegion.getMemStoreDataSize();

  // Test case 1: replay a flush commit marker without a preceding flush start marker
  assertNull(secondaryRegion.getPrepareFlushResult());
  assertTrue(commitFlushDesc.getFlushSequenceNumber() > 0);

  // ensure all files are visible in secondary
  for (HStore store : secondaryRegion.getStores()) {
    assertTrue(store.getMaxSequenceId().orElse(0L) <= secondaryRegion.getReadPoint(null));
  }

  LOG.info("-- Replaying flush commit in secondary " + commitFlushDesc);
  secondaryRegion.replayWALFlushCommitMarker(commitFlushDesc);

  // assert that the flush files are picked
  expectedStoreFileCount++;
  for (HStore s : secondaryRegion.getStores()) {
    assertEquals(expectedStoreFileCount, s.getStorefilesCount());
  }

  HStore store = secondaryRegion.getStore(Bytes.toBytes("cf1"));
  MemStoreSize mss = store.getFlushableSize();
  if (droppableMemstore) {
    // assert that the memstore is dropped
    assertTrue(mss.getHeapSize() == MutableSegment.DEEP_OVERHEAD);
  } else {
    // assert that the memstore is not dropped
    assertTrue(mss.getHeapSize() > 0);
  }

  // assert that the region memstore is emptied (droppable) or unchanged (we could not drop)
  long newRegionMemstoreSize = secondaryRegion.getMemStoreDataSize();
  if (droppableMemstore) {
    assertTrue(0 == newRegionMemstoreSize);
  } else {
    assertTrue(regionMemstoreSize == newRegionMemstoreSize);
  }

  LOG.info("-- Verifying edits from secondary");
  verifyData(secondaryRegion, 0, numRows, cq, families);

  LOG.info("-- Verifying edits from primary.");
  verifyData(primaryRegion, 0, numRows, cq, families);
}
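The helper putDataWithFlushes that drives these tests is not shown in this listing. A minimal sketch of what it might look like, assuming the putData helper and the cq/families fields visible in the other snippets; the real implementation in TestHRegionReplayEvents may differ in details:

// Hypothetical sketch: write batches of flushInterval rows up to `flushed` rows,
// flushing the region after each batch, then leave numRowsAfterFlush rows unflushed.
private void putDataWithFlushes(HRegion region, int flushInterval, int flushed,
    int numRowsAfterFlush) throws IOException {
  int start = 0;
  for (; start < flushed; start += flushInterval) {
    putData(region, Durability.SYNC_WAL, start, flushInterval, cq, families);
    region.flush(true); // each flush produces one store file per store
  }
  // the remaining rows stay in the memstore, unflushed
  putData(region, Durability.SYNC_WAL, start, numRowsAfterFlush, cq, families);
}

Under this reading, putDataWithFlushes(primaryRegion, 100, 100, droppableMemstore ? 0 : 100) above yields rows 0-100 flushed plus 0 or 100 unflushed rows, matching numRows of 100 or 200.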
Use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor in project hbase by apache.
From class TestHRegionReplayEvents, method testReplayingFlushRestoresReadsEnabledState.
/**
* Test the case where the secondary region replica is not in reads enabled state because it is
* waiting for a flush or region open marker from primary region. Replaying flush start and commit
* entries should restore the reads enabled status in the region and allow the reads
* to continue.
*/
@Test
public void testReplayingFlushRestoresReadsEnabledState() throws IOException {
  // Test case 2: Test that replaying FLUSH_START and FLUSH_COMMIT markers (assuming these came
  // from a triggered flush) restores readsEnabled
  disableReads(secondaryRegion);

  // put some data in primary
  putData(primaryRegion, Durability.SYNC_WAL, 0, 100, cq, families);
  primaryRegion.flush(true);
  // I seem to need to push more edits through so the WAL flushes on local fs. This was not
  // needed before HBASE-15028. Not sure what's up. I can see that we have not flushed if I
  // look at the WAL if I pause the test here and then use WALPrettyPrinter to look at content.
  // Doing the same check before HBASE-15028 I can see all edits flushed to the WAL. Something's
  // up, but I can't figure it out... and this is the only test that seems to suffer this flush issue.
  // St.Ack 20160201
  putData(primaryRegion, Durability.SYNC_WAL, 0, 100, cq, families);

  reader = createWALReaderForPrimary();
  while (true) {
    WAL.Entry entry = reader.next();
    LOG.info(Objects.toString(entry));
    if (entry == null) {
      break;
    }
    FlushDescriptor flush = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
    if (flush != null) {
      secondaryRegion.replayWALFlushMarker(flush, entry.getKey().getSequenceId());
    } else {
      replayEdit(secondaryRegion, entry);
    }
  }

  // now reads should be enabled
  verifyData(secondaryRegion, 0, 100, cq, families);
}
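The disableReads helper is also not shown here. A hypothetical sketch of what it might do, assuming HRegion.setReadsEnabled(boolean) is visible to the test class and that a read against a not-reads-enabled replica fails with an IOException:

// Hypothetical sketch: flip the replica out of the reads-enabled state and
// confirm that reads now fail until a flush or region-open marker is replayed.
private void disableReads(HRegion region) {
  region.setReadsEnabled(false);
  try {
    verifyData(region, 0, 1, cq, families);
    fail("Should have failed with IOException");
  } catch (IOException expected) {
    // expected: reads stay disabled until a marker restores the state
  }
}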
Use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor in project hbase by apache.
From class TestHRegionReplayEvents, method testReplayFlushesAndCompactions.
@Test
public void testReplayFlushesAndCompactions() throws IOException {
  // initiate a secondary region with some data.
  // load some data to primary and flush. 3 flushes and some more unflushed data
  putDataWithFlushes(primaryRegion, 100, 300, 100);

  // compaction from primary
  LOG.info("-- Compacting primary, only 1 store");
  primaryRegion.compactStore(Bytes.toBytes("cf1"), NoLimitThroughputController.INSTANCE);

  // now replay the edits and the flush marker
  reader = createWALReaderForPrimary();

  LOG.info("-- Replaying edits and flush events in secondary");
  int lastReplayed = 0;
  int expectedStoreFileCount = 0;
  while (true) {
    WAL.Entry entry = reader.next();
    if (entry == null) {
      break;
    }
    FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
    CompactionDescriptor compactionDesc = WALEdit.getCompaction(entry.getEdit().getCells().get(0));
    if (flushDesc != null) {
      // first verify that everything is replayed and visible before flush event replay
      verifyData(secondaryRegion, 0, lastReplayed, cq, families);
      HStore store = secondaryRegion.getStore(Bytes.toBytes("cf1"));
      long storeMemstoreSize = store.getMemStoreSize().getHeapSize();
      long regionMemstoreSize = secondaryRegion.getMemStoreDataSize();
      MemStoreSize mss = store.getFlushableSize();
      long storeSize = store.getSize();
      long storeSizeUncompressed = store.getStoreSizeUncompressed();
      if (flushDesc.getAction() == FlushAction.START_FLUSH) {
        LOG.info("-- Replaying flush start in secondary");
        PrepareFlushResult result = secondaryRegion.replayWALFlushStartMarker(flushDesc);
        assertNull(result.result);
        assertEquals(result.flushOpSeqId, flushDesc.getFlushSequenceNumber());

        // assert that the store memstore is smaller now
        long newStoreMemstoreSize = store.getMemStoreSize().getHeapSize();
        LOG.info("Memstore size reduced by: "
          + StringUtils.humanReadableInt(newStoreMemstoreSize - storeMemstoreSize));
        assertTrue(storeMemstoreSize > newStoreMemstoreSize);
      } else if (flushDesc.getAction() == FlushAction.COMMIT_FLUSH) {
        LOG.info("-- Replaying flush commit in secondary");
        secondaryRegion.replayWALFlushCommitMarker(flushDesc);

        // assert that the flush files are picked
        expectedStoreFileCount++;
        for (HStore s : secondaryRegion.getStores()) {
          assertEquals(expectedStoreFileCount, s.getStorefilesCount());
        }
        MemStoreSize newMss = store.getFlushableSize();
        assertTrue(mss.getHeapSize() > newMss.getHeapSize());

        // assert that the region memstore is smaller now
        long newRegionMemstoreSize = secondaryRegion.getMemStoreDataSize();
        assertTrue(regionMemstoreSize > newRegionMemstoreSize);

        // assert that the store sizes are bigger
        assertTrue(store.getSize() > storeSize);
        assertTrue(store.getStoreSizeUncompressed() > storeSizeUncompressed);
        assertEquals(store.getSize(), store.getStorefilesSize());
      }
      // after replay verify that everything is still visible
      verifyData(secondaryRegion, 0, lastReplayed + 1, cq, families);
    } else if (compactionDesc != null) {
      secondaryRegion.replayWALCompactionMarker(compactionDesc, true, false, Long.MAX_VALUE);

      // assert that the compaction is applied
      for (HStore store : secondaryRegion.getStores()) {
        if (store.getColumnFamilyName().equals("cf1")) {
          assertEquals(1, store.getStorefilesCount());
        } else {
          assertEquals(expectedStoreFileCount, store.getStorefilesCount());
        }
      }
    } else {
      lastReplayed = replayEdit(secondaryRegion, entry);
    }
  }

  assertEquals(400 - 1, lastReplayed);
  LOG.info("-- Verifying edits from secondary");
  verifyData(secondaryRegion, 0, 400, cq, families);

  LOG.info("-- Verifying edits from primary. Ensuring that files are not deleted");
  verifyData(primaryRegion, 0, lastReplayed, cq, families);
  for (HStore store : primaryRegion.getStores()) {
    if (store.getColumnFamilyName().equals("cf1")) {
      assertEquals(1, store.getStorefilesCount());
    } else {
      assertEquals(expectedStoreFileCount, store.getStorefilesCount());
    }
  }
}
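Every test in this listing leans on the verifyData helper, which is not shown. A hedged sketch of what it presumably does, assuming one Get per row whose latest cell under each family/qualifier carries the row key as its value; the real helper may assert more:

// Hypothetical sketch: read each row back and check the stored value matches the row key.
private void verifyData(HRegion region, int startRow, int numRows, byte[] qf,
    byte[]... families) throws IOException {
  for (int i = startRow; i < startRow + numRows; i++) {
    byte[] row = Bytes.toBytes("" + i);
    Result result = region.get(new Get(row));
    for (byte[] family : families) {
      Cell cell = result.getColumnLatestCell(family, qf);
      assertNotNull("missing cell for row " + i, cell);
      assertTrue(Bytes.equals(row, 0, row.length,
        cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
    }
  }
}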
Use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor in project hbase by apache.
From class TestHRegionReplayEvents, method testReplayRegionOpenEventAfterFlushStart.
/**
* Tests the case where we replay a region open event after a flush start but before receiving
* flush commit
*/
@Test
public void testReplayRegionOpenEventAfterFlushStart() throws IOException {
  putDataWithFlushes(primaryRegion, 100, 100, 100);
  int numRows = 200;

  // close the region and open again.
  primaryRegion.close();
  primaryRegion = HRegion.openHRegion(rootDir, primaryHri, htd, walPrimary, CONF, rss, null);

  // now replay the edits and the flush marker
  reader = createWALReaderForPrimary();
  List<RegionEventDescriptor> regionEvents = Lists.newArrayList();

  LOG.info("-- Replaying edits and region events in secondary");
  while (true) {
    WAL.Entry entry = reader.next();
    if (entry == null) {
      break;
    }
    FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
    RegionEventDescriptor regionEventDesc =
      WALEdit.getRegionEventDescriptor(entry.getEdit().getCells().get(0));
    if (flushDesc != null) {
      // only replay the flush start marker
      if (flushDesc.getAction() == FlushAction.START_FLUSH) {
        secondaryRegion.replayWALFlushStartMarker(flushDesc);
      }
    } else if (regionEventDesc != null) {
      regionEvents.add(regionEventDesc);
    } else {
      replayEdit(secondaryRegion, entry);
    }
  }

  // at this point, there should be some data (rows 0-100) in the memstore snapshot
  // and some more data in memstores (rows 100-200)
  verifyData(secondaryRegion, 0, numRows, cq, families);

  // we should have 1 open, 1 close and 1 open event
  assertEquals(3, regionEvents.size());

  // no store files in the region
  int expectedStoreFileCount = 0;
  for (HStore s : secondaryRegion.getStores()) {
    assertEquals(expectedStoreFileCount, s.getStorefilesCount());
  }

  // now replay the region open event that should contain the new file locations
  LOG.info("Testing replaying region open event " + regionEvents.get(2));
  secondaryRegion.replayWALRegionEventMarker(regionEvents.get(2));

  // assert that the flush files are picked; two flushes happened
  expectedStoreFileCount = 2;
  for (HStore s : secondaryRegion.getStores()) {
    assertEquals(expectedStoreFileCount, s.getStorefilesCount());
  }
  HStore store = secondaryRegion.getStore(Bytes.toBytes("cf1"));
  MemStoreSize newSnapshotSize = store.getSnapshotSize();
  assertTrue(newSnapshotSize.getDataSize() == 0);

  // assert that the region memstore is empty
  long newRegionMemstoreSize = secondaryRegion.getMemStoreDataSize();
  assertTrue(newRegionMemstoreSize == 0);

  // the prepare flush snapshot, if any, should be dropped
  assertNull(secondaryRegion.getPrepareFlushResult());

  LOG.info("-- Verifying edits from secondary");
  verifyData(secondaryRegion, 0, numRows, cq, families);

  LOG.info("-- Verifying edits from primary.");
  verifyData(primaryRegion, 0, numRows, cq, families);
}
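The createWALReaderForPrimary helper is referenced in every snippet but never shown. A sketch, under stated assumptions, of how it might open a WAL.Reader over the primary region's current WAL file; the TEST_UTIL and walPrimary fields and the HBase 2.x WALFactory/AbstractFSWALProvider static APIs are assumed:

// Hypothetical sketch: open a reader on the file currently backing the primary's WAL.
private WAL.Reader createWALReaderForPrimary() throws IOException {
  return WALFactory.createReader(TEST_UTIL.getTestFileSystem(),
    AbstractFSWALProvider.getCurrentFileName(walPrimary),
    TEST_UTIL.getConfiguration());
}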
Use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor in project hbase by apache.
From class TestHRegionReplayEvents, method testReplayingFlushWithEmptyMemstoreRestoresReadsEnabledState.
/**
* Test the case where the secondary region replica is not in reads enabled state because it is
* waiting for a flush or region open marker from primary region. Replaying flush start and commit
* entries should restore the reads enabled status in the region and allow the reads
* to continue.
*/
@Test
public void testReplayingFlushWithEmptyMemstoreRestoresReadsEnabledState() throws IOException {
  // Test case 2: Test that replaying FLUSH_START and FLUSH_COMMIT markers (assuming these came
  // from a triggered flush) restores readsEnabled
  disableReads(secondaryRegion);

  // put some data in primary
  putData(primaryRegion, Durability.SYNC_WAL, 0, 100, cq, families);
  primaryRegion.flush(true);

  reader = createWALReaderForPrimary();
  while (true) {
    WAL.Entry entry = reader.next();
    if (entry == null) {
      break;
    }
    FlushDescriptor flush = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
    if (flush != null) {
      secondaryRegion.replayWALFlushMarker(flush, entry.getKey().getSequenceId());
    }
  }

  // now reads should be enabled
  verifyData(secondaryRegion, 0, 100, cq, families);
}
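All of the loops above probe only the first cell of each WAL edit for a flush marker. A small extracted helper, assuming the same imports as the snippets, that makes the recurring pattern explicit and guards against empty edits:

// Return the FlushDescriptor carried by this entry's first cell, or null if
// the edit has no cells or its first cell is not a flush marker.
private static FlushDescriptor getFlushMarker(WAL.Entry entry) throws IOException {
  if (entry.getEdit().getCells().isEmpty()) {
    return null; // no cells, so no flush marker to replay
  }
  return WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
}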