Use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor in project hbase by apache.
The example below is the testReplayFlushesAndCompactions method from the TestHRegionReplayEvents class.
@Test
public void testReplayFlushesAndCompactions() throws IOException {
  // initiate a secondary region with some data.

  // load some data to primary and flush. 3 flushes and some more unflushed data
  putDataWithFlushes(primaryRegion, 100, 300, 100);

  // compaction from primary
  LOG.info("-- Compacting primary, only 1 store");
  primaryRegion.compactStore(Bytes.toBytes("cf1"), NoLimitThroughputController.INSTANCE);

  // now replay the edits and the flush marker
  reader = createWALReaderForPrimary();

  LOG.info("-- Replaying edits and flush events in secondary");
  int lastReplayed = 0;
  int expectedStoreFileCount = 0;
  while (true) {
    WAL.Entry entry = reader.next();
    if (entry == null) {
      break;
    }
    FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
    CompactionDescriptor compactionDesc =
      WALEdit.getCompaction(entry.getEdit().getCells().get(0));
    if (flushDesc != null) {
      // first verify that everything is replayed and visible before flush event replay
      verifyData(secondaryRegion, 0, lastReplayed, cq, families);
      HStore store = secondaryRegion.getStore(Bytes.toBytes("cf1"));
      long storeMemstoreSize = store.getMemStoreSize().getHeapSize();
      long regionMemstoreSize = secondaryRegion.getMemStoreDataSize();
      MemStoreSize mss = store.getFlushableSize();
      long storeSize = store.getSize();
      long storeSizeUncompressed = store.getStoreSizeUncompressed();
      if (flushDesc.getAction() == FlushAction.START_FLUSH) {
        LOG.info("-- Replaying flush start in secondary");
        PrepareFlushResult result = secondaryRegion.replayWALFlushStartMarker(flushDesc);
        assertNull(result.result);
        assertEquals(result.flushOpSeqId, flushDesc.getFlushSequenceNumber());

        // assert that the store memstore is smaller now
        long newStoreMemstoreSize = store.getMemStoreSize().getHeapSize();
        LOG.info("Memstore size reduced by:"
          + StringUtils.humanReadableInt(newStoreMemstoreSize - storeMemstoreSize));
        assertTrue(storeMemstoreSize > newStoreMemstoreSize);
      } else if (flushDesc.getAction() == FlushAction.COMMIT_FLUSH) {
        LOG.info("-- Replaying flush commit in secondary");
        secondaryRegion.replayWALFlushCommitMarker(flushDesc);

        // assert that the flush files are picked
        expectedStoreFileCount++;
        for (HStore s : secondaryRegion.getStores()) {
          assertEquals(expectedStoreFileCount, s.getStorefilesCount());
        }
        MemStoreSize newMss = store.getFlushableSize();
        assertTrue(mss.getHeapSize() > newMss.getHeapSize());

        // assert that the region memstore is smaller now
        long newRegionMemstoreSize = secondaryRegion.getMemStoreDataSize();
        assertTrue(regionMemstoreSize > newRegionMemstoreSize);

        // assert that the store sizes are bigger
        assertTrue(store.getSize() > storeSize);
        assertTrue(store.getStoreSizeUncompressed() > storeSizeUncompressed);
        assertEquals(store.getSize(), store.getStorefilesSize());
      }
      // after replay verify that everything is still visible
      verifyData(secondaryRegion, 0, lastReplayed + 1, cq, families);
    } else if (compactionDesc != null) {
      secondaryRegion.replayWALCompactionMarker(compactionDesc, true, false, Long.MAX_VALUE);

      // assert that the compaction is applied
      for (HStore store : secondaryRegion.getStores()) {
        if (store.getColumnFamilyName().equals("cf1")) {
          assertEquals(1, store.getStorefilesCount());
        } else {
          assertEquals(expectedStoreFileCount, store.getStorefilesCount());
        }
      }
    } else {
      lastReplayed = replayEdit(secondaryRegion, entry);
    }
  }

  assertEquals(400 - 1, lastReplayed);
  LOG.info("-- Verifying edits from secondary");
  verifyData(secondaryRegion, 0, 400, cq, families);

  LOG.info("-- Verifying edits from primary. Ensuring that files are not deleted");
  verifyData(primaryRegion, 0, lastReplayed, cq, families);
  for (HStore store : primaryRegion.getStores()) {
    if (store.getColumnFamilyName().equals("cf1")) {
      assertEquals(1, store.getStorefilesCount());
    } else {
      assertEquals(expectedStoreFileCount, store.getStorefilesCount());
    }
  }
}
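For context, the CompactionDescriptor the test replays is the same protobuf marker a primary region writes into its WAL when a compaction commits. Below is a minimal sketch of that round trip, assuming the HBase 2.x public APIs (ProtobufUtil.toCompactionDescriptor, WALEdit.createCompaction, WALEdit.getCompaction); the class name, method names, and file paths are placeholders, and the actual WAL append is omitted because its helper signature differs across versions.

import java.io.IOException;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.wal.WALEdit;

public class CompactionMarkerSketch {

  // Builds a compaction marker edit for store "cf1", roughly the way the primary
  // does after a compaction commits. The store directory and file paths are placeholders.
  static WALEdit buildCompactionMarker(RegionInfo regionInfo) {
    Path storeDir = new Path("/hbase/data/default/testtable/region/cf1");
    List<Path> inputFiles = Arrays.asList(
      new Path(storeDir, "compacted-input-1"), new Path(storeDir, "compacted-input-2"));
    List<Path> outputFiles = Arrays.asList(new Path(storeDir, "compaction-output-1"));

    CompactionDescriptor desc = ProtobufUtil.toCompactionDescriptor(
      regionInfo, Bytes.toBytes("cf1"), inputFiles, outputFiles, storeDir);

    // Wrap the descriptor in a marker edit; the primary appends this edit to its WAL.
    return WALEdit.createCompaction(regionInfo, desc);
  }

  // On replay, the descriptor is pulled back out of the marker cell, exactly as the
  // test above does for each WAL entry before calling replayWALCompactionMarker.
  static CompactionDescriptor readCompactionMarker(WALEdit edit) throws IOException {
    return WALEdit.getCompaction(edit.getCells().get(0));
  }
}

The test relies on this pairing: because the marker names the compaction's input and output files, replayWALCompactionMarker can drop the compacted-away files and pick up the single output file, which is why the "cf1" store ends the test with exactly one store file.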