Example 11 with CompactionDescriptor

Use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor in project hbase by apache.

From the class TestHRegionReplayEvents, the method testReplayFlushesAndCompactions:

@Test
public void testReplayFlushesAndCompactions() throws IOException {
    // the primary and secondary regions are initialized in the test setup
    // load some data to primary and flush: 3 flushes of 100 rows each,
    // plus 100 more rows of unflushed data (400 rows total)
    putDataWithFlushes(primaryRegion, 100, 300, 100);
    // compaction from primary
    LOG.info("-- Compacting primary, only 1 store");
    primaryRegion.compactStore(Bytes.toBytes("cf1"), NoLimitThroughputController.INSTANCE);
    // now replay the edits and the flush marker
    reader = createWALReaderForPrimary();
    LOG.info("-- Replaying edits and flush events in secondary");
    int lastReplayed = 0;
    int expectedStoreFileCount = 0;
    while (true) {
        WAL.Entry entry = reader.next();
        if (entry == null) {
            break;
        }
        FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
        CompactionDescriptor compactionDesc = WALEdit.getCompaction(entry.getEdit().getCells().get(0));
        if (flushDesc != null) {
            // first verify that everything is replayed and visible before flush event replay
            verifyData(secondaryRegion, 0, lastReplayed, cq, families);
            HStore store = secondaryRegion.getStore(Bytes.toBytes("cf1"));
            long storeMemstoreSize = store.getMemStoreSize().getHeapSize();
            long regionMemstoreSize = secondaryRegion.getMemStoreDataSize();
            MemStoreSize mss = store.getFlushableSize();
            long storeSize = store.getSize();
            long storeSizeUncompressed = store.getStoreSizeUncompressed();
            if (flushDesc.getAction() == FlushAction.START_FLUSH) {
                LOG.info("-- Replaying flush start in secondary");
                PrepareFlushResult result = secondaryRegion.replayWALFlushStartMarker(flushDesc);
                assertNull(result.result);
                assertEquals(result.flushOpSeqId, flushDesc.getFlushSequenceNumber());
                // assert that the store memstore is smaller now
                long newStoreMemstoreSize = store.getMemStoreSize().getHeapSize();
                LOG.info("Memstore size reduced by:" + StringUtils.humanReadableInt(newStoreMemstoreSize - storeMemstoreSize));
                assertTrue(storeMemstoreSize > newStoreMemstoreSize);
            } else if (flushDesc.getAction() == FlushAction.COMMIT_FLUSH) {
                LOG.info("-- Replaying flush commit in secondary");
                secondaryRegion.replayWALFlushCommitMarker(flushDesc);
                // assert that the flush files are picked
                expectedStoreFileCount++;
                for (HStore s : secondaryRegion.getStores()) {
                    assertEquals(expectedStoreFileCount, s.getStorefilesCount());
                }
                MemStoreSize newMss = store.getFlushableSize();
                assertTrue(mss.getHeapSize() > newMss.getHeapSize());
                // assert that the region memstore is smaller now
                long newRegionMemstoreSize = secondaryRegion.getMemStoreDataSize();
                assertTrue(regionMemstoreSize > newRegionMemstoreSize);
                // assert that the store sizes are bigger
                assertTrue(store.getSize() > storeSize);
                assertTrue(store.getStoreSizeUncompressed() > storeSizeUncompressed);
                assertEquals(store.getSize(), store.getStorefilesSize());
            }
            // after replay verify that everything is still visible
            verifyData(secondaryRegion, 0, lastReplayed + 1, cq, families);
        } else if (compactionDesc != null) {
            secondaryRegion.replayWALCompactionMarker(compactionDesc, true, false, Long.MAX_VALUE);
            // assert that the compaction is applied
            for (HStore store : secondaryRegion.getStores()) {
                if (store.getColumnFamilyName().equals("cf1")) {
                    assertEquals(1, store.getStorefilesCount());
                } else {
                    assertEquals(expectedStoreFileCount, store.getStorefilesCount());
                }
            }
        } else {
            lastReplayed = replayEdit(secondaryRegion, entry);
        }
    }
    assertEquals(400 - 1, lastReplayed);
    LOG.info("-- Verifying edits from secondary");
    verifyData(secondaryRegion, 0, 400, cq, families);
    LOG.info("-- Verifying edits from primary. Ensuring that files are not deleted");
    verifyData(primaryRegion, 0, lastReplayed, cq, families);
    for (HStore store : primaryRegion.getStores()) {
        if (store.getColumnFamilyName().equals("cf1")) {
            assertEquals(1, store.getStorefilesCount());
        } else {
            assertEquals(expectedStoreFileCount, store.getStorefilesCount());
        }
    }
}
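
For orientation: CompactionDescriptor is the protobuf message that HBase appends to the WAL when a compaction finishes, recording which store files the compaction consumed and which file it produced, so that replay logic and secondary replicas (as in the test above) can swap in the compacted files. Below is a minimal, illustrative sketch of building such a descriptor directly with the generated protobuf builder. All field values are made up, and in HBase itself the message is normally created via ProtobufUtil.toCompactionDescriptor and written with WALUtil.writeCompactionMarker rather than assembled by hand; the shaded ByteString import path also varies between HBase versions and is assumed here to match the WALProtos import above.

import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactionDescriptorSketch {

    // Builds a descriptor for a hypothetical compaction that merged two
    // store files of column family "cf1" into a single output file.
    public static CompactionDescriptor exampleDescriptor() {
        return CompactionDescriptor.newBuilder()
            // table_name, encoded_region_name and family_name are required bytes fields
            .setTableName(ByteString.copyFrom(Bytes.toBytes("testtable")))
            .setEncodedRegionName(ByteString.copyFrom(Bytes.toBytes("0123456789abcdef")))
            .setFamilyName(ByteString.copyFrom(Bytes.toBytes("cf1")))
            // store files consumed by the compaction, relative to the store directory
            .addCompactionInput("hypothetical-input-1")
            .addCompactionInput("hypothetical-input-2")
            // the store file the compaction produced
            .addCompactionOutput("hypothetical-output")
            // store directory, relative to the region directory
            .setStoreHomeDir("cf1")
            .build();
    }
}

On the read side, WALEdit.getCompaction (used at the top of the replay loop above) performs the inverse step: it recognizes a compaction-marker cell in a WAL entry and parses it back into this message, which replayWALCompactionMarker then applies by dropping the input files and picking up the output file.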
Also used:
WAL (org.apache.hadoop.hbase.wal.WAL)
PrepareFlushResult (org.apache.hadoop.hbase.regionserver.HRegion.PrepareFlushResult)
CompactionDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor)
FlushDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor)
StoreFlushDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor)
Test (org.junit.Test)

Aggregations

CompactionDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor): 11 usages
Path (org.apache.hadoop.fs.Path): 8 usages
ArrayList (java.util.ArrayList): 4 usages
Cell (org.apache.hadoop.hbase.Cell): 4 usages
WAL (org.apache.hadoop.hbase.wal.WAL): 4 usages
IOException (java.io.IOException): 3 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 3 usages
FlushDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor): 3 usages
EOFException (java.io.EOFException): 2 usages
InterruptedIOException (java.io.InterruptedIOException): 2 usages
ParseException (java.text.ParseException): 2 usages
List (java.util.List): 2 usages
ByteBufferExtendedCell (org.apache.hadoop.hbase.ByteBufferExtendedCell): 2 usages
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 2 usages
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo): 2 usages
TimeoutIOException (org.apache.hadoop.hbase.exceptions.TimeoutIOException): 2 usages
MonitoredTask (org.apache.hadoop.hbase.monitoring.MonitoredTask): 2 usages
BulkLoadDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescriptor): 2 usages
RegionEventDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor): 2 usages
WALKey (org.apache.hadoop.hbase.wal.WALKey): 2 usages