
Example 1 with PrepareFlushResult

Use of org.apache.hadoop.hbase.regionserver.HRegion.PrepareFlushResult in project hbase by apache.

From the class TestSplitWalDataLoss, method test.

@Test
public void test() throws IOException, InterruptedException {
    final HRegionServer rs = testUtil.getRSForFirstRegionInTable(tableName);
    final HRegion region = (HRegion) rs.getRegions(tableName).get(0);
    HRegion spiedRegion = spy(region);
    final MutableBoolean flushed = new MutableBoolean(false);
    final MutableBoolean reported = new MutableBoolean(false);
    doAnswer(new Answer<FlushResult>() {

        @Override
        public FlushResult answer(InvocationOnMock invocation) throws Throwable {
            synchronized (flushed) {
                flushed.setValue(true);
                flushed.notifyAll();
            }
            synchronized (reported) {
                while (!reported.booleanValue()) {
                    reported.wait();
                }
            }
            rs.getWAL(region.getRegionInfo()).abortCacheFlush(region.getRegionInfo().getEncodedNameAsBytes());
            throw new DroppedSnapshotException("testcase");
        }
    }).when(spiedRegion).internalFlushCacheAndCommit(Matchers.<WAL>any(), Matchers.<MonitoredTask>any(), Matchers.<PrepareFlushResult>any(), Matchers.<Collection<HStore>>any());
    // Find region key; don't pick up key for hbase:meta by mistake.
    String key = null;
    for (Map.Entry<String, HRegion> entry : rs.getOnlineRegions().entrySet()) {
        if (entry.getValue().getRegionInfo().getTable().equals(this.tableName)) {
            key = entry.getKey();
            break;
        }
    }
    rs.getOnlineRegions().put(key, spiedRegion);
    Connection conn = testUtil.getConnection();
    try (Table table = conn.getTable(tableName)) {
        table.put(new Put(Bytes.toBytes("row0")).addColumn(family, qualifier, Bytes.toBytes("val0")));
    }
    long oldestSeqIdOfStore = region.getOldestSeqIdOfStore(family);
    LOG.info("CHANGE OLDEST " + oldestSeqIdOfStore);
    assertTrue(oldestSeqIdOfStore > HConstants.NO_SEQNUM);
    rs.getMemStoreFlusher().requestFlush(spiedRegion, FlushLifeCycleTracker.DUMMY);
    synchronized (flushed) {
        while (!flushed.booleanValue()) {
            flushed.wait();
        }
    }
    try (Table table = conn.getTable(tableName)) {
        table.put(new Put(Bytes.toBytes("row1")).addColumn(family, qualifier, Bytes.toBytes("val1")));
    }
    long now = EnvironmentEdgeManager.currentTime();
    rs.tryRegionServerReport(now - 500, now);
    synchronized (reported) {
        reported.setValue(true);
        reported.notifyAll();
    }
    while (testUtil.getRSForFirstRegionInTable(tableName) == rs) {
        Thread.sleep(100);
    }
    try (Table table = conn.getTable(tableName)) {
        Result result = table.get(new Get(Bytes.toBytes("row0")));
        assertArrayEquals(Bytes.toBytes("val0"), result.getValue(family, qualifier));
    }
}
Also used: Table(org.apache.hadoop.hbase.client.Table) DroppedSnapshotException(org.apache.hadoop.hbase.DroppedSnapshotException) MutableBoolean(org.apache.commons.lang3.mutable.MutableBoolean) Connection(org.apache.hadoop.hbase.client.Connection) PrepareFlushResult(org.apache.hadoop.hbase.regionserver.HRegion.PrepareFlushResult) FlushResult(org.apache.hadoop.hbase.regionserver.HRegion.FlushResult) Put(org.apache.hadoop.hbase.client.Put) Result(org.apache.hadoop.hbase.client.Result) InvocationOnMock(org.mockito.invocation.InvocationOnMock) Get(org.apache.hadoop.hbase.client.Get) Map(java.util.Map) Test(org.junit.Test)
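
The moving parts in Example 1 are a Mockito spy whose stubbed flush method both signals progress and injects a failure, plus plain wait/notify handshakes on MutableBoolean latches. The sketch below isolates that coordination pattern; Flusher is a hypothetical stand-in for HRegion, and the snippet assumes Mockito and commons-lang3 on the classpath.

import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.spy;

import org.apache.commons.lang3.mutable.MutableBoolean;

public class FaultInjectionSketch {

    // Hypothetical stand-in for HRegion: just something with a method to stub.
    static class Flusher {
        public String flush() {
            return "flushed";
        }
    }

    public static void main(String[] args) throws InterruptedException {
        final MutableBoolean flushed = new MutableBoolean(false);
        Flusher spied = spy(new Flusher());
        // As in the test: the stub signals that the flush attempt started,
        // then fails, so the caller sees an injected exception.
        doAnswer(invocation -> {
            synchronized (flushed) {
                flushed.setValue(true);
                flushed.notifyAll();
            }
            throw new RuntimeException("injected failure");
        }).when(spied).flush();

        Thread flusherThread = new Thread(() -> {
            try {
                spied.flush();
            } catch (RuntimeException expected) {
                // the injected failure surfaces here
            }
        });
        flusherThread.start();

        // Block until the stub has run, mirroring the synchronized/wait
        // loops in the test above.
        synchronized (flushed) {
            while (!flushed.booleanValue()) {
                flushed.wait();
            }
        }
        flusherThread.join();
        System.out.println("flush attempt observed and failure injected");
    }
}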

Example 2 with PrepareFlushResult

Use of org.apache.hadoop.hbase.regionserver.HRegion.PrepareFlushResult in project hbase by apache.

From the class TestHRegionReplayEvents, method testReplayFlushCommitMarkerLargerThanFlushStartMarker.

/**
 * Tests the case where we prepare a flush with some seqId and we receive a flush commit marker
 * larger than the previous flush start marker.
 */
@Test
public void testReplayFlushCommitMarkerLargerThanFlushStartMarker() throws IOException {
    // load some data to primary and flush. 1 flush and some more unflushed data
    putDataWithFlushes(primaryRegion, 100, 100, 100);
    int numRows = 200;
    // now replay the edits and the flush marker
    reader = createWALReaderForPrimary();
    LOG.info("-- Replaying edits and flush events in secondary");
    FlushDescriptor startFlushDesc = null;
    FlushDescriptor commitFlushDesc = null;
    int lastReplayed = 0;
    while (true) {
        WAL.Entry entry = reader.next();
        if (entry == null) {
            break;
        }
        FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
        if (flushDesc != null) {
            if (flushDesc.getAction() == FlushAction.START_FLUSH) {
                if (startFlushDesc == null) {
                    LOG.info("-- Replaying flush start in secondary");
                    startFlushDesc = flushDesc;
                    PrepareFlushResult result = secondaryRegion.replayWALFlushStartMarker(startFlushDesc);
                    assertNull(result.result);
                }
            } else if (flushDesc.getAction() == FlushAction.COMMIT_FLUSH) {
                // do not replay any flush commit yet
                // hold on to the flush commit marker but simulate a larger
                // flush commit seqId
                commitFlushDesc = FlushDescriptor.newBuilder(flushDesc).setFlushSequenceNumber(flushDesc.getFlushSequenceNumber() + 50).build();
            }
            // after replay verify that everything is still visible
            verifyData(secondaryRegion, 0, lastReplayed + 1, cq, families);
        } else {
            lastReplayed = replayEdit(secondaryRegion, entry);
        }
    }
    // at this point, there should be some data (rows 0-100) in memstore snapshot
    // and some more data in memstores (rows 100-200)
    verifyData(secondaryRegion, 0, numRows, cq, families);
    // no store files in the region
    int expectedStoreFileCount = 0;
    for (HStore s : secondaryRegion.getStores()) {
        assertEquals(expectedStoreFileCount, s.getStorefilesCount());
    }
    long regionMemstoreSize = secondaryRegion.getMemStoreDataSize();
    // Test case 1: replay a flush commit marker larger than what we have prepared
    LOG.info("Testing replaying flush COMMIT " + commitFlushDesc + " on top of flush START " + startFlushDesc);
    assertTrue(commitFlushDesc.getFlushSequenceNumber() > startFlushDesc.getFlushSequenceNumber());
    LOG.info("-- Replaying flush commit in secondary " + commitFlushDesc);
    secondaryRegion.replayWALFlushCommitMarker(commitFlushDesc);
    // assert that the flush files are picked
    expectedStoreFileCount++;
    for (HStore s : secondaryRegion.getStores()) {
        assertEquals(expectedStoreFileCount, s.getStorefilesCount());
    }
    HStore store = secondaryRegion.getStore(Bytes.toBytes("cf1"));
    MemStoreSize mss = store.getFlushableSize();
    // assert that the memstore is not dropped
    assertTrue(mss.getHeapSize() > 0);
    // assert that the region memstore is smaller than before, but not empty
    long newRegionMemstoreSize = secondaryRegion.getMemStoreDataSize();
    assertTrue(newRegionMemstoreSize > 0);
    assertTrue(regionMemstoreSize > newRegionMemstoreSize);
    // prepare snapshot should be dropped
    assertNull(secondaryRegion.getPrepareFlushResult());
    LOG.info("-- Verifying edits from secondary");
    verifyData(secondaryRegion, 0, numRows, cq, families);
    LOG.info("-- Verifying edits from primary.");
    verifyData(primaryRegion, 0, numRows, cq, families);
}
Also used: WAL(org.apache.hadoop.hbase.wal.WAL) PrepareFlushResult(org.apache.hadoop.hbase.regionserver.HRegion.PrepareFlushResult) FlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor) StoreFlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor) Test(org.junit.Test)

Example 3 with PrepareFlushResult

Use of org.apache.hadoop.hbase.regionserver.HRegion.PrepareFlushResult in project hbase by apache.

From the class TestHRegionReplayEvents, method testReplayFlushStartMarkers.

/**
 * Tests cases where we prepare a flush with some seqId and then receive other flush start markers
 * equal to, greater than, or less than the previous flush start marker.
 */
@Test
public void testReplayFlushStartMarkers() throws IOException {
    // load some data to primary and flush. 1 flush and some more unflushed data
    putDataWithFlushes(primaryRegion, 100, 100, 100);
    int numRows = 200;
    // now replay the edits and the flush marker
    reader = createWALReaderForPrimary();
    LOG.info("-- Replaying edits and flush events in secondary");
    FlushDescriptor startFlushDesc = null;
    int lastReplayed = 0;
    while (true) {
        WAL.Entry entry = reader.next();
        if (entry == null) {
            break;
        }
        FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
        if (flushDesc != null) {
            // first verify that everything is replayed and visible before flush event replay
            HStore store = secondaryRegion.getStore(Bytes.toBytes("cf1"));
            long storeMemstoreSize = store.getMemStoreSize().getHeapSize();
            long regionMemstoreSize = secondaryRegion.getMemStoreDataSize();
            MemStoreSize mss = store.getFlushableSize();
            if (flushDesc.getAction() == FlushAction.START_FLUSH) {
                startFlushDesc = flushDesc;
                LOG.info("-- Replaying flush start in secondary");
                PrepareFlushResult result = secondaryRegion.replayWALFlushStartMarker(startFlushDesc);
                assertNull(result.result);
                assertEquals(result.flushOpSeqId, startFlushDesc.getFlushSequenceNumber());
                assertTrue(regionMemstoreSize > 0);
                assertTrue(mss.getHeapSize() > 0);
                // assert that the store memstore is smaller now
                long newStoreMemstoreSize = store.getMemStoreSize().getHeapSize();
                LOG.info("Memstore size reduced by:" + StringUtils.humanReadableInt(newStoreMemstoreSize - storeMemstoreSize));
                assertTrue(storeMemstoreSize > newStoreMemstoreSize);
                verifyData(secondaryRegion, 0, lastReplayed + 1, cq, families);
            }
            // after replay verify that everything is still visible
            verifyData(secondaryRegion, 0, lastReplayed + 1, cq, families);
        } else {
            lastReplayed = replayEdit(secondaryRegion, entry);
        }
    }
    // at this point, there should be some data (rows 0-100) in memstore snapshot
    // and some more data in memstores (rows 100-200)
    verifyData(secondaryRegion, 0, numRows, cq, families);
    // Test case 1: replay the same flush start marker again
    LOG.info("-- Replaying same flush start in secondary again");
    PrepareFlushResult result = secondaryRegion.replayWALFlushStartMarker(startFlushDesc);
    // this should return null: the flush start marker is ignored
    assertNull(result);
    // assert that we still have prepared flush with the previous setup.
    assertNotNull(secondaryRegion.getPrepareFlushResult());
    assertEquals(secondaryRegion.getPrepareFlushResult().flushOpSeqId, startFlushDesc.getFlushSequenceNumber());
    // memstore is not empty
    assertTrue(secondaryRegion.getMemStoreDataSize() > 0);
    verifyData(secondaryRegion, 0, numRows, cq, families);
    // Test case 2: replay a flush start marker with a smaller seqId
    FlushDescriptor startFlushDescSmallerSeqId = clone(startFlushDesc, startFlushDesc.getFlushSequenceNumber() - 50);
    LOG.info("-- Replaying same flush start in secondary again " + startFlushDescSmallerSeqId);
    result = secondaryRegion.replayWALFlushStartMarker(startFlushDescSmallerSeqId);
    // this should return null: the flush start marker is ignored
    assertNull(result);
    // assert that we still have prepared flush with the previous setup.
    assertNotNull(secondaryRegion.getPrepareFlushResult());
    assertEquals(secondaryRegion.getPrepareFlushResult().flushOpSeqId, startFlushDesc.getFlushSequenceNumber());
    // memstore is not empty
    assertTrue(secondaryRegion.getMemStoreDataSize() > 0);
    verifyData(secondaryRegion, 0, numRows, cq, families);
    // Test case 3: replay a flush start marker with a larger seqId
    FlushDescriptor startFlushDescLargerSeqId = clone(startFlushDesc, startFlushDesc.getFlushSequenceNumber() + 50);
    LOG.info("-- Replaying same flush start in secondary again " + startFlushDescLargerSeqId);
    result = secondaryRegion.replayWALFlushStartMarker(startFlushDescLargerSeqId);
    // this should return null: the flush start marker is ignored
    assertNull(result);
    // assert that we still have prepared flush with the previous setup.
    assertNotNull(secondaryRegion.getPrepareFlushResult());
    assertEquals(secondaryRegion.getPrepareFlushResult().flushOpSeqId, startFlushDesc.getFlushSequenceNumber());
    // memstore is not empty
    assertTrue(secondaryRegion.getMemStoreDataSize() > 0);
    verifyData(secondaryRegion, 0, numRows, cq, families);
    LOG.info("-- Verifying edits from secondary");
    verifyData(secondaryRegion, 0, numRows, cq, families);
    LOG.info("-- Verifying edits from primary.");
    verifyData(primaryRegion, 0, numRows, cq, families);
}
Also used: WAL(org.apache.hadoop.hbase.wal.WAL) PrepareFlushResult(org.apache.hadoop.hbase.regionserver.HRegion.PrepareFlushResult) FlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor) StoreFlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor) Test(org.junit.Test)
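
Example 3 calls a clone(FlushDescriptor, long) helper whose body is not reproduced in the snippet. A plausible one-line implementation, following the same FlushDescriptor.newBuilder(...) pattern that Example 2 uses inline (the actual helper in TestHRegionReplayEvents may differ):

// FlushDescriptor is org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.
private FlushDescriptor clone(FlushDescriptor flush, long flushSeqId) {
    // copy every field of the original descriptor, overriding only the seqId
    return FlushDescriptor.newBuilder(flush).setFlushSequenceNumber(flushSeqId).build();
}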

Example 4 with PrepareFlushResult

Use of org.apache.hadoop.hbase.regionserver.HRegion.PrepareFlushResult in project hbase by apache.

From the class TestHRegionReplayEvents, method testReplayFlushCommitMarkerSmallerThanFlushStartMarker.

/**
 * Tests the case where we prepare a flush with some seqId and we receive a flush commit marker
 * less than the previous flush start marker.
 */
@Test
public void testReplayFlushCommitMarkerSmallerThanFlushStartMarker() throws IOException {
    // load some data to primary and flush. 2 flushes and some more unflushed data
    putDataWithFlushes(primaryRegion, 100, 200, 100);
    int numRows = 300;
    // now replay the edits and the flush marker
    reader = createWALReaderForPrimary();
    LOG.info("-- Replaying edits and flush events in secondary");
    FlushDescriptor startFlushDesc = null;
    FlushDescriptor commitFlushDesc = null;
    int lastReplayed = 0;
    while (true) {
        WAL.Entry entry = reader.next();
        if (entry == null) {
            break;
        }
        FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
        if (flushDesc != null) {
            if (flushDesc.getAction() == FlushAction.START_FLUSH) {
                // don't replay the first flush start marker, hold on to it, replay the second one
                if (startFlushDesc == null) {
                    startFlushDesc = flushDesc;
                } else {
                    LOG.info("-- Replaying flush start in secondary");
                    startFlushDesc = flushDesc;
                    PrepareFlushResult result = secondaryRegion.replayWALFlushStartMarker(startFlushDesc);
                    assertNull(result.result);
                }
            } else if (flushDesc.getAction() == FlushAction.COMMIT_FLUSH) {
                // do not replay any flush commit yet
                if (commitFlushDesc == null) {
                    // hold on to the first flush commit marker
                    commitFlushDesc = flushDesc;
                }
            }
            // after replay verify that everything is still visible
            verifyData(secondaryRegion, 0, lastReplayed + 1, cq, families);
        } else {
            lastReplayed = replayEdit(secondaryRegion, entry);
        }
    }
    // at this point, there should be some data (rows 0-200) in memstore snapshot
    // and some more data in memstores (rows 200-300)
    verifyData(secondaryRegion, 0, numRows, cq, families);
    // no store files in the region
    int expectedStoreFileCount = 0;
    for (HStore s : secondaryRegion.getStores()) {
        assertEquals(expectedStoreFileCount, s.getStorefilesCount());
    }
    long regionMemstoreSize = secondaryRegion.getMemStoreDataSize();
    // Test case 1: replay a flush commit marker smaller than what we have prepared
    LOG.info("Testing replaying flush COMMIT " + commitFlushDesc + " on top of flush START " + startFlushDesc);
    assertTrue(commitFlushDesc.getFlushSequenceNumber() < startFlushDesc.getFlushSequenceNumber());
    LOG.info("-- Replaying flush commit in secondary " + commitFlushDesc);
    secondaryRegion.replayWALFlushCommitMarker(commitFlushDesc);
    // assert that the flush files are picked
    expectedStoreFileCount++;
    for (HStore s : secondaryRegion.getStores()) {
        assertEquals(expectedStoreFileCount, s.getStorefilesCount());
    }
    HStore store = secondaryRegion.getStore(Bytes.toBytes("cf1"));
    MemStoreSize mss = store.getFlushableSize();
    // assert that the memstore is not dropped
    assertTrue(mss.getHeapSize() > 0);
    // assert that the region memstore is same as before
    long newRegionMemstoreSize = secondaryRegion.getMemStoreDataSize();
    assertEquals(regionMemstoreSize, newRegionMemstoreSize);
    // not dropped
    assertNotNull(secondaryRegion.getPrepareFlushResult());
    LOG.info("-- Verifying edits from secondary");
    verifyData(secondaryRegion, 0, numRows, cq, families);
    LOG.info("-- Verifying edits from primary.");
    verifyData(primaryRegion, 0, numRows, cq, families);
}
Also used: WAL(org.apache.hadoop.hbase.wal.WAL) PrepareFlushResult(org.apache.hadoop.hbase.regionserver.HRegion.PrepareFlushResult) FlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor) StoreFlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor) Test(org.junit.Test)

Example 5 with PrepareFlushResult

Use of org.apache.hadoop.hbase.regionserver.HRegion.PrepareFlushResult in project hbase by apache.

From the class TestHRegionReplayEvents, method testReplayFlushesAndCompactions.

@Test
public void testReplayFlushesAndCompactions() throws IOException {
    // initiate a secondary region with some data.
    // load some data to primary and flush. 3 flushes and some more unflushed data
    putDataWithFlushes(primaryRegion, 100, 300, 100);
    // compaction from primary
    LOG.info("-- Compacting primary, only 1 store");
    primaryRegion.compactStore(Bytes.toBytes("cf1"), NoLimitThroughputController.INSTANCE);
    // now replay the edits and the flush marker
    reader = createWALReaderForPrimary();
    LOG.info("-- Replaying edits and flush events in secondary");
    int lastReplayed = 0;
    int expectedStoreFileCount = 0;
    while (true) {
        WAL.Entry entry = reader.next();
        if (entry == null) {
            break;
        }
        FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
        CompactionDescriptor compactionDesc = WALEdit.getCompaction(entry.getEdit().getCells().get(0));
        if (flushDesc != null) {
            // first verify that everything is replayed and visible before flush event replay
            verifyData(secondaryRegion, 0, lastReplayed, cq, families);
            HStore store = secondaryRegion.getStore(Bytes.toBytes("cf1"));
            long storeMemstoreSize = store.getMemStoreSize().getHeapSize();
            long regionMemstoreSize = secondaryRegion.getMemStoreDataSize();
            MemStoreSize mss = store.getFlushableSize();
            long storeSize = store.getSize();
            long storeSizeUncompressed = store.getStoreSizeUncompressed();
            if (flushDesc.getAction() == FlushAction.START_FLUSH) {
                LOG.info("-- Replaying flush start in secondary");
                PrepareFlushResult result = secondaryRegion.replayWALFlushStartMarker(flushDesc);
                assertNull(result.result);
                assertEquals(result.flushOpSeqId, flushDesc.getFlushSequenceNumber());
                // assert that the store memstore is smaller now
                long newStoreMemstoreSize = store.getMemStoreSize().getHeapSize();
                LOG.info("Memstore size reduced by:" + StringUtils.humanReadableInt(newStoreMemstoreSize - storeMemstoreSize));
                assertTrue(storeMemstoreSize > newStoreMemstoreSize);
            } else if (flushDesc.getAction() == FlushAction.COMMIT_FLUSH) {
                LOG.info("-- Replaying flush commit in secondary");
                secondaryRegion.replayWALFlushCommitMarker(flushDesc);
                // assert that the flush files are picked
                expectedStoreFileCount++;
                for (HStore s : secondaryRegion.getStores()) {
                    assertEquals(expectedStoreFileCount, s.getStorefilesCount());
                }
                MemStoreSize newMss = store.getFlushableSize();
                assertTrue(mss.getHeapSize() > newMss.getHeapSize());
                // assert that the region memstore is smaller now
                long newRegionMemstoreSize = secondaryRegion.getMemStoreDataSize();
                assertTrue(regionMemstoreSize > newRegionMemstoreSize);
                // assert that the store sizes are bigger
                assertTrue(store.getSize() > storeSize);
                assertTrue(store.getStoreSizeUncompressed() > storeSizeUncompressed);
                assertEquals(store.getSize(), store.getStorefilesSize());
            }
            // after replay verify that everything is still visible
            verifyData(secondaryRegion, 0, lastReplayed + 1, cq, families);
        } else if (compactionDesc != null) {
            secondaryRegion.replayWALCompactionMarker(compactionDesc, true, false, Long.MAX_VALUE);
            // assert that the compaction is applied
            for (HStore store : secondaryRegion.getStores()) {
                if (store.getColumnFamilyName().equals("cf1")) {
                    assertEquals(1, store.getStorefilesCount());
                } else {
                    assertEquals(expectedStoreFileCount, store.getStorefilesCount());
                }
            }
        } else {
            lastReplayed = replayEdit(secondaryRegion, entry);
        }
    }
    assertEquals(400 - 1, lastReplayed);
    LOG.info("-- Verifying edits from secondary");
    verifyData(secondaryRegion, 0, 400, cq, families);
    LOG.info("-- Verifying edits from primary. Ensuring that files are not deleted");
    verifyData(primaryRegion, 0, lastReplayed, cq, families);
    for (HStore store : primaryRegion.getStores()) {
        if (store.getColumnFamilyName().equals("cf1")) {
            assertEquals(1, store.getStorefilesCount());
        } else {
            assertEquals(expectedStoreFileCount, store.getStorefilesCount());
        }
    }
}
Also used: WAL(org.apache.hadoop.hbase.wal.WAL) PrepareFlushResult(org.apache.hadoop.hbase.regionserver.HRegion.PrepareFlushResult) CompactionDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor) FlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor) StoreFlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor) Test(org.junit.Test)

Aggregations

PrepareFlushResult (org.apache.hadoop.hbase.regionserver.HRegion.PrepareFlushResult): 5
Test (org.junit.Test): 5
FlushDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor): 4
StoreFlushDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor): 4
WAL (org.apache.hadoop.hbase.wal.WAL): 4
Map (java.util.Map): 1
MutableBoolean (org.apache.commons.lang3.mutable.MutableBoolean): 1
DroppedSnapshotException (org.apache.hadoop.hbase.DroppedSnapshotException): 1
Connection (org.apache.hadoop.hbase.client.Connection): 1
Get (org.apache.hadoop.hbase.client.Get): 1
Put (org.apache.hadoop.hbase.client.Put): 1
Result (org.apache.hadoop.hbase.client.Result): 1
Table (org.apache.hadoop.hbase.client.Table): 1
FlushResult (org.apache.hadoop.hbase.regionserver.HRegion.FlushResult): 1
CompactionDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor): 1
InvocationOnMock (org.mockito.invocation.InvocationOnMock): 1
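
Taken together, the examples pin down a small state machine for flush-marker replay on a secondary region: the first START_FLUSH prepares a snapshot and returns a PrepareFlushResult; while a flush is prepared, further start markers (same, smaller, or larger seqId) are ignored and return null (Example 3); a COMMIT_FLUSH always picks up the flushed files, but drops the prepared snapshot only when its seqId covers the prepared one (Example 2), otherwise the snapshot is retained (Example 4). The toy model below restates those rules in plain, runnable Java; it is an illustrative sketch, not HBase code: the class and method names are invented, and the behavior is inferred from the tests above.

// Toy model of the flush-marker replay contract exercised by the tests above.
public class ReplayContractSketch {

    static final long NONE = -1;

    static class SecondaryRegionModel {
        long preparedSeqId = NONE;

        // START_FLUSH: prepares a snapshot the first time; while a flush is
        // already prepared, further start markers are ignored regardless of
        // their seqId (Example 3).
        long replayFlushStart(long seqId) {
            if (preparedSeqId != NONE) {
                return NONE; // ignored, previously prepared flush is kept
            }
            preparedSeqId = seqId;
            return preparedSeqId;
        }

        // COMMIT_FLUSH: flush files are always picked up; the prepared
        // snapshot is dropped only when the commit seqId covers the prepared
        // one (Example 2), otherwise it is retained (Example 4).
        boolean replayFlushCommit(long seqId) {
            if (preparedSeqId != NONE && seqId >= preparedSeqId) {
                preparedSeqId = NONE;
                return true; // snapshot dropped
            }
            return false; // snapshot retained
        }
    }

    public static void main(String[] args) {
        SecondaryRegionModel region = new SecondaryRegionModel();
        check(region.replayFlushStart(100) == 100);  // first start marker prepares
        check(region.replayFlushStart(100) == NONE); // duplicate ignored (Example 3, case 1)
        check(region.replayFlushStart(50) == NONE);  // smaller ignored (case 2)
        check(region.replayFlushStart(150) == NONE); // larger ignored (case 3)
        check(region.replayFlushCommit(150));        // larger commit drops snapshot (Example 2)

        region.replayFlushStart(200);
        check(!region.replayFlushCommit(150));       // smaller commit keeps snapshot (Example 4)
        System.out.println("replay contract model holds");
    }

    static void check(boolean condition) {
        if (!condition) {
            throw new AssertionError("contract violated");
        }
    }
}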