Example 11 with FlushDescriptor

use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor in project hbase by apache.

the class TestHRegionReplayEvents method testSkippingEditsWithSmallerSeqIdAfterRegionOpenEvent.

/**
 * Tests that edits coming in for replay are skipped when they have a smaller seqId than the
 * seqId of the last replayed region open event.
 */
@Test
public void testSkippingEditsWithSmallerSeqIdAfterRegionOpenEvent() throws IOException {
    putDataWithFlushes(primaryRegion, 100, 100, 0);
    int numRows = 100;
    // close the region and open again.
    primaryRegion.close();
    primaryRegion = HRegion.openHRegion(rootDir, primaryHri, htd, walPrimary, CONF, rss, null);
    // now replay the edits and the flush marker
    reader = createWALReaderForPrimary();
    List<RegionEventDescriptor> regionEvents = Lists.newArrayList();
    List<WAL.Entry> edits = Lists.newArrayList();
    LOG.info("-- Replaying edits and region events in secondary");
    while (true) {
        WAL.Entry entry = reader.next();
        if (entry == null) {
            break;
        }
        FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
        RegionEventDescriptor regionEventDesc = WALEdit.getRegionEventDescriptor(entry.getEdit().getCells().get(0));
        if (flushDesc != null) {
        // don't replay flushes
        } else if (regionEventDesc != null) {
            regionEvents.add(regionEventDesc);
        } else {
            edits.add(entry);
        }
    }
    // replay the region open event of the first open, but with the seqId of the second open;
    // this way none of the flush files will be picked up.
    secondaryRegion.replayWALRegionEventMarker(
        RegionEventDescriptor.newBuilder(regionEvents.get(0))
            .setLogSequenceNumber(regionEvents.get(2).getLogSequenceNumber())
            .build());
    // replay edits from before the region close. If replay does not skip these,
    // the following verification will NOT fail.
    for (WAL.Entry entry : edits) {
        replayEdit(secondaryRegion, entry);
    }
    boolean expectedFail = false;
    try {
        verifyData(secondaryRegion, 0, numRows, cq, families);
    } catch (AssertionError e) {
        // expected
        expectedFail = true;
    }
    if (!expectedFail) {
        fail("Should have failed this verification");
    }
}
Also used: WAL (org.apache.hadoop.hbase.wal.WAL), RegionEventDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor), FlushDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor), StoreFlushDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor), Test (org.junit.Test)
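
The crux of this test is the rebuilt region open marker passed to replayWALRegionEventMarker: the generated protobuf builder can clone an existing RegionEventDescriptor while overriding a single field. A minimal sketch of that pattern, with illustrative variable names (firstOpen and secondOpen are not names from the test):

    // Clone the first REGION_OPEN event, but stamp it with the second open's
    // sequence number so that flush files below that seqId are not picked up.
    RegionEventDescriptor rewritten = RegionEventDescriptor.newBuilder(firstOpen)
        .setLogSequenceNumber(secondOpen.getLogSequenceNumber())
        .build();
    secondaryRegion.replayWALRegionEventMarker(rewritten);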

Example 12 with FlushDescriptor

use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor in project hbase by apache.

the class TestHRegionReplayEvents method testReplayFlushCommitMarkerLargerThanFlushStartMarker.

/**
 * Tests the case where we prepare a flush with some seqId and then receive a flush commit marker
 * with a seqId larger than that of the previous flush start marker.
 */
@Test
public void testReplayFlushCommitMarkerLargerThanFlushStartMarker() throws IOException {
    // load some data to primary and flush. 1 flush and some more unflushed data
    putDataWithFlushes(primaryRegion, 100, 100, 100);
    int numRows = 200;
    // now replay the edits and the flush marker
    reader = createWALReaderForPrimary();
    LOG.info("-- Replaying edits and flush events in secondary");
    FlushDescriptor startFlushDesc = null;
    FlushDescriptor commitFlushDesc = null;
    int lastReplayed = 0;
    while (true) {
        WAL.Entry entry = reader.next();
        if (entry == null) {
            break;
        }
        FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
        if (flushDesc != null) {
            if (flushDesc.getAction() == FlushAction.START_FLUSH) {
                if (startFlushDesc == null) {
                    LOG.info("-- Replaying flush start in secondary");
                    startFlushDesc = flushDesc;
                    PrepareFlushResult result = secondaryRegion.replayWALFlushStartMarker(startFlushDesc);
                    assertNull(result.result);
                }
            } else if (flushDesc.getAction() == FlushAction.COMMIT_FLUSH) {
                // do not replay any flush commit yet
                // hold on to the flush commit marker but simulate a larger
                // flush commit seqId
                commitFlushDesc = FlushDescriptor.newBuilder(flushDesc)
                    .setFlushSequenceNumber(flushDesc.getFlushSequenceNumber() + 50)
                    .build();
            }
            // after replay verify that everything is still visible
            verifyData(secondaryRegion, 0, lastReplayed + 1, cq, families);
        } else {
            lastReplayed = replayEdit(secondaryRegion, entry);
        }
    }
    // at this point, there should be some data (rows 0-100) in memstore snapshot
    // and some more data in memstores (rows 100-200)
    verifyData(secondaryRegion, 0, numRows, cq, families);
    // no store files in the region
    int expectedStoreFileCount = 0;
    for (HStore s : secondaryRegion.getStores()) {
        assertEquals(expectedStoreFileCount, s.getStorefilesCount());
    }
    long regionMemstoreSize = secondaryRegion.getMemStoreDataSize();
    // Test case 1: replay a flush commit marker larger than what we have prepared
    LOG.info("Testing replaying flush COMMIT " + commitFlushDesc + " on top of flush START" + startFlushDesc);
    assertTrue(commitFlushDesc.getFlushSequenceNumber() > startFlushDesc.getFlushSequenceNumber());
    LOG.info("-- Replaying flush commit in secondary" + commitFlushDesc);
    secondaryRegion.replayWALFlushCommitMarker(commitFlushDesc);
    // assert that the flush files are picked up
    expectedStoreFileCount++;
    for (HStore s : secondaryRegion.getStores()) {
        assertEquals(expectedStoreFileCount, s.getStorefilesCount());
    }
    HStore store = secondaryRegion.getStore(Bytes.toBytes("cf1"));
    MemStoreSize mss = store.getFlushableSize();
    // assert that the memstore is not dropped
    assertTrue(mss.getHeapSize() > 0);
    // assert that the region memstore is smaller than before, but not empty
    long newRegionMemstoreSize = secondaryRegion.getMemStoreDataSize();
    assertTrue(newRegionMemstoreSize > 0);
    assertTrue(regionMemstoreSize > newRegionMemstoreSize);
    // prepare snapshot should be dropped
    assertNull(secondaryRegion.getPrepareFlushResult());
    LOG.info("-- Verifying edits from secondary");
    verifyData(secondaryRegion, 0, numRows, cq, families);
    LOG.info("-- Verifying edits from primary.");
    verifyData(primaryRegion, 0, numRows, cq, families);
}
Also used: WAL (org.apache.hadoop.hbase.wal.WAL), PrepareFlushResult (org.apache.hadoop.hbase.regionserver.HRegion.PrepareFlushResult), FlushDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor), StoreFlushDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor), Test (org.junit.Test)
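
This test only branches on START_FLUSH and COMMIT_FLUSH, but the FlushAction enum in the generated WALProtos also defines ABORT_FLUSH and CANNOT_FLUSH. A sketch of a full dispatch over the enum; the per-case comments summarize replica-side behavior and are not code from the test:

    switch (flushDesc.getAction()) {
        case START_FLUSH:
            // the replica snapshots its memstore at the marker's flushSequenceNumber
            break;
        case COMMIT_FLUSH:
            // the replica picks up the committed store files and drops the snapshot
            break;
        case ABORT_FLUSH:
            // the primary aborted the flush; the replica restores its snapshot
            break;
        case CANNOT_FLUSH:
            // the primary had nothing to flush (e.g. an empty memstore)
            break;
    }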

Example 13 with FlushDescriptor

use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor in project hbase by apache.

the class TestHRegionReplayEvents method testReplayFlushStartMarkers.

/**
 * Tests cases where we prepare a flush with some seqId and then receive other flush start markers
 * equal to, greater than, or less than the previous flush start marker.
 */
@Test
public void testReplayFlushStartMarkers() throws IOException {
    // load some data to primary and flush. 1 flush and some more unflushed data
    putDataWithFlushes(primaryRegion, 100, 100, 100);
    int numRows = 200;
    // now replay the edits and the flush marker
    reader = createWALReaderForPrimary();
    LOG.info("-- Replaying edits and flush events in secondary");
    FlushDescriptor startFlushDesc = null;
    int lastReplayed = 0;
    while (true) {
        WAL.Entry entry = reader.next();
        if (entry == null) {
            break;
        }
        FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
        if (flushDesc != null) {
            // first verify that everything is replayed and visible before flush event replay
            HStore store = secondaryRegion.getStore(Bytes.toBytes("cf1"));
            long storeMemstoreSize = store.getMemStoreSize().getHeapSize();
            long regionMemstoreSize = secondaryRegion.getMemStoreDataSize();
            MemStoreSize mss = store.getFlushableSize();
            if (flushDesc.getAction() == FlushAction.START_FLUSH) {
                startFlushDesc = flushDesc;
                LOG.info("-- Replaying flush start in secondary");
                PrepareFlushResult result = secondaryRegion.replayWALFlushStartMarker(startFlushDesc);
                assertNull(result.result);
                assertEquals(result.flushOpSeqId, startFlushDesc.getFlushSequenceNumber());
                assertTrue(regionMemstoreSize > 0);
                assertTrue(mss.getHeapSize() > 0);
                // assert that the store memstore is smaller now
                long newStoreMemstoreSize = store.getMemStoreSize().getHeapSize();
                LOG.info("Memstore size reduced by:" + StringUtils.humanReadableInt(newStoreMemstoreSize - storeMemstoreSize));
                assertTrue(storeMemstoreSize > newStoreMemstoreSize);
                verifyData(secondaryRegion, 0, lastReplayed + 1, cq, families);
            }
            // after replay verify that everything is still visible
            verifyData(secondaryRegion, 0, lastReplayed + 1, cq, families);
        } else {
            lastReplayed = replayEdit(secondaryRegion, entry);
        }
    }
    // at this point, there should be some data (rows 0-100) in memstore snapshot
    // and some more data in memstores (rows 100-200)
    verifyData(secondaryRegion, 0, numRows, cq, families);
    // Test case 1: replay the same flush start marker again
    LOG.info("-- Replaying same flush start in secondary again");
    PrepareFlushResult result = secondaryRegion.replayWALFlushStartMarker(startFlushDesc);
    // this should return null. Ignoring the flush start marker
    assertNull(result);
    // assert that we still have prepared flush with the previous setup.
    assertNotNull(secondaryRegion.getPrepareFlushResult());
    assertEquals(secondaryRegion.getPrepareFlushResult().flushOpSeqId, startFlushDesc.getFlushSequenceNumber());
    // memstore is not empty
    assertTrue(secondaryRegion.getMemStoreDataSize() > 0);
    verifyData(secondaryRegion, 0, numRows, cq, families);
    // Test case 2: replay a flush start marker with a smaller seqId
    FlushDescriptor startFlushDescSmallerSeqId = clone(startFlushDesc, startFlushDesc.getFlushSequenceNumber() - 50);
    LOG.info("-- Replaying same flush start in secondary again " + startFlushDescSmallerSeqId);
    result = secondaryRegion.replayWALFlushStartMarker(startFlushDescSmallerSeqId);
    // this should return null. Ignoring the flush start marker
    assertNull(result);
    // assert that we still have prepared flush with the previous setup.
    assertNotNull(secondaryRegion.getPrepareFlushResult());
    assertEquals(secondaryRegion.getPrepareFlushResult().flushOpSeqId, startFlushDesc.getFlushSequenceNumber());
    // memstore is not empty
    assertTrue(secondaryRegion.getMemStoreDataSize() > 0);
    verifyData(secondaryRegion, 0, numRows, cq, families);
    // Test case 3: replay a flush start marker with a larger seqId
    FlushDescriptor startFlushDescLargerSeqId = clone(startFlushDesc, startFlushDesc.getFlushSequenceNumber() + 50);
    LOG.info("-- Replaying same flush start in secondary again " + startFlushDescLargerSeqId);
    result = secondaryRegion.replayWALFlushStartMarker(startFlushDescLargerSeqId);
    // this should return null. Ignoring the flush start marker
    assertNull(result);
    // assert that we still have prepared flush with the previous setup.
    assertNotNull(secondaryRegion.getPrepareFlushResult());
    assertEquals(secondaryRegion.getPrepareFlushResult().flushOpSeqId, startFlushDesc.getFlushSequenceNumber());
    // memstore is not empty
    assertTrue(secondaryRegion.getMemStoreDataSize() > 0);
    verifyData(secondaryRegion, 0, numRows, cq, families);
    LOG.info("-- Verifying edits from secondary");
    verifyData(secondaryRegion, 0, numRows, cq, families);
    LOG.info("-- Verifying edits from primary.");
    verifyData(primaryRegion, 0, numRows, cq, families);
}
Also used: WAL (org.apache.hadoop.hbase.wal.WAL), PrepareFlushResult (org.apache.hadoop.hbase.regionserver.HRegion.PrepareFlushResult), FlushDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor), StoreFlushDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor), Test (org.junit.Test)
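
The clone(...) helper used in test cases 2 and 3 is defined elsewhere in the test class. Judging from its call sites, a sketch consistent with its behavior is a simple protobuf builder round-trip:

    // Rebuild a FlushDescriptor identical to 'flush' except for its seqId.
    private FlushDescriptor clone(FlushDescriptor flush, long flushSeqId) {
        return FlushDescriptor.newBuilder(flush)
            .setFlushSequenceNumber(flushSeqId)
            .build();
    }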

Example 14 with FlushDescriptor

use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor in project hbase by apache.

the class HRegion method internalFlushCacheAndCommit.

@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NN_NAKED_NOTIFY", justification = "Intentional; notify is about completed flush")
protected FlushResult internalFlushCacheAndCommit(final WAL wal, MonitoredTask status,
        final PrepareFlushResult prepareResult, final Collection<Store> storesToFlush)
        throws IOException {
    // prepare flush context is carried via PrepareFlushResult
    TreeMap<byte[], StoreFlushContext> storeFlushCtxs = prepareResult.storeFlushCtxs;
    TreeMap<byte[], List<Path>> committedFiles = prepareResult.committedFiles;
    long startTime = prepareResult.startTime;
    long flushOpSeqId = prepareResult.flushOpSeqId;
    long flushedSeqId = prepareResult.flushedSeqId;
    String s = "Flushing stores of " + this;
    status.setStatus(s);
    if (LOG.isTraceEnabled())
        LOG.trace(s);
    // Any failure from here on out will be catastrophic, requiring a server
    // restart so WAL content can be replayed and put back into the memstore.
    // Otherwise, the snapshot content, while backed up in the WAL, will not
    // be part of the running server's state.
    boolean compactionRequested = false;
    long flushedOutputFileSize = 0;
    try {
        for (StoreFlushContext flush : storeFlushCtxs.values()) {
            flush.flushCache(status);
        }
        // Switch snapshot (in memstore) -> new hfile (thus causing
        // all the store scanners to reset/reseek).
        Iterator<Store> it = storesToFlush.iterator();
        // stores.values() and storeFlushCtxs have same order
        for (StoreFlushContext flush : storeFlushCtxs.values()) {
            boolean needsCompaction = flush.commit(status);
            if (needsCompaction) {
                compactionRequested = true;
            }
            byte[] storeName = it.next().getFamily().getName();
            List<Path> storeCommittedFiles = flush.getCommittedFiles();
            committedFiles.put(storeName, storeCommittedFiles);
            // Flush committed no files, indicating flush is empty or flush was canceled
            if (storeCommittedFiles == null || storeCommittedFiles.isEmpty()) {
                MemstoreSize storeFlushableSize = prepareResult.storeFlushableSize.get(storeName);
                prepareResult.totalFlushableSize.decMemstoreSize(storeFlushableSize);
            }
            flushedOutputFileSize += flush.getOutputFileSize();
        }
        storeFlushCtxs.clear();
        // Set down the memstore size by amount of flush.
        this.decrMemstoreSize(prepareResult.totalFlushableSize);
        if (wal != null) {
            // write flush marker to WAL. If fail, we should throw DroppedSnapshotException
            FlushDescriptor desc = ProtobufUtil.toFlushDescriptor(FlushAction.COMMIT_FLUSH, getRegionInfo(), flushOpSeqId, committedFiles);
            WALUtil.writeFlushMarker(wal, this.getReplicationScope(), getRegionInfo(), desc, true, mvcc);
        }
    } catch (Throwable t) {
        // all and sundry.
        if (wal != null) {
            try {
                FlushDescriptor desc = ProtobufUtil.toFlushDescriptor(FlushAction.ABORT_FLUSH, getRegionInfo(), flushOpSeqId, committedFiles);
                WALUtil.writeFlushMarker(wal, this.replicationScope, getRegionInfo(), desc, false, mvcc);
            } catch (Throwable ex) {
                LOG.warn(getRegionInfo().getEncodedName() + " : " + "failed writing ABORT_FLUSH marker to WAL", ex);
            // ignore this since we will be aborting the RS with DSE.
            }
            wal.abortCacheFlush(this.getRegionInfo().getEncodedNameAsBytes());
        }
        DroppedSnapshotException dse = new DroppedSnapshotException("region: " + Bytes.toStringBinary(getRegionInfo().getRegionName()));
        dse.initCause(t);
        status.abort("Flush failed: " + StringUtils.stringifyException(t));
        // Callers for flushcache() should catch DroppedSnapshotException and abort the region server.
        // However, since we may have the region read lock, we cannot call close(true) here since
        // we cannot promote to a write lock. Instead we are setting closing so that all other region
        // operations except for close will be rejected.
        this.closing.set(true);
        if (rsServices != null) {
            // This is a safeguard against the case where the caller fails to explicitly handle aborting
            rsServices.abort("Replay of WAL required. Forcing server shutdown", dse);
        }
        throw dse;
    }
    // If we get to here, the HStores have been written.
    if (wal != null) {
        wal.completeCacheFlush(this.getRegionInfo().getEncodedNameAsBytes());
    }
    // Record latest flush time
    for (Store store : storesToFlush) {
        this.lastStoreFlushTimeMap.put(store, startTime);
    }
    this.maxFlushedSeqId = flushedSeqId;
    this.lastFlushOpSeqId = flushOpSeqId;
    // Notify anyone waiting on the memstore to clear, e.g. checkResources().
    synchronized (this) {
        // FindBugs NN_NAKED_NOTIFY
        notifyAll();
    }
    long time = EnvironmentEdgeManager.currentTime() - startTime;
    long memstoresize = this.memstoreDataSize.get();
    String msg = "Finished memstore flush of ~" + StringUtils.byteDesc(prepareResult.totalFlushableSize.getDataSize()) + "/" + prepareResult.totalFlushableSize.getDataSize() + ", currentsize=" + StringUtils.byteDesc(memstoresize) + "/" + memstoresize + " for region " + this + " in " + time + "ms, sequenceid=" + flushOpSeqId + ", compaction requested=" + compactionRequested + ((wal == null) ? "; wal=null" : "");
    LOG.info(msg);
    status.setStatus(msg);
    if (rsServices != null && rsServices.getMetrics() != null) {
        // 'time' is already the elapsed duration; subtracting startTime again would be wrong.
        rsServices.getMetrics().updateFlush(time, prepareResult.totalFlushableSize.getDataSize(), flushedOutputFileSize);
    }
    return new FlushResultImpl(compactionRequested
        ? FlushResult.Result.FLUSHED_COMPACTION_NEEDED
        : FlushResult.Result.FLUSHED_NO_COMPACTION_NEEDED, flushOpSeqId);
}
Also used: Path (org.apache.hadoop.fs.Path), DroppedSnapshotException (org.apache.hadoop.hbase.DroppedSnapshotException), FlushDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor), StoreFlushDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor), ArrayList (java.util.ArrayList), AbstractList (java.util.AbstractList), List (java.util.List)
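
The COMMIT_FLUSH marker written by this method is what the replay paths in the tests above consume. A hedged sketch of the reading side, using getters from the generated WALProtos classes ('cell' here stands for the marker cell of a WAL.Entry; the surrounding variables are illustrative):

    FlushDescriptor desc = WALEdit.getFlushDescriptor(cell);
    if (desc != null && desc.getAction() == FlushAction.COMMIT_FLUSH) {
        // One StoreFlushDescriptor per flushed column family; the flushOutput
        // list names the hfiles committed for that store.
        for (StoreFlushDescriptor sfd : desc.getStoreFlushesList()) {
            byte[] family = sfd.getFamilyName().toByteArray();
            List<String> committedFiles = sfd.getFlushOutputList();
        }
    }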

Example 15 with FlushDescriptor

use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor in project hbase by apache.

the class TestHRegionReplayEvents method testRefreshStoreFiles.

@Test
public void testRefreshStoreFiles() throws IOException {
    assertEquals(0, primaryRegion.getStoreFileList(families).size());
    assertEquals(0, secondaryRegion.getStoreFileList(families).size());
    // Test case 1: refresh with an empty region
    secondaryRegion.refreshStoreFiles();
    assertEquals(0, secondaryRegion.getStoreFileList(families).size());
    // do one flush
    putDataWithFlushes(primaryRegion, 100, 100, 0);
    int numRows = 100;
    // refresh the store file list, and ensure that the files are picked up.
    secondaryRegion.refreshStoreFiles();
    assertPathListsEqual(primaryRegion.getStoreFileList(families), secondaryRegion.getStoreFileList(families));
    assertEquals(families.length, secondaryRegion.getStoreFileList(families).size());
    LOG.info("-- Verifying edits from secondary");
    verifyData(secondaryRegion, 0, numRows, cq, families);
    // Test case 2: do 3 more flushes
    putDataWithFlushes(primaryRegion, 100, 300, 0);
    numRows = 300;
    // refresh the store file list, and ensure that the files are picked up.
    secondaryRegion.refreshStoreFiles();
    assertPathListsEqual(primaryRegion.getStoreFileList(families), secondaryRegion.getStoreFileList(families));
    assertEquals(families.length * 4, secondaryRegion.getStoreFileList(families).size());
    LOG.info("-- Verifying edits from secondary");
    verifyData(secondaryRegion, 0, numRows, cq, families);
    if (FSUtils.WINDOWS) {
        // Compaction cannot move files while they are open in the secondary on Windows. Skip the rest.
        return;
    }
    // Test case 3: compact primary files
    primaryRegion.compactStores();
    List<Region> regions = new ArrayList<>();
    regions.add(primaryRegion);
    when(rss.getOnlineRegions()).thenReturn(regions);
    CompactedHFilesDischarger cleaner = new CompactedHFilesDischarger(100, null, rss, false);
    cleaner.chore();
    secondaryRegion.refreshStoreFiles();
    assertPathListsEqual(primaryRegion.getStoreFileList(families), secondaryRegion.getStoreFileList(families));
    assertEquals(families.length, secondaryRegion.getStoreFileList(families).size());
    LOG.info("-- Verifying edits from secondary");
    verifyData(secondaryRegion, 0, numRows, cq, families);
    LOG.info("-- Replaying edits in secondary");
    // Test case 4: replay some edits, ensure that memstore is dropped.
    assertTrue(secondaryRegion.getMemstoreSize() == 0);
    putDataWithFlushes(primaryRegion, 400, 400, 0);
    numRows = 400;
    reader = createWALReaderForPrimary();
    while (true) {
        WAL.Entry entry = reader.next();
        if (entry == null) {
            break;
        }
        FlushDescriptor flush = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
        if (flush != null) {
        // do not replay flush
        } else {
            replayEdit(secondaryRegion, entry);
        }
    }
    assertTrue(secondaryRegion.getMemstoreSize() > 0);
    secondaryRegion.refreshStoreFiles();
    assertTrue(secondaryRegion.getMemstoreSize() == 0);
    LOG.info("-- Verifying edits from primary");
    verifyData(primaryRegion, 0, numRows, cq, families);
    LOG.info("-- Verifying edits from secondary");
    verifyData(secondaryRegion, 0, numRows, cq, families);
}
Also used: WAL (org.apache.hadoop.hbase.wal.WAL), ArrayList (java.util.ArrayList), FlushDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor), StoreFlushDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor), Test (org.junit.Test)
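
The putDataWithFlushes(...) helper appears throughout these tests without its definition. From its call sites (for example, (primaryRegion, 100, 100, 100) yields one flush of 100 rows plus 100 unflushed rows), a plausible sketch; the putData call and the parameter names are inferred, not copied from the test class:

    // Inferred shape: write numRows rows, flushing every flushInterval rows,
    // then write numRowsAfterFlush more rows that stay in the memstore.
    private void putDataWithFlushes(HRegion region, int flushInterval,
            int numRows, int numRowsAfterFlush) throws IOException {
        int start = 0;
        for (; start < numRows; start += flushInterval) {
            putData(region, start, flushInterval, cq, families); // assumed helper
            region.flush(true);
        }
        putData(region, start, numRowsAfterFlush, cq, families);
    }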

Aggregations

FlushDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor): 30 uses
StoreFlushDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor): 24 uses
Test (org.junit.Test): 19 uses
WAL (org.apache.hadoop.hbase.wal.WAL): 17 uses
ArrayList (java.util.ArrayList): 11 uses
List (java.util.List): 10 uses
Path (org.apache.hadoop.fs.Path): 8 uses
IOException (java.io.IOException): 6 uses
HashMap (java.util.HashMap): 5 uses
Map (java.util.Map): 5 uses
TreeMap (java.util.TreeMap): 5 uses
RegionEventDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor): 5 uses
InterruptedIOException (java.io.InterruptedIOException): 4 uses
Configuration (org.apache.hadoop.conf.Configuration): 4 uses
Cell (org.apache.hadoop.hbase.Cell): 4 uses
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 4 uses
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 4 uses
PrepareFlushResult (org.apache.hadoop.hbase.regionserver.HRegion.PrepareFlushResult): 4 uses
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 3 uses
DroppedSnapshotException (org.apache.hadoop.hbase.DroppedSnapshotException): 3 uses