Example 6 with FlushDescriptor

Use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor in project hbase by apache.

In the class HRegion, the method replayWALMetaEdit:

/**
 * Replay the meta edits, i.e., flush marker, compaction marker, bulk load marker, region event
 * marker, etc.
 * <p/>
 * For all events other than start flush, we simply call {@link #refreshStoreFiles()}, as the
 * logic is straightforward and robust. For start flush, we need to snapshot the memstore, so
 * that a later {@link #refreshStoreFiles()} call can drop the snapshot; otherwise we may run out
 * of memory.
 */
private void replayWALMetaEdit(Cell cell) throws IOException {
    startRegionOperation(Operation.REPLAY_EVENT);
    try {
        FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(cell);
        if (flushDesc != null) {
            switch(flushDesc.getAction()) {
                case START_FLUSH:
                    // for start flush, we need to take a snapshot of the current memstore
                    synchronized (writestate) {
                        if (!writestate.flushing) {
                            this.writestate.flushing = true;
                        } else {
                            // usually this should not happen, but let's make the code more robust;
                            // it is not a big deal to just ignore it, as the refreshStoreFiles call
                            // should be able to clean up the inconsistent state.
                            LOG.debug("NOT flushing {} as already flushing", getRegionInfo());
                            break;
                        }
                    }
                    MonitoredTask status = TaskMonitor.get().createStatus("Preparing flush " + getRegionInfo());
                    Collection<HStore> storesToFlush = getStoresToFlush(flushDesc);
                    try {
                        PrepareFlushResult prepareResult = internalPrepareFlushCache(null, flushDesc.getFlushSequenceNumber(), storesToFlush, status, false, FlushLifeCycleTracker.DUMMY);
                        if (prepareResult.result == null) {
                            // save the PrepareFlushResult so that we can use it later from commit flush
                            this.prepareFlushResult = prepareResult;
                            status.markComplete("Flush prepare successful");
                            if (LOG.isDebugEnabled()) {
                                LOG.debug("{} prepared flush with seqId: {}", getRegionInfo(), flushDesc.getFlushSequenceNumber());
                            }
                        } else {
                            // special case: the memstore is empty but the primary is still
                            // flushing, so still record the prepare result for the later commit
                            if (prepareResult.getResult().getResult() == FlushResult.Result.CANNOT_FLUSH_MEMSTORE_EMPTY) {
                                this.prepareFlushResult = prepareResult;
                                if (LOG.isDebugEnabled()) {
                                    LOG.debug("{} prepared empty flush with seqId: {}", getRegionInfo(), flushDesc.getFlushSequenceNumber());
                                }
                            }
                            // prepare flush failed for some reason; nothing much to do beyond
                            // recording it
                            status.abort("Flush prepare failed with " + prepareResult.result);
                        }
                    } finally {
                        status.cleanup();
                    }
                    break;
                case ABORT_FLUSH:
                    // the flush was aborted on the primary; just clear the flushing flag
                    synchronized (writestate) {
                        writestate.flushing = false;
                    }
                    break;
                case COMMIT_FLUSH:
                case CANNOT_FLUSH:
                    // just call refreshStoreFiles
                    refreshStoreFiles();
                    logRegionFiles();
                    synchronized (writestate) {
                        writestate.flushing = false;
                    }
                    break;
                default:
                    LOG.warn("{} received a flush event with unknown action: {}", getRegionInfo(), TextFormat.shortDebugString(flushDesc));
            }
        } else {
            // for all other region events, we will do a refreshStoreFiles
            refreshStoreFiles();
            logRegionFiles();
        }
    } finally {
        closeRegionOperation(Operation.REPLAY_EVENT);
    }
}
Also used : FlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor) StoreFlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor) MonitoredTask(org.apache.hadoop.hbase.monitoring.MonitoredTask)
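
The replay method above is the consumer side: it decodes a FlushDescriptor out of a WAL cell and dispatches on its action. The producer side builds the marker with the same helpers that Example 7 below uses (ProtobufUtil.toFlushDescriptor and WALEdit.createFlushWALEdit). Below is a minimal sketch of that round trip; the class name FlushMarkerSketch and its methods are hypothetical names used only for illustration, not HBase code.

import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.FlushAction;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.wal.WALEdit;

public class FlushMarkerSketch {

    // Build a START_FLUSH marker edit for a single-family region. START_FLUSH carries no
    // committed files yet, so each family maps to an empty list.
    static WALEdit newStartFlushMarker(RegionInfo region, long flushSeqId, byte[] family) {
        Map<byte[], List<Path>> committedFiles = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        committedFiles.put(family, Collections.emptyList());
        FlushDescriptor fd =
            ProtobufUtil.toFlushDescriptor(FlushAction.START_FLUSH, region, flushSeqId, committedFiles);
        return WALEdit.createFlushWALEdit(region, fd);
    }

    // Decode a marker the same way the replay code does: look at the first cell of the edit
    // and ask WALEdit for a FlushDescriptor; null means the edit is not a flush marker.
    static FlushDescriptor decode(WALEdit edit) throws IOException {
        Cell cell = edit.getCells().get(0);
        return WALEdit.getFlushDescriptor(cell);
    }
}

As Example 8 below checks, the START_FLUSH marker carries no flush outputs, while the COMMIT_FLUSH marker lists the store files the flush produced.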

Example 7 with FlushDescriptor

Use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor in project hbase by apache.

In the class TestRegionReplicationSink, the method testAddToFailedReplica:

@Test
public void testAddToFailedReplica() {
    MutableInt next = new MutableInt(0);
    List<CompletableFuture<Void>> futures = Stream.generate(() -> new CompletableFuture<Void>()).limit(5).collect(Collectors.toList());
    when(conn.replicate(any(), anyList(), anyInt(), anyLong(), anyLong())).then(i -> futures.get(next.getAndIncrement()));
    ServerCall<?> rpcCall1 = mock(ServerCall.class);
    WALKeyImpl key1 = mock(WALKeyImpl.class);
    when(key1.estimatedSerializedSizeOf()).thenReturn(100L);
    when(key1.getSequenceId()).thenReturn(1L);
    WALEdit edit1 = mock(WALEdit.class);
    when(edit1.estimatedSerializedSizeOf()).thenReturn(1000L);
    when(manager.increase(anyLong())).thenReturn(true);
    sink.add(key1, edit1, rpcCall1);
    ServerCall<?> rpcCall2 = mock(ServerCall.class);
    WALKeyImpl key2 = mock(WALKeyImpl.class);
    when(key2.estimatedSerializedSizeOf()).thenReturn(200L);
    when(key2.getSequenceId()).thenReturn(1L);
    WALEdit edit2 = mock(WALEdit.class);
    when(edit2.estimatedSerializedSizeOf()).thenReturn(2000L);
    when(manager.increase(anyLong())).thenReturn(true);
    sink.add(key2, edit2, rpcCall2);
    // fail the call to replica 2
    futures.get(0).complete(null);
    futures.get(1).completeExceptionally(new IOException("inject error"));
    // we should only call replicate once for edit2, since replica 2 is marked as failed
    verify(conn, times(3)).replicate(any(), anyList(), anyInt(), anyLong(), anyLong());
    futures.get(2).complete(null);
    // should have sent everything out, so there are no pending entries
    assertEquals(0, sink.pendingSize());
    ServerCall<?> rpcCall3 = mock(ServerCall.class);
    WALKeyImpl key3 = mock(WALKeyImpl.class);
    when(key3.estimatedSerializedSizeOf()).thenReturn(200L);
    when(key3.getSequenceId()).thenReturn(3L);
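    // build an empty committed-files map: one entry per column family, keyed with
    // BYTES_COMPARATOR because byte[] keys have no value-based equals/hashCode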
    Map<byte[], List<Path>> committedFiles = td.getColumnFamilyNames().stream().collect(Collectors.toMap(Function.identity(), k -> Collections.emptyList(), (u, v) -> {
        throw new IllegalStateException();
    }, () -> new TreeMap<>(Bytes.BYTES_COMPARATOR)));
    FlushDescriptor fd = ProtobufUtil.toFlushDescriptor(FlushAction.START_FLUSH, primary, 2L, committedFiles);
    WALEdit edit3 = WALEdit.createFlushWALEdit(primary, fd);
    sink.add(key3, edit3, rpcCall3);
    // the flush marker should have cleared the failedReplicas, so we will send the edit to 2
    // replicas again
    verify(conn, times(5)).replicate(any(), anyList(), anyInt(), anyLong(), anyLong());
    futures.get(3).complete(null);
    futures.get(4).complete(null);
    // should have sent everything out, so there are no pending entries
    assertEquals(0, sink.pendingSize());
}
Also used : ArgumentMatchers.any(org.mockito.ArgumentMatchers.any) FlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor) Arrays(java.util.Arrays) FlushAction(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.FlushAction) ArgumentMatchers.anyLong(org.mockito.ArgumentMatchers.anyLong) MutableInt(org.apache.commons.lang3.mutable.MutableInt) CompletableFuture(java.util.concurrent.CompletableFuture) Function(java.util.function.Function) TableDescriptorBuilder(org.apache.hadoop.hbase.client.TableDescriptorBuilder) ColumnFamilyDescriptorBuilder(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder) RegionServerTests(org.apache.hadoop.hbase.testclassification.RegionServerTests) Map(java.util.Map) Configuration(org.apache.hadoop.conf.Configuration) After(org.junit.After) Path(org.apache.hadoop.fs.Path) AsyncClusterConnection(org.apache.hadoop.hbase.client.AsyncClusterConnection) WALEdit(org.apache.hadoop.hbase.wal.WALEdit) ClassRule(org.junit.ClassRule) ArgumentMatchers.anyInt(org.mockito.ArgumentMatchers.anyInt) ServerCall(org.apache.hadoop.hbase.ipc.ServerCall) Bytes(org.apache.hadoop.hbase.util.Bytes) Before(org.junit.Before) ProtobufUtil(org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil) MediumTests(org.apache.hadoop.hbase.testclassification.MediumTests) HBaseClassTestRule(org.apache.hadoop.hbase.HBaseClassTestRule) Mockito.times(org.mockito.Mockito.times) IOException(java.io.IOException) Test(org.junit.Test) Mockito.when(org.mockito.Mockito.when) ArgumentMatchers.anyList(org.mockito.ArgumentMatchers.anyList) Category(org.junit.experimental.categories.Category) Collectors(java.util.stream.Collectors) Mockito.verify(org.mockito.Mockito.verify) Mockito.never(org.mockito.Mockito.never) List(java.util.List) Stream(java.util.stream.Stream) Rule(org.junit.Rule) TreeMap(java.util.TreeMap) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) TableNameTestRule(org.apache.hadoop.hbase.TableNameTestRule) Collections(java.util.Collections) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) RegionInfoBuilder(org.apache.hadoop.hbase.client.RegionInfoBuilder) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) WALKeyImpl(org.apache.hadoop.hbase.wal.WALKeyImpl) Assert.assertEquals(org.junit.Assert.assertEquals) Mockito.mock(org.mockito.Mockito.mock) IOException(java.io.IOException) TreeMap(java.util.TreeMap) FlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor) CompletableFuture(java.util.concurrent.CompletableFuture) WALEdit(org.apache.hadoop.hbase.wal.WALEdit) MutableInt(org.apache.commons.lang3.mutable.MutableInt) WALKeyImpl(org.apache.hadoop.hbase.wal.WALKeyImpl) ArgumentMatchers.anyList(org.mockito.ArgumentMatchers.anyList) List(java.util.List) Test(org.junit.Test)

Example 8 with FlushDescriptor

Use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor in project hbase by apache.

In the class TestHRegion, the method testFlushMarkers:

@Test
public void testFlushMarkers() throws Exception {
    // tests that flush markers are written to WAL and handled at recovered edits
    byte[] family = Bytes.toBytes("family");
    Path logDir = TEST_UTIL.getDataTestDirOnTestFS(method + ".log");
    final Configuration walConf = new Configuration(TEST_UTIL.getConfiguration());
    CommonFSUtils.setRootDir(walConf, logDir);
    final WALFactory wals = new WALFactory(walConf, method);
    final WAL wal = wals.getWAL(RegionInfoBuilder.newBuilder(tableName).build());
    this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, CONF, false, Durability.USE_DEFAULT, wal, family);
    try {
        Path regiondir = region.getRegionFileSystem().getRegionDir();
        FileSystem fs = region.getRegionFileSystem().getFileSystem();
        byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
        long maxSeqId = 3;
        long minSeqId = 0;
        for (long i = minSeqId; i < maxSeqId; i++) {
            Put put = new Put(Bytes.toBytes(i));
            put.addColumn(family, Bytes.toBytes(i), Bytes.toBytes(i));
            region.put(put);
            region.flush(true);
        }
        // this will create a region with 3 files from flush
        assertEquals(3, region.getStore(family).getStorefilesCount());
        List<String> storeFiles = new ArrayList<>(3);
        for (HStoreFile sf : region.getStore(family).getStorefiles()) {
            storeFiles.add(sf.getPath().getName());
        }
        // now verify that the flush markers are written
        wal.shutdown();
        WAL.Reader reader = WALFactory.createReader(fs, AbstractFSWALProvider.getCurrentFileName(wal), TEST_UTIL.getConfiguration());
        try {
            List<WAL.Entry> flushDescriptors = new ArrayList<>();
            long lastFlushSeqId = -1;
            while (true) {
                WAL.Entry entry = reader.next();
                if (entry == null) {
                    break;
                }
                Cell cell = entry.getEdit().getCells().get(0);
                if (WALEdit.isMetaEditFamily(cell)) {
                    FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(cell);
                    assertNotNull(flushDesc);
                    assertArrayEquals(tableName.getName(), flushDesc.getTableName().toByteArray());
                    if (flushDesc.getAction() == FlushAction.START_FLUSH) {
                        assertTrue(flushDesc.getFlushSequenceNumber() > lastFlushSeqId);
                    } else if (flushDesc.getAction() == FlushAction.COMMIT_FLUSH) {
                        assertTrue(flushDesc.getFlushSequenceNumber() == lastFlushSeqId);
                    }
                    lastFlushSeqId = flushDesc.getFlushSequenceNumber();
                    assertArrayEquals(regionName, flushDesc.getEncodedRegionName().toByteArray());
                    // only one store
                    assertEquals(1, flushDesc.getStoreFlushesCount());
                    StoreFlushDescriptor storeFlushDesc = flushDesc.getStoreFlushes(0);
                    assertArrayEquals(family, storeFlushDesc.getFamilyName().toByteArray());
                    assertEquals("family", storeFlushDesc.getStoreHomeDir());
                    if (flushDesc.getAction() == FlushAction.START_FLUSH) {
                        assertEquals(0, storeFlushDesc.getFlushOutputCount());
                    } else {
                        // only one file from flush
                        assertEquals(1, storeFlushDesc.getFlushOutputCount());
                        assertTrue(storeFiles.contains(storeFlushDesc.getFlushOutput(0)));
                    }
                    flushDescriptors.add(entry);
                }
            }
            // one START_FLUSH and one COMMIT_FLUSH marker per flush
            assertEquals(3 * 2, flushDescriptors.size());
            // now write those markers to the recovered edits again.
            Path recoveredEditsDir = WALSplitUtil.getRegionDirRecoveredEditsDir(regiondir);
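            // recovered-edits files are named by sequence id, zero-padded to 19 digits (%019d)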
            Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", 1000));
            fs.create(recoveredEdits);
            WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);
            for (WAL.Entry entry : flushDescriptors) {
                writer.append(entry);
            }
            writer.close();
        } finally {
            if (null != reader) {
                try {
                    reader.close();
                } catch (IOException exception) {
                    LOG.warn("Problem closing wal: " + exception.getMessage());
                    LOG.debug("exception details", exception);
                }
            }
        }
        // close the region now, and reopen again
        HBaseTestingUtil.closeRegionAndWAL(this.region);
        region = HRegion.openHRegion(region, null);
        // now check whether we can read back the data from the region
        for (long i = minSeqId; i < maxSeqId; i++) {
            Get get = new Get(Bytes.toBytes(i));
            Result result = region.get(get);
            byte[] value = result.getValue(family, Bytes.toBytes(i));
            assertArrayEquals(Bytes.toBytes(i), value);
        }
    } finally {
        HBaseTestingUtil.closeRegionAndWAL(this.region);
        this.region = null;
        wals.close();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) WAL(org.apache.hadoop.hbase.wal.WAL) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) ArrayList(java.util.ArrayList) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) ByteString(org.apache.hbase.thirdparty.com.google.protobuf.ByteString) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) FlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor) StoreFlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor) Put(org.apache.hadoop.hbase.client.Put) StoreFlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor) CheckAndMutateResult(org.apache.hadoop.hbase.client.CheckAndMutateResult) Result(org.apache.hadoop.hbase.client.Result) FileSystem(org.apache.hadoop.fs.FileSystem) FaultyFileSystem(org.apache.hadoop.hbase.regionserver.TestHStore.FaultyFileSystem) Writer(org.apache.hadoop.hbase.wal.WALProvider.Writer) Get(org.apache.hadoop.hbase.client.Get) WALFactory(org.apache.hadoop.hbase.wal.WALFactory) Cell(org.apache.hadoop.hbase.Cell) WALProvider(org.apache.hadoop.hbase.wal.WALProvider) AbstractFSWALProvider(org.apache.hadoop.hbase.wal.AbstractFSWALProvider) Test(org.junit.Test)

Example 9 with FlushDescriptor

Use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor in project hbase by apache.

In the class TestHRegionReplayEvents, the method testReplayFlushSeqIds:

@Test
public void testReplayFlushSeqIds() throws IOException {
    // load some data to primary and flush
    int start = 0;
    LOG.info("-- Writing some data to primary from " + start + " to " + (start + 100));
    putData(primaryRegion, Durability.SYNC_WAL, start, 100, cq, families);
    LOG.info("-- Flushing primary, creating 3 files for 3 stores");
    primaryRegion.flush(true);
    // now replay the flush marker
    reader = createWALReaderForPrimary();
    long flushSeqId = -1;
    LOG.info("-- Replaying flush events in secondary");
    while (true) {
        WAL.Entry entry = reader.next();
        if (entry == null) {
            break;
        }
        FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
        if (flushDesc != null) {
            if (flushDesc.getAction() == FlushAction.START_FLUSH) {
                LOG.info("-- Replaying flush start in secondary");
                secondaryRegion.replayWALFlushStartMarker(flushDesc);
                flushSeqId = flushDesc.getFlushSequenceNumber();
            } else if (flushDesc.getAction() == FlushAction.COMMIT_FLUSH) {
                LOG.info("-- Replaying flush commit in secondary");
                secondaryRegion.replayWALFlushCommitMarker(flushDesc);
                assertEquals(flushSeqId, flushDesc.getFlushSequenceNumber());
            }
        }
        // else: not a flush marker, do not replay it
    }
    // TODO: what to do with this?
    // assert that the newly picked up flush file is visible
    long readPoint = secondaryRegion.getMVCC().getReadPoint();
    assertEquals(flushSeqId, readPoint);
    // after replay verify that everything is still visible
    verifyData(secondaryRegion, 0, 100, cq, families);
}
Also used : WAL(org.apache.hadoop.hbase.wal.WAL) FlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor) StoreFlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor) Test(org.junit.Test)

Example 10 with FlushDescriptor

Use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor in project hbase by apache.

In the class TestHRegionReplayEvents, the method testRefresStoreFiles:

@Test
public void testRefresStoreFiles() throws IOException {
    assertEquals(0, primaryRegion.getStoreFileList(families).size());
    assertEquals(0, secondaryRegion.getStoreFileList(families).size());
    // Test case 1: refresh with an empty region
    secondaryRegion.refreshStoreFiles();
    assertEquals(0, secondaryRegion.getStoreFileList(families).size());
    // do one flush
    putDataWithFlushes(primaryRegion, 100, 100, 0);
    int numRows = 100;
    // refresh the store file list, and ensure that the files are picked up.
    secondaryRegion.refreshStoreFiles();
    assertPathListsEqual(primaryRegion.getStoreFileList(families), secondaryRegion.getStoreFileList(families));
    assertEquals(families.length, secondaryRegion.getStoreFileList(families).size());
    LOG.info("-- Verifying edits from secondary");
    verifyData(secondaryRegion, 0, numRows, cq, families);
    // Test case 2: 3 more flushes
    putDataWithFlushes(primaryRegion, 100, 300, 0);
    numRows = 300;
    // refresh the store file list, and ensure that the files are picked up.
    secondaryRegion.refreshStoreFiles();
    assertPathListsEqual(primaryRegion.getStoreFileList(families), secondaryRegion.getStoreFileList(families));
    assertEquals(families.length * 4, secondaryRegion.getStoreFileList(families).size());
    LOG.info("-- Verifying edits from secondary");
    verifyData(secondaryRegion, 0, numRows, cq, families);
    if (FSUtils.WINDOWS) {
        // compaction cannot move files while they are open in the secondary on Windows; skip the rest
        return;
    }
    // Test case 3: compact primary files
    primaryRegion.compactStores();
    List<HRegion> regions = new ArrayList<>();
    regions.add(primaryRegion);
    Mockito.doReturn(regions).when(rss).getRegions();
    CompactedHFilesDischarger cleaner = new CompactedHFilesDischarger(100, null, rss, false);
    cleaner.chore();
    secondaryRegion.refreshStoreFiles();
    assertPathListsEqual(primaryRegion.getStoreFileList(families), secondaryRegion.getStoreFileList(families));
    assertEquals(families.length, secondaryRegion.getStoreFileList(families).size());
    LOG.info("-- Verifying edits from secondary");
    verifyData(secondaryRegion, 0, numRows, cq, families);
    LOG.info("-- Replaying edits in secondary");
    // Test case 4: replay some edits, ensure that memstore is dropped.
    assertTrue(secondaryRegion.getMemStoreDataSize() == 0);
    putDataWithFlushes(primaryRegion, 400, 400, 0);
    numRows = 400;
    reader = createWALReaderForPrimary();
    while (true) {
        WAL.Entry entry = reader.next();
        if (entry == null) {
            break;
        }
        FlushDescriptor flush = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
        if (flush != null) {
            // do not replay flush markers
        } else {
            replayEdit(secondaryRegion, entry);
        }
    }
    assertTrue(secondaryRegion.getMemStoreDataSize() > 0);
    secondaryRegion.refreshStoreFiles();
    assertTrue(secondaryRegion.getMemStoreDataSize() == 0);
    LOG.info("-- Verifying edits from primary");
    verifyData(primaryRegion, 0, numRows, cq, families);
    LOG.info("-- Verifying edits from secondary");
    verifyData(secondaryRegion, 0, numRows, cq, families);
}
Also used : WAL(org.apache.hadoop.hbase.wal.WAL) ArrayList(java.util.ArrayList) FlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor) StoreFlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor) Test(org.junit.Test)

Aggregations

FlushDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor)30 StoreFlushDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor)24 Test (org.junit.Test)19 WAL (org.apache.hadoop.hbase.wal.WAL)17 ArrayList (java.util.ArrayList)11 List (java.util.List)10 Path (org.apache.hadoop.fs.Path)8 IOException (java.io.IOException)6 HashMap (java.util.HashMap)5 Map (java.util.Map)5 TreeMap (java.util.TreeMap)5 RegionEventDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor)5 InterruptedIOException (java.io.InterruptedIOException)4 Configuration (org.apache.hadoop.conf.Configuration)4 Cell (org.apache.hadoop.hbase.Cell)4 DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException)4 HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration)4 PrepareFlushResult (org.apache.hadoop.hbase.regionserver.HRegion.PrepareFlushResult)4 ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap)3 DroppedSnapshotException (org.apache.hadoop.hbase.DroppedSnapshotException)3