Example 26 with FlushDescriptor

Use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor in project hbase by apache.

From the class TestHRegionReplayEvents, method testWriteFlushRequestMarker.

/**
 * Tests the case where a request to flush the cache is sent to the region, but the region
 * cannot flush. It should write the flush request marker instead.
 */
@Test
public void testWriteFlushRequestMarker() throws IOException {
    // primary region is empty at this point. Request a flush with writeFlushRequestWalMarker=false
    FlushResultImpl result = primaryRegion.flushcache(true, false, FlushLifeCycleTracker.DUMMY);
    assertNotNull(result);
    assertEquals(FlushResultImpl.Result.CANNOT_FLUSH_MEMSTORE_EMPTY, result.result);
    assertFalse(result.wroteFlushWalMarker);
    // request flush again, but this time with writeFlushRequestWalMarker = true
    result = primaryRegion.flushcache(true, true, FlushLifeCycleTracker.DUMMY);
    assertNotNull(result);
    assertEquals(FlushResultImpl.Result.CANNOT_FLUSH_MEMSTORE_EMPTY, result.result);
    assertTrue(result.wroteFlushWalMarker);
    List<FlushDescriptor> flushes = Lists.newArrayList();
    reader = createWALReaderForPrimary();
    while (true) {
        WAL.Entry entry = reader.next();
        if (entry == null) {
            break;
        }
        FlushDescriptor flush = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
        if (flush != null) {
            flushes.add(flush);
        }
    }
    assertEquals(1, flushes.size());
    assertNotNull(flushes.get(0));
    assertEquals(FlushDescriptor.FlushAction.CANNOT_FLUSH, flushes.get(0).getAction());
}
Also used : WAL(org.apache.hadoop.hbase.wal.WAL) FlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor) StoreFlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor) FlushResultImpl(org.apache.hadoop.hbase.regionserver.HRegion.FlushResultImpl) Test(org.junit.Test)
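
The while (true) scan above is the recurring way these tests read markers back out of the WAL. As a minimal sketch, the same scan can be written as a for loop over the test's reader field; nothing is assumed beyond the classes already listed under "Also used":

// Drain the WAL reader, keeping only the flush markers.
List<FlushDescriptor> flushes = Lists.newArrayList();
for (WAL.Entry entry = reader.next(); entry != null; entry = reader.next()) {
    // Flush markers ride in the first cell of a meta WALEdit.
    FlushDescriptor flush = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
    if (flush != null) {
        flushes.add(flush);
    }
}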

Example 27 with FlushDescriptor

Use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor in project hbase by apache.

From the class TestHRegionReplayEvents, method testReplayRegionOpenEvent.

/**
 * Tests replaying region open markers from the primary region. Checks whether the store files
 * are picked up by the secondary.
 */
@Test
public void testReplayRegionOpenEvent() throws IOException {
    // no flush
    putDataWithFlushes(primaryRegion, 100, 0, 100);
    int numRows = 100;
    // close the region and open again.
    primaryRegion.close();
    primaryRegion = HRegion.openHRegion(rootDir, primaryHri, htd, walPrimary, CONF, rss, null);
    // now read back the WAL and collect the region event markers
    reader = createWALReaderForPrimary();
    List<RegionEventDescriptor> regionEvents = Lists.newArrayList();
    LOG.info("-- Replaying edits and region events in secondary");
    while (true) {
        WAL.Entry entry = reader.next();
        if (entry == null) {
            break;
        }
        FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
        RegionEventDescriptor regionEventDesc = WALEdit.getRegionEventDescriptor(entry.getEdit().getCells().get(0));
        if (flushDesc != null) {
        // don't replay flush events
        } else if (regionEventDesc != null) {
            regionEvents.add(regionEventDesc);
        } else {
        // don't replay edits
        }
    }
    // we should have 1 open, 1 close and 1 open event
    assertEquals(3, regionEvents.size());
    // replay the first region open event.
    secondaryRegion.replayWALRegionEventMarker(regionEvents.get(0));
    // replay the close event as well
    secondaryRegion.replayWALRegionEventMarker(regionEvents.get(1));
    // no store files in the region
    int expectedStoreFileCount = 0;
    for (HStore s : secondaryRegion.getStores()) {
        assertEquals(expectedStoreFileCount, s.getStorefilesCount());
    }
    long regionMemstoreSize = secondaryRegion.getMemStoreDataSize();
    assertEquals(0, regionMemstoreSize);
    // now replay the region open event that should contain new file locations
    LOG.info("Testing replaying region open event " + regionEvents.get(2));
    secondaryRegion.replayWALRegionEventMarker(regionEvents.get(2));
    // assert that the flush files are picked up
    expectedStoreFileCount++;
    for (HStore s : secondaryRegion.getStores()) {
        assertEquals(expectedStoreFileCount, s.getStorefilesCount());
    }
    HStore store = secondaryRegion.getStore(Bytes.toBytes("cf1"));
    MemStoreSize mss = store.getFlushableSize();
    assertEquals(MutableSegment.DEEP_OVERHEAD, mss.getHeapSize());
    // assert that the region memstore is empty
    long newRegionMemstoreSize = secondaryRegion.getMemStoreDataSize();
    assertEquals(0, newRegionMemstoreSize);
    // the prepared flush snapshot, if any, should have been dropped
    assertNull(secondaryRegion.getPrepareFlushResult());
    LOG.info("-- Verifying edits from secondary");
    verifyData(secondaryRegion, 0, numRows, cq, families);
    LOG.info("-- Verifying edits from primary.");
    verifyData(primaryRegion, 0, numRows, cq, families);
}
Also used : WAL(org.apache.hadoop.hbase.wal.WAL) RegionEventDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor) FlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor) StoreFlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor) Test(org.junit.Test)
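
The three markers collected above are distinguished by position (open, close, open), but a consumer can also dispatch on the descriptor's event type. A minimal sketch, assuming the regionEvents list from the test; unlike the test, it replays only the open markers:

for (RegionEventDescriptor event : regionEvents) {
    // REGION_OPEN markers carry the store file lists the secondary should pick up.
    if (event.getEventType() == RegionEventDescriptor.EventType.REGION_OPEN) {
        secondaryRegion.replayWALRegionEventMarker(event);
    }
}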

Example 28 with FlushDescriptor

Use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor in project hbase by apache.

From the class TestHRegion, method testFlushMarkersWALFail.

@Test
public void testFlushMarkersWALFail() throws Exception {
    // test the cases where the WAL append for flush markers fail.
    byte[] family = Bytes.toBytes("family");
    // spy on an actual WAL implementation to throw an exception (it could not be mocked)
    Path logDir = TEST_UTIL.getDataTestDirOnTestFS(method + "log");
    final Configuration walConf = new Configuration(TEST_UTIL.getConfiguration());
    CommonFSUtils.setRootDir(walConf, logDir);
    // Make up a WAL that we can manipulate at append time.
    class FailAppendFlushMarkerWAL extends FSHLog {

        volatile FlushAction[] flushActions = null;

        public FailAppendFlushMarkerWAL(FileSystem fs, Path root, String logDir, Configuration conf) throws IOException {
            super(fs, root, logDir, conf);
        }

        @Override
        protected Writer createWriterInstance(Path path) throws IOException {
            final Writer w = super.createWriterInstance(path);
            return new Writer() {

                @Override
                public void close() throws IOException {
                    w.close();
                }

                @Override
                public void sync(boolean forceSync) throws IOException {
                    w.sync(forceSync);
                }

                @Override
                public void append(Entry entry) throws IOException {
                    List<Cell> cells = entry.getEdit().getCells();
                    if (WALEdit.isMetaEditFamily(cells.get(0))) {
                        FlushDescriptor desc = WALEdit.getFlushDescriptor(cells.get(0));
                        if (desc != null) {
                            for (FlushAction flushAction : flushActions) {
                                if (desc.getAction().equals(flushAction)) {
                                    throw new IOException("Failed to append flush marker! " + flushAction);
                                }
                            }
                        }
                    }
                    w.append(entry);
                }

                @Override
                public long getLength() {
                    return w.getLength();
                }

                @Override
                public long getSyncedLength() {
                    return w.getSyncedLength();
                }
            };
        }
    }
    FailAppendFlushMarkerWAL wal = new FailAppendFlushMarkerWAL(FileSystem.get(walConf), CommonFSUtils.getRootDir(walConf), method, walConf);
    wal.init();
    this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, CONF, false, Durability.USE_DEFAULT, wal, family);
    int i = 0;
    Put put = new Put(Bytes.toBytes(i));
    // have to skip the mocked WAL
    put.setDurability(Durability.SKIP_WAL);
    put.addColumn(family, Bytes.toBytes(i), Bytes.toBytes(i));
    region.put(put);
    // 1. Test case where START_FLUSH throws an exception
    wal.flushActions = new FlushAction[] { FlushAction.START_FLUSH };
    // starting the cache flush will throw an exception
    try {
        region.flush(true);
        fail("This should have thrown exception");
    } catch (DroppedSnapshotException unexpected) {
        // this should not be a DroppedSnapshotException; that would mean the RS will abort
        throw unexpected;
    } catch (IOException expected) {
    // expected
    }
    // The WAL is hosed now. It has two edits appended. We cannot roll the log without it
    // throwing a DroppedSnapshotException to force an abort. Just clean up the mess.
    region.close(true);
    wal.close();
    // 2. Test case where START_FLUSH succeeds but COMMIT_FLUSH throws an exception
    wal.flushActions = new FlushAction[] { FlushAction.COMMIT_FLUSH };
    wal = new FailAppendFlushMarkerWAL(FileSystem.get(walConf), CommonFSUtils.getRootDir(walConf), method, walConf);
    wal.init();
    this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, CONF, false, Durability.USE_DEFAULT, wal, family);
    region.put(put);
    // 3. Test case where ABORT_FLUSH throws an exception.
    // Even if ABORT_FLUSH throws, we should not fail with an IOException but continue with a
    // DroppedSnapshotException. The COMMIT_FLUSH below will cause the flush to abort.
    wal.flushActions = new FlushAction[] { FlushAction.COMMIT_FLUSH, FlushAction.ABORT_FLUSH };
    try {
        region.flush(true);
        fail("This should have thrown exception");
    } catch (DroppedSnapshotException expected) {
    // we expect this exception, since we were able to write the snapshot but failed to
    // write the flush marker to the WAL
    } catch (IOException unexpected) {
        throw unexpected;
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) DroppedSnapshotException(org.apache.hadoop.hbase.DroppedSnapshotException) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) ByteString(org.apache.hbase.thirdparty.com.google.protobuf.ByteString) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) FlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor) StoreFlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor) Put(org.apache.hadoop.hbase.client.Put) FSHLog(org.apache.hadoop.hbase.regionserver.wal.FSHLog) FlushAction(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.FlushAction) FileSystem(org.apache.hadoop.fs.FileSystem) FaultyFileSystem(org.apache.hadoop.hbase.regionserver.TestHStore.FaultyFileSystem) Cell(org.apache.hadoop.hbase.Cell) Writer(org.apache.hadoop.hbase.wal.WALProvider.Writer) Test(org.junit.Test)
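
The markers the wrapped writer intercepts are ordinary protobuf messages, so a test can also build one directly and check its action. A minimal sketch using ProtobufUtil.toFlushDescriptor, the same builder Example 30 below uses; regionInfo is an assumed RegionInfo for the region under test:

// Build a START_FLUSH marker with an empty per-family committed-files map.
Map<byte[], List<Path>> committedFiles = new TreeMap<>(Bytes.BYTES_COMPARATOR);
FlushDescriptor desc =
    ProtobufUtil.toFlushDescriptor(FlushAction.START_FLUSH, regionInfo, 2L, committedFiles);
// The fault-injecting writer above matches on exactly this action value.
assertEquals(FlushAction.START_FLUSH, desc.getAction());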

Example 29 with FlushDescriptor

Use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor in project hbase by apache.

From the class TestHRegionReplayEvents, method testOnlyReplayingFlushStartDoesNotHoldUpRegionClose.

/**
 * Tests a case where we replay only a flush start marker and then close the region. The close
 * should not block indefinitely.
 */
@Test
public void testOnlyReplayingFlushStartDoesNotHoldUpRegionClose() throws IOException {
    // load some data to primary and flush
    int start = 0;
    LOG.info("-- Writing some data to primary from " + start + " to " + (start + 100));
    putData(primaryRegion, Durability.SYNC_WAL, start, 100, cq, families);
    LOG.info("-- Flushing primary, creating 3 files for 3 stores");
    primaryRegion.flush(true);
    // now replay the edits and the flush marker
    reader = createWALReaderForPrimary();
    LOG.info("-- Replaying edits and flush events in secondary");
    while (true) {
        WAL.Entry entry = reader.next();
        if (entry == null) {
            break;
        }
        FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
        if (flushDesc != null) {
            if (flushDesc.getAction() == FlushAction.START_FLUSH) {
                LOG.info("-- Replaying flush start in secondary");
                secondaryRegion.replayWALFlushStartMarker(flushDesc);
            } else if (flushDesc.getAction() == FlushAction.COMMIT_FLUSH) {
                LOG.info("-- NOT Replaying flush commit in secondary");
            }
        } else {
            replayEdit(secondaryRegion, entry);
        }
    }
    assertTrue(rss.getRegionServerAccounting().getGlobalMemStoreDataSize() > 0);
    // now close the region; the uncommitted flush should not make the close hang
    secondaryRegion.close();
    // verify that the memstore size is back to what it was
    assertEquals(0, rss.getRegionServerAccounting().getGlobalMemStoreDataSize());
}
Also used : WAL(org.apache.hadoop.hbase.wal.WAL) FlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor) StoreFlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor) Test(org.junit.Test)
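
The replay loop keys off FlushDescriptor.getAction(). Written as a switch over FlushAction, a full replay, which unlike this test also applies the commit marker, would look roughly like this sketch:

switch (flushDesc.getAction()) {
    case START_FLUSH:
        // snapshots the secondary's memstore for the stores named in the descriptor
        secondaryRegion.replayWALFlushStartMarker(flushDesc);
        break;
    case COMMIT_FLUSH:
        // picks up the flushed files and drops the snapshot; deliberately skipped in this test
        secondaryRegion.replayWALFlushCommitMarker(flushDesc);
        break;
    default:
        // ABORT_FLUSH and CANNOT_FLUSH markers are not replayed here
        break;
}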

Example 30 with FlushDescriptor

Use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor in project hbase by apache.

From the class TestRegionReplicationSink, method testNotAddToFailedReplicas.

@Test
public void testNotAddToFailedReplicas() {
    MutableInt next = new MutableInt(0);
    List<CompletableFuture<Void>> futures = Stream.generate(() -> new CompletableFuture<Void>()).limit(4).collect(Collectors.toList());
    when(conn.replicate(any(), anyList(), anyInt(), anyLong(), anyLong())).then(i -> futures.get(next.getAndIncrement()));
    ServerCall<?> rpcCall1 = mock(ServerCall.class);
    WALKeyImpl key1 = mock(WALKeyImpl.class);
    when(key1.estimatedSerializedSizeOf()).thenReturn(100L);
    when(key1.getSequenceId()).thenReturn(1L);
    WALEdit edit1 = mock(WALEdit.class);
    when(edit1.estimatedSerializedSizeOf()).thenReturn(1000L);
    when(manager.increase(anyLong())).thenReturn(true);
    sink.add(key1, edit1, rpcCall1);
    ServerCall<?> rpcCall2 = mock(ServerCall.class);
    WALKeyImpl key2 = mock(WALKeyImpl.class);
    when(key2.estimatedSerializedSizeOf()).thenReturn(200L);
    when(key2.getSequenceId()).thenReturn(3L);
    Map<byte[], List<Path>> committedFiles = td.getColumnFamilyNames().stream().collect(Collectors.toMap(Function.identity(), k -> Collections.emptyList(), (u, v) -> {
        throw new IllegalStateException();
    }, () -> new TreeMap<>(Bytes.BYTES_COMPARATOR)));
    FlushDescriptor fd = ProtobufUtil.toFlushDescriptor(FlushAction.START_FLUSH, primary, 2L, committedFiles);
    WALEdit edit2 = WALEdit.createFlushWALEdit(primary, fd);
    sink.add(key2, edit2, rpcCall2);
    // fail the call to replica 2
    futures.get(0).complete(null);
    futures.get(1).completeExceptionally(new IOException("inject error"));
    // the failure should not cause replica 2 to be added to failedReplicas, as we have already
    // triggered a flush after it.
    verify(conn, times(4)).replicate(any(), anyList(), anyInt(), anyLong(), anyLong());
    futures.get(2).complete(null);
    futures.get(3).complete(null);
    // everything should have been sent out, so there are no pending entries.
    assertEquals(0, sink.pendingSize());
}
Also used : ArgumentMatchers.any(org.mockito.ArgumentMatchers.any) FlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor) Arrays(java.util.Arrays) FlushAction(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.FlushAction) ArgumentMatchers.anyLong(org.mockito.ArgumentMatchers.anyLong) MutableInt(org.apache.commons.lang3.mutable.MutableInt) CompletableFuture(java.util.concurrent.CompletableFuture) Function(java.util.function.Function) TableDescriptorBuilder(org.apache.hadoop.hbase.client.TableDescriptorBuilder) ColumnFamilyDescriptorBuilder(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder) RegionServerTests(org.apache.hadoop.hbase.testclassification.RegionServerTests) Map(java.util.Map) Configuration(org.apache.hadoop.conf.Configuration) After(org.junit.After) Path(org.apache.hadoop.fs.Path) AsyncClusterConnection(org.apache.hadoop.hbase.client.AsyncClusterConnection) WALEdit(org.apache.hadoop.hbase.wal.WALEdit) ClassRule(org.junit.ClassRule) ArgumentMatchers.anyInt(org.mockito.ArgumentMatchers.anyInt) ServerCall(org.apache.hadoop.hbase.ipc.ServerCall) Bytes(org.apache.hadoop.hbase.util.Bytes) Before(org.junit.Before) ProtobufUtil(org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil) MediumTests(org.apache.hadoop.hbase.testclassification.MediumTests) HBaseClassTestRule(org.apache.hadoop.hbase.HBaseClassTestRule) Mockito.times(org.mockito.Mockito.times) IOException(java.io.IOException) Test(org.junit.Test) Mockito.when(org.mockito.Mockito.when) ArgumentMatchers.anyList(org.mockito.ArgumentMatchers.anyList) Category(org.junit.experimental.categories.Category) Collectors(java.util.stream.Collectors) Mockito.verify(org.mockito.Mockito.verify) Mockito.never(org.mockito.Mockito.never) List(java.util.List) Stream(java.util.stream.Stream) Rule(org.junit.Rule) TreeMap(java.util.TreeMap) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) TableNameTestRule(org.apache.hadoop.hbase.TableNameTestRule) Collections(java.util.Collections) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) RegionInfoBuilder(org.apache.hadoop.hbase.client.RegionInfoBuilder) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) WALKeyImpl(org.apache.hadoop.hbase.wal.WALKeyImpl) Assert.assertEquals(org.junit.Assert.assertEquals) Mockito.mock(org.mockito.Mockito.mock)
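
The stream pipeline that builds committedFiles above is dense: it maps every column family of the table to an empty file list in a TreeMap ordered by Bytes.BYTES_COMPARATOR. A minimal equivalent written as a plain loop against the test's td descriptor:

// One empty committed-file list per column family, keyed with byte[]-aware ordering.
Map<byte[], List<Path>> committedFiles = new TreeMap<>(Bytes.BYTES_COMPARATOR);
for (byte[] family : td.getColumnFamilyNames()) {
    committedFiles.put(family, Collections.emptyList());
}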

Aggregations

FlushDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor): 30
StoreFlushDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor): 24
Test (org.junit.Test): 19
WAL (org.apache.hadoop.hbase.wal.WAL): 17
ArrayList (java.util.ArrayList): 11
List (java.util.List): 10
Path (org.apache.hadoop.fs.Path): 8
IOException (java.io.IOException): 6
HashMap (java.util.HashMap): 5
Map (java.util.Map): 5
TreeMap (java.util.TreeMap): 5
RegionEventDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor): 5
InterruptedIOException (java.io.InterruptedIOException): 4
Configuration (org.apache.hadoop.conf.Configuration): 4
Cell (org.apache.hadoop.hbase.Cell): 4
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 4
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 4
PrepareFlushResult (org.apache.hadoop.hbase.regionserver.HRegion.PrepareFlushResult): 4
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 3
DroppedSnapshotException (org.apache.hadoop.hbase.DroppedSnapshotException): 3