Use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor in project hbase by apache.
The class TestHRegionReplayEvents, method testWriteFlushRequestMarker.
/**
 * Tests the case where a flush cache request is sent to the region, but the region cannot
 * flush. It should write the flush request marker instead.
 */
@Test
public void testWriteFlushRequestMarker() throws IOException {
  // primary region is empty at this point. Request a flush with writeFlushRequestWalMarker=false
  FlushResultImpl result = primaryRegion.flushcache(true, false, FlushLifeCycleTracker.DUMMY);
  assertNotNull(result);
  assertEquals(FlushResultImpl.Result.CANNOT_FLUSH_MEMSTORE_EMPTY, result.result);
  assertFalse(result.wroteFlushWalMarker);
  // request flush again, but this time with writeFlushRequestWalMarker = true
  result = primaryRegion.flushcache(true, true, FlushLifeCycleTracker.DUMMY);
  assertNotNull(result);
  assertEquals(FlushResultImpl.Result.CANNOT_FLUSH_MEMSTORE_EMPTY, result.result);
  assertTrue(result.wroteFlushWalMarker);
  List<FlushDescriptor> flushes = Lists.newArrayList();
  reader = createWALReaderForPrimary();
  while (true) {
    WAL.Entry entry = reader.next();
    if (entry == null) {
      break;
    }
    FlushDescriptor flush = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
    if (flush != null) {
      flushes.add(flush);
    }
  }
  assertEquals(1, flushes.size());
  assertNotNull(flushes.get(0));
  assertEquals(FlushDescriptor.FlushAction.CANNOT_FLUSH, flushes.get(0).getAction());
}
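The WAL-scanning loop above recurs throughout these tests, so as a minimal sketch it could be factored into a helper like the one below. The name collectFlushDescriptors is hypothetical, and the reader is assumed to expose the same next() contract used above (WAL.Reader in older HBase branches; newer branches use WALStreamReader).

// Hypothetical helper: drain a WAL reader and collect every flush descriptor
// carried in the first (meta) cell of each entry's edit.
private static List<FlushDescriptor> collectFlushDescriptors(WAL.Reader walReader)
    throws IOException {
  List<FlushDescriptor> flushes = Lists.newArrayList();
  for (WAL.Entry entry = walReader.next(); entry != null; entry = walReader.next()) {
    FlushDescriptor flush = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
    if (flush != null) {
      flushes.add(flush);
    }
  }
  return flushes;
}

With such a helper, the loop in the test would reduce to a single call: flushes = collectFlushDescriptors(createWALReaderForPrimary()).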
Use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor in project hbase by apache.
The class TestHRegionReplayEvents, method testReplayRegionOpenEvent.
/**
 * Tests replaying region open markers from the primary region. Checks whether the store files
 * are picked up.
 */
@Test
public void testReplayRegionOpenEvent() throws IOException {
  putDataWithFlushes(primaryRegion, 100, 0, 100); // no flush
  int numRows = 100;
  // close the region and open again.
  primaryRegion.close();
  primaryRegion = HRegion.openHRegion(rootDir, primaryHri, htd, walPrimary, CONF, rss, null);
  // now replay the edits and the flush marker
  reader = createWALReaderForPrimary();
  List<RegionEventDescriptor> regionEvents = Lists.newArrayList();
  LOG.info("-- Replaying edits and region events in secondary");
  while (true) {
    WAL.Entry entry = reader.next();
    if (entry == null) {
      break;
    }
    FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
    RegionEventDescriptor regionEventDesc =
      WALEdit.getRegionEventDescriptor(entry.getEdit().getCells().get(0));
    if (flushDesc != null) {
      // don't replay flush events
    } else if (regionEventDesc != null) {
      regionEvents.add(regionEventDesc);
    } else {
      // don't replay edits
    }
  }
  // we should have 1 open, 1 close and 1 open event
  assertEquals(3, regionEvents.size());
  // replay the first region open event.
  secondaryRegion.replayWALRegionEventMarker(regionEvents.get(0));
  // replay the close event as well
  secondaryRegion.replayWALRegionEventMarker(regionEvents.get(1));
  // no store files in the region
  int expectedStoreFileCount = 0;
  for (HStore s : secondaryRegion.getStores()) {
    assertEquals(expectedStoreFileCount, s.getStorefilesCount());
  }
  long regionMemstoreSize = secondaryRegion.getMemStoreDataSize();
  assertEquals(0, regionMemstoreSize);
  // now replay the region open event that should contain new file locations
  LOG.info("Testing replaying region open event " + regionEvents.get(2));
  secondaryRegion.replayWALRegionEventMarker(regionEvents.get(2));
  // assert that the flush files are picked
  expectedStoreFileCount++;
  for (HStore s : secondaryRegion.getStores()) {
    assertEquals(expectedStoreFileCount, s.getStorefilesCount());
  }
  HStore store = secondaryRegion.getStore(Bytes.toBytes("cf1"));
  MemStoreSize mss = store.getFlushableSize();
  assertEquals(MutableSegment.DEEP_OVERHEAD, mss.getHeapSize());
  // assert that the region memstore is empty
  long newRegionMemstoreSize = secondaryRegion.getMemStoreDataSize();
  assertEquals(0, newRegionMemstoreSize);
  // prepare snapshot should be dropped if any
  assertNull(secondaryRegion.getPrepareFlushResult());
  LOG.info("-- Verifying edits from secondary");
  verifyData(secondaryRegion, 0, numRows, cq, families);
  LOG.info("-- Verifying edits from primary.");
  verifyData(primaryRegion, 0, numRows, cq, families);
}
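The region open event replayed at the end is what carries the new store file locations to the secondary. A small hedged sketch of inspecting such a marker is below; describeRegionEvent is a hypothetical helper, while the accessors are the generated getters on the WALProtos protobuf messages used throughout this page.

import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor;

// Sketch: what a replayed region event marker carries. On REGION_OPEN the store
// descriptors list the current store files, which is how the secondary picks up
// the new file locations after the primary's flush.
static void describeRegionEvent(RegionEventDescriptor event) {
  System.out.println("event=" + event.getEventType()       // REGION_OPEN or REGION_CLOSE
    + " seqId=" + event.getLogSequenceNumber());
  for (StoreDescriptor store : event.getStoresList()) {
    System.out.println("  family=" + store.getFamilyName().toStringUtf8()
      + " files=" + store.getStoreFileList());
  }
}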
Use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor in project hbase by apache.
The class TestHRegion, method testFlushMarkersWALFail.
@Test
public void testFlushMarkersWALFail() throws Exception {
  // test the cases where the WAL append for flush markers fail.
  byte[] family = Bytes.toBytes("family");
  // spy an actual WAL implementation to throw exception (was not able to mock)
  Path logDir = TEST_UTIL.getDataTestDirOnTestFS(method + "log");
  final Configuration walConf = new Configuration(TEST_UTIL.getConfiguration());
  CommonFSUtils.setRootDir(walConf, logDir);
  // Make up a WAL that we can manipulate at append time.
  class FailAppendFlushMarkerWAL extends FSHLog {
    volatile FlushAction[] flushActions = null;

    public FailAppendFlushMarkerWAL(FileSystem fs, Path root, String logDir, Configuration conf)
        throws IOException {
      super(fs, root, logDir, conf);
    }

    @Override
    protected Writer createWriterInstance(Path path) throws IOException {
      final Writer w = super.createWriterInstance(path);
      return new Writer() {
        @Override
        public void close() throws IOException {
          w.close();
        }

        @Override
        public void sync(boolean forceSync) throws IOException {
          w.sync(forceSync);
        }

        @Override
        public void append(Entry entry) throws IOException {
          List<Cell> cells = entry.getEdit().getCells();
          if (WALEdit.isMetaEditFamily(cells.get(0))) {
            FlushDescriptor desc = WALEdit.getFlushDescriptor(cells.get(0));
            if (desc != null) {
              for (FlushAction flushAction : flushActions) {
                if (desc.getAction().equals(flushAction)) {
                  throw new IOException("Failed to append flush marker! " + flushAction);
                }
              }
            }
          }
          w.append(entry);
        }

        @Override
        public long getLength() {
          return w.getLength();
        }

        @Override
        public long getSyncedLength() {
          return w.getSyncedLength();
        }
      };
    }
  }
  FailAppendFlushMarkerWAL wal = new FailAppendFlushMarkerWAL(FileSystem.get(walConf),
    CommonFSUtils.getRootDir(walConf), method, walConf);
  wal.init();
  this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, CONF,
    false, Durability.USE_DEFAULT, wal, family);
  int i = 0;
  Put put = new Put(Bytes.toBytes(i));
  // have to skip mocked wal
  put.setDurability(Durability.SKIP_WAL);
  put.addColumn(family, Bytes.toBytes(i), Bytes.toBytes(i));
  region.put(put);
  // 1. Test case where START_FLUSH throws exception
  wal.flushActions = new FlushAction[] { FlushAction.START_FLUSH };
  // start cache flush will throw exception
  try {
    region.flush(true);
    fail("This should have thrown exception");
  } catch (DroppedSnapshotException unexpected) {
    // this should not be a dropped snapshot exception. Meaning that RS will not abort
    throw unexpected;
  } catch (IOException expected) {
    // expected
  }
  // The WAL is hosed now. It has two edits appended. We cannot roll the log without it
  // throwing a DroppedSnapshotException to force an abort. Just clean up the mess.
  region.close(true);
  wal.close();
  // 2. Test case where START_FLUSH succeeds but COMMIT_FLUSH will throw exception
  wal.flushActions = new FlushAction[] { FlushAction.COMMIT_FLUSH };
  wal = new FailAppendFlushMarkerWAL(FileSystem.get(walConf), CommonFSUtils.getRootDir(walConf),
    method, walConf);
  wal.init();
  this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, CONF,
    false, Durability.USE_DEFAULT, wal, family);
  region.put(put);
  // 3. Test case where ABORT_FLUSH will throw exception.
  // Even if ABORT_FLUSH throws exception, we should not fail with IOE, but continue with
  // DroppedSnapshotException. Below COMMIT_FLUSH will cause flush to abort
  wal.flushActions = new FlushAction[] { FlushAction.COMMIT_FLUSH, FlushAction.ABORT_FLUSH };
  try {
    region.flush(true);
    fail("This should have thrown exception");
  } catch (DroppedSnapshotException expected) {
    // we expect this exception, since we were able to write the snapshot, but failed to
    // write the flush marker to WAL
  } catch (IOException unexpected) {
    throw unexpected;
  }
}
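The FailAppendFlushMarkerWAL above injects failures by wrapping the real writer in a delegating one. The same pattern in isolation might look like the following sketch; FaultInjectingWriter and the Predicate-based trigger are assumptions for illustration, while WALProvider.Writer is the interface the test implements anonymously.

import java.io.IOException;
import java.util.function.Predicate;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALProvider;

// Sketch: wrap a real WAL writer and fail append() whenever a predicate
// matches the entry; all other calls pass straight through to the delegate.
class FaultInjectingWriter implements WALProvider.Writer {
  private final WALProvider.Writer delegate;
  private final Predicate<WAL.Entry> failOn;

  FaultInjectingWriter(WALProvider.Writer delegate, Predicate<WAL.Entry> failOn) {
    this.delegate = delegate;
    this.failOn = failOn;
  }

  @Override
  public void append(WAL.Entry entry) throws IOException {
    if (failOn.test(entry)) {
      throw new IOException("Injected append failure");
    }
    delegate.append(entry);
  }

  @Override
  public void sync(boolean forceSync) throws IOException {
    delegate.sync(forceSync);
  }

  @Override
  public void close() throws IOException {
    delegate.close();
  }

  @Override
  public long getLength() {
    return delegate.getLength();
  }

  @Override
  public long getSyncedLength() {
    return delegate.getSyncedLength();
  }
}

The test uses the inline anonymous form rather than a named class so that the volatile flushActions field can be flipped between scenarios without creating a new writer.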
Use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor in project hbase by apache.
The class TestHRegionReplayEvents, method testOnlyReplayingFlushStartDoesNotHoldUpRegionClose.
/**
 * Tests a case where we replay only a flush start marker, then the region is closed. The region
 * close should not block indefinitely.
 */
@Test
public void testOnlyReplayingFlushStartDoesNotHoldUpRegionClose() throws IOException {
  // load some data to primary and flush
  int start = 0;
  LOG.info("-- Writing some data to primary from " + start + " to " + (start + 100));
  putData(primaryRegion, Durability.SYNC_WAL, start, 100, cq, families);
  LOG.info("-- Flushing primary, creating 3 files for 3 stores");
  primaryRegion.flush(true);
  // now replay the edits and the flush marker
  reader = createWALReaderForPrimary();
  LOG.info("-- Replaying edits and flush events in secondary");
  while (true) {
    WAL.Entry entry = reader.next();
    if (entry == null) {
      break;
    }
    FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
    if (flushDesc != null) {
      if (flushDesc.getAction() == FlushAction.START_FLUSH) {
        LOG.info("-- Replaying flush start in secondary");
        secondaryRegion.replayWALFlushStartMarker(flushDesc);
      } else if (flushDesc.getAction() == FlushAction.COMMIT_FLUSH) {
        LOG.info("-- NOT Replaying flush commit in secondary");
      }
    } else {
      replayEdit(secondaryRegion, entry);
    }
  }
  assertTrue(rss.getRegionServerAccounting().getGlobalMemStoreDataSize() > 0);
  // now close the region, which should not hang because of the un-committed flush
  secondaryRegion.close();
  // verify that the memstore size is back to what it was
  assertEquals(0, rss.getRegionServerAccounting().getGlobalMemStoreDataSize());
}
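For context, a condensed sketch of the invariant this test protects, assuming the same fixtures: a replayed START_FLUSH with no matching COMMIT_FLUSH leaves a prepared snapshot behind (visible via getPrepareFlushResult(), as exercised elsewhere in this test class), and close() must still drain it from the global accounting rather than wait for a commit that never comes.

// Sketch of the invariant, assuming the same secondaryRegion/rss fixtures:
long before = rss.getRegionServerAccounting().getGlobalMemStoreDataSize();
assertTrue(before > 0);                                  // data held by the snapshot/memstore
assertNotNull(secondaryRegion.getPrepareFlushResult());  // un-committed flush is pending
secondaryRegion.close();                                 // must not block on the pending flush
assertEquals(0, rss.getRegionServerAccounting().getGlobalMemStoreDataSize());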
Use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor in project hbase by apache.
The class TestRegionReplicationSink, method testNotAddToFailedReplicas.
@Test
public void testNotAddToFailedReplicas() {
  MutableInt next = new MutableInt(0);
  List<CompletableFuture<Void>> futures =
    Stream.generate(() -> new CompletableFuture<Void>()).limit(4).collect(Collectors.toList());
  when(conn.replicate(any(), anyList(), anyInt(), anyLong(), anyLong()))
    .then(i -> futures.get(next.getAndIncrement()));

  ServerCall<?> rpcCall1 = mock(ServerCall.class);
  WALKeyImpl key1 = mock(WALKeyImpl.class);
  when(key1.estimatedSerializedSizeOf()).thenReturn(100L);
  when(key1.getSequenceId()).thenReturn(1L);
  WALEdit edit1 = mock(WALEdit.class);
  when(edit1.estimatedSerializedSizeOf()).thenReturn(1000L);
  when(manager.increase(anyLong())).thenReturn(true);
  sink.add(key1, edit1, rpcCall1);

  ServerCall<?> rpcCall2 = mock(ServerCall.class);
  WALKeyImpl key2 = mock(WALKeyImpl.class);
  when(key2.estimatedSerializedSizeOf()).thenReturn(200L);
  when(key2.getSequenceId()).thenReturn(3L);
  Map<byte[], List<Path>> committedFiles = td.getColumnFamilyNames().stream()
    .collect(Collectors.toMap(Function.identity(), k -> Collections.emptyList(), (u, v) -> {
      throw new IllegalStateException();
    }, () -> new TreeMap<>(Bytes.BYTES_COMPARATOR)));
  FlushDescriptor fd =
    ProtobufUtil.toFlushDescriptor(FlushAction.START_FLUSH, primary, 2L, committedFiles);
  WALEdit edit2 = WALEdit.createFlushWALEdit(primary, fd);
  sink.add(key2, edit2, rpcCall2);

  // fail the call to replica 2
  futures.get(0).complete(null);
  futures.get(1).completeExceptionally(new IOException("inject error"));
  // the failure should not cause replica 2 to be added to failedReplicas, as we have already
  // triggered a flush after it.
  verify(conn, times(4)).replicate(any(), anyList(), anyInt(), anyLong(), anyLong());
  futures.get(2).complete(null);
  futures.get(3).complete(null);
  // all entries should have been sent out, so there are no pending entries.
  assertEquals(0, sink.pendingSize());
}
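The stream-based committedFiles construction above is dense. An equivalent, more explicit sketch of building the START_FLUSH descriptor, assuming the same td, primary and Bytes/ProtobufUtil imports as the test, is below; at START_FLUSH no files are committed yet, which is why every family maps to an empty list.

// Equivalent explicit construction of the empty committed-files map and the
// START_FLUSH descriptor used above.
Map<byte[], List<Path>> committedFiles = new TreeMap<>(Bytes.BYTES_COMPARATOR);
for (byte[] family : td.getColumnFamilyNames()) {
  committedFiles.put(family, Collections.emptyList()); // nothing committed at START_FLUSH
}
FlushDescriptor fd =
  ProtobufUtil.toFlushDescriptor(FlushAction.START_FLUSH, primary, 2L, committedFiles);
WALEdit edit = WALEdit.createFlushWALEdit(primary, fd);

The flush edit matters to the scenario: because a flush marker with a higher sequence id was queued after the failed replication call, the sink can rely on the upcoming flush to catch the replica up instead of marking it failed.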