Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.
From the class TestRegionReplicationSink, method testAddToFailedReplica.
@Test
public void testAddToFailedReplica() {
  MutableInt next = new MutableInt(0);
  List<CompletableFuture<Void>> futures =
    Stream.generate(() -> new CompletableFuture<Void>()).limit(5).collect(Collectors.toList());
  when(conn.replicate(any(), anyList(), anyInt(), anyLong(), anyLong()))
    .then(i -> futures.get(next.getAndIncrement()));

  ServerCall<?> rpcCall1 = mock(ServerCall.class);
  WALKeyImpl key1 = mock(WALKeyImpl.class);
  when(key1.estimatedSerializedSizeOf()).thenReturn(100L);
  when(key1.getSequenceId()).thenReturn(1L);
  WALEdit edit1 = mock(WALEdit.class);
  when(edit1.estimatedSerializedSizeOf()).thenReturn(1000L);
  when(manager.increase(anyLong())).thenReturn(true);
  sink.add(key1, edit1, rpcCall1);

  ServerCall<?> rpcCall2 = mock(ServerCall.class);
  WALKeyImpl key2 = mock(WALKeyImpl.class);
  when(key2.estimatedSerializedSizeOf()).thenReturn(200L);
  when(key2.getSequenceId()).thenReturn(1L);
  WALEdit edit2 = mock(WALEdit.class);
  when(edit2.estimatedSerializedSizeOf()).thenReturn(2000L);
  when(manager.increase(anyLong())).thenReturn(true);
  sink.add(key2, edit2, rpcCall2);

  // fail the call to replica 2
  futures.get(0).complete(null);
  futures.get(1).completeExceptionally(new IOException("inject error"));
  // we should only call replicate once for edit2, since replica 2 is marked as failed
  verify(conn, times(3)).replicate(any(), anyList(), anyInt(), anyLong(), anyLong());
  futures.get(2).complete(null);
  // everything should have been sent out, so no pending entries
  assertEquals(0, sink.pendingSize());

  ServerCall<?> rpcCall3 = mock(ServerCall.class);
  WALKeyImpl key3 = mock(WALKeyImpl.class);
  when(key3.estimatedSerializedSizeOf()).thenReturn(200L);
  when(key3.getSequenceId()).thenReturn(3L);
  Map<byte[], List<Path>> committedFiles = td.getColumnFamilyNames().stream()
    .collect(Collectors.toMap(Function.identity(), k -> Collections.emptyList(), (u, v) -> {
      throw new IllegalStateException();
    }, () -> new TreeMap<>(Bytes.BYTES_COMPARATOR)));
  FlushDescriptor fd =
    ProtobufUtil.toFlushDescriptor(FlushAction.START_FLUSH, primary, 2L, committedFiles);
  WALEdit edit3 = WALEdit.createFlushWALEdit(primary, fd);
  sink.add(key3, edit3, rpcCall3);
  // the flush marker should have cleared the failedReplicas, so we will send the edit to 2
  // replicas again
  verify(conn, times(5)).replicate(any(), anyList(), anyInt(), anyLong(), anyLong());
  futures.get(3).complete(null);
  futures.get(4).complete(null);
  // everything should have been sent out, so no pending entries
  assertEquals(0, sink.pendingSize());
}
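The stubbing pattern above, handing replicate a list of pre-created futures and letting the test complete them in a chosen order, works for any CompletableFuture-returning API. Below is a minimal, self-contained sketch of just that pattern; the AsyncClient type and all names in it are hypothetical, not HBase API.

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.io.IOException;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.commons.lang3.mutable.MutableInt;

class DeferredCompletionSketch {

  // Hypothetical async API standing in for the sink's replication connection.
  interface AsyncClient {
    CompletableFuture<Void> send(String payload);
  }

  void demo() {
    MutableInt next = new MutableInt(0);
    List<CompletableFuture<Void>> futures =
      Stream.generate(() -> new CompletableFuture<Void>()).limit(2).collect(Collectors.toList());
    AsyncClient client = mock(AsyncClient.class);
    // Each invocation returns the next pre-created future, so the test, not the
    // production code, decides when and how every call completes.
    when(client.send(any())).then(i -> futures.get(next.getAndIncrement()));
    client.send("first");
    client.send("second");
    // Complete out of order: fail the second call, then succeed the first.
    futures.get(1).completeExceptionally(new IOException("inject error"));
    futures.get(0).complete(null);
  }
}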
Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.
From the class TestRegionReplicationSink, method testNormal.
@Test
public void testNormal() {
  MutableInt next = new MutableInt(0);
  List<CompletableFuture<Void>> futures =
    Arrays.asList(new CompletableFuture<>(), new CompletableFuture<>());
  when(conn.replicate(any(), anyList(), anyInt(), anyLong(), anyLong()))
    .then(i -> futures.get(next.getAndIncrement()));
  ServerCall<?> rpcCall = mock(ServerCall.class);
  WALKeyImpl key = mock(WALKeyImpl.class);
  when(key.estimatedSerializedSizeOf()).thenReturn(100L);
  WALEdit edit = mock(WALEdit.class);
  when(edit.estimatedSerializedSizeOf()).thenReturn(1000L);
  when(manager.increase(anyLong())).thenReturn(true);
  sink.add(key, edit, rpcCall);
  // should call increase on manager
  verify(manager, times(1)).increase(anyLong());
  // should have been retained
  verify(rpcCall, times(1)).retainByWAL();
  assertEquals(1100, sink.pendingSize());
  futures.get(0).complete(null);
  // should not call decrease yet
  verify(manager, never()).decrease(anyLong());
  // should not call release yet
  verify(rpcCall, never()).releaseByWAL();
  assertEquals(1100, sink.pendingSize());
  futures.get(1).complete(null);
  // should call decrease
  verify(manager, times(1)).decrease(anyLong());
  // should call release
  verify(rpcCall, times(1)).releaseByWAL();
  assertEquals(0, sink.pendingSize());
}
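Both tests above mock WALKeyImpl because only its estimated size and sequence id matter to the sink; when a real key is needed, it can be constructed directly. A minimal sketch using the same five-argument constructor that appears in the replay tests below; encodedRegionName, tableName and mvcc are assumed to be in scope.

// Sketch: a real WALKeyImpl instead of a Mockito mock. The replication scope
// map lists the families; scope 0 means local-only (no cross-cluster replication).
NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
scopes.put(Bytes.toBytes("cf"), 0);
WALKeyImpl key = new WALKeyImpl(encodedRegionName, tableName,
  EnvironmentEdgeManager.currentTime(), mvcc, scopes);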
Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.
From the class AbstractTestWALReplay, method testReplayEditsWrittenIntoWAL.
/**
 * Create an HRegion with the result of a WAL split and test we only see the good edits.
 */
@Test
public void testReplayEditsWrittenIntoWAL() throws Exception {
  final TableName tableName = TableName.valueOf("testReplayEditsWrittenIntoWAL");
  final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
  final RegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
  final Path basedir = CommonFSUtils.getTableDir(hbaseRootDir, tableName);
  deleteDir(basedir);
  final TableDescriptor htd = createBasic3FamilyHTD(tableName);
  HRegion region2 = HBaseTestingUtil.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd);
  HBaseTestingUtil.closeRegionAndWAL(region2);
  final WAL wal = createWAL(this.conf, hbaseRootDir, logName);
  final byte[] rowName = tableName.getName();
  final byte[] regionName = hri.getEncodedNameAsBytes();
  // Add 1k to each family.
  final int countPerFamily = 1000;
  Set<byte[]> familyNames = new HashSet<>();
  NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  for (byte[] fam : htd.getColumnFamilyNames()) {
    scopes.put(fam, 0);
  }
  for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
    addWALEdits(tableName, hri, rowName, hcd.getName(), countPerFamily, ee, wal, mvcc, scopes);
    familyNames.add(hcd.getName());
  }
  // Add a cache flush; it shouldn't have any effect.
  wal.startCacheFlush(regionName, familyNames);
  wal.completeCacheFlush(regionName, HConstants.NO_SEQNUM);
  // Add an edit to another family; it should be skipped.
  WALEdit edit = new WALEdit();
  long now = ee.currentTime();
  edit.add(new KeyValue(rowName, Bytes.toBytes("another family"), rowName, now, rowName));
  wal.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, now, mvcc, scopes),
    edit);
  // Delete the c family to verify deletes make it over.
  edit = new WALEdit();
  now = ee.currentTime();
  edit.add(new KeyValue(rowName, Bytes.toBytes("c"), null, now, KeyValue.Type.DeleteFamily));
  wal.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, now, mvcc, scopes),
    edit);
  // Sync.
  wal.sync();
  // Make a new conf and a new fs for the splitter to run on, so we can take over the old wal.
  final Configuration newConf = HBaseConfiguration.create(this.conf);
  User user = HBaseTestingUtil.getDifferentUser(newConf, ".replay.wal.secondtime");
  user.runAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      runWALSplit(newConf);
      FileSystem newFS = FileSystem.get(newConf);
      // 100k seems to make for about 4 flushes during HRegion#initialize.
      newConf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024 * 100);
      // Make a new wal for the new region.
      WAL newWal = createWAL(newConf, hbaseRootDir, logName);
      final AtomicInteger flushcount = new AtomicInteger(0);
      try {
        final HRegion region = new HRegion(basedir, newWal, newFS, newConf, hri, htd, null) {
          @Override
          protected FlushResultImpl internalFlushcache(final WAL wal, final long myseqid,
            final Collection<HStore> storesToFlush, MonitoredTask status,
            boolean writeFlushWalMarker, FlushLifeCycleTracker tracker) throws IOException {
            LOG.info("InternalFlushCache invoked");
            FlushResultImpl fs = super.internalFlushcache(wal, myseqid, storesToFlush,
              Mockito.mock(MonitoredTask.class), writeFlushWalMarker, tracker);
            flushcount.incrementAndGet();
            return fs;
          }
        };
        // The seq id this region has opened up with
        long seqid = region.initialize();
        // The mvcc read point from inserting data.
        long writePoint = mvcc.getWritePoint();
        // We flushed during init.
        assertTrue("Flushcount=" + flushcount.get(), flushcount.get() > 0);
        assertTrue((seqid - 1) == writePoint);
        Get get = new Get(rowName);
        Result result = region.get(get);
        // Make sure we only see the good edits.
        assertEquals(countPerFamily * (htd.getColumnFamilies().length - 1), result.size());
        region.close();
      } finally {
        newWal.close();
      }
      return null;
    }
  });
}
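addWALEdits is a private helper of the test class and is not shown in this snippet. A hedged sketch of what such a helper plausibly looks like, building one WALKeyImpl per append; the real method may differ in detail.

// Hypothetical reconstruction of the addWALEdits helper used above.
private void addWALEdits(TableName tableName, RegionInfo hri, byte[] rowName, byte[] family,
  int count, EnvironmentEdge ee, WAL wal, MultiVersionConcurrencyControl mvcc,
  NavigableMap<byte[], Integer> scopes) throws IOException {
  for (int j = 0; j < count; j++) {
    byte[] qualifier = Bytes.toBytes(Integer.toString(j));
    WALEdit edit = new WALEdit();
    edit.add(new KeyValue(rowName, family, qualifier, ee.currentTime(), qualifier));
    // One WALKeyImpl per append; the key carries region, table, time, mvcc and scopes.
    wal.appendData(hri,
      new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, ee.currentTime(), mvcc, scopes),
      edit);
  }
}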
Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.
From the class ProtobufLogTestHelper, method generateEdit.
private static WAL.Entry generateEdit(int i, RegionInfo hri, TableName tableName, byte[] row,
  int columnCount, long timestamp, MultiVersionConcurrencyControl mvcc) {
  WALKeyImpl key = new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, i, timestamp,
    HConstants.DEFAULT_CLUSTER_ID, mvcc);
  WALEdit edit = new WALEdit();
  int prefix = i;
  IntStream.range(0, columnCount).mapToObj(j -> toValue(prefix, j))
    .map(value -> new KeyValue(row, row, row, timestamp, value)).forEachOrdered(edit::add);
  return new WAL.Entry(key, edit);
}
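A possible driver for this helper, building a batch of entries whose sequence ids increase with i; hri, tableName and the columnCount of 5 are assumed here for illustration only.

// Hypothetical usage of generateEdit; entry i is created with sequence id i.
MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
long timestamp = EnvironmentEdgeManager.currentTime();
byte[] row = Bytes.toBytes("row");
List<WAL.Entry> entries = new ArrayList<>();
for (int i = 0; i < 10; i++) {
  entries.add(generateEdit(i, hri, tableName, row, 5, timestamp, mvcc));
}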
Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.
From the class TestWALActionsListener, method testActionListener.
/**
 * Add a bunch of dummy data and roll the logs every two inserts. We should end up with 10
 * rolled files (plus the roll called in the constructor). Also test adding a listener while
 * it's running.
 */
@Test
public void testActionListener() throws Exception {
  DummyWALActionsListener observer = new DummyWALActionsListener();
  final WALFactory wals = new WALFactory(conf, "testActionListener");
  wals.getWALProvider().addWALActionsListener(observer);
  DummyWALActionsListener laterobserver = new DummyWALActionsListener();
  RegionInfo hri = RegionInfoBuilder.newBuilder(TableName.valueOf(SOME_BYTES))
    .setStartKey(SOME_BYTES).setEndKey(SOME_BYTES).build();
  final WAL wal = wals.getWAL(hri);
  MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
  for (int i = 0; i < 20; i++) {
    byte[] b = Bytes.toBytes(i + "");
    KeyValue kv = new KeyValue(b, b, b);
    WALEdit edit = new WALEdit();
    edit.add(kv);
    NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    scopes.put(b, 0);
    long txid = wal.appendData(hri,
      new WALKeyImpl(hri.getEncodedNameAsBytes(), TableName.valueOf(b), 0, mvcc, scopes), edit);
    wal.sync(txid);
    if (i == 10) {
      wal.registerWALActionsListener(laterobserver);
    }
    if (i % 2 == 0) {
      wal.rollWriter();
    }
  }
  wal.close();
  assertEquals(11, observer.preLogRollCounter);
  assertEquals(11, observer.postLogRollCounter);
  assertEquals(5, laterobserver.preLogRollCounter);
  assertEquals(5, laterobserver.postLogRollCounter);
  assertEquals(1, observer.closedCount);
}
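DummyWALActionsListener is a small counting listener defined by the test class; a hedged sketch of such a listener, assuming WALActionsListener's roll and close hooks as named below.

// Sketch of a counting listener in the spirit of DummyWALActionsListener.
class CountingWALListener implements WALActionsListener {
  int preLogRollCounter = 0;
  int postLogRollCounter = 0;
  int closedCount = 0;

  @Override
  public void preLogRoll(Path oldPath, Path newPath) {
    // Called before the WAL writer is rolled to a new file.
    preLogRollCounter++;
  }

  @Override
  public void postLogRoll(Path oldPath, Path newPath) {
    // Called after the roll completes.
    postLogRollCounter++;
  }

  @Override
  public void logCloseRequested() {
    // Called when the WAL is about to close.
    closedCount++;
  }
}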