Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.
The class TestHRegion, method mockWAL:
/**
 * Utility method to setup a WAL mock.
 * <p/>
 * Needs to do the bit where we close the latch on the WALKeyImpl on append, else the test hangs.
 * @return a mock WAL
 */
private WAL mockWAL() throws IOException {
  WAL wal = mock(WAL.class);
  when(wal.appendData(any(RegionInfo.class), any(WALKeyImpl.class), any(WALEdit.class)))
    .thenAnswer(new Answer<Long>() {

      @Override
      public Long answer(InvocationOnMock invocation) throws Throwable {
        WALKeyImpl key = invocation.getArgument(1);
        MultiVersionConcurrencyControl.WriteEntry we = key.getMvcc().begin();
        key.setWriteEntry(we);
        return 1L;
      }
    });
  when(wal.appendMarker(any(RegionInfo.class), any(WALKeyImpl.class), any(WALEdit.class)))
    .thenAnswer(new Answer<Long>() {

      @Override
      public Long answer(InvocationOnMock invocation) throws Throwable {
        WALKeyImpl key = invocation.getArgument(1);
        MultiVersionConcurrencyControl.WriteEntry we = key.getMvcc().begin();
        key.setWriteEntry(we);
        return 1L;
      }
    });
  return wal;
}
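Not part of the quoted source; a minimal sketch of how this mock behaves when driven with a real MultiVersionConcurrencyControl. It assumes the WALKeyImpl constructor that accepts an MVCC instance; the region and table names are placeholders. The point is that the stubbed Answer attaches a WriteEntry to the key, so a caller that later waits on that entry through the MVCC returns instead of hanging.

// Illustrative only (hypothetical helper, not from TestHRegion).
private void demoMockWAL() throws IOException {
  WAL wal = mockWAL();
  MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
  // Build the key with a real MVCC so key.getMvcc() is non-null inside the stubbed Answer.
  WALKeyImpl key = new WALKeyImpl(Bytes.toBytes("demo-region"), TableName.valueOf("demo"),
    EnvironmentEdgeManager.currentTime(), mvcc);
  wal.appendData(mock(RegionInfo.class), key, new WALEdit());
  // The Answer set the write entry on append, so this completes rather than blocking.
  mvcc.completeAndWait(key.getWriteEntry());
}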
Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.
The class TestRegionReplicationSink, method testDropEdits:
@Test
public void testDropEdits() {
  MutableInt next = new MutableInt(0);
  List<CompletableFuture<Void>> futures =
    Arrays.asList(new CompletableFuture<>(), new CompletableFuture<>());
  when(conn.replicate(any(), anyList(), anyInt(), anyLong(), anyLong()))
    .then(i -> futures.get(next.getAndIncrement()));
  ServerCall<?> rpcCall1 = mock(ServerCall.class);
  WALKeyImpl key1 = mock(WALKeyImpl.class);
  when(key1.estimatedSerializedSizeOf()).thenReturn(100L);
  WALEdit edit1 = mock(WALEdit.class);
  when(edit1.estimatedSerializedSizeOf()).thenReturn(1000L);
  when(manager.increase(anyLong())).thenReturn(true);
  sink.add(key1, edit1, rpcCall1);
  verify(manager, times(1)).increase(anyLong());
  verify(manager, never()).decrease(anyLong());
  verify(rpcCall1, times(1)).retainByWAL();
  assertEquals(1100, sink.pendingSize());
  ServerCall<?> rpcCall2 = mock(ServerCall.class);
  WALKeyImpl key2 = mock(WALKeyImpl.class);
  when(key2.estimatedSerializedSizeOf()).thenReturn(200L);
  WALEdit edit2 = mock(WALEdit.class);
  when(edit2.estimatedSerializedSizeOf()).thenReturn(2000L);
  sink.add(key2, edit2, rpcCall2);
  verify(manager, times(2)).increase(anyLong());
  verify(manager, never()).decrease(anyLong());
  verify(rpcCall2, times(1)).retainByWAL();
  assertEquals(3300, sink.pendingSize());
  ServerCall<?> rpcCall3 = mock(ServerCall.class);
  WALKeyImpl key3 = mock(WALKeyImpl.class);
  when(key3.estimatedSerializedSizeOf()).thenReturn(200L);
  WALEdit edit3 = mock(WALEdit.class);
  when(edit3.estimatedSerializedSizeOf()).thenReturn(3000L);
  when(manager.increase(anyLong())).thenReturn(false);
  // should not buffer this edit
  sink.add(key3, edit3, rpcCall3);
  verify(manager, times(3)).increase(anyLong());
  verify(manager, times(1)).decrease(anyLong());
  // should retain and then release immediately
  verify(rpcCall3, times(1)).retainByWAL();
  verify(rpcCall3, times(1)).releaseByWAL();
  // should also clear the pending edit
  verify(rpcCall2, times(1)).releaseByWAL();
  assertEquals(1100, sink.pendingSize());
  // should have requested a flush
  verify(flushRequester, times(1)).run();
  // finish the replication for the first edit; we should decrease the size, release the rpc
  // call, and the pendingSize should be 0 as there are no pending entries
  futures.forEach(f -> f.complete(null));
  verify(manager, times(2)).decrease(anyLong());
  verify(rpcCall1, times(1)).releaseByWAL();
  assertEquals(0, sink.pendingSize());
  // should only call replicate 2 times for replicating the first edit, as we have 2 secondary
  // replicas
  verify(conn, times(2)).replicate(any(), anyList(), anyInt(), anyLong(), anyLong());
}
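The asserted pendingSize values follow from the mocked key/edit sizes; a small arithmetic sketch (the variable names are illustrative, not the sink's internals):

long entry1 = 100L + 1000L;  // key1 + edit1 = 1100, sent and kept in flight
long entry2 = 200L + 2000L;  // key2 + edit2 = 2200, buffered while entry1 is in flight -> 3300
long entry3 = 200L + 3000L;  // rejected because manager.increase(...) returns false
// Dropping edits also clears the buffered entry2, so only the in-flight entry1 remains (1100),
// and completing the futures drains that too (pendingSize back to 0).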
Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.
The class TestRegionReplicationSink, method testCountCapacity:
@Test
public void testCountCapacity() {
  MutableInt next = new MutableInt(0);
  List<CompletableFuture<Void>> futures =
    Stream.generate(() -> new CompletableFuture<Void>()).limit(6).collect(Collectors.toList());
  when(conn.replicate(any(), anyList(), anyInt(), anyLong(), anyLong()))
    .then(i -> futures.get(next.getAndIncrement()));
  for (int i = 0; i < 7; i++) {
    ServerCall<?> rpcCall = mock(ServerCall.class);
    WALKeyImpl key = mock(WALKeyImpl.class);
    when(key.estimatedSerializedSizeOf()).thenReturn(100L);
    when(key.getSequenceId()).thenReturn(i + 1L);
    WALEdit edit = mock(WALEdit.class);
    when(edit.estimatedSerializedSizeOf()).thenReturn(1000L);
    when(manager.increase(anyLong())).thenReturn(true);
    sink.add(key, edit, rpcCall);
  }
  // the first entry will be sent out immediately
  verify(conn, times(2)).replicate(any(), anyList(), anyInt(), anyLong(), anyLong());
  // complete the first send
  futures.get(0).complete(null);
  futures.get(1).complete(null);
  // we should have another batch
  verify(conn, times(4)).replicate(any(), anyList(), anyInt(), anyLong(), anyLong());
  // complete the second send
  futures.get(2).complete(null);
  futures.get(3).complete(null);
  // because the count limit is 5, the send above cannot send all the edits, so we will do
  // another send
  verify(conn, times(6)).replicate(any(), anyList(), anyInt(), anyLong(), anyLong());
  // complete the third send
  futures.get(4).complete(null);
  futures.get(5).complete(null);
  // should have sent out everything, so there are no pending entries
  assertEquals(0, sink.pendingSize());
}
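The expected replicate() counts come from batching over two secondary replicas with a per-batch count limit of five (the limit is implied by the test's comment; the configuration that sets it is not shown here):

// Batch 1: entry 1, sent immediately            -> 2 replicate() calls so far (1 per replica)
// Batch 2: entries 2-6, capped at 5 per batch   -> 4 calls so far
// Batch 3: entry 7, left over after the cap     -> 6 calls in total
int replicas = 2;
int[] batchSizes = { 1, 5, 1 };
int expectedReplicateCalls = replicas * batchSizes.length; // 6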
Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.
The class TestRegionReplicationSink, method testNotAddToFailedReplicas:
@Test
public void testNotAddToFailedReplicas() {
  MutableInt next = new MutableInt(0);
  List<CompletableFuture<Void>> futures =
    Stream.generate(() -> new CompletableFuture<Void>()).limit(4).collect(Collectors.toList());
  when(conn.replicate(any(), anyList(), anyInt(), anyLong(), anyLong()))
    .then(i -> futures.get(next.getAndIncrement()));
  ServerCall<?> rpcCall1 = mock(ServerCall.class);
  WALKeyImpl key1 = mock(WALKeyImpl.class);
  when(key1.estimatedSerializedSizeOf()).thenReturn(100L);
  when(key1.getSequenceId()).thenReturn(1L);
  WALEdit edit1 = mock(WALEdit.class);
  when(edit1.estimatedSerializedSizeOf()).thenReturn(1000L);
  when(manager.increase(anyLong())).thenReturn(true);
  sink.add(key1, edit1, rpcCall1);
  ServerCall<?> rpcCall2 = mock(ServerCall.class);
  WALKeyImpl key2 = mock(WALKeyImpl.class);
  when(key2.estimatedSerializedSizeOf()).thenReturn(200L);
  when(key2.getSequenceId()).thenReturn(3L);
  Map<byte[], List<Path>> committedFiles = td.getColumnFamilyNames().stream()
    .collect(Collectors.toMap(Function.identity(), k -> Collections.emptyList(), (u, v) -> {
      throw new IllegalStateException();
    }, () -> new TreeMap<>(Bytes.BYTES_COMPARATOR)));
  FlushDescriptor fd =
    ProtobufUtil.toFlushDescriptor(FlushAction.START_FLUSH, primary, 2L, committedFiles);
  WALEdit edit2 = WALEdit.createFlushWALEdit(primary, fd);
  sink.add(key2, edit2, rpcCall2);
  // fail the call to replica 2
  futures.get(0).complete(null);
  futures.get(1).completeExceptionally(new IOException("inject error"));
  // the failure should not cause replica 2 to be added to failedReplicas, as we have already
  // triggered a flush after it
  verify(conn, times(4)).replicate(any(), anyList(), anyInt(), anyLong(), anyLong());
  futures.get(2).complete(null);
  futures.get(3).complete(null);
  // should have sent out everything, so there are no pending entries
  assertEquals(0, sink.pendingSize());
}
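A short sketch of the sequence-id reasoning this test relies on (the numbers come from the mocks above; "failedReplicas" is the name used in the test's own comment):

long failedEditSeqId = 1L;  // key1: its replication to replica 2 fails
long startFlushSeqId = 2L;  // the START_FLUSH descriptor queued with edit2 (key2 has seq id 3)
// A flush with a higher sequence id is already queued behind the failed edit, so the sink can
// expect the flush to bring replica 2 up to date; the replica is not marked failed and 4
// replicate() calls are still issued in total (2 entries x 2 replicas).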