
Example 56 with WALKeyImpl

Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache, from the class TestHRegion, method mockWAL.

/**
 * Utility method to set up a WAL mock.
 * <p/>
 * The append stubbing needs to set the MVCC write entry on the WALKeyImpl, else the test hangs.
 * @return a mock WAL
 */
private WAL mockWAL() throws IOException {
    WAL wal = mock(WAL.class);
    when(wal.appendData(any(RegionInfo.class), any(WALKeyImpl.class), any(WALEdit.class))).thenAnswer(new Answer<Long>() {

        @Override
        public Long answer(InvocationOnMock invocation) throws Throwable {
            WALKeyImpl key = invocation.getArgument(1);
            MultiVersionConcurrencyControl.WriteEntry we = key.getMvcc().begin();
            key.setWriteEntry(we);
            return 1L;
        }
    });
    when(wal.appendMarker(any(RegionInfo.class), any(WALKeyImpl.class), any(WALEdit.class))).thenAnswer(new Answer<Long>() {

        @Override
        public Long answer(InvocationOnMock invocation) throws Throwable {
            WALKeyImpl key = invocation.getArgument(1);
            MultiVersionConcurrencyControl.WriteEntry we = key.getMvcc().begin();
            key.setWriteEntry(we);
            return 1L;
        }
    });
    return wal;
}
Also used : WAL(org.apache.hadoop.hbase.wal.WAL) WALEdit(org.apache.hadoop.hbase.wal.WALEdit) InvocationOnMock(org.mockito.invocation.InvocationOnMock) AtomicLong(java.util.concurrent.atomic.AtomicLong) ArgumentMatchers.anyLong(org.mockito.ArgumentMatchers.anyLong) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) WALKeyImpl(org.apache.hadoop.hbase.wal.WALKeyImpl)
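The two anonymous Answer classes above are identical, so the same stubbing can be written once with a lambda. A minimal sketch of that equivalent shape (my restructuring, not the HBase source; it assumes the same imports as above plus org.mockito.stubbing.Answer):

private WAL mockWAL() throws IOException {
    WAL wal = mock(WAL.class);
    // Shared answer: complete the MVCC write entry on the WALKeyImpl passed as the second
    // argument so callers waiting on a sequence id do not hang, then return a fake txid.
    Answer<Long> completeWriteEntry = invocation -> {
        WALKeyImpl key = invocation.getArgument(1);
        MultiVersionConcurrencyControl.WriteEntry we = key.getMvcc().begin();
        key.setWriteEntry(we);
        return 1L;
    };
    when(wal.appendData(any(RegionInfo.class), any(WALKeyImpl.class), any(WALEdit.class)))
        .thenAnswer(completeWriteEntry);
    when(wal.appendMarker(any(RegionInfo.class), any(WALKeyImpl.class), any(WALEdit.class)))
        .thenAnswer(completeWriteEntry);
    return wal;
}

Behavior is unchanged: both appendData and appendMarker route through the same answer, which is exactly the part the javadoc warns must happen.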

Example 57 with WALKeyImpl

Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache, from the class TestRegionReplicationSink, method testDropEdits.

@Test
public void testDropEdits() {
    MutableInt next = new MutableInt(0);
    List<CompletableFuture<Void>> futures = Arrays.asList(new CompletableFuture<>(), new CompletableFuture<>());
    when(conn.replicate(any(), anyList(), anyInt(), anyLong(), anyLong())).then(i -> futures.get(next.getAndIncrement()));
    ServerCall<?> rpcCall1 = mock(ServerCall.class);
    WALKeyImpl key1 = mock(WALKeyImpl.class);
    when(key1.estimatedSerializedSizeOf()).thenReturn(100L);
    WALEdit edit1 = mock(WALEdit.class);
    when(edit1.estimatedSerializedSizeOf()).thenReturn(1000L);
    when(manager.increase(anyLong())).thenReturn(true);
    sink.add(key1, edit1, rpcCall1);
    verify(manager, times(1)).increase(anyLong());
    verify(manager, never()).decrease(anyLong());
    verify(rpcCall1, times(1)).retainByWAL();
    assertEquals(1100, sink.pendingSize());
    ServerCall<?> rpcCall2 = mock(ServerCall.class);
    WALKeyImpl key2 = mock(WALKeyImpl.class);
    when(key2.estimatedSerializedSizeOf()).thenReturn(200L);
    WALEdit edit2 = mock(WALEdit.class);
    when(edit2.estimatedSerializedSizeOf()).thenReturn(2000L);
    sink.add(key2, edit2, rpcCall2);
    verify(manager, times(2)).increase(anyLong());
    verify(manager, never()).decrease(anyLong());
    verify(rpcCall2, times(1)).retainByWAL();
    assertEquals(3300, sink.pendingSize());
    ServerCall<?> rpcCall3 = mock(ServerCall.class);
    WALKeyImpl key3 = mock(WALKeyImpl.class);
    when(key3.estimatedSerializedSizeOf()).thenReturn(200L);
    WALEdit edit3 = mock(WALEdit.class);
    when(edit3.estimatedSerializedSizeOf()).thenReturn(3000L);
    when(manager.increase(anyLong())).thenReturn(false);
    // should not buffer this edit
    sink.add(key3, edit3, rpcCall3);
    verify(manager, times(3)).increase(anyLong());
    verify(manager, times(1)).decrease(anyLong());
    // should retain and then release immediately
    verify(rpcCall3, times(1)).retainByWAL();
    verify(rpcCall3, times(1)).releaseByWAL();
    // should also clear the pending edit
    verify(rpcCall2, times(1)).releaseByWAL();
    assertEquals(1100, sink.pendingSize());
    // should have requested a flush
    verify(flushRequester, times(1)).run();
    // finish the replication for the first edit; we should decrease the size, release the rpc call, and
    // the pendingSize should be 0 as there are no pending entries
    futures.forEach(f -> f.complete(null));
    verify(manager, times(2)).decrease(anyLong());
    verify(rpcCall1, times(1)).releaseByWAL();
    assertEquals(0, sink.pendingSize());
    // should only call replicate 2 times for replicating the first edit, as we have 2 secondary
    // replicas
    verify(conn, times(2)).replicate(any(), anyList(), anyInt(), anyLong(), anyLong());
}
Also used : CompletableFuture(java.util.concurrent.CompletableFuture) WALEdit(org.apache.hadoop.hbase.wal.WALEdit) MutableInt(org.apache.commons.lang3.mutable.MutableInt) WALKeyImpl(org.apache.hadoop.hbase.wal.WALKeyImpl) Test(org.junit.Test)
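The size assertions above are just the sum of the mocked estimatedSerializedSizeOf values for whatever the sink still holds: 100 + 1000 = 1100 after the first add, plus 200 + 2000 = 3300 after the second; when manager.increase returns false the third entry is dropped and the buffered second entry is cleared with it, leaving only the in-flight 1100, which falls to 0 once the replicate futures complete. A hypothetical helper along these lines (not part of TestRegionReplicationSink; it assumes the test's sink field and Mockito static imports) would make the repetition in these tests more compact:

private ServerCall<?> addMockedEntry(long sequenceId, long keySize, long editSize) {
    ServerCall<?> rpcCall = mock(ServerCall.class);
    WALKeyImpl key = mock(WALKeyImpl.class);
    when(key.getSequenceId()).thenReturn(sequenceId);
    when(key.estimatedSerializedSizeOf()).thenReturn(keySize);
    WALEdit edit = mock(WALEdit.class);
    when(edit.estimatedSerializedSizeOf()).thenReturn(editSize);
    // hand the mocked entry to the sink under test, exactly as the tests above do by hand
    sink.add(key, edit, rpcCall);
    return rpcCall;
}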

Example 58 with WALKeyImpl

Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache, from the class TestRegionReplicationSink, method testCountCapacity.

@Test
public void testCountCapacity() {
    MutableInt next = new MutableInt(0);
    List<CompletableFuture<Void>> futures = Stream.generate(() -> new CompletableFuture<Void>()).limit(6).collect(Collectors.toList());
    when(conn.replicate(any(), anyList(), anyInt(), anyLong(), anyLong())).then(i -> futures.get(next.getAndIncrement()));
    for (int i = 0; i < 7; i++) {
        ServerCall<?> rpcCall = mock(ServerCall.class);
        WALKeyImpl key = mock(WALKeyImpl.class);
        when(key.estimatedSerializedSizeOf()).thenReturn(100L);
        when(key.getSequenceId()).thenReturn(i + 1L);
        WALEdit edit = mock(WALEdit.class);
        when(edit.estimatedSerializedSizeOf()).thenReturn(1000L);
        when(manager.increase(anyLong())).thenReturn(true);
        sink.add(key, edit, rpcCall);
    }
    // the first entry will be sent out immediately
    verify(conn, times(2)).replicate(any(), anyList(), anyInt(), anyLong(), anyLong());
    // complete the first send
    futures.get(0).complete(null);
    futures.get(1).complete(null);
    // we should have another batch
    verify(conn, times(4)).replicate(any(), anyList(), anyInt(), anyLong(), anyLong());
    // complete the second send
    futures.get(2).complete(null);
    futures.get(3).complete(null);
    // because the count limit is 5, the above send cannot send all the edits, so we will do
    // another send
    verify(conn, times(6)).replicate(any(), anyList(), anyInt(), anyLong(), anyLong());
    // complete the third send
    futures.get(4).complete(null);
    futures.get(5).complete(null);
    // should have sent out everything, so there are no pending entries
    assertEquals(0, sink.pendingSize());
}
Also used : CompletableFuture(java.util.concurrent.CompletableFuture) WALEdit(org.apache.hadoop.hbase.wal.WALEdit) MutableInt(org.apache.commons.lang3.mutable.MutableInt) WALKeyImpl(org.apache.hadoop.hbase.wal.WALKeyImpl) Test(org.junit.Test)
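The replicate-call counts asserted above follow from simple batch arithmetic: of the 7 queued entries the first is sent on its own, the per-batch count limit of 5 splits the remaining 6 into two further batches, and each batch goes to the 2 secondary replicas, giving the cumulative 2, 4, and 6 invocations. A tiny illustrative computation (my reading of the test, not HBase code):

public class BatchCountSketch {
    public static void main(String[] args) {
        int entries = 7;        // entries added to the sink in the loop above
        int countLimit = 5;     // per-batch entry limit mentioned in the test comment
        int replicas = 2;       // secondary replicas the sink replicates to
        int remaining = entries - 1;                                   // first entry sent immediately
        int laterBatches = (remaining + countLimit - 1) / countLimit;  // ceil(6 / 5) = 2
        int totalBatches = 1 + laterBatches;                           // 3 batches in total
        System.out.println(totalBatches * replicas);                   // 3 * 2 = 6 replicate() calls
    }
}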

Example 59 with WALKeyImpl

Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache, from the class TestRegionReplicationSink, method testNotAddToFailedReplicas.

@Test
public void testNotAddToFailedReplicas() {
    MutableInt next = new MutableInt(0);
    List<CompletableFuture<Void>> futures = Stream.generate(() -> new CompletableFuture<Void>()).limit(4).collect(Collectors.toList());
    when(conn.replicate(any(), anyList(), anyInt(), anyLong(), anyLong())).then(i -> futures.get(next.getAndIncrement()));
    ServerCall<?> rpcCall1 = mock(ServerCall.class);
    WALKeyImpl key1 = mock(WALKeyImpl.class);
    when(key1.estimatedSerializedSizeOf()).thenReturn(100L);
    when(key1.getSequenceId()).thenReturn(1L);
    WALEdit edit1 = mock(WALEdit.class);
    when(edit1.estimatedSerializedSizeOf()).thenReturn(1000L);
    when(manager.increase(anyLong())).thenReturn(true);
    sink.add(key1, edit1, rpcCall1);
    ServerCall<?> rpcCall2 = mock(ServerCall.class);
    WALKeyImpl key2 = mock(WALKeyImpl.class);
    when(key2.estimatedSerializedSizeOf()).thenReturn(200L);
    when(key2.getSequenceId()).thenReturn(3L);
    Map<byte[], List<Path>> committedFiles = td.getColumnFamilyNames().stream().collect(Collectors.toMap(Function.identity(), k -> Collections.emptyList(), (u, v) -> {
        throw new IllegalStateException();
    }, () -> new TreeMap<>(Bytes.BYTES_COMPARATOR)));
    FlushDescriptor fd = ProtobufUtil.toFlushDescriptor(FlushAction.START_FLUSH, primary, 2L, committedFiles);
    WALEdit edit2 = WALEdit.createFlushWALEdit(primary, fd);
    sink.add(key2, edit2, rpcCall2);
    // fail the call to replica 2
    futures.get(0).complete(null);
    futures.get(1).completeExceptionally(new IOException("inject error"));
    // the failure should not cause replica 2 to be added to failedReplicas, as we have already
    // triggered a flush after it
    verify(conn, times(4)).replicate(any(), anyList(), anyInt(), anyLong(), anyLong());
    futures.get(2).complete(null);
    futures.get(3).complete(null);
    // should have sent out everything, so there are no pending entries
    assertEquals(0, sink.pendingSize());
}
Also used : ArgumentMatchers.any(org.mockito.ArgumentMatchers.any) FlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor) Arrays(java.util.Arrays) FlushAction(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.FlushAction) ArgumentMatchers.anyLong(org.mockito.ArgumentMatchers.anyLong) MutableInt(org.apache.commons.lang3.mutable.MutableInt) CompletableFuture(java.util.concurrent.CompletableFuture) Function(java.util.function.Function) TableDescriptorBuilder(org.apache.hadoop.hbase.client.TableDescriptorBuilder) ColumnFamilyDescriptorBuilder(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder) RegionServerTests(org.apache.hadoop.hbase.testclassification.RegionServerTests) Map(java.util.Map) Configuration(org.apache.hadoop.conf.Configuration) After(org.junit.After) Path(org.apache.hadoop.fs.Path) AsyncClusterConnection(org.apache.hadoop.hbase.client.AsyncClusterConnection) WALEdit(org.apache.hadoop.hbase.wal.WALEdit) ClassRule(org.junit.ClassRule) ArgumentMatchers.anyInt(org.mockito.ArgumentMatchers.anyInt) ServerCall(org.apache.hadoop.hbase.ipc.ServerCall) Bytes(org.apache.hadoop.hbase.util.Bytes) Before(org.junit.Before) ProtobufUtil(org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil) MediumTests(org.apache.hadoop.hbase.testclassification.MediumTests) HBaseClassTestRule(org.apache.hadoop.hbase.HBaseClassTestRule) Mockito.times(org.mockito.Mockito.times) IOException(java.io.IOException) Test(org.junit.Test) Mockito.when(org.mockito.Mockito.when) ArgumentMatchers.anyList(org.mockito.ArgumentMatchers.anyList) Category(org.junit.experimental.categories.Category) Collectors(java.util.stream.Collectors) Mockito.verify(org.mockito.Mockito.verify) Mockito.never(org.mockito.Mockito.never) List(java.util.List) Stream(java.util.stream.Stream) Rule(org.junit.Rule) TreeMap(java.util.TreeMap) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) TableNameTestRule(org.apache.hadoop.hbase.TableNameTestRule) Collections(java.util.Collections) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) RegionInfoBuilder(org.apache.hadoop.hbase.client.RegionInfoBuilder) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) WALKeyImpl(org.apache.hadoop.hbase.wal.WALKeyImpl) Assert.assertEquals(org.junit.Assert.assertEquals) Mockito.mock(org.mockito.Mockito.mock)
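The committedFiles construction in this example is fairly dense; unrolled, the three-argument Collectors.toMap call just builds an empty store-file list per column family in a byte[]-keyed TreeMap. An equivalent, more explicit sketch (my rewrite, not the HBase source; same imports as above):

// One empty store-file list per column family, keyed with a byte[]-aware comparator,
// matching the map the stream collector above builds.
Map<byte[], List<Path>> committedFiles = new TreeMap<>(Bytes.BYTES_COMPARATOR);
for (byte[] family : td.getColumnFamilyNames()) {
    committedFiles.put(family, Collections.emptyList());
}
FlushDescriptor fd = ProtobufUtil.toFlushDescriptor(FlushAction.START_FLUSH, primary, 2L, committedFiles);
WALEdit edit2 = WALEdit.createFlushWALEdit(primary, fd);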

Aggregations

WALKeyImpl (org.apache.hadoop.hbase.wal.WALKeyImpl): 59
WALEdit (org.apache.hadoop.hbase.wal.WALEdit): 44
Test (org.junit.Test): 42
KeyValue (org.apache.hadoop.hbase.KeyValue): 24
TreeMap (java.util.TreeMap): 22
WAL (org.apache.hadoop.hbase.wal.WAL): 20
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo): 17
Path (org.apache.hadoop.fs.Path): 16
IOException (java.io.IOException): 13
TableName (org.apache.hadoop.hbase.TableName): 12
MultiVersionConcurrencyControl (org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl): 12
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor): 11
WALFactory (org.apache.hadoop.hbase.wal.WALFactory): 10
ArrayList (java.util.ArrayList): 9
Entry (org.apache.hadoop.hbase.wal.WAL.Entry): 9
FileSystem (org.apache.hadoop.fs.FileSystem): 8
WALProvider (org.apache.hadoop.hbase.wal.WALProvider): 8
CompletableFuture (java.util.concurrent.CompletableFuture): 7
AtomicLong (java.util.concurrent.atomic.AtomicLong): 7
Configuration (org.apache.hadoop.conf.Configuration): 7