Example 16 with WALKeyImpl

Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.

From the class TestRegionReplicationSink, method testAddToFailedReplica.

@Test
public void testAddToFailedReplica() {
    MutableInt next = new MutableInt(0);
    List<CompletableFuture<Void>> futures = Stream.generate(() -> new CompletableFuture<Void>()).limit(5).collect(Collectors.toList());
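    // Stub replicate(...) so each call hands back the next future in order,
    // letting the test decide when each replica call completes or fails.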
    when(conn.replicate(any(), anyList(), anyInt(), anyLong(), anyLong())).then(i -> futures.get(next.getAndIncrement()));
    ServerCall<?> rpcCall1 = mock(ServerCall.class);
    WALKeyImpl key1 = mock(WALKeyImpl.class);
    when(key1.estimatedSerializedSizeOf()).thenReturn(100L);
    when(key1.getSequenceId()).thenReturn(1L);
    WALEdit edit1 = mock(WALEdit.class);
    when(edit1.estimatedSerializedSizeOf()).thenReturn(1000L);
    when(manager.increase(anyLong())).thenReturn(true);
    sink.add(key1, edit1, rpcCall1);
    ServerCall<?> rpcCall2 = mock(ServerCall.class);
    WALKeyImpl key2 = mock(WALKeyImpl.class);
    when(key2.estimatedSerializedSizeOf()).thenReturn(200L);
    when(key2.getSequenceId()).thenReturn(1L);
    WALEdit edit2 = mock(WALEdit.class);
    when(edit2.estimatedSerializedSizeOf()).thenReturn(2000L);
    when(manager.increase(anyLong())).thenReturn(true);
    sink.add(key2, edit2, rpcCall2);
    // fail the call to replica 2
    futures.get(0).complete(null);
    futures.get(1).completeExceptionally(new IOException("inject error"));
    // edit1 went to both replicas (two calls); edit2 should only go to replica 1,
    // since replica 2 is now marked as failed, so three calls in total
    verify(conn, times(3)).replicate(any(), anyList(), anyInt(), anyLong(), anyLong());
    futures.get(2).complete(null);
    // should have sent everything out, so no pending entries.
    assertEquals(0, sink.pendingSize());
    ServerCall<?> rpcCall3 = mock(ServerCall.class);
    WALKeyImpl key3 = mock(WALKeyImpl.class);
    when(key3.estimatedSerializedSizeOf()).thenReturn(200L);
    when(key3.getSequenceId()).thenReturn(3L);
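    // Build an empty committed-files map, one entry per column family, for the
    // flush descriptor below.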
    Map<byte[], List<Path>> committedFiles = td.getColumnFamilyNames().stream().collect(Collectors.toMap(Function.identity(), k -> Collections.emptyList(), (u, v) -> {
        throw new IllegalStateException();
    }, () -> new TreeMap<>(Bytes.BYTES_COMPARATOR)));
    FlushDescriptor fd = ProtobufUtil.toFlushDescriptor(FlushAction.START_FLUSH, primary, 2L, committedFiles);
    WALEdit edit3 = WALEdit.createFlushWALEdit(primary, fd);
    sink.add(key3, edit3, rpcCall3);
    // the flush marker should have cleared the failedReplicas, so we will send the edit to 2
    // replicas again
    verify(conn, times(5)).replicate(any(), anyList(), anyInt(), anyLong(), anyLong());
    futures.get(3).complete(null);
    futures.get(4).complete(null);
    // should have sent everything out, so no pending entries.
    assertEquals(0, sink.pendingSize());
}

Example 17 with WALKeyImpl

Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.

From the class TestRegionReplicationSink, method testNormal.

@Test
public void testNormal() {
    MutableInt next = new MutableInt(0);
    List<CompletableFuture<Void>> futures = Arrays.asList(new CompletableFuture<>(), new CompletableFuture<>());
    when(conn.replicate(any(), anyList(), anyInt(), anyLong(), anyLong())).then(i -> futures.get(next.getAndIncrement()));
    ServerCall<?> rpcCall = mock(ServerCall.class);
    WALKeyImpl key = mock(WALKeyImpl.class);
    when(key.estimatedSerializedSizeOf()).thenReturn(100L);
    WALEdit edit = mock(WALEdit.class);
    when(edit.estimatedSerializedSizeOf()).thenReturn(1000L);
    when(manager.increase(anyLong())).thenReturn(true);
    sink.add(key, edit, rpcCall);
    // should call increase on manager
    verify(manager, times(1)).increase(anyLong());
    // should have been retained
    verify(rpcCall, times(1)).retainByWAL();
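    // pending size = key (100) + edit (1000)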
    assertEquals(1100, sink.pendingSize());
    futures.get(0).complete(null);
    // should not call decrease yet
    verify(manager, never()).decrease(anyLong());
    // should not call release yet
    verify(rpcCall, never()).releaseByWAL();
    assertEquals(1100, sink.pendingSize());
    futures.get(1).complete(null);
    // should call decrease
    verify(manager, times(1)).decrease(anyLong());
    // should call release
    verify(rpcCall, times(1)).releaseByWAL();
    assertEquals(0, sink.pendingSize());
}

Example 18 with WALKeyImpl

Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.

From the class AbstractTestWALReplay, method testReplayEditsWrittenIntoWAL.

/**
 * Create an HRegion with the result of a WAL split and test that we only see the good edits.
 */
@Test
public void testReplayEditsWrittenIntoWAL() throws Exception {
    final TableName tableName = TableName.valueOf("testReplayEditsWrittenIntoWAL");
    final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
    final RegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
    final Path basedir = CommonFSUtils.getTableDir(hbaseRootDir, tableName);
    deleteDir(basedir);
    final TableDescriptor htd = createBasic3FamilyHTD(tableName);
    HRegion region2 = HBaseTestingUtil.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd);
    HBaseTestingUtil.closeRegionAndWAL(region2);
    final WAL wal = createWAL(this.conf, hbaseRootDir, logName);
    final byte[] rowName = tableName.getName();
    final byte[] regionName = hri.getEncodedNameAsBytes();
    // Add 1k to each family.
    final int countPerFamily = 1000;
    Set<byte[]> familyNames = new HashSet<>();
    NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
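    // Scope 0 is REPLICATION_SCOPE_LOCAL, i.e. none of these edits are replicated cross-cluster.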
    for (byte[] fam : htd.getColumnFamilyNames()) {
        scopes.put(fam, 0);
    }
    for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
        addWALEdits(tableName, hri, rowName, hcd.getName(), countPerFamily, ee, wal, mvcc, scopes);
        familyNames.add(hcd.getName());
    }
    // Add a cache flush, shouldn't have any effect
    wal.startCacheFlush(regionName, familyNames);
    wal.completeCacheFlush(regionName, HConstants.NO_SEQNUM);
    // Add an edit to another family, should be skipped.
    WALEdit edit = new WALEdit();
    long now = ee.currentTime();
    edit.add(new KeyValue(rowName, Bytes.toBytes("another family"), rowName, now, rowName));
    wal.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, now, mvcc, scopes), edit);
    // Delete the c family to verify deletes make it over.
    edit = new WALEdit();
    now = ee.currentTime();
    edit.add(new KeyValue(rowName, Bytes.toBytes("c"), null, now, KeyValue.Type.DeleteFamily));
    wal.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, now, mvcc, scopes), edit);
    // Sync.
    wal.sync();
    // Make a new conf and a new fs for the splitter to run on, so we can take
    // over the old wal.
    final Configuration newConf = HBaseConfiguration.create(this.conf);
    User user = HBaseTestingUtil.getDifferentUser(newConf, ".replay.wal.secondtime");
    user.runAs(new PrivilegedExceptionAction<Void>() {

        @Override
        public Void run() throws Exception {
            runWALSplit(newConf);
            FileSystem newFS = FileSystem.get(newConf);
            // 100k seems to make for about 4 flushes during HRegion#initialize.
            newConf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024 * 100);
            // Make a new wal for new region.
            WAL newWal = createWAL(newConf, hbaseRootDir, logName);
            final AtomicInteger flushcount = new AtomicInteger(0);
            try {
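                // Subclass HRegion so the test can count the flushes triggered
                // while the split edits are replayed during initialize().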
                final HRegion region = new HRegion(basedir, newWal, newFS, newConf, hri, htd, null) {

                    @Override
                    protected FlushResultImpl internalFlushcache(final WAL wal, final long myseqid, final Collection<HStore> storesToFlush, MonitoredTask status, boolean writeFlushWalMarker, FlushLifeCycleTracker tracker) throws IOException {
                        LOG.info("InternalFlushCache Invoked");
                        FlushResultImpl fs = super.internalFlushcache(wal, myseqid, storesToFlush, Mockito.mock(MonitoredTask.class), writeFlushWalMarker, tracker);
                        flushcount.incrementAndGet();
                        return fs;
                    }
                };
                // The seq id this region has opened up with
                long seqid = region.initialize();
                // The mvcc write point after inserting the data.
                long writePoint = mvcc.getWritePoint();
                // We flushed during init.
                assertTrue("Flushcount=" + flushcount.get(), flushcount.get() > 0);
                assertTrue((seqid - 1) == writePoint);
                Get get = new Get(rowName);
                Result result = region.get(get);
                // Make sure we only see the good edits
                assertEquals(countPerFamily * (htd.getColumnFamilies().length - 1), result.size());
                region.close();
            } finally {
                newWal.close();
            }
            return null;
        }
    });
}
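
The addWALEdits helper called in the loop above is defined elsewhere in AbstractTestWALReplay and is not shown on this page. Here is a minimal sketch of its likely shape, inferred from its call signature and the wal.appendData pattern used later in the same test; treat it as an assumption, not the real implementation:

private void addWALEdits(TableName tableName, RegionInfo hri, byte[] rowName,
        byte[] family, int count, EnvironmentEdge ee, WAL wal,
        MultiVersionConcurrencyControl mvcc, NavigableMap<byte[], Integer> scopes)
        throws IOException {
    for (int i = 0; i < count; i++) {
        // The qualifier/value scheme here is illustrative, not copied from the real helper.
        byte[] qualifier = Bytes.toBytes(Integer.toString(i));
        WALEdit edit = new WALEdit();
        edit.add(new KeyValue(rowName, family, qualifier, ee.currentTime(), qualifier));
        // One WALKeyImpl per append, same as the inline edits in the test body above.
        wal.appendData(hri,
            new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, ee.currentTime(), mvcc, scopes),
            edit);
    }
    wal.sync();
}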

Example 19 with WALKeyImpl

Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.

From the class ProtobufLogTestHelper, method generateEdit.

private static WAL.Entry generateEdit(int i, RegionInfo hri, TableName tableName, byte[] row, int columnCount, long timestamp, MultiVersionConcurrencyControl mvcc) {
    WALKeyImpl key = new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, i, timestamp, HConstants.DEFAULT_CLUSTER_ID, mvcc);
    WALEdit edit = new WALEdit();
    int prefix = i;
    IntStream.range(0, columnCount).mapToObj(j -> toValue(prefix, j)).map(value -> new KeyValue(row, row, row, timestamp, value)).forEachOrdered(edit::add);
    return new WAL.Entry(key, edit);
}
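
A hypothetical usage sketch for this helper; the table name, row, column count, and loop bound are illustrative assumptions rather than the helper's real call sites:

TableName tableName = TableName.valueOf("usage");
RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).build();
MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
List<WAL.Entry> entries = new ArrayList<>();
for (int i = 0; i < 10; i++) {
    // Each entry gets i as its sequence id and three KeyValues for the same row.
    entries.add(generateEdit(i, hri, tableName, Bytes.toBytes("row"), 3,
        System.currentTimeMillis(), mvcc));
}
// The entries could then be written through a WALProvider.Writer and read back.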

Example 20 with WALKeyImpl

Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.

From the class TestWALActionsListener, method testActionListener.

/**
 * Add a bunch of dummy data and roll the logs every two inserts. We
 * should end up with 10 rolled files (plus the roll called in
 * the constructor). Also test adding a listener while the WAL is running.
 */
@Test
public void testActionListener() throws Exception {
    DummyWALActionsListener observer = new DummyWALActionsListener();
    final WALFactory wals = new WALFactory(conf, "testActionListener");
    wals.getWALProvider().addWALActionsListener(observer);
    DummyWALActionsListener laterobserver = new DummyWALActionsListener();
    RegionInfo hri = RegionInfoBuilder.newBuilder(TableName.valueOf(SOME_BYTES)).setStartKey(SOME_BYTES).setEndKey(SOME_BYTES).build();
    final WAL wal = wals.getWAL(hri);
    MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
    for (int i = 0; i < 20; i++) {
        byte[] b = Bytes.toBytes(i + "");
        KeyValue kv = new KeyValue(b, b, b);
        WALEdit edit = new WALEdit();
        edit.add(kv);
        NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        scopes.put(b, 0);
        long txid = wal.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), TableName.valueOf(b), 0, mvcc, scopes), edit);
        wal.sync(txid);
        if (i == 10) {
            wal.registerWALActionsListener(laterobserver);
        }
        if (i % 2 == 0) {
            wal.rollWriter();
        }
    }
    wal.close();
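    // The i % 2 == 0 branch rolls at i = 0, 2, ..., 18 (10 rolls), plus the roll
    // from the WAL's construction, for 11 in total. laterobserver is registered
    // at i == 10 before that iteration's roll, so it sees the rolls at
    // i = 10, 12, 14, 16 and 18, for 5 in total.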
    assertEquals(11, observer.preLogRollCounter);
    assertEquals(11, observer.postLogRollCounter);
    assertEquals(5, laterobserver.preLogRollCounter);
    assertEquals(5, laterobserver.postLogRollCounter);
    assertEquals(1, observer.closedCount);
}
