
Example 1 with WALKeyImpl

Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.

From class TestHRegion, method testSkipRecoveredEditsReplay:

@Test
public void testSkipRecoveredEditsReplay() throws Exception {
    byte[] family = Bytes.toBytes("family");
    this.region = initHRegion(tableName, method, CONF, family);
    final WALFactory wals = new WALFactory(CONF, method);
    try {
        Path regiondir = region.getRegionFileSystem().getRegionDir();
        FileSystem fs = region.getRegionFileSystem().getFileSystem();
        byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
        Path recoveredEditsDir = WALSplitUtil.getRegionDirRecoveredEditsDir(regiondir);
        long maxSeqId = 1050;
        long minSeqId = 1000;
        for (long i = minSeqId; i <= maxSeqId; i += 10) {
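            // Recovered edits files are named with a zero-padded sequence id so that
            // replay can process them in order; each file written here starts at sequence id i.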
            Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
            fs.create(recoveredEdits);
            WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);
            long time = System.nanoTime();
            WALEdit edit = new WALEdit();
            edit.add(new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes.toBytes(i)));
            writer.append(new WAL.Entry(new WALKeyImpl(regionName, tableName, i, time, HConstants.DEFAULT_CLUSTER_ID), edit));
            writer.close();
        }
        MonitoredTask status = TaskMonitor.get().createStatus(method);
        Map<byte[], Long> maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        for (HStore store : region.getStores()) {
            maxSeqIdInStores.put(Bytes.toBytes(store.getColumnFamilyName()), minSeqId - 1);
        }
        long seqId = region.replayRecoveredEditsIfAny(maxSeqIdInStores, null, status);
        assertEquals(maxSeqId, seqId);
        region.getMVCC().advanceTo(seqId);
        Get get = new Get(row);
        Result result = region.get(get);
        for (long i = minSeqId; i <= maxSeqId; i += 10) {
            List<Cell> kvs = result.getColumnCells(family, Bytes.toBytes(i));
            assertEquals(1, kvs.size());
            assertArrayEquals(Bytes.toBytes(i), CellUtil.cloneValue(kvs.get(0)));
        }
    } finally {
        HBaseTestingUtil.closeRegionAndWAL(this.region);
        this.region = null;
        wals.close();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) KeyValue(org.apache.hadoop.hbase.KeyValue) WAL(org.apache.hadoop.hbase.wal.WAL) TreeMap(java.util.TreeMap) CheckAndMutateResult(org.apache.hadoop.hbase.client.CheckAndMutateResult) Result(org.apache.hadoop.hbase.client.Result) WALEdit(org.apache.hadoop.hbase.wal.WALEdit) FileSystem(org.apache.hadoop.fs.FileSystem) FaultyFileSystem(org.apache.hadoop.hbase.regionserver.TestHStore.FaultyFileSystem) Writer(org.apache.hadoop.hbase.wal.WALProvider.Writer) Get(org.apache.hadoop.hbase.client.Get) AtomicLong(java.util.concurrent.atomic.AtomicLong) ArgumentMatchers.anyLong(org.mockito.ArgumentMatchers.anyLong) WALFactory(org.apache.hadoop.hbase.wal.WALFactory) WALKeyImpl(org.apache.hadoop.hbase.wal.WALKeyImpl) Cell(org.apache.hadoop.hbase.Cell) WALProvider(org.apache.hadoop.hbase.wal.WALProvider) AbstractFSWALProvider(org.apache.hadoop.hbase.wal.AbstractFSWALProvider) MonitoredTask(org.apache.hadoop.hbase.monitoring.MonitoredTask) Test(org.junit.Test)
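
A minimal sketch of the same pattern, reusing only the constructors shown above (KeyValue, WALEdit, WALKeyImpl, WAL.Entry); the helper name and parameters are illustrative, not part of HBase:

static WAL.Entry makePutEntry(byte[] encodedRegionName, TableName tableName, long seqId,
        long time, byte[] row, byte[] family, byte[] qualifier, byte[] value) {
    // One Put cell wrapped in a WALEdit...
    WALEdit edit = new WALEdit();
    edit.add(new KeyValue(row, family, qualifier, time, KeyValue.Type.Put, value));
    // ...keyed by region, table, sequence id and write time, as in the loop above.
    WALKeyImpl key = new WALKeyImpl(encodedRegionName, tableName, seqId, time,
        HConstants.DEFAULT_CLUSTER_ID);
    return new WAL.Entry(key, edit);
}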

Example 2 with WALKeyImpl

Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.

From class TestReplicationSource, method testLogMoving:

/**
 * Sanity check that we can move logs around while we are reading
 * from them. Should this test fail, ReplicationSource would have a hard
 * time reading logs that are being archived.
 */
// This test doesn't belong in here... it is not about ReplicationSource.
@Test
public void testLogMoving() throws Exception {
    Path logPath = new Path(logDir, "log");
    if (!FS.exists(logDir)) {
        FS.mkdirs(logDir);
    }
    if (!FS.exists(oldLogDir)) {
        FS.mkdirs(oldLogDir);
    }
    WALProvider.Writer writer = WALFactory.createWALWriter(FS, logPath, TEST_UTIL.getConfiguration());
    for (int i = 0; i < 3; i++) {
        byte[] b = Bytes.toBytes(Integer.toString(i));
        KeyValue kv = new KeyValue(b, b, b);
        WALEdit edit = new WALEdit();
        edit.add(kv);
        WALKeyImpl key = new WALKeyImpl(b, TableName.valueOf(b), 0, 0, HConstants.DEFAULT_CLUSTER_ID);
        writer.append(new WAL.Entry(key, edit));
        writer.sync(false);
    }
    writer.close();
    WAL.Reader reader = WALFactory.createReader(FS, logPath, TEST_UTIL.getConfiguration());
    WAL.Entry entry = reader.next();
    assertNotNull(entry);
    Path oldLogPath = new Path(oldLogDir, "log");
    FS.rename(logPath, oldLogPath);
    entry = reader.next();
    assertNotNull(entry);
    reader.next();
    entry = reader.next();
    assertNull(entry);
    reader.close();
}
Also used : Path(org.apache.hadoop.fs.Path) KeyValue(org.apache.hadoop.hbase.KeyValue) WAL(org.apache.hadoop.hbase.wal.WAL) WALEdit(org.apache.hadoop.hbase.wal.WALEdit) WALKeyImpl(org.apache.hadoop.hbase.wal.WALKeyImpl) WALProvider(org.apache.hadoop.hbase.wal.WALProvider) ReplicationEndpoint(org.apache.hadoop.hbase.replication.ReplicationEndpoint) Test(org.junit.Test)
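
A minimal sketch of the read side exercised above, assuming the same FS, logPath and TEST_UTIL fields and that WAL.Reader is Closeable in this HBase version; it walks every entry until next() returns null:

try (WAL.Reader reader = WALFactory.createReader(FS, logPath, TEST_UTIL.getConfiguration())) {
    for (WAL.Entry entry = reader.next(); entry != null; entry = reader.next()) {
        // Each entry carries a WALKeyImpl holding the table name and sequence id written above.
        System.out.println(entry.getKey().getTableName() + " seq=" + entry.getKey().getSequenceId());
    }
}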

Example 3 with WALKeyImpl

Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.

From class TestReplicationSource, method testTerminateClearsBuffer:

@Test
public void testTerminateClearsBuffer() throws Exception {
    ReplicationSource source = new ReplicationSource();
    ReplicationSourceManager mockManager = mock(ReplicationSourceManager.class);
    MetricsReplicationGlobalSourceSource mockMetrics = mock(MetricsReplicationGlobalSourceSource.class);
    AtomicLong buffer = new AtomicLong();
    Mockito.when(mockManager.getTotalBufferUsed()).thenReturn(buffer);
    Mockito.when(mockManager.getGlobalMetrics()).thenReturn(mockMetrics);
    ReplicationPeer mockPeer = mock(ReplicationPeer.class);
    Mockito.when(mockPeer.getPeerBandwidth()).thenReturn(0L);
    Configuration testConf = HBaseConfiguration.create();
    source.init(testConf, null, mockManager, null, mockPeer, null, "testPeer", null, p -> OptionalLong.empty(), mock(MetricsSource.class));
    ReplicationSourceWALReader reader = new ReplicationSourceWALReader(null, conf, null, 0, null, source, null);
    ReplicationSourceShipper shipper = new ReplicationSourceShipper(conf, null, null, source);
    shipper.entryReader = reader;
    source.workerThreads.put("testPeer", shipper);
    WALEntryBatch batch = new WALEntryBatch(10, logDir);
    WAL.Entry mockEntry = mock(WAL.Entry.class);
    WALEdit mockEdit = mock(WALEdit.class);
    WALKeyImpl mockKey = mock(WALKeyImpl.class);
    when(mockEntry.getEdit()).thenReturn(mockEdit);
    when(mockEdit.isEmpty()).thenReturn(false);
    when(mockEntry.getKey()).thenReturn(mockKey);
    when(mockKey.estimatedSerializedSizeOf()).thenReturn(1000L);
    when(mockEdit.heapSize()).thenReturn(10000L);
    when(mockEdit.size()).thenReturn(0);
    ArrayList<Cell> cells = new ArrayList<>();
    KeyValue kv = new KeyValue(Bytes.toBytes("0001"), Bytes.toBytes("f"), Bytes.toBytes("1"), Bytes.toBytes("v1"));
    cells.add(kv);
    when(mockEdit.getCells()).thenReturn(cells);
    reader.addEntryToBatch(batch, mockEntry);
    reader.entryBatchQueue.put(batch);
    source.terminate("test");
    assertEquals(0, source.getSourceManager().getTotalBufferUsed().get());
}
Also used : WAL(org.apache.hadoop.hbase.wal.WAL) KeyValue(org.apache.hadoop.hbase.KeyValue) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) ArrayList(java.util.ArrayList) ReplicationPeer(org.apache.hadoop.hbase.replication.ReplicationPeer) AtomicLong(java.util.concurrent.atomic.AtomicLong) WALEdit(org.apache.hadoop.hbase.wal.WALEdit) WALKeyImpl(org.apache.hadoop.hbase.wal.WALKeyImpl) Cell(org.apache.hadoop.hbase.Cell) Test(org.junit.Test)
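
A hedged sketch of the bookkeeping the mocks above stand in for: the buffer usage charged per entry is assumed to combine the key's serialized size and the edit's heap size, which is why the test stubs estimatedSerializedSizeOf() and heapSize():

static long estimateEntryBufferUsage(WAL.Entry entry) {
    // Assumption about the accounting, not a copy of ReplicationSourceWALReader internals.
    return entry.getKey().estimatedSerializedSizeOf() + entry.getEdit().heapSize();
}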

Example 4 with WALKeyImpl

Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.

From class TestReplicationWALEntryFilters, method testSystemTableWALEntryFilter:

@Test
public void testSystemTableWALEntryFilter() {
    SystemTableWALEntryFilter filter = new SystemTableWALEntryFilter();
    // meta
    WALKeyImpl key1 = new WALKeyImpl(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes(), TableName.META_TABLE_NAME, EnvironmentEdgeManager.currentTime());
    Entry metaEntry = new Entry(key1, null);
    assertNull(filter.filter(metaEntry));
    // user table
    WALKeyImpl key3 = new WALKeyImpl(new byte[0], TableName.valueOf("foo"), EnvironmentEdgeManager.currentTime());
    Entry userEntry = new Entry(key3, null);
    assertEquals(userEntry, filter.filter(userEntry));
}
Also used : Entry(org.apache.hadoop.hbase.wal.WAL.Entry) WALKeyImpl(org.apache.hadoop.hbase.wal.WALKeyImpl) Test(org.junit.Test)
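
A hedged sketch of the same filter contract: a WALEntryFilter (org.apache.hadoop.hbase.replication) that drops entries for one table by inspecting the entry's key. The class name and table are made up; returning null drops the entry, just as SystemTableWALEntryFilter does for hbase:meta above:

public class SingleTableSkipFilter implements WALEntryFilter {
    private final TableName skipped = TableName.valueOf("foo");

    @Override
    public WAL.Entry filter(WAL.Entry entry) {
        // Null means "filter this entry out"; anything else passes through unchanged.
        return skipped.equals(entry.getKey().getTableName()) ? null : entry;
    }
}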

Example 5 with WALKeyImpl

Use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.

From class TestBasicWALEntryStream, method testWALKeySerialization:

@Test
public void testWALKeySerialization() throws Exception {
    Map<String, byte[]> attributes = new HashMap<String, byte[]>();
    attributes.put("foo", Bytes.toBytes("foo-value"));
    attributes.put("bar", Bytes.toBytes("bar-value"));
    WALKeyImpl key = new WALKeyImpl(info.getEncodedNameAsBytes(), tableName, EnvironmentEdgeManager.currentTime(), new ArrayList<UUID>(), 0L, 0L, mvcc, scopes, attributes);
    Assert.assertEquals(attributes, key.getExtendedAttributes());
    WALProtos.WALKey.Builder builder = key.getBuilder(WALCellCodec.getNoneCompressor());
    WALProtos.WALKey serializedKey = builder.build();
    WALKeyImpl deserializedKey = new WALKeyImpl();
    deserializedKey.readFieldsFromPb(serializedKey, WALCellCodec.getNoneUncompressor());
    // equals() only checks region name, sequence id and write time
    Assert.assertEquals(key, deserializedKey);
    // can't use Map.equals() because byte arrays use reference equality
    Assert.assertEquals(key.getExtendedAttributes().keySet(), deserializedKey.getExtendedAttributes().keySet());
    for (Map.Entry<String, byte[]> entry : deserializedKey.getExtendedAttributes().entrySet()) {
        Assert.assertArrayEquals(key.getExtendedAttribute(entry.getKey()), entry.getValue());
    }
    Assert.assertEquals(key.getReplicationScopes(), deserializedKey.getReplicationScopes());
}
Also used : HashMap(java.util.HashMap) WALKeyImpl(org.apache.hadoop.hbase.wal.WALKeyImpl) WALProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos) UUID(java.util.UUID) Map(java.util.Map) NavigableMap(java.util.NavigableMap) HashMap(java.util.HashMap) TreeMap(java.util.TreeMap) Test(org.junit.Test)
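
A minimal sketch reusing the same constructor and test fields (info, tableName, mvcc, scopes) as above: extended attributes are attached at construction time and read back by name. The attribute name and value here are made up for illustration:

Map<String, byte[]> attrs = new HashMap<>();
attrs.put("origin", Bytes.toBytes("bulk-load"));
WALKeyImpl keyWithAttrs = new WALKeyImpl(info.getEncodedNameAsBytes(), tableName,
    EnvironmentEdgeManager.currentTime(), new ArrayList<UUID>(), 0L, 0L, mvcc, scopes, attrs);
byte[] origin = keyWithAttrs.getExtendedAttribute("origin");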

Aggregations

WALKeyImpl (org.apache.hadoop.hbase.wal.WALKeyImpl): 59
WALEdit (org.apache.hadoop.hbase.wal.WALEdit): 44
Test (org.junit.Test): 42
KeyValue (org.apache.hadoop.hbase.KeyValue): 24
TreeMap (java.util.TreeMap): 22
WAL (org.apache.hadoop.hbase.wal.WAL): 20
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo): 17
Path (org.apache.hadoop.fs.Path): 16
IOException (java.io.IOException): 13
TableName (org.apache.hadoop.hbase.TableName): 12
MultiVersionConcurrencyControl (org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl): 12
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor): 11
WALFactory (org.apache.hadoop.hbase.wal.WALFactory): 10
ArrayList (java.util.ArrayList): 9
Entry (org.apache.hadoop.hbase.wal.WAL.Entry): 9
FileSystem (org.apache.hadoop.fs.FileSystem): 8
WALProvider (org.apache.hadoop.hbase.wal.WALProvider): 8
CompletableFuture (java.util.concurrent.CompletableFuture): 7
AtomicLong (java.util.concurrent.atomic.AtomicLong): 7
Configuration (org.apache.hadoop.conf.Configuration): 7