Example 51 with WALKeyImpl

use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.

the class AbstractTestFSWAL method addEdits.

protected void addEdits(WAL log, RegionInfo hri, TableDescriptor htd, int times,
    MultiVersionConcurrencyControl mvcc, NavigableMap<byte[], Integer> scopes, String cf)
    throws IOException {
    final byte[] row = Bytes.toBytes(cf);
    for (int i = 0; i < times; i++) {
        long timestamp = EnvironmentEdgeManager.currentTime();
        WALEdit cols = new WALEdit();
        // Reuse the family bytes as row, qualifier and value for a minimal edit.
        cols.add(new KeyValue(row, row, row, timestamp, row));
        // NO_SEQUENCE_ID defers sequence-id assignment to the WAL at append time.
        WALKeyImpl key = new WALKeyImpl(hri.getEncodedNameAsBytes(), htd.getTableName(),
            SequenceId.NO_SEQUENCE_ID, timestamp, WALKey.EMPTY_UUIDS, HConstants.NO_NONCE,
            HConstants.NO_NONCE, mvcc, scopes);
        log.appendData(hri, key, cols);
    }
    log.sync();
}
Also used : KeyValue(org.apache.hadoop.hbase.KeyValue) WALEdit(org.apache.hadoop.hbase.wal.WALEdit) WALKeyImpl(org.apache.hadoop.hbase.wal.WALKeyImpl)
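
For context, a caller of addEdits first builds the replication-scope map; because its keys are byte arrays, the map must use Bytes.BYTES_COMPARATOR. A minimal caller sketch, assuming wal, hri, and htd were already created by the test harness (the variable names here are illustrative, not from the source):

    // Hypothetical caller: the scope map needs a byte[]-aware comparator.
    NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    scopes.put(Bytes.toBytes("cf"), HConstants.REPLICATION_SCOPE_LOCAL);
    MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
    // Append three edits for family "cf", then sync them to the WAL.
    addEdits(wal, hri, htd, 3, mvcc, scopes, "cf");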

Example 52 with WALKeyImpl

use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.

the class TestAsyncFSWALRollStuck method testRoll.

@Test
public void testRoll() throws Exception {
    byte[] row = Bytes.toBytes("family");
    WALEdit edit = new WALEdit();
    edit.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setFamily(row)
        .setQualifier(row).setRow(row).setValue(row)
        .setTimestamp(EnvironmentEdgeManager.currentTime()).setType(Type.Put).build());
    WALKeyImpl key1 = new WALKeyImpl(RI.getEncodedNameAsBytes(), TN,
        EnvironmentEdgeManager.currentTime(), MVCC);
    WAL.appendData(RI, key1, edit);
    WALKeyImpl key2 = new WALKeyImpl(RI.getEncodedNameAsBytes(), TN, key1.getWriteTime() + 1, MVCC);
    long txid = WAL.appendData(RI, key2, edit);
    // make sure both edits have been added to unackedAppends, so there are two pending syncs
    UTIL.waitFor(10000, () -> FUTURES.size() == 2);
    FUTURES.poll().completeExceptionally(new IOException("inject error"));
    FUTURES.poll().completeExceptionally(new IOException("inject error"));
    ARRIVE.await();
    // resume after 1 second, to give us enough time to enter the roll state
    EXECUTOR.schedule(() -> RESUME.countDown(), 1, TimeUnit.SECONDS);
    // roll the WAL; before the fix in HBASE-25905 this would hang forever inside
    // waitForSafePoint
    WAL.rollWriter();
    // make sure we can finally succeed
    WAL.sync(txid);
}
Also used : WALEdit(org.apache.hadoop.hbase.wal.WALEdit) WALKeyImpl(org.apache.hadoop.hbase.wal.WALKeyImpl) IOException(java.io.IOException) Test(org.junit.Test)
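
The four-argument WALKeyImpl constructor used above leaves sequence-id assignment to the WAL itself at append time. A minimal sketch of the same append-then-sync pattern, assuming wal, regionInfo, tableName, mvcc, and edit are already initialized (names illustrative):

    // The WAL assigns the sequence id during appendData; the returned txid
    // can then be handed to sync to wait for durability.
    WALKeyImpl key = new WALKeyImpl(regionInfo.getEncodedNameAsBytes(), tableName,
        EnvironmentEdgeManager.currentTime(), mvcc);
    long txid = wal.appendData(regionInfo, key, edit);
    wal.sync(txid);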

Example 53 with WALKeyImpl

use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.

the class TestMetricsWAL method testSlowAppend.

@Test
public void testSlowAppend() throws Exception {
    String testName = name.getMethodName();
    MetricsWALSource source = new MetricsWALSourceImpl(testName, testName, testName, testName);
    MetricsWAL metricsWAL = new MetricsWAL(source);
    TableName tableName = TableName.valueOf("foo");
    WALKey walKey = new WALKeyImpl(null, tableName, -1);
    // One append below the 1000 ms slow-append threshold
    metricsWAL.postAppend(1, 900, walKey, null);
    // Two slow appends above the 1000 ms threshold
    metricsWAL.postAppend(1, 1010, walKey, null);
    metricsWAL.postAppend(1, 2000, walKey, null);
    assertEquals(2, source.getSlowAppendCount());
}
Also used : WALKey(org.apache.hadoop.hbase.wal.WALKey) TableName(org.apache.hadoop.hbase.TableName) WALKeyImpl(org.apache.hadoop.hbase.wal.WALKeyImpl) Test(org.junit.Test)
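
MetricsWAL is a WALActionsListener, so in a real region server it is not invoked directly as in this test but attached to a WAL, which then calls postAppend and postSync on it. A hedged wiring sketch, assuming a wal instance already exists:

    // Registering the listener routes every append/sync timing through
    // MetricsWAL.postAppend / postSync, which feed the counters asserted above.
    wal.registerWALActionsListener(metricsWAL);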

Example 54 with WALKeyImpl

use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.

the class TestHRegion method testSkipRecoveredEditsReplayTheLastFileIgnored.

@Test
public void testSkipRecoveredEditsReplayTheLastFileIgnored() throws Exception {
    byte[] family = Bytes.toBytes("family");
    this.region = initHRegion(tableName, method, CONF, family);
    final WALFactory wals = new WALFactory(CONF, method);
    try {
        Path regiondir = region.getRegionFileSystem().getRegionDir();
        FileSystem fs = region.getRegionFileSystem().getFileSystem();
        byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
        byte[][] columns = region.getTableDescriptor().getColumnFamilyNames().toArray(new byte[0][]);
        assertEquals(0, region.getStoreFileList(columns).size());
        Path recoveredEditsDir = WALSplitUtil.getRegionDirRecoveredEditsDir(regiondir);
        long maxSeqId = 1050;
        long minSeqId = 1000;
        for (long i = minSeqId; i <= maxSeqId; i += 10) {
            Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
            fs.create(recoveredEdits);
            WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);
            long time = System.nanoTime();
            WALEdit edit = null;
            if (i == maxSeqId) {
                edit = WALEdit.createCompaction(region.getRegionInfo(),
                    CompactionDescriptor.newBuilder()
                        .setTableName(ByteString.copyFrom(tableName.getName()))
                        .setFamilyName(ByteString.copyFrom(regionName))
                        .setEncodedRegionName(ByteString.copyFrom(regionName))
                        .setStoreHomeDirBytes(ByteString.copyFrom(Bytes.toBytes(regiondir.toString())))
                        .setRegionName(ByteString.copyFrom(region.getRegionInfo().getRegionName()))
                        .build());
            } else {
                edit = new WALEdit();
                edit.add(new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes.toBytes(i)));
            }
            writer.append(new WAL.Entry(
                new WALKeyImpl(regionName, tableName, i, time, HConstants.DEFAULT_CLUSTER_ID), edit));
            writer.close();
        }
        long recoverSeqId = 1030;
        Map<byte[], Long> maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        MonitoredTask status = TaskMonitor.get().createStatus(method);
        for (HStore store : region.getStores()) {
            maxSeqIdInStores.put(Bytes.toBytes(store.getColumnFamilyName()), recoverSeqId - 1);
        }
        long seqId = region.replayRecoveredEditsIfAny(maxSeqIdInStores, null, status);
        assertEquals(maxSeqId, seqId);
        // assert that the replayed edits were flushed into a single store file
        assertEquals(1, region.getStoreFileList(columns).size());
    } finally {
        HBaseTestingUtil.closeRegionAndWAL(this.region);
        this.region = null;
        wals.close();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) KeyValue(org.apache.hadoop.hbase.KeyValue) WAL(org.apache.hadoop.hbase.wal.WAL) TreeMap(java.util.TreeMap) WALEdit(org.apache.hadoop.hbase.wal.WALEdit) FileSystem(org.apache.hadoop.fs.FileSystem) FaultyFileSystem(org.apache.hadoop.hbase.regionserver.TestHStore.FaultyFileSystem) Writer(org.apache.hadoop.hbase.wal.WALProvider.Writer) AtomicLong(java.util.concurrent.atomic.AtomicLong) ArgumentMatchers.anyLong(org.mockito.ArgumentMatchers.anyLong) WALFactory(org.apache.hadoop.hbase.wal.WALFactory) WALKeyImpl(org.apache.hadoop.hbase.wal.WALKeyImpl) WALProvider(org.apache.hadoop.hbase.wal.WALProvider) AbstractFSWALProvider(org.apache.hadoop.hbase.wal.AbstractFSWALProvider) MonitoredTask(org.apache.hadoop.hbase.monitoring.MonitoredTask) Test(org.junit.Test)
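
A detail worth calling out: recovered-edits file names are the starting sequence ids zero-padded to 19 digits, so lexicographic ordering of file names matches numeric ordering of sequence ids. A quick illustration of the formatting used above:

    // 19-digit zero padding keeps string sort order equal to numeric order.
    String.format("%019d", 1000L);  // -> "0000000000000001000"
    String.format("%019d", 1050L);  // -> "0000000000000001050"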

Example 55 with WALKeyImpl

use of org.apache.hadoop.hbase.wal.WALKeyImpl in project hbase by apache.

the class TestHRegion method testSkipRecoveredEditsReplaySomeIgnored.

@Test
public void testSkipRecoveredEditsReplaySomeIgnored() throws Exception {
    byte[] family = Bytes.toBytes("family");
    this.region = initHRegion(tableName, method, CONF, family);
    final WALFactory wals = new WALFactory(CONF, method);
    try {
        Path regiondir = region.getRegionFileSystem().getRegionDir();
        FileSystem fs = region.getRegionFileSystem().getFileSystem();
        byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
        Path recoveredEditsDir = WALSplitUtil.getRegionDirRecoveredEditsDir(regiondir);
        long maxSeqId = 1050;
        long minSeqId = 1000;
        for (long i = minSeqId; i <= maxSeqId; i += 10) {
            Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
            fs.create(recoveredEdits);
            WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);
            long time = System.nanoTime();
            WALEdit edit = new WALEdit();
            edit.add(new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes.toBytes(i)));
            writer.append(new WAL.Entry(
                new WALKeyImpl(regionName, tableName, i, time, HConstants.DEFAULT_CLUSTER_ID), edit));
            writer.close();
        }
        long recoverSeqId = 1030;
        MonitoredTask status = TaskMonitor.get().createStatus(method);
        Map<byte[], Long> maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        for (HStore store : region.getStores()) {
            maxSeqIdInStores.put(Bytes.toBytes(store.getColumnFamilyName()), recoverSeqId - 1);
        }
        long seqId = region.replayRecoveredEditsIfAny(maxSeqIdInStores, null, status);
        assertEquals(maxSeqId, seqId);
        region.getMVCC().advanceTo(seqId);
        Get get = new Get(row);
        Result result = region.get(get);
        for (long i = minSeqId; i <= maxSeqId; i += 10) {
            List<Cell> kvs = result.getColumnCells(family, Bytes.toBytes(i));
            if (i < recoverSeqId) {
                assertEquals(0, kvs.size());
            } else {
                assertEquals(1, kvs.size());
                assertArrayEquals(Bytes.toBytes(i), CellUtil.cloneValue(kvs.get(0)));
            }
        }
    } finally {
        HBaseTestingUtil.closeRegionAndWAL(this.region);
        this.region = null;
        wals.close();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) KeyValue(org.apache.hadoop.hbase.KeyValue) WAL(org.apache.hadoop.hbase.wal.WAL) TreeMap(java.util.TreeMap) CheckAndMutateResult(org.apache.hadoop.hbase.client.CheckAndMutateResult) Result(org.apache.hadoop.hbase.client.Result) WALEdit(org.apache.hadoop.hbase.wal.WALEdit) FileSystem(org.apache.hadoop.fs.FileSystem) FaultyFileSystem(org.apache.hadoop.hbase.regionserver.TestHStore.FaultyFileSystem) Writer(org.apache.hadoop.hbase.wal.WALProvider.Writer) Get(org.apache.hadoop.hbase.client.Get) AtomicLong(java.util.concurrent.atomic.AtomicLong) ArgumentMatchers.anyLong(org.mockito.ArgumentMatchers.anyLong) WALFactory(org.apache.hadoop.hbase.wal.WALFactory) WALKeyImpl(org.apache.hadoop.hbase.wal.WALKeyImpl) Cell(org.apache.hadoop.hbase.Cell) WALProvider(org.apache.hadoop.hbase.wal.WALProvider) AbstractFSWALProvider(org.apache.hadoop.hbase.wal.AbstractFSWALProvider) MonitoredTask(org.apache.hadoop.hbase.monitoring.MonitoredTask) Test(org.junit.Test)
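
The skip/apply split follows from maxSeqIdInStores: replay ignores any edit whose sequence id is at or below a store's recorded max. With that max set to recoverSeqId - 1 = 1029, edits 1000 through 1020 are skipped and 1030 through 1050 are applied, which is exactly what the read-back loop asserts. A hedged paraphrase of the decision (not the actual HRegion code):

    // Paraphrased skip predicate inside replayRecoveredEditsIfAny (assumption:
    // the comparison is against the per-family max flushed sequence id).
    boolean skip = editSeqId <= maxSeqIdInStores.get(familyBytes);
    // editSeqId 1000..1020 -> skipped; 1030..1050 -> replayed and flushed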

Aggregations

WALKeyImpl (org.apache.hadoop.hbase.wal.WALKeyImpl): 59
WALEdit (org.apache.hadoop.hbase.wal.WALEdit): 44
Test (org.junit.Test): 42
KeyValue (org.apache.hadoop.hbase.KeyValue): 24
TreeMap (java.util.TreeMap): 22
WAL (org.apache.hadoop.hbase.wal.WAL): 20
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo): 17
Path (org.apache.hadoop.fs.Path): 16
IOException (java.io.IOException): 13
TableName (org.apache.hadoop.hbase.TableName): 12
MultiVersionConcurrencyControl (org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl): 12
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor): 11
WALFactory (org.apache.hadoop.hbase.wal.WALFactory): 10
ArrayList (java.util.ArrayList): 9
Entry (org.apache.hadoop.hbase.wal.WAL.Entry): 9
FileSystem (org.apache.hadoop.fs.FileSystem): 8
WALProvider (org.apache.hadoop.hbase.wal.WALProvider): 8
CompletableFuture (java.util.concurrent.CompletableFuture): 7
AtomicLong (java.util.concurrent.atomic.AtomicLong): 7
Configuration (org.apache.hadoop.conf.Configuration): 7