
Example 6 with MultiVersionConcurrencyControl

Use of org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl in project hbase by apache.

The class TestWALActionsListener, method testActionListener.

/**
   * Add a bunch of dummy data and roll the logs every two inserts. We
   * should end up with 10 rolled files (plus the roll called in
   * the constructor). Also test adding a listener while it's running.
   */
@Test
public void testActionListener() throws Exception {
    DummyWALActionsListener observer = new DummyWALActionsListener();
    List<WALActionsListener> list = new ArrayList<>(1);
    list.add(observer);
    final WALFactory wals = new WALFactory(conf, list, "testActionListener");
    DummyWALActionsListener laterobserver = new DummyWALActionsListener();
    HRegionInfo hri = new HRegionInfo(TableName.valueOf(SOME_BYTES), SOME_BYTES, SOME_BYTES, false);
    final WAL wal = wals.getWAL(hri.getEncodedNameAsBytes(), hri.getTable().getNamespace());
    MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
    for (int i = 0; i < 20; i++) {
        byte[] b = Bytes.toBytes(i + "");
        KeyValue kv = new KeyValue(b, b, b);
        WALEdit edit = new WALEdit();
        edit.add(kv);
        HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(SOME_BYTES));
        htd.addFamily(new HColumnDescriptor(b));
        NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        for (byte[] fam : htd.getFamiliesKeys()) {
            scopes.put(fam, 0);
        }
        final long txid = wal.append(hri, new WALKey(hri.getEncodedNameAsBytes(), TableName.valueOf(b), 0, mvcc, scopes), edit, true);
        wal.sync(txid);
        if (i == 10) {
            wal.registerWALActionsListener(laterobserver);
        }
        if (i % 2 == 0) {
            wal.rollWriter();
        }
    }
    wal.close();
    assertEquals(11, observer.preLogRollCounter);
    assertEquals(11, observer.postLogRollCounter);
    assertEquals(5, laterobserver.preLogRollCounter);
    assertEquals(5, laterobserver.postLogRollCounter);
    assertEquals(1, observer.closedCount);
}
Also used : WAL(org.apache.hadoop.hbase.wal.WAL) KeyValue(org.apache.hadoop.hbase.KeyValue) MultiVersionConcurrencyControl(org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) ArrayList(java.util.ArrayList) TreeMap(java.util.TreeMap) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) WALKey(org.apache.hadoop.hbase.wal.WALKey) WALFactory(org.apache.hadoop.hbase.wal.WALFactory) Test(org.junit.Test)
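For orientation: the test above only hands the mvcc instance to WALKey, but MultiVersionConcurrencyControl itself is driven through a begin/complete lifecycle. Below is a minimal standalone sketch of that lifecycle; begin(), completeAndWait() and getReadPoint() are the real API of org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl, while the MvccSketch class and the println output are purely illustrative.

import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;

public class MvccSketch {
    public static void main(String[] args) {
        MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
        long before = mvcc.getReadPoint();
        // Start a write transaction; the returned entry carries the write number for this mutation.
        MultiVersionConcurrencyControl.WriteEntry writeEntry = mvcc.begin();
        // ... apply the mutation (memstore update, WAL append) here ...
        // Mark the write complete and wait for the read point to advance past it,
        // so concurrent readers can see the mutation from now on.
        mvcc.completeAndWait(writeEntry);
        long after = mvcc.getReadPoint();
        System.out.println("read point advanced from " + before + " to " + after);
    }
}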

Example 7 with MultiVersionConcurrencyControl

Use of org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl in project hbase by apache.

The class TestWALRecordReader, method setUp.

@Before
public void setUp() throws Exception {
    fs.delete(hbaseDir, true);
    walFs.delete(walRootDir, true);
    mvcc = new MultiVersionConcurrencyControl();
}
Also used : MultiVersionConcurrencyControl(org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl) Before(org.junit.Before)

Example 8 with MultiVersionConcurrencyControl

Use of org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl in project hbase by apache.

The class TestWALObserver, method testEmptyWALEditAreNotSeen.

/**
   * Coprocessors shouldn't be notified of empty WALEdits.
   */
@Test
public void testEmptyWALEditAreNotSeen() throws Exception {
    final HRegionInfo hri = createBasic3FamilyHRegionInfo(Bytes.toString(TEST_TABLE));
    final HTableDescriptor htd = createBasic3FamilyHTD(Bytes.toString(TEST_TABLE));
    final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
    NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for (byte[] fam : htd.getFamiliesKeys()) {
        scopes.put(fam, 0);
    }
    WAL log = wals.getWAL(UNSPECIFIED_REGION, null);
    try {
        SampleRegionWALObserver cp = getCoprocessor(log, SampleRegionWALObserver.class);
        cp.setTestValues(TEST_TABLE, null, null, null, null, null, null, null);
        assertFalse(cp.isPreWALWriteCalled());
        assertFalse(cp.isPostWALWriteCalled());
        final long now = EnvironmentEdgeManager.currentTime();
        long txid = log.append(hri, new WALKey(hri.getEncodedNameAsBytes(), hri.getTable(), now, mvcc, scopes), new WALEdit(), true);
        log.sync(txid);
        assertFalse("Empty WALEdit should skip coprocessor evaluation.", cp.isPreWALWriteCalled());
        assertFalse("Empty WALEdit should skip coprocessor evaluation.", cp.isPostWALWriteCalled());
    } finally {
        log.close();
    }
}
Also used : HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) WALKey(org.apache.hadoop.hbase.wal.WALKey) WAL(org.apache.hadoop.hbase.wal.WAL) WALEdit(org.apache.hadoop.hbase.regionserver.wal.WALEdit) MultiVersionConcurrencyControl(org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl) TreeMap(java.util.TreeMap) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) Test(org.junit.Test)
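The behavior pinned down by this test hinges on the emptiness of the WALEdit passed to append(). As a quick illustration, the following hedged sketch (WALEdit.isEmpty(), add() and size() are real API; the class name and values are made up) shows the difference between an empty edit and one carrying a cell:

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;

public class WALEditEmptinessSketch {
    public static void main(String[] args) {
        // An edit with no cells: the WAL/coprocessor path is expected to skip it.
        WALEdit empty = new WALEdit();
        System.out.println("empty edit? " + empty.isEmpty());
        // Adding a single cell turns it into a "real" edit that coprocessors should see.
        byte[] b = Bytes.toBytes("r");
        WALEdit nonEmpty = new WALEdit();
        nonEmpty.add(new KeyValue(b, b, b));
        System.out.println("non-empty? " + !nonEmpty.isEmpty() + ", cells: " + nonEmpty.size());
    }
}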

Example 9 with MultiVersionConcurrencyControl

Use of org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl in project hbase by apache.

The class TestReplicationSmallTests, method testReplicationInReplay.

/**
   * Test for HBASE-15259 (WALEdits under replay will also be replicated): verifies that an
   * edit appended as a replay edit is not shipped to the peer cluster.
   */
@Test
public void testReplicationInReplay() throws Exception {
    final TableName tableName = htable1.getName();
    HRegion region = utility1.getMiniHBaseCluster().getRegions(tableName).get(0);
    HRegionInfo hri = region.getRegionInfo();
    NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for (byte[] fam : htable1.getTableDescriptor().getFamiliesKeys()) {
        scopes.put(fam, 1);
    }
    final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
    int index = utility1.getMiniHBaseCluster().getServerWith(hri.getRegionName());
    WAL wal = utility1.getMiniHBaseCluster().getRegionServer(index).getWAL(region.getRegionInfo());
    final byte[] rowName = Bytes.toBytes("testReplicationInReplay");
    final byte[] qualifier = Bytes.toBytes("q");
    final byte[] value = Bytes.toBytes("v");
    WALEdit edit = new WALEdit(true);
    long now = EnvironmentEdgeManager.currentTime();
    edit.add(new KeyValue(rowName, famName, qualifier, now, value));
    WALKey walKey = new WALKey(hri.getEncodedNameAsBytes(), tableName, now, mvcc, scopes);
    wal.append(hri, walKey, edit, true);
    wal.sync();
    Get get = new Get(rowName);
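    // The edit above was appended as a replay edit (new WALEdit(true)), so it must NOT reach the
    // peer cluster; poll htable2 for the whole retry window and fail if the row ever shows up.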
    for (int i = 0; i < NB_RETRIES; i++) {
        if (i == NB_RETRIES - 1) {
            break;
        }
        Result res = htable2.get(get);
        if (res.size() >= 1) {
            fail("Not supposed to be replicated for " + Bytes.toString(res.getRow()));
        } else {
            LOG.info("Row not replicated, let's wait a bit more...");
            Thread.sleep(SLEEP_TIME);
        }
    }
}
Also used : WAL(org.apache.hadoop.hbase.wal.WAL) KeyValue(org.apache.hadoop.hbase.KeyValue) MultiVersionConcurrencyControl(org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl) TreeMap(java.util.TreeMap) Result(org.apache.hadoop.hbase.client.Result) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) WALKey(org.apache.hadoop.hbase.wal.WALKey) TableName(org.apache.hadoop.hbase.TableName) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) WALEdit(org.apache.hadoop.hbase.regionserver.wal.WALEdit) Get(org.apache.hadoop.hbase.client.Get) Test(org.junit.Test)
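A note on the scope map: this example puts 1 where the earlier examples put 0. Those magic numbers correspond to HConstants.REPLICATION_SCOPE_GLOBAL and HConstants.REPLICATION_SCOPE_LOCAL. The hedged sketch below rebuilds the same kind of map with the named constants (the constants are real HBase API; the family names are invented for illustration):

import java.util.NavigableMap;
import java.util.TreeMap;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.util.Bytes;

public class ScopeMapSketch {
    public static void main(String[] args) {
        byte[][] families = { Bytes.toBytes("f1"), Bytes.toBytes("f2") };
        NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        for (byte[] fam : families) {
            // REPLICATION_SCOPE_GLOBAL (1) marks a family's edits for cross-cluster replication;
            // REPLICATION_SCOPE_LOCAL (0) keeps them on the local cluster only.
            scopes.put(fam, HConstants.REPLICATION_SCOPE_GLOBAL);
        }
        System.out.println(scopes.size() + " families marked with scope "
                + HConstants.REPLICATION_SCOPE_GLOBAL);
    }
}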

Example 10 with MultiVersionConcurrencyControl

Use of org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl in project hbase by apache.

The class TestWALReaderOnSecureWAL, method writeWAL.

@SuppressWarnings("deprecation")
private Path writeWAL(final WALFactory wals, final String tblName, boolean offheap) throws IOException {
    Configuration conf = TEST_UTIL.getConfiguration();
    String clsName = conf.get(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, WALCellCodec.class.getName());
    conf.setClass(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, SecureWALCellCodec.class, WALCellCodec.class);
    try {
        TableName tableName = TableName.valueOf(tblName);
        HTableDescriptor htd = new HTableDescriptor(tableName);
        htd.addFamily(new HColumnDescriptor(tableName.getName()));
        NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        for (byte[] fam : htd.getFamiliesKeys()) {
            scopes.put(fam, 0);
        }
        HRegionInfo regioninfo = new HRegionInfo(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false);
        final int total = 10;
        final byte[] row = Bytes.toBytes("row");
        final byte[] family = Bytes.toBytes("family");
        final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1);
        // Write the WAL
        WAL wal = wals.getWAL(regioninfo.getEncodedNameAsBytes(), regioninfo.getTable().getNamespace());
        for (int i = 0; i < total; i++) {
            WALEdit kvs = new WALEdit();
            KeyValue kv = new KeyValue(row, family, Bytes.toBytes(i), value);
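            // The offheap branch copies the cell into a direct ByteBuffer so the append path
            // is exercised with an off-heap cell as well as an on-heap one.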
            if (offheap) {
                ByteBuffer bb = ByteBuffer.allocateDirect(kv.getBuffer().length);
                bb.put(kv.getBuffer());
                ByteBufferKeyValue offheapKV = new ByteBufferKeyValue(bb, 0, kv.getLength());
                kvs.add(offheapKV);
            } else {
                kvs.add(kv);
            }
            wal.append(regioninfo, new WALKey(regioninfo.getEncodedNameAsBytes(), tableName, System.currentTimeMillis(), mvcc, scopes), kvs, true);
        }
        wal.sync();
        final Path walPath = AbstractFSWALProvider.getCurrentFileName(wal);
        wal.shutdown();
        return walPath;
    } finally {
        // restore the cell codec class
        conf.set(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, clsName);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) ByteBufferKeyValue(org.apache.hadoop.hbase.ByteBufferKeyValue) KeyValue(org.apache.hadoop.hbase.KeyValue) Configuration(org.apache.hadoop.conf.Configuration) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) MultiVersionConcurrencyControl(org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl) TreeMap(java.util.TreeMap) ByteBuffer(java.nio.ByteBuffer) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) ByteBufferKeyValue(org.apache.hadoop.hbase.ByteBufferKeyValue) WALEdit(org.apache.hadoop.hbase.regionserver.wal.WALEdit) SecureWALCellCodec(org.apache.hadoop.hbase.regionserver.wal.SecureWALCellCodec) WALCellCodec(org.apache.hadoop.hbase.regionserver.wal.WALCellCodec)
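A natural follow-up to writeWAL is reading the file back to verify its contents. The sketch below is one hedged way to do that; WALFactory.createReader(FileSystem, Path) and WAL.Reader.next() are real API, while the WALReadBackSketch class and its countCells helper are hypothetical and assume the caller still has the WALFactory, FileSystem and Path used by the test.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;

final class WALReadBackSketch {
    // Counts the cells in a WAL file previously produced by writeWAL().
    static int countCells(WALFactory wals, FileSystem fs, Path walPath) throws IOException {
        int count = 0;
        try (WAL.Reader reader = wals.createReader(fs, walPath)) {
            WAL.Entry entry;
            while ((entry = reader.next()) != null) {
                // Each entry pairs the WALKey written by append() with its WALEdit cells.
                count += entry.getEdit().size();
            }
        }
        return count;
    }
}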

Aggregations

MultiVersionConcurrencyControl (org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl) 22
TreeMap (java.util.TreeMap) 20
Test (org.junit.Test) 17
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo) 16
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor) 16
Path (org.apache.hadoop.fs.Path) 14
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor) 14
WALEdit (org.apache.hadoop.hbase.regionserver.wal.WALEdit) 13
KeyValue (org.apache.hadoop.hbase.KeyValue) 12
TableName (org.apache.hadoop.hbase.TableName) 9
WAL (org.apache.hadoop.hbase.wal.WAL) 9
WALKey (org.apache.hadoop.hbase.wal.WALKey) 8
Configuration (org.apache.hadoop.conf.Configuration) 5
HRegion (org.apache.hadoop.hbase.regionserver.HRegion) 5
IOException (java.io.IOException) 4
FileSystem (org.apache.hadoop.fs.FileSystem) 4
Cell (org.apache.hadoop.hbase.Cell) 4
WALFactory (org.apache.hadoop.hbase.wal.WALFactory) 4
AtomicInteger (java.util.concurrent.atomic.AtomicInteger) 3
Get (org.apache.hadoop.hbase.client.Get) 3