
Example 1 with MultiVersionConcurrencyControl

use of org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl in project hbase by apache.

the class TestWALRecordReader method setUp.

@Before
public void setUp() throws Exception {
    fs.delete(hbaseDir, true);
    walFs.delete(walRootDir, true);
    mvcc = new MultiVersionConcurrencyControl();
}
Also used: MultiVersionConcurrencyControl (org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl), Before (org.junit.Before)
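For orientation, here is a minimal sketch of the MVCC lifecycle these tests drive. It assumes the begin()/completeAndWait()/getReadPoint() methods of the hbase 2.x MultiVersionConcurrencyControl API; treat it as an illustration, not a canonical usage pattern.

// Sketch: one write transaction through MultiVersionConcurrencyControl.
MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
// begin() hands out the next write point for an in-flight edit.
MultiVersionConcurrencyControl.WriteEntry we = mvcc.begin();
// ... apply the edit (memstore write, WAL append) here ...
// completeAndWait() advances the read point past this write, making it visible to readers.
mvcc.completeAndWait(we);
assert mvcc.getReadPoint() >= 1;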

Example 2 with MultiVersionConcurrencyControl

use of org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl in project hbase by apache.

the class AbstractTestFSWAL method testFindMemStoresEligibleForFlush.

/**
 * On rolling a wal after reaching the threshold, {@link WAL#rollWriter()} returns the list of
 * regions which should be flushed in order to archive the oldest wal file.
 * <p>
 * This method tests this behavior by inserting edits and rolling the wal enough times to reach
 * the max number of logs threshold. It checks whether we get the "right regions and stores" for
 * flush on rolling the wal.
 * @throws Exception
 */
@Test
public void testFindMemStoresEligibleForFlush() throws Exception {
    LOG.debug("testFindMemStoresEligibleForFlush");
    Configuration conf1 = HBaseConfiguration.create(CONF);
    conf1.setInt("hbase.regionserver.maxlogs", 1);
    AbstractFSWAL<?> wal = newWAL(FS, CommonFSUtils.getWALRootDir(conf1), DIR.toString(), HConstants.HREGION_OLDLOGDIR_NAME, conf1, null, true, null, null);
    String cf1 = "cf1";
    String cf2 = "cf2";
    String cf3 = "cf3";
    TableDescriptor t1 = TableDescriptorBuilder.newBuilder(TableName.valueOf("t1")).setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf1)).build();
    TableDescriptor t2 = TableDescriptorBuilder.newBuilder(TableName.valueOf("t2")).setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf1)).build();
    RegionInfo hri1 = RegionInfoBuilder.newBuilder(t1.getTableName()).build();
    RegionInfo hri2 = RegionInfoBuilder.newBuilder(t2.getTableName()).build();
    List<ColumnFamilyDescriptor> cfs = new ArrayList<>();
    cfs.add(ColumnFamilyDescriptorBuilder.of(cf1));
    cfs.add(ColumnFamilyDescriptorBuilder.of(cf2));
    TableDescriptor t3 = TableDescriptorBuilder.newBuilder(TableName.valueOf("t3")).setColumnFamilies(cfs).build();
    RegionInfo hri3 = RegionInfoBuilder.newBuilder(t3.getTableName()).build();
    // add edits and roll the wal
    MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
    NavigableMap<byte[], Integer> scopes1 = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for (byte[] fam : t1.getColumnFamilyNames()) {
        scopes1.put(fam, 0);
    }
    NavigableMap<byte[], Integer> scopes2 = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for (byte[] fam : t2.getColumnFamilyNames()) {
        scopes2.put(fam, 0);
    }
    NavigableMap<byte[], Integer> scopes3 = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for (byte[] fam : t3.getColumnFamilyNames()) {
        scopes3.put(fam, 0);
    }
    try {
        addEdits(wal, hri1, t1, 2, mvcc, scopes1, cf1);
        wal.rollWriter();
        // add some more edits and roll the wal. This would reach the log number threshold
        addEdits(wal, hri1, t1, 2, mvcc, scopes1, cf1);
        wal.rollWriter();
        // with the above rollWriter call, the max logs limit is reached.
        assertEquals(2, wal.getNumRolledLogFiles());
        // get the regions to flush; since there is only one region in the oldest wal, it should
        // return only one region.
        Map<byte[], List<byte[]>> regionsToFlush = wal.findRegionsToForceFlush();
        assertEquals(1, regionsToFlush.size());
        assertEquals(hri1.getEncodedNameAsBytes(), (byte[]) regionsToFlush.keySet().toArray()[0]);
        // insert edits in second region
        addEdits(wal, hri2, t2, 2, mvcc, scopes2, cf1);
        // get the regions to flush, it should still read region1.
        regionsToFlush = wal.findRegionsToForceFlush();
        assertEquals(1, regionsToFlush.size());
        assertEquals(hri1.getEncodedNameAsBytes(), (byte[]) regionsToFlush.keySet().toArray()[0]);
        // flush region 1, and roll the wal file. Only the last wal, which holds the entries
        // for region2, should remain.
        flushRegion(wal, hri1.getEncodedNameAsBytes(), t1.getColumnFamilyNames());
        wal.rollWriter();
        // only one wal should remain now (that is for the second region).
        assertEquals(1, wal.getNumRolledLogFiles());
        // flush the second region
        flushRegion(wal, hri2.getEncodedNameAsBytes(), t2.getColumnFamilyNames());
        wal.rollWriter(true);
        // no wal should remain now.
        assertEquals(0, wal.getNumRolledLogFiles());
        // add edits both to region 1 and region 2, and roll.
        addEdits(wal, hri1, t1, 2, mvcc, scopes1, cf1);
        addEdits(wal, hri2, t2, 2, mvcc, scopes2, cf1);
        wal.rollWriter();
        // add edits and roll the writer, to reach the max logs limit.
        assertEquals(1, wal.getNumRolledLogFiles());
        addEdits(wal, hri1, t1, 2, mvcc, scopes1, cf1);
        wal.rollWriter();
        // it should return two regions to flush, as the oldest wal file has entries
        // for both regions.
        regionsToFlush = wal.findRegionsToForceFlush();
        assertEquals(2, regionsToFlush.size());
        // flush both regions
        flushRegion(wal, hri1.getEncodedNameAsBytes(), t1.getColumnFamilyNames());
        flushRegion(wal, hri2.getEncodedNameAsBytes(), t2.getColumnFamilyNames());
        wal.rollWriter(true);
        assertEquals(0, wal.getNumRolledLogFiles());
        // Add edits to region1, and roll the wal.
        addEdits(wal, hri1, t1, 2, mvcc, scopes1, cf1);
        // tests partial flush: roll during a partial flush, and ensure that the wal is not archived.
        wal.startCacheFlush(hri1.getEncodedNameAsBytes(), t1.getColumnFamilyNames());
        wal.rollWriter();
        wal.completeCacheFlush(hri1.getEncodedNameAsBytes(), HConstants.NO_SEQNUM);
        assertEquals(1, wal.getNumRolledLogFiles());
        // clear test data
        flushRegion(wal, hri1.getEncodedNameAsBytes(), t1.getColumnFamilyNames());
        wal.rollWriter(true);
        // add edits for three families
        addEdits(wal, hri3, t3, 2, mvcc, scopes3, cf1);
        addEdits(wal, hri3, t3, 2, mvcc, scopes3, cf2);
        addEdits(wal, hri3, t3, 2, mvcc, scopes3, cf3);
        wal.rollWriter();
        addEdits(wal, hri3, t3, 2, mvcc, scopes3, cf1);
        wal.rollWriter();
        assertEquals(2, wal.getNumRolledLogFiles());
        // flush one family before archiving the oldest wal
        Set<byte[]> flushedFamilyNames = new HashSet<>();
        flushedFamilyNames.add(Bytes.toBytes(cf1));
        flushRegion(wal, hri3.getEncodedNameAsBytes(), flushedFamilyNames);
        regionsToFlush = wal.findRegionsToForceFlush();
        // then only the two remaining families need to be flushed when archiving the oldest wal
        assertEquals(1, regionsToFlush.size());
        assertEquals(hri3.getEncodedNameAsBytes(), (byte[]) regionsToFlush.keySet().toArray()[0]);
        assertEquals(2, regionsToFlush.get(hri3.getEncodedNameAsBytes()).size());
    } finally {
        if (wal != null) {
            wal.close();
        }
    }
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration), MultiVersionConcurrencyControl (org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl), ArrayList (java.util.ArrayList), RegionInfo (org.apache.hadoop.hbase.client.RegionInfo), CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString), ColumnFamilyDescriptor (org.apache.hadoop.hbase.client.ColumnFamilyDescriptor), TreeMap (java.util.TreeMap), TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor), List (java.util.List), HashSet (java.util.HashSet), Test (org.junit.Test)
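The addEdits(...) helper used above is defined elsewhere in AbstractTestFSWAL and not reproduced on this page. A plausible sketch, modeled on the appendData(...)/WALKeyImpl usage visible in Example 5 below, follows; the choice of row and qualifier is an illustrative assumption, not the real helper.

// Hypothetical reconstruction of addEdits; the actual helper lives in AbstractTestFSWAL.
private void addEdits(WAL wal, RegionInfo hri, TableDescriptor htd, int times,
        MultiVersionConcurrencyControl mvcc, NavigableMap<byte[], Integer> scopes,
        String cf) throws IOException {
    byte[] row = Bytes.toBytes(cf); // assumption: reuse the family name as row/qualifier/value
    byte[] family = Bytes.toBytes(cf);
    for (int i = 0; i < times; i++) {
        long now = EnvironmentEdgeManager.currentTime();
        WALEdit edit = new WALEdit();
        edit.add(new KeyValue(row, family, row, now, row));
        // appendData assigns a sequence id through the mvcc and returns a txid to sync on.
        long txid = wal.appendData(hri,
            new WALKeyImpl(hri.getEncodedNameAsBytes(), htd.getTableName(), now, mvcc, scopes), edit);
        wal.sync(txid);
    }
}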

Example 3 with MultiVersionConcurrencyControl

use of org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl in project hbase by apache.

the class AbstractTestWALReplay method test2727.

/**
 * Tests for hbase-2727.
 * @see <a href="https://issues.apache.org/jira/browse/HBASE-2727">HBASE-2727</a>
 */
@Test
public void test2727() throws Exception {
    // Test being able to have > 1 set of edits in the recovered.edits directory.
    // Ensure edits are replayed properly.
    final TableName tableName = TableName.valueOf("test2727");
    MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
    RegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
    Path basedir = CommonFSUtils.getTableDir(hbaseRootDir, tableName);
    deleteDir(basedir);
    TableDescriptor tableDescriptor = createBasic3FamilyHTD(tableName);
    Region region2 = HBaseTestingUtil.createRegionAndWAL(hri, hbaseRootDir, this.conf, tableDescriptor);
    HBaseTestingUtil.closeRegionAndWAL(region2);
    final byte[] rowName = tableName.getName();
    WAL wal1 = createWAL(this.conf, hbaseRootDir, logName);
    // Add 1k to each family.
    final int countPerFamily = 1000;
    NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for (byte[] fam : tableDescriptor.getColumnFamilyNames()) {
        scopes.put(fam, 0);
    }
    for (ColumnFamilyDescriptor familyDescriptor : tableDescriptor.getColumnFamilies()) {
        addWALEdits(tableName, hri, rowName, familyDescriptor.getName(), countPerFamily, ee, wal1, mvcc, scopes);
    }
    wal1.shutdown();
    runWALSplit(this.conf);
    WAL wal2 = createWAL(this.conf, hbaseRootDir, logName);
    // Add 1k to each family.
    for (ColumnFamilyDescriptor familyDescriptor : tableDescriptor.getColumnFamilies()) {
        addWALEdits(tableName, hri, rowName, familyDescriptor.getName(), countPerFamily, ee, wal2, mvcc, scopes);
    }
    wal2.shutdown();
    runWALSplit(this.conf);
    WAL wal3 = createWAL(this.conf, hbaseRootDir, logName);
    try {
        HRegion region = HRegion.openHRegion(this.conf, this.fs, hbaseRootDir, hri, tableDescriptor, wal3);
        long seqid = region.getOpenSeqNum();
        // The region opens with sequenceId 1. With 6k edits, its sequence number reaches 6k + 1.
        // When opened, this region applies the 6k edits and increments the sequenceId by 1.
        assertTrue(seqid > mvcc.getWritePoint());
        assertEquals(seqid - 1, mvcc.getWritePoint());
        LOG.debug("region.getOpenSeqNum(): " + region.getOpenSeqNum() + ", wal3.id: " + mvcc.getReadPoint());
        // TODO: Scan all.
        region.close();
    } finally {
        wal3.close();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), WAL (org.apache.hadoop.hbase.wal.WAL), MultiVersionConcurrencyControl (org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl), RegionInfo (org.apache.hadoop.hbase.client.RegionInfo), TreeMap (java.util.TreeMap), ColumnFamilyDescriptor (org.apache.hadoop.hbase.client.ColumnFamilyDescriptor), TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), TableName (org.apache.hadoop.hbase.TableName), Region (org.apache.hadoop.hbase.regionserver.Region), HRegion (org.apache.hadoop.hbase.regionserver.HRegion), Test (org.junit.Test)
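runWALSplit(this.conf) is likewise a helper that is not shown here. Judging from the WALSplitter.split(...) call in Example 5, a hedged sketch might look like this; the hbaseRootDir, logName, and wals fields are assumptions carried over from the surrounding test class.

// Hypothetical sketch of runWALSplit: split the WAL files under the log directory
// into per-region recovered.edits files, moving processed logs to the old-log dir.
private void runWALSplit(Configuration conf) throws IOException {
    Path logDir = new Path(hbaseRootDir, logName);
    Path oldLogDir = new Path(hbaseRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
    WALSplitter.split(hbaseRootDir, logDir, oldLogDir, this.fs, conf, wals);
}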

Example 4 with MultiVersionConcurrencyControl

use of org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl in project hbase by apache.

the class TestFSHLog method testSyncRunnerIndexOverflow.

@Test
public void testSyncRunnerIndexOverflow() throws IOException, NoSuchFieldException, SecurityException, IllegalArgumentException, IllegalAccessException {
    final String name = this.name.getMethodName();
    FSHLog log = new FSHLog(FS, CommonFSUtils.getRootDir(CONF), name, HConstants.HREGION_OLDLOGDIR_NAME, CONF, null, true, null, null);
    log.init();
    try {
        Field ringBufferEventHandlerField = FSHLog.class.getDeclaredField("ringBufferEventHandler");
        ringBufferEventHandlerField.setAccessible(true);
        FSHLog.RingBufferEventHandler ringBufferEventHandler = (FSHLog.RingBufferEventHandler) ringBufferEventHandlerField.get(log);
        Field syncRunnerIndexField = FSHLog.RingBufferEventHandler.class.getDeclaredField("syncRunnerIndex");
        syncRunnerIndexField.setAccessible(true);
        syncRunnerIndexField.set(ringBufferEventHandler, Integer.MAX_VALUE - 1);
        TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(this.name.getMethodName())).setColumnFamily(ColumnFamilyDescriptorBuilder.of("row")).build();
        NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        for (byte[] fam : htd.getColumnFamilyNames()) {
            scopes.put(fam, 0);
        }
        RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName()).build();
        MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
        for (int i = 0; i < 10; i++) {
            addEdits(log, hri, htd, 1, mvcc, scopes, "row");
        }
    } finally {
        log.close();
    }
}
Also used: MultiVersionConcurrencyControl (org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl), RegionInfo (org.apache.hadoop.hbase.client.RegionInfo), TreeMap (java.util.TreeMap), TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor), Field (java.lang.reflect.Field), Test (org.junit.Test)
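The reason for seeding syncRunnerIndex with Integer.MAX_VALUE - 1 is that the next increments overflow int, and a naive modulo on a negative index would produce an out-of-bounds array access. A self-contained sketch of the failure mode and the overflow-safe pattern follows (whether FSHLog uses exactly this fix is an assumption):

// Demonstrates why an int index that wraps past Integer.MAX_VALUE breaks a plain %.
public class SyncRunnerIndexSketch {
    public static void main(String[] args) {
        final int numSyncRunners = 5;
        int index = Integer.MAX_VALUE - 1;
        for (int i = 0; i < 4; i++) {
            index++; // wraps to Integer.MIN_VALUE on the second increment
            int naive = index % numSyncRunners;              // can be negative after the wrap
            int safe = Math.floorMod(index, numSyncRunners); // always in [0, numSyncRunners)
            System.out.printf("index=%d naive=%d safe=%d%n", index, naive, safe);
        }
    }
}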

Example 5 with MultiVersionConcurrencyControl

use of org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl in project hbase by apache.

the class TestLogRollAbort method testLogRollAfterSplitStart.

/**
 * Tests the case where a RegionServer enters a GC pause and comes back online after the
 * master has declared it dead and started splitting its WALs.
 * Log rolling after the master-side split has started should fail. See HBASE-2312.
 */
@Test
public void testLogRollAfterSplitStart() throws IOException {
    LOG.info("Verify wal roll after split starts will fail.");
    String logName = ServerName.valueOf("testLogRollAfterSplitStart", 16010, EnvironmentEdgeManager.currentTime()).toString();
    Path thisTestsDir = new Path(HBASELOGDIR, AbstractFSWALProvider.getWALDirectoryName(logName));
    final WALFactory wals = new WALFactory(conf, logName);
    try {
        // put some entries in a WAL
        TableName tableName = TableName.valueOf(this.getClass().getName());
        RegionInfo regionInfo = RegionInfoBuilder.newBuilder(tableName).build();
        WAL log = wals.getWAL(regionInfo);
        MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1);
        int total = 20;
        for (int i = 0; i < total; i++) {
            WALEdit kvs = new WALEdit();
            kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
            NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
            scopes.put(Bytes.toBytes("column"), 0);
            log.appendData(regionInfo, new WALKeyImpl(regionInfo.getEncodedNameAsBytes(), tableName, EnvironmentEdgeManager.currentTime(), mvcc, scopes), kvs);
        }
        // Send the data to HDFS datanodes and close the HDFS writer
        log.sync();
        ((AbstractFSWAL<?>) log).replaceWriter(((FSHLog) log).getOldPath(), null, null);
        // code taken from MasterFileSystem.getLogDirs(), which is called from
        // MasterFileSystem.splitLog() handles RS shutdowns (as observed by the splitting process)
        // rename the directory so a rogue RS doesn't create more WALs
        Path rsSplitDir = thisTestsDir.suffix(AbstractFSWALProvider.SPLITTING_EXT);
        if (!fs.rename(thisTestsDir, rsSplitDir)) {
            throw new IOException("Failed fs.rename for log split: " + thisTestsDir);
        }
        LOG.debug("Renamed region directory: " + rsSplitDir);
        LOG.debug("Processing the old log files.");
        WALSplitter.split(HBASELOGDIR, rsSplitDir, OLDLOGDIR, fs, conf, wals);
        LOG.debug("Trying to roll the WAL.");
        try {
            log.rollWriter();
            Assert.fail("rollWriter() did not throw any exception.");
        } catch (IOException ioe) {
            if (ioe.getCause() instanceof FileNotFoundException) {
                LOG.info("Got the expected exception: ", ioe.getCause());
            } else {
                Assert.fail("Unexpected exception: " + ioe);
            }
        }
    } finally {
        wals.close();
        if (fs.exists(thisTestsDir)) {
            fs.delete(thisTestsDir, true);
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), WAL (org.apache.hadoop.hbase.wal.WAL), KeyValue (org.apache.hadoop.hbase.KeyValue), MultiVersionConcurrencyControl (org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl), FileNotFoundException (java.io.FileNotFoundException), RegionInfo (org.apache.hadoop.hbase.client.RegionInfo), IOException (java.io.IOException), TreeMap (java.util.TreeMap), TableName (org.apache.hadoop.hbase.TableName), WALEdit (org.apache.hadoop.hbase.wal.WALEdit), WALFactory (org.apache.hadoop.hbase.wal.WALFactory), WALKeyImpl (org.apache.hadoop.hbase.wal.WALKeyImpl), Test (org.junit.Test)
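As a style note, on JUnit 4.13+ the try/fail/catch block around rollWriter() can be collapsed with assertThrows; a hedged equivalent, assuming the test suite can use that API:

// Equivalent expectation using org.junit.Assert.assertThrows (JUnit 4.13+).
IOException ioe = Assert.assertThrows(IOException.class, () -> log.rollWriter());
Assert.assertTrue("expected FileNotFoundException as the cause",
    ioe.getCause() instanceof FileNotFoundException);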

Aggregations

MultiVersionConcurrencyControl (org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl): 31 uses
TreeMap (java.util.TreeMap): 25 uses
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo): 25 uses
Path (org.apache.hadoop.fs.Path): 20 uses
Test (org.junit.Test): 20 uses
KeyValue (org.apache.hadoop.hbase.KeyValue): 19 uses
TableName (org.apache.hadoop.hbase.TableName): 14 uses
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor): 14 uses
WALEdit (org.apache.hadoop.hbase.wal.WALEdit): 12 uses
WALKeyImpl (org.apache.hadoop.hbase.wal.WALKeyImpl): 12 uses
WAL (org.apache.hadoop.hbase.wal.WAL): 11 uses
IOException (java.io.IOException): 10 uses
Cell (org.apache.hadoop.hbase.Cell): 8 uses
Configuration (org.apache.hadoop.conf.Configuration): 7 uses
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 5 uses
List (java.util.List): 4 uses
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 4 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 4 uses
WALFactory (org.apache.hadoop.hbase.wal.WALFactory): 4 uses
Method (java.lang.reflect.Method): 3 uses