Example 16 with MultiVersionConcurrencyControl

use of org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl in project hbase by apache.

In the class TestWALFactory, the method testAppendClose:

/*
   * We pass different values to recoverFileLease() so that different code paths are covered
   *
   * For this test to pass, requires:
   * 1. HDFS-200 (append support)
   * 2. HDFS-988 (SafeMode should freeze file operations
   *              [FSNamesystem.nextGenerationStampForBlock])
   * 3. HDFS-142 (on restart, maintain pendingCreates)
   */
@Test(timeout = 300000)
public void testAppendClose() throws Exception {
    TableName tableName = TableName.valueOf(currentTest.getMethodName());
    HRegionInfo regioninfo = new HRegionInfo(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false);
    final WAL wal = wals.getWAL(regioninfo.getEncodedNameAsBytes(), regioninfo.getTable().getNamespace());
    final int total = 20;
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor(tableName.getName()));
    NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for (byte[] fam : htd.getFamiliesKeys()) {
        scopes.put(fam, 0);
    }
    MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
    for (int i = 0; i < total; i++) {
        WALEdit kvs = new WALEdit();
        kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
        wal.append(regioninfo, new WALKey(regioninfo.getEncodedNameAsBytes(), tableName, System.currentTimeMillis(), mvcc, scopes), kvs, true);
    }
    // Now call sync to send the data to HDFS datanodes
    wal.sync();
    int namenodePort = cluster.getNameNodePort();
    final Path walPath = AbstractFSWALProvider.getCurrentFileName(wal);
    // Stop the cluster.  (ensure restart since we're sharing MiniDFSCluster)
    try {
        DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
        dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
        TEST_UTIL.shutdownMiniDFSCluster();
        try {
            // wal.writer.close() will throw an exception,
            // but still call this since it closes the LogSyncer thread first
            wal.shutdown();
        } catch (IOException e) {
            LOG.info(e);
        }
        // closing FS last so DFSOutputStream can't call close
        fs.close();
        LOG.info("STOPPED first instance of the cluster");
    } finally {
        // Restart the cluster
        while (cluster.isClusterUp()) {
            LOG.error("Waiting for cluster to go down");
            Thread.sleep(1000);
        }
        assertFalse(cluster.isClusterUp());
        cluster = null;
        for (int i = 0; i < 100; i++) {
            try {
                cluster = TEST_UTIL.startMiniDFSClusterForTestWAL(namenodePort);
                break;
            } catch (BindException e) {
                LOG.info("Sleeping.  BindException bringing up new cluster");
                Threads.sleep(1000);
            }
        }
        cluster.waitActive();
        fs = cluster.getFileSystem();
        LOG.info("STARTED second instance.");
    }
    // set the lease period to be 1 second so that the
    // namenode triggers lease recovery upon append request
    Method setLeasePeriod = cluster.getClass().getDeclaredMethod("setLeasePeriod", new Class[] { Long.TYPE, Long.TYPE });
    setLeasePeriod.setAccessible(true);
    setLeasePeriod.invoke(cluster, 1000L, 1000L);
    try {
        Thread.sleep(1000);
    } catch (InterruptedException e) {
        LOG.info(e);
    }
    // Now try recovering the log, like the HMaster would do
    final FileSystem recoveredFs = fs;
    final Configuration rlConf = conf;
    class RecoverLogThread extends Thread {

        public Exception exception = null;

        public void run() {
            try {
                FSUtils.getInstance(fs, rlConf).recoverFileLease(recoveredFs, walPath, rlConf, null);
            } catch (IOException e) {
                exception = e;
            }
        }
    }
    RecoverLogThread t = new RecoverLogThread();
    t.start();
    // Timeout after 60 sec. Without correct patches, would be an infinite loop
    t.join(60 * 1000);
    if (t.isAlive()) {
        t.interrupt();
        throw new Exception("Timed out waiting for WAL.recoverLog()");
    }
    if (t.exception != null)
        throw t.exception;
    // Make sure you can read all the content
    WAL.Reader reader = wals.createReader(fs, walPath);
    int count = 0;
    WAL.Entry entry = new WAL.Entry();
    while (reader.next(entry) != null) {
        count++;
        assertTrue("Should be one KeyValue per WALEdit", entry.getEdit().getCells().size() == 1);
    }
    assertEquals(total, count);
    reader.close();
    // Reset the lease period
    setLeasePeriod.invoke(cluster, 60000L, 3600000L);
}
Also used : KeyValue(org.apache.hadoop.hbase.KeyValue) Configuration(org.apache.hadoop.conf.Configuration) MultiVersionConcurrencyControl(org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) WALEdit(org.apache.hadoop.hbase.regionserver.wal.WALEdit) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Path(org.apache.hadoop.fs.Path) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) BindException(java.net.BindException) IOException(java.io.IOException) Method(java.lang.reflect.Method) TreeMap(java.util.TreeMap) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) TableName(org.apache.hadoop.hbase.TableName) Test(org.junit.Test)
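
Every example in this listing constructs a MultiVersionConcurrencyControl and hands it to a WALKey, but none exercises the MVCC lifecycle directly. The sketch below illustrates that lifecycle on its own; it assumes the begin()/completeAndWait()/getReadPoint() methods of the MultiVersionConcurrencyControl class these tests use, so treat the exact method set as version-dependent rather than definitive.

import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl.WriteEntry;

public class MvccLifecycleSketch {
    public static void main(String[] args) {
        MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
        // begin() stamps a new write with the next sequence id; readers
        // cannot see the write until it is completed.
        WriteEntry writeEntry = mvcc.begin();
        long before = mvcc.getReadPoint();
        // completeAndWait() marks the write finished and blocks until the
        // global read point has advanced past it, making it visible.
        mvcc.completeAndWait(writeEntry);
        long after = mvcc.getReadPoint();
        System.out.println("read point advanced: " + before + " -> " + after);
    }
}

In the tests above, the mvcc passed through each WALKey is what lets the region assign sequence ids inside this same begin/complete window.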

Example 17 with MultiVersionConcurrencyControl

use of org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl in project hbase by apache.

In the class TestWALFactory, the method testVisitors:

/**
   * Test that a registered WALActionsListener visits each entry before it is
   * written, and that it is no longer invoked once unregistered.
   * @throws Exception
   */
@Test
public void testVisitors() throws Exception {
    final int COL_COUNT = 10;
    final TableName tableName = TableName.valueOf(currentTest.getMethodName());
    final byte[] row = Bytes.toBytes("row");
    final DumbWALActionsListener visitor = new DumbWALActionsListener();
    final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1);
    long timestamp = System.currentTimeMillis();
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor("column"));
    NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for (byte[] fam : htd.getFamiliesKeys()) {
        scopes.put(fam, 0);
    }
    HRegionInfo hri = new HRegionInfo(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
    final WAL log = wals.getWAL(hri.getEncodedNameAsBytes(), hri.getTable().getNamespace());
    log.registerWALActionsListener(visitor);
    for (int i = 0; i < COL_COUNT; i++) {
        WALEdit cols = new WALEdit();
        cols.add(new KeyValue(row, Bytes.toBytes("column"), Bytes.toBytes(Integer.toString(i)), timestamp, new byte[] { (byte) (i + '0') }));
        log.append(hri, new WALKey(hri.getEncodedNameAsBytes(), tableName, System.currentTimeMillis(), mvcc, scopes), cols, true);
    }
    log.sync();
    assertEquals(COL_COUNT, visitor.increments);
    log.unregisterWALActionsListener(visitor);
    WALEdit cols = new WALEdit();
    cols.add(new KeyValue(row, Bytes.toBytes("column"), Bytes.toBytes(Integer.toString(11)), timestamp, new byte[] { (byte) (11 + '0') }));
    log.append(hri, new WALKey(hri.getEncodedNameAsBytes(), tableName, System.currentTimeMillis(), mvcc, scopes), cols, true);
    log.sync();
    assertEquals(COL_COUNT, visitor.increments);
}
Also used : KeyValue(org.apache.hadoop.hbase.KeyValue) MultiVersionConcurrencyControl(org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) TreeMap(java.util.TreeMap) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) WALEdit(org.apache.hadoop.hbase.regionserver.wal.WALEdit) Test(org.junit.Test)
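
The DumbWALActionsListener registered above simply counts visits. A minimal counting listener might look like the following sketch; WALActionsListener's visitLogEntryBeforeWrite overloads have changed across HBase versions, so the exact signature here is an assumption matched to the era of this test code, not a definitive API reference.

import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.wal.WALKey;

class CountingWALActionsListener extends WALActionsListener.Base {
    int increments = 0;

    @Override
    public void visitLogEntryBeforeWrite(WALKey logKey, WALEdit logEdit) {
        // Called once per append before the entry reaches the writer,
        // which is why testVisitors expects exactly COL_COUNT increments.
        increments++;
    }
}

Register it with log.registerWALActionsListener(...) before appending and unregister it when counting should stop, exactly as testVisitors does.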

Example 18 with MultiVersionConcurrencyControl

use of org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl in project hbase by apache.

In the class TestWALFactory, the method testEditAdd:

/**
   * Tests that we can write out an edit, close, and then read it back in again.
   * @throws IOException
   */
@Test
public void testEditAdd() throws IOException {
    final int COL_COUNT = 10;
    final HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(currentTest.getMethodName())).addFamily(new HColumnDescriptor("column"));
    NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for (byte[] fam : htd.getFamiliesKeys()) {
        scopes.put(fam, 0);
    }
    final byte[] row = Bytes.toBytes("row");
    WAL.Reader reader = null;
    try {
        final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1);
        // Write columns named 0, 1, 2, etc., each with a single-byte value
        // '0', '1', '2'...
        long timestamp = System.currentTimeMillis();
        WALEdit cols = new WALEdit();
        for (int i = 0; i < COL_COUNT; i++) {
            cols.add(new KeyValue(row, Bytes.toBytes("column"), Bytes.toBytes(Integer.toString(i)), timestamp, new byte[] { (byte) (i + '0') }));
        }
        HRegionInfo info = new HRegionInfo(htd.getTableName(), row, Bytes.toBytes(Bytes.toString(row) + "1"), false);
        final WAL log = wals.getWAL(info.getEncodedNameAsBytes(), info.getTable().getNamespace());
        final long txid = log.append(info, new WALKey(info.getEncodedNameAsBytes(), htd.getTableName(), System.currentTimeMillis(), mvcc, scopes), cols, true);
        log.sync(txid);
        log.startCacheFlush(info.getEncodedNameAsBytes(), htd.getFamiliesKeys());
        log.completeCacheFlush(info.getEncodedNameAsBytes());
        log.shutdown();
        Path filename = AbstractFSWALProvider.getCurrentFileName(log);
        // Now open a reader on the log and assert append worked.
        reader = wals.createReader(fs, filename);
        // All columns above went into a single row, so the log holds just one
        // entry; that's why the loop bound below is '1'.
        for (int i = 0; i < 1; i++) {
            WAL.Entry entry = reader.next(null);
            if (entry == null)
                break;
            WALKey key = entry.getKey();
            WALEdit val = entry.getEdit();
            assertTrue(Bytes.equals(info.getEncodedNameAsBytes(), key.getEncodedRegionName()));
            assertTrue(htd.getTableName().equals(key.getTablename()));
            Cell cell = val.getCells().get(0);
            assertTrue(Bytes.equals(row, 0, row.length, cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()));
            assertEquals((byte) (i + '0'), CellUtil.cloneValue(cell)[0]);
            System.out.println(key + " " + val);
        }
    } finally {
        if (reader != null) {
            reader.close();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) KeyValue(org.apache.hadoop.hbase.KeyValue) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) MultiVersionConcurrencyControl(org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl) TreeMap(java.util.TreeMap) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) WALEdit(org.apache.hadoop.hbase.regionserver.wal.WALEdit) Cell(org.apache.hadoop.hbase.Cell) Test(org.junit.Test)
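
testAppendClose and testEditAdd read the log back with two different idioms: reusing one WAL.Entry across calls versus passing null so the reader allocates a fresh entry each time. The sketch below shows both; it is a minimal sketch assuming an already-written WAL file at walPath and an initialized WALFactory (the method and parameter names are illustrative).

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;

public class WALReadIdioms {
    static int countEntries(WALFactory wals, FileSystem fs, Path walPath) throws IOException {
        int count = 0;
        WAL.Reader reader = wals.createReader(fs, walPath);
        try {
            // Idiom 1 (testAppendClose): reuse a single Entry to avoid
            // allocating one object per record.
            WAL.Entry reused = new WAL.Entry();
            while (reader.next(reused) != null) {
                count++;
            }
            // Idiom 2 (testEditAdd): reader.next(null) allocates a new Entry
            // per call and returns null at end of file.
        } finally {
            reader.close();
        }
        return count;
    }
}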

Example 19 with MultiVersionConcurrencyControl

use of org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl in project hbase by apache.

In the class TestFSHLogProvider, the method setUp:

@Before
public void setUp() throws Exception {
    mvcc = new MultiVersionConcurrencyControl();
    // Wipe the mini-cluster filesystem root so every test starts empty.
    FileStatus[] entries = fs.listStatus(new Path("/"));
    for (FileStatus dir : entries) {
        fs.delete(dir.getPath(), true);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) MultiVersionConcurrencyControl(org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl) Before(org.junit.Before)

Example 20 with MultiVersionConcurrencyControl

use of org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl in project hbase by apache.

In the class TestWALObserver, the method testWALCoprocessorReplay:

/**
   * Test WAL replay behavior with WALObserver.
   */
@Test
public void testWALCoprocessorReplay() throws Exception {
    // WAL replay is handled at HRegion::replayRecoveredEdits(), which is
    // ultimately called by HRegion::initialize()
    final TableName tableName = TableName.valueOf(currentTest.getMethodName());
    final HTableDescriptor htd = getBasic3FamilyHTableDescriptor(tableName);
    MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
    // final HRegionInfo hri =
    // createBasic3FamilyHRegionInfo(Bytes.toString(tableName));
    // final HRegionInfo hri1 =
    // createBasic3FamilyHRegionInfo(Bytes.toString(tableName));
    final HRegionInfo hri = new HRegionInfo(tableName, null, null);
    final Path basedir = FSUtils.getTableDir(this.hbaseRootDir, tableName);
    deleteDir(basedir);
    fs.mkdirs(new Path(basedir, hri.getEncodedName()));
    final Configuration newConf = HBaseConfiguration.create(this.conf);
    // WAL wal = new WAL(this.fs, this.dir, this.oldLogDir, this.conf);
    WAL wal = wals.getWAL(UNSPECIFIED_REGION, null);
    // Put p = creatPutWith2Families(TEST_ROW);
    WALEdit edit = new WALEdit();
    long now = EnvironmentEdgeManager.currentTime();
    final int countPerFamily = 1000;
    NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for (HColumnDescriptor hcd : htd.getFamilies()) {
        scopes.put(hcd.getName(), 0);
    }
    for (HColumnDescriptor hcd : htd.getFamilies()) {
        addWALEdits(tableName, hri, TEST_ROW, hcd.getName(), countPerFamily, EnvironmentEdgeManager.getDelegate(), wal, scopes, mvcc);
    }
    wal.append(hri, new WALKey(hri.getEncodedNameAsBytes(), tableName, now, mvcc, scopes), edit, true);
    // sync to fs.
    wal.sync();
    User user = HBaseTestingUtility.getDifferentUser(newConf, ".replay.wal.secondtime");
    user.runAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Path p = runWALSplit(newConf);
            LOG.info("WALSplit path == " + p);
            FileSystem newFS = FileSystem.get(newConf);
            // Make a new wal for new region open.
            final WALFactory wals2 = new WALFactory(conf, null, ServerName.valueOf(currentTest.getMethodName() + "2", 16010, System.currentTimeMillis()).toString());
            WAL wal2 = wals2.getWAL(UNSPECIFIED_REGION, null);
            HRegion region = HRegion.openHRegion(newConf, FileSystem.get(newConf), hbaseRootDir, hri, htd, wal2, TEST_UTIL.getHBaseCluster().getRegionServer(0), null);
            long seqid2 = region.getOpenSeqNum();
            SampleRegionWALObserver cp2 = (SampleRegionWALObserver) region.getCoprocessorHost().findCoprocessor(SampleRegionWALObserver.class.getName());
            // TODO: asserting here is problematic.
            assertNotNull(cp2);
            assertTrue(cp2.isPreWALRestoreCalled());
            assertTrue(cp2.isPostWALRestoreCalled());
            region.close();
            wals2.close();
            return null;
        }
    });
}
Also used : Path(org.apache.hadoop.fs.Path) WAL(org.apache.hadoop.hbase.wal.WAL) User(org.apache.hadoop.hbase.security.User) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) MultiVersionConcurrencyControl(org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) TreeMap(java.util.TreeMap) IOException(java.io.IOException) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) WALKey(org.apache.hadoop.hbase.wal.WALKey) TableName(org.apache.hadoop.hbase.TableName) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) WALEdit(org.apache.hadoop.hbase.regionserver.wal.WALEdit) FileSystem(org.apache.hadoop.fs.FileSystem) WALFactory(org.apache.hadoop.hbase.wal.WALFactory) Test(org.junit.Test)
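
SampleRegionWALObserver, fetched from the region's coprocessor host above, records whether the replay hooks fired. A hedged sketch of such an observer follows; the class name is illustrative, and the pre/postWALRestore signatures are assumptions based on the HBase 1.x-era coprocessor API this test appears to target.

import java.io.IOException;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.wal.WALKey;

public class ReplayTrackingObserver extends BaseRegionObserver {
    private volatile boolean preWALRestoreCalled = false;
    private volatile boolean postWALRestoreCalled = false;

    @Override
    public void preWALRestore(ObserverContext<? extends RegionCoprocessorEnvironment> ctx,
            HRegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException {
        // Fired for each recovered edit before it is replayed into the region.
        preWALRestoreCalled = true;
    }

    @Override
    public void postWALRestore(ObserverContext<? extends RegionCoprocessorEnvironment> ctx,
            HRegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException {
        // Fired after the recovered edit has been replayed.
        postWALRestoreCalled = true;
    }

    public boolean isPreWALRestoreCalled() { return preWALRestoreCalled; }
    public boolean isPostWALRestoreCalled() { return postWALRestoreCalled; }
}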

Aggregations

MultiVersionConcurrencyControl (org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl): 22
TreeMap (java.util.TreeMap): 20
Test (org.junit.Test): 17
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 16
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 16
Path (org.apache.hadoop.fs.Path): 14
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 14
WALEdit (org.apache.hadoop.hbase.regionserver.wal.WALEdit): 13
KeyValue (org.apache.hadoop.hbase.KeyValue): 12
TableName (org.apache.hadoop.hbase.TableName): 9
WAL (org.apache.hadoop.hbase.wal.WAL): 9
WALKey (org.apache.hadoop.hbase.wal.WALKey): 8
Configuration (org.apache.hadoop.conf.Configuration): 5
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 5
IOException (java.io.IOException): 4
FileSystem (org.apache.hadoop.fs.FileSystem): 4
Cell (org.apache.hadoop.hbase.Cell): 4
WALFactory (org.apache.hadoop.hbase.wal.WALFactory): 4
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 3
Get (org.apache.hadoop.hbase.client.Get): 3