
Example 46 with WALKey

Use of org.apache.hadoop.hbase.wal.WALKey in project hbase by apache.

From the class AbstractTestFSWAL, method addEdits:

protected void addEdits(WAL log, HRegionInfo hri, HTableDescriptor htd, int times, MultiVersionConcurrencyControl mvcc, NavigableMap<byte[], Integer> scopes) throws IOException {
    final byte[] row = Bytes.toBytes("row");
    for (int i = 0; i < times; i++) {
        long timestamp = System.currentTimeMillis();
        WALEdit cols = new WALEdit();
        cols.add(new KeyValue(row, row, row, timestamp, row));
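        // NO_SEQUENCE_ID defers sequence id assignment to the WAL at append time; EMPTY_UUIDS and NO_NONCE mean no replication cluster ids or nonces accompany the edit.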
        WALKey key = new WALKey(hri.getEncodedNameAsBytes(), htd.getTableName(), WALKey.NO_SEQUENCE_ID, timestamp, WALKey.EMPTY_UUIDS, HConstants.NO_NONCE, HConstants.NO_NONCE, mvcc, scopes);
        log.append(hri, key, cols, true);
    }
    log.sync();
}
Also used: WALKey(org.apache.hadoop.hbase.wal.WALKey), KeyValue(org.apache.hadoop.hbase.KeyValue)
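
A minimal sketch of how a test might drive this helper, assuming a WAL named wal, an HRegionInfo named hri, and an HTableDescriptor named htd are already set up; the scope and mvcc setup mirrors the other examples on this page, and this caller is illustrative rather than part of AbstractTestFSWAL:

NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
for (byte[] fam : htd.getFamiliesKeys()) {
    // 0 is HConstants.REPLICATION_SCOPE_LOCAL, i.e. these edits are not replicated
    scopes.put(fam, 0);
}
MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
// Append ten edits; addEdits syncs the log before returning
addEdits(wal, hri, htd, 10, mvcc, scopes);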

Example 47 with WALKey

Use of org.apache.hadoop.hbase.wal.WALKey in project hbase by apache.

From the class AbstractTestFSWAL, method testFlushSequenceIdIsGreaterThanAllEditsInHFile:

/**
   * Test that a flush is guaranteed to have a sequence id beyond the last edit appended. We do
   * this by slowing appends in the background ring buffer thread while calling flush in the
   * foreground. The addition of the sync over HRegion in flush should fix an issue where flush
   * was returning before all of its appends had made it out to the WAL (HBASE-11109).
   * @throws IOException
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-11109">HBASE-11109</a>
   */
@Test
public void testFlushSequenceIdIsGreaterThanAllEditsInHFile() throws IOException {
    String testName = currentTest.getMethodName();
    final TableName tableName = TableName.valueOf(testName);
    final HRegionInfo hri = new HRegionInfo(tableName);
    final byte[] rowName = tableName.getName();
    final HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor("f"));
    HRegion r = HBaseTestingUtility.createRegionAndWAL(hri, TEST_UTIL.getDefaultRootDirPath(), TEST_UTIL.getConfiguration(), htd);
    HBaseTestingUtility.closeRegionAndWAL(r);
    final int countPerFamily = 10;
    final AtomicBoolean goslow = new AtomicBoolean(false);
    NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for (byte[] fam : htd.getFamiliesKeys()) {
        scopes.put(fam, 0);
    }
    // subclass and doctor a method.
    AbstractFSWAL<?> wal = newSlowWAL(FS, FSUtils.getWALRootDir(CONF), DIR.toString(), testName, CONF, null, true, null, null, new Runnable() {

        @Override
        public void run() {
            if (goslow.get()) {
                Threads.sleep(100);
                LOG.debug("Slept 100ms before append");
            }
        }
    });
    HRegion region = HRegion.openHRegion(TEST_UTIL.getConfiguration(), TEST_UTIL.getTestFileSystem(), TEST_UTIL.getDefaultRootDirPath(), hri, htd, wal);
    EnvironmentEdge ee = EnvironmentEdgeManager.getDelegate();
    try {
        List<Put> puts = null;
        for (HColumnDescriptor hcd : htd.getFamilies()) {
            puts = TestWALReplay.addRegionEdits(rowName, hcd.getName(), countPerFamily, ee, region, "x");
        }
        // Now assert edits made it in.
        final Get g = new Get(rowName);
        Result result = region.get(g);
        assertEquals(countPerFamily * htd.getFamilies().size(), result.size());
        // Construct a WALEdit and add it a few times to the WAL.
        WALEdit edits = new WALEdit();
        for (Put p : puts) {
            CellScanner cs = p.cellScanner();
            while (cs.advance()) {
                edits.add(cs.current());
            }
        }
        // Add any old cluster id.
        List<UUID> clusterIds = new ArrayList<>(1);
        clusterIds.add(UUID.randomUUID());
        // Now make appends run slow.
        goslow.set(true);
        for (int i = 0; i < countPerFamily; i++) {
            final HRegionInfo info = region.getRegionInfo();
            final WALKey logkey = new WALKey(info.getEncodedNameAsBytes(), tableName, System.currentTimeMillis(), clusterIds, -1, -1, region.getMVCC(), scopes);
            wal.append(info, logkey, edits, true);
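            // completeAndWait finishes the mvcc transaction the append attached to the key, making the edit visible before the next iteration.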
            region.getMVCC().completeAndWait(logkey.getWriteEntry());
        }
        region.flush(true);
        // FlushResult.flushSequenceId is not visible here so go get the current sequence id.
        long currentSequenceId = region.getReadPoint(null);
        // Now release the appends
        goslow.set(false);
        assertTrue(currentSequenceId >= region.getReadPoint(null));
    } finally {
        region.close(true);
        wal.close();
    }
}
Also used: ArrayList(java.util.ArrayList), CellScanner(org.apache.hadoop.hbase.CellScanner), Result(org.apache.hadoop.hbase.client.Result), HRegionInfo(org.apache.hadoop.hbase.HRegionInfo), WALKey(org.apache.hadoop.hbase.wal.WALKey), UUID(java.util.UUID), HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor), EnvironmentEdge(org.apache.hadoop.hbase.util.EnvironmentEdge), TreeMap(java.util.TreeMap), Put(org.apache.hadoop.hbase.client.Put), HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor), TableName(org.apache.hadoop.hbase.TableName), HRegion(org.apache.hadoop.hbase.regionserver.HRegion), AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean), Get(org.apache.hadoop.hbase.client.Get), Test(org.junit.Test)
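
Pulled out of the loop above, the core WALKey idiom the test exercises is: construct a key that carries the region's MultiVersionConcurrencyControl, append, optionally sync, then complete the write entry the append attached to the key. A minimal sketch using the same variables as the test (an illustrative fragment, not a complete method):

WALKey key = new WALKey(info.getEncodedNameAsBytes(), tableName, System.currentTimeMillis(), clusterIds, -1, -1, region.getMVCC(), scopes);
// append is asynchronous: it publishes the edit to the ring buffer and returns a transaction id
long txid = wal.append(info, key, edits, true);
// block until the edit is durable on the filesystem (the test skips this in its loop and relies on flush instead)
wal.sync(txid);
// make the appended edit visible to readers
region.getMVCC().completeAndWait(key.getWriteEntry());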

Example 48 with WALKey

Use of org.apache.hadoop.hbase.wal.WALKey in project hbase by apache.

From the class TestFSHLog, method testUnflushedSeqIdTracking:

/**
   * Test case for https://issues.apache.org/jira/browse/HBASE-16721
   */
@Test(timeout = 30000)
public void testUnflushedSeqIdTracking() throws IOException, InterruptedException {
    final String name = this.name.getMethodName();
    final byte[] b = Bytes.toBytes("b");
    final AtomicBoolean startHoldingForAppend = new AtomicBoolean(false);
    final CountDownLatch holdAppend = new CountDownLatch(1);
    final CountDownLatch flushFinished = new CountDownLatch(1);
    final CountDownLatch putFinished = new CountDownLatch(1);
    try (FSHLog log = new FSHLog(FS, FSUtils.getRootDir(CONF), name, HConstants.HREGION_OLDLOGDIR_NAME, CONF, null, true, null, null)) {
        log.registerWALActionsListener(new WALActionsListener.Base() {

            @Override
            public void visitLogEntryBeforeWrite(WALKey logKey, WALEdit logEdit) throws IOException {
                if (startHoldingForAppend.get()) {
                    try {
                        holdAppend.await();
                    } catch (InterruptedException e) {
                        LOG.error(e);
                    }
                }
            }
        });
        // open a new region which uses this WAL
        HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(this.name.getMethodName())).addFamily(new HColumnDescriptor(b));
        HRegionInfo hri = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
        final HRegion region = TEST_UTIL.createLocalHRegion(hri, htd, log);
        ExecutorService exec = Executors.newFixedThreadPool(2);
        // do a regular write first because of memstore size calculation.
        region.put(new Put(b).addColumn(b, b, b));
        startHoldingForAppend.set(true);
        exec.submit(new Runnable() {

            @Override
            public void run() {
                try {
                    region.put(new Put(b).addColumn(b, b, b));
                    putFinished.countDown();
                } catch (IOException e) {
                    LOG.error(e);
                }
            }
        });
        // give the put a chance to start
        Threads.sleep(3000);
        exec.submit(new Runnable() {

            @Override
            public void run() {
                try {
                    Region.FlushResult flushResult = region.flush(true);
                    LOG.info("Flush result:" + flushResult.getResult());
                    LOG.info("Flush succeeded:" + flushResult.isFlushSucceeded());
                    flushFinished.countDown();
                } catch (IOException e) {
                    LOG.error(e);
                }
            }
        });
        // give the flush a chance to start. Flush should have got the region lock, and
        // should have been waiting on the mvcc complete after this.
        Threads.sleep(3000);
        // let the append to WAL go through now that the flush already started
        holdAppend.countDown();
        putFinished.await();
        flushFinished.await();
        // check whether flush went through
        assertEquals("Region did not flush?", 1, region.getStoreFileList(new byte[][] { b }).size());
        // now check the region's unflushed seqIds.
        long seqId = log.getEarliestMemstoreSeqNum(hri.getEncodedNameAsBytes());
        assertEquals("Found seqId for the region which is already flushed", HConstants.NO_SEQNUM, seqId);
        region.close();
    }
}
Also used: HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor), IOException(java.io.IOException), CountDownLatch(java.util.concurrent.CountDownLatch), Put(org.apache.hadoop.hbase.client.Put), HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor), WALKey(org.apache.hadoop.hbase.wal.WALKey), HRegionInfo(org.apache.hadoop.hbase.HRegionInfo), AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean), HRegion(org.apache.hadoop.hbase.regionserver.HRegion), ExecutorService(java.util.concurrent.ExecutorService), Test(org.junit.Test)
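
The final assertion leans on the WAL's per-region tracking of unflushed sequence ids. A small illustrative check of that bookkeeping, reusing the test's log and hri variables (a sketch only, not part of the test class):

long earliest = log.getEarliestMemstoreSeqNum(hri.getEncodedNameAsBytes());
if (earliest == HConstants.NO_SEQNUM) {
    // every edit appended for this region has since been flushed out of the memstore
    LOG.info("No unflushed edits for region " + hri.getEncodedName());
} else {
    // 'earliest' is the lowest sequence id still held only in the memstore and WAL
    LOG.info("Oldest unflushed sequence id for region " + hri.getEncodedName() + " is " + earliest);
}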

Example 49 with WALKey

Use of org.apache.hadoop.hbase.wal.WALKey in project hbase by apache.

From the class WALUtil, method doFullAppendTransaction:

/**
   * A 'full' WAL transaction involves starting an mvcc transaction followed by an append,
   * an optional sync, and then a call to complete the mvcc transaction. This method does it all.
   * Good for the case of adding a single edit or marker to the WAL.
   *
   * <p>This write is for internal use only. Not for external client consumption.
   * @return WALKey that was added to the WAL.
   */
public static WALKey doFullAppendTransaction(final WAL wal, final NavigableMap<byte[], Integer> replicationScope, final HRegionInfo hri, final WALEdit edit, final MultiVersionConcurrencyControl mvcc, final boolean sync) throws IOException {
    // TODO: Pass in current time to use?
    WALKey walKey = new WALKey(hri.getEncodedNameAsBytes(), hri.getTable(), System.currentTimeMillis(), mvcc, replicationScope);
    long trx = MultiVersionConcurrencyControl.NONE;
    try {
        trx = wal.append(hri, walKey, edit, false);
        if (sync) {
            wal.sync(trx);
        }
        // Call complete only here because these are markers only. They are not for clients to read.
        mvcc.complete(walKey.getWriteEntry());
    } catch (IOException ioe) {
        mvcc.complete(walKey.getWriteEntry());
        throw ioe;
    }
    return walKey;
}
Also used: WALKey(org.apache.hadoop.hbase.wal.WALKey), IOException(java.io.IOException)
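
A hedged sketch of a caller, for example writing a marker edit for a region; wal, replicationScope, hri, and mvcc are assumed to come from the caller's context, and the WALEdit content is a placeholder rather than real marker-building code:

WALEdit markerEdit = new WALEdit();
// ... add the marker cell(s) to markerEdit here ...
// Append, sync (because the last argument is true), and complete the mvcc transaction in one call.
WALKey key = WALUtil.doFullAppendTransaction(wal, replicationScope, hri, markerEdit, mvcc, true);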

Example 50 with WALKey

Use of org.apache.hadoop.hbase.wal.WALKey in project hbase by apache.

From the class TestWALLockup, method testLockup16960:

/**
   * Reproduce the lockup that happens when there are no further syncs after an
   * append fails, causing an isolated sync and then an infinite wait. See
   * HBASE-16960. If the fix is broken, this test will time out because it
   * locks up.
   * <p/>
   * Steps to reproduce:<br/>
   * 1. Trigger a server abort through dodgyWAL1<br/>
   * 2. Add a {@link DummyWALActionsListener} to dodgyWAL2 that makes the ringbuffer
   * event handler thread sleep for a while, thus keeping {@code endOfBatch}
   * false<br/>
   * 3. Publish a sync, then an append that throws an exception, and check whether
   * the sync can return
   */
@Test(timeout = 20000)
public void testLockup16960() throws IOException {
    // A WAL that we can have throw exceptions when a flag is set.
    class DodgyFSLog extends FSHLog {

        // Set this when we want the WAL to start throwing exceptions.
        volatile boolean throwException = false;

        public DodgyFSLog(FileSystem fs, Path root, String logDir, Configuration conf) throws IOException {
            super(fs, root, logDir, conf);
        }

        @Override
        protected Writer createWriterInstance(Path path) throws IOException {
            final Writer w = super.createWriterInstance(path);
            return new Writer() {

                @Override
                public void close() throws IOException {
                    w.close();
                }

                @Override
                public void sync() throws IOException {
                    if (throwException) {
                        throw new IOException("FAKE! Failed to replace a bad datanode...SYNC");
                    }
                    w.sync();
                }

                @Override
                public void append(Entry entry) throws IOException {
                    if (throwException) {
                        throw new IOException("FAKE! Failed to replace a bad datanode...APPEND");
                    }
                    w.append(entry);
                }

                @Override
                public long getLength() {
                    return w.getLength();
                }
            };
        }

        @Override
        protected long doReplaceWriter(Path oldPath, Path newPath, Writer nextWriter) throws IOException {
            if (throwException) {
                throw new FailedLogCloseException("oldPath=" + oldPath + ", newPath=" + newPath);
            }
            long oldFileLen = 0L;
            oldFileLen = super.doReplaceWriter(oldPath, newPath, nextWriter);
            return oldFileLen;
        }
    }
    // Mocked up server and regionserver services. Needed below.
    Server server = new DummyServer(CONF, ServerName.valueOf("hostname1.example.org", 1234, 1L).toString());
    RegionServerServices services = Mockito.mock(RegionServerServices.class);
    CONF.setLong("hbase.regionserver.hlog.sync.timeout", 10000);
    // OK. Now I have my mocked up Server & RegionServerServices and dodgy WAL,
    // go ahead with test.
    FileSystem fs = FileSystem.get(CONF);
    Path rootDir = new Path(dir + getName());
    DodgyFSLog dodgyWAL1 = new DodgyFSLog(fs, rootDir, getName(), CONF);
    Path rootDir2 = new Path(dir + getName() + "2");
    final DodgyFSLog dodgyWAL2 = new DodgyFSLog(fs, rootDir2, getName() + "2", CONF);
    // Add a listener to force ringbuffer event handler sleep for a while
    dodgyWAL2.registerWALActionsListener(new DummyWALActionsListener());
    // I need a log roller running.
    LogRoller logRoller = new LogRoller(server, services);
    logRoller.addWAL(dodgyWAL1);
    logRoller.addWAL(dodgyWAL2);
    // There is no 'stop' once a logRoller is running.. it just dies.
    logRoller.start();
    // Now get a region and start adding in edits.
    HTableDescriptor htd = new HTableDescriptor(TableName.META_TABLE_NAME);
    final HRegion region = initHRegion(tableName, null, null, dodgyWAL1);
    byte[] bytes = Bytes.toBytes(getName());
    NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    scopes.put(COLUMN_FAMILY_BYTES, 0);
    MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
    try {
        Put put = new Put(bytes);
        put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("1"), bytes);
        WALKey key = new WALKey(region.getRegionInfo().getEncodedNameAsBytes(), htd.getTableName(), System.currentTimeMillis(), mvcc, scopes);
        WALEdit edit = new WALEdit();
        CellScanner cellScanner = put.cellScanner();
        assertTrue(cellScanner.advance());
        edit.add(cellScanner.current());
        LOG.info("SET throwing of exception on append");
        dodgyWAL1.throwException = true;
        // This append provokes a WAL roll request
        dodgyWAL1.append(region.getRegionInfo(), key, edit, true);
        boolean exception = false;
        try {
            dodgyWAL1.sync();
        } catch (Exception e) {
            exception = true;
        }
        assertTrue("Did not get sync exception", exception);
        // cause server abort.
        try {
            // wait LogRoller exit.
            Thread.sleep(50);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
        final CountDownLatch latch = new CountDownLatch(1);
        // make RingBufferEventHandler sleep 1s, so the following sync
        // endOfBatch=false
        key = new WALKey(region.getRegionInfo().getEncodedNameAsBytes(), TableName.valueOf("sleep"), System.currentTimeMillis(), mvcc, scopes);
        dodgyWAL2.append(region.getRegionInfo(), key, edit, true);
        Thread t = new Thread("Sync") {

            public void run() {
                try {
                    dodgyWAL2.sync();
                } catch (IOException e) {
                    LOG.info("In sync", e);
                }
                latch.countDown();
                LOG.info("Sync exiting");
            }

        };
        t.setDaemon(true);
        t.start();
        try {
            // make sure sync have published.
            Thread.sleep(100);
        } catch (InterruptedException e1) {
            e1.printStackTrace();
        }
        // make append throw DamagedWALException
        key = new WALKey(region.getRegionInfo().getEncodedNameAsBytes(), TableName.valueOf("DamagedWALException"), System.currentTimeMillis(), mvcc, scopes);
        dodgyWAL2.append(region.getRegionInfo(), key, edit, true);
        while (latch.getCount() > 0) {
            Threads.sleep(100);
        }
        assertTrue(server.isAborted());
    } finally {
        if (logRoller != null) {
            logRoller.close();
        }
        try {
            if (region != null) {
                region.close();
            }
            if (dodgyWAL1 != null) {
                dodgyWAL1.close();
            }
            if (dodgyWAL2 != null) {
                dodgyWAL2.close();
            }
        } catch (Exception e) {
            LOG.info("On way out", e);
        }
    }
}
Also used: Configuration(org.apache.hadoop.conf.Configuration), Server(org.apache.hadoop.hbase.Server), FailedLogCloseException(org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException), CellScanner(org.apache.hadoop.hbase.CellScanner), FSHLog(org.apache.hadoop.hbase.regionserver.wal.FSHLog), WALKey(org.apache.hadoop.hbase.wal.WALKey), WALEdit(org.apache.hadoop.hbase.regionserver.wal.WALEdit), FileSystem(org.apache.hadoop.fs.FileSystem), Path(org.apache.hadoop.fs.Path), IOException(java.io.IOException), TreeMap(java.util.TreeMap), CountDownLatch(java.util.concurrent.CountDownLatch), Put(org.apache.hadoop.hbase.client.Put), DamagedWALException(org.apache.hadoop.hbase.regionserver.wal.DamagedWALException), HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor), Writer(org.apache.hadoop.hbase.wal.WALProvider.Writer), Test(org.junit.Test)
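
The DummyWALActionsListener registered on dodgyWAL2 is not included in this result. Based on how the test uses it (an append against a table named "sleep" stalls the ring buffer event handler, and one against a table named "DamagedWALException" fails the append), a hypothetical reconstruction could look like the sketch below. It is consistent with the test above but is not the actual HBase class, and it assumes WALKey exposes the table name via getTablename():

static class DummyWALActionsListener extends WALActionsListener.Base {

    @Override
    public void visitLogEntryBeforeWrite(WALKey logKey, WALEdit logEdit) throws IOException {
        // Hypothetical reconstruction: dispatch on the table name the test encodes into each WALKey.
        String table = logKey.getTablename().getNameAsString();
        if ("sleep".equalsIgnoreCase(table)) {
            // Stall the ring buffer event handler so the sync that follows sees endOfBatch == false.
            Threads.sleep(1000);
        } else if ("DamagedWALException".equalsIgnoreCase(table)) {
            // Fail the append so the previously published sync has to be resolved on its own.
            throw new DamagedWALException("FAKE! Simulated append failure");
        }
    }
}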

Aggregations

WALKey (org.apache.hadoop.hbase.wal.WALKey): 51 usages
WALEdit (org.apache.hadoop.hbase.regionserver.wal.WALEdit): 29 usages
Test (org.junit.Test): 26 usages
WAL (org.apache.hadoop.hbase.wal.WAL): 22 usages
TreeMap (java.util.TreeMap): 17 usages
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 17 usages
KeyValue (org.apache.hadoop.hbase.KeyValue): 16 usages
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 15 usages
IOException (java.io.IOException): 14 usages
Path (org.apache.hadoop.fs.Path): 14 usages
TableName (org.apache.hadoop.hbase.TableName): 12 usages
ArrayList (java.util.ArrayList): 10 usages
Cell (org.apache.hadoop.hbase.Cell): 10 usages
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 10 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 9 usages
Get (org.apache.hadoop.hbase.client.Get): 9 usages
Result (org.apache.hadoop.hbase.client.Result): 9 usages
MultiVersionConcurrencyControl (org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl): 8 usages
WALFactory (org.apache.hadoop.hbase.wal.WALFactory): 8 usages
Put (org.apache.hadoop.hbase.client.Put): 7 usages