Example 1 with FlushResult

Use of org.apache.hadoop.hbase.regionserver.Region.FlushResult in project hbase by apache.

From class MemStoreFlusher, method flushRegion:

/**
   * Flush a region.
   * @param region Region to flush.
   * @param emergencyFlush Set if we are being force flushed. If true, the region
   * needs to be removed from the flush queue. If false, we were called from the
   * main flusher run loop and got the entry to flush by polling the flush queue
   * (which already removed it).
   * @param forceFlushAllStores whether we want to flush all stores.
   * @return true if the region was successfully flushed, false otherwise. If
   * false, there will be accompanying log messages explaining why the region was
   * not flushed.
   */
private boolean flushRegion(final Region region, final boolean emergencyFlush, boolean forceFlushAllStores) {
    synchronized (this.regionsInQueue) {
        FlushRegionEntry fqe = this.regionsInQueue.remove(region);
        // Use the start time of the FlushRegionEntry if available
        if (fqe != null && emergencyFlush) {
            // Need to remove the region from the delay queue.  When NOT an
            // emergencyFlush, the item was already removed via flushQueue.poll.
            flushQueue.remove(fqe);
        }
    }
    lock.readLock().lock();
    try {
        notifyFlushRequest(region, emergencyFlush);
        FlushResult flushResult = region.flush(forceFlushAllStores);
        boolean shouldCompact = flushResult.isCompactionNeeded();
        // We just want to check the size
        boolean shouldSplit = ((HRegion) region).checkSplit() != null;
        if (shouldSplit) {
            this.server.compactSplitThread.requestSplit(region);
        } else if (shouldCompact) {
            server.compactSplitThread.requestSystemCompaction(region, Thread.currentThread().getName());
        }
    } catch (DroppedSnapshotException ex) {
        // Cache flush can fail in a few places. If it fails in a critical
        // section, we get a DroppedSnapshotException and a replay of wal
        // is required. Currently the only way to do this is a restart of
        // the server. Abort because hdfs is probably bad (HBASE-644 is a case
        // where hdfs was bad but passed the hdfs check).
        server.abort("Replay of WAL required. Forcing server shutdown", ex);
        return false;
    } catch (IOException ex) {
        ex = ex instanceof RemoteException ? ((RemoteException) ex).unwrapRemoteException() : ex;
        LOG.error("Cache flush failed" + (region != null ? (" for region " + Bytes.toStringBinary(region.getRegionInfo().getRegionName())) : ""), ex);
        if (!server.checkFileSystem()) {
            return false;
        }
    } finally {
        lock.readLock().unlock();
        wakeUpIfBlocking();
    }
    return true;
}
Also used:
  DroppedSnapshotException (org.apache.hadoop.hbase.DroppedSnapshotException)
  IOException (java.io.IOException)
  FlushResult (org.apache.hadoop.hbase.regionserver.Region.FlushResult)
  RemoteException (org.apache.hadoop.ipc.RemoteException)
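
A caller of flushRegion only sees the boolean return value, but the FlushResult obtained from Region.flush(boolean) carries more detail. The sketch below is a minimal, hypothetical helper (FlushResultSketch is not part of HBase) showing how such a caller might use the isFlushSucceeded() and isCompactionNeeded() accessors; it assumes only the Region.flush(boolean) call visible in the example above.

import java.io.IOException;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.Region.FlushResult;

// Hypothetical helper, not HBase code: interprets the FlushResult of a forced flush.
public class FlushResultSketch {

    /**
     * Flush all stores of the region and report whether a follow-up
     * compaction looks worthwhile. Returns false if nothing was flushed.
     */
    static boolean flushAndCheck(Region region) throws IOException {
        // true mirrors forceFlushAllStores in the example above
        FlushResult flushResult = region.flush(true);
        if (!flushResult.isFlushSucceeded()) {
            // e.g. the memstore was empty or the region could not be flushed
            return false;
        }
        return flushResult.isCompactionNeeded();
    }
}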

Example 2 with FlushResult

Use of org.apache.hadoop.hbase.regionserver.Region.FlushResult in project hbase by apache.

From class TestSplitWalDataLoss, method test:

@Test
public void test() throws IOException, InterruptedException {
    final HRegionServer rs = testUtil.getRSForFirstRegionInTable(tableName);
    final HRegion region = (HRegion) rs.getOnlineRegions(tableName).get(0);
    HRegion spiedRegion = spy(region);
    final MutableBoolean flushed = new MutableBoolean(false);
    final MutableBoolean reported = new MutableBoolean(false);
    // Stub the flush commit: signal the test thread that the flush has started,
    // wait until the region server report below has been sent, then fail the flush.
    doAnswer(new Answer<FlushResult>() {

        @Override
        public FlushResult answer(InvocationOnMock invocation) throws Throwable {
            synchronized (flushed) {
                flushed.setValue(true);
                flushed.notifyAll();
            }
            synchronized (reported) {
                while (!reported.booleanValue()) {
                    reported.wait();
                }
            }
            // Fail the in-flight flush only after the region server report has gone out.
            rs.getWAL(region.getRegionInfo()).abortCacheFlush(region.getRegionInfo().getEncodedNameAsBytes());
            throw new DroppedSnapshotException("testcase");
        }
    }).when(spiedRegion).internalFlushCacheAndCommit(Matchers.<WAL>any(), Matchers.<MonitoredTask>any(), Matchers.<PrepareFlushResult>any(), Matchers.<Collection<Store>>any());
    // Find region key; don't pick up key for hbase:meta by mistake.
    String key = null;
    for (Map.Entry<String, Region> entry : rs.onlineRegions.entrySet()) {
        if (entry.getValue().getRegionInfo().getTable().equals(this.tableName)) {
            key = entry.getKey();
            break;
        }
    }
    // Swap the spied region in so the upcoming flush goes through the stub above.
    rs.onlineRegions.put(key, spiedRegion);
    Connection conn = testUtil.getConnection();
    try (Table table = conn.getTable(tableName)) {
        table.put(new Put(Bytes.toBytes("row0")).addColumn(family, qualifier, Bytes.toBytes("val0")));
    }
    long oldestSeqIdOfStore = region.getOldestSeqIdOfStore(family);
    LOG.info("CHANGE OLDEST " + oldestSeqIdOfStore);
    assertTrue(oldestSeqIdOfStore > HConstants.NO_SEQNUM);
    // Trigger a flush of the first put and wait until the stubbed flush has started.
    rs.cacheFlusher.requestFlush(spiedRegion, false);
    synchronized (flushed) {
        while (!flushed.booleanValue()) {
            flushed.wait();
        }
    }
    try (Table table = conn.getTable(tableName)) {
        table.put(new Put(Bytes.toBytes("row1")).addColumn(family, qualifier, Bytes.toBytes("val1")));
    }
    // Send a region server report to the master while the flush is still in flight.
    long now = EnvironmentEdgeManager.currentTime();
    rs.tryRegionServerReport(now - 500, now);
    // Let the stubbed flush proceed; it will throw and abort this region server.
    synchronized (reported) {
        reported.setValue(true);
        reported.notifyAll();
    }
    // Wait until the region has been reassigned to a different region server.
    while (testUtil.getRSForFirstRegionInTable(tableName) == rs) {
        Thread.sleep(100);
    }
    // The first put must survive the WAL split and replay despite the failed flush.
    try (Table table = conn.getTable(tableName)) {
        Result result = table.get(new Get(Bytes.toBytes("row0")));
        assertArrayEquals(Bytes.toBytes("val0"), result.getValue(family, qualifier));
    }
}
Also used:
  Table (org.apache.hadoop.hbase.client.Table)
  DroppedSnapshotException (org.apache.hadoop.hbase.DroppedSnapshotException)
  MutableBoolean (org.apache.commons.lang.mutable.MutableBoolean)
  Connection (org.apache.hadoop.hbase.client.Connection)
  PrepareFlushResult (org.apache.hadoop.hbase.regionserver.HRegion.PrepareFlushResult)
  FlushResult (org.apache.hadoop.hbase.regionserver.Region.FlushResult)
  Put (org.apache.hadoop.hbase.client.Put)
  Result (org.apache.hadoop.hbase.client.Result)
  InvocationOnMock (org.mockito.invocation.InvocationOnMock)
  Get (org.apache.hadoop.hbase.client.Get)
  Map (java.util.Map)
  Test (org.junit.Test)
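
The test above hinges on Mockito's spy/doAnswer combination: the spied HRegion keeps its real behaviour except for internalFlushCacheAndCommit, which is replaced by an Answer that coordinates with the test thread and then fails. The sketch below is a stripped-down, hypothetical illustration of that pattern; the Flusher class and its commit() method are invented for the example and have nothing to do with HBase.

import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.spy;

import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

public class SpyAnswerSketch {

    // Invented stand-in for HRegion.
    static class Flusher {
        boolean commit() {
            return true; // real work would happen here
        }
    }

    public static void main(String[] args) {
        // A spy delegates to the real object unless a method is explicitly stubbed.
        Flusher spied = spy(new Flusher());

        // doAnswer(...).when(spy).method() replaces only commit(); every other
        // method still runs the real code, just like spiedRegion in Example 2.
        doAnswer(new Answer<Boolean>() {
            @Override
            public Boolean answer(InvocationOnMock invocation) throws Throwable {
                // Side effects (signalling another thread, aborting a flush) go here.
                throw new IllegalStateException("simulated flush failure");
            }
        }).when(spied).commit();

        try {
            spied.commit();
        } catch (IllegalStateException expected) {
            System.out.println("stubbed commit threw: " + expected.getMessage());
        }
    }
}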

Aggregations

DroppedSnapshotException (org.apache.hadoop.hbase.DroppedSnapshotException) 2
FlushResult (org.apache.hadoop.hbase.regionserver.Region.FlushResult) 2
IOException (java.io.IOException) 1
Map (java.util.Map) 1
MutableBoolean (org.apache.commons.lang.mutable.MutableBoolean) 1
Connection (org.apache.hadoop.hbase.client.Connection) 1
Get (org.apache.hadoop.hbase.client.Get) 1
Put (org.apache.hadoop.hbase.client.Put) 1
Result (org.apache.hadoop.hbase.client.Result) 1
Table (org.apache.hadoop.hbase.client.Table) 1
PrepareFlushResult (org.apache.hadoop.hbase.regionserver.HRegion.PrepareFlushResult) 1
RemoteException (org.apache.hadoop.ipc.RemoteException) 1
Test (org.junit.Test) 1
InvocationOnMock (org.mockito.invocation.InvocationOnMock) 1