Use of org.apache.hadoop.hbase.regionserver.HRegion.FlushResult in project hbase by apache.
From the class RegionReplicationBufferManager, method flush:
private void flush() {
  long max = Long.MIN_VALUE;
  HRegion toFlush = null;
  for (HRegion region : rsServices.getRegions()) {
    Optional<RegionReplicationSink> sink = region.getRegionReplicationSink();
    if (sink.isPresent()) {
      RegionReplicationSink s = sink.get();
      long p = s.pendingSize();
      if (p > max) {
        max = p;
        toFlush = region;
      }
    }
  }
  if (toFlush != null) {
    // flush the region whose replication sink has the largest pending entry size
    try {
      LOG.info("Going to flush {} with {} pending entry size", toFlush.getRegionInfo(),
        StringUtils.TraditionalBinaryPrefix.long2String(max, "", 1));
      FlushResult result = toFlush.flushcache(true, true, FlushLifeCycleTracker.DUMMY);
      if (!result.isFlushSucceeded()) {
        LOG.warn("Failed to flush {}, the result is {}", toFlush.getRegionInfo(),
          result.getResult());
      }
    } catch (IOException e) {
      LOG.warn("Failed to flush {}", toFlush.getRegionInfo(), e);
    }
  } else {
    // usually this should not happen, but since the flush operation is async it
    // theoretically could; log it, no real harm
    LOG.warn("Can not find a region to flush");
  }
}
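The method picks the region whose region replication sink has the largest backlog, force-flushes it, and checks the returned FlushResult. Isolated from the buffer-manager context, the result handling looks roughly like the sketch below; flushAndReport is our hypothetical helper, while flushcache, isFlushSucceeded and getResult are the calls used above.

import java.io.IOException;

import org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegion.FlushResult;

// Hypothetical helper, not HBase code: force-flush one region and report
// whether the flush actually went through.
final class FlushHelper {
  static void flushAndReport(HRegion region) {
    try {
      // same arguments as the snippet above: flush all stores and write a
      // flush-request marker to the WAL
      FlushResult result = region.flushcache(true, true, FlushLifeCycleTracker.DUMMY);
      if (!result.isFlushSucceeded()) {
        // getResult() names the concrete outcome, e.g. CANNOT_FLUSH
        System.err.println("Flush did not succeed: " + result.getResult());
      }
    } catch (IOException e) {
      // flushcache is declared to throw IOException; the code above just logs it
      System.err.println("Failed to flush " + region.getRegionInfo() + ": " + e);
    }
  }
}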
Use of org.apache.hadoop.hbase.regionserver.HRegion.FlushResult in project hbase by apache.
From the class SingleProcessHBaseCluster, method executeFlush:
private void executeFlush(HRegion region) throws IOException {
  if (!RegionReplicaUtil.isDefaultReplica(region.getRegionInfo())) {
    return;
  }
  // retry up to 5 times if we cannot flush
  for (int i = 0; i < 5; i++) {
    FlushResult result = region.flush(true);
    if (result.getResult() != FlushResult.Result.CANNOT_FLUSH) {
      return;
    }
    Threads.sleep(1000);
  }
}
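The loop retries only on FlushResult.Result.CANNOT_FLUSH; every other outcome is terminal. Below is a minimal sketch of that distinction, assuming the four Result constants found in current HBase sources; FlushRetryPolicy and shouldRetry are our names, not HBase API.

import org.apache.hadoop.hbase.regionserver.HRegion.FlushResult;

// Hypothetical helper, not HBase API: only CANNOT_FLUSH is worth retrying.
final class FlushRetryPolicy {
  static boolean shouldRetry(FlushResult result) {
    switch (result.getResult()) {
      case CANNOT_FLUSH:
        // transient, e.g. another flush or a close is in progress; retry later
        return true;
      case CANNOT_FLUSH_MEMSTORE_EMPTY: // nothing to flush, retrying is pointless
      case FLUSHED_NO_COMPACTION_NEEDED: // the flush went through
      case FLUSHED_COMPACTION_NEEDED: // the flush went through, compaction advised
      default:
        return false;
    }
  }
}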
Use of org.apache.hadoop.hbase.regionserver.HRegion.FlushResult in project hbase by apache.
From the class TestSplitWalDataLoss, method test:
@Test
public void test() throws IOException, InterruptedException {
  final HRegionServer rs = testUtil.getRSForFirstRegionInTable(tableName);
  final HRegion region = (HRegion) rs.getRegions(tableName).get(0);
  HRegion spiedRegion = spy(region);
  final MutableBoolean flushed = new MutableBoolean(false);
  final MutableBoolean reported = new MutableBoolean(false);
  doAnswer(new Answer<FlushResult>() {

    @Override
    public FlushResult answer(InvocationOnMock invocation) throws Throwable {
      synchronized (flushed) {
        flushed.setValue(true);
        flushed.notifyAll();
      }
      synchronized (reported) {
        while (!reported.booleanValue()) {
          reported.wait();
        }
      }
      rs.getWAL(region.getRegionInfo())
        .abortCacheFlush(region.getRegionInfo().getEncodedNameAsBytes());
      throw new DroppedSnapshotException("testcase");
    }
  }).when(spiedRegion).internalFlushCacheAndCommit(Matchers.<WAL> any(),
    Matchers.<MonitoredTask> any(), Matchers.<PrepareFlushResult> any(),
    Matchers.<Collection<HStore>> any());
  // Find region key; don't pick up key for hbase:meta by mistake.
  String key = null;
  for (Map.Entry<String, HRegion> entry : rs.getOnlineRegions().entrySet()) {
    if (entry.getValue().getRegionInfo().getTable().equals(this.tableName)) {
      key = entry.getKey();
      break;
    }
  }
  rs.getOnlineRegions().put(key, spiedRegion);
  Connection conn = testUtil.getConnection();
  try (Table table = conn.getTable(tableName)) {
    table.put(new Put(Bytes.toBytes("row0")).addColumn(family, qualifier, Bytes.toBytes("val0")));
  }
  long oldestSeqIdOfStore = region.getOldestSeqIdOfStore(family);
  LOG.info("CHANGE OLDEST " + oldestSeqIdOfStore);
  assertTrue(oldestSeqIdOfStore > HConstants.NO_SEQNUM);
  rs.getMemStoreFlusher().requestFlush(spiedRegion, FlushLifeCycleTracker.DUMMY);
  synchronized (flushed) {
    while (!flushed.booleanValue()) {
      flushed.wait();
    }
  }
  try (Table table = conn.getTable(tableName)) {
    table.put(new Put(Bytes.toBytes("row1")).addColumn(family, qualifier, Bytes.toBytes("val1")));
  }
  long now = EnvironmentEdgeManager.currentTime();
  rs.tryRegionServerReport(now - 500, now);
  synchronized (reported) {
    reported.setValue(true);
    reported.notifyAll();
  }
  while (testUtil.getRSForFirstRegionInTable(tableName) == rs) {
    Thread.sleep(100);
  }
  try (Table table = conn.getTable(tableName)) {
    Result result = table.get(new Get(Bytes.toBytes("row0")));
    assertArrayEquals(Bytes.toBytes("val0"), result.getValue(family, qualifier));
  }
}
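The test coordinates the mocked flush and the main thread with two monitor-guarded flags, flushed and reported. For illustration only, here is the same two-phase handshake restated with CountDownLatch; this sketch is ours and not part of the HBase test.

import java.util.concurrent.CountDownLatch;

// Standalone sketch of the flushed/reported handshake used in the test above.
public class HandshakeSketch {
  public static void main(String[] args) throws InterruptedException {
    CountDownLatch flushed = new CountDownLatch(1);
    CountDownLatch reported = new CountDownLatch(1);
    Thread mockedFlush = new Thread(() -> {
      flushed.countDown(); // signal the test: the flush has started
      try {
        reported.await(); // park until the region server report has happened
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        return;
      }
      // the real test aborts the WAL cache flush here and throws
      // DroppedSnapshotException to simulate a failed flush
    });
    mockedFlush.start();
    flushed.await(); // wait for the mocked flush to start
    // the real test writes a second row and calls tryRegionServerReport here
    reported.countDown(); // let the mocked flush proceed to its failure
    mockedFlush.join();
  }
}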
Use of org.apache.hadoop.hbase.regionserver.HRegion.FlushResult in project hbase by apache.
From the class MemStoreFlusher, method flushRegion:
/**
 * Flush a region.
 * @param region Region to flush.
 * @param emergencyFlush Set if we are being force flushed. If true, the region needs to be
 *   removed from the flush queue. If false, we were called from the main flusher run loop and
 *   got the entry to flush by polling the flush queue (which already removed it).
 * @param families the stores of the region to flush.
 * @return true if the region was successfully flushed, false otherwise. If false, there will
 *   be accompanying log messages explaining why the region was not flushed.
 */
private boolean flushRegion(HRegion region, boolean emergencyFlush, List<byte[]> families,
  FlushLifeCycleTracker tracker) {
  synchronized (this.regionsInQueue) {
    FlushRegionEntry fqe = this.regionsInQueue.remove(region);
    if (fqe != null && emergencyFlush) {
      // Need to remove the region from the delay queue. When NOT an
      // emergencyFlush, the item was already removed via flushQueue.poll.
      flushQueue.remove(fqe);
    }
  }
  tracker.beforeExecution();
  lock.readLock().lock();
  final CompactSplit compactSplitThread = server.getCompactSplitThread();
  try {
    notifyFlushRequest(region, emergencyFlush);
    FlushResult flushResult = region.flushcache(families, false, tracker);
    boolean shouldCompact = flushResult.isCompactionNeeded();
    // We just want to check the size
    boolean shouldSplit = region.checkSplit().isPresent();
    if (shouldSplit) {
      compactSplitThread.requestSplit(region);
    } else if (shouldCompact) {
      compactSplitThread.requestSystemCompaction(region, Thread.currentThread().getName());
    }
  } catch (DroppedSnapshotException ex) {
    // Cache flush can fail in a few places. If it fails in a critical
    // section, we get a DroppedSnapshotException and a replay of the WAL
    // is required. Currently the only way to do this is a restart of
    // the server. Abort because hdfs is probably bad (HBASE-644 is a case
    // where hdfs was bad but passed the hdfs check).
    server.abort("Replay of WAL required. Forcing server shutdown", ex);
    return false;
  } catch (IOException ex) {
    ex = ex instanceof RemoteException ? ((RemoteException) ex).unwrapRemoteException() : ex;
    LOG.error("Cache flush failed"
      + (region != null
        ? (" for region " + Bytes.toStringBinary(region.getRegionInfo().getRegionName()))
        : ""), ex);
    if (!server.checkFileSystem()) {
      return false;
    }
  } finally {
    lock.readLock().unlock();
    wakeUpIfBlocking();
    tracker.afterExecution();
  }
  return true;
}
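After the flush, a split request takes precedence over a system compaction request. The decision, condensed into a hypothetical helper for clarity; PostFlushDecision is our name, while checkSplit and isCompactionNeeded are the calls used above.

import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegion.FlushResult;

// Hypothetical helper, not HBase code: mirrors the split-over-compaction
// priority in flushRegion above.
final class PostFlushDecision {
  static String decide(HRegion region, FlushResult flushResult) {
    if (region.checkSplit().isPresent()) {
      // a region that is big enough to split is split rather than compacted
      return "split";
    }
    if (flushResult.isCompactionNeeded()) {
      // the flush result advises a system compaction
      return "compact";
    }
    return "none";
  }
}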
Use of org.apache.hadoop.hbase.regionserver.HRegion.FlushResult in project hbase by apache.
From the class TestReplicateToReplica, method testCatchUpWithCannotFlush:
@Test
public void testCatchUpWithCannotFlush() throws IOException, InterruptedException {
  byte[] row = Bytes.toBytes(0);
  primary.put(new Put(row).addColumn(FAMILY, QUAL, Bytes.toBytes(1)));
  failOne();
  verify(flushRequester, times(1)).requestFlush(any(), anyList(), any());
  flushPrimary();
  failAll();
  Thread.sleep(2000);
  // we will request a flush the second time
  verify(flushRequester, times(2)).requestFlush(any(), anyList(), any());
  // we cannot flush because there is no content in the memstore
  FlushResult result = flushPrimary();
  assertEquals(FlushResult.Result.CANNOT_FLUSH_MEMSTORE_EMPTY, result.getResult());
  // the secondary replica does not have this row yet
  assertFalse(secondary.get(new Get(row).setCheckExistenceOnly(true)).getExists().booleanValue());
  // replicate the cannot-flush edit
  replicateOne();
  // we should have the row now
  assertEquals(1, Bytes.toInt(secondary.get(new Get(row)).getValue(FAMILY, QUAL)));
}
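The assertion hinges on an empty memstore yielding CANNOT_FLUSH_MEMSTORE_EMPTY rather than a successful flush. A minimal sketch of that relationship, assuming JUnit 4 and a FlushResult obtained from a flush call as above; EmptyMemstoreFlushCheck is our name, not part of the HBase test.

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;

import org.apache.hadoop.hbase.regionserver.HRegion.FlushResult;

// Sketch, not HBase test code: an empty-memstore flush attempt reports
// CANNOT_FLUSH_MEMSTORE_EMPTY and does not count as a successful flush.
final class EmptyMemstoreFlushCheck {
  static void check(FlushResult result) {
    assertEquals(FlushResult.Result.CANNOT_FLUSH_MEMSTORE_EMPTY, result.getResult());
    // isFlushSucceeded() is true only for the FLUSHED_* outcomes
    assertFalse(result.isFlushSucceeded());
  }
}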