Use of org.apache.hadoop.hbase.replication.regionserver.WALEntryStream.WALEntryStreamRuntimeException in project hbase by apache.
Example: the run() method of the ReplicationSourceWALReaderThread class.
@Override
public void run() {
  int sleepMultiplier = 1;
  while (isReaderRunning()) {
    // we only loop back here if something fatal happened to our stream
    try (WALEntryStream entryStream =
        new WALEntryStream(logQueue, fs, conf, currentPosition, metrics)) {
      while (isReaderRunning()) {
        // loop here to keep reusing stream while we can
        if (!checkQuota()) {
          continue;
        }
        WALEntryBatch batch = null;
        while (entryStream.hasNext()) {
          if (batch == null) {
            batch = new WALEntryBatch(replicationBatchCountCapacity, entryStream.getCurrentPath());
          }
          Entry entry = entryStream.next();
          if (updateSerialReplPos(batch, entry)) {
            batch.lastWalPosition = entryStream.getPosition();
            break;
          }
          entry = filterEntry(entry);
          if (entry != null) {
            WALEdit edit = entry.getEdit();
            if (edit != null && !edit.isEmpty()) {
              long entrySize = getEntrySize(entry);
              batch.addEntry(entry);
              updateBatchStats(batch, entry, entryStream.getPosition(), entrySize);
              boolean totalBufferTooLarge = acquireBufferQuota(entrySize);
              // Stop if too many entries or too big
              if (totalBufferTooLarge || batch.getHeapSize() >= replicationBatchSizeCapacity
                  || batch.getNbEntries() >= replicationBatchCountCapacity) {
                break;
              }
            }
          }
        }
        if (batch != null && (!batch.getLastSeqIds().isEmpty() || batch.getNbEntries() > 0)) {
          if (LOG.isTraceEnabled()) {
            LOG.trace(String.format("Read %s WAL entries eligible for replication",
                batch.getNbEntries()));
          }
          entryBatchQueue.put(batch);
          sleepMultiplier = 1;
        } else {
          // got no entries and didn't advance position in WAL
          LOG.trace("Didn't read any new entries from WAL");
          if (replicationQueueInfo.isQueueRecovered()) {
            // we're done with queue recovery, shut ourself down
            setReaderRunning(false);
            // shuts down shipper thread immediately
            entryBatchQueue.put(batch != null ? batch
                : new WALEntryBatch(replicationBatchCountCapacity, entryStream.getCurrentPath()));
          } else {
            Thread.sleep(sleepForRetries);
          }
        }
        currentPosition = entryStream.getPosition();
        // reuse stream
        entryStream.reset();
      }
    } catch (IOException | WALEntryStreamRuntimeException e) {
      // stream related
      if (sleepMultiplier < maxRetriesMultiplier) {
        LOG.debug("Failed to read stream of replication entries: " + e);
        sleepMultiplier++;
      } else {
        LOG.error("Failed to read stream of replication entries", e);
      }
      Threads.sleep(sleepForRetries * sleepMultiplier);
    } catch (InterruptedException e) {
      LOG.trace("Interrupted while sleeping between WAL reads");
      Thread.currentThread().interrupt();
    }
  }
}
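For context, the catch of IOException | WALEntryStreamRuntimeException is what turns stream failures into a reconnect with backoff: the outer while (isReaderRunning()) loop reopens the WALEntryStream, the retry sleep grows by a multiplier capped at maxRetriesMultiplier, and the multiplier drops back to 1 once a batch ships. The standalone sketch below isolates just that control flow under stated assumptions; the names and values used here (BackoffReaderSketch, FlakyStream, the 100 ms sleep, the cap of 5) are hypothetical stand-ins, not HBase APIs or configuration values.

import java.io.IOException;
import java.util.concurrent.ThreadLocalRandom;

public class BackoffReaderSketch {

  /** Hypothetical stand-in for WALEntryStream: sometimes fails, sometimes has entries. */
  static class FlakyStream implements AutoCloseable {
    boolean hasNext() throws IOException {
      if (ThreadLocalRandom.current().nextInt(4) == 0) {
        throw new IOException("simulated stream failure");
      }
      return ThreadLocalRandom.current().nextBoolean();
    }

    String next() {
      return "entry";
    }

    @Override
    public void close() {
      // nothing to release in this sketch
    }
  }

  public static void main(String[] args) throws InterruptedException {
    final long sleepForRetries = 100;   // ms; assumed value, HBase reads this from configuration
    final int maxRetriesMultiplier = 5; // assumed cap on the backoff multiplier
    int sleepMultiplier = 1;
    int shippedBatches = 0;

    // Unlike the reader thread, the sketch reopens the stream each iteration for simplicity.
    while (shippedBatches < 3) {
      try (FlakyStream stream = new FlakyStream()) {
        int read = 0;
        while (stream.hasNext()) {
          stream.next();
          read++;
        }
        if (read > 0) {
          shippedBatches++;
          sleepMultiplier = 1; // success: reset the backoff, as the reader thread does
          System.out.println("shipped batch of " + read + " entries");
        } else {
          Thread.sleep(sleepForRetries); // nothing new: plain sleep, no backoff growth
        }
      } catch (IOException e) {
        // stream-related failure: back off with a capped multiplier, then reopen the stream
        if (sleepMultiplier < maxRetriesMultiplier) {
          sleepMultiplier++;
        }
        Thread.sleep(sleepForRetries * sleepMultiplier);
      }
    }
  }
}

The same shape, backing off only on the failure path and resetting immediately on success, keeps a transient filesystem hiccup from permanently slowing the reader.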