Use of org.apache.distributedlog.exceptions.DLIllegalStateException in project bookkeeper by apache.
The class ReadAheadEntryReader, method unsafeReinitializeLogSegments.
/**
* Reinitialize the log segments.
*/
private void unsafeReinitializeLogSegments(List<LogSegmentMetadata> segments) {
    logger.info("Reinitialize log segments with {}", segments);
    int segmentIdx = 0;
    for (; segmentIdx < segments.size(); segmentIdx++) {
        LogSegmentMetadata segment = segments.get(segmentIdx);
        if (segment.getLogSegmentSequenceNumber() < currentSegmentSequenceNumber) {
            continue;
        }
        break;
    }
    if (segmentIdx >= segments.size()) {
        return;
    }
    LogSegmentMetadata segment = segments.get(segmentIdx);
    if (null != currentSegmentReader) {
        if (!updateLogSegmentMetadata(currentSegmentReader, segment)) {
            return;
        }
    } else {
        if (currentSegmentSequenceNumber != segment.getLogSegmentSequenceNumber()) {
            logger.error("Inconsistent state found in entry reader for {} : "
                    + "current segment sn = {}, new segment sn = {}",
                    new Object[] { streamName, currentSegmentSequenceNumber, segment.getLogSegmentSequenceNumber() });
            setLastException(new DLIllegalStateException("Inconsistent state found in entry reader for "
                    + streamName + " : current segment sn = " + currentSegmentSequenceNumber
                    + ", new segment sn = " + segment.getLogSegmentSequenceNumber()));
            return;
        }
    }
    segmentIdx++;
    if (segmentIdx >= segments.size()) {
        return;
    }
    // check next segment
    segment = segments.get(segmentIdx);
    if (null != nextSegmentReader) {
        if (!updateLogSegmentMetadata(nextSegmentReader, segment)) {
            return;
        }
        segmentIdx++;
    }
    // check the segment readers in the queue
    for (int readerIdx = 0; readerIdx < segmentReaders.size() && segmentIdx < segments.size(); readerIdx++, segmentIdx++) {
        SegmentReader reader = segmentReaders.get(readerIdx);
        segment = segments.get(segmentIdx);
        if (!updateLogSegmentMetadata(reader, segment)) {
            return;
        }
    }
    // add the remaining segments to the reader queue
    for (; segmentIdx < segments.size(); segmentIdx++) {
        segment = segments.get(segmentIdx);
        SegmentReader reader = new SegmentReader(segment, 0L);
        reader.openReader();
        segmentReaders.add(reader);
    }
    if (null == currentSegmentReader) {
        unsafeMoveToNextLogSegment();
    }
    // resume readahead if necessary
    invokeReadAhead();
}
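The method walks the current reader, the next reader, and the queued readers in order, requiring each to line up with the freshly fetched segment list by log segment sequence number; a mismatch is recorded via setLastException rather than thrown, so readahead stops cleanly. The following is a minimal, hypothetical sketch of that alignment invariant, reduced to plain sequence numbers instead of the project's SegmentReader and LogSegmentMetadata types:

import java.util.List;

import org.apache.distributedlog.exceptions.DLIllegalStateException;

// Hypothetical illustration only: the i-th tracked reader must match the i-th
// new segment by sequence number, otherwise the reader state is inconsistent.
final class SegmentAlignmentSketch {

    static void verifyAligned(List<Long> readerSeqNos, List<Long> newSegmentSeqNos)
            throws DLIllegalStateException {
        int n = Math.min(readerSeqNos.size(), newSegmentSeqNos.size());
        for (int i = 0; i < n; i++) {
            long current = readerSeqNos.get(i);
            long updated = newSegmentSeqNos.get(i);
            if (current != updated) {
                // The real code records this exception with setLastException(...)
                // instead of throwing, surfacing it to pending read requests.
                throw new DLIllegalStateException("Inconsistent state: current segment sn = "
                        + current + ", new segment sn = " + updated);
            }
        }
    }
}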
Use of org.apache.distributedlog.exceptions.DLIllegalStateException in project bookkeeper by apache.
The class ReadAheadEntryReader, method updateLogSegmentMetadata.
/**
* Update the log segment metadata.
*
* @param reader the reader whose log segment metadata should be updated
* @param newMetadata the new metadata received
* @return true if the metadata was updated successfully, false on encountering errors
*/
private boolean updateLogSegmentMetadata(SegmentReader reader, LogSegmentMetadata newMetadata) {
    if (reader.getSegment().getLogSegmentSequenceNumber() != newMetadata.getLogSegmentSequenceNumber()) {
        logger.error("Inconsistent state found in entry reader for {} : "
                + "current segment = {}, new segment = {}",
                new Object[] { streamName, reader.getSegment(), newMetadata });
        setLastException(new DLIllegalStateException("Inconsistent state found in entry reader for "
                + streamName + " : current segment = " + reader.getSegment()
                + ", new segment = " + newMetadata));
        return false;
    }
    if (!reader.getSegment().isInProgress() && newMetadata.isInProgress()) {
        setLastException(new DLIllegalStateException("An inprogress log segment " + newMetadata
                + " received after a closed log segment " + reader.getSegment()
                + " on reading segment " + newMetadata.getLogSegmentSequenceNumber()
                + " @ stream " + streamName));
        return false;
    }
    if (reader.getSegment().isInProgress() && !newMetadata.isInProgress()) {
        reader.updateLogSegmentMetadata(newMetadata);
    }
    return true;
}
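Beyond the sequence-number check, the method enforces a simple state-transition rule: a completed (closed) segment must never be replaced by an in-progress one, and only the in-progress to completed transition updates the reader's metadata. A hypothetical distillation of that rule over two booleans:

// Hypothetical helper, not part of the codebase: returns whether replacing a
// segment whose in-progress flag is currentInProgress with one whose flag is
// newInProgress is legal under the rule enforced above.
static boolean isLegalTransition(boolean currentInProgress, boolean newInProgress) {
    // Illegal only when a closed segment would be replaced by an in-progress one.
    return currentInProgress || !newInProgress;
}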
Use of org.apache.distributedlog.exceptions.DLIllegalStateException in project bookkeeper by apache.
The class TestBKDistributedLogManager, method deleteDuringRead.
@Test(timeout = 60000)
public void deleteDuringRead() throws Exception {
    String name = "distrlog-delete-with-reader";
    DistributedLogManager dlm = createNewDLM(conf, name);
    long txid = 1;
    for (long i = 0; i < 3; i++) {
        long start = txid;
        BKSyncLogWriter writer = (BKSyncLogWriter) dlm.startLogSegmentNonPartitioned();
        for (long j = 1; j <= DEFAULT_SEGMENT_SIZE; j++) {
            writer.write(DLMTestUtil.getLogRecordInstance(txid++));
        }
        BKLogSegmentWriter perStreamLogWriter = writer.getCachedLogWriter();
        writer.closeAndComplete();
        BKLogWriteHandler blplm = ((BKDistributedLogManager) (dlm)).createWriteHandler(true);
        assertNotNull(zkc.exists(blplm.completedLedgerZNode(start, txid - 1,
                perStreamLogWriter.getLogSegmentSequenceNumber()), false));
        Utils.ioResult(blplm.asyncClose());
    }
    LogReader reader = dlm.getInputStream(1);
    LogRecord record = reader.readNext(false);
    assert (null != record);
    DLMTestUtil.verifyLogRecord(record);
    long lastTxId = record.getTransactionId();
    dlm.delete();
    boolean exceptionEncountered;
    try {
        record = reader.readNext(false);
        while (null != record) {
            DLMTestUtil.verifyLogRecord(record);
            assert (lastTxId < record.getTransactionId());
            lastTxId = record.getTransactionId();
            record = reader.readNext(false);
        }
        // make sure the exception is thrown from readahead
        while (true) {
            reader.readNext(false);
        }
    } catch (LogReadException | LogNotFoundException | DLIllegalStateException e) {
        exceptionEncountered = true;
    }
    assertTrue(exceptionEncountered);
    reader.close();
}
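The test expects that once the log is deleted underneath an open LogReader, subsequent reads fail with LogReadException, LogNotFoundException, or DLIllegalStateException (the last surfacing from readahead). A caller-side sketch of the same pattern, written as a helper that could live in the same test class so the existing imports cover LogReader, LogRecord, and the exception types; the record handling is a placeholder:

// Hypothetical helper illustrating how a caller might drain a reader and treat
// these exceptions as the signal that the stream has been deleted.
static void drainUntilDeleted(LogReader reader) throws Exception {
    try {
        LogRecord record = reader.readNext(false);
        while (null != record) {
            // handle the record here (application-specific placeholder)
            record = reader.readNext(false);
        }
    } catch (LogReadException | LogNotFoundException | DLIllegalStateException expected) {
        // the log was deleted or readahead hit an inconsistent state; stop reading
    } finally {
        reader.close();
    }
}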
Use of org.apache.distributedlog.exceptions.DLIllegalStateException in project bookkeeper by apache.
The class TestLogSegmentsZK, method testCreateLogSegmentUnmatchMaxSequenceNumber.
/**
* Create a log segment while the max sequence number doesn't match the list of log segments.
*/
@Test(timeout = 60000)
public void testCreateLogSegmentUnmatchMaxSequenceNumber() throws Exception {
    URI uri = createURI();
    String streamName = testName.getMethodName();
    DistributedLogConfiguration conf = new DistributedLogConfiguration()
            .setLockTimeout(99999)
            .setOutputBufferSize(0)
            .setImmediateFlushEnabled(true)
            .setEnableLedgerAllocatorPool(true)
            .setLedgerAllocatorPoolName("test");
    Namespace namespace = NamespaceBuilder.newBuilder().conf(conf).uri(uri).build();
    namespace.createLog(streamName);
    MaxLogSegmentSequenceNo max1 = getMaxLogSegmentSequenceNo(getZooKeeperClient(namespace), uri, streamName, conf);
    assertEquals(DistributedLogConstants.UNASSIGNED_LOGSEGMENT_SEQNO, max1.getSequenceNumber());
    DistributedLogManager dlm = namespace.openLog(streamName);
    final int numSegments = 3;
    for (int i = 0; i < numSegments; i++) {
        BKSyncLogWriter out = (BKSyncLogWriter) dlm.startLogSegmentNonPartitioned();
        out.write(DLMTestUtil.getLogRecordInstance(i));
        out.closeAndComplete();
    }
    MaxLogSegmentSequenceNo max2 = getMaxLogSegmentSequenceNo(getZooKeeperClient(namespace), uri, streamName, conf);
    assertEquals(3, max2.getSequenceNumber());
    // update the max ledger sequence number
    updateMaxLogSegmentSequenceNo(getZooKeeperClient(namespace), uri, streamName, conf,
            DLUtils.serializeLogSegmentSequenceNumber(99));
    DistributedLogManager dlm1 = namespace.openLog(streamName);
    try {
        BKSyncLogWriter out1 = (BKSyncLogWriter) dlm1.startLogSegmentNonPartitioned();
        out1.write(DLMTestUtil.getLogRecordInstance(numSegments + 1));
        out1.closeAndComplete();
        fail("Should fail creating new log segment when encountered unmatch max ledger sequence number");
    } catch (DLIllegalStateException lse) {
        // expected
    } finally {
        dlm1.close();
    }
    DistributedLogManager dlm2 = namespace.openLog(streamName);
    List<LogSegmentMetadata> segments = dlm2.getLogSegments();
    try {
        assertEquals(3, segments.size());
        assertEquals(1L, segments.get(0).getLogSegmentSequenceNumber());
        assertEquals(2L, segments.get(1).getLogSegmentSequenceNumber());
        assertEquals(3L, segments.get(2).getLogSegmentSequenceNumber());
    } finally {
        dlm2.close();
    }
    dlm.close();
    namespace.close();
}
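The test corrupts the tracked max log segment sequence number (setting it to 99 while only three segments exist) and expects segment creation to fail with DLIllegalStateException rather than allocate a conflicting sequence number. A hypothetical sketch of the kind of sanity check involved; the actual write-handler code is not shown here, so names and signature are illustrative only:

// Hypothetical illustration only: before creating segment "max + 1", the tracked
// max sequence number should agree with the last completed segment.
static long nextLogSegmentSequenceNumber(long trackedMaxSeqNo, long lastCompletedSeqNo)
        throws DLIllegalStateException {
    if (trackedMaxSeqNo != lastCompletedSeqNo) {
        throw new DLIllegalStateException("Unexpected max log segment sequence number "
                + trackedMaxSeqNo + ", expected " + lastCompletedSeqNo);
    }
    return trackedMaxSeqNo + 1;
}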
Use of org.apache.distributedlog.exceptions.DLIllegalStateException in project bookkeeper by apache.
The class BKAsyncLogReader, method safeRun.
@Override
public void safeRun() {
    synchronized (scheduleLock) {
        if (scheduleDelayStopwatch.isRunning()) {
            scheduleLatency.registerSuccessfulEvent(
                    scheduleDelayStopwatch.stop().elapsed(TimeUnit.MICROSECONDS), TimeUnit.MICROSECONDS);
        }
        Stopwatch runTime = Stopwatch.createStarted();
        int iterations = 0;
        long scheduleCountLocal = scheduleCountUpdater.get(this);
        LOG.debug("{}: Scheduled Background Reader", readHandler.getFullyQualifiedName());
        while (true) {
            if (LOG.isTraceEnabled()) {
                LOG.trace("{}: Executing Iteration: {}", readHandler.getFullyQualifiedName(), iterations++);
            }
            PendingReadRequest nextRequest = null;
            synchronized (this) {
                nextRequest = pendingRequests.peek();
                // Queue is empty, nothing to read, return
                if (null == nextRequest) {
                    LOG.trace("{}: Queue Empty waiting for Input", readHandler.getFullyQualifiedName());
                    scheduleCountUpdater.set(this, 0);
                    backgroundReaderRunTime.registerSuccessfulEvent(
                            runTime.stop().elapsed(TimeUnit.MICROSECONDS), TimeUnit.MICROSECONDS);
                    return;
                }
                if (disableProcessingReadRequests) {
                    LOG.info("Reader of {} is forced to stop processing read requests",
                            readHandler.getFullyQualifiedName());
                    return;
                }
            }
            lastProcessTime.reset().start();
            // know the last consumed read
            if (null == lastExceptionUpdater.get(this)) {
                if (nextRequest.getPromise().isCancelled()) {
                    setLastException(new DLInterruptedException("Interrupted on reading "
                            + readHandler.getFullyQualifiedName()));
                }
            }
            if (checkClosedOrInError("readNext")) {
                Throwable lastException = lastExceptionUpdater.get(this);
                if (lastException != null && !(lastException.getCause() instanceof LogNotFoundException)) {
                    LOG.warn("{}: Exception", readHandler.getFullyQualifiedName(), lastException);
                }
                backgroundReaderRunTime.registerFailedEvent(
                        runTime.stop().elapsed(TimeUnit.MICROSECONDS), TimeUnit.MICROSECONDS);
                return;
            }
            try {
                // Fail 10% of the requests when asked to simulate errors
                if (bkDistributedLogManager.getFailureInjector().shouldInjectErrors()) {
                    throw new IOException("Reader Simulated Exception");
                }
                LogRecordWithDLSN record;
                while (!nextRequest.hasReadEnoughRecords()) {
                    // read single record
                    do {
                        record = readNextRecord();
                    } while (null != record && (record.isControl()
                            || (record.getDlsn().compareTo(getStartDLSN()) < 0)));
                    if (null == record) {
                        break;
                    } else {
                        if (record.isEndOfStream() && !returnEndOfStreamRecord) {
                            setLastException(new EndOfStreamException("End of Stream Reached for "
                                    + readHandler.getFullyQualifiedName()));
                            break;
                        }
                        // gap detection
                        if (recordPositionsContainsGap(record, lastPosition)) {
                            bkDistributedLogManager.raiseAlert("Gap detected between records at record = {}", record);
                            if (positionGapDetectionEnabled) {
                                throw new DLIllegalStateException("Gap detected between records at record = " + record);
                            }
                        }
                        lastPosition = record.getLastPositionWithinLogSegment();
                        nextRequest.addRecord(record);
                    }
                }
            } catch (IOException exc) {
                setLastException(exc);
                if (!(exc instanceof LogNotFoundException)) {
                    LOG.warn("{} : read with skip Exception", readHandler.getFullyQualifiedName(),
                            lastExceptionUpdater.get(this));
                }
                continue;
            }
            if (nextRequest.hasReadRecords()) {
                long remainingWaitTime = nextRequest.getRemainingWaitTime();
                if (remainingWaitTime > 0 && !nextRequest.hasReadEnoughRecords()) {
                    backgroundReaderRunTime.registerSuccessfulEvent(
                            runTime.stop().elapsed(TimeUnit.MICROSECONDS), TimeUnit.MICROSECONDS);
                    scheduleDelayStopwatch.reset().start();
                    scheduleCountUpdater.set(this, 0);
                    // the request could still wait for more records
                    backgroundScheduleTask = scheduler.scheduleOrdered(streamName, BACKGROUND_READ_SCHEDULER,
                            remainingWaitTime, nextRequest.deadlineTimeUnit);
                    return;
                }
                PendingReadRequest request = pendingRequests.poll();
                if (null != request && nextRequest == request) {
                    request.complete();
                    if (null != backgroundScheduleTask) {
                        backgroundScheduleTask.cancel(true);
                        backgroundScheduleTask = null;
                    }
                } else {
                    DLIllegalStateException ise = new DLIllegalStateException("Unexpected condition at dlsn = "
                            + nextRequest.records.get(0).getDlsn());
                    nextRequest.completeExceptionally(ise);
                    if (null != request) {
                        request.completeExceptionally(ise);
                    }
                    // We should never get here as we should have exited the loop if
                    // pendingRequests were empty
                    bkDistributedLogManager.raiseAlert("Unexpected condition at dlsn = {}",
                            nextRequest.records.get(0).getDlsn());
                    setLastException(ise);
                }
            } else {
                if (0 == scheduleCountLocal) {
                    LOG.trace("Schedule count dropping to zero", lastExceptionUpdater.get(this));
                    backgroundReaderRunTime.registerSuccessfulEvent(
                            runTime.stop().elapsed(TimeUnit.MICROSECONDS), TimeUnit.MICROSECONDS);
                    return;
                }
                scheduleCountLocal = scheduleCountUpdater.decrementAndGet(this);
            }
        }
    }
}
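Within the read loop above, gap detection compares each record's position against the last position seen in the segment and, when positionGapDetectionEnabled is set, escalates a detected gap to a DLIllegalStateException. The body of recordPositionsContainsGap is not shown here, so the following is only a simplified, hypothetical sketch of such a check over plain positions:

// Hypothetical, simplified position-gap check: positions within a log segment are
// expected to advance contiguously once something has been read from it.
static boolean containsPositionGap(long recordPosition, long lastPositionSeen) {
    if (lastPositionSeen == 0) {
        // nothing read from this segment yet, so no gap can be observed
        return false;
    }
    return recordPosition != lastPositionSeen + 1;
}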