use of com.twitter.distributedlog.exceptions.DLIllegalStateException in project distributedlog by twitter.
In the class TestAsyncReaderWriter, the method testReadBrokenEntriesWithGapDetection:
@Test(timeout = 60000)
public void testReadBrokenEntriesWithGapDetection() throws Exception {
    final String name = runtime.getMethodName();

    // Immediate-flush setup with read-ahead broken-entry injection turned on.
    // Because position gap detection is also enabled, skipping a broken entry
    // is expected to surface as a DLIllegalStateException instead of being
    // silently tolerated.
    final DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(testConf);
    confLocal.setOutputBufferSize(0);
    confLocal.setPeriodicFlushFrequencyMilliSeconds(0);
    confLocal.setImmediateFlushEnabled(true);
    confLocal.setReadAheadWaitTime(10);
    confLocal.setReadAheadBatchSize(1);
    confLocal.setPositionGapDetectionEnabled(true);
    confLocal.setReadAheadSkipBrokenEntries(true);
    confLocal.setEIInjectReadAheadBrokenEntries(true);

    final DistributedLogManager dlm = createNewDLM(confLocal, name);
    long txid = 1L;
    txid = writeRecords(dlm, 1 /* log segments */, 100 /* records per segment */, txid, false);

    final AsyncLogReader reader = dlm.getAsyncLogReader(DLSN.InvalidDLSN);
    try {
        // The reader must fail before handing back 30 records. Records whose
        // entry id is a multiple of 10 are presumably the injected broken
        // entries (TODO confirm against the injection implementation) and must
        // never be returned to the caller.
        for (int read = 0; read < 30; read++) {
            LogRecordWithDLSN record = Await.result(reader.readNext());
            assertFalse(record.getDlsn().getEntryId() % 10 == 0);
        }
        fail("should have thrown");
    } catch (DLIllegalStateException e) {
        // expected: gap detected after a broken entry was skipped
    }
    reader.asyncClose();
    dlm.close();
}
use of com.twitter.distributedlog.exceptions.DLIllegalStateException in project distributedlog by twitter.
In the class TestLogSegmentsZK, the method testCreateLogSegmentUnmatchMaxSequenceNumber:
/**
 * Create a log segment while the stored max sequence number does not match
 * the actual list of log segments.
 */
@Test(timeout = 60000)
public void testCreateLogSegmentUnmatchMaxSequenceNumber() throws Exception {
    final URI uri = createURI();
    final String streamName = testName.getMethodName();
    final DistributedLogConfiguration conf = new DistributedLogConfiguration()
            .setLockTimeout(99999)
            .setOutputBufferSize(0)
            .setImmediateFlushEnabled(true)
            .setEnableLedgerAllocatorPool(true)
            .setLedgerAllocatorPoolName("test");
    final BKDistributedLogNamespace namespace =
            BKDistributedLogNamespace.newBuilder().conf(conf).uri(uri).build();
    namespace.createLog(streamName);

    // A freshly created log carries no assigned segment sequence number yet.
    MaxLogSegmentSequenceNo max1 =
            getMaxLogSegmentSequenceNo(namespace.getSharedWriterZKCForDL(), uri, streamName, conf);
    assertEquals(DistributedLogConstants.UNASSIGNED_LOGSEGMENT_SEQNO, max1.getSequenceNumber());

    // Write three complete log segments, one record each.
    DistributedLogManager dlm = namespace.openLog(streamName);
    final int numSegments = 3;
    for (int segment = 0; segment < numSegments; segment++) {
        BKSyncLogWriter writer = (BKSyncLogWriter) dlm.startLogSegmentNonPartitioned();
        writer.write(DLMTestUtil.getLogRecordInstance(segment));
        writer.closeAndComplete();
    }
    MaxLogSegmentSequenceNo max2 =
            getMaxLogSegmentSequenceNo(namespace.getSharedWriterZKCForDL(), uri, streamName, conf);
    assertEquals(3, max2.getSequenceNumber());

    // Corrupt the stored max ledger sequence number so it disagrees with
    // the actual segment list.
    updateMaxLogSegmentSequenceNo(namespace.getSharedWriterZKCForDL(), uri, streamName, conf,
            DLUtils.serializeLogSegmentSequenceNumber(99));

    // Starting a new segment must now fail with DLIllegalStateException.
    DistributedLogManager dlm1 = namespace.openLog(streamName);
    try {
        BKSyncLogWriter out1 = (BKSyncLogWriter) dlm1.startLogSegmentNonPartitioned();
        out1.write(DLMTestUtil.getLogRecordInstance(numSegments + 1));
        out1.closeAndComplete();
        fail("Should fail creating new log segment when encountered unmatch max ledger sequence number");
    } catch (DLIllegalStateException lse) {
        // expected
    } finally {
        dlm1.close();
    }

    // The original three segments must be untouched by the failed attempt.
    DistributedLogManager dlm2 = namespace.openLog(streamName);
    List<LogSegmentMetadata> segments = dlm2.getLogSegments();
    try {
        assertEquals(3, segments.size());
        assertEquals(1L, segments.get(0).getLogSegmentSequenceNumber());
        assertEquals(2L, segments.get(1).getLogSegmentSequenceNumber());
        assertEquals(3L, segments.get(2).getLogSegmentSequenceNumber());
    } finally {
        dlm2.close();
    }
    dlm.close();
    namespace.close();
}
use of com.twitter.distributedlog.exceptions.DLIllegalStateException in project distributedlog by twitter.
In the class BKAsyncLogReaderDLSN, the method run:
/**
 * Background read loop: drains pending read requests against the read-ahead
 * cache, completing each request once it has read enough records, and
 * terminating on an empty queue, a terminal exception, or an exhausted
 * schedule count.
 *
 * <p>All iterations run under {@code scheduleLock}; the pending-request queue
 * is additionally guarded by {@code this}. Fix over previous revision: removed
 * a stray empty statement ({@code ;}) left after the inner record-reading loop.
 */
@Override
public void run() {
    synchronized (scheduleLock) {
        // Record how long this task sat in the executor queue before running.
        if (scheduleDelayStopwatch.isRunning()) {
            scheduleLatency.registerSuccessfulEvent(scheduleDelayStopwatch.stop().elapsed(TimeUnit.MICROSECONDS));
        }
        Stopwatch runTime = Stopwatch.createStarted();
        int iterations = 0;
        long scheduleCountLocal = scheduleCount.get();
        LOG.debug("{}: Scheduled Background Reader", bkLedgerManager.getFullyQualifiedName());
        while (true) {
            if (LOG.isTraceEnabled()) {
                LOG.trace("{}: Executing Iteration: {}", bkLedgerManager.getFullyQualifiedName(), iterations++);
            }
            PendingReadRequest nextRequest = null;
            synchronized (this) {
                nextRequest = pendingRequests.peek();
                // Queue is empty, nothing to read, return
                if (null == nextRequest) {
                    LOG.trace("{}: Queue Empty waiting for Input", bkLedgerManager.getFullyQualifiedName());
                    scheduleCount.set(0);
                    backgroundReaderRunTime.registerSuccessfulEvent(runTime.stop().elapsed(TimeUnit.MICROSECONDS));
                    return;
                }
                // Processing can be externally disabled (e.g. for tests); bail out.
                if (disableProcessingReadRequests) {
                    LOG.info("Reader of {} is forced to stop processing read requests", bkLedgerManager.getFullyQualifiedName());
                    return;
                }
            }
            // know the last consumed read
            if (null == lastException.get()) {
                // Surface a caller-side promise interruption as the reader's
                // terminal exception.
                if (nextRequest.getPromise().isInterrupted().isDefined()) {
                    setLastException(new DLInterruptedException("Interrupted on reading " + bkLedgerManager.getFullyQualifiedName() + " : ", nextRequest.getPromise().isInterrupted().get()));
                }
            }
            if (checkClosedOrInError("readNext")) {
                // Reader already closed or in error: stop; log unless it is the
                // routine log-not-found case.
                if (!(lastException.get().getCause() instanceof LogNotFoundException)) {
                    LOG.warn("{}: Exception", bkLedgerManager.getFullyQualifiedName(), lastException.get());
                }
                backgroundReaderRunTime.registerFailedEvent(runTime.stop().elapsed(TimeUnit.MICROSECONDS));
                return;
            }
            try {
                // Fail 10% of the requests when asked to simulate errors
                if (failureInjector.shouldInjectErrors()) {
                    throw new IOException("Reader Simulated Exception");
                }
                LogRecordWithDLSN record;
                while (!nextRequest.hasReadEnoughRecords()) {
                    // read single record, skipping control records and records
                    // positioned before the reader's start DLSN
                    do {
                        record = bkLedgerManager.getNextReadAheadRecord();
                    } while (null != record && (record.isControl() || (record.getDlsn().compareTo(getStartDLSN()) < 0)));
                    if (null == record) {
                        // read-ahead cache exhausted for now
                        break;
                    } else {
                        if (record.isEndOfStream() && !returnEndOfStreamRecord) {
                            setLastException(new EndOfStreamException("End of Stream Reached for " + bkLedgerManager.getFullyQualifiedName()));
                            break;
                        }
                        // gap detection
                        if (recordPositionsContainsGap(record, lastPosition)) {
                            bkDistributedLogManager.raiseAlert("Gap detected between records at dlsn = {}", record.getDlsn());
                            if (positionGapDetectionEnabled) {
                                throw new DLIllegalStateException("Gap detected between records at dlsn = " + record.getDlsn());
                            }
                        }
                        lastPosition = record.getLastPositionWithinLogSegment();
                        nextRequest.addRecord(record);
                    }
                }
            } catch (IOException exc) {
                // Record the failure and retry the loop; the next iteration's
                // checkClosedOrInError decides whether the reader terminates.
                setLastException(exc);
                if (!(exc instanceof LogNotFoundException)) {
                    LOG.warn("{} : read with skip Exception", bkLedgerManager.getFullyQualifiedName(), lastException.get());
                }
                continue;
            }
            if (nextRequest.hasReadRecords()) {
                long remainingWaitTime = nextRequest.getRemainingWaitTime();
                if (remainingWaitTime > 0 && !nextRequest.hasReadEnoughRecords()) {
                    // Not enough records yet but the deadline has not passed:
                    // reschedule instead of completing the request early.
                    backgroundReaderRunTime.registerSuccessfulEvent(runTime.stop().elapsed(TimeUnit.MICROSECONDS));
                    scheduleDelayStopwatch.reset().start();
                    scheduleCount.set(0);
                    // the request could still wait for more records
                    backgroundScheduleTask = executorService.schedule(BACKGROUND_READ_SCHEDULER, remainingWaitTime, nextRequest.deadlineTimeUnit);
                    return;
                }
                PendingReadRequest request = pendingRequests.poll();
                if (null != request && nextRequest == request) {
                    request.complete();
                    if (null != backgroundScheduleTask) {
                        backgroundScheduleTask.cancel(true);
                        backgroundScheduleTask = null;
                    }
                } else {
                    // The queue head changed between peek() and poll(); fail
                    // both requests rather than deliver records out of order.
                    DLIllegalStateException ise = new DLIllegalStateException("Unexpected condition at dlsn = " + nextRequest.records.get(0).getDlsn());
                    nextRequest.setException(ise);
                    if (null != request) {
                        request.setException(ise);
                    }
                    // We should never get here as we should have exited the loop if
                    // pendingRequests were empty
                    bkDistributedLogManager.raiseAlert("Unexpected condition at dlsn = {}", nextRequest.records.get(0).getDlsn());
                    setLastException(ise);
                }
            } else {
                if (0 == scheduleCountLocal) {
                    LOG.trace("Schedule count dropping to zero", lastException.get());
                    backgroundReaderRunTime.registerSuccessfulEvent(runTime.stop().elapsed(TimeUnit.MICROSECONDS));
                    return;
                }
                scheduleCountLocal = scheduleCount.decrementAndGet();
            }
        }
    }
}
use of com.twitter.distributedlog.exceptions.DLIllegalStateException in project distributedlog by twitter.
In the class TestZKTransaction, the method testAbortTransaction:
@Test(timeout = 60000)
public void testAbortTransaction() throws Exception {
    final ZooKeeperClient zkc = mock(ZooKeeperClient.class);
    final ZKTransaction transaction = new ZKTransaction(zkc);

    // Queue three ops that count down the corresponding latch on commit/abort.
    final int numOps = 3;
    final CountDownLatch commitLatch = new CountDownLatch(numOps);
    final CountDownLatch abortLatch = new CountDownLatch(numOps);
    for (int op = 0; op < numOps; op++) {
        transaction.addOp(new CountDownZKOp(commitLatch, abortLatch));
    }

    // Aborting the transaction must abort every queued op and commit none.
    transaction.abort(new DLIllegalStateException("Illegal State"));
    abortLatch.await();
    assertEquals(0, abortLatch.getCount());
    assertEquals(numOps, commitLatch.getCount());
}
use of com.twitter.distributedlog.exceptions.DLIllegalStateException in project distributedlog by twitter.
In the class BKLogWriteHandler, the method assignLogSegmentSequenceNumber:
/**
 * Picks the sequence number for the next log segment and validates it against
 * the recorded max log segment sequence number.
 *
 * @return the sequence number to assign to the new log segment
 * @throws IOException if the recorded max sequence number contradicts the
 *         cached segment list (thrown as {@link DLIllegalStateException})
 */
protected long assignLogSegmentSequenceNumber() throws IOException {
    // For any active stream we will always make sure that there is at least one
    // active ledger (except when the stream first starts out). Therefore when we
    // see no ledger metadata for a stream, we assume that this is the first ledger
    // in the stream
    long logSegmentSeqNo = DistributedLogConstants.UNASSIGNED_LOGSEGMENT_SEQNO;
    boolean logSegmentsFound = false;
    if (LogSegmentMetadata.supportsLogSegmentSequenceNo(conf.getDLLedgerMetadataLayoutVersion())) {
        List<LogSegmentMetadata> segmentsDesc = getFilteredLedgerListDesc(false, false);
        Long candidateSeqNo = DLUtils.nextLogSegmentSequenceNumber(segmentsDesc);
        logSegmentsFound = (null != candidateSeqNo);
        // No previously assigned sequence number found: start from the
        // configured first sequence number; otherwise continue from the next.
        logSegmentSeqNo = logSegmentsFound
                ? candidateSeqNo
                : conf.getFirstLogSegmentSequenceNumber();
    }
    if (!logSegmentsFound
            && (DistributedLogConstants.UNASSIGNED_LOGSEGMENT_SEQNO == maxLogSegmentSequenceNo.getSequenceNumber())) {
        // no ledger seqno stored in /ledgers before
        LOG.info("No max ledger sequence number found while creating log segment {} for {}.", logSegmentSeqNo, getFullyQualifiedName());
    } else if (maxLogSegmentSequenceNo.getSequenceNumber() + 1 != logSegmentSeqNo) {
        LOG.warn("Unexpected max log segment sequence number {} for {} : list of cached segments = {}", new Object[] { maxLogSegmentSequenceNo.getSequenceNumber(), getFullyQualifiedName(), getCachedLogSegments(LogSegmentMetadata.DESC_COMPARATOR) });
        // there is max log segment number recorded there and it isn't match. throw exception.
        throw new DLIllegalStateException("Unexpected max log segment sequence number " + maxLogSegmentSequenceNo.getSequenceNumber() + " for " + getFullyQualifiedName() + ", expected " + (logSegmentSeqNo - 1));
    }
    return logSegmentSeqNo;
}
Aggregations