Use of com.twitter.distributedlog.annotations.DistributedLogAnnotations.FlakyTest in the distributedlog project by Twitter: class TestRollLogSegments, method testCaughtUpReaderOnLogSegmentRolling.
@FlakyTest
@Test(timeout = 60000)
public void testCaughtUpReaderOnLogSegmentRolling() throws Exception {
    String name = "distrlog-caughtup-reader-on-logsegment-rolling";
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(conf);
    confLocal.setPeriodicFlushFrequencyMilliSeconds(0);
    confLocal.setImmediateFlushEnabled(false);
    confLocal.setOutputBufferSize(4 * 1024 * 1024);
    confLocal.setTraceReadAheadMetadataChanges(true);
    confLocal.setEnsembleSize(1);
    confLocal.setWriteQuorumSize(1);
    confLocal.setAckQuorumSize(1);
    confLocal.setReadLACLongPollTimeout(99999999);
    DistributedLogManager dlm = createNewDLM(confLocal, name);
    BKSyncLogWriter writer = (BKSyncLogWriter) dlm.startLogSegmentNonPartitioned();

    // 1) writer adds 5 entries
    final int numEntries = 5;
    for (int i = 1; i <= numEntries; i++) {
        writer.write(DLMTestUtil.getLogRecordInstance(i));
        writer.setReadyToFlush();
        writer.flushAndSync();
    }

    BKDistributedLogManager readDLM = (BKDistributedLogManager) createNewDLM(confLocal, name);
    final BKAsyncLogReaderDLSN reader = (BKAsyncLogReaderDLSN) readDLM.getAsyncLogReader(DLSN.InitialDLSN);

    // 2) reader should be able to read the 5 entries
    for (long i = 1; i <= numEntries; i++) {
        LogRecordWithDLSN record = Await.result(reader.readNext());
        DLMTestUtil.verifyLogRecord(record);
        assertEquals(i, record.getTransactionId());
        assertEquals(record.getTransactionId() - 1, record.getSequenceId());
    }

    BKLogSegmentWriter perStreamWriter = writer.segmentWriter;
    BookKeeperClient bkc = readDLM.getReaderBKC();
    LedgerHandle readLh = bkc.get().openLedgerNoRecovery(
            getLedgerHandle(perStreamWriter).getId(),
            BookKeeper.DigestType.CRC32,
            conf.getBKDigestPW().getBytes(UTF_8));

    // writer has moved to lac = 9, while the reader knows lac = 8 and is moving to wait on 9
    checkAndWaitWriterReaderPosition(perStreamWriter, 9, reader, 9, readLh, 8);

    // write the 6th record
    writer.write(DLMTestUtil.getLogRecordInstance(numEntries + 1));
    writer.setReadyToFlush();

    // writer has moved to lac = 10, while the reader knows lac = 9 and is moving to wait on 10
    checkAndWaitWriterReaderPosition(perStreamWriter, 10, reader, 10, readLh, 9);

    // write a record without committing it, to simulate similar failure cases
    writer.write(DLMTestUtil.getLogRecordInstance(numEntries + 2));
    writer.setReadyToFlush();

    // writer has moved to lac = 11, while the reader knows lac = 10 and is moving to wait on 11
    checkAndWaitWriterReaderPosition(perStreamWriter, 11, reader, 11, readLh, 10);

    // wait until the readahead worker has registered a metadata notification for its long poll
    while (null == reader.bkLedgerManager.readAheadWorker.getMetadataNotification()) {
        Thread.sleep(1000);
    }
    logger.info("Waiting for long poll getting interrupted with metadata changed");

    // simulate a recovery that completes the log segment without closing the ledger,
    // causing the wrong last DLSN to be recorded
    BKLogWriteHandler writeHandler = writer.getCachedWriteHandler();
    writeHandler.completeAndCloseLogSegment(
            writeHandler.inprogressZNodeName(
                    perStreamWriter.getLogSegmentId(),
                    perStreamWriter.getStartTxId(),
                    perStreamWriter.getLogSegmentSequenceNumber()),
            perStreamWriter.getLogSegmentSequenceNumber(),
            perStreamWriter.getLogSegmentId(),
            perStreamWriter.getStartTxId(),
            perStreamWriter.getLastTxId(),
            perStreamWriter.getPositionWithinLogSegment() - 1,
            9,
            0);

    // a new writer starts the next log segment and writes one more record
    BKSyncLogWriter anotherWriter = (BKSyncLogWriter) dlm.startLogSegmentNonPartitioned();
    anotherWriter.write(DLMTestUtil.getLogRecordInstance(numEntries + 3));
    anotherWriter.setReadyToFlush();
    anotherWriter.flushAndSync();
    anotherWriter.closeAndComplete();

    // the caught-up reader should continue reading records 6 through 8 across the segment roll
    for (long i = numEntries + 1; i <= numEntries + 3; i++) {
        LogRecordWithDLSN record = Await.result(reader.readNext());
        DLMTestUtil.verifyLogRecord(record);
        assertEquals(i, record.getTransactionId());
    }

    Utils.close(reader);
    readDLM.close();
}
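For context, @FlakyTest is used above purely as a marker on the test method so that build tooling can recognize tests known to be unstable. Below is a minimal sketch of how such a marker annotation can be declared; it is an illustrative assumption, not the exact declaration from the distributedlog source, beyond the fact that FlakyTest is nested inside DistributedLogAnnotations.

package com.twitter.distributedlog.annotations;

public class DistributedLogAnnotations {
    // Marker annotation for tests known to be flaky; test runners or CI
    // configuration can use it to exclude or retry annotated tests.
    // Sketch only: the real declaration may carry retention/target metadata.
    public @interface FlakyTest {}
}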