Use of org.apache.bookkeeper.bookie.Journal.LastLogMark in project bookkeeper by apache.
Class BookieJournalForceTest, method testAckAfterSync.
@Test
public void testAckAfterSync() throws Exception {
    File journalDir = tempDir.newFolder();
    Bookie.checkDirectoryStructure(Bookie.getCurrentDirectory(journalDir));
    ServerConfiguration conf = TestBKConfiguration.newServerConfiguration();
    conf.setJournalDirName(journalDir.getPath())
        .setZkServers(null);
    JournalChannel jc = spy(new JournalChannel(journalDir, 1));
    whenNew(JournalChannel.class).withAnyArguments().thenReturn(jc);
    LedgerDirsManager ledgerDirsManager = mock(LedgerDirsManager.class);
    Journal journal = new Journal(0, journalDir, conf, ledgerDirsManager);
    // machinery to suspend the ForceWriteThread
    CountDownLatch forceWriteThreadSuspendedLatch = new CountDownLatch(1);
    LinkedBlockingQueue<ForceWriteRequest> supportQueue =
        enableForceWriteThreadSuspension(forceWriteThreadSuspendedLatch, journal);
    journal.start();
    LogMark lastLogMarkBeforeWrite = journal.getLastLogMark().markLog().getCurMark();
    CountDownLatch latch = new CountDownLatch(1);
    long ledgerId = 1;
    long entryId = 0;
    journal.logAddEntry(ledgerId, entryId, DATA, false /* ackBeforeSync */,
        new WriteCallback() {
            @Override
            public void writeComplete(int rc, long ledgerId, long entryId,
                                      BookieSocketAddress addr, Object ctx) {
                latch.countDown();
            }
        }, null);
    // wait until an entry is written to the ForceWriteThread queue
    while (supportQueue.isEmpty()) {
        Thread.sleep(100);
    }
    assertEquals(1, latch.getCount());
    assertEquals(1, supportQueue.size());
    // the JournalChannel constructor calls forceWrite(true), but PowerMock does not track it
    // because the spy is applied only after the constructor returns
    verify(jc, times(0)).forceWrite(true);
    // let the ForceWriteThread work
    forceWriteThreadSuspendedLatch.countDown();
    // the callback should complete now
    assertTrue(latch.await(20, TimeUnit.SECONDS));
    verify(jc, atLeast(1)).forceWrite(false);
    assertEquals(0, supportQueue.size());
    // verify that the log marker advanced
    LastLogMark lastLogMarkAfterForceWrite = journal.getLastLogMark();
    assertTrue(lastLogMarkAfterForceWrite.getCurMark().compare(lastLogMarkBeforeWrite) > 0);
    journal.shutdown();
}
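The helper enableForceWriteThreadSuspension used by these BookieJournalForceTest methods is not part of this listing. A minimal sketch of what it might look like, assuming the Journal keeps its pending requests in an internal BlockingQueue field named "forceWriteRequests" that PowerMock's Whitebox can swap out (an assumption about Journal internals), is:

    // Sketch only: the tests above call this helper, but the listing does not include it.
    // It replaces the Journal's internal force-write queue with a mock that parks every
    // request in supportQueue and blocks take() until the latch is released, which
    // effectively suspends the ForceWriteThread.
    @SuppressWarnings("unchecked")
    private LinkedBlockingQueue<ForceWriteRequest> enableForceWriteThreadSuspension(
            CountDownLatch forceWriteThreadSuspendedLatch,
            Journal journal) throws InterruptedException {
        LinkedBlockingQueue<ForceWriteRequest> supportQueue = new LinkedBlockingQueue<>();
        BlockingQueue<ForceWriteRequest> forceWriteRequests = mock(BlockingQueue.class);
        doAnswer(invocation -> {
            // divert every queued request into the support queue visible to the test
            supportQueue.put((ForceWriteRequest) invocation.getArguments()[0]);
            return null;
        }).when(forceWriteRequests).put(any(ForceWriteRequest.class));
        when(forceWriteRequests.take()).thenAnswer(invocation -> {
            // hold the ForceWriteThread here until the test counts the latch down
            forceWriteThreadSuspendedLatch.await();
            return supportQueue.take();
        });
        Whitebox.setInternalState(journal, "forceWriteRequests", forceWriteRequests);
        return supportQueue;
    }

With this wiring the test can observe queued ForceWriteRequests through supportQueue while the ForceWriteThread is parked, and release the thread at the exact moment it wants the fsync to happen.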
Use of org.apache.bookkeeper.bookie.Journal.LastLogMark in project bookkeeper by apache.
Class CheckpointOnNewLedgersTest, method testCheckpoint.
@Test
public void testCheckpoint() throws Exception {
    int entrySize = 1024;
    long l1 = 1L;
    long l2 = 2L;
    final CountDownLatch writeL1Latch = new CountDownLatch(1);
    Thread t1 = new Thread(() -> {
        ByteBuf entry = createByteBuf(l1, 0L, entrySize);
        try {
            bookie.addEntry(entry, false,
                (rc, ledgerId, entryId, addr, ctx) -> writeL1Latch.countDown(), null, new byte[0]);
        } catch (Exception e) {
            log.info("Failed to write entry to l1", e);
        }
    }, "ledger-1-writer");
    t1.start();
    // wait until the ledger descriptor is opened
    getLedgerDescCalledLatch.await();
    LastLogMark logMark = bookie.journals.get(0).getLastLogMark().markLog();
    // keep writing entries to l2 to trigger entry log rolling and a checkpoint
    int numEntries = 10;
    final CountDownLatch writeL2Latch = new CountDownLatch(numEntries);
    for (int i = 0; i < numEntries; i++) {
        ByteBuf entry = createByteBuf(l2, i, entrySize);
        bookie.addEntry(entry, false,
            (rc, ledgerId, entryId, addr, ctx) -> writeL2Latch.countDown(), null, new byte[0]);
    }
    writeL2Latch.await();
    // wait until the checkpoint completes and the journal marker is rolled
    bookie.syncThread.getExecutor().submit(() -> {
    }).get();
    log.info("Wait until checkpoint is completed");
    // the journal mark is rolled
    LastLogMark newLogMark = bookie.journals.get(0).getLastLogMark().markLog();
    assertTrue(newLogMark.getCurMark().compare(logMark.getCurMark()) > 0);
    // resume the l1-writer so it continues writing its entry
    getLedgerDescWaitLatch.countDown();
    // wait until the l1 entry is written
    writeL1Latch.await();
    t1.join();
    // construct a new bookie to simulate a "bookie restart after a crash"
    Bookie newBookie = new Bookie(conf);
    newBookie.start();
    for (int i = 0; i < numEntries; i++) {
        ByteBuf entry = newBookie.readEntry(l2, i);
        assertNotNull(entry);
        assertEquals(l2, entry.readLong());
        assertEquals((long) i, entry.readLong());
        entry.release();
    }
    ByteBuf entry = newBookie.readEntry(l1, 0L);
    assertNotNull(entry);
    assertEquals(l1, entry.readLong());
    assertEquals(0L, entry.readLong());
    entry.release();
    newBookie.shutdown();
}
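The createByteBuf helper is also not shown in this listing. A minimal sketch, assuming each entry of entrySize bytes starts with the ledger id and the entry id as two longs (which is exactly what the read-back assertions above expect), could be:

    // Sketch only: builds a Netty ByteBuf (io.netty.buffer.Unpooled) of entrySize bytes
    // whose first 16 bytes encode the ledger id and the entry id.
    private static ByteBuf createByteBuf(long ledgerId, long entryId, int entrySize) {
        byte[] data = new byte[entrySize];
        ThreadLocalRandom.current().nextBytes(data);   // payload content does not matter
        ByteBuf buffer = Unpooled.wrappedBuffer(data);
        buffer.writerIndex(0);
        buffer.writeLong(ledgerId);   // readEntry assertions read this back first
        buffer.writeLong(entryId);    // then the entry id
        buffer.writerIndex(entrySize);
        return buffer;
    }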
Use of org.apache.bookkeeper.bookie.Journal.LastLogMark in project bookkeeper by apache.
Class BookieJournalForceTest, method testAckBeforeSyncWithJournalBufferedEntriesThreshold.
@Test
public void testAckBeforeSyncWithJournalBufferedEntriesThreshold() throws Exception {
    File journalDir = tempDir.newFolder();
    Bookie.checkDirectoryStructure(Bookie.getCurrentDirectory(journalDir));
    final int journalBufferedEntriesThreshold = 10;
    // send a burst of entries larger than journalBufferedEntriesThreshold
    final int numEntries = journalBufferedEntriesThreshold + 50;
    ServerConfiguration conf = TestBKConfiguration.newServerConfiguration();
    conf.setJournalDirName(journalDir.getPath())
        .setJournalBufferedEntriesThreshold(journalBufferedEntriesThreshold)
        .setZkServers(null);
    JournalChannel jc = spy(new JournalChannel(journalDir, 1));
    whenNew(JournalChannel.class).withAnyArguments().thenReturn(jc);
    LedgerDirsManager ledgerDirsManager = mock(LedgerDirsManager.class);
    Journal journal = new Journal(0, journalDir, conf, ledgerDirsManager);
    // machinery to suspend the ForceWriteThread
    CountDownLatch forceWriteThreadSuspendedLatch = new CountDownLatch(1);
    enableForceWriteThreadSuspension(forceWriteThreadSuspendedLatch, journal);
    TestStatsProvider testStatsProvider = new TestStatsProvider();
    Counter flushMaxOutstandingBytesCounter = testStatsProvider.getStatsLogger("test")
        .getCounter("flushMaxOutstandingBytesCounter");
    Whitebox.setInternalState(journal, "flushMaxOutstandingBytesCounter", flushMaxOutstandingBytesCounter);
    journal.start();
    LogMark lastLogMarkBeforeWrite = journal.getLastLogMark().markLog().getCurMark();
    CountDownLatch latch = new CountDownLatch(numEntries);
    long ledgerId = 1;
    for (long entryId = 0; entryId < numEntries; entryId++) {
        journal.logAddEntry(ledgerId, entryId, DATA, true /* ackBeforeSync */,
            new WriteCallback() {
                @Override
                public void writeComplete(int rc, long ledgerId, long entryId,
                                          BookieSocketAddress addr, Object ctx) {
                    latch.countDown();
                }
            }, null);
    }
    // logAddEntry should complete even while the ForceWriteThread is suspended
    latch.await(20, TimeUnit.SECONDS);
    // the JournalChannel constructor calls forceWrite(true), but PowerMock does not track it
    // because the spy is applied only after the constructor returns
    verify(jc, times(0)).forceWrite(true);
    // in any case forceWrite(false) is never called
    verify(jc, times(0)).forceWrite(false);
    // verify that the log marker did not advance
    LastLogMark lastLogMarkAfterForceWrite = journal.getLastLogMark();
    assertEquals(0, lastLogMarkAfterForceWrite.getCurMark().compare(lastLogMarkBeforeWrite));
    // let the ForceWriteThread exit
    forceWriteThreadSuspendedLatch.countDown();
    assertTrue(flushMaxOutstandingBytesCounter.get() > 1);
    journal.shutdown();
}
Use of org.apache.bookkeeper.bookie.Journal.LastLogMark in project bookkeeper by apache.
Class BookieJournalForceTest, method testAckBeforeSync.
@Test
public void testAckBeforeSync() throws Exception {
    File journalDir = tempDir.newFolder();
    Bookie.checkDirectoryStructure(Bookie.getCurrentDirectory(journalDir));
    ServerConfiguration conf = TestBKConfiguration.newServerConfiguration();
    conf.setJournalDirName(journalDir.getPath())
        .setZkServers(null);
    JournalChannel jc = spy(new JournalChannel(journalDir, 1));
    whenNew(JournalChannel.class).withAnyArguments().thenReturn(jc);
    LedgerDirsManager ledgerDirsManager = mock(LedgerDirsManager.class);
    Journal journal = new Journal(0, journalDir, conf, ledgerDirsManager);
    // machinery to suspend the ForceWriteThread
    CountDownLatch forceWriteThreadSuspendedLatch = new CountDownLatch(1);
    enableForceWriteThreadSuspension(forceWriteThreadSuspendedLatch, journal);
    journal.start();
    LogMark lastLogMarkBeforeWrite = journal.getLastLogMark().markLog().getCurMark();
    CountDownLatch latch = new CountDownLatch(1);
    long ledgerId = 1;
    long entryId = 0;
    journal.logAddEntry(ledgerId, entryId, DATA, true /* ackBeforeSync */,
        new WriteCallback() {
            @Override
            public void writeComplete(int rc, long ledgerId, long entryId,
                                      BookieSocketAddress addr, Object ctx) {
                latch.countDown();
            }
        }, null);
    // logAddEntry should complete even while the ForceWriteThread is suspended
    latch.await(20, TimeUnit.SECONDS);
    // the JournalChannel constructor calls forceWrite(true), but PowerMock does not track it
    // because the spy is applied only after the constructor returns
    verify(jc, times(0)).forceWrite(true);
    // forceWrite(false) is never called either
    verify(jc, times(0)).forceWrite(false);
    // verify that the log marker did not advance
    LastLogMark lastLogMarkAfterForceWrite = journal.getLastLogMark();
    assertEquals(0, lastLogMarkAfterForceWrite.getCurMark().compare(lastLogMarkBeforeWrite));
    // let the ForceWriteThread exit
    forceWriteThreadSuspendedLatch.countDown();
    journal.shutdown();
}
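All of these assertions hinge on LogMark.compare deciding whether one journal mark is ahead of another. As a rough, hypothetical illustration (assuming a mark is the pair of journal file id and offset within that file, which matches how the tests use it), the ordering behaves like this:

    // Standalone illustration with hypothetical names, not the project's source:
    // a mark is "greater" if it points to a later journal file, or to a later
    // offset within the same journal file.
    final class LogMarkOrderingSketch {
        static int compare(long fileIdA, long offsetA, long fileIdB, long offsetB) {
            long ret = (fileIdA != fileIdB) ? fileIdA - fileIdB : offsetA - offsetB;
            return (ret < 0) ? -1 : ((ret > 0) ? 1 : 0);
        }
    }

Under this ordering, compare(...) == 0 in the ackBeforeSync tests means the mark never moved because no fsync happened, while compare(...) > 0 in testAckAfterSync means the ForceWriteThread synced and advanced the mark.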
Use of org.apache.bookkeeper.bookie.Journal.LastLogMark in project bookkeeper by apache.
Class LedgerStorageCheckpointTest, method testPeriodicCheckpointForLedgerStorage.
public void testPeriodicCheckpointForLedgerStorage(String ledgerStorageClassName) throws Exception {
    File tmpDir = createTempDir("DiskCheck", "test");
    final ServerConfiguration conf = TestBKConfiguration.newServerConfiguration()
        .setZkServers(zkUtil.getZooKeeperConnectString())
        .setZkTimeout(5000)
        .setJournalDirName(tmpDir.getPath())
        .setLedgerDirNames(new String[] { tmpDir.getPath() })
        .setAutoRecoveryDaemonEnabled(false)
        .setFlushInterval(2000)
        .setBookiePort(PortManager.nextFreePort())
        .setEntryLogPerLedgerEnabled(true)
        .setLedgerStorageClass(ledgerStorageClassName);
    Assert.assertEquals("Number of JournalDirs", 1, conf.getJournalDirs().length);
    // we know there is only one ledgerDir
    File ledgerDir = Bookie.getCurrentDirectories(conf.getLedgerDirs())[0];
    BookieServer server = new BookieServer(conf);
    server.start();
    ClientConfiguration clientConf = new ClientConfiguration();
    clientConf.setZkServers(zkUtil.getZooKeeperConnectString());
    BookKeeper bkClient = new BookKeeper(clientConf);
    int numOfLedgers = 2;
    int numOfEntries = 5;
    byte[] dataBytes = "data".getBytes();
    for (int i = 0; i < numOfLedgers; i++) {
        int ledgerIndex = i;
        LedgerHandle handle = bkClient.createLedgerAdv((long) i, 1, 1, 1, DigestType.CRC32,
            "passwd".getBytes(), null);
        for (int j = 0; j < numOfEntries; j++) {
            handle.addEntry(j, dataBytes);
        }
        handle.close();
    }
    LastLogMark lastLogMarkAfterFirstSetOfAdds = server.getBookie().journals.get(0).getLastLogMark();
    LogMark curMarkAfterFirstSetOfAdds = lastLogMarkAfterFirstSetOfAdds.getCurMark();
    File lastMarkFile = new File(ledgerDir, "lastMark");
    // the lastMark file should be zero, because no checkpoint has happened yet
    LogMark logMarkFileBeforeCheckpoint = readLastMarkFile(lastMarkFile);
    Assert.assertEquals("lastMarkFile before checkpoint should be zero", 0,
        logMarkFileBeforeCheckpoint.compare(new LogMark()));
    // wait for flushInterval so the SyncThread does its next checkpoint iteration
    executorController.advance(Duration.ofMillis(conf.getFlushInterval()));
    /*
     * since we have waited for more than flushInterval, the SyncThread should
     * have checkpointed. If entryLogPerLedger is not enabled, we checkpoint
     * only when the currentLog in EntryLogger is rotated; but if
     * entryLogPerLedger is enabled, we checkpoint every flushInterval period.
     */
    Assert.assertTrue("lastMark file must exist, because a checkpoint should have happened",
        lastMarkFile.exists());
    LastLogMark lastLogMarkAfterCheckpoint = server.getBookie().journals.get(0).getLastLogMark();
    LogMark curMarkAfterCheckpoint = lastLogMarkAfterCheckpoint.getCurMark();
    LogMark rolledLogMark = readLastMarkFile(lastMarkFile);
    Assert.assertNotEquals("rolledLogMark should not be zero, since a checkpoint has happened", 0,
        rolledLogMark.compare(new LogMark()));
    /*
     * curMark should be equal before and after the checkpoint, because we didn't
     * add new entries during this period.
     */
    Assert.assertTrue("Curmark should be equal before and after checkpoint",
        curMarkAfterCheckpoint.compare(curMarkAfterFirstSetOfAdds) == 0);
    /*
     * curMark after the checkpoint should be equal to the rolled logmark,
     * because we checkpointed.
     */
    Assert.assertTrue("Curmark after first set of adds should be equal to rolled logmark",
        curMarkAfterCheckpoint.compare(rolledLogMark) == 0);
    // add more ledgers/entries
    for (int i = numOfLedgers; i < 2 * numOfLedgers; i++) {
        int ledgerIndex = i;
        LedgerHandle handle = bkClient.createLedgerAdv((long) i, 1, 1, 1, DigestType.CRC32,
            "passwd".getBytes(), null);
        for (int j = 0; j < numOfEntries; j++) {
            handle.addEntry(j, dataBytes);
        }
        handle.close();
    }
    // wait for flushInterval so the SyncThread does its next checkpoint iteration
    executorController.advance(Duration.ofMillis(conf.getFlushInterval()));
    LastLogMark lastLogMarkAfterSecondSetOfAdds = server.getBookie().journals.get(0).getLastLogMark();
    LogMark curMarkAfterSecondSetOfAdds = lastLogMarkAfterSecondSetOfAdds.getCurMark();
    rolledLogMark = readLastMarkFile(lastMarkFile);
    /*
     * curMark after the checkpoint should be equal to the rolled logmark,
     * because we checkpointed.
     */
    Assert.assertTrue("Curmark after second set of adds should be equal to rolled logmark",
        curMarkAfterSecondSetOfAdds.compare(rolledLogMark) == 0);
    server.shutdown();
    bkClient.close();
}
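readLastMarkFile is another helper that is not included in the listing. A minimal sketch, assuming the lastMark file persisted by the SyncThread holds a 16-byte serialized LogMark (log file id plus offset) and that LogMark.readLogMark(ByteBuffer) deserializes it (an assumption about the LogMark API), could be:

    // Sketch only: reads the on-disk lastMark file written at checkpoint time and
    // turns it back into a LogMark for the comparisons in the test above.
    private LogMark readLastMarkFile(File lastMarkFile) throws IOException {
        byte[] buff = new byte[16];
        ByteBuffer bb = ByteBuffer.wrap(buff);
        LogMark mark = new LogMark();
        try (FileInputStream fis = new FileInputStream(lastMarkFile)) {
            int read = fis.read(buff);
            if (read != 16) {
                throw new IOException("Short read from lastMark file: " + read + " bytes");
            }
        }
        mark.readLogMark(bb);   // assumed deserialization entry point
        return mark;
    }

Before any checkpoint the file contains all zeros, which is why the test expects the mark read back to compare equal to a freshly constructed LogMark.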