Use of org.apache.bookkeeper.client.LedgerHandle in project bookkeeper by apache.
The class BookieJournalNoSyncTest, method testWriteToJournal.
@Test
public void testWriteToJournal() throws Exception {
    LedgerHandle lh = bkc.createLedger(1, 1, DigestType.CRC32, new byte[0]);
    int n = 10;
    long ledgerId = lh.getId();
    for (int i = 0; i < n; i++) {
        lh.addEntry(("entry-" + i).getBytes());
    }
    restartBookies();
    LedgerHandle readLh = bkc.openLedger(ledgerId, DigestType.CRC32, new byte[0]);
    Enumeration<LedgerEntry> entries = readLh.readEntries(0, n - 1);
    for (int i = 0; i < n; i++) {
        LedgerEntry entry = entries.nextElement();
        assertEquals("entry-" + i, new String(entry.getEntry()));
    }
}
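The body of BookieJournalNoSyncTest is not shown here, but the behavior under test depends on the bookie journal acknowledging writes without fsync. A minimal sketch of the setup this implies, assuming the test extends BookKeeperClusterTestCase and uses the standard journalSyncData setter on ServerConfiguration:

public class BookieJournalNoSyncTest extends BookKeeperClusterTestCase {

    public BookieJournalNoSyncTest() {
        super(1);
        // Assumption: disable fsync on journal writes, so entries are acknowledged
        // once handed to the journal rather than after being forced to disk.
        baseConf.setJournalSyncData(false);
    }
}

The read-back after restartBookies() presumably still succeeds because a graceful restart flushes outstanding data; the no-sync mode only drops the per-write fsync.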
Use of org.apache.bookkeeper.client.LedgerHandle in project bookkeeper by apache.
The class CompactionTest, method testMajorCompactionAboveThreshold.
@Test
public void testMajorCompactionAboveThreshold() throws Exception {
    // prepare data
    LedgerHandle[] lhs = prepareData(3, false);
    for (LedgerHandle lh : lhs) {
        lh.close();
    }
    long lastMinorCompactionTime = getGCThread().lastMinorCompactionTime;
    long lastMajorCompactionTime = getGCThread().lastMajorCompactionTime;
    assertTrue(getGCThread().enableMajorCompaction);
    assertTrue(getGCThread().enableMinorCompaction);
    // remove ledger1 and ledger2
    bkc.deleteLedger(lhs[0].getId());
    bkc.deleteLedger(lhs[1].getId());
    LOG.info("Finished deleting the ledgers containing fewer entries.");
    getGCThread().enableForceGC();
    getGCThread().triggerGC().get();
    // garbage collection should have run both compaction passes, so both timestamps advance
    assertTrue(getGCThread().lastMinorCompactionTime > lastMinorCompactionTime);
    assertTrue(getGCThread().lastMajorCompactionTime > lastMajorCompactionTime);
    // entry logs ([0,1,2].log) should not be compacted: remaining usage is above the threshold
    for (File ledgerDirectory : tmpDirs) {
        assertTrue("Entry log files ([0,1,2].log) should not have been compacted in ledgerDirectory: "
                + ledgerDirectory, TestUtils.hasLogFiles(ledgerDirectory, false, 0, 1, 2));
    }
}
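prepareData(int, boolean) is a CompactionTest helper that is not part of this snippet. A hypothetical sketch of such a helper, with the entry counts and the meaning of the boolean chosen purely for illustration: it creates three ledgers and skews how many entries each contributes, so the ledgers occupy different fractions of the shared entry logs.

// Hypothetical sketch of a prepareData-style helper (assumes numLedgers == 3).
private LedgerHandle[] prepareData(int numLedgers, boolean evenDistribution) throws Exception {
    LedgerHandle[] lhs = new LedgerHandle[numLedgers];
    for (int i = 0; i < numLedgers; i++) {
        lhs[i] = bkc.createLedger(1, 1, DigestType.CRC32, new byte[0]);
    }
    // Skewed counts make some ledgers fall below and others above the
    // compaction thresholds once their peers are deleted (values assumed).
    int[] entriesPerLedger = evenDistribution
            ? new int[] { 100, 100, 100 }
            : new int[] { 10, 40, 250 };
    for (int i = 0; i < numLedgers; i++) {
        for (int j = 0; j < entriesPerLedger[i]; j++) {
            lhs[i].addEntry(("ledger-" + i + "-entry-" + j).getBytes());
        }
    }
    return lhs;
}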
Use of org.apache.bookkeeper.client.LedgerHandle in project bookkeeper by apache.
The class CompactionTest, method testRecoverIndexWhenIndexIsPartiallyFlush.
@Test
public void testRecoverIndexWhenIndexIsPartiallyFlush() throws Exception {
    // prepare data
    LedgerHandle[] lhs = prepareData(3, false);
    for (LedgerHandle lh : lhs) {
        lh.close();
    }
    // disable compaction
    baseConf.setMinorCompactionThreshold(0.0f);
    baseConf.setMajorCompactionThreshold(0.0f);
    baseConf.setGcWaitTime(600000);
    // restart bookies
    restartBookies(baseConf);
    Bookie bookie = bs.get(0).getBookie();
    InterleavedLedgerStorage storage = (InterleavedLedgerStorage) bookie.ledgerStorage;
    // remove ledger2 and ledger3
    bkc.deleteLedger(lhs[1].getId());
    bkc.deleteLedger(lhs[2].getId());
    LOG.info("Finished deleting the ledgers containing the most entries.");
    MockTransactionalEntryLogCompactor partialCompactionWorker = new MockTransactionalEntryLogCompactor(storage.gcThread);
    for (long logId = 0; logId < 3; logId++) {
        EntryLogMetadata meta = storage.entryLogger.getEntryLogMetadata(logId);
        partialCompactionWorker.compactWithIndexFlushFailure(meta);
    }
    // entry logs ([0,1,2].log) should not be compacted, because the partial index flush throws an IOException
    for (File ledgerDirectory : tmpDirs) {
        assertTrue("Entry log files ([0,1,2].log) should not have been compacted in ledgerDirectory: "
                + ledgerDirectory, TestUtils.hasLogFiles(ledgerDirectory, true, 0, 1, 2));
    }
    // entries should be available
    verifyLedger(lhs[0].getId(), 0, lhs[0].getLastAddConfirmed());
    // but we should see a .compacted file for each index flush failure
    assertEquals(3, findCompactedEntryLogFiles().size());
    // now try to recover those flush-failed index files
    partialCompactionWorker.cleanUpAndRecover();
    // there should be no .compacted files after recovery
    assertEquals(0, findCompactedEntryLogFiles().size());
    // the compaction worker should recover the partially flushed index and delete [0,1,2].log
    for (File ledgerDirectory : tmpDirs) {
        assertFalse("Entry log files ([0,1,2].log) should have been compacted in ledgerDirectory: "
                + ledgerDirectory, TestUtils.hasLogFiles(ledgerDirectory, true, 0, 1, 2));
    }
    // even though the entry log files are removed, we can still access the entries of ledger1,
    // since those entries have been compacted into a new entry log
    verifyLedger(lhs[0].getId(), 0, lhs[0].getLastAddConfirmed());
}
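findCompactedEntryLogFiles() is another helper that does not appear above. A minimal sketch, assuming the transactional compactor leaves its intermediate output as files ending in ".compacted" under each ledger directory's current subdirectory (the suffix and location are assumptions):

// Sketch: collect leftover ".compacted" files across all ledger directories.
private Set<File> findCompactedEntryLogFiles() {
    Set<File> compacted = new HashSet<>();
    for (File ledgerDirectory : tmpDirs) {
        File[] files = Bookie.getCurrentDirectory(ledgerDirectory)
                .listFiles(f -> f.getName().endsWith(".compacted"));
        if (files != null) {
            Collections.addAll(compacted, files);
        }
    }
    return compacted;
}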
Use of org.apache.bookkeeper.client.LedgerHandle in project bookkeeper by apache.
The class CompactionTest, method testMinorCompactionWithNoWritableLedgerDirs.
@Test
public void testMinorCompactionWithNoWritableLedgerDirs() throws Exception {
    // prepare data
    LedgerHandle[] lhs = prepareData(3, false);
    for (LedgerHandle lh : lhs) {
        lh.close();
    }
    // disable major compaction
    baseConf.setMajorCompactionThreshold(0.0f);
    baseConf.setGcWaitTime(60000);
    baseConf.setMinorCompactionInterval(120000);
    baseConf.setMajorCompactionInterval(240000);
    // restart bookies
    restartBookies(baseConf);
    long lastMinorCompactionTime = getGCThread().lastMinorCompactionTime;
    long lastMajorCompactionTime = getGCThread().lastMajorCompactionTime;
    assertFalse(getGCThread().enableMajorCompaction);
    assertTrue(getGCThread().enableMinorCompaction);
    for (BookieServer bookieServer : bs) {
        Bookie bookie = bookieServer.getBookie();
        LedgerDirsManager ledgerDirsManager = bookie.getLedgerDirsManager();
        List<File> ledgerDirs = ledgerDirsManager.getAllLedgerDirs();
        // 'isForceGCAllowWhenNoSpace' is not enabled; marking every ledger dir as filled
        // makes the LedgerDirsListener of InterleavedLedgerStorage suspend compaction
        for (File ledgerDir : ledgerDirs) {
            ledgerDirsManager.addToFilledDirs(ledgerDir);
        }
    }
    // remove ledger2 and ledger3
    bkc.deleteLedger(lhs[1].getId());
    bkc.deleteLedger(lhs[2].getId());
    LOG.info("Finished deleting the ledgers containing the most entries.");
    getGCThread().triggerGC().get();
    // after garbage collection, neither major nor minor compaction should have been executed
    assertEquals(lastMajorCompactionTime, getGCThread().lastMajorCompactionTime);
    assertEquals(lastMinorCompactionTime, getGCThread().lastMinorCompactionTime);
    // entry logs ([0,1,2].log) should still remain: major compaction is disabled and
    // minor compaction is suspended while no ledger dir is writable
    for (File ledgerDirectory : tmpDirs) {
        assertTrue("Entry log files ([0,1,2].log) should still exist in ledgerDirectory: "
                + ledgerDirectory, TestUtils.hasLogFiles(ledgerDirectory, false, 0, 1, 2));
    }
}
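The reason compaction stays idle here is that a fully filled directory list leaves the compactor no writable destination for the rewritten entries. A sketch of the kind of guard involved, assuming LedgerDirsManager.getWritableLedgerDirs() throws NoWritableLedgerDirException once every dir has been added to the filled set (the method name and shape below are assumptions, not the project's actual code):

// Sketch of a pre-compaction guard.
private boolean compactionAllowed(LedgerDirsManager dirsManager, boolean forceAllowWhenNoSpace) {
    if (forceAllowWhenNoSpace) {
        // 'isForceGCAllowWhenNoSpace' would let compaction proceed regardless.
        return true;
    }
    try {
        dirsManager.getWritableLedgerDirs();
        return true;
    } catch (NoWritableLedgerDirException e) {
        // Nowhere to write compacted entries: skip this compaction pass.
        return false;
    }
}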
Use of org.apache.bookkeeper.client.LedgerHandle in project bookkeeper by apache.
The class CompactionTest, method testMajorCompaction.
@Test
public void testMajorCompaction() throws Exception {
    // prepare data
    LedgerHandle[] lhs = prepareData(3, true);
    for (LedgerHandle lh : lhs) {
        lh.close();
    }
    // disable minor compaction
    baseConf.setMinorCompactionThreshold(0.0f);
    baseConf.setGcWaitTime(60000);
    baseConf.setMinorCompactionInterval(120000);
    baseConf.setMajorCompactionInterval(240000);
    // restart bookies
    restartBookies(baseConf);
    long lastMinorCompactionTime = getGCThread().lastMinorCompactionTime;
    long lastMajorCompactionTime = getGCThread().lastMajorCompactionTime;
    assertTrue(getGCThread().enableMajorCompaction);
    assertFalse(getGCThread().enableMinorCompaction);
    // remove ledger1 and ledger3
    bkc.deleteLedger(lhs[0].getId());
    bkc.deleteLedger(lhs[2].getId());
    LOG.info("Finished deleting the ledgers containing the most entries.");
    getGCThread().enableForceGC();
    getGCThread().triggerGC().get();
    // garbage collection should have run a major compaction, which also refreshes
    // the minor compaction timestamp, so both timestamps advance
    assertTrue(getGCThread().lastMinorCompactionTime > lastMinorCompactionTime);
    assertTrue(getGCThread().lastMajorCompactionTime > lastMajorCompactionTime);
    // entry logs ([0,1,2].log) should be compacted
    for (File ledgerDirectory : tmpDirs) {
        assertFalse("Entry log files ([0,1,2].log) should have been compacted in ledgerDirectory: "
                + ledgerDirectory, TestUtils.hasLogFiles(ledgerDirectory, true, 0, 1, 2));
    }
    // even though the entry log files are removed, we can still access the entries of ledger2,
    // since those entries have been compacted into a new entry log
    verifyLedger(lhs[1].getId(), 0, lhs[1].getLastAddConfirmed());
}
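verifyLedger(long, long, long) is likewise a test helper not shown here. A minimal sketch using the same client API as the snippets above; it only checks that every entry in the range is readable, since the exact payloads depend on what prepareData wrote:

// Sketch: re-open the ledger and confirm entries [startEntryId, endEntryId] are readable.
private void verifyLedger(long ledgerId, long startEntryId, long endEntryId) throws Exception {
    LedgerHandle lh = bkc.openLedger(ledgerId, DigestType.CRC32, new byte[0]);
    Enumeration<LedgerEntry> entries = lh.readEntries(startEntryId, endEntryId);
    long count = 0;
    while (entries.hasMoreElements()) {
        assertNotNull(entries.nextElement().getEntry());
        count++;
    }
    assertEquals(endEntryId - startEntryId + 1, count);
    lh.close();
}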