Usage example of org.apache.bookkeeper.util.DiskChecker in the Apache BookKeeper project: class BookieInitializationTest, method testWithDiskError.
/**
 * Verify that pointing the journal/ledger dirs at a regular file (not a
 * directory) makes ledger-dirs-monitor initialization fail. Expected to
 * throw DiskErrorException.
 */
@Test
public void testWithDiskError() throws Exception {
    File tmpParent = createTempDir("DiskCheck", "test");
    // A plain file used where a directory is required, to provoke the error.
    File badDir = File.createTempFile("DiskCheck", "test", tmpParent);
    final ServerConfiguration conf = TestBKConfiguration.newServerConfiguration()
            .setJournalDirName(badDir.getPath())
            .setLedgerDirNames(new String[] { badDir.getPath() });
    conf.setZkServers(zkUtil.getZooKeeperConnectString()).setZkTimeout(5000);
    try {
        // LedgerDirsManager#init() is used in Bookie instantiation.
        // Simulating disk errors by directly calling #init
        DiskChecker checker =
                new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold());
        LedgerDirsManager dirsManager =
                new LedgerDirsManager(conf, conf.getLedgerDirs(), checker);
        LedgerDirsMonitor monitor = new LedgerDirsMonitor(conf,
                new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold()),
                dirsManager);
        monitor.init();
        fail("should throw exception");
    } catch (Exception e) {
        // expected
    }
}
Usage example of org.apache.bookkeeper.util.DiskChecker in the Apache BookKeeper project: class CompactionTest, method testExtractMetaFromEntryLogs.
/**
 * Test extractMetaFromEntryLogs optimized method to avoid excess memory usage.
 *
 * <p>NOTE(review): unlike the sibling tests, this method carries no {@code @Test}
 * annotation in this excerpt, and {@code entryLogMetaData} below is never
 * populated — the extraction/GC step between the two log-id snapshots appears
 * to have been lost; verify against the full source before relying on this.
 */
public void testExtractMetaFromEntryLogs() throws Exception {
    // Always run this test with Throttle enabled.
    baseConf.setIsThrottleByBytes(true);
    // restart bookies so the throttle setting takes effect
    restartBookies(baseConf);
    ServerConfiguration conf = TestBKConfiguration.newServerConfiguration();
    File tmpDir = createTempDir("bkTest", ".dir");
    File curDir = Bookie.getCurrentDirectory(tmpDir);
    Bookie.checkDirectoryStructure(curDir);
    conf.setLedgerDirNames(new String[] { tmpDir.toString() });
    LedgerDirsManager dirs = new LedgerDirsManager(conf, conf.getLedgerDirs(), new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold()));
    // Concurrent set of ledger ids handed to the stub ledger manager below.
    final Set<Long> ledgers = Collections.newSetFromMap(new ConcurrentHashMap<Long, Boolean>());
    LedgerManager manager = getLedgerManager(ledgers);
    // No-op checkpoint source: checkpointing is not exercised by this test.
    CheckpointSource checkpointSource = new CheckpointSource() {
        @Override
        public Checkpoint newCheckpoint() {
            return null;
        }
        @Override
        public void checkpointComplete(Checkpoint checkpoint, boolean compact) throws IOException {
        }
    };
    InterleavedLedgerStorage storage = new InterleavedLedgerStorage();
    storage.initialize(conf, manager, dirs, dirs, null, checkpointSource, Checkpointer.NULL, NullStatsLogger.INSTANCE);
    // Write 50 entries into each of ledgers 0..10 to populate entry logs.
    for (long ledger = 0; ledger <= 10; ledger++) {
        ledgers.add(ledger);
        for (int entry = 1; entry <= 50; entry++) {
            try {
                storage.addEntry(genEntry(ledger, entry, ENTRY_SIZE));
            } catch (IOException e) {
                // ignore exception on failure to add entry.
            }
        }
    }
    storage.flush();
    storage.shutdown();
    // Reopen storage so the GC thread sees the entry logs written above.
    storage = new InterleavedLedgerStorage();
    storage.initialize(conf, manager, dirs, dirs, null, checkpointSource, Checkpointer.NULL, NullStatsLogger.INSTANCE);
    long startingEntriesCount = storage.gcThread.entryLogger.getLeastUnflushedLogId() - storage.gcThread.scannedLogId;
    LOG.info("The old Log Entry count is: " + startingEntriesCount);
    // NOTE(review): this map is never filled and nothing runs between the two
    // snapshots, so both counts are computed identically here — the
    // extractMetaFromEntryLogs/GC invocation is presumably missing; confirm.
    Map<Long, EntryLogMetadata> entryLogMetaData = new HashMap<>();
    long finalEntriesCount = storage.gcThread.entryLogger.getLeastUnflushedLogId() - storage.gcThread.scannedLogId;
    LOG.info("The latest Log Entry count is: " + finalEntriesCount);
    assertTrue("The GC did not clean up entries...", startingEntriesCount != finalEntriesCount);
    assertTrue("Entries Count is zero", finalEntriesCount == 0);
}
Usage example of org.apache.bookkeeper.util.DiskChecker in the Apache BookKeeper project: class CompactionTest, method testForceGarbageCollection.
/**
 * Verify that forcing GC triggers both major and minor compaction even though
 * the periodic compaction intervals are set far in the future.
 */
@Test
public void testForceGarbageCollection() throws Exception {
    ServerConfiguration conf = newServerConfiguration();
    // Make periodic GC/compaction intervals long enough that only the
    // explicitly forced GC can run within this test.
    conf.setGcWaitTime(60000);
    conf.setMinorCompactionInterval(120000);
    conf.setMajorCompactionInterval(240000);
    LedgerDirsManager dirManager = new LedgerDirsManager(conf, conf.getLedgerDirs(),
            new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold()));
    // No-op checkpoint source: checkpointing is irrelevant to this test.
    CheckpointSource cp = new CheckpointSource() {
        @Override
        public Checkpoint newCheckpoint() {
            // Do nothing.
            return null;
        }
        @Override
        public void checkpointComplete(Checkpoint checkPoint, boolean compact) throws IOException {
            // Do nothing.
        }
    };
    for (File journalDir : conf.getJournalDirs()) {
        Bookie.checkDirectoryStructure(journalDir);
    }
    for (File dir : dirManager.getAllLedgerDirs()) {
        Bookie.checkDirectoryStructure(dir);
    }
    runFunctionWithLedgerManagerFactory(conf, lmf -> {
        try (LedgerManager lm = lmf.newLedgerManager()) {
            InterleavedLedgerStorage storage = new InterleavedLedgerStorage();
            storage.initialize(conf, lm, dirManager, dirManager, null, cp,
                    Checkpointer.NULL, NullStatsLogger.INSTANCE);
            storage.start();
            try {
                long startTime = MathUtils.now();
                storage.gcThread.enableForceGC();
                // major
                storage.gcThread.triggerGC().get();
                // minor
                storage.gcThread.triggerGC().get();
                // Minor and Major compaction times should be larger than when we started
                // this test.
                assertTrue("Minor or major compaction did not trigger even on forcing.",
                        storage.gcThread.lastMajorCompactionTime > startTime
                                && storage.gcThread.lastMinorCompactionTime > startTime);
            } finally {
                // Fix: shut down in a finally block. Previously a failing
                // assertTrue threw AssertionError (an Error, not caught by the
                // catch below), skipping shutdown and leaking the started GC
                // thread into subsequent tests.
                storage.shutdown();
            }
        } catch (Exception e) {
            throw new UncheckedExecutionException(e.getMessage(), e);
        }
        return null;
    });
}
Usage example of org.apache.bookkeeper.util.DiskChecker in the Apache BookKeeper project: class CompactionTest, method testSuspendGarbageCollection.
/**
 * Verify that major and minor GC can be suspended and resumed independently:
 * while major GC is suspended minor GC keeps running, and vice versa, and
 * both resume after resumeMajorGC/resumeMinorGC.
 *
 * <p>Relies on real sleeps sized from the configured compaction intervals, so
 * the supplied {@code conf} must use short intervals for this to stay fast.
 * NOTE(review): {@code storage} is never shut down in this method —
 * presumably the caller or test teardown owns cleanup; confirm.
 */
private void testSuspendGarbageCollection(ServerConfiguration conf, LedgerManager lm) throws Exception {
    LedgerDirsManager dirManager = new LedgerDirsManager(conf, conf.getLedgerDirs(), new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold()));
    // No-op checkpoint source: checkpointing is not under test here.
    CheckpointSource cp = new CheckpointSource() {
        @Override
        public Checkpoint newCheckpoint() {
            // Do nothing.
            return null;
        }
        @Override
        public void checkpointComplete(Checkpoint checkPoint, boolean compact) throws IOException {
            // Do nothing.
        }
    };
    for (File journalDir : conf.getJournalDirs()) {
        Bookie.checkDirectoryStructure(journalDir);
    }
    for (File dir : dirManager.getAllLedgerDirs()) {
        Bookie.checkDirectoryStructure(dir);
    }
    InterleavedLedgerStorage storage = new InterleavedLedgerStorage();
    TestStatsProvider stats = new TestStatsProvider();
    storage.initialize(conf, lm, dirManager, dirManager, null, cp, Checkpointer.NULL, stats.getStatsLogger("storage"));
    storage.start();
    // Baseline compaction counters before any waiting.
    int majorCompactions = stats.getCounter("storage.gc." + MAJOR_COMPACTION_COUNT).get().intValue();
    int minorCompactions = stats.getCounter("storage.gc." + MINOR_COMPACTION_COUNT).get().intValue();
    // Wait one full major interval (plus GC wait) for a major compaction.
    Thread.sleep(conf.getMajorCompactionInterval() * 1000 + conf.getGcWaitTime());
    assertTrue("Major compaction should have happened", stats.getCounter("storage.gc." + MAJOR_COMPACTION_COUNT).get() > majorCompactions);
    // test suspend Major GC.
    storage.gcThread.suspendMajorGC();
    Thread.sleep(1000);
    long startTime = MathUtils.now();
    majorCompactions = stats.getCounter("storage.gc." + MAJOR_COMPACTION_COUNT).get().intValue();
    Thread.sleep(conf.getMajorCompactionInterval() * 1000 + conf.getGcWaitTime());
    assertTrue("major compaction triggered while suspended", storage.gcThread.lastMajorCompactionTime < startTime);
    assertTrue("major compaction triggered while suspended", stats.getCounter("storage.gc." + MAJOR_COMPACTION_COUNT).get() == majorCompactions);
    // Minor GC must keep running while only major GC is suspended.
    // (Original comment said "test suspend Major GC" — copy/paste slip.)
    Thread.sleep(conf.getMinorCompactionInterval() * 1000 + conf.getGcWaitTime());
    assertTrue("Minor compaction should have happened", stats.getCounter("storage.gc." + MINOR_COMPACTION_COUNT).get() > minorCompactions);
    // test suspend Minor GC.
    storage.gcThread.suspendMinorGC();
    Thread.sleep(1000);
    startTime = MathUtils.now();
    minorCompactions = stats.getCounter("storage.gc." + MINOR_COMPACTION_COUNT).get().intValue();
    Thread.sleep(conf.getMajorCompactionInterval() * 1000 + conf.getGcWaitTime());
    assertTrue("minor compaction triggered while suspended", storage.gcThread.lastMinorCompactionTime < startTime);
    assertTrue("minor compaction triggered while suspended", stats.getCounter("storage.gc." + MINOR_COMPACTION_COUNT).get() == minorCompactions);
    // Resume both; expect both kinds of compaction to occur again.
    storage.gcThread.resumeMinorGC();
    storage.gcThread.resumeMajorGC();
    Thread.sleep((conf.getMajorCompactionInterval() + conf.getMinorCompactionInterval()) * 1000 + (conf.getGcWaitTime() * 2));
    assertTrue("Major compaction should have happened", stats.getCounter("storage.gc." + MAJOR_COMPACTION_COUNT).get() > majorCompactions);
    assertTrue("Minor compaction should have happened", stats.getCounter("storage.gc." + MINOR_COMPACTION_COUNT).get() > minorCompactions);
    assertTrue("gcThreadRunttime should be non-zero", stats.getOpStatsLogger("storage.gc." + THREAD_RUNTIME).getSuccessCount() > 0);
}
Usage example of org.apache.bookkeeper.util.DiskChecker in the Apache BookKeeper project: class IndexPersistenceMgrTest, method setUp.
/**
 * Create fresh journal and ledger directories (plus their "current"
 * subdirectories), then build and initialise the LedgerDirsManager and
 * LedgerDirsMonitor used by the tests.
 */
@Before
public void setUp() throws Exception {
    // Fix: create temp directories atomically. The previous
    // createTempFile()/delete()/mkdir() sequence is racy (another process can
    // grab the path between delete and mkdir) and silently ignored mkdir()
    // failures; Files.createTempDirectory also throws on failure.
    journalDir = java.nio.file.Files.createTempDirectory("IndexPersistenceMgrJournal").toFile();
    ledgerDir = java.nio.file.Files.createTempDirectory("IndexPersistenceMgrLedger").toFile();
    // Create current directories
    Bookie.getCurrentDirectory(journalDir).mkdir();
    Bookie.getCurrentDirectory(ledgerDir).mkdir();
    conf = new ServerConfiguration();
    conf.setZkServers(null);
    conf.setJournalDirName(journalDir.getPath());
    conf.setLedgerDirNames(new String[] { ledgerDir.getPath() });
    ledgerDirsManager = new LedgerDirsManager(conf, conf.getLedgerDirs(),
            new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold()));
    ledgerMonitor = new LedgerDirsMonitor(conf,
            new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold()),
            ledgerDirsManager);
    ledgerMonitor.init();
}
Aggregations