Use of org.apache.bookkeeper.util.DiskChecker in project bookkeeper by apache.
From the class ConversionTest, method test().
@Test
public void test() throws Exception {
    File tmpDir = File.createTempFile("bkTest", ".dir");
    tmpDir.delete();
    tmpDir.mkdir();
    File curDir = Bookie.getCurrentDirectory(tmpDir);
    Bookie.checkDirectoryStructure(curDir);
    System.out.println(tmpDir);
    ServerConfiguration conf = TestBKConfiguration.newServerConfiguration();
    conf.setLedgerDirNames(new String[] { tmpDir.toString() });
    LedgerDirsManager ledgerDirsManager = new LedgerDirsManager(conf, conf.getLedgerDirs(),
            new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold()));
    InterleavedLedgerStorage interleavedStorage = new InterleavedLedgerStorage();
    interleavedStorage.initialize(conf, null, ledgerDirsManager, ledgerDirsManager, null,
            checkpointSource, checkpointer, NullStatsLogger.INSTANCE);
    // Insert some ledgers and entries into the interleaved storage
    for (long ledgerId = 0; ledgerId < 5; ledgerId++) {
        interleavedStorage.setMasterKey(ledgerId, ("ledger-" + ledgerId).getBytes());
        interleavedStorage.setFenced(ledgerId);
        for (long entryId = 0; entryId < 10000; entryId++) {
            ByteBuf entry = Unpooled.buffer(128);
            entry.writeLong(ledgerId);
            entry.writeLong(entryId);
            entry.writeBytes(("entry-" + entryId).getBytes());
            interleavedStorage.addEntry(entry);
        }
    }
    interleavedStorage.flush();
    interleavedStorage.shutdown();
    // Run the conversion tool
    BookieShell shell = new BookieShell();
    shell.setConf(conf);
    int res = shell.run(new String[] { "convert-to-db-storage" });
    Assert.assertEquals(0, res);
    // Verify that the db index has the same entries
    DbLedgerStorage dbStorage = new DbLedgerStorage();
    dbStorage.initialize(conf, null, ledgerDirsManager, ledgerDirsManager, null,
            checkpointSource, checkpointer, NullStatsLogger.INSTANCE);
    interleavedStorage = new InterleavedLedgerStorage();
    interleavedStorage.initialize(conf, null, ledgerDirsManager, ledgerDirsManager, null,
            checkpointSource, checkpointer, NullStatsLogger.INSTANCE);
    Set<Long> ledgers = Sets.newTreeSet(dbStorage.getActiveLedgersInRange(0, Long.MAX_VALUE));
    Assert.assertEquals(Sets.newTreeSet(Lists.newArrayList(0L, 1L, 2L, 3L, 4L)), ledgers);
    ledgers = Sets.newTreeSet(interleavedStorage.getActiveLedgersInRange(0, Long.MAX_VALUE));
    Assert.assertEquals(Sets.newTreeSet(), ledgers);
    for (long ledgerId = 0; ledgerId < 5; ledgerId++) {
        Assert.assertEquals(true, dbStorage.isFenced(ledgerId));
        Assert.assertEquals("ledger-" + ledgerId, new String(dbStorage.readMasterKey(ledgerId)));
        for (long entryId = 0; entryId < 10000; entryId++) {
            ByteBuf entry = Unpooled.buffer(1024);
            entry.writeLong(ledgerId);
            entry.writeLong(entryId);
            entry.writeBytes(("entry-" + entryId).getBytes());
            ByteBuf result = dbStorage.getEntry(ledgerId, entryId);
            Assert.assertEquals(entry, result);
            result.release();
            try {
                interleavedStorage.getEntry(ledgerId, entryId);
                Assert.fail("entry should not exist");
            } catch (NoLedgerException e) {
                // Ok
            }
        }
    }
    interleavedStorage.shutdown();
    dbStorage.shutdown();
    FileUtils.forceDelete(tmpDir);
}
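The wiring that all of these snippets share is the construction of a DiskChecker from the configured disk-usage thresholds, which is then handed to a LedgerDirsManager. Below is a minimal sketch of just that pattern, using only the constructors and getters already shown above; the ledger directory path is illustrative.

ServerConfiguration conf = TestBKConfiguration.newServerConfiguration();
// Illustrative ledger directory; the tests above use a fresh temp directory instead.
conf.setLedgerDirNames(new String[] { "/tmp/bk-ledgers" });
// DiskChecker flags a directory as full above the first threshold and warns above the second.
DiskChecker diskChecker = new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold());
LedgerDirsManager ledgerDirsManager = new LedgerDirsManager(conf, conf.getLedgerDirs(), diskChecker);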
Use of org.apache.bookkeeper.util.DiskChecker in project bookkeeper by apache.
From the class LocationsIndexRebuildTest, method test().
@Test
public void test() throws Exception {
    File tmpDir = File.createTempFile("bkTest", ".dir");
    tmpDir.delete();
    tmpDir.mkdir();
    File curDir = Bookie.getCurrentDirectory(tmpDir);
    Bookie.checkDirectoryStructure(curDir);
    System.out.println(tmpDir);
    ServerConfiguration conf = TestBKConfiguration.newServerConfiguration();
    conf.setLedgerDirNames(new String[] { tmpDir.toString() });
    conf.setLedgerStorageClass(DbLedgerStorage.class.getName());
    LedgerDirsManager ledgerDirsManager = new LedgerDirsManager(conf, conf.getLedgerDirs(),
            new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold()));
    DbLedgerStorage ledgerStorage = new DbLedgerStorage();
    ledgerStorage.initialize(conf, null, ledgerDirsManager, ledgerDirsManager, null,
            checkpointSource, checkpointer, NullStatsLogger.INSTANCE);
    // Insert some ledgers and entries into the storage
    for (long ledgerId = 0; ledgerId < 5; ledgerId++) {
        ledgerStorage.setMasterKey(ledgerId, ("ledger-" + ledgerId).getBytes());
        ledgerStorage.setFenced(ledgerId);
        for (long entryId = 0; entryId < 100; entryId++) {
            ByteBuf entry = Unpooled.buffer(128);
            entry.writeLong(ledgerId);
            entry.writeLong(entryId);
            entry.writeBytes(("entry-" + entryId).getBytes());
            ledgerStorage.addEntry(entry);
        }
    }
    ledgerStorage.flush();
    ledgerStorage.shutdown();
    // Rebuild the index through the tool
    BookieShell shell = new BookieShell();
    shell.setConf(conf);
    int res = shell.run(new String[] { "rebuild-db-ledger-locations-index" });
    Assert.assertEquals(0, res);
    // Verify that the db index has the same entries
    ledgerStorage = new DbLedgerStorage();
    ledgerStorage.initialize(conf, null, ledgerDirsManager, ledgerDirsManager, null,
            checkpointSource, checkpointer, NullStatsLogger.INSTANCE);
    Set<Long> ledgers = Sets.newTreeSet(ledgerStorage.getActiveLedgersInRange(0, Long.MAX_VALUE));
    Assert.assertEquals(Sets.newTreeSet(Lists.newArrayList(0L, 1L, 2L, 3L, 4L)), ledgers);
    for (long ledgerId = 0; ledgerId < 5; ledgerId++) {
        Assert.assertEquals(true, ledgerStorage.isFenced(ledgerId));
        Assert.assertEquals("ledger-" + ledgerId, new String(ledgerStorage.readMasterKey(ledgerId)));
        ByteBuf lastEntry = ledgerStorage.getLastEntry(ledgerId);
        assertEquals(ledgerId, lastEntry.readLong());
        long lastEntryId = lastEntry.readLong();
        assertEquals(99, lastEntryId);
        for (long entryId = 0; entryId < 100; entryId++) {
            ByteBuf entry = Unpooled.buffer(1024);
            entry.writeLong(ledgerId);
            entry.writeLong(entryId);
            entry.writeBytes(("entry-" + entryId).getBytes());
            ByteBuf result = ledgerStorage.getEntry(ledgerId, entryId);
            Assert.assertEquals(entry, result);
        }
    }
    ledgerStorage.shutdown();
    FileUtils.forceDelete(tmpDir);
}
Use of org.apache.bookkeeper.util.DiskChecker in project bookkeeper by apache.
From the class ConversionRollbackTest, method convertFromDbStorageToInterleaved().
@Test
public void convertFromDbStorageToInterleaved() throws Exception {
    File tmpDir = File.createTempFile("bkTest", ".dir");
    tmpDir.delete();
    tmpDir.mkdir();
    File curDir = Bookie.getCurrentDirectory(tmpDir);
    Bookie.checkDirectoryStructure(curDir);
    log.info("Using temp directory: {}", tmpDir);
    ServerConfiguration conf = TestBKConfiguration.newServerConfiguration();
    conf.setLedgerDirNames(new String[] { tmpDir.toString() });
    LedgerDirsManager ledgerDirsManager = new LedgerDirsManager(conf, conf.getLedgerDirs(),
            new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold()));
    DbLedgerStorage dbStorage = new DbLedgerStorage();
    dbStorage.initialize(conf, null, ledgerDirsManager, ledgerDirsManager, null,
            checkpointSource, checkpointer, NullStatsLogger.INSTANCE);
    // Insert some ledgers and entries into the db storage
    for (long ledgerId = 0; ledgerId < 5; ledgerId++) {
        dbStorage.setMasterKey(ledgerId, ("ledger-" + ledgerId).getBytes());
        dbStorage.setFenced(ledgerId);
        for (long entryId = 0; entryId < 10000; entryId++) {
            ByteBuf entry = Unpooled.buffer(128);
            entry.writeLong(ledgerId);
            entry.writeLong(entryId);
            entry.writeBytes(("entry-" + entryId).getBytes());
            dbStorage.addEntry(entry);
        }
    }
    dbStorage.flush();
    dbStorage.shutdown();
    // Run the conversion tool
    BookieShell shell = new BookieShell();
    shell.setConf(conf);
    int res = shell.run(new String[] { "convert-to-interleaved-storage" });
    Assert.assertEquals(0, res);
    // Verify that the interleaved storage index has the same entries
    InterleavedLedgerStorage interleavedStorage = new InterleavedLedgerStorage();
    interleavedStorage.initialize(conf, null, ledgerDirsManager, ledgerDirsManager, null,
            checkpointSource, checkpointer, NullStatsLogger.INSTANCE);
    Set<Long> ledgers = Sets.newTreeSet(interleavedStorage.getActiveLedgersInRange(0, Long.MAX_VALUE));
    Assert.assertEquals(Sets.newTreeSet(Lists.newArrayList(0L, 1L, 2L, 3L, 4L)), ledgers);
    for (long ledgerId = 0; ledgerId < 5; ledgerId++) {
        Assert.assertEquals(true, interleavedStorage.isFenced(ledgerId));
        Assert.assertEquals("ledger-" + ledgerId, new String(interleavedStorage.readMasterKey(ledgerId)));
        for (long entryId = 0; entryId < 10000; entryId++) {
            ByteBuf entry = Unpooled.buffer(1024);
            entry.writeLong(ledgerId);
            entry.writeLong(entryId);
            entry.writeBytes(("entry-" + entryId).getBytes());
            ByteBuf result = interleavedStorage.getEntry(ledgerId, entryId);
            Assert.assertEquals(entry, result);
        }
    }
    interleavedStorage.shutdown();
    FileUtils.forceDelete(tmpDir);
}
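The conversion tool rewrites the data on disk; presumably a bookie that should keep running on the rolled-back layout would also have its configured ledger storage class switched back. A minimal sketch of that follow-up step, reusing the setLedgerStorageClass(...) call shown in LocationsIndexRebuildTest (this is an assumption, not part of the test above):

ServerConfiguration conf = TestBKConfiguration.newServerConfiguration();
// Assumption: after rolling the data back, point the bookie at the interleaved implementation again.
conf.setLedgerStorageClass(InterleavedLedgerStorage.class.getName());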
Use of org.apache.bookkeeper.util.DiskChecker in project bookkeeper by apache.
From the class BookieShell, method getJournals().
private synchronized List<Journal> getJournals() throws IOException {
    if (null == journals) {
        journals = Lists.newArrayListWithCapacity(bkConf.getJournalDirs().length);
        int idx = 0;
        for (File journalDir : bkConf.getJournalDirs()) {
            journals.add(new Journal(idx++, new File(journalDir, BookKeeperConstants.CURRENT_DIR), bkConf,
                    new LedgerDirsManager(bkConf, bkConf.getLedgerDirs(),
                            new DiskChecker(bkConf.getDiskUsageThreshold(), bkConf.getDiskUsageWarnThreshold()))));
        }
    }
    return journals;
}
Use of org.apache.bookkeeper.util.DiskChecker in project bookkeeper by apache.
From the class CompactionTest, method testCompactionSafety().
/**
 * Test that compaction doesn't add to the index without having persisted the
 * entry log first. This is needed because compaction doesn't go through the journal.
 * {@see https://issues.apache.org/jira/browse/BOOKKEEPER-530}
 * {@see https://issues.apache.org/jira/browse/BOOKKEEPER-664}
 */
@Test
public void testCompactionSafety() throws Exception {
    // I don't want the test infrastructure
    tearDown();
    ServerConfiguration conf = TestBKConfiguration.newServerConfiguration();
    final Set<Long> ledgers = Collections.newSetFromMap(new ConcurrentHashMap<Long, Boolean>());
    LedgerManager manager = getLedgerManager(ledgers);
    File tmpDir = createTempDir("bkTest", ".dir");
    File curDir = Bookie.getCurrentDirectory(tmpDir);
    Bookie.checkDirectoryStructure(curDir);
    conf.setLedgerDirNames(new String[] { tmpDir.toString() });
    conf.setEntryLogSizeLimit(EntryLogger.LOGFILE_HEADER_SIZE + 3 * (4 + ENTRY_SIZE));
    conf.setGcWaitTime(100);
    conf.setMinorCompactionThreshold(0.7f);
    conf.setMajorCompactionThreshold(0.0f);
    conf.setMinorCompactionInterval(1);
    conf.setMajorCompactionInterval(10);
    conf.setPageLimit(1);
    CheckpointSource checkpointSource = new CheckpointSource() {

        AtomicInteger idGen = new AtomicInteger(0);

        class MyCheckpoint implements CheckpointSource.Checkpoint {

            int id = idGen.incrementAndGet();

            @Override
            public int compareTo(CheckpointSource.Checkpoint o) {
                if (o == CheckpointSource.Checkpoint.MAX) {
                    return -1;
                } else if (o == CheckpointSource.Checkpoint.MIN) {
                    return 1;
                }
                return id - ((MyCheckpoint) o).id;
            }
        }

        @Override
        public CheckpointSource.Checkpoint newCheckpoint() {
            return new MyCheckpoint();
        }

        @Override
        public void checkpointComplete(CheckpointSource.Checkpoint checkpoint, boolean compact) throws IOException {
        }
    };
    final byte[] key = "foobar".getBytes();
    File log0 = new File(curDir, "0.log");
    LedgerDirsManager dirs = new LedgerDirsManager(conf, conf.getLedgerDirs(),
            new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold()));
    assertFalse("Log shouldn't exist", log0.exists());
    InterleavedLedgerStorage storage = new InterleavedLedgerStorage();
    storage.initialize(conf, manager, dirs, dirs, null, checkpointSource, Checkpointer.NULL, NullStatsLogger.INSTANCE);
    ledgers.add(1L);
    ledgers.add(2L);
    ledgers.add(3L);
    storage.setMasterKey(1, key);
    storage.setMasterKey(2, key);
    storage.setMasterKey(3, key);
    storage.addEntry(genEntry(1, 1, ENTRY_SIZE));
    storage.addEntry(genEntry(2, 1, ENTRY_SIZE));
    storage.addEntry(genEntry(2, 2, ENTRY_SIZE));
    storage.addEntry(genEntry(3, 2, ENTRY_SIZE));
    storage.flush();
    storage.shutdown();
    assertTrue("Log should exist", log0.exists());
    ledgers.remove(2L);
    ledgers.remove(3L);
    storage = new InterleavedLedgerStorage();
    storage.initialize(conf, manager, dirs, dirs, null, checkpointSource, Checkpointer.NULL, NullStatsLogger.INSTANCE);
    storage.start();
    for (int i = 0; i < 10; i++) {
        if (!log0.exists()) {
            break;
        }
        Thread.sleep(1000);
        // simulate the sync thread
        storage.entryLogger.flush();
    }
    assertFalse("Log shouldn't exist", log0.exists());
    ledgers.add(4L);
    storage.setMasterKey(4, key);
    // force the ledger 1 page to flush
    storage.addEntry(genEntry(4, 1, ENTRY_SIZE));
    storage.shutdown();
    storage = new InterleavedLedgerStorage();
    storage.initialize(conf, manager, dirs, dirs, null, checkpointSource, Checkpointer.NULL, NullStatsLogger.INSTANCE);
    // entry should exist
    storage.getEntry(1, 1);
}
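The compaction test above relies on a genEntry helper that is not part of the excerpt. A plausible stand-in, written here purely for illustration (an assumption, not the project's actual helper), builds an entry of the requested size whose first two longs are the ledger and entry ids, matching what the addEntry calls above expect:

// Hypothetical stand-in for the genEntry(ledgerId, entryId, size) helper used above.
// It writes the ledger id and entry id as the first two longs, then pads the buffer
// with filler bytes up to the requested size.
private ByteBuf genEntry(long ledger, long entry, int size) {
    ByteBuf buf = Unpooled.buffer(size);
    buf.writeLong(ledger);
    buf.writeLong(entry);
    while (buf.writableBytes() > 0) {
        buf.writeByte((byte) 'x');
    }
    return buf;
}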