Use of org.apache.bookkeeper.bookie.EntryLogger in project bookkeeper by apache.
The class LocationsIndexRebuildOp, method initiate.
public void initiate() throws IOException {
    LOG.info("Starting index rebuilding");

    // Move locations index to a backup directory
    String basePath = Bookie.getCurrentDirectory(conf.getLedgerDirs()[0]).toString();
    Path currentPath = FileSystems.getDefault().getPath(basePath, "locations");
    String timestamp = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSZ").format(new Date());
    Path backupPath = FileSystems.getDefault().getPath(basePath, "locations.BACKUP-" + timestamp);
    Files.move(currentPath, backupPath);
    LOG.info("Created locations index backup at {}", backupPath);

    long startTime = System.nanoTime();

    EntryLogger entryLogger = new EntryLogger(conf,
            new LedgerDirsManager(conf, conf.getLedgerDirs(),
                    new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold())));
    Set<Long> entryLogs = entryLogger.getEntryLogsSet();

    String locationsDbPath = FileSystems.getDefault().getPath(basePath, "locations").toFile().toString();
    Set<Long> activeLedgers = getActiveLedgers(conf, KeyValueStorageRocksDB.factory, basePath);
    LOG.info("Found {} active ledgers in ledger manager", activeLedgers.size());

    KeyValueStorage newIndex = KeyValueStorageRocksDB.factory.newKeyValueStorage(locationsDbPath, DbConfigType.Huge, conf);

    int totalEntryLogs = entryLogs.size();
    int completedEntryLogs = 0;
    LOG.info("Scanning {} entry logs", totalEntryLogs);

    for (long entryLogId : entryLogs) {
        entryLogger.scanEntryLog(entryLogId, new EntryLogScanner() {
            @Override
            public void process(long ledgerId, long offset, ByteBuf entry) throws IOException {
                long entryId = entry.getLong(8);

                // Actual location indexed is pointing past the entry size
                long location = (entryLogId << 32L) | (offset + 4);
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Rebuilding {}:{} at location {} / {}", ledgerId, entryId,
                            location >> 32, location & (Integer.MAX_VALUE - 1));
                }

                // Update the ledger index page
                LongPairWrapper key = LongPairWrapper.get(ledgerId, entryId);
                LongWrapper value = LongWrapper.get(location);
                newIndex.put(key.array, value.array);
            }

            @Override
            public boolean accept(long ledgerId) {
                return activeLedgers.contains(ledgerId);
            }
        });

        ++completedEntryLogs;
        LOG.info("Completed scanning of log {}.log -- {} / {}",
                Long.toHexString(entryLogId), completedEntryLogs, totalEntryLogs);
    }

    newIndex.sync();
    newIndex.close();

    LOG.info("Rebuilding index is done. Total time: {}",
            DurationFormatUtils.formatDurationHMS(TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime)));
}
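The packed location computed in the scanner is worth unpacking: the entry-log id lives in the high 32 bits, and the file offset, advanced past the 4-byte entry-size header, lives in the low 32 bits. A minimal, self-contained sketch of that encoding (the class and helper names are illustrative, not part of BookKeeper):

public class LocationCodec {
    // Mirrors the (entryLogId << 32L) | (offset + 4) expression in the scanner above
    static long pack(long entryLogId, long offset) {
        return (entryLogId << 32L) | (offset + 4);
    }

    static long logId(long location) {
        return location >>> 32; // high 32 bits: entry log id
    }

    static long offsetPastSizeHeader(long location) {
        return location & 0xFFFFFFFFL; // low 32 bits: offset past the size header
    }

    public static void main(String[] args) {
        long location = pack(0x2a, 1000);
        // prints "logId=2a offset=1004"
        System.out.println("logId=" + Long.toHexString(logId(location))
                + " offset=" + offsetPastSizeHeader(location));
    }
}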
Use of org.apache.bookkeeper.bookie.EntryLogger in project bookkeeper by apache.
The class DbLedgerStorageTest, method testBookieCompaction.
@Test
public void testBookieCompaction() throws Exception {
    storage.setMasterKey(4, "key".getBytes());

    ByteBuf entry3 = Unpooled.buffer(1024);
    // ledger id
    entry3.writeLong(4);
    // entry id
    entry3.writeLong(3);
    entry3.writeBytes("entry-3".getBytes());
    storage.addEntry(entry3);

    // Simulate bookie compaction
    EntryLogger entryLogger = ((DbLedgerStorage) storage).getEntryLogger();

    // Rewrite entry-3
    ByteBuf newEntry3 = Unpooled.buffer(1024);
    // ledger id
    newEntry3.writeLong(4);
    // entry id
    newEntry3.writeLong(3);
    newEntry3.writeBytes("new-entry-3".getBytes());
    long location = entryLogger.addEntry(4, newEntry3, false);

    List<EntryLocation> locations = Lists.newArrayList(new EntryLocation(4, 3, location));
    storage.updateEntriesLocations(locations);

    ByteBuf res = storage.getEntry(4, 3);
    System.out.println("res: " + ByteBufUtil.hexDump(res));
    System.out.println("newEntry3: " + ByteBufUtil.hexDump(newEntry3));
    assertEquals(newEntry3, res);
}
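Both buffers in this test follow the entry layout DbLedgerStorage expects: an 8-byte ledger id, then an 8-byte entry id, then the payload. A small sketch of a builder for that layout (makeEntry is a hypothetical helper, not part of the test class):

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import java.nio.charset.StandardCharsets;

// Hypothetical helper: builds an entry buffer in the layout used above
static ByteBuf makeEntry(long ledgerId, long entryId, String payload) {
    ByteBuf buf = Unpooled.buffer(1024);
    buf.writeLong(ledgerId); // ledger id
    buf.writeLong(entryId);  // entry id
    buf.writeBytes(payload.getBytes(StandardCharsets.UTF_8));
    return buf;
}

With it, entry3 above would be makeEntry(4, 3, "entry-3") and newEntry3 would be makeEntry(4, 3, "new-entry-3").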
Use of org.apache.bookkeeper.bookie.EntryLogger in project bookkeeper by apache.
The class DbLedgerStorage, method initialize.
@Override
public void initialize(ServerConfiguration conf, LedgerManager ledgerManager, LedgerDirsManager ledgerDirsManager,
        LedgerDirsManager indexDirsManager, StateManager stateManager, CheckpointSource checkpointSource,
        Checkpointer checkpointer, StatsLogger statsLogger) throws IOException {
    checkArgument(ledgerDirsManager.getAllLedgerDirs().size() == 1,
            "Db implementation only allows for one storage dir");

    String baseDir = ledgerDirsManager.getAllLedgerDirs().get(0).toString();

    writeCacheMaxSize = conf.getLong(WRITE_CACHE_MAX_SIZE_MB, DEFAULT_WRITE_CACHE_MAX_SIZE_MB) * MB;
    writeCache = new WriteCache(writeCacheMaxSize / 2);
    writeCacheBeingFlushed = new WriteCache(writeCacheMaxSize / 2);

    this.checkpointSource = checkpointSource;

    readCacheMaxSize = conf.getLong(READ_AHEAD_CACHE_MAX_SIZE_MB, DEFAULT_READ_CACHE_MAX_SIZE_MB) * MB;
    readAheadCacheBatchSize = conf.getInt(READ_AHEAD_CACHE_BATCH_SIZE, DEFAULT_READ_AHEAD_CACHE_BATCH_SIZE);

    long maxThrottleTimeMillis = conf.getLong(MAX_THROTTLE_TIME_MILLIS, DEFAUL_MAX_THROTTLE_TIME_MILLIS);
    maxThrottleTimeNanos = TimeUnit.MILLISECONDS.toNanos(maxThrottleTimeMillis);

    readCache = new ReadCache(readCacheMaxSize);

    this.stats = statsLogger;

    log.info("Started Db Ledger Storage");
    log.info(" - Write cache size: {} MB", writeCacheMaxSize / MB);
    log.info(" - Read Cache: {} MB", readCacheMaxSize / MB);
    log.info(" - Read Ahead Batch size: {}", readAheadCacheBatchSize);

    ledgerIndex = new LedgerMetadataIndex(conf, KeyValueStorageRocksDB.factory, baseDir, stats);
    entryLocationIndex = new EntryLocationIndex(conf, KeyValueStorageRocksDB.factory, baseDir, stats);

    transientLedgerInfoCache = new ConcurrentLongHashMap<>(16 * 1024, Runtime.getRuntime().availableProcessors() * 2);
    cleanupExecutor.scheduleAtFixedRate(this::cleanupStaleTransientLedgerInfo,
            LEDGER_INFO_CACHING_TIME_MINUTES, LEDGER_INFO_CACHING_TIME_MINUTES, TimeUnit.MINUTES);

    entryLogger = new EntryLogger(conf, ledgerDirsManager);
    gcThread = new GarbageCollectorThread(conf, ledgerManager, this, statsLogger);

    registerStats();
}
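To reach this initialize path, a bookie has to be configured to use DbLedgerStorage. A hedged sketch of the relevant server configuration, assuming a single ledger directory as the checkArgument above requires (the directory path is illustrative, and dbStorage_writeCacheMaxSizeMb is assumed to be the property behind WRITE_CACHE_MAX_SIZE_MB):

import org.apache.bookkeeper.conf.ServerConfiguration;

ServerConfiguration conf = new ServerConfiguration();
// DbLedgerStorage only allows one storage dir (see the checkArgument above)
conf.setLedgerDirNames(new String[] { "/data/bookkeeper/ledgers" });
conf.setLedgerStorageClass("org.apache.bookkeeper.bookie.storage.ldb.DbLedgerStorage");
// Assumed key name; sizes the write cache read by initialize() above
conf.setProperty("dbStorage_writeCacheMaxSizeMb", 256);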