Usage example of org.apache.bookkeeper.bookie.DefaultEntryLogger in the Apache BookKeeper project.
Taken from the class LocationsIndexRebuildOp, method initiate.
/**
 * Rebuilds the RocksDB "locations" index by scanning every entry log.
 *
 * <p>The existing index is first moved to a timestamped backup directory, then a new
 * index is populated with one (ledgerId, entryId) -&gt; location record per entry found
 * in the entry logs, restricted to ledgers still present in the ledger manager.
 *
 * @throws IOException if the backup move, entry-log scan, or index write fails
 */
public void initiate() throws IOException {
    LOG.info("Starting locations index rebuilding");
    // Move locations index to a backup directory so the rebuild starts from scratch.
    String basePath = BookieImpl.getCurrentDirectory(conf.getLedgerDirs()[0]).toString();
    Path currentPath = FileSystems.getDefault().getPath(basePath, "locations");
    String timestamp = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSZ").format(new Date());
    Path backupPath = FileSystems.getDefault().getPath(basePath, "locations.BACKUP-" + timestamp);
    Files.move(currentPath, backupPath);
    LOG.info("Created locations index backup at {}", backupPath);
    long startTime = System.nanoTime();
    DefaultEntryLogger entryLogger = new DefaultEntryLogger(conf,
            new LedgerDirsManager(conf, conf.getLedgerDirs(),
                    new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold())));
    Set<Long> entryLogs = entryLogger.getEntryLogsSet();
    Set<Long> activeLedgers = getActiveLedgers(conf, KeyValueStorageRocksDB.factory, basePath);
    LOG.info("Found {} active ledgers in ledger manager", activeLedgers.size());
    KeyValueStorage newIndex = KeyValueStorageRocksDB.factory.newKeyValueStorage(basePath, "locations",
            DbConfigType.Default, conf);
    // Ensure the new index is closed even if a scan or write fails part-way through,
    // otherwise the RocksDB handle (and its lock) would leak.
    try {
        int totalEntryLogs = entryLogs.size();
        int completedEntryLogs = 0;
        LOG.info("Scanning {} entry logs", totalEntryLogs);
        for (long entryLogId : entryLogs) {
            entryLogger.scanEntryLog(entryLogId, new EntryLogScanner() {
                @Override
                public void process(long ledgerId, long offset, ByteBuf entry) throws IOException {
                    long entryId = entry.getLong(8);
                    // Actual location indexed is pointing past the entry size
                    long location = (entryLogId << 32L) | (offset + 4);
                    if (LOG.isDebugEnabled()) {
                        // BUGFIX: the low half must be extracted with a full 32-bit mask;
                        // the previous (Integer.MAX_VALUE - 1) mask dropped bits 0 and 31.
                        LOG.debug("Rebuilding {}:{} at location {} / {}", ledgerId, entryId,
                                location >> 32, location & 0xFFFFFFFFL);
                    }
                    // Update the ledger index page
                    LongPairWrapper key = LongPairWrapper.get(ledgerId, entryId);
                    LongWrapper value = LongWrapper.get(location);
                    newIndex.put(key.array, value.array);
                    // NOTE(review): key/value look like recycler-backed wrappers — confirm
                    // whether they should be recycled after use, as elsewhere in the project.
                }
                @Override
                public boolean accept(long ledgerId) {
                    // Skip entries for ledgers no longer known to the ledger manager.
                    return activeLedgers.contains(ledgerId);
                }
            });
            ++completedEntryLogs;
            LOG.info("Completed scanning of log {}.log -- {} / {}",
                    Long.toHexString(entryLogId), completedEntryLogs, totalEntryLogs);
        }
        newIndex.sync();
    } finally {
        newIndex.close();
    }
    LOG.info("Rebuilding index is done. Total time: {}",
            DurationFormatUtils.formatDurationHMS(TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime)));
}
Usage example of org.apache.bookkeeper.bookie.DefaultEntryLogger in the Apache BookKeeper project.
Taken from the class EntryLogTestUtils, method newLegacyEntryLogger.
/**
 * Creates a legacy {@link DefaultEntryLogger} for tests, backed by the given ledger
 * directories and capped at the supplied entry-log size limit.
 *
 * @param logSizeLimit maximum size of a single entry log file, in bytes
 * @param ledgerDir    one or more ledger directories to manage
 * @return a freshly constructed legacy entry logger
 * @throws Exception if the dirs manager or logger cannot be created
 */
public static EntryLogger newLegacyEntryLogger(int logSizeLimit, File... ledgerDir) throws Exception {
    // Minimal configuration: only the entry-log size cap deviates from defaults.
    ServerConfiguration serverConf = new ServerConfiguration();
    serverConf.setEntryLogSizeLimit(logSizeLimit);
    return new DefaultEntryLogger(serverConf, newDirsManager(ledgerDir), null,
            NullStatsLogger.INSTANCE, ByteBufAllocator.DEFAULT);
}
Usage example of org.apache.bookkeeper.bookie.DefaultEntryLogger in the Apache BookKeeper project.
Taken from the class LedgersIndexRebuildOp, method scanEntryLogFiles.
/**
 * Scans every entry log and accumulates each ledger id encountered into {@code ledgers}.
 *
 * @param ledgers set that receives every ledger id found in the entry logs
 * @throws IOException if an entry log cannot be read
 */
private void scanEntryLogFiles(Set<Long> ledgers) throws IOException {
    DefaultEntryLogger entryLogger = new DefaultEntryLogger(conf,
            new LedgerDirsManager(conf, conf.getLedgerDirs(),
                    new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold())));
    Set<Long> entryLogs = entryLogger.getEntryLogsSet();
    int totalEntryLogs = entryLogs.size();
    int completedEntryLogs = 0;
    LOG.info("Scanning {} entry logs", totalEntryLogs);
    // The collector is stateless across logs, so a single instance serves every scan.
    EntryLogScanner collector = new EntryLogScanner() {
        @Override
        public boolean accept(long ledgerId) {
            // Every ledger in every log is of interest; no filtering here.
            return true;
        }

        @Override
        public void process(long ledgerId, long offset, ByteBuf entry) throws IOException {
            // Set.add returns true only on the first sighting of this ledger id.
            if (ledgers.add(ledgerId) && verbose) {
                LOG.info("Found ledger {} in entry log", ledgerId);
            }
        }
    };
    for (long entryLogId : entryLogs) {
        entryLogger.scanEntryLog(entryLogId, collector);
        completedEntryLogs++;
        LOG.info("Completed scanning of log {}.log -- {} / {}",
                Long.toHexString(entryLogId), completedEntryLogs, totalEntryLogs);
    }
}
Usage example of org.apache.bookkeeper.bookie.DefaultEntryLogger in the Apache BookKeeper project.
Taken from the class ListActiveLedgersCommand, method handler.
/**
 * Lists the ledgers in a given entry log that are still active in the metadata store.
 *
 * <p>First gathers every ledger id known to the ledger manager (asynchronously, bounded
 * by {@code cmdFlags.timeout} milliseconds), then intersects that set with the ledgers
 * present in entry log {@code cmdFlags.logId} and prints the result.
 *
 * @param bkConf   server configuration used to reach metadata and entry logs
 * @param cmdFlags command flags carrying the target log id and the metadata read timeout
 * @throws ExecutionException if the ledger-manager function invocation fails
 * @throws MetadataException  if the metadata store cannot be accessed
 */
public void handler(ServerConfiguration bkConf, ActiveLedgerFlags cmdFlags) throws ExecutionException, MetadataException {
    runFunctionWithLedgerManagerFactory(bkConf, mFactory -> {
        try (LedgerManager ledgerManager = mFactory.newLedgerManager()) {
            // Collect every ledger id the metadata store knows about.
            Set<Long> activeLedgersOnMetadata = new HashSet<>();
            BookkeeperInternalCallbacks.Processor<Long> ledgerProcessor = (ledger, cb) -> {
                activeLedgersOnMetadata.add(ledger);
                cb.processResult(BKException.Code.OK, null, null);
            };
            CountDownLatch done = new CountDownLatch(1);
            AtomicInteger resultCode = new AtomicInteger(BKException.Code.OK);
            VoidCallback endCallback = (rs, s, obj) -> {
                resultCode.set(rs);
                done.countDown();
            };
            ledgerManager.asyncProcessLedgers(ledgerProcessor, endCallback, null,
                    BKException.Code.OK, BKException.Code.ReadException);
            if (done.await(cmdFlags.timeout, TimeUnit.MILLISECONDS)) {
                if (resultCode.get() == BKException.Code.OK) {
                    DefaultEntryLogger entryLogger = new ReadOnlyDefaultEntryLogger(bkConf);
                    EntryLogMetadata entryLogMetadata = entryLogger.getEntryLogMetadata(cmdFlags.logId);
                    List<Long> ledgersOnEntryLog = entryLogMetadata.getLedgersMap().keys();
                    if (ledgersOnEntryLog.isEmpty()) {
                        LOG.info("Ledgers on log file {} is empty", cmdFlags.logId);
                    }
                    // Keep only the ledgers that are both in this log and still in metadata.
                    List<Long> activeLedgersOnEntryLog = new ArrayList<>(ledgersOnEntryLog.size());
                    for (long ledger : ledgersOnEntryLog) {
                        if (activeLedgersOnMetadata.contains(ledger)) {
                            activeLedgersOnEntryLog.add(ledger);
                        }
                    }
                    printActiveLedgerOnEntryLog(cmdFlags.logId, activeLedgersOnEntryLog);
                } else {
                    LOG.info("Read active ledgers id from metadata store,fail code {}", resultCode.get());
                    throw BKException.create(resultCode.get());
                }
            } else {
                LOG.info("Read active ledgers id from metadata store timeout");
            }
        } catch (InterruptedException e) {
            // BUGFIX: restore the interrupt flag before wrapping — swallowing it hides
            // the interruption from callers further up the stack.
            Thread.currentThread().interrupt();
            LOG.error("Received Exception while processing ledgers", e);
            throw new UncheckedExecutionException(e);
        } catch (BKException | IOException e) {
            LOG.error("Received Exception while processing ledgers", e);
            throw new UncheckedExecutionException(e);
        }
        return null;
    });
}
Aggregations