
Example 1 with EntryLogger

Use of org.apache.bookkeeper.bookie.EntryLogger in project bookkeeper by apache.

The class LocationsIndexRebuildOp, method initiate().

public void initiate() throws IOException {
    LOG.info("Starting index rebuilding");
    // Move locations index to a backup directory
    String basePath = Bookie.getCurrentDirectory(conf.getLedgerDirs()[0]).toString();
    Path currentPath = FileSystems.getDefault().getPath(basePath, "locations");
    String timestamp = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSZ").format(new Date());
    Path backupPath = FileSystems.getDefault().getPath(basePath, "locations.BACKUP-" + timestamp);
    Files.move(currentPath, backupPath);
    LOG.info("Created locations index backup at {}", backupPath);
    long startTime = System.nanoTime();
    EntryLogger entryLogger = new EntryLogger(conf,
            new LedgerDirsManager(conf, conf.getLedgerDirs(),
                    new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold())));
    Set<Long> entryLogs = entryLogger.getEntryLogsSet();
    String locationsDbPath = FileSystems.getDefault().getPath(basePath, "locations").toFile().toString();
    Set<Long> activeLedgers = getActiveLedgers(conf, KeyValueStorageRocksDB.factory, basePath);
    LOG.info("Found {} active ledgers in ledger manager", activeLedgers.size());
    KeyValueStorage newIndex = KeyValueStorageRocksDB.factory.newKeyValueStorage(locationsDbPath, DbConfigType.Huge, conf);
    int totalEntryLogs = entryLogs.size();
    int completedEntryLogs = 0;
    LOG.info("Scanning {} entry logs", totalEntryLogs);
    for (long entryLogId : entryLogs) {
        entryLogger.scanEntryLog(entryLogId, new EntryLogScanner() {

            @Override
            public void process(long ledgerId, long offset, ByteBuf entry) throws IOException {
                long entryId = entry.getLong(8);
                // Actual location indexed is pointing past the entry size
                long location = (entryLogId << 32L) | (offset + 4);
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Rebuilding {}:{} at location {} / {}", ledgerId, entryId, location >> 32, location & (Integer.MAX_VALUE - 1));
                }
                // Update the ledger index page
                LongPairWrapper key = LongPairWrapper.get(ledgerId, entryId);
                LongWrapper value = LongWrapper.get(location);
                newIndex.put(key.array, value.array);
            }

            @Override
            public boolean accept(long ledgerId) {
                return activeLedgers.contains(ledgerId);
            }
        });
        ++completedEntryLogs;
        LOG.info("Completed scanning of log {}.log -- {} / {}", Long.toHexString(entryLogId), completedEntryLogs, totalEntryLogs);
    }
    newIndex.sync();
    newIndex.close();
    LOG.info("Rebuilding index is done. Total time: {}", DurationFormatUtils.formatDurationHMS(TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime)));
}
Also used: Path (java.nio.file.Path), LedgerDirsManager (org.apache.bookkeeper.bookie.LedgerDirsManager), DiskChecker (org.apache.bookkeeper.util.DiskChecker), EntryLogger (org.apache.bookkeeper.bookie.EntryLogger), IOException (java.io.IOException), ByteBuf (io.netty.buffer.ByteBuf), Date (java.util.Date), EntryLogScanner (org.apache.bookkeeper.bookie.EntryLogger.EntryLogScanner), SimpleDateFormat (java.text.SimpleDateFormat)
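
The scanner above packs each entry's location into a single 64-bit value: the entry log id in the upper 32 bits and the file offset in the lower 32 bits, with 4 added to the offset so the location points past the entry-size header. A minimal standalone sketch of that encoding and decoding, using made-up values rather than anything read from a real entry log:

class LocationEncodingSketch {

    public static void main(String[] args) {
        // Hypothetical entry log id and offset, purely for illustration.
        long entryLogId = 0x12;
        long offset = 1024;
        // Same packing as the rebuild loop: high 32 bits = log id,
        // low 32 bits = offset + 4 (skipping the 4-byte entry size header).
        long location = (entryLogId << 32L) | (offset + 4);
        // Decode: recover the log id and the position inside that log.
        long decodedLogId = location >>> 32;
        long decodedOffset = location & 0xFFFFFFFFL;
        System.out.println(decodedLogId + " / " + decodedOffset);
    }
}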

Example 2 with EntryLogger

Use of org.apache.bookkeeper.bookie.EntryLogger in project bookkeeper by apache.

The class DbLedgerStorageTest, method testBookieCompaction().

@Test
public void testBookieCompaction() throws Exception {
    storage.setMasterKey(4, "key".getBytes());
    ByteBuf entry3 = Unpooled.buffer(1024);
    // ledger id
    entry3.writeLong(4);
    // entry id
    entry3.writeLong(3);
    entry3.writeBytes("entry-3".getBytes());
    storage.addEntry(entry3);
    // Simulate bookie compaction
    EntryLogger entryLogger = ((DbLedgerStorage) storage).getEntryLogger();
    // Rewrite entry-3
    ByteBuf newEntry3 = Unpooled.buffer(1024);
    // ledger id
    newEntry3.writeLong(4);
    // entry id
    newEntry3.writeLong(3);
    newEntry3.writeBytes("new-entry-3".getBytes());
    long location = entryLogger.addEntry(4, newEntry3, false);
    List<EntryLocation> locations = Lists.newArrayList(new EntryLocation(4, 3, location));
    storage.updateEntriesLocations(locations);
    ByteBuf res = storage.getEntry(4, 3);
    System.out.println("res:       " + ByteBufUtil.hexDump(res));
    System.out.println("newEntry3: " + ByteBufUtil.hexDump(newEntry3));
    assertEquals(newEntry3, res);
}
Also used: EntryLocation (org.apache.bookkeeper.bookie.EntryLocation), EntryLogger (org.apache.bookkeeper.bookie.EntryLogger), ByteBuf (io.netty.buffer.ByteBuf), Test (org.junit.Test)
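
Both this test and the scanner in Example 1 rely on the same entry layout: an 8-byte ledger id, followed by an 8-byte entry id, followed by the payload. Below is a short hedged sketch of building such a buffer and reading the two ids back; the ids and the "payload" string are illustrative only:

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import java.nio.charset.StandardCharsets;

class EntryLayoutSketch {

    public static void main(String[] args) {
        ByteBuf entry = Unpooled.buffer(1024);
        // ledger id (illustrative value)
        entry.writeLong(4);
        // entry id (illustrative value)
        entry.writeLong(3);
        // payload
        entry.writeBytes("payload".getBytes(StandardCharsets.UTF_8));
        // Example 1's scanner reads the same fixed positions back with getLong():
        long ledgerId = entry.getLong(0);
        long entryId = entry.getLong(8);
        System.out.println(ledgerId + ":" + entryId);
    }
}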

Example 3 with EntryLogger

Use of org.apache.bookkeeper.bookie.EntryLogger in project bookkeeper by apache.

The class DbLedgerStorage, method initialize().

@Override
public void initialize(ServerConfiguration conf, LedgerManager ledgerManager, LedgerDirsManager ledgerDirsManager, LedgerDirsManager indexDirsManager, StateManager stateManager, CheckpointSource checkpointSource, Checkpointer checkpointer, StatsLogger statsLogger) throws IOException {
    checkArgument(ledgerDirsManager.getAllLedgerDirs().size() == 1, "Db implementation only allows for one storage dir");
    String baseDir = ledgerDirsManager.getAllLedgerDirs().get(0).toString();
    writeCacheMaxSize = conf.getLong(WRITE_CACHE_MAX_SIZE_MB, DEFAULT_WRITE_CACHE_MAX_SIZE_MB) * MB;
    writeCache = new WriteCache(writeCacheMaxSize / 2);
    writeCacheBeingFlushed = new WriteCache(writeCacheMaxSize / 2);
    this.checkpointSource = checkpointSource;
    readCacheMaxSize = conf.getLong(READ_AHEAD_CACHE_MAX_SIZE_MB, DEFAULT_READ_CACHE_MAX_SIZE_MB) * MB;
    readAheadCacheBatchSize = conf.getInt(READ_AHEAD_CACHE_BATCH_SIZE, DEFAULT_READ_AHEAD_CACHE_BATCH_SIZE);
    long maxThrottleTimeMillis = conf.getLong(MAX_THROTTLE_TIME_MILLIS, DEFAUL_MAX_THROTTLE_TIME_MILLIS);
    maxThrottleTimeNanos = TimeUnit.MILLISECONDS.toNanos(maxThrottleTimeMillis);
    readCache = new ReadCache(readCacheMaxSize);
    this.stats = statsLogger;
    log.info("Started Db Ledger Storage");
    log.info(" - Write cache size: {} MB", writeCacheMaxSize / MB);
    log.info(" - Read Cache: {} MB", readCacheMaxSize / MB);
    log.info(" - Read Ahead Batch size: : {}", readAheadCacheBatchSize);
    ledgerIndex = new LedgerMetadataIndex(conf, KeyValueStorageRocksDB.factory, baseDir, stats);
    entryLocationIndex = new EntryLocationIndex(conf, KeyValueStorageRocksDB.factory, baseDir, stats);
    transientLedgerInfoCache = new ConcurrentLongHashMap<>(16 * 1024, Runtime.getRuntime().availableProcessors() * 2);
    cleanupExecutor.scheduleAtFixedRate(this::cleanupStaleTransientLedgerInfo, LEDGER_INFO_CACHING_TIME_MINUTES, LEDGER_INFO_CACHING_TIME_MINUTES, TimeUnit.MINUTES);
    entryLogger = new EntryLogger(conf, ledgerDirsManager);
    gcThread = new GarbageCollectorThread(conf, ledgerManager, this, statsLogger);
    registerStats();
}
Also used: GarbageCollectorThread (org.apache.bookkeeper.bookie.GarbageCollectorThread), ByteString (com.google.protobuf.ByteString), EntryLogger (org.apache.bookkeeper.bookie.EntryLogger)
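
initialize() asserts that exactly one ledger directory is configured, so a bookie that wants this storage backend needs a matching ServerConfiguration. The following is a hedged sketch of such a setup, not code taken from the project: setLedgerDirNames and setLedgerStorageClass are standard ServerConfiguration setters, the directory path is made up, and the fully qualified class name is assumed to be org.apache.bookkeeper.bookie.storage.ldb.DbLedgerStorage.

import org.apache.bookkeeper.conf.ServerConfiguration;

class DbLedgerStorageConfigSketch {

    public static void main(String[] args) {
        ServerConfiguration conf = new ServerConfiguration();
        // DbLedgerStorage only allows a single storage dir (see the checkArgument above);
        // the path here is purely illustrative.
        conf.setLedgerDirNames(new String[] { "/data/bookkeeper/ledgers" });
        // Select the Db-backed storage implementation by class name (assumed FQCN).
        conf.setLedgerStorageClass("org.apache.bookkeeper.bookie.storage.ldb.DbLedgerStorage");
    }
}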

Aggregations

EntryLogger (org.apache.bookkeeper.bookie.EntryLogger): 3
ByteBuf (io.netty.buffer.ByteBuf): 2
ByteString (com.google.protobuf.ByteString): 1
IOException (java.io.IOException): 1
Path (java.nio.file.Path): 1
SimpleDateFormat (java.text.SimpleDateFormat): 1
Date (java.util.Date): 1
EntryLocation (org.apache.bookkeeper.bookie.EntryLocation): 1
EntryLogScanner (org.apache.bookkeeper.bookie.EntryLogger.EntryLogScanner): 1
GarbageCollectorThread (org.apache.bookkeeper.bookie.GarbageCollectorThread): 1
LedgerDirsManager (org.apache.bookkeeper.bookie.LedgerDirsManager): 1
DiskChecker (org.apache.bookkeeper.util.DiskChecker): 1
Test (org.junit.Test): 1