Use of org.apache.bookkeeper.util.DiskChecker in project bookkeeper by apache.
The class LocationsIndexRebuildOp, method initiate:
public void initiate() throws IOException {
LOG.info("Starting index rebuilding");
// Move locations index to a backup directory
String basePath = Bookie.getCurrentDirectory(conf.getLedgerDirs()[0]).toString();
Path currentPath = FileSystems.getDefault().getPath(basePath, "locations");
String timestamp = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSZ").format(new Date());
Path backupPath = FileSystems.getDefault().getPath(basePath, "locations.BACKUP-" + timestamp);
Files.move(currentPath, backupPath);
LOG.info("Created locations index backup at {}", backupPath);
long startTime = System.nanoTime();
EntryLogger entryLogger = new EntryLogger(conf, new LedgerDirsManager(conf, conf.getLedgerDirs(), new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold())));
Set<Long> entryLogs = entryLogger.getEntryLogsSet();
String locationsDbPath = FileSystems.getDefault().getPath(basePath, "locations").toFile().toString();
Set<Long> activeLedgers = getActiveLedgers(conf, KeyValueStorageRocksDB.factory, basePath);
LOG.info("Found {} active ledgers in ledger manager", activeLedgers.size());
KeyValueStorage newIndex = KeyValueStorageRocksDB.factory.newKeyValueStorage(locationsDbPath, DbConfigType.Huge, conf);
int totalEntryLogs = entryLogs.size();
int completedEntryLogs = 0;
LOG.info("Scanning {} entry logs", totalEntryLogs);
for (long entryLogId : entryLogs) {
entryLogger.scanEntryLog(entryLogId, new EntryLogScanner() {
@Override
public void process(long ledgerId, long offset, ByteBuf entry) throws IOException {
long entryId = entry.getLong(8);
// Actual location indexed is pointing past the entry size
long location = (entryLogId << 32L) | (offset + 4);
if (LOG.isDebugEnabled()) {
LOG.debug("Rebuilding {}:{} at location {} / {}", ledgerId, entryId, location >> 32, location & (Integer.MAX_VALUE - 1));
}
// Update the ledger index page
LongPairWrapper key = LongPairWrapper.get(ledgerId, entryId);
LongWrapper value = LongWrapper.get(location);
newIndex.put(key.array, value.array);
}
@Override
public boolean accept(long ledgerId) {
return activeLedgers.contains(ledgerId);
}
});
++completedEntryLogs;
LOG.info("Completed scanning of log {}.log -- {} / {}", Long.toHexString(entryLogId), completedEntryLogs, totalEntryLogs);
}
newIndex.sync();
newIndex.close();
LOG.info("Rebuilding index is done. Total time: {}", DurationFormatUtils.formatDurationHMS(TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime)));
}
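The value written into the rebuilt index packs the entry log id and the in-log offset into a single long. The following is an illustrative sketch (the class and helper names are hypothetical, not part of BookKeeper) of that packing and how it can be decoded again:

// Illustrative sketch only: mirrors the location packing used in process() above.
public class LocationPackingExample {

    // Upper 32 bits: entry log id. Lower 32 bits: offset, advanced past the 4-byte entry-size header.
    static long packLocation(long entryLogId, long offset) {
        return (entryLogId << 32L) | (offset + 4);
    }

    static long entryLogIdOf(long location) {
        return location >>> 32;
    }

    static long offsetOf(long location) {
        // Mask out the upper 32 bits to recover the offset within the entry log.
        return location & 0xFFFFFFFFL;
    }

    public static void main(String[] args) {
        long location = packLocation(42L, 1024L);
        // Prints: logId=42 offset=1028 (the stored offset points just past the size header)
        System.out.println("logId=" + entryLogIdOf(location) + " offset=" + offsetOf(location));
    }
}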
Use of org.apache.bookkeeper.util.DiskChecker in project bookkeeper by apache.
The class LastMarkCommand, method run:
@Override
public void run(ServerConfiguration conf) throws Exception {
    LedgerDirsManager dirsManager = new LedgerDirsManager(conf, conf.getJournalDirs(),
            new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold()));
    File[] journalDirs = conf.getJournalDirs();
    for (int idx = 0; idx < journalDirs.length; idx++) {
        Journal journal = new Journal(idx, journalDirs[idx], conf, dirsManager);
        LogMark lastLogMark = journal.getLastLogMark().getCurMark();
        System.out.println("LastLogMark : Journal Id - " + lastLogMark.getLogFileId() + "("
                + Long.toHexString(lastLogMark.getLogFileId()) + ".txn), Pos - " + lastLogMark.getLogFileOffset());
    }
}
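The same lookup can be driven as a standalone program. The sketch below is a hypothetical, self-contained variant of the loop above with the imports spelled out; the journal path is a placeholder and must point at an existing bookie's journal directory:

import java.io.File;
import org.apache.bookkeeper.bookie.Journal;
import org.apache.bookkeeper.bookie.LedgerDirsManager;
import org.apache.bookkeeper.bookie.LogMark;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.bookkeeper.util.DiskChecker;

public class PrintLastLogMark {

    public static void main(String[] args) throws Exception {
        // Placeholder configuration: point it at an existing bookie's journal directory.
        ServerConfiguration conf = new ServerConfiguration();
        conf.setJournalDirName("/tmp/bk-journal");
        LedgerDirsManager dirsManager = new LedgerDirsManager(conf, conf.getJournalDirs(),
                new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold()));
        File[] journalDirs = conf.getJournalDirs();
        for (int idx = 0; idx < journalDirs.length; idx++) {
            // As in the command above, the Journal instance loads the last mark
            // from the journal directory, so it can be queried right away.
            Journal journal = new Journal(idx, journalDirs[idx], conf, dirsManager);
            LogMark lastLogMark = journal.getLastLogMark().getCurMark();
            System.out.println(Long.toHexString(lastLogMark.getLogFileId()) + ".txn @ "
                    + lastLogMark.getLogFileOffset());
        }
    }
}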
Use of org.apache.bookkeeper.util.DiskChecker in project bookkeeper by apache.
The class GetLastLogMarkService, method handle:
@Override
public HttpServiceResponse handle(HttpServiceRequest request) throws Exception {
    HttpServiceResponse response = new HttpServiceResponse();
    if (HttpServer.Method.GET == request.getMethod()) {
        try {
            /**
             * output:
             *  {
             *    "<Journal_id>" : "<Pos>",
             *    ...
             *  }
             */
            Map<String, String> output = Maps.newHashMap();
            List<Journal> journals = Lists.newArrayListWithCapacity(conf.getJournalDirs().length);
            int idx = 0;
            for (File journalDir : conf.getJournalDirs()) {
                journals.add(new Journal(idx++, journalDir, conf, new LedgerDirsManager(conf, conf.getLedgerDirs(),
                        new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold()))));
            }
            for (Journal journal : journals) {
                LogMark lastLogMark = journal.getLastLogMark().getCurMark();
                LOG.debug("LastLogMark: Journal Id - " + lastLogMark.getLogFileId() + "("
                        + Long.toHexString(lastLogMark.getLogFileId()) + ".txn), Pos - " + lastLogMark.getLogFileOffset());
                output.put("LastLogMark: Journal Id - " + lastLogMark.getLogFileId() + "("
                        + Long.toHexString(lastLogMark.getLogFileId()) + ".txn)",
                        "Pos - " + lastLogMark.getLogFileOffset());
            }
            String jsonResponse = JsonUtil.toJson(output);
            LOG.debug("output body:" + jsonResponse);
            response.setBody(jsonResponse);
            response.setCode(HttpServer.StatusCode.OK);
            return response;
        } catch (Exception e) {
            LOG.error("Exception occurred while getting last log mark", e);
            response.setCode(HttpServer.StatusCode.NOT_FOUND);
            response.setBody("ERROR handling request: " + e.getMessage());
            return response;
        }
    } else {
        response.setCode(HttpServer.StatusCode.NOT_FOUND);
        response.setBody("Not found method. Should be GET method");
        return response;
    }
}
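As a rough usage sketch, the handler above can be exercised directly. This assumes `service` is a constructed and configured GetLastLogMarkService instance and that HttpServiceRequest offers a (body, method, params) constructor, as used in the project's HTTP service tests; both are assumptions here:

// Sketch only: issue a GET request against the service and print the JSON body.
HttpServiceRequest request = new HttpServiceRequest(null, HttpServer.Method.GET, null);
HttpServiceResponse response = service.handle(request);
// Expected shape: {"LastLogMark: Journal Id - 0(0.txn)" : "Pos - <offset>", ...}
System.out.println(response.getBody());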
Use of org.apache.bookkeeper.util.DiskChecker in project bookkeeper by apache.
The class LedgerStorageTestBase, method setUp:
@Before
public void setUp() throws Exception {
    journalDir = createTempDir("journal");
    ledgerDir = createTempDir("ledger");
    // create current directories
    Bookie.getCurrentDirectory(journalDir).mkdir();
    Bookie.getCurrentDirectory(ledgerDir).mkdir();
    // build the configuration
    conf.setZkServers(null);
    conf.setJournalDirName(journalDir.getPath());
    conf.setLedgerDirNames(new String[] { ledgerDir.getPath() });
    // build the ledger monitor
    DiskChecker checker = new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold());
    ledgerDirsManager = new LedgerDirsManager(conf, conf.getLedgerDirs(), checker);
}
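DiskChecker takes the full-disk threshold and the warn threshold as fractions of disk capacity. If a test needs non-default values, they can be set on the configuration before the checker is built; the sketch below is a variant of the last lines of setUp() with illustrative numbers:

    // Illustrative values: treat the disk as full at 95% usage, warn at 90%.
    conf.setDiskUsageThreshold(0.95f);
    conf.setDiskUsageWarnThreshold(0.90f);
    DiskChecker checker = new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold());
    ledgerDirsManager = new LedgerDirsManager(conf, conf.getLedgerDirs(), checker);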
Use of org.apache.bookkeeper.util.DiskChecker in project bookkeeper by apache.
The class TestLedgerDirsManager, method testLedgerDirsMonitorHandlingWithMultipleLedgerDirectories:
@Test
public void testLedgerDirsMonitorHandlingWithMultipleLedgerDirectories() throws Exception {
    ledgerMonitor.shutdown();
    final float nospace = 0.90f;
    final float lwm = 0.80f;
    HashMap<File, Float> usageMap;
    File tmpDir1 = createTempDir("bkTest", ".dir");
    File curDir1 = Bookie.getCurrentDirectory(tmpDir1);
    Bookie.checkDirectoryStructure(curDir1);
    File tmpDir2 = createTempDir("bkTest", ".dir");
    File curDir2 = Bookie.getCurrentDirectory(tmpDir2);
    Bookie.checkDirectoryStructure(curDir2);
    conf.setDiskUsageThreshold(nospace);
    conf.setDiskLowWaterMarkUsageThreshold(lwm);
    conf.setDiskUsageWarnThreshold(nospace);
    conf.setLedgerDirNames(new String[] { tmpDir1.toString(), tmpDir2.toString() });
    mockDiskChecker = new MockDiskChecker(nospace, warnThreshold);
    dirsManager = new LedgerDirsManager(conf, conf.getLedgerDirs(),
            new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold()), statsLogger);
    ledgerMonitor = new LedgerDirsMonitor(conf, mockDiskChecker, dirsManager);
    usageMap = new HashMap<File, Float>();
    usageMap.put(curDir1, 0.1f);
    usageMap.put(curDir2, 0.1f);
    mockDiskChecker.setUsageMap(usageMap);
    ledgerMonitor.init();
    final MockLedgerDirsListener mockLedgerDirsListener = new MockLedgerDirsListener();
    dirsManager.addLedgerDirsListener(mockLedgerDirsListener);
    ledgerMonitor.start();
    Thread.sleep((diskCheckInterval * 2) + 100);
    assertFalse(mockLedgerDirsListener.readOnly);
    // go above LWM but below threshold
    // should still be writable
    setUsageAndThenVerify(curDir1, lwm + 0.05f, curDir2, lwm + 0.05f, mockDiskChecker, mockLedgerDirsListener, false);
    // one dir's usage above the storage threshold, the other dir's usage below it
    // should still be writable
    setUsageAndThenVerify(curDir1, nospace + 0.02f, curDir2, nospace - 0.05f, mockDiskChecker, mockLedgerDirsListener, false);
    // both dirs above the storage threshold
    // should go to readonly
    setUsageAndThenVerify(curDir1, nospace + 0.05f, curDir2, nospace + 0.02f, mockDiskChecker, mockLedgerDirsListener, true);
    // bring the disk usages to less than the threshold,
    // but more than the LWM.
    // should still be readonly
    setUsageAndThenVerify(curDir1, nospace - 0.05f, curDir2, nospace - 0.05f, mockDiskChecker, mockLedgerDirsListener, true);
    // bring one dir's disk usage to less than the LWM,
    // the other dir's to more than the LWM, with the
    // overall disk usage still more than the LWM
    // should still be readonly
    setUsageAndThenVerify(curDir1, lwm - 0.03f, curDir2, lwm + 0.07f, mockDiskChecker, mockLedgerDirsListener, true);
    // bring one dir's disk usage to much less than the LWM,
    // the other dir's to more than the storage threshold, with the
    // overall disk usage less than the LWM
    // should go back to readwrite
    setUsageAndThenVerify(curDir1, lwm - 0.17f, curDir2, nospace + 0.03f, mockDiskChecker, mockLedgerDirsListener, false);
    assertTrue("Only one LedgerDir should be writable", dirsManager.getWritableLedgerDirs().size() == 1);
    // bring both the dirs below the LWM
    // should still be readwrite
    setUsageAndThenVerify(curDir1, lwm - 0.03f, curDir2, lwm - 0.02f, mockDiskChecker, mockLedgerDirsListener, false);
    assertTrue("Both the LedgerDirs should be writable", dirsManager.getWritableLedgerDirs().size() == 2);
    // bring both the dirs above the LWM but below the threshold
    // should still be readwrite
    setUsageAndThenVerify(curDir1, lwm + 0.02f, curDir2, lwm + 0.08f, mockDiskChecker, mockLedgerDirsListener, false);
}
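The test relies on a setUsageAndThenVerify helper from the surrounding test class whose body is not shown here. Judging from the call sites above, it roughly amounts to the following sketch; the exact timing and assertion are assumptions, not the actual implementation:

    // Rough sketch of the helper used above, inferred from its call sites; not the real code.
    private void setUsageAndThenVerify(File dir1, float usage1, File dir2, float usage2,
            MockDiskChecker mockDiskChecker, MockLedgerDirsListener mockLedgerDirsListener,
            boolean expectReadOnly) throws Exception {
        // Point the mock checker at the new per-directory usage values.
        HashMap<File, Float> usageMap = new HashMap<File, Float>();
        usageMap.put(dir1, usage1);
        usageMap.put(dir2, usage2);
        mockDiskChecker.setUsageMap(usageMap);
        // Give LedgerDirsMonitor time to run at least one check cycle.
        Thread.sleep((diskCheckInterval * 2) + 100);
        assertEquals(expectReadOnly, mockLedgerDirsListener.readOnly);
    }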