Example usage of org.apache.bookkeeper.bookie.FileInfoBackingCache.CachedFileInfo in the Apache BookKeeper project:
the method getPersistEntryBeyondInMem of the class IndexPersistenceMgr.
/**
 * Scans the on-disk index file of a ledger to find the highest entry id that
 * has been persisted, in case it is beyond what is currently tracked in memory.
 *
 * <p>The last index page of the file is read and walked backwards; the first
 * slot holding a non-zero offset marks the last persisted entry.
 *
 * @param ledgerId       id of the ledger whose index file is inspected
 * @param lastEntryInMem highest entry id currently known in memory
 * @return the larger of {@code lastEntryInMem} and the last entry id found on disk
 * @throws IOException on read failure; a {@link ShortReadException} is rethrown
 *         with the ledger id included for easier diagnosis
 */
long getPersistEntryBeyondInMem(long ledgerId, long lastEntryInMem) throws IOException {
    long largestEntry = lastEntryInMem;
    CachedFileInfo fileInfo = null;
    try {
        fileInfo = getFileInfo(ledgerId, null);
        final int indexEntrySize = LedgerEntryPage.getIndexEntrySize();
        long fileSize = fileInfo.size();
        // Drop any trailing partial index entry, otherwise we may read incorrect data.
        long remainder = fileSize % indexEntrySize;
        if (remainder != 0) {
            LOG.warn("Index file of ledger {} is not aligned with index entry size.", ledgerId);
            fileSize -= remainder;
        }
        // Only hit disk when the file extends past the entries we already have in memory.
        if (fileSize > largestEntry * indexEntrySize) {
            // Read the last full page (or from the start if the file is shorter than a page).
            long readPos = Math.max(0L, fileSize - pageSize);
            ByteBuffer page = ByteBuffer.allocate(pageSize);
            try {
                fileInfo.read(page, readPos, false);
            } catch (ShortReadException sre) {
                // rethrow with the ledger id attached for a more meaningful message
                throw new ShortReadException("Short read on ledger " + ledgerId + " : ", sre);
            }
            page.flip();
            long firstEntryOnPage = readPos / indexEntrySize;
            // Walk the page from the end; the first non-zero slot is the last persisted entry.
            for (int slot = entriesPerPage - 1; slot >= 0; slot--) {
                if (page.getLong(slot * indexEntrySize) != 0) {
                    largestEntry = Math.max(largestEntry, firstEntryOnPage + slot);
                    break;
                }
            }
        }
    } finally {
        if (fileInfo != null) {
            fileInfo.release();
        }
    }
    return largestEntry;
}
Example usage of org.apache.bookkeeper.bookie.FileInfoBackingCache.CachedFileInfo in the Apache BookKeeper project:
the method getFileInfo of the class IndexPersistenceMgr.
/**
 * Looks up (or loads) the {@code CachedFileInfo} for a ledger and increases its
 * reference count before returning it.
 *
 * <p>Retention must happen while the entry is still reachable from the cache:
 * otherwise a concurrent eviction could close the FileInfo between the cache
 * lookup and the refcount increment. When {@code tryRetain()} fails, the dead
 * entry is defensively purged from both caches and the lookup is retried.
 *
 * @param ledger    id of the ledger whose file info is requested
 * @param masterKey when non-null, the write cache is used and the key is passed
 *                  to the loader; when null, the read cache is used
 * @return a retained {@code CachedFileInfo}; the caller must release it
 * @throws IOException if loading the file info fails
 */
CachedFileInfo getFileInfo(final Long ledger, final byte[] masterKey) throws IOException {
    try {
        pendingGetFileInfoCounter.inc();
        // Loader runs only on a cache miss; it also marks the ledger active.
        Callable<CachedFileInfo> loader = () -> {
            CachedFileInfo loaded = fileInfoBackingCache.loadFileInfo(ledger, masterKey);
            activeLedgers.put(ledger, true);
            return loaded;
        };
        while (true) {
            CachedFileInfo fi = (masterKey != null)
                    ? writeFileInfoCache.get(ledger, loader)
                    : readFileInfoCache.get(ledger, loader);
            if (fi.tryRetain()) {
                return fi;
            }
            // Defensively ensure that dead fileinfo objects don't linger in the
            // cache. They shouldn't if refcounting is correct, but a double
            // release cleans up the fileinfo while it remains cached, which
            // would otherwise cause a tight loop in this method.
            boolean removedFromWrite = writeFileInfoCache.asMap().remove(ledger, fi);
            boolean removedFromRead = readFileInfoCache.asMap().remove(ledger, fi);
            if (removedFromWrite || removedFromRead) {
                LOG.error("Dead fileinfo({}) forced out of cache (write:{}, read:{}). " + "It must have been double-released somewhere.", fi, removedFromWrite, removedFromRead);
            }
        }
    } catch (ExecutionException | UncheckedExecutionException ee) {
        // Unwrap the loader's IOException; wrap anything else with ledger context.
        if (ee.getCause() instanceof IOException) {
            throw (IOException) ee.getCause();
        }
        throw new IOException("Failed to load file info for ledger " + ledger, ee);
    } finally {
        pendingGetFileInfoCounter.dec();
    }
}
Aggregations