Usage of org.apache.ignite.internal.processors.cache.persistence.tree.io.DataPageIO in the Apache Ignite project:
CacheDataTree#scanDataPages.
/**
* Creates a cursor that sequentially scans the raw data pages of the partition,
* bypassing the B+ tree index. Requires persistence to be enabled, since pages
* are read straight from the partition page store.
*
* @param rowData Required row data.
* @param c Optional MVCC closure.
* @return Cache row cursor.
* @throws IgniteCheckedException If failed.
*/
private GridCursor<CacheDataRow> scanDataPages(CacheDataRowAdapter.RowData rowData, MvccDataPageClosure c) throws IgniteCheckedException {
// Remember that the last find used the data-page-scan strategy.
lastFindWithDataPageScan = TRUE;
checkDestroyed();
assert rowData != null;
assert grp.persistenceEnabled();
int partId = rowStore.getPartitionId();
GridCacheSharedContext shared = grp.shared();
GridCacheDatabaseSharedManager db = (GridCacheDatabaseSharedManager) shared.database();
PageStore pageStore = db.getPageStore(grpId, partId);
boolean mvccEnabled = grp.mvccEnabled();
int pageSize = pageSize();
// Meta page of the partition; subsequent pages are addressed as startPageId + page index.
long startPageId = ((PageMemoryEx) pageMem).partitionMetaPageId(grp.groupId(), partId);
/**
* Cursor over cache data rows, fetched page by page directly from the partition store.
*/
final class DataPageScanCursor implements GridCursor<CacheDataRow> {
/**
* Number of pages currently in the partition store (rechecked when reached, since it may grow).
*/
int pagesCnt = pageStore.pages();
/**
* Index of the current page relative to the partition start; {@code -1} before the first read.
*/
int curPage = -1;
/**
* Rows fetched from the current data page.
*/
CacheDataRow[] rows = EMPTY_ROWS;
/**
* Index of the current row in {@code rows}; {@code -1} before the first {@code next()} call.
*/
int curRow = -1;
/**
* {@inheritDoc}
*/
@Override
public boolean next() throws IgniteCheckedException {
// A null rows array marks the scan as exhausted (set in readNextDataPage()).
if (rows == null)
return false;
// Serve the next already-fetched row of the current page, if any.
if (++curRow < rows.length && rows[curRow] != null)
return true;
// Current page is drained: advance to the next non-empty data page.
return readNextDataPage();
}
/**
* @return {@code true} If new rows were fetched.
* @throws IgniteCheckedException If failed.
*/
private boolean readNextDataPage() throws IgniteCheckedException {
checkDestroyed();
for (; ; ) {
if (++curPage >= pagesCnt) {
// Reread number of pages when we reach it (it may grow).
int newPagesCnt = pageStore.pages();
if (newPagesCnt <= pagesCnt) {
// No new pages appeared: mark the cursor exhausted.
rows = null;
return false;
}
pagesCnt = newPagesCnt;
}
// Pages of a partition have consecutive indexes, so plain addition yields the next page id.
long pageId = startPageId + curPage;
long page = pageMem.acquirePage(grpId, pageId);
try {
boolean skipVer = CacheDataRowStore.getSkipVersion();
// NOTE(review): the (true, false) flags to readLock are PageMemoryEx-specific — confirm their semantics there.
long pageAddr = ((PageMemoryEx) pageMem).readLock(page, pageId, true, false);
try {
// Here we should also exclude fragmented pages that don't contain the head of the entry.
if (PageIO.getType(pageAddr) != T_DATA)
// Not a data page.
continue;
DataPageIO io = PageIO.getPageIO(T_DATA, PageIO.getVersion(pageAddr));
int rowsCnt = io.getRowsCount(pageAddr);
if (rowsCnt == 0)
// Empty page.
continue;
// Grow the reusable rows array if needed, otherwise clear its stale tail.
if (rowsCnt > rows.length)
rows = new CacheDataRow[rowsCnt];
else
clearTail(rows, rowsCnt);
int r = 0;
for (int i = 0; i < rowsCnt; i++) {
// The optional MVCC closure filters out rows that should not be returned.
if (c == null || c.applyMvcc(io, pageAddr, i, pageSize)) {
DataRow row = mvccEnabled ? new MvccDataRow() : new DataRow();
row.initFromDataPage(io, pageAddr, i, grp, shared, pageMem, rowData, skipVer);
rows[r++] = row;
}
}
if (r == 0)
// No rows fetched in this page.
continue;
curRow = 0;
return true;
} finally {
pageMem.readUnlock(grpId, pageId, page);
}
} finally {
pageMem.releasePage(grpId, pageId, page);
}
}
}
/**
* {@inheritDoc}
*/
@Override
public CacheDataRow get() {
return rows[curRow];
}
}
return new DataPageScanCursor();
}
Usage of org.apache.ignite.internal.processors.cache.persistence.tree.io.DataPageIO in the Apache Ignite project:
DataPageMvccMarkUpdatedRecord#applyDelta.
/**
* {@inheritDoc}
*/
@Override
public void applyDelta(PageMemory pageMem, long pageAddr) throws IgniteCheckedException {
// Resolve the concrete IO for this page version, then rewrite the MVCC
// version fields of the addressed item in place.
final DataPageIO dataIo = PageIO.getPageIO(pageAddr);

final int realPageSize = pageMem.realPageSize(groupId());

dataIo.updateNewVersion(pageAddr, itemId, realPageSize, newMvccCrd, newMvccCntr, newMvccOpCntr);
}
Usage of org.apache.ignite.internal.processors.cache.persistence.tree.io.DataPageIO in the Apache Ignite project:
DataPageMvccUpdateTxStateHintRecord#applyDelta.
/**
* {@inheritDoc}
*/
@Override
public void applyDelta(PageMemory pageMem, long pageAddr) throws IgniteCheckedException {
// Resolve the concrete IO for this page version, then stamp the
// transaction-state hint onto the addressed item in place.
final DataPageIO dataIo = PageIO.getPageIO(pageAddr);

final int realPageSize = pageMem.realPageSize(groupId());

dataIo.updateTxState(pageAddr, itemId, realPageSize, txState);
}
Usage of org.apache.ignite.internal.processors.cache.persistence.tree.io.DataPageIO in the Apache Ignite project:
CorruptedTreeFailureHandlingTest#testCorruptedPage.
/**
* Check that if a corrupted page exists, an {@link CorruptedTreeException}
* will be thrown and a diagnostic file will be generated.
*
* @throws Exception If failed.
*/
@Test
public void testCorruptedPage() throws Exception {
IgniteEx srv = startGrid(0);
File diagnosticDir = new File(srv.context().config().getWorkDirectory(), "diagnostic");
FileUtils.deleteDirectory(diagnosticDir);
srv.cluster().state(ClusterState.ACTIVE);
IgniteCache<Integer, Integer> cache = srv.getOrCreateCache(DEFAULT_CACHE_NAME);
// Populate the same key range the verification loop below reads,
// so every get() actually traverses the (soon to be corrupted) tree.
for (int i = 0; i < CACHE_ENTRIES; i++) cache.put(i, i);
int pageSize = srv.configuration().getDataStorageConfiguration().getPageSize();
int grpId = srv.context().cache().cacheGroups().stream().filter(context -> context.cacheOrGroupName().equals(DEFAULT_CACHE_NAME)).findAny().orElseThrow(() -> new RuntimeException("Cache group not found")).groupId();
stopGrid(0, false);
// Node is stopped, we're ready to corrupt partition data.
long link = linkRef.get();
long pageId = PageIdUtils.pageId(link);
int itemId = PageIdUtils.itemId(link);
ByteBuffer pageBuf = ByteBuffer.allocateDirect(pageSize);
OpenOption[] options = { StandardOpenOption.READ, StandardOpenOption.WRITE };
try (RandomAccessFileIO fileIO = new RandomAccessFileIO(fileRef.get(), options)) {
DataPageIO dataPageIO = DataPageIO.VERSIONS.latest();
// Widen before multiplying: pageIndex * pageSize overflows int for files > 2 GB.
long pageOff = pageSize + (long) PageIdUtils.pageIndex(pageId) * pageSize;
// Read the page that holds the tracked entry.
fileIO.position(pageOff);
fileIO.readFully(pageBuf);
long pageAddr = GridUnsafe.bufferAddress(pageBuf);
// Remove the existing item so the tree now references a missing row.
dataPageIO.removeRow(pageAddr, itemId, pageSize);
// Recalculate CRC (it is computed over the buffer with a zeroed CRC field).
PageIO.setCrc(pageAddr, 0);
pageBuf.rewind();
PageIO.setCrc(pageAddr, FastCrc.calcCrc(pageBuf, pageSize));
// Write it back.
pageBuf.rewind();
fileIO.position(pageOff);
fileIO.writeFully(pageBuf);
}
LogListener logLsnr = LogListener.matches("CorruptedTreeException has occurred. " + "To diagnose it, make a backup of the following directories: ").build();
srv = startGrid(0, cfg -> {
cfg.setGridLogger(new ListeningTestLogger(cfg.getGridLogger(), logLsnr));
});
// Add modified page to WAL so it won't be restored to previous (valid) state.
pageBuf.rewind();
ByteBuffer cpBuf = ByteBuffer.allocate(pageBuf.capacity());
cpBuf.put(pageBuf);
PageSnapshot pageSnapshot = new PageSnapshot(new FullPageId(pageId, grpId), cpBuf.array(), pageSize);
srv.context().cache().context().wal().log(pageSnapshot);
// Access cache: reads must eventually hit the corrupted page and fail.
cache = srv.cache(DEFAULT_CACHE_NAME);
try {
for (int i = 0; i < CACHE_ENTRIES; i++) cache.get(i);
fail("Cache operations are expected to fail");
} catch (Throwable e) {
assertTrue(X.hasCause(e, CorruptedTreeException.class));
}
// The failure handler is expected to stop the node.
assertTrue(GridTestUtils.waitForCondition(() -> G.allGrids().isEmpty(), 10_000L));
// Exactly one diagnostic file describing the corrupted pages must be produced.
assertTrue(diagnosticDir.exists());
assertTrue(diagnosticDir.isDirectory());
Pattern corruptedPagesFileNamePtrn = corruptedPagesFileNamePattern();
File[] txtFiles = diagnosticDir.listFiles((dir, name) -> corruptedPagesFileNamePtrn.matcher(name).matches());
assertFalse(F.isEmpty(txtFiles));
assertEquals(1, txtFiles.length);
assertTrue(logLsnr.check());
}
Usage of org.apache.ignite.internal.processors.cache.persistence.tree.io.DataPageIO in the Apache Ignite project:
CacheDataRowAdapter#initFromLink.
/**
* Read row from data pages.
* Can be called with cctx == null, if cache instance is unknown, but its ID is stored in the data row.
* Follows the chain of fragment links until the whole row (or just the key,
* for {@code RowData.KEY_ONLY}) has been assembled.
*
* @param grp Cache group.
* @param sharedCtx Shared context.
* @param pageMem Page memory.
* @param rowData Row data.
* @throws IgniteCheckedException If failed.
*/
public final void initFromLink(@Nullable CacheGroupContext grp, GridCacheSharedContext<?, ?> sharedCtx, PageMemory pageMem, RowData rowData) throws IgniteCheckedException {
assert link != 0 : "link";
assert key == null : "key";
CacheObjectContext coctx = grp != null ? grp.cacheObjectContext() : null;
// Cache ID is stored in the page only when the group is unknown or shares pages between caches.
boolean readCacheId = grp == null || grp.storeCacheIdInDataPage();
long nextLink = link;
// Accumulates a partially-read object across page fragments.
IncompleteObject<?> incomplete = null;
boolean first = true;
do {
final long pageId = pageId(nextLink);
// Group is null if try evict page, with persistence evictions should be disabled.
assert grp != null || pageMem instanceof PageMemoryNoStoreImpl;
int grpId = grp != null ? grp.groupId() : 0;
final long page = pageMem.acquirePage(grpId, pageId);
try {
// Non-empty data page must not be recycled.
long pageAddr = pageMem.readLock(grpId, pageId, page);
assert pageAddr != 0L : nextLink;
try {
DataPageIO io = DataPageIO.VERSIONS.forPage(pageAddr);
// Locate this fragment's payload within the page; data.nextLink() points at the next fragment.
DataPagePayload data = io.readPayload(pageAddr, itemId(nextLink), pageMem.pageSize());
nextLink = data.nextLink();
if (first) {
if (nextLink == 0) {
// Fast path for a single page row.
readFullRow(sharedCtx, coctx, pageAddr + data.offset(), rowData, readCacheId);
return;
}
first = false;
}
// Fragmented row: read this fragment through a bounded buffer view.
ByteBuffer buf = pageMem.pageBuffer(pageAddr);
buf.position(data.offset());
buf.limit(data.offset() + data.payloadSize());
boolean keyOnly = rowData == RowData.KEY_ONLY;
incomplete = readFragment(sharedCtx, coctx, buf, keyOnly, readCacheId, incomplete);
// Key-only reads may finish before the whole chain is consumed.
if (keyOnly && key != null)
return;
} finally {
pageMem.readUnlock(grpId, pageId, page);
}
} finally {
pageMem.releasePage(grpId, pageId, page);
}
} while (nextLink != 0);
assert isReady() : "ready";
}
Aggregations