Use of org.apache.ignite.internal.pagemem.store.PageStore in project ignite by apache.
The class CachePartitionDefragmentationManager, method defragmentOnePartition.
/**
 * Defragment one given partition.
 */
private boolean defragmentOnePartition(CacheGroupContext oldGrpCtx, int grpId, File workDir,
    GridCacheOffheapManager offheap, FileVersionCheckingFactory pageStoreFactory,
    GridCompoundFuture<Object, Object> cmpFut, PageMemoryEx oldPageMem, CacheGroupContext newGrpCtx,
    CacheDataStore oldCacheDataStore) throws IgniteCheckedException {
    TreeIterator treeIter = new TreeIterator(pageSize);

    checkCancellation();

    int partId = oldCacheDataStore.partId();

    PartitionContext partCtx = new PartitionContext(workDir, grpId, partId, partDataRegion, mappingDataRegion,
        oldGrpCtx, newGrpCtx, oldCacheDataStore, pageStoreFactory);

    if (skipAlreadyDefragmentedPartition(workDir, grpId, partId, log)) {
        partCtx.createPageStore(() -> defragmentedPartMappingFile(workDir, partId).toPath(),
            partCtx.mappingPagesAllocated, partCtx.mappingPageMemory);

        linkMapByPart.put(partId, partCtx.createLinkMapTree(false));

        return false;
    }

    partCtx.createPageStore(() -> defragmentedPartMappingFile(workDir, partId).toPath(),
        partCtx.mappingPagesAllocated, partCtx.mappingPageMemory);

    linkMapByPart.put(partId, partCtx.createLinkMapTree(true));

    checkCancellation();

    partCtx.createPageStore(() -> defragmentedPartTmpFile(workDir, partId).toPath(),
        partCtx.partPagesAllocated, partCtx.partPageMemory);

    partCtx.createNewCacheDataStore(offheap);

    copyPartitionData(partCtx, treeIter);

    DefragmentationPageReadWriteManager pageMgr = (DefragmentationPageReadWriteManager)partCtx.partPageMemory.pageManager();

    PageStore oldPageStore = filePageStoreMgr.getStore(grpId, partId);

    status.onPartitionDefragmented(oldGrpCtx, oldPageStore.size(),
        // + file header.
        pageSize + partCtx.partPagesAllocated.get() * pageSize);

    // TODO Move inside of defragmentSinglePartition.
    IgniteInClosure<IgniteInternalFuture<?>> cpLsnr = fut -> {
        if (fut.error() == null) {
            if (log.isDebugEnabled()) {
                log.debug(S.toString("Partition defragmented",
                    "grpId", grpId, false,
                    "partId", partId, false,
                    "oldPages", oldPageStore.pages(), false,
                    "newPages", partCtx.partPagesAllocated.get() + 1, false,
                    "mappingPages", partCtx.mappingPagesAllocated.get() + 1, false,
                    "pageSize", pageSize, false,
                    "partFile", defragmentedPartFile(workDir, partId).getName(), false,
                    "workDir", workDir, false));
            }

            oldPageMem.invalidate(grpId, partId);

            partCtx.partPageMemory.invalidate(grpId, partId);

            // Yes, it'll be invalid in a second.
            pageMgr.pageStoreMap().removePageStore(grpId, partId);

            renameTempPartitionFile(workDir, partId);
        }
    };

    GridFutureAdapter<?> cpFut = defragmentationCheckpoint.forceCheckpoint("partition defragmented", null)
        .futureFor(FINISHED);

    cpFut.listen(cpLsnr);

    cmpFut.add((IgniteInternalFuture<Object>)cpFut);

    return true;
}
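The method defers the final rename of the temporary partition file until the forced checkpoint completes, so a crash in between leaves either the old partition file or a fully written new one. Below is a minimal standalone sketch of that publish-after-checkpoint pattern using plain java.util.concurrent and java.nio.file types instead of Ignite's future classes; the class name and the finishOnePartition helper are hypothetical.

import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.concurrent.CompletableFuture;

public class RenameAfterCheckpoint {
    /** Hypothetical helper: publishes the new partition file only once the checkpoint succeeds. */
    static CompletableFuture<Void> finishOnePartition(CompletableFuture<Void> checkpointFut, Path tmpFile, Path partFile) {
        return checkpointFut.thenRun(() -> {
            try {
                // All pages of the new partition are durable now; swap it in atomically.
                Files.move(tmpFile, partFile, StandardCopyOption.ATOMIC_MOVE);
            }
            catch (IOException e) {
                throw new UncheckedIOException("Failed to publish defragmented partition", e);
            }
        });
    }
}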
Use of org.apache.ignite.internal.pagemem.store.PageStore in project ignite by apache.
The class PageStoreMap, method getStore.
/**
 * {@inheritDoc}
 */
@Override public PageStore getStore(int grpId, int partId) throws IgniteCheckedException {
    IntMap<PageStore> partPageStoresMap = grpPageStoresMap.get(grpId);

    if (partPageStoresMap == null) {
        throw new IgniteCheckedException(S.toString("Page store map not found. ",
            "grpId", grpId, false, "partId", partId, false,
            "keys", Arrays.toString(grpPageStoresMap.keys()), false, "this", hashCode(), false));
    }

    PageStore pageStore = partPageStoresMap.get(partId);

    if (pageStore == null) {
        throw new IgniteCheckedException(S.toString("Page store not found. ",
            "grpId", grpId, false, "partId", partId, false,
            "keys", Arrays.toString(partPageStoresMap.keys()), false, "this", hashCode(), false));
    }

    return pageStore;
}
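getStore fails fast and includes the known keys in the exception message, which makes a misrouted (grpId, partId) pair easy to diagnose. A minimal sketch of the same two-level lookup over standard collections, assuming nothing Ignite-specific (the PartitionStores name is made up):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class PartitionStores<S> {
    private final Map<Integer, Map<Integer, S>> storesByGrp = new ConcurrentHashMap<>();

    /** Registers a store for (grpId, partId). */
    public void put(int grpId, int partId, S store) {
        storesByGrp.computeIfAbsent(grpId, g -> new ConcurrentHashMap<>()).put(partId, store);
    }

    /** Looks up a store, reporting the known keys on failure. */
    public S get(int grpId, int partId) {
        Map<Integer, S> parts = storesByGrp.get(grpId);

        if (parts == null)
            throw new IllegalStateException("No stores for group " + grpId + ", known groups: " + storesByGrp.keySet());

        S store = parts.get(partId);

        if (store == null)
            throw new IllegalStateException("No store for partition " + partId + " in group " + grpId
                + ", known partitions: " + parts.keySet());

        return store;
    }
}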
Use of org.apache.ignite.internal.pagemem.store.PageStore in project ignite by apache.
The class SnapshotFutureTask, method addPartitionWriters.
/**
 * @param grpId Cache group id.
 * @param parts Set of partitions to be processed.
 * @param dirName Directory name to init.
 * @throws IgniteCheckedException If fails.
 */
private void addPartitionWriters(int grpId, Set<Integer> parts, String dirName) throws IgniteCheckedException {
    Integer encGrpId = cctx.cache().isEncrypted(grpId) ? grpId : null;

    for (int partId : parts) {
        GroupPartitionId pair = new GroupPartitionId(grpId, partId);

        PageStore store = pageStore.getStore(grpId, partId);

        partDeltaWriters.put(pair,
            new PageStoreSerialWriter(store, partDeltaFile(cacheWorkDir(tmpConsIdDir, dirName), partId), encGrpId));

        partFileLengths.put(pair, store.size());
    }
}
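Both maps are keyed by the (grpId, partId) pair, so GroupPartitionId must provide value-based equals and hashCode for the writers and the recorded file lengths to line up. A rough illustrative stand-in for such a composite key in plain Java (GroupPartitionId itself is Ignite-internal and carries more than this):

import java.util.Objects;

/** Illustrative stand-in for a composite (group, partition) map key. */
final class GroupPartKey {
    private final int grpId;
    private final int partId;

    GroupPartKey(int grpId, int partId) {
        this.grpId = grpId;
        this.partId = partId;
    }

    @Override public boolean equals(Object o) {
        if (this == o)
            return true;

        if (!(o instanceof GroupPartKey))
            return false;

        GroupPartKey other = (GroupPartKey)o;

        return grpId == other.grpId && partId == other.partId;
    }

    @Override public int hashCode() {
        return Objects.hash(grpId, partId);
    }
}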
Use of org.apache.ignite.internal.pagemem.store.PageStore in project ignite by apache.
The class CheckpointBufferDeadlockTest, method runDeadlockScenario.
/** */
private void runDeadlockScenario() throws Exception {
    LogListener lsnr = LogListener.matches(s -> s.contains("AssertionError")).build();

    log.registerListener(lsnr);

    IgniteEx ig = startGrid(0);

    ig.cluster().active(true);

    GridCacheDatabaseSharedManager db = (GridCacheDatabaseSharedManager)ig.context().cache().context().database();

    FilePageStoreManager pageStoreMgr = (FilePageStoreManager)ig.context().cache().context().pageStore();

    final String cacheName = "single-part";

    CacheConfiguration<Object, Object> cacheCfg = new CacheConfiguration<>()
        .setName(cacheName)
        .setAffinity(new RendezvousAffinityFunction(false, 1));

    IgniteCache<Object, Object> singlePartCache = ig.getOrCreateCache(cacheCfg);

    db.enableCheckpoints(false).get();

    Thread.sleep(1_000);

    try (IgniteDataStreamer<Object, Object> streamer = ig.dataStreamer(singlePartCache.getName())) {
        int entries = MAX_SIZE / ENTRY_BYTE_CHUNK_SIZE / 4;

        for (int i = 0; i < entries; i++)
            streamer.addData(i, new byte[ENTRY_BYTE_CHUNK_SIZE]);

        streamer.flush();
    }

    slowCheckpointEnabled.set(true);

    log.info(">>> Slow checkpoints enabled");

    db.enableCheckpoints(true).get();

    AtomicBoolean fail = new AtomicBoolean(false);

    IgniteInternalFuture<Long> fut = GridTestUtils.runMultiThreadedAsync(new Runnable() {
        @Override public void run() {
            int loops = 0;

            while (!stop.get()) {
                if (loops % 10 == 0 && loops > 0 && loops < 500 || loops % 500 == 0 && loops >= 500)
                    log.info("Successfully completed " + loops + " loops");

                db.checkpointReadLock();

                try {
                    Set<FullPageId> pickedPagesSet = new HashSet<>();

                    PageStore store = pageStoreMgr.getStore(CU.cacheId(cacheName), 0);

                    int pages = store.pages();

                    DataRegion region = db.dataRegion(DataStorageConfiguration.DFLT_DATA_REG_DEFAULT_NAME);

                    PageMemoryImpl pageMem = (PageMemoryImpl)region.pageMemory();

                    while (pickedPagesSet.size() < PAGES_TOUCHED_UNDER_CP_LOCK) {
                        int pageIdx = ThreadLocalRandom.current().nextInt(PAGES_TOUCHED_UNDER_CP_LOCK,
                            pages - PAGES_TOUCHED_UNDER_CP_LOCK);

                        long pageId = PageIdUtils.pageId(0, PageIdAllocator.FLAG_DATA, pageIdx);

                        long page = pageMem.acquirePage(CU.cacheId(cacheName), pageId);

                        try {
                            // We do not know the correct flag (FLAG_DATA or FLAG_AUX). Skip the page if no luck.
                            if (pageId != PageIO.getPageId(page + PageMemoryImpl.PAGE_OVERHEAD))
                                continue;
                        }
                        finally {
                            pageMem.releasePage(CU.cacheId(cacheName), pageId, page);
                        }

                        pickedPagesSet.add(new FullPageId(pageId, CU.cacheId(cacheName)));
                    }

                    List<FullPageId> pickedPages = new ArrayList<>(pickedPagesSet);

                    assertEquals(PAGES_TOUCHED_UNDER_CP_LOCK, pickedPages.size());

                    // Sort to avoid deadlocks on page rw-locks.
                    pickedPages.sort(new Comparator<FullPageId>() {
                        @Override public int compare(FullPageId o1, FullPageId o2) {
                            int cmp = Long.compare(o1.groupId(), o2.groupId());

                            if (cmp != 0)
                                return cmp;

                            return Long.compare(o1.effectivePageId(), o2.effectivePageId());
                        }
                    });

                    List<Long> readLockedPages = new ArrayList<>();

                    // Read-lock many pages at once intentionally.
                    for (int i = 0; i < PAGES_TOUCHED_UNDER_CP_LOCK / 2; i++) {
                        FullPageId fpid = pickedPages.get(i);

                        long page = pageMem.acquirePage(fpid.groupId(), fpid.pageId());

                        long abs = pageMem.readLock(fpid.groupId(), fpid.pageId(), page);

                        assertFalse(fpid.toString(), abs == 0);

                        readLockedPages.add(page);
                    }

                    // Emulate writes to trigger throttling.
                    for (int i = PAGES_TOUCHED_UNDER_CP_LOCK / 2; i < PAGES_TOUCHED_UNDER_CP_LOCK && !stop.get(); i++) {
                        FullPageId fpid = pickedPages.get(i);

                        long page = pageMem.acquirePage(fpid.groupId(), fpid.pageId());

                        long abs = pageMem.writeLock(fpid.groupId(), fpid.pageId(), page);

                        assertFalse(fpid.toString(), abs == 0);

                        pageMem.writeUnlock(fpid.groupId(), fpid.pageId(), page, null, true);

                        pageMem.releasePage(fpid.groupId(), fpid.pageId(), page);
                    }

                    for (int i = 0; i < PAGES_TOUCHED_UNDER_CP_LOCK / 2; i++) {
                        FullPageId fpid = pickedPages.get(i);

                        pageMem.readUnlock(fpid.groupId(), fpid.pageId(), readLockedPages.get(i));

                        pageMem.releasePage(fpid.groupId(), fpid.pageId(), readLockedPages.get(i));
                    }
                }
                catch (Throwable e) {
                    log.error("Error in loader thread", e);

                    fail.set(true);
                }
                finally {
                    db.checkpointReadUnlock();
                }

                loops++;
            }
        }
    }, 10, "load-runner");

    // Wait for throttling to start.
    Thread.sleep(10_000);

    slowCheckpointEnabled.set(false);

    log.info(">>> Slow checkpoints disabled");

    assertFalse(fail.get());

    // The previous checkpoint should eventually finish.
    forceCheckpoint();

    stop.set(true);

    fut.get();

    db.enableCheckpoints(true).get();

    // Check that there are no problems with pinned pages.
    ig.destroyCache(cacheName);

    assertFalse(lsnr.check());

    log.unregisterListener(lsnr);
}
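The comparator above gives every loader thread the same global acquisition order (group id first, then effective page id), which is the classic way to rule out lock-order inversion between threads. The same idea in a small self-contained sketch with standard ReentrantReadWriteLocks; all names here are illustrative.

import java.util.ArrayList;
import java.util.List;
import java.util.SortedMap;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class OrderedLocking {
    /** Acquires all requested locks in ascending key order so every thread uses the same global order. */
    static void lockAllInOrder(SortedMap<Long, ReentrantReadWriteLock> locks) {
        List<ReentrantReadWriteLock> taken = new ArrayList<>();

        try {
            // A SortedMap iterates in ascending key order, so no two threads can acquire in opposite orders.
            for (ReentrantReadWriteLock lock : locks.values()) {
                lock.readLock().lock();

                taken.add(lock);
            }
            // ... work under the locks would happen here ...
        }
        finally {
            // Release order is irrelevant for deadlock avoidance.
            for (ReentrantReadWriteLock lock : taken)
                lock.readLock().unlock();
        }
    }
}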
Use of org.apache.ignite.internal.pagemem.store.PageStore in project ignite by apache.
The class IgnitePdsRecoveryAfterFileCorruptionTest, method eraseDataFromDisk.
/**
 * @param pageStore Page store.
 * @param cacheId Cache id.
 * @param page Page.
 */
private void eraseDataFromDisk(FilePageStoreManager pageStore, int cacheId,
    FullPageId page) throws IgniteCheckedException, IOException {
    PageStore store = pageStore.getStore(cacheId, PageIdUtils.partId(page.pageId()));

    FilePageStore filePageStore = (FilePageStore)store;

    FileIO fileIO = U.field(filePageStore, "fileIO");

    long size = fileIO.size();

    fileIO.writeFully(ByteBuffer.allocate((int)size - filePageStore.headerSize()), filePageStore.headerSize());

    fileIO.force();
}
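The test reaches the underlying FileIO via reflection and blanks everything past the page store header, leaving the header itself intact. A similar corruption can be emulated on any file with standard NIO; the class name, path, and header size below are assumptions, not Ignite values.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class EraseAfterHeader {
    /** Overwrites everything after the first headerSize bytes of the file with zeros. */
    static void eraseAfterHeader(Path file, int headerSize) throws IOException {
        try (FileChannel ch = FileChannel.open(file, StandardOpenOption.READ, StandardOpenOption.WRITE)) {
            // ByteBuffer.allocate returns a zero-filled buffer sized to the region past the header.
            ByteBuffer zeros = ByteBuffer.allocate((int)(ch.size() - headerSize));

            ch.position(headerSize);

            while (zeros.hasRemaining())
                ch.write(zeros);

            // Flush the zeroed region to disk, as fileIO.force() does in the test.
            ch.force(true);
        }
    }
}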