Use of org.apache.ignite.internal.processors.cache.persistence.GridCacheOffheapManager in project ignite by apache.
The class CachePartitionDefragmentationManager, method defragmentOnePartition.
/**
* Defragment one given partition.
*/
private boolean defragmentOnePartition(CacheGroupContext oldGrpCtx, int grpId, File workDir, GridCacheOffheapManager offheap,
    FileVersionCheckingFactory pageStoreFactory, GridCompoundFuture<Object, Object> cmpFut, PageMemoryEx oldPageMem,
    CacheGroupContext newGrpCtx, CacheDataStore oldCacheDataStore) throws IgniteCheckedException {
    TreeIterator treeIter = new TreeIterator(pageSize);

    checkCancellation();

    int partId = oldCacheDataStore.partId();

    PartitionContext partCtx = new PartitionContext(workDir, grpId, partId, partDataRegion, mappingDataRegion,
        oldGrpCtx, newGrpCtx, oldCacheDataStore, pageStoreFactory);

    if (skipAlreadyDefragmentedPartition(workDir, grpId, partId, log)) {
        partCtx.createPageStore(() -> defragmentedPartMappingFile(workDir, partId).toPath(), partCtx.mappingPagesAllocated, partCtx.mappingPageMemory);

        linkMapByPart.put(partId, partCtx.createLinkMapTree(false));

        return false;
    }

    partCtx.createPageStore(() -> defragmentedPartMappingFile(workDir, partId).toPath(), partCtx.mappingPagesAllocated, partCtx.mappingPageMemory);

    linkMapByPart.put(partId, partCtx.createLinkMapTree(true));

    checkCancellation();

    partCtx.createPageStore(() -> defragmentedPartTmpFile(workDir, partId).toPath(), partCtx.partPagesAllocated, partCtx.partPageMemory);

    partCtx.createNewCacheDataStore(offheap);

    copyPartitionData(partCtx, treeIter);

    DefragmentationPageReadWriteManager pageMgr = (DefragmentationPageReadWriteManager)partCtx.partPageMemory.pageManager();

    PageStore oldPageStore = filePageStoreMgr.getStore(grpId, partId);

    status.onPartitionDefragmented(oldGrpCtx, oldPageStore.size(),
        pageSize + partCtx.partPagesAllocated.get() * pageSize); // + file header.

    // TODO Move inside of defragmentSinglePartition.
    IgniteInClosure<IgniteInternalFuture<?>> cpLsnr = fut -> {
        if (fut.error() == null) {
            if (log.isDebugEnabled()) {
                log.debug(S.toString("Partition defragmented", "grpId", grpId, false, "partId", partId, false,
                    "oldPages", oldPageStore.pages(), false, "newPages", partCtx.partPagesAllocated.get() + 1, false,
                    "mappingPages", partCtx.mappingPagesAllocated.get() + 1, false, "pageSize", pageSize, false,
                    "partFile", defragmentedPartFile(workDir, partId).getName(), false, "workDir", workDir, false));
            }

            oldPageMem.invalidate(grpId, partId);

            partCtx.partPageMemory.invalidate(grpId, partId);

            // Yes, it'll be invalid in a second.
            pageMgr.pageStoreMap().removePageStore(grpId, partId);

            renameTempPartitionFile(workDir, partId);
        }
    };

    GridFutureAdapter<?> cpFut = defragmentationCheckpoint.forceCheckpoint("partition defragmented", null).futureFor(FINISHED);

    cpFut.listen(cpLsnr);

    cmpFut.add((IgniteInternalFuture<Object>)cpFut);

    return true;
}
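For context, this method is called once per cache data store by the surrounding defragmentation routine, which collects the per-partition checkpoint futures into the compound future. The following is a minimal, hypothetical driver sketch only; the method name defragmentGroup and the oldStores parameter are illustrative assumptions and do not reflect the actual signature used inside CachePartitionDefragmentationManager.

// Hypothetical caller sketch (assumed names: defragmentGroup, oldStores).
// Invokes defragmentOnePartition(...) for every partition and waits for all
// per-partition checkpoint listeners to finish.
private void defragmentGroup(CacheGroupContext oldGrpCtx, CacheGroupContext newGrpCtx, File workDir,
    GridCacheOffheapManager offheap, FileVersionCheckingFactory pageStoreFactory, PageMemoryEx oldPageMem,
    List<CacheDataStore> oldStores) throws IgniteCheckedException {
    GridCompoundFuture<Object, Object> cmpFut = new GridCompoundFuture<>();

    int grpId = oldGrpCtx.groupId();

    for (CacheDataStore oldStore : oldStores) {
        // Returns false when the partition was already defragmented on a previous run.
        boolean scheduled = defragmentOnePartition(oldGrpCtx, grpId, workDir, offheap,
            pageStoreFactory, cmpFut, oldPageMem, newGrpCtx, oldStore);

        if (!scheduled && log.isDebugEnabled())
            log.debug("Partition already defragmented, skipping [grpId=" + grpId +
                ", partId=" + oldStore.partId() + "]");
    }

    cmpFut.markInitialized();

    // Block until every scheduled checkpoint future has completed.
    cmpFut.get();
}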
Use of org.apache.ignite.internal.processors.cache.persistence.GridCacheOffheapManager in project ignite by apache.
The class Checkpointer, method destroyEvictedPartitions.
/**
* Processes all evicted partitions scheduled for destroy.
*
* @return The number of destroyed partition files.
* @throws IgniteCheckedException If failed.
*/
private int destroyEvictedPartitions() throws IgniteCheckedException {
    PartitionDestroyQueue destroyQueue = curCpProgress.getDestroyQueue();

    if (destroyQueue.pendingReqs().isEmpty())
        return 0;

    List<PartitionDestroyRequest> reqs = null;

    for (final PartitionDestroyRequest req : destroyQueue.pendingReqs().values()) {
        if (!req.beginDestroy())
            continue;

        final int grpId = req.groupId();
        final int partId = req.partitionId();

        CacheGroupContext grp = cacheProcessor.cacheGroup(grpId);

        assert grp != null : "Cache group is not initialized [grpId=" + grpId + "]";
        assert grp.offheap() instanceof GridCacheOffheapManager : "Destroying partition files when persistence is off " + grp.offheap();

        final GridCacheOffheapManager offheap = (GridCacheOffheapManager)grp.offheap();

        Runnable destroyPartTask = () -> {
            try {
                offheap.destroyPartitionStore(partId);

                req.onDone(null);

                grp.metrics().decrementInitializedLocalPartitions();

                if (log.isDebugEnabled())
                    log.debug("Partition file has destroyed [grpId=" + grpId + ", partId=" + partId + "]");
            }
            catch (Exception e) {
                req.onDone(new IgniteCheckedException(
                    "Partition file destroy has failed [grpId=" + grpId + ", partId=" + partId + "]", e));
            }
        };

        IgniteThreadPoolExecutor pool = checkpointWritePagesPool;

        if (pool != null) {
            try {
                pool.execute(destroyPartTask);
            }
            catch (RejectedExecutionException ignore) {
                // Run the task synchronously.
                destroyPartTask.run();
            }
        }
        else
            destroyPartTask.run();

        if (reqs == null)
            reqs = new ArrayList<>();

        reqs.add(req);
    }

    if (reqs != null)
        for (PartitionDestroyRequest req : reqs)
            req.waitCompleted();

    destroyQueue.pendingReqs().clear();

    return reqs != null ? reqs.size() : 0;
}
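The execute-or-run-inline pattern used above (submit the destroy task to the checkpoint pages pool, fall back to the calling thread when the pool is absent or rejects the task) is generic and worth isolating. A minimal, self-contained sketch of that pattern, not tied to Ignite internals:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.RejectedExecutionException;

final class ExecuteOrRunInline {
    /**
     * Runs the task on the given pool if possible, otherwise synchronously on the caller thread.
     * Mirrors the fallback used by destroyEvictedPartitions when checkpointWritePagesPool is
     * missing or saturated.
     */
    static void executeOrRun(ExecutorService pool, Runnable task) {
        if (pool == null) {
            task.run();
            return;
        }

        try {
            pool.execute(task);
        }
        catch (RejectedExecutionException ignore) {
            // Pool queue is full or the pool is shutting down: degrade to inline execution.
            task.run();
        }
    }
}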
Use of org.apache.ignite.internal.processors.cache.persistence.GridCacheOffheapManager in project ignite by apache.
The class ReleaseSegmentOnHistoricalRebalanceTest, method testReleaseBeforeRebalanceIterator.
/**
* Checks that if the segment is released before {@link IgniteCacheOffheapManagerImpl#rebalanceIterator},
* there will be no errors and the rebalance will complete.
*
* @throws Exception If failed.
*/
@Test
public void testReleaseBeforeRebalanceIterator() throws Exception {
    checkHistoricalRebalance(n -> {
        IgniteInternalCache<?, ?> cachex = n.cachex(DEFAULT_CACHE_NAME);

        GridCacheOffheapManager spy = spy(offheapManager(cachex));

        doAnswer(m -> {
            CheckpointHistory cpHist = dbMgr(n).checkpointHistory();

            for (Long cp : cpHist.checkpoints())
                release(n, entry(cpHist, cp).checkpointMark());

            return m.callRealMethod();
        }).when(spy).rebalanceIterator(any(), any());

        offheapManager(cachex, spy);
    });
}
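The test relies on the standard Mockito spy pattern: wrap the real offheap manager, run extra logic in doAnswer, then delegate to the original code with callRealMethod. A stripped-down, self-contained sketch of that pattern follows; the Service class here is purely illustrative and not part of Ignite.

import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.spy;

class SpyWithSideEffectExample {
    static class Service {
        int compute(int x) {
            return x * 2;
        }
    }

    public static void main(String[] args) {
        Service realSvc = new Service();
        Service spySvc = spy(realSvc);

        // Run a side effect first, then fall through to the real implementation,
        // just like the rebalanceIterator interception in the test above.
        doAnswer(invocation -> {
            System.out.println("before real call: " + invocation.getArgument(0));
            return invocation.callRealMethod();
        }).when(spySvc).compute(anyInt());

        System.out.println(spySvc.compute(21)); // Prints 42 after the side effect runs.
    }
}

Using doAnswer(...).when(spy) rather than when(spy.method()) avoids invoking the real method during stubbing, which matters for spies with side effects.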
Use of org.apache.ignite.internal.processors.cache.persistence.GridCacheOffheapManager in project ignite by apache.
The class CheckpointFreeListTest, method testFreeListRestoredCorrectly.
/**
* @throws Exception If failed.
*/
@Test
public void testFreeListRestoredCorrectly() throws Exception {
    IgniteEx ignite0 = startGrid(0);

    ignite0.cluster().active(true);

    IgniteEx igniteClient = startClientGrid(getClientConfiguration("client"));

    Random random = new Random();

    IgniteCache<Integer, Object> cache = igniteClient.cache(CACHE_NAME);

    for (int j = 0; j < CACHE_SIZE; j++) {
        cache.put(j, new byte[random.nextInt(SF.apply(3072))]);

        if (random.nextBoolean())
            cache.remove(j);
    }

    GridCacheOffheapManager offheap = cacheOffheapManager();

    HashMap<Integer, AtomicReferenceArray<PagesList.Stripe[]>> bucketsStorage = new HashMap<>();

    offheap.cacheDataStores().forEach(cacheData ->
        bucketsStorage.put(cacheData.partId(), U.field(cacheData.rowStore().freeList(), "buckets")));

    forceCheckpoint();

    stopGrid(0);

    ignite0 = startGrid(0);

    ignite0.cluster().active(true);

    GridCacheOffheapManager offheap2 = cacheOffheapManager();

    offheap2.cacheDataStores().forEach(cacheData -> {
        AtomicReferenceArray<PagesList.Stripe[]> restoredBuckets = U.field(cacheData.rowStore().freeList(), "buckets");
        AtomicReferenceArray<PagesList.Stripe[]> savedBuckets = bucketsStorage.get(cacheData.partId());

        if (savedBuckets != null && restoredBuckets != null) {
            assertEquals(restoredBuckets.length(), savedBuckets.length());

            for (int i = 0; i < restoredBuckets.length(); i++)
                assertTrue(Objects.deepEquals(restoredBuckets.get(i), savedBuckets.get(i)));
        }
        else
            assertTrue(savedBuckets == null && restoredBuckets == null);
    });
}
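The comparison loop at the end works element by element because AtomicReferenceArray does not override equals, and Objects.deepEquals is needed to compare the nested Stripe[] arrays by content. A small, self-contained illustration of that comparison idiom, using plain String[] payloads instead of PagesList.Stripe[]:

import java.util.Objects;
import java.util.concurrent.atomic.AtomicReferenceArray;

class DeepCompareAtomicArrays {
    /** Returns true when both arrays have equal length and deep-equal elements. */
    static <T> boolean deepEquals(AtomicReferenceArray<T> a, AtomicReferenceArray<T> b) {
        if (a.length() != b.length())
            return false;

        for (int i = 0; i < a.length(); i++) {
            // Objects.deepEquals recurses into nested arrays, unlike Objects.equals.
            if (!Objects.deepEquals(a.get(i), b.get(i)))
                return false;
        }

        return true;
    }

    public static void main(String[] args) {
        AtomicReferenceArray<String[]> saved = new AtomicReferenceArray<>(new String[][] {{"a", "b"}, null});
        AtomicReferenceArray<String[]> restored = new AtomicReferenceArray<>(new String[][] {{"a", "b"}, null});

        System.out.println(deepEquals(saved, restored)); // true
    }
}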
Use of org.apache.ignite.internal.processors.cache.persistence.GridCacheOffheapManager in project ignite by apache.
The class UpgradePendingTreeToPerPartitionTask, method processCacheGroup.
/**
* Converts CacheGroup pending tree to per-partition basis.
*
* @param grp Cache group.
* @throws IgniteCheckedException If error occurs.
*/
private void processCacheGroup(CacheGroupContext grp) throws IgniteCheckedException {
    assert grp.offheap() instanceof GridCacheOffheapManager;

    PendingEntriesTree oldPendingTree;

    final IgniteCacheDatabaseSharedManager db = grp.shared().database();

    db.checkpointReadLock();

    try {
        IndexStorage indexStorage = ((GridCacheOffheapManager)grp.offheap()).getIndexStorage();

        // TODO: IGNITE-5874: replace with some check-method to avoid unnecessary page allocation.
        RootPage pendingRootPage = indexStorage.allocateIndex(PENDING_ENTRIES_TREE_NAME);

        if (pendingRootPage.isAllocated()) {
            log.info("No pending tree found for cache group: [grpId=" + grp.groupId() + ", grpName=" + grp.name() + ']');

            // Nothing to do here as just allocated tree is obviously empty.
            indexStorage.dropIndex(PENDING_ENTRIES_TREE_NAME);

            return;
        }

        oldPendingTree = new PendingEntriesTree(grp, PENDING_ENTRIES_TREE_NAME, grp.dataRegion().pageMemory(),
            pendingRootPage.pageId().pageId(), ((GridCacheOffheapManager)grp.offheap()).reuseListForIndex(null),
            false, grp.shared().diagnostic().pageLockTracker(), PageIdAllocator.FLAG_IDX);
    }
    finally {
        db.checkpointReadUnlock();
    }

    processPendingTree(grp, oldPendingTree);

    if (Thread.currentThread().isInterrupted())
        return;

    db.checkpointReadLock();

    try {
        oldPendingTree.destroy();
    }
    finally {
        db.checkpointReadUnlock();
    }
}
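Both structural mutations above are wrapped in checkpointReadLock()/checkpointReadUnlock() so they cannot interleave with a checkpoint writing pages. A minimal sketch of that locking idiom, assuming only that an IgniteCacheDatabaseSharedManager reference is available; the underCheckpointReadLock helper and its Runnable parameter are illustrative, the real code inlines its page operations.

// Sketch of the checkpoint read-lock idiom used twice in processCacheGroup.
private void underCheckpointReadLock(IgniteCacheDatabaseSharedManager db, Runnable action) {
    // Blocks if a checkpoint is currently copying pages; otherwise marks this thread
    // as performing page modifications so the next checkpoint waits for it.
    db.checkpointReadLock();

    try {
        action.run();
    }
    finally {
        // Always release, even if the action throws, otherwise checkpoints would hang.
        db.checkpointReadUnlock();
    }
}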