Use of org.apache.ignite.internal.processors.cache.persistence.GridCacheOffheapManager in project ignite by apache.
The class VisorFindAndDeleteGarbageInPersistenceClosure, method cleanup().
/**
 * Deletes the garbage found in partitions and tries to clean up the corresponding indexes.
 *
 * @param grpIdToPartIdToGarbageCount GrpId -> PartId -> Garbage count.
 */
private void cleanup(Map<Integer, Map<Integer, Long>> grpIdToPartIdToGarbageCount) throws IgniteCheckedException {
    for (Map.Entry<Integer, Map<Integer, Long>> e : grpIdToPartIdToGarbageCount.entrySet()) {
        int grpId = e.getKey();
        CacheGroupContext groupContext = ignite.context().cache().cacheGroup(grpId);
        assert groupContext != null;
        for (Integer cacheId : e.getValue().keySet()) {
            groupContext.shared().database().checkpointReadLock();
            try {
                groupContext.offheap().stopCache(cacheId, true);
            } finally {
                groupContext.shared().database().checkpointReadUnlock();
            }
            ((GridCacheOffheapManager) groupContext.offheap()).findAndCleanupLostIndexesForStoppedCache(cacheId);
        }
    }
}
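The checkpoint read lock taken around stopCache() is what makes this cleanup safe against a concurrent checkpoint. Below is a minimal sketch of the same lock-then-mutate pattern extracted into a standalone helper; the class name, method name, and Runnable-based signature are illustrative assumptions, not Ignite API.

import org.apache.ignite.internal.IgniteEx;
import org.apache.ignite.internal.processors.cache.CacheGroupContext;

public class CheckpointLockSketch {
    /** Runs an offheap mutation for the given cache group under the checkpoint read lock. */
    static void underCheckpointReadLock(IgniteEx ignite, int grpId, Runnable mutation) {
        CacheGroupContext grp = ignite.context().cache().cacheGroup(grpId);
        grp.shared().database().checkpointReadLock();
        try {
            // E.g. grp.offheap().stopCache(cacheId, true), as in the closure above.
            mutation.run();
        } finally {
            grp.shared().database().checkpointReadUnlock();
        }
    }
}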
Use of org.apache.ignite.internal.processors.cache.persistence.GridCacheOffheapManager in project ignite by apache.
The class CacheGroupContext, method start().
/**
 * @throws IgniteCheckedException If failed.
 */
public void start() throws IgniteCheckedException {
    GridAffinityAssignmentCache affCache = ctx.affinity().groupAffinity(grpId);
    aff = affCache == null ? GridAffinityAssignmentCache.create(ctx.kernalContext(), ccfg.getAffinity(), ccfg) : affCache;
    if (ccfg.getCacheMode() != LOCAL) {
        top = ctx.kernalContext().resource().resolve(new GridDhtPartitionTopologyImpl(ctx, this));
        metrics.onTopologyInitialized();
    }
    try {
        offheapMgr = ctx.kernalContext().resource().resolve(
            persistenceEnabled ? new GridCacheOffheapManager() : new IgniteCacheOffheapManagerImpl());
    } catch (Exception e) {
        throw new IgniteCheckedException("Failed to initialize offheap manager", e);
    }
    offheapMgr.start(ctx, this);
    if (!isRecoveryMode()) {
        initializeIO();
        ctx.affinity().onCacheGroupCreated(this);
        ctx.evict().onCacheGroupStarted(this);
    }
}
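The branch on persistenceEnabled is why the casts in the other examples are only safe for persistence-backed groups: GridCacheOffheapManager is installed when persistence is enabled, IgniteCacheOffheapManagerImpl otherwise. A minimal sketch of a defensive accessor follows; the class and method names are illustrative, while the context/group navigation mirrors the test code further down.

import org.apache.ignite.internal.IgniteEx;
import org.apache.ignite.internal.processors.cache.CacheGroupContext;
import org.apache.ignite.internal.processors.cache.IgniteCacheOffheapManager;
import org.apache.ignite.internal.processors.cache.persistence.GridCacheOffheapManager;

public class OffheapAccessSketch {
    /** Returns the persistent offheap manager of the cache's group, or fails if the group is in-memory only. */
    static GridCacheOffheapManager persistentOffheap(IgniteEx ignite, String cacheName) {
        CacheGroupContext grp = ignite.context().cache().cache(cacheName).context().group();
        IgniteCacheOffheapManager offheap = grp.offheap();
        if (!(offheap instanceof GridCacheOffheapManager))
            throw new IllegalStateException("Cache group of '" + cacheName + "' is not persistence-enabled.");
        return (GridCacheOffheapManager) offheap;
    }
}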
Use of org.apache.ignite.internal.processors.cache.persistence.GridCacheOffheapManager in project ignite by apache.
The class CachePartitionDefragmentationManager, method executeDefragmentation().
/**
*/
public void executeDefragmentation() throws IgniteCheckedException {
    Map<Integer, List<CacheDataStore>> oldStores = new HashMap<>();
    for (CacheGroupContext oldGrpCtx : cacheGrpCtxsForDefragmentation) {
        int grpId = oldGrpCtx.groupId();
        final IgniteCacheOffheapManager offheap = oldGrpCtx.offheap();
        List<CacheDataStore> oldCacheDataStores = stream(offheap.cacheDataStores().spliterator(), false).filter(store -> {
            try {
                return filePageStoreMgr.exists(grpId, store.partId());
            } catch (IgniteCheckedException e) {
                throw new IgniteException(e);
            }
        }).collect(Collectors.toList());
        oldStores.put(grpId, oldCacheDataStores);
    }
    int partitionCount = oldStores.values().stream().mapToInt(List::size).sum();
    status.onStart(cacheGrpCtxsForDefragmentation, partitionCount);
    try {
        // Now the actual process starts.
        IgniteInternalFuture<?> idxDfrgFut = null;
        DataPageEvictionMode prevPageEvictionMode = null;
        for (CacheGroupContext oldGrpCtx : cacheGrpCtxsForDefragmentation) {
            int grpId = oldGrpCtx.groupId();
            File workDir = filePageStoreMgr.cacheWorkDir(oldGrpCtx.sharedGroup(), oldGrpCtx.cacheOrGroupName());
            List<CacheDataStore> oldCacheDataStores = oldStores.get(grpId);
            if (skipAlreadyDefragmentedCacheGroup(workDir, grpId, log)) {
                status.onCacheGroupSkipped(oldGrpCtx, oldCacheDataStores.size());
                continue;
            }
            try {
                GridCacheOffheapManager offheap = (GridCacheOffheapManager) oldGrpCtx.offheap();
                status.onCacheGroupStart(oldGrpCtx, oldCacheDataStores.size());
                if (workDir == null || oldCacheDataStores.isEmpty()) {
                    status.onCacheGroupFinish(oldGrpCtx);
                    continue;
                }
                // We can't start defragmentation of a new group on a region that has the wrong eviction mode,
                // so waiting for the previous cache group's defragmentation to finish is inevitable.
                DataPageEvictionMode curPageEvictionMode = oldGrpCtx.dataRegion().config().getPageEvictionMode();
                if (prevPageEvictionMode == null || prevPageEvictionMode != curPageEvictionMode) {
                    prevPageEvictionMode = curPageEvictionMode;
                    partDataRegion.config().setPageEvictionMode(curPageEvictionMode);
                    if (idxDfrgFut != null)
                        idxDfrgFut.get();
                }
                IntMap<CacheDataStore> cacheDataStores = new IntHashMap<>();
                for (CacheDataStore store : offheap.cacheDataStores()) {
                    // A null tree means the partition is empty.
                    assert store.tree() == null || store.tree().groupId() == grpId;
                    if (store.tree() != null)
                        cacheDataStores.put(store.partId(), store);
                }
                dbMgr.checkpointedDataRegions().remove(oldGrpCtx.dataRegion());
                // Another workaround: the TTL cleanup manager has to be detached from the old cache contexts explicitly.
                oldGrpCtx.caches().stream().filter(cacheCtx -> cacheCtx.groupId() == grpId).forEach(cacheCtx -> cacheCtx.ttl().unregister());
                // Technically WAL is already disabled, but "PageHandler.isWalDeltaRecordNeeded" doesn't care
                // and WAL records will be allocated anyway just to be ignored later if we don't disable WAL for
                // the cache group explicitly.
                oldGrpCtx.localWalEnabled(false, false);
                boolean encrypted = oldGrpCtx.config().isEncryptionEnabled();
                FileVersionCheckingFactory pageStoreFactory = filePageStoreMgr.getPageStoreFactory(grpId, encrypted);
                AtomicLong idxAllocationTracker = new GridAtomicLong();
                createIndexPageStore(grpId, workDir, pageStoreFactory, partDataRegion, idxAllocationTracker::addAndGet);
                checkCancellation();
                GridCompoundFuture<Object, Object> cmpFut = new GridCompoundFuture<>();
                PageMemoryEx oldPageMem = (PageMemoryEx) oldGrpCtx.dataRegion().pageMemory();
                CacheGroupContext newGrpCtx = new CacheGroupContext(
                    sharedCtx,
                    grpId,
                    oldGrpCtx.receivedFrom(),
                    CacheType.USER,
                    oldGrpCtx.config(),
                    oldGrpCtx.affinityNode(),
                    partDataRegion,
                    oldGrpCtx.cacheObjectContext(),
                    null,
                    null,
                    oldGrpCtx.localStartVersion(),
                    true,
                    false,
                    true
                );
                defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadLock();
                try {
                    // This will initialize partition meta in the index partition - meta tree and reuse list.
                    newGrpCtx.start();
                } finally {
                    defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadUnlock();
                }
                IgniteUtils.doInParallel(
                    defragmentationThreadPool,
                    oldCacheDataStores,
                    oldCacheDataStore -> defragmentOnePartition(oldGrpCtx, grpId, workDir, offheap, pageStoreFactory,
                        cmpFut, oldPageMem, newGrpCtx, oldCacheDataStore)
                );
                // A bit too general for now, but I like it more than saving only the last checkpoint future.
                cmpFut.markInitialized().get();
                idxDfrgFut = new GridFinishedFuture<>();
                if (filePageStoreMgr.hasIndexStore(grpId)) {
                    defragmentIndexPartition(oldGrpCtx, newGrpCtx);
                    idxDfrgFut = defragmentationCheckpoint.forceCheckpoint("index defragmented", null).futureFor(FINISHED);
                }
                PageStore oldIdxPageStore = filePageStoreMgr.getStore(grpId, INDEX_PARTITION);
                idxDfrgFut = idxDfrgFut.chain(fut -> {
                    if (log.isDebugEnabled()) {
                        log.debug(S.toString("Index partition defragmented",
                            "grpId", grpId, false,
                            "oldPages", oldIdxPageStore.pages(), false,
                            "newPages", idxAllocationTracker.get() + 1, false,
                            "pageSize", pageSize, false,
                            "partFile", defragmentedIndexFile(workDir).getName(), false,
                            "workDir", workDir, false));
                    }
                    oldPageMem.invalidate(grpId, INDEX_PARTITION);
                    PageMemoryEx partPageMem = (PageMemoryEx) partDataRegion.pageMemory();
                    partPageMem.invalidate(grpId, INDEX_PARTITION);
                    DefragmentationPageReadWriteManager pageMgr = (DefragmentationPageReadWriteManager) partPageMem.pageManager();
                    pageMgr.pageStoreMap().removePageStore(grpId, INDEX_PARTITION);
                    PageMemoryEx mappingPageMem = (PageMemoryEx) mappingDataRegion.pageMemory();
                    pageMgr = (DefragmentationPageReadWriteManager) mappingPageMem.pageManager();
                    pageMgr.pageStoreMap().clear(grpId);
                    renameTempIndexFile(workDir);
                    writeDefragmentationCompletionMarker(filePageStoreMgr.getPageStoreFileIoFactory(), workDir, log);
                    batchRenameDefragmentedCacheGroupPartitions(workDir, log);
                    return null;
                });
                status.onIndexDefragmented(oldGrpCtx, oldIdxPageStore.size(),
                    // + file header.
                    pageSize + idxAllocationTracker.get() * pageSize);
            } catch (DefragmentationCancelledException e) {
                DefragmentationFileUtils.deleteLeftovers(workDir);
                throw e;
            }
            status.onCacheGroupFinish(oldGrpCtx);
        }
        if (idxDfrgFut != null)
            idxDfrgFut.get();
        mntcReg.unregisterMaintenanceTask(DEFRAGMENTATION_MNTC_TASK_NAME);
        status.onFinish();
        completionFut.onDone();
    } catch (DefragmentationCancelledException e) {
        mntcReg.unregisterMaintenanceTask(DEFRAGMENTATION_MNTC_TASK_NAME);
        log.info("Defragmentation process has been cancelled.");
        status.onFinish();
        completionFut.onDone();
    } catch (Throwable t) {
        log.error("Defragmentation process failed.", t);
        status.onFinish();
        completionFut.onDone(t);
        throw t;
    } finally {
        defragmentationCheckpoint.stop(true);
    }
}
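Two filters decide what gets defragmented above: the first loop keeps only partitions whose page store files exist on disk, and the main loop then skips stores whose B+ tree is null (empty partitions). A small helper in the same spirit, shown below as a sketch (the class and method names are illustrative, not Ignite API), counts the non-empty partition stores of a cache group:

import java.util.stream.StreamSupport;
import org.apache.ignite.internal.processors.cache.CacheGroupContext;

public class DefragmentationCandidatesSketch {
    /** Counts partition data stores of the group that have a B+ tree, i.e. actually hold data. */
    static long nonEmptyPartitionStores(CacheGroupContext grp) {
        return StreamSupport.stream(grp.offheap().cacheDataStores().spliterator(), false)
            .filter(store -> store.tree() != null)
            .count();
    }
}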
Use of org.apache.ignite.internal.processors.cache.persistence.GridCacheOffheapManager in project ignite by apache.
The class FreeListCachingTest, method testFreeListCaching().
/**
*/
@Test
public void testFreeListCaching() throws Exception {
    IgniteEx ignite = startGrid(0);
    ignite.cluster().active(true);
    int partCnt = 10;
    GridCacheProcessor cacheProc = ignite.context().cache();
    GridCacheDatabaseSharedManager dbMgr = (GridCacheDatabaseSharedManager) cacheProc.context().database();
    dbMgr.enableCheckpoints(false).get();
    IgniteCache<Object, Object> cache = ignite.createCache(new CacheConfiguration<>(DEFAULT_CACHE_NAME)
        .setAffinity(new RendezvousAffinityFunction().setPartitions(partCnt))
        .setAtomicityMode(CacheAtomicityMode.ATOMIC));
    GridCacheOffheapManager offheap = (GridCacheOffheapManager) cacheProc.cache(DEFAULT_CACHE_NAME).context().group().offheap();
    for (int i = 0; i < 5_000; i++) {
        for (int p = 0; p < partCnt; p++) {
            Integer key = i * partCnt + p;
            cache.put(key, new byte[i + 1]);
            cache.remove(key);
        }
    }
    offheap.cacheDataStores().forEach(cacheData -> {
        PagesList list = (PagesList) cacheData.rowStore().freeList();
        AtomicLongArray bucketsSize = list.bucketsSize;
        // All buckets except the reuse bucket must be empty after puts and removes of the same key.
        for (int i = 0; i < bucketsSize.length(); i++) {
            if (list.isReuseBucket(i))
                assertTrue(bucketsSize.get(i) > 0);
            else
                assertEquals(0, bucketsSize.get(i));
        }
    });
    for (int i = 0; i < 100; i++) {
        for (int p = 0; p < partCnt; p++)
            cache.put(i * partCnt + p, new byte[(i + p) * 10]);
    }
    for (int i = 0; i < 50; i += 2) {
        for (int p = 0; p < partCnt; p++)
            cache.remove(i * partCnt + p);
    }
    Map<Integer, List<Long>> partsBucketsSize = new HashMap<>();
    offheap.cacheDataStores().forEach(cacheData -> {
        PagesList list = (PagesList) cacheData.rowStore().freeList();
        AtomicLongArray bucketsSize = list.bucketsSize;
        List<Long> bucketsSizeList = new ArrayList<>(bucketsSize.length());
        partsBucketsSize.put(cacheData.partId(), bucketsSizeList);
        long notReuseSize = 0;
        for (int i = 0; i < bucketsSize.length(); i++) {
            bucketsSizeList.add(bucketsSize.get(i));
            PagesList.Stripe[] bucket = list.getBucket(i);
            // The reuse bucket is flushed to page memory directly, since it can be used by indexes bypassing caching.
            if (!list.isReuseBucket(i)) {
                notReuseSize += bucketsSize.get(i);
                assertNull("Expected null bucket [partId=" + cacheData.partId() + ", i=" + i + ", bucket=" + bucket + ']', bucket);
                PagesList.PagesCache pagesCache = list.getBucketCache(i, false);
                assertEquals("Wrong pages cache size [partId=" + cacheData.partId() + ", i=" + i + ']',
                    bucketsSize.get(i), pagesCache == null ? 0 : pagesCache.size());
            }
        }
        assertTrue(notReuseSize > 0);
    });
    dbMgr.enableCheckpoints(true).get();
    forceCheckpoint(ignite);
    offheap.cacheDataStores().forEach(cacheData -> {
        PagesList list = (PagesList) cacheData.rowStore().freeList();
        AtomicLongArray bucketsSize = list.bucketsSize;
        for (int i = 0; i < bucketsSize.length(); i++) {
            long bucketSize = bucketsSize.get(i);
            PagesList.Stripe[] bucket = list.getBucket(i);
            // After a checkpoint every bucket must have flushed its onheap cache to page memory.
            if (bucketSize > 0) {
                assertNotNull("Expected not null bucket [partId=" + cacheData.partId() + ", i=" + i + ']', bucket);
            }
            PagesList.PagesCache pagesCache = list.getBucketCache(i, false);
            assertEquals("Wrong pages cache size [partId=" + cacheData.partId() + ", i=" + i + ']',
                0, pagesCache == null ? 0 : pagesCache.size());
            assertEquals("Bucket size changed after checkpoint [partId=" + cacheData.partId() + ", i=" + i + ']',
                (long) partsBucketsSize.get(cacheData.partId()).get(i), bucketSize);
        }
    });
    dbMgr.enableCheckpoints(false).get();
    for (int i = 0; i < 50; i++) {
        for (int p = 0; p < partCnt; p++)
            cache.put(i * partCnt + p, new byte[(i + p) * 10]);
    }
    offheap.cacheDataStores().forEach(cacheData -> {
        PagesList list = (PagesList) cacheData.rowStore().freeList();
        int totalCacheSize = 0;
        for (int i = 0; i < list.bucketsSize.length(); i++) {
            PagesList.PagesCache pagesCache = list.getBucketCache(i, false);
            totalCacheSize += pagesCache == null ? 0 : pagesCache.size();
        }
        assertTrue("Some buckets should be cached [partId=" + cacheData.partId() + ']', totalCacheSize > 0);
    });
}
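The same accessors the test relies on (bucketsSize and getBucketCache) are handy for inspecting free-list state while debugging. A minimal sketch follows; the class and method names are illustrative, and it assumes PagesList internals are accessible from the calling code in the same way the test above accesses them.

import java.util.concurrent.atomic.AtomicLongArray;
import org.apache.ignite.internal.processors.cache.persistence.GridCacheOffheapManager;
import org.apache.ignite.internal.processors.cache.persistence.freelist.PagesList;

public class FreeListBucketsDump {
    /** Prints free-list bucket sizes and onheap page-cache sizes for every partition of the group. */
    static void dump(GridCacheOffheapManager offheap) {
        offheap.cacheDataStores().forEach(store -> {
            if (store.rowStore() == null)
                return; // Skip stores without a row store, mirroring the check in the tests above.
            PagesList list = (PagesList) store.rowStore().freeList();
            AtomicLongArray bucketsSize = list.bucketsSize;
            StringBuilder sb = new StringBuilder("partId=" + store.partId());
            for (int i = 0; i < bucketsSize.length(); i++) {
                PagesList.PagesCache pagesCache = list.getBucketCache(i, false);
                sb.append(" [bucket=").append(i)
                    .append(", size=").append(bucketsSize.get(i))
                    .append(", cached=").append(pagesCache == null ? 0 : pagesCache.size())
                    .append(']');
            }
            System.out.println(sb);
        });
    }
}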
Use of org.apache.ignite.internal.processors.cache.persistence.GridCacheOffheapManager in project ignite by apache.
The class FreeListCachingTest, method testPageListCacheLimit().
/**
* @throws Exception If test failed.
*/
@Test
public void testPageListCacheLimit() throws Exception {
    IgniteEx ignite = startGrid(0);
    ignite.cluster().active(true);
    ignite.getOrCreateCache("cache1");
    ignite.getOrCreateCache("cache2");
    GridCacheContext<?, ?> cctx1 = ignite.context().cache().cache("cache1").context();
    GridCacheContext<?, ?> cctx2 = ignite.context().cache().cache("cache2").context();
    GridCacheOffheapManager offheap1 = (GridCacheOffheapManager) cctx1.offheap();
    GridCacheOffheapManager offheap2 = (GridCacheOffheapManager) cctx2.offheap();
    GridCacheDatabaseSharedManager db = (GridCacheDatabaseSharedManager) ignite.context().cache().context().database();
    assertEquals(db.pageListCacheLimitHolder(cctx1.dataRegion()), db.pageListCacheLimitHolder(cctx2.dataRegion()));
    long limit = db.pageListCacheLimitHolder(cctx1.dataRegion()).get();
    try (IgniteDataStreamer<Object, Object> streamer1 = ignite.dataStreamer("cache1");
         IgniteDataStreamer<Object, Object> streamer2 = ignite.dataStreamer("cache2")) {
        // Fill caches to trigger the "too many dirty pages" checkpoint.
        for (int i = 0; i < 50_000; i++) {
            streamer1.addData(i, new byte[i % 2048]);
            streamer2.addData(i, new byte[i % 2048]);
            // Calculate the page list caches count and validate it periodically.
            if (i % 5_000 == 0) {
                streamer1.flush();
                streamer2.flush();
                AtomicInteger pageCachesCnt = new AtomicInteger();
                for (GridCacheOffheapManager offheap : F.asList(offheap1, offheap2)) {
                    offheap.cacheDataStores().forEach(cacheData -> {
                        if (cacheData.rowStore() == null)
                            return;
                        PagesList list = (PagesList) cacheData.rowStore().freeList();
                        for (int b = 0; b < list.bucketsSize.length(); b++) {
                            PagesList.PagesCache pagesCache = list.getBucketCache(b, false);
                            if (pagesCache != null && pagesCache.size() > 0)
                                pageCachesCnt.incrementAndGet();
                        }
                    });
                }
                // There can be a race, so the actual page list caches count can exceed the limit in very rare cases.
                assertTrue("Page list caches count is more than expected [count: " + pageCachesCnt.get() + ", limit=" + limit + ']',
                    pageCachesCnt.get() <= limit + ignite.configuration().getDataStreamerThreadPoolSize() - 1);
            }
        }
    }
}
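The inner counting loop of the test can also be pulled out into a reusable helper for ad-hoc checks against the region-wide limit. The sketch below does exactly that; the class and method names are illustrative, and it makes the same visibility assumptions about PagesList internals as the test.

import java.util.concurrent.atomic.AtomicInteger;
import org.apache.ignite.internal.processors.cache.persistence.GridCacheOffheapManager;
import org.apache.ignite.internal.processors.cache.persistence.freelist.PagesList;

public class PageListCacheCounter {
    /** Counts buckets with a non-empty onheap page cache, the value the test compares against the limit. */
    static int nonEmptyPageCaches(Iterable<GridCacheOffheapManager> offheaps) {
        AtomicInteger cnt = new AtomicInteger();
        for (GridCacheOffheapManager offheap : offheaps) {
            offheap.cacheDataStores().forEach(store -> {
                if (store.rowStore() == null)
                    return;
                PagesList list = (PagesList) store.rowStore().freeList();
                for (int b = 0; b < list.bucketsSize.length(); b++) {
                    PagesList.PagesCache pagesCache = list.getBucketCache(b, false);
                    if (pagesCache != null && pagesCache.size() > 0)
                        cnt.incrementAndGet();
                }
            });
        }
        return cnt.get();
    }
}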