Use of org.apache.ignite.internal.processors.cache.CacheGroupContext in project ignite by apache.
Example from the class CachePartitionDefragmentationManager, method beforeDefragmentation:
/**
 * Prepares the node for defragmentation: forces a checkpoint so that all pages on disk are in their
 * latest valid state, preserves the WAL tail pointer and collects the cache groups to defragment.
 */
public void beforeDefragmentation() throws IgniteCheckedException {
    // Checkpointer must be enabled so all pages on disk are in their latest valid state.
    dbMgr.resumeWalLogging();

    dbMgr.onStateRestored(null);

    nodeCheckpoint.forceCheckpoint("beforeDefragmentation", null).futureFor(FINISHED).get();

    dbMgr.preserveWalTailPointer();

    sharedCtx.wal().onDeActivate(sharedCtx.kernalContext());

    for (CacheGroupContext oldGrpCtx : sharedCtx.cache().cacheGroups()) {
        if (!oldGrpCtx.userCache() || cacheGrpCtxsForDefragmentation.contains(oldGrpCtx))
            continue;

        if (!cachesForDefragmentation.isEmpty()) {
            if (oldGrpCtx.caches().stream().noneMatch(cctx -> cachesForDefragmentation.contains(cctx.name())))
                continue;
        }

        cacheGrpCtxsForDefragmentation.add(oldGrpCtx);
    }
}
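
The group-selection loop above boils down to a simple rule: take every user cache group, and if the defragmentation request names specific caches, keep only groups that contain at least one of them. The standalone sketch below restates that rule using only the CacheGroupContext calls seen in the snippet; the class and method names (GroupSelectionSketch, selectForDefragmentation) are illustrative, not Ignite API.

import java.util.Collection;
import java.util.HashSet;
import java.util.Set;

import org.apache.ignite.internal.processors.cache.CacheGroupContext;

/** Sketch of the group-selection rule; helper name and signature are illustrative. */
final class GroupSelectionSketch {
    /**
     * @param groups              all cache groups on the node, e.g. {@code sharedCtx.cache().cacheGroups()}.
     * @param requestedCacheNames caches explicitly requested for defragmentation; empty means "all user caches".
     */
    static Set<CacheGroupContext> selectForDefragmentation(Collection<CacheGroupContext> groups,
        Collection<String> requestedCacheNames) {
        Set<CacheGroupContext> selected = new HashSet<>();

        for (CacheGroupContext grp : groups) {
            // System cache groups are never defragmented.
            if (!grp.userCache())
                continue;

            // An empty request means "defragment every user group"; otherwise the group must
            // contain at least one of the requested caches.
            if (requestedCacheNames.isEmpty()
                || grp.caches().stream().anyMatch(cctx -> requestedCacheNames.contains(cctx.name())))
                selected.add(grp);
        }

        return selected;
    }
}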
Use of org.apache.ignite.internal.processors.cache.CacheGroupContext in project ignite by apache.
Example from the class CachePartitionDefragmentationManager, method executeDefragmentation:
/**
 * Executes defragmentation for all cache groups collected in beforeDefragmentation().
 */
public void executeDefragmentation() throws IgniteCheckedException {
    Map<Integer, List<CacheDataStore>> oldStores = new HashMap<>();

    for (CacheGroupContext oldGrpCtx : cacheGrpCtxsForDefragmentation) {
        int grpId = oldGrpCtx.groupId();

        final IgniteCacheOffheapManager offheap = oldGrpCtx.offheap();

        List<CacheDataStore> oldCacheDataStores = stream(offheap.cacheDataStores().spliterator(), false)
            .filter(store -> {
                try {
                    return filePageStoreMgr.exists(grpId, store.partId());
                }
                catch (IgniteCheckedException e) {
                    throw new IgniteException(e);
                }
            }).collect(Collectors.toList());

        oldStores.put(grpId, oldCacheDataStores);
    }

    int partitionCount = oldStores.values().stream().mapToInt(List::size).sum();

    status.onStart(cacheGrpCtxsForDefragmentation, partitionCount);
    try {
        // Now the actual process starts.
        IgniteInternalFuture<?> idxDfrgFut = null;
        DataPageEvictionMode prevPageEvictionMode = null;

        for (CacheGroupContext oldGrpCtx : cacheGrpCtxsForDefragmentation) {
            int grpId = oldGrpCtx.groupId();

            File workDir = filePageStoreMgr.cacheWorkDir(oldGrpCtx.sharedGroup(), oldGrpCtx.cacheOrGroupName());

            List<CacheDataStore> oldCacheDataStores = oldStores.get(grpId);

            if (skipAlreadyDefragmentedCacheGroup(workDir, grpId, log)) {
                status.onCacheGroupSkipped(oldGrpCtx, oldCacheDataStores.size());

                continue;
            }

            try {
                GridCacheOffheapManager offheap = (GridCacheOffheapManager)oldGrpCtx.offheap();

                status.onCacheGroupStart(oldGrpCtx, oldCacheDataStores.size());

                if (workDir == null || oldCacheDataStores.isEmpty()) {
                    status.onCacheGroupFinish(oldGrpCtx);

                    continue;
                }

                // We can't start defragmentation of a new group on a region that has the wrong eviction mode,
                // so we have to wait for the previous cache group's defragmentation to complete.
                DataPageEvictionMode curPageEvictionMode = oldGrpCtx.dataRegion().config().getPageEvictionMode();

                if (prevPageEvictionMode == null || prevPageEvictionMode != curPageEvictionMode) {
                    prevPageEvictionMode = curPageEvictionMode;

                    partDataRegion.config().setPageEvictionMode(curPageEvictionMode);

                    if (idxDfrgFut != null)
                        idxDfrgFut.get();
                }

                IntMap<CacheDataStore> cacheDataStores = new IntHashMap<>();

                for (CacheDataStore store : offheap.cacheDataStores()) {
                    // A null tree means that the partition is empty.
                    assert store.tree() == null || store.tree().groupId() == grpId;

                    if (store.tree() != null)
                        cacheDataStores.put(store.partId(), store);
                }

                dbMgr.checkpointedDataRegions().remove(oldGrpCtx.dataRegion());

                // Another workaround: the TTL cleanup manager knows too much about cache internals,
                // so it has to be unregistered explicitly.
                oldGrpCtx.caches().stream()
                    .filter(cacheCtx -> cacheCtx.groupId() == grpId)
                    .forEach(cacheCtx -> cacheCtx.ttl().unregister());

                // Technically WAL is already disabled, but "PageHandler.isWalDeltaRecordNeeded" doesn't take
                // that into account: WAL records would still be allocated (only to be ignored later) unless
                // WAL is disabled for the cache group explicitly.
                oldGrpCtx.localWalEnabled(false, false);

                boolean encrypted = oldGrpCtx.config().isEncryptionEnabled();

                FileVersionCheckingFactory pageStoreFactory = filePageStoreMgr.getPageStoreFactory(grpId, encrypted);

                AtomicLong idxAllocationTracker = new GridAtomicLong();

                createIndexPageStore(grpId, workDir, pageStoreFactory, partDataRegion, idxAllocationTracker::addAndGet);

                checkCancellation();

                GridCompoundFuture<Object, Object> cmpFut = new GridCompoundFuture<>();

                PageMemoryEx oldPageMem = (PageMemoryEx)oldGrpCtx.dataRegion().pageMemory();

                CacheGroupContext newGrpCtx = new CacheGroupContext(
                    sharedCtx,
                    grpId,
                    oldGrpCtx.receivedFrom(),
                    CacheType.USER,
                    oldGrpCtx.config(),
                    oldGrpCtx.affinityNode(),
                    partDataRegion,
                    oldGrpCtx.cacheObjectContext(),
                    null,
                    null,
                    oldGrpCtx.localStartVersion(),
                    true,
                    false,
                    true
                );

                defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadLock();

                try {
                    // This will initialize partition meta in the index partition - meta tree and reuse list.
                    newGrpCtx.start();
                }
                finally {
                    defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadUnlock();
                }
                IgniteUtils.doInParallel(
                    defragmentationThreadPool,
                    oldCacheDataStores,
                    oldCacheDataStore -> defragmentOnePartition(
                        oldGrpCtx,
                        grpId,
                        workDir,
                        offheap,
                        pageStoreFactory,
                        cmpFut,
                        oldPageMem,
                        newGrpCtx,
                        oldCacheDataStore
                    )
                );

                // A bit too general for now, but better than saving only the last checkpoint future.
                cmpFut.markInitialized().get();

                idxDfrgFut = new GridFinishedFuture<>();

                if (filePageStoreMgr.hasIndexStore(grpId)) {
                    defragmentIndexPartition(oldGrpCtx, newGrpCtx);

                    idxDfrgFut = defragmentationCheckpoint.forceCheckpoint("index defragmented", null).futureFor(FINISHED);
                }

                PageStore oldIdxPageStore = filePageStoreMgr.getStore(grpId, INDEX_PARTITION);

                idxDfrgFut = idxDfrgFut.chain(fut -> {
                    if (log.isDebugEnabled()) {
                        log.debug(S.toString("Index partition defragmented",
                            "grpId", grpId, false,
                            "oldPages", oldIdxPageStore.pages(), false,
                            "newPages", idxAllocationTracker.get() + 1, false,
                            "pageSize", pageSize, false,
                            "partFile", defragmentedIndexFile(workDir).getName(), false,
                            "workDir", workDir, false));
                    }

                    oldPageMem.invalidate(grpId, INDEX_PARTITION);

                    PageMemoryEx partPageMem = (PageMemoryEx)partDataRegion.pageMemory();

                    partPageMem.invalidate(grpId, INDEX_PARTITION);

                    DefragmentationPageReadWriteManager pageMgr = (DefragmentationPageReadWriteManager)partPageMem.pageManager();

                    pageMgr.pageStoreMap().removePageStore(grpId, INDEX_PARTITION);

                    PageMemoryEx mappingPageMem = (PageMemoryEx)mappingDataRegion.pageMemory();

                    pageMgr = (DefragmentationPageReadWriteManager)mappingPageMem.pageManager();

                    pageMgr.pageStoreMap().clear(grpId);

                    renameTempIndexFile(workDir);

                    writeDefragmentationCompletionMarker(filePageStoreMgr.getPageStoreFileIoFactory(), workDir, log);

                    batchRenameDefragmentedCacheGroupPartitions(workDir, log);

                    return null;
                });

                status.onIndexDefragmented(
                    oldGrpCtx,
                    oldIdxPageStore.size(),
                    // + file header.
                    pageSize + idxAllocationTracker.get() * pageSize
                );
            }
            catch (DefragmentationCancelledException e) {
                DefragmentationFileUtils.deleteLeftovers(workDir);

                throw e;
            }
            status.onCacheGroupFinish(oldGrpCtx);
        }

        if (idxDfrgFut != null)
            idxDfrgFut.get();

        mntcReg.unregisterMaintenanceTask(DEFRAGMENTATION_MNTC_TASK_NAME);

        status.onFinish();

        completionFut.onDone();
    }
    catch (DefragmentationCancelledException e) {
        mntcReg.unregisterMaintenanceTask(DEFRAGMENTATION_MNTC_TASK_NAME);

        log.info("Defragmentation process has been cancelled.");

        status.onFinish();

        completionFut.onDone();
    }
    catch (Throwable t) {
        log.error("Defragmentation process failed.", t);

        status.onFinish();

        completionFut.onDone(t);

        throw t;
    }
    finally {
        defragmentationCheckpoint.stop(true);
    }
}
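
The core of the per-group loop above is a fan-out/fan-in pattern: partitions of one cache group are defragmented in parallel on a dedicated thread pool, and the group is only finalized (completion marker written, defragmented files renamed) after every partition task and the final index checkpoint have finished. The sketch below reproduces that pattern with plain java.util.concurrent primitives; the class and method names (GroupDefragmentationSketch, defragmentPartition, writeCompletionMarker) are illustrative placeholders, not Ignite API.

import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.stream.Collectors;

/** Illustrative fan-out/fan-in skeleton of per-partition defragmentation (not Ignite API). */
final class GroupDefragmentationSketch {
    static void defragmentGroup(List<Integer> partitionIds, int threads) {
        ExecutorService pool = Executors.newFixedThreadPool(threads);

        try {
            // Fan out: one task per partition, mirroring IgniteUtils.doInParallel(...) above.
            List<CompletableFuture<Void>> futs = partitionIds.stream()
                .map(partId -> CompletableFuture.runAsync(() -> defragmentPartition(partId), pool))
                .collect(Collectors.toList());

            // Fan in: the equivalent of cmpFut.markInitialized().get() - wait for every partition task.
            CompletableFuture.allOf(futs.toArray(new CompletableFuture[0])).join();

            // Only now is it safe to write the completion marker and rename the defragmented files:
            // a crash before this point must leave the original partition files untouched and usable.
            writeCompletionMarker();
        }
        finally {
            pool.shutdown();
        }
    }

    private static void defragmentPartition(int partId) {
        // Placeholder: copy the live entries of the partition into a freshly allocated partition file.
    }

    private static void writeCompletionMarker() {
        // Placeholder: the marker file is what makes the whole operation idempotent across restarts.
    }
}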
Use of org.apache.ignite.internal.processors.cache.CacheGroupContext in project ignite by apache.
Example from the class LoadAllWarmUpStrategy, method warmUp:
/**
* {@inheritDoc}
*/
@Override
public void warmUp(LoadAllWarmUpConfiguration cfg, DataRegion region) throws IgniteCheckedException {
    if (stop)
        return;

    assert region.config().isPersistenceEnabled();

    Map<CacheGroupContext, List<LoadPartition>> loadDataInfo = loadDataInfo(region);

    long availableLoadPageCnt = availableLoadPageCount(region);

    if (log.isInfoEnabled()) {
        Collection<List<LoadPartition>> parts = loadDataInfo.values();

        log.info("Order of cache groups loaded into data region [name=" + region.config().getName()
            + ", partCnt=" + parts.stream().mapToLong(Collection::size).sum()
            + ", pageCnt=" + parts.stream().flatMap(Collection::stream).mapToLong(LoadPartition::pages).sum()
            + ", availablePageCnt=" + availableLoadPageCnt
            + ", grpNames=" + loadDataInfo.keySet().stream().map(CacheGroupContext::cacheOrGroupName).collect(toList()) + ']');
    }

    long loadedPageCnt = 0;

    for (Map.Entry<CacheGroupContext, List<LoadPartition>> e : loadDataInfo.entrySet()) {
        CacheGroupContext grp = e.getKey();

        List<LoadPartition> parts = e.getValue();

        if (log.isInfoEnabled()) {
            log.info("Start warm-up cache group, with estimated statistics [name=" + grp.cacheOrGroupName()
                + ", partCnt=" + parts.size()
                + ", pageCnt=" + parts.stream().mapToLong(LoadPartition::pages).sum() + ']');
        }

        PageMemoryEx pageMemEx = (PageMemoryEx)region.pageMemory();

        for (LoadPartition part : parts) {
            long pageId = pageMemEx.partitionMetaPageId(grp.groupId(), part.part());

            for (int i = 0; i < part.pages(); i++, pageId++, loadedPageCnt++) {
                if (stop) {
                    if (log.isInfoEnabled()) {
                        log.info("Stop warm-up cache group with loaded statistics [name=" + grp.cacheOrGroupName()
                            + ", pageCnt=" + loadedPageCnt
                            + ", remainingPageCnt=" + (availableLoadPageCnt - loadedPageCnt) + ']');
                    }

                    return;
                }

                long pagePtr = -1;

                try {
                    pagePtr = pageMemEx.acquirePage(grp.groupId(), pageId);
                }
                finally {
                    if (pagePtr != -1)
                        pageMemEx.releasePage(grp.groupId(), pageId, pagePtr);
                }
            }
        }
    }
}
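
LoadAllWarmUpStrategy.warmUp is the internal workhorse behind the "load all" warm-up option: for every partition of every group in a persistent region it acquires and immediately releases each page, which pulls the page from disk into the data region. From an application's point of view the strategy is enabled through configuration rather than called directly. The sketch below shows one way to do that with the public API; it assumes Ignite 2.10+ where DataRegionConfiguration.setWarmUpConfiguration and LoadAllWarmUpConfiguration are available, so verify the setter names against your version.

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.cluster.ClusterState;
import org.apache.ignite.configuration.DataRegionConfiguration;
import org.apache.ignite.configuration.DataStorageConfiguration;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.configuration.LoadAllWarmUpConfiguration;

public class WarmUpConfigSketch {
    public static void main(String[] args) {
        DataRegionConfiguration regionCfg = new DataRegionConfiguration()
            .setName("persistent-region")
            // Warm-up only applies to persistent regions (see the assert in warmUp above).
            .setPersistenceEnabled(true)
            // "Load all" strategy: region pages are read from disk during node startup.
            .setWarmUpConfiguration(new LoadAllWarmUpConfiguration());

        IgniteConfiguration cfg = new IgniteConfiguration()
            .setDataStorageConfiguration(new DataStorageConfiguration()
                .setDefaultDataRegionConfiguration(regionCfg));

        try (Ignite ignite = Ignition.start(cfg)) {
            ignite.cluster().state(ClusterState.ACTIVE);
            // On subsequent restarts the node pre-loads this region's pages before serving requests.
        }
    }
}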
Use of org.apache.ignite.internal.processors.cache.CacheGroupContext in project ignite by apache.
Example from the class IgnitePdsConsistencyOnDelayedPartitionOwning, method checkConsistencyNodeLeft:
/**
 * Checks partition consistency when an owner node leaves the cluster while partition owning is delayed.
 */
@Test
public void checkConsistencyNodeLeft() throws Exception {
    IgniteEx crd = (IgniteEx)startGridsMultiThreaded(4);

    crd.cluster().active(true);

    for (int i = 0; i < PARTS; i++)
        crd.cache(DEFAULT_CACHE_NAME).put(i, i);

    forceCheckpoint();

    stopGrid(1);

    for (int i = 0; i < PARTS; i++)
        crd.cache(DEFAULT_CACHE_NAME).put(i, i + 1);

    // Block supply messages from all owners.
    TestRecordingCommunicationSpi spi0 = TestRecordingCommunicationSpi.spi(grid(0));
    TestRecordingCommunicationSpi spi2 = TestRecordingCommunicationSpi.spi(grid(2));
    TestRecordingCommunicationSpi spi3 = TestRecordingCommunicationSpi.spi(grid(3));

    IgniteBiPredicate<ClusterNode, Message> pred = new IgniteBiPredicate<ClusterNode, Message>() {
        @Override
        public boolean apply(ClusterNode clusterNode, Message msg) {
            return msg instanceof GridDhtPartitionSupplyMessage;
        }
    };

    spi0.blockMessages(pred);
    spi2.blockMessages(pred);
    spi3.blockMessages(pred);

    GridTestUtils.runAsync(new Callable<Void>() {
        @Override
        public Void call() throws Exception {
            startGrid(1);

            return null;
        }
    });

    spi0.waitForBlocked();
    spi2.waitForBlocked();
    spi3.waitForBlocked();

    spi0.stopBlock();
    spi2.stopBlock();
    CountDownLatch topInitLatch = new CountDownLatch(1);

    CountDownLatch enableDurabilityCPStartLatch = new CountDownLatch(1);

    CountDownLatch delayedOwningLatch = new CountDownLatch(1);

    GridCacheDatabaseSharedManager dbMgr = (GridCacheDatabaseSharedManager)grid(1).context().cache().context().database();

    dbMgr.addCheckpointListener(new CheckpointListener() {
        @Override
        public void onMarkCheckpointBegin(Context ctx) throws IgniteCheckedException {
            // No-op.
        }

        @Override
        public void onCheckpointBegin(Context ctx) throws IgniteCheckedException {
            // No-op.
        }

        @Override
        public void beforeCheckpointBegin(Context ctx) throws IgniteCheckedException {
            String reason = ctx.progress().reason();

            String reason0 = WalStateManager.reason(cacheId(DEFAULT_CACHE_NAME), new AffinityTopologyVersion(6, 0));

            if (reason != null && reason.equals(reason0)) {
                enableDurabilityCPStartLatch.countDown();

                try {
                    assertTrue(U.await(delayedOwningLatch, 10_000, TimeUnit.MILLISECONDS));
                }
                catch (IgniteInterruptedCheckedException e) {
                    fail(X.getFullStackTrace(e));
                }
            }
        }
    });
    TestRecordingCommunicationSpi.spi(grid(1)).blockMessages(new IgniteBiPredicate<ClusterNode, Message>() {
        @Override
        public boolean apply(ClusterNode clusterNode, Message msg) {
            if (msg instanceof GridDhtPartitionDemandMessage) {
                GridDhtPartitionDemandMessage msg0 = (GridDhtPartitionDemandMessage)msg;

                return msg0.topologyVersion().equals(new AffinityTopologyVersion(7, 0));
            }

            return false;
        }
    });

    grid(1).context().cache().context().exchange().registerExchangeAwareComponent(new PartitionsExchangeAware() {
        @Override
        public void onDoneBeforeTopologyUnlock(GridDhtPartitionsExchangeFuture fut) {
            if (fut.initialVersion().equals(new AffinityTopologyVersion(7, 0))) {
                topInitLatch.countDown();

                try {
                    assertTrue(U.await(enableDurabilityCPStartLatch, 20_000, TimeUnit.MILLISECONDS));
                }
                catch (IgniteInterruptedCheckedException e) {
                    fail(X.getFullStackTrace(e));
                }

                System.out.println();
            }
        }
    });
    // Trigger rebalancing remap because an owner has left.
    IgniteInternalFuture stopFut = GridTestUtils.runAsync(new Runnable() {
        @Override
        public void run() {
            // TODO start cache.
            stopGrid(2);
        }
    });

    // Wait for topology (7,0) to be initialized on grid1 before finishing rebalancing on (6,0).
    assertTrue(U.await(topInitLatch, 20_000, TimeUnit.MILLISECONDS));

    // Release the last supply message, which triggers a checkpoint that re-enables durability.
    spi3.stopBlock();

    // Wait for new rebalancing assignments to become ready on grid1.
    TestRecordingCommunicationSpi.spi(grid(1)).waitForBlocked();

    // Triggers spurious ideal switching before rebalancing has finished for (7,0).
    delayedOwningLatch.countDown();

    stopFut.get();

    TestRecordingCommunicationSpi.spi(grid(1)).stopBlock();

    awaitPartitionMapExchange();

    assertPartitionsSame(idleVerify(grid(0), DEFAULT_CACHE_NAME));

    CacheGroupContext grpCtx = grid(1).context().cache().cacheGroup(cacheId(DEFAULT_CACHE_NAME));

    if (grpCtx != null)
        assertTrue(grpCtx.localWalEnabled());
}
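
Most of this test is built from one reusable pattern: TestRecordingCommunicationSpi blocks a class of messages, the test waits until such a message is actually captured (so the cluster is frozen in a known intermediate state), performs its assertions or injects further delays, and then releases the messages. A minimal sketch of that pattern follows; it assumes a test class extending GridCommonAbstractTest whose getConfiguration() installs TestRecordingCommunicationSpi, and the test method name is illustrative.

/**
 * Sketch of the generic block/await/release flow with TestRecordingCommunicationSpi.
 */
@Test
public void blockAndReleaseSupplyMessages() throws Exception {
    IgniteEx crd = (IgniteEx)startGrids(2);

    crd.cluster().active(true);

    TestRecordingCommunicationSpi supplierSpi = TestRecordingCommunicationSpi.spi(grid(0));

    // 1. Capture every rebalancing supply message leaving grid(0).
    supplierSpi.blockMessages((node, msg) -> msg instanceof GridDhtPartitionSupplyMessage);

    stopGrid(1);

    // 2. Restart the demander asynchronously; its rebalancing stalls on the blocked supply messages.
    IgniteInternalFuture<?> fut = GridTestUtils.runAsync(() -> startGrid(1));

    // 3. Wait until at least one message is captured - the cluster is now in a known intermediate state.
    supplierSpi.waitForBlocked();

    // ... assertions against the intermediate state go here ...

    // 4. Release the captured messages and let rebalancing finish.
    supplierSpi.stopBlock();

    fut.get();

    awaitPartitionMapExchange();
}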
Use of org.apache.ignite.internal.processors.cache.CacheGroupContext in project ignite by apache.
Example from the class IgnitePdsDestroyCacheTest, method doTestDestroyCacheOperationNotBlockingCheckpointTest:
/**
 * Checks that a cache destroy operation does not block an ongoing checkpoint.
 */
private void doTestDestroyCacheOperationNotBlockingCheckpointTest(boolean loc) throws Exception {
    final IgniteEx ignite = startGrids(1);

    ignite.cluster().active(true);

    startGroupCachesDynamically(ignite, loc);

    loadCaches(ignite, !loc);

    // It's important to clear a cache in a group that contains more than one cache.
    final String cacheName = cacheName(0);

    final CacheGroupContext grp = ignite.cachex(cacheName).context().group();

    final IgniteCacheOffheapManager offheap = grp.offheap();

    IgniteCacheOffheapManager mgr = Mockito.spy(offheap);

    final CountDownLatch checkpointLocked = new CountDownLatch(1);
    final CountDownLatch cpFutCreated = new CountDownLatch(1);
    final CountDownLatch realMtdCalled = new CountDownLatch(1);
    final CountDownLatch checked = new CountDownLatch(1);

    Mockito.doAnswer(invocation -> {
        checkpointLocked.countDown();

        assertTrue(U.await(cpFutCreated, 30, TimeUnit.SECONDS));

        Object ret = invocation.callRealMethod();

        // After the clearing code has run, the checkpoint future must eventually complete
        // and the checkpoint read lock must be reacquired.
        realMtdCalled.countDown();

        // Wait for the checkpoint future while holding the lock.
        U.awaitQuiet(checked);

        return ret;
    }).when(mgr).stopCache(Mockito.anyInt(), Mockito.anyBoolean());

    final Field field = U.findField(CacheGroupContext.class, "offheapMgr");

    field.set(grp, mgr);

    final IgniteInternalFuture<Object> fut = runAsync(() -> {
        assertTrue(U.await(checkpointLocked, 30, TimeUnit.SECONDS));

        // Trigger a checkpoint while the checkpoint read lock is held by the cache destroy.
        final IgniteInternalFuture cpFut = ignite.context().cache().context().database().wakeupForCheckpoint("test");

        assertFalse(cpFut.isDone());

        cpFutCreated.countDown();

        assertTrue(U.await(realMtdCalled, 30, TimeUnit.SECONDS));

        try {
            // The future must complete after cache clearing but before the checkpoint lock is released.
            cpFut.get(3_000);
        }
        finally {
            checked.countDown();
        }

        return null;
    });

    if (loc)
        ignite.cache(cacheName).close();
    else
        ignite.destroyCache(cacheName);

    fut.get();
}
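
The test relies on a small "spy and inject" recipe: wrap the real IgniteCacheOffheapManager in a Mockito spy, stub the method of interest with doAnswer, and swap the spy into the private offheapMgr field of CacheGroupContext via reflection. The helper below generalizes that recipe; the class and method names are illustrative, and unlike Ignite's U.findField it only looks at fields declared directly in the owner's class (a comment notes the caveat).

import java.lang.reflect.Field;

import org.mockito.Mockito;

/** Sketch of the "spy and inject" recipe; class, method and parameter names are illustrative. */
final class SpyInjectionSketch {
    /**
     * Wraps {@code original} in a Mockito spy and swaps it into a private field of {@code owner}.
     *
     * @return the spy, ready for Mockito.doAnswer(...).when(spy).someMethod(...) style stubbing.
     */
    static <T> T injectSpy(Object owner, String fieldName, T original) throws Exception {
        // Unstubbed calls on a spy still delegate to the real implementation.
        T spy = Mockito.spy(original);

        // Note: getDeclaredField() only searches the exact class; U.findField also walks superclasses.
        Field f = owner.getClass().getDeclaredField(fieldName);
        f.setAccessible(true);
        f.set(owner, spy);

        return spy;
    }
}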