Use of org.apache.ignite.internal.util.collection.IntHashMap in project ignite by apache.
The class CachePartitionDefragmentationManager, method executeDefragmentation:
/**
 * Executes defragmentation for every cache group scheduled for it: live data is copied
 * into new partition files, the index partition is defragmented and a completion marker
 * is written for each finished group.
 */
public void executeDefragmentation() throws IgniteCheckedException {
    Map<Integer, List<CacheDataStore>> oldStores = new HashMap<>();

    for (CacheGroupContext oldGrpCtx : cacheGrpCtxsForDefragmentation) {
        int grpId = oldGrpCtx.groupId();

        final IgniteCacheOffheapManager offheap = oldGrpCtx.offheap();

        // Keep only partitions that actually have a page store file on disk.
        List<CacheDataStore> oldCacheDataStores = stream(offheap.cacheDataStores().spliterator(), false)
            .filter(store -> {
                try {
                    return filePageStoreMgr.exists(grpId, store.partId());
                }
                catch (IgniteCheckedException e) {
                    throw new IgniteException(e);
                }
            })
            .collect(Collectors.toList());

        oldStores.put(grpId, oldCacheDataStores);
    }

    int partitionCount = oldStores.values().stream().mapToInt(List::size).sum();

    status.onStart(cacheGrpCtxsForDefragmentation, partitionCount);
    try {
        // Now the actual process starts.
        IgniteInternalFuture<?> idxDfrgFut = null;
        DataPageEvictionMode prevPageEvictionMode = null;

        for (CacheGroupContext oldGrpCtx : cacheGrpCtxsForDefragmentation) {
            int grpId = oldGrpCtx.groupId();

            File workDir = filePageStoreMgr.cacheWorkDir(oldGrpCtx.sharedGroup(), oldGrpCtx.cacheOrGroupName());

            List<CacheDataStore> oldCacheDataStores = oldStores.get(grpId);

            if (skipAlreadyDefragmentedCacheGroup(workDir, grpId, log)) {
                status.onCacheGroupSkipped(oldGrpCtx, oldCacheDataStores.size());

                continue;
            }

            try {
                GridCacheOffheapManager offheap = (GridCacheOffheapManager)oldGrpCtx.offheap();

                status.onCacheGroupStart(oldGrpCtx, oldCacheDataStores.size());

                if (workDir == null || oldCacheDataStores.isEmpty()) {
                    status.onCacheGroupFinish(oldGrpCtx);

                    continue;
                }
                // We can't start defragmentation of a new group on a region that has the wrong eviction mode,
                // so waiting for the previous cache group's defragmentation to finish is inevitable.
                DataPageEvictionMode curPageEvictionMode = oldGrpCtx.dataRegion().config().getPageEvictionMode();

                if (prevPageEvictionMode == null || prevPageEvictionMode != curPageEvictionMode) {
                    prevPageEvictionMode = curPageEvictionMode;

                    partDataRegion.config().setPageEvictionMode(curPageEvictionMode);

                    if (idxDfrgFut != null)
                        idxDfrgFut.get();
                }
                IntMap<CacheDataStore> cacheDataStores = new IntHashMap<>();

                for (CacheDataStore store : offheap.cacheDataStores()) {
                    // A null tree means the partition is empty, so there is nothing to defragment.
                    assert store.tree() == null || store.tree().groupId() == grpId;

                    if (store.tree() != null)
                        cacheDataStores.put(store.partId(), store);
                }
                dbMgr.checkpointedDataRegions().remove(oldGrpCtx.dataRegion());

                // Another workaround: the TTL cleanup manager keeps its own references to the cache
                // contexts, so unregister them explicitly.
                oldGrpCtx.caches().stream()
                    .filter(cacheCtx -> cacheCtx.groupId() == grpId)
                    .forEach(cacheCtx -> cacheCtx.ttl().unregister());

                // Technically WAL is already disabled, but "PageHandler.isWalDeltaRecordNeeded" doesn't care,
                // and WAL records would be allocated anyway just to be ignored later if we didn't disable WAL
                // for the cache group explicitly.
                oldGrpCtx.localWalEnabled(false, false);
                boolean encrypted = oldGrpCtx.config().isEncryptionEnabled();

                FileVersionCheckingFactory pageStoreFactory = filePageStoreMgr.getPageStoreFactory(grpId, encrypted);

                AtomicLong idxAllocationTracker = new GridAtomicLong();

                createIndexPageStore(grpId, workDir, pageStoreFactory, partDataRegion, idxAllocationTracker::addAndGet);

                checkCancellation();

                GridCompoundFuture<Object, Object> cmpFut = new GridCompoundFuture<>();

                PageMemoryEx oldPageMem = (PageMemoryEx)oldGrpCtx.dataRegion().pageMemory();

                CacheGroupContext newGrpCtx = new CacheGroupContext(
                    sharedCtx,
                    grpId,
                    oldGrpCtx.receivedFrom(),
                    CacheType.USER,
                    oldGrpCtx.config(),
                    oldGrpCtx.affinityNode(),
                    partDataRegion,
                    oldGrpCtx.cacheObjectContext(),
                    null,
                    null,
                    oldGrpCtx.localStartVersion(),
                    true,
                    false,
                    true
                );

                defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadLock();

                try {
                    // This will initialize partition meta in index partition - meta tree and reuse list.
                    newGrpCtx.start();
                }
                finally {
                    defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadUnlock();
                }

                IgniteUtils.doInParallel(
                    defragmentationThreadPool,
                    oldCacheDataStores,
                    oldCacheDataStore -> defragmentOnePartition(oldGrpCtx, grpId, workDir, offheap,
                        pageStoreFactory, cmpFut, oldPageMem, newGrpCtx, oldCacheDataStore)
                );
                // A bit too general for now, but I like it more than saving only the last checkpoint future.
                cmpFut.markInitialized().get();

                idxDfrgFut = new GridFinishedFuture<>();

                if (filePageStoreMgr.hasIndexStore(grpId)) {
                    defragmentIndexPartition(oldGrpCtx, newGrpCtx);

                    idxDfrgFut = defragmentationCheckpoint.forceCheckpoint("index defragmented", null).futureFor(FINISHED);
                }
                PageStore oldIdxPageStore = filePageStoreMgr.getStore(grpId, INDEX_PARTITION);

                idxDfrgFut = idxDfrgFut.chain(fut -> {
                    if (log.isDebugEnabled()) {
                        log.debug(S.toString("Index partition defragmented",
                            "grpId", grpId, false,
                            "oldPages", oldIdxPageStore.pages(), false,
                            "newPages", idxAllocationTracker.get() + 1, false,
                            "pageSize", pageSize, false,
                            "partFile", defragmentedIndexFile(workDir).getName(), false,
                            "workDir", workDir, false
                        ));
                    }

                    oldPageMem.invalidate(grpId, INDEX_PARTITION);

                    PageMemoryEx partPageMem = (PageMemoryEx)partDataRegion.pageMemory();

                    partPageMem.invalidate(grpId, INDEX_PARTITION);

                    DefragmentationPageReadWriteManager pageMgr = (DefragmentationPageReadWriteManager)partPageMem.pageManager();

                    pageMgr.pageStoreMap().removePageStore(grpId, INDEX_PARTITION);

                    PageMemoryEx mappingPageMem = (PageMemoryEx)mappingDataRegion.pageMemory();

                    pageMgr = (DefragmentationPageReadWriteManager)mappingPageMem.pageManager();

                    pageMgr.pageStoreMap().clear(grpId);

                    renameTempIndexFile(workDir);

                    writeDefragmentationCompletionMarker(filePageStoreMgr.getPageStoreFileIoFactory(), workDir, log);

                    batchRenameDefragmentedCacheGroupPartitions(workDir, log);

                    return null;
                });
                status.onIndexDefragmented(
                    oldGrpCtx,
                    oldIdxPageStore.size(),
                    // + file header.
                    pageSize + idxAllocationTracker.get() * pageSize
                );
            }
            catch (DefragmentationCancelledException e) {
                DefragmentationFileUtils.deleteLeftovers(workDir);

                throw e;
            }

            status.onCacheGroupFinish(oldGrpCtx);
        }

        if (idxDfrgFut != null)
            idxDfrgFut.get();

        mntcReg.unregisterMaintenanceTask(DEFRAGMENTATION_MNTC_TASK_NAME);

        status.onFinish();

        completionFut.onDone();
    }
    catch (DefragmentationCancelledException e) {
        mntcReg.unregisterMaintenanceTask(DEFRAGMENTATION_MNTC_TASK_NAME);

        log.info("Defragmentation process has been cancelled.");

        status.onFinish();

        completionFut.onDone();
    }
    catch (Throwable t) {
        log.error("Defragmentation process failed.", t);

        status.onFinish();

        completionFut.onDone(t);

        throw t;
    }
    finally {
        defragmentationCheckpoint.stop(true);
    }
}
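Both snippets on this page use IntHashMap as a primitive-int-keyed alternative to HashMap<Integer, V>; above it maps partition IDs to their data stores. A minimal self-contained sketch of that pattern, assuming ignite-core on the classpath and only the put, get and size methods of IntMap seen in the snippets; the class name and String values are illustrative stand-ins, not Ignite code:

import org.apache.ignite.internal.util.collection.IntHashMap;
import org.apache.ignite.internal.util.collection.IntMap;

public class IntHashMapSketch {
    public static void main(String[] args) {
        // Primitive-int keys: no Integer boxing, unlike HashMap<Integer, String>.
        IntMap<String> storeByPartId = new IntHashMap<>();

        storeByPartId.put(0, "store-for-partition-0");
        storeByPartId.put(1, "store-for-partition-1");

        System.out.println(storeByPartId.get(1));  // store-for-partition-1
        System.out.println(storeByPartId.get(42)); // null for an absent key
        System.out.println(storeByPartId.size());  // 2
    }
}

Since the keys here are partition IDs that arrive as plain ints, this avoids allocating an Integer wrapper on every put and get, which matters inside a loop over every partition of every cache group.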
Use of org.apache.ignite.internal.util.collection.IntHashMap in project ignite by apache.
The class TcpCommunicationMetricsListener, method createMessageCounters:
/**
* Creates counters of sent and received messages by direct type.
*
* @param factory Message factory.
* @return Counters of sent and received messages grouped by direct type.
*/
private IntMap<IgniteBiTuple<LongAdderMetric, LongAdderMetric>> createMessageCounters(IgniteMessageFactory factory) {
    IgniteMessageFactoryImpl msgFactory = (IgniteMessageFactoryImpl)factory;

    short[] directTypes = msgFactory.registeredDirectTypes();

    IntMap<IgniteBiTuple<LongAdderMetric, LongAdderMetric>> msgCntrsByType = new IntHashMap<>(directTypes.length);

    for (short type : directTypes) {
        LongAdderMetric sentCnt =
            mreg.longAdderMetric(sentMessagesByTypeMetricName(type), SENT_MESSAGES_BY_TYPE_METRIC_DESC);

        LongAdderMetric rcvCnt =
            mreg.longAdderMetric(receivedMessagesByTypeMetricName(type), RECEIVED_MESSAGES_BY_TYPE_METRIC_DESC);

        msgCntrsByType.put(type, new IgniteBiTuple<>(sentCnt, rcvCnt));
    }

    return msgCntrsByType;
}
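The notable detail above is pre-sizing: new IntHashMap<>(directTypes.length) allocates capacity for the known number of registered direct types up front, so the map is never rehashed while it is filled. A hedged sketch of the same idea in isolation, assuming only the IntHashMap(int) constructor and the put/get/size methods used in the snippets; the Counters class and sample type values are hypothetical stand-ins for the sent/received LongAdderMetric tuples:

import org.apache.ignite.internal.util.collection.IntHashMap;
import org.apache.ignite.internal.util.collection.IntMap;

public class MessageCountersSketch {
    /** Hypothetical stand-in for the pair of sent/received metrics. */
    static final class Counters {
        long sent;
        long rcvd;
    }

    public static void main(String[] args) {
        short[] directTypes = {1, 2, 114}; // hypothetical registered direct types

        // Size the map to the known type count so it is never rehashed while filled.
        IntMap<Counters> cntrsByType = new IntHashMap<>(directTypes.length);

        for (short type : directTypes)
            cntrsByType.put(type, new Counters()); // the short key widens to int

        cntrsByType.get(114).sent++;

        System.out.println(cntrsByType.size()); // 3
    }
}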