Use of org.apache.ignite.internal.util.collection.IntHashMap in the Apache Ignite project.
Class DataRegionMetricsImpl, method removeCacheGrpPageMetrics.
/**
 * Detaches the page metrics tracked for the given cache group and rolls the
 * group's index-page count out of the data region's aggregate counter.
 *
 * @param grpId Cache group ID whose metrics should be discarded.
 */
public void removeCacheGrpPageMetrics(Integer grpId) {
    PageMetrics removed;

    // Copy-on-write under the lock: concurrent lock-free readers always see
    // either the old or the fully-built new snapshot, never a map mid-mutation.
    synchronized (cacheGrpMetricsLock) {
        IntMap<PageMetrics> snapshot = new IntHashMap<>(cacheGrpMetrics);

        removed = snapshot.remove(grpId);

        cacheGrpMetrics = snapshot;
    }

    // Subtract the removed group's index pages from the region-wide counter
    // (it hasn't been done in previous implementations).
    if (removed != null)
        dataRegionPageMetrics.indexPages().add(-removed.indexPages().value());
}
Use of org.apache.ignite.internal.util.collection.IntHashMap in the Apache Ignite project.
Class DataRegionMetricsImpl, method cacheGrpPageMetrics.
/**
* Returns memory page metrics associated with the given cache group.
*
* Uses the double-checked locking idiom over a copy-on-write map: the fast
* path reads the current {@code cacheGrpMetrics} snapshot without locking; on
* a miss the metrics are created under {@code cacheGrpMetricsLock} and
* published by swapping in a fresh copy of the whole map.
*
* @param cacheGrpId Cache group ID.
* @return Page metrics for the group, or the region-wide
* {@code dataRegionPageMetrics} if no cache group descriptor is found.
*/
public PageMetrics cacheGrpPageMetrics(int cacheGrpId) {
// Lock-free fast path: read the current immutable snapshot.
PageMetrics pageMetrics = cacheGrpMetrics.get(cacheGrpId);
if (pageMetrics != null)
return pageMetrics;
synchronized (cacheGrpMetricsLock) {
// Re-read the volatile-like reference under the lock: another thread may
// have published a new snapshot since the lock-free read above.
IntMap<PageMetrics> localCacheGrpMetrics = cacheGrpMetrics;
// double check
PageMetrics doubleCheckPageMetrics = localCacheGrpMetrics.get(cacheGrpId);
if (doubleCheckPageMetrics != null)
return doubleCheckPageMetrics;
// Mutate a copy, not the live map, so concurrent readers on the fast path
// never observe a map under modification.
IntMap<PageMetrics> copy = new IntHashMap<>(localCacheGrpMetrics);
// Resolve the group descriptor to build dedicated metrics named after the
// cache/group; fall back to the region-wide metrics if unavailable.
PageMetrics newMetrics = Optional.of(kernalCtx).map(GridKernalContext::cache).map(cache -> cache.cacheGroupDescriptors().get(cacheGrpId)).map(decs -> createCacheGrpPageMetrics(decs.cacheOrGroupName())).orElse(dataRegionPageMetrics);
copy.put(cacheGrpId, newMetrics);
// Publish the new snapshot for lock-free readers.
cacheGrpMetrics = copy;
return newMetrics;
}
}
Use of org.apache.ignite.internal.util.collection.IntHashMap in the Apache Ignite project.
Class IgniteStatisticsInMemoryStoreImpl, method loadAllObsolescence.
/**
 * {@inheritDoc}
 *
 * Returns a defensive copy: both the outer map and each per-key {@link IntMap}
 * are fresh instances, so callers cannot mutate the store's internal
 * {@code obsStats} state through the result.
 *
 * @return Copy of all obsolescence statistics, keyed by statistics key and
 *         partition ID.
 */
@Override
public Map<StatisticsKey, IntMap<ObjectPartitionStatisticsObsolescence>> loadAllObsolescence() {
    Map<StatisticsKey, IntMap<ObjectPartitionStatisticsObsolescence>> res = new HashMap<>();

    obsStats.forEach((k, v) -> {
        // Parameterized, presized copy (original used a raw IntHashMap, which
        // compiled only via an unchecked conversion to the declared value type).
        IntHashMap<ObjectPartitionStatisticsObsolescence> newV = new IntHashMap<>(v.size());

        v.forEach(newV::put);

        res.put(k, newV);
    });

    return res;
}
Use of org.apache.ignite.internal.util.collection.IntHashMap in the Apache Ignite project.
Class GridDhtPartitionDemander, method preloadEntries.
/**
 * Preloads the supplied entries into the local partition, counting received
 * keys per cache for rebalance statistics.
 *
 * @param topVer Topology version.
 * @param part Local partition.
 * @param infos Entries info for preload.
 * @throws IgniteCheckedException If failed.
 */
private void preloadEntries(AffinityTopologyVersion topVer, GridDhtLocalPartition part, Iterator<GridCacheEntryInfo> infos) throws IgniteCheckedException {
    // Per-cache counters of keys received, reported to statistics below.
    IntHashMap<GridMutableLong> keysByCacheId = new IntHashMap<>();

    grp.offheap().storeEntries(part, infos, new IgnitePredicateX<CacheDataRow>() {
        /** {@inheritDoc} */
        @Override public boolean applyx(CacheDataRow row) throws IgniteCheckedException {
            // Count the key for its cache, then attempt the actual preload.
            keysByCacheId.computeIfAbsent(row.cacheId(), id -> new GridMutableLong()).incrementAndGet();

            return preloadEntry(row, topVer);
        }
    });

    updateKeyReceivedMetrics(grp, keysByCacheId);
}
Use of org.apache.ignite.internal.util.collection.IntHashMap in the Apache Ignite project.
Class GridDhtPartitionDemander, method mvccPreloadEntries.
/**
* Adds mvcc entries with their history to partition p.
*
* Assumes all MVCC versions of a single key arrive consecutively in
* {@code infos} — TODO confirm against the supplier. A key's accumulated
* history ({@code entryHist}) is flushed to the partition whenever the next
* entry belongs to a different key (or a different cache in a shared group),
* or when the input is exhausted. Writes are batched under the checkpoint
* read lock in chunks of {@code PRELOAD_SIZE_UNDER_CHECKPOINT_LOCK} entries
* so the lock is not held across the whole iterator.
*
* @param topVer Topology version.
* @param node Node which sent entry.
* @param p Partition id.
* @param infos Entries info for preload.
* @throws IgniteCheckedException If failed.
*/
private void mvccPreloadEntries(AffinityTopologyVersion topVer, ClusterNode node, int p, Iterator<GridCacheEntryInfo> infos) throws IgniteCheckedException {
if (!infos.hasNext())
return;
// Received keys by caches, for statistics.
IntHashMap<GridMutableLong> receivedKeys = new IntHashMap<>();
// Accumulated MVCC history of the key currently being read.
List<GridCacheMvccEntryInfo> entryHist = new ArrayList<>();
// For a non-shared group the cache context is fixed; for a shared group it
// is resolved lazily per cache ID inside the loop.
GridCacheContext<?, ?> cctx = grp.sharedGroup() ? null : grp.singleCacheContext();
// Loop through all received entries and try to preload them.
// Outer condition covers the final flush: the last key's history may still
// be pending after the iterator is drained.
while (infos.hasNext() || !entryHist.isEmpty()) {
ctx.database().checkpointReadLock();
try {
// Bounded batch under the checkpoint lock.
for (int i = 0; i < PRELOAD_SIZE_UNDER_CHECKPOINT_LOCK; i++) {
boolean hasMore = infos.hasNext();
assert hasMore || !entryHist.isEmpty();
GridCacheMvccEntryInfo entry = null;
boolean flushHistory;
if (hasMore) {
entry = (GridCacheMvccEntryInfo) infos.next();
// Compare against the first buffered version: a change of key (or of
// cache ID in a shared group) means the previous key's history is complete.
GridCacheMvccEntryInfo prev = entryHist.isEmpty() ? null : entryHist.get(0);
flushHistory = prev != null && ((grp.sharedGroup() && prev.cacheId() != entry.cacheId()) || !prev.key().equals(entry.key()));
} else
flushHistory = true;
if (flushHistory) {
assert !entryHist.isEmpty();
int cacheId = entryHist.get(0).cacheId();
// Shared group: switch the cache context when the buffered history
// belongs to a different cache than the current context.
if (grp.sharedGroup() && (cctx == null || cacheId != cctx.cacheId())) {
assert cacheId != CU.UNDEFINED_CACHE_ID;
cctx = grp.shared().cacheContext(cacheId);
}
// cctx may be null if the cache was concurrently stopped/destroyed —
// NOTE(review): in that case the buffered history is silently dropped.
if (cctx != null) {
mvccPreloadEntry(cctx, node, entryHist, topVer, p);
rebalanceFut.onReceivedKeys(p, 1, node);
receivedKeys.computeIfAbsent(cacheId, cid -> new GridMutableLong()).incrementAndGet();
}
entryHist.clear();
// Input drained and final history flushed — leave the batch loop;
// the outer while will also terminate.
if (!hasMore)
break;
}
// Buffer the current entry as part of its key's history.
entryHist.add(entry);
}
} finally {
ctx.database().checkpointReadUnlock();
}
}
updateKeyReceivedMetrics(grp, receivedKeys);
}
Aggregations