Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap in project ignite by apache.
The class GridCachePartitionExchangeManager, method createPartitionsSingleMessage:
/**
 * @param targetNode Target node.
 * @param exchangeId Exchange ID.
 * @param clientOnlyExchange Client exchange flag.
 * @param sndCounters {@code True} if partition update counters should be sent.
 * @return Message.
 */
public GridDhtPartitionsSingleMessage createPartitionsSingleMessage(ClusterNode targetNode,
    @Nullable GridDhtPartitionExchangeId exchangeId,
    boolean clientOnlyExchange,
    boolean sndCounters) {
    GridDhtPartitionsSingleMessage m = new GridDhtPartitionsSingleMessage(exchangeId,
        clientOnlyExchange,
        cctx.versions().last(),
        true);

    Map<Object, T2<Integer, Map<Integer, GridDhtPartitionState>>> dupData = new HashMap<>();

    for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
        if (!cacheCtx.isLocal()) {
            GridDhtPartitionMap locMap = cacheCtx.topology().localPartitionMap();

            addPartitionMap(m,
                dupData,
                true,
                cacheCtx.cacheId(),
                locMap,
                cacheCtx.affinity().affinityCache().similarAffinityKey());

            if (sndCounters)
                m.partitionUpdateCounters(cacheCtx.cacheId(), cacheCtx.topology().updateCounters(true));
        }
    }

    for (GridClientPartitionTopology top : clientTops.values()) {
        if (m.partitions() != null && m.partitions().containsKey(top.cacheId()))
            continue;

        GridDhtPartitionMap locMap = top.localPartitionMap();

        addPartitionMap(m, dupData, true, top.cacheId(), locMap, top.similarAffinityKey());

        if (sndCounters)
            m.partitionUpdateCounters(top.cacheId(), top.updateCounters(true));
    }

    return m;
}
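The method above gathers, for every non-local cache context and any client-only topology, the node's local GridDhtPartitionMap (and, when sndCounters is set, its per-partition update counters) into a single GridDhtPartitionsSingleMessage. As a rough illustration of that aggregation pattern, the following standalone sketch uses simplified stand-in types rather than Ignite's internal classes; names such as SingleMessageSketch and addCache are assumptions for illustration only.

import java.util.HashMap;
import java.util.Map;

/** Simplified stand-in for a per-node partition-state message; not Ignite's real class. */
class SingleMessageSketch {
    /** cacheId -> (partition -> state name). */
    final Map<Integer, Map<Integer, String>> parts = new HashMap<>();

    /** cacheId -> (partition -> update counter). */
    final Map<Integer, Map<Integer, Long>> cntrs = new HashMap<>();

    /** Adds one cache's local partition map and, optionally, its update counters. */
    void addCache(int cacheId, Map<Integer, String> locMap, Map<Integer, Long> updCntrs, boolean sndCounters) {
        // Mirrors the containsKey() check that skips client topologies already covered by a cache context.
        if (parts.containsKey(cacheId))
            return;

        parts.put(cacheId, locMap);

        if (sndCounters)
            cntrs.put(cacheId, updCntrs);
    }
}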
Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap in project ignite by apache.
The class GridDhtPartitionTopologyImpl, method update:
/** {@inheritDoc} */
@SuppressWarnings({ "MismatchedQueryAndUpdateOfCollection" })
@Override
public GridDhtPartitionMap update(@Nullable GridDhtPartitionExchangeId exchId,
    GridDhtPartitionFullMap partMap,
    @Nullable Map<Integer, T2<Long, Long>> cntrMap) {
    if (log.isDebugEnabled())
        log.debug("Updating full partition map [exchId=" + exchId + ", parts=" + fullMapString() + ']');

    assert partMap != null;

    lock.writeLock().lock();

    try {
        if (stopping)
            return null;

        if (cntrMap != null) {
            // Merge incoming counters into the local counter map, keeping the larger value per partition.
            for (Map.Entry<Integer, T2<Long, Long>> e : cntrMap.entrySet()) {
                T2<Long, Long> cntr = this.cntrMap.get(e.getKey());

                if (cntr == null || cntr.get2() < e.getValue().get2())
                    this.cntrMap.put(e.getKey(), e.getValue());
            }

            // Propagate counters to the local partitions themselves.
            for (int i = 0; i < locParts.length(); i++) {
                GridDhtLocalPartition part = locParts.get(i);

                if (part == null)
                    continue;

                T2<Long, Long> cntr = cntrMap.get(part.id());

                if (cntr != null)
                    part.updateCounter(cntr.get2());
            }
        }

        // Skip the update if the exchange ID is stale.
        if (exchId != null && lastExchangeId != null && lastExchangeId.compareTo(exchId) >= 0) {
            if (log.isDebugEnabled())
                log.debug("Stale exchange id for full partition map update (will ignore) [lastExchId=" +
                    lastExchangeId + ", exchId=" + exchId + ']');

            return null;
        }

        if (node2part != null && node2part.compareTo(partMap) >= 0) {
            if (log.isDebugEnabled())
                log.debug("Stale partition map for full partition map update (will ignore) [lastExchId=" +
                    lastExchangeId + ", exchId=" + exchId + ", curMap=" + node2part + ", newMap=" + partMap + ']');

            return null;
        }

        long updateSeq = this.updateSeq.incrementAndGet();

        if (exchId != null)
            lastExchangeId = exchId;

        if (node2part != null) {
            for (GridDhtPartitionMap part : node2part.values()) {
                GridDhtPartitionMap newPart = partMap.get(part.nodeId());

                // If the locally known per-node map is newer than the one in the incoming full map
                // (or the incoming one predates this node's start topology version), keep the local map.
                if (newPart != null && (newPart.updateSequence() < part.updateSequence() ||
                    (cctx.startTopologyVersion().compareTo(newPart.topologyVersion()) > 0))) {
                    if (log.isDebugEnabled())
                        log.debug("Overriding partition map in full update map [exchId=" + exchId + ", curPart=" +
                            mapString(part) + ", newPart=" + mapString(newPart) + ']');

                    partMap.put(part.nodeId(), part);
                }
            }

            // Remove entries for nodes that have left the cluster.
            for (Iterator<UUID> it = partMap.keySet().iterator(); it.hasNext(); ) {
                UUID nodeId = it.next();

                if (!cctx.discovery().alive(nodeId)) {
                    if (log.isDebugEnabled())
                        log.debug("Removing left node from full map update [nodeId=" + nodeId + ", partMap=" +
                            partMap + ']');

                    it.remove();
                }
            }
        }

        node2part = partMap;

        Map<Integer, Set<UUID>> p2n = new HashMap<>(cctx.affinity().partitions(), 1.0f);

        for (Map.Entry<UUID, GridDhtPartitionMap> e : partMap.entrySet()) {
            for (Integer p : e.getValue().keySet()) {
                Set<UUID> ids = p2n.get(p);

                if (ids == null)
                    // Initialize the set to size 3 in anticipation that there won't be
                    // more than 3 nodes per partition.
                    p2n.put(p, ids = U.newHashSet(3));

                ids.add(e.getKey());
            }
        }

        part2node = p2n;

        boolean changed = false;

        AffinityTopologyVersion affVer = cctx.affinity().affinityTopologyVersion();

        GridDhtPartitionMap nodeMap = partMap.get(cctx.localNodeId());

        if (nodeMap != null && cctx.shared().database().persistenceEnabled()) {
            for (Map.Entry<Integer, GridDhtPartitionState> e : nodeMap.entrySet()) {
                int p = e.getKey();
                GridDhtPartitionState state = e.getValue();

                if (state == MOVING) {
                    GridDhtLocalPartition locPart = locParts.get(p);

                    assert locPart != null;

                    if (locPart.state() == OWNING) {
                        locPart.moving();

                        changed = true;
                    }

                    if (cntrMap != null) {
                        T2<Long, Long> cntr = cntrMap.get(p);

                        if (cntr != null && cntr.get2() > locPart.updateCounter())
                            locPart.updateCounter(cntr.get2());
                    }
                }
            }
        }

        if (!affVer.equals(AffinityTopologyVersion.NONE) && affVer.compareTo(topVer) >= 0) {
            List<List<ClusterNode>> aff = cctx.affinity().assignments(topVer);

            changed |= checkEvictions(updateSeq, aff);

            updateRebalanceVersion(aff);
        }

        consistencyCheck();

        if (log.isDebugEnabled())
            log.debug("Partition map after full update: " + fullMapString());

        if (changed)
            cctx.shared().exchange().scheduleResendPartitions();

        return changed ? localPartitionMap() : null;
    }
    finally {
        lock.writeLock().unlock();
    }
}
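Two details of update() are worth calling out: incoming counters are merged by keeping the larger value per partition, and a node's map from the incoming full map is discarded in favor of the locally known one when the local copy carries a higher update sequence. The following standalone sketch shows just the counter-merge rule with plain Java maps; the CounterMergeSketch type and the mergeCounters name are assumptions, not Ignite's actual API.

import java.util.HashMap;
import java.util.Map;

/** Illustrative only: merges update counters the way update() does, keeping the maximum per partition. */
class CounterMergeSketch {
    /** partition -> update counter (stands in for the second element of T2<Long, Long>). */
    final Map<Integer, Long> cntrMap = new HashMap<>();

    /** Keeps, per partition, the larger of the locally known and the incoming counter. */
    void mergeCounters(Map<Integer, Long> incoming) {
        for (Map.Entry<Integer, Long> e : incoming.entrySet()) {
            Long cur = cntrMap.get(e.getKey());

            if (cur == null || cur < e.getValue())
                cntrMap.put(e.getKey(), e.getValue());
        }
    }
}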
Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap in project ignite by apache.
The class GridDhtPartitionTopologyImpl, method updateLocal:
/**
 * Updates value for a single partition.
 *
 * @param p Partition.
 * @param state State.
 * @param updateSeq Update sequence.
 * @return Update sequence.
 */
@SuppressWarnings({ "MismatchedQueryAndUpdateOfCollection" })
private long updateLocal(int p, GridDhtPartitionState state, long updateSeq) {
    ClusterNode oldest = discoCache.oldestAliveServerNodeWithCache();

    assert oldest != null || cctx.kernalContext().clientNode();

    // If this node became the oldest node.
    if (cctx.localNode().equals(oldest)) {
        long seq = node2part.updateSequence();

        if (seq != updateSeq) {
            if (seq > updateSeq) {
                long seq0 = this.updateSeq.get();

                if (seq0 < seq) {
                    // Update global counter if necessary.
                    boolean b = this.updateSeq.compareAndSet(seq0, seq + 1);

                    assert b : "Invalid update sequence [updateSeq=" + updateSeq + ", seq=" + seq +
                        ", curUpdateSeq=" + this.updateSeq.get() + ", node2part=" + node2part.toFullString() + ']';

                    updateSeq = seq + 1;
                }
                else
                    updateSeq = seq;
            }

            node2part.updateSequence(updateSeq);
        }
    }

    if (node2part != null) {
        UUID locNodeId = cctx.localNodeId();

        GridDhtPartitionMap map = node2part.get(locNodeId);

        if (map == null) {
            map = new GridDhtPartitionMap(locNodeId,
                updateSeq,
                topVer,
                Collections.<Integer, GridDhtPartitionState>emptyMap(),
                false);

            node2part.put(locNodeId, map);
        }

        map.updateSequence(updateSeq, topVer);

        map.put(p, state);

        Set<UUID> ids = part2node.get(p);

        if (ids == null)
            part2node.put(p, ids = U.newHashSet(3));

        ids.add(locNodeId);
    }

    return updateSeq;
}
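Beyond the coordinator's sequence bookkeeping, updateLocal() keeps two views of the same fact in sync: the per-node map (node -> partition -> state) and the reverse index part2node (partition -> node IDs). The sketch below illustrates that pairing with plain Java collections; the class and method names are assumptions, and the real code additionally advances the map's update sequence and topology version.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.UUID;

/** Illustrative only: keeps a per-node partition map and its reverse index consistent. */
class LocalStateSketch {
    /** nodeId -> (partition -> state name); stands in for node2part. */
    final Map<UUID, Map<Integer, String>> node2part = new HashMap<>();

    /** partition -> node IDs holding it; stands in for part2node. */
    final Map<Integer, Set<UUID>> part2node = new HashMap<>();

    /** Records a state for one partition on the local node and updates the reverse index. */
    void updateLocal(UUID locNodeId, int p, String state) {
        node2part.computeIfAbsent(locNodeId, id -> new HashMap<>()).put(p, state);

        // Small initial capacity, mirroring U.newHashSet(3): usually only a primary and a couple of backups.
        part2node.computeIfAbsent(p, part -> new HashSet<>(3)).add(locNodeId);
    }
}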
Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap in project ignite by apache.
The class CacheGroupMetricsMXBeanImpl, method nodePartitionsCountByState:
/**
 * Count of partitions with a given state on the node.
 *
 * @param nodeId Node id.
 * @param state State.
 * @return Number of partitions in the given state on the node.
 */
private int nodePartitionsCountByState(UUID nodeId, GridDhtPartitionState state) {
    int parts = ctx.topology().partitions();

    GridDhtPartitionMap partMap = ctx.topology().partitionMap(false).get(nodeId);

    int cnt = 0;

    for (int part = 0; part < parts; part++)
        if (partMap.get(part) == state)
            cnt++;

    return cnt;
}
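The metric is computed by probing every partition index of one node's map against a single state. A hypothetical variant that builds a complete per-state histogram from the same kind of view is sketched below with a stand-in enum; the real GridDhtPartitionState lives in Ignite's internals and this class is an illustration only.

import java.util.EnumMap;
import java.util.Map;

/** Illustrative only: per-state partition counts for one node. */
class PartitionHistogramSketch {
    /** Stand-in for GridDhtPartitionState. */
    enum State { MOVING, OWNING, RENTING, EVICTED, LOST }

    /** Counts partitions per state given a partition -> state view of one node. */
    static Map<State, Integer> histogram(Map<Integer, State> nodeMap, int parts) {
        Map<State, Integer> res = new EnumMap<>(State.class);

        for (int p = 0; p < parts; p++) {
            State s = nodeMap.get(p);

            if (s != null)
                res.merge(s, 1, Integer::sum);
        }

        return res;
    }
}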
Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap in project ignite by apache.
The class GridDhtPartitionTopologyImpl, method createMovingPartitions:
/**
 * @param aff Affinity.
 */
private void createMovingPartitions(AffinityAssignment aff) {
    for (Map.Entry<UUID, GridDhtPartitionMap> e : node2part.entrySet()) {
        GridDhtPartitionMap map = e.getValue();

        addMoving(map, aff.backupPartitions(e.getKey()));
        addMoving(map, aff.primaryPartitions(e.getKey()));
    }
}
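createMovingPartitions() walks the full node2part map and, through addMoving() (not shown in this snippet), flags for each node the partitions that the affinity assigns to it as a primary or backup. A simplified standalone take on that marking step is sketched below; the stand-in types are assumptions, and the real addMoving() applies additional state checks beyond what is shown here.

import java.util.Map;
import java.util.Set;

/** Illustrative only: marks affinity-assigned partitions as MOVING in a node's map. */
class MovingPartitionsSketch {
    /** Stand-in for GridDhtPartitionState. */
    enum State { MOVING, OWNING }

    /** Marks as MOVING every affinity-assigned partition the node does not report at all yet. */
    static void addMoving(Map<Integer, State> nodeMap, Set<Integer> affParts) {
        for (Integer p : affParts)
            nodeMap.putIfAbsent(p, State.MOVING);
    }
}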