Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap in project ignite by apache.
The class CacheGroupMetricsMXBeanImpl, method clusterPartitionsMapByState.
/**
 * Gets partitions allocation map with a given state.
 *
 * @param state State.
 * @return Partitions allocation map.
 */
private Map<Integer, Set<String>> clusterPartitionsMapByState(GridDhtPartitionState state) {
    int parts = ctx.topology().partitions();

    GridDhtPartitionFullMap partFullMap = ctx.topology().partitionMap(false);

    Map<Integer, Set<String>> partsMap = new LinkedHashMap<>();

    for (int part = 0; part < parts; part++) {
        Set<String> partNodesSet = new HashSet<>();

        for (Map.Entry<UUID, GridDhtPartitionMap> entry : partFullMap.entrySet()) {
            if (entry.getValue().get(part) == state)
                partNodesSet.add(entry.getKey().toString());
        }

        partsMap.put(part, partNodesSet);
    }

    return partsMap;
}
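For context, these per-state maps are what the MXBean getters return. A minimal sketch of how such getters would delegate to the helper above, assuming the getOwningPartitionsAllocationMap/getMovingPartitionsAllocationMap naming of CacheGroupMetricsMXBean; this is an illustration, not a verbatim copy of the Ignite source:

// Hedged sketch: expose the helper through per-state MXBean getters.
public Map<Integer, Set<String>> getOwningPartitionsAllocationMap() {
    return clusterPartitionsMapByState(GridDhtPartitionState.OWNING);
}

public Map<Integer, Set<String>> getMovingPartitionsAllocationMap() {
    return clusterPartitionsMapByState(GridDhtPartitionState.MOVING);
}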
Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap in project ignite by apache.
The class CacheGroupMetricsMXBeanImpl, method numberOfPartitionCopies.
/**
 * Calculates the number of owning copies for each partition of this cache group and reduces
 * the per-partition counts with the given predicate.
 *
 * @param pred Predicate that decides whether the accumulated result is replaced by the current count.
 * @return Reduced number of partition copies.
 */
private int numberOfPartitionCopies(IntBiPredicate pred) {
    int parts = ctx.topology().partitions();

    GridDhtPartitionFullMap partFullMap = ctx.topology().partitionMap(false);

    int res = -1;

    for (int part = 0; part < parts; part++) {
        int cnt = 0;

        // Count the nodes that own this partition.
        for (Map.Entry<UUID, GridDhtPartitionMap> entry : partFullMap.entrySet()) {
            if (entry.getValue().get(part) == GridDhtPartitionState.OWNING)
                cnt++;
        }

        // Seed the result with the first partition, then let the predicate decide.
        if (part == 0 || pred.apply(res, cnt))
            res = cnt;
    }

    return res;
}
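The predicate turns the loop above into either a minimum or a maximum computation over the per-partition owner counts. A sketch of the two typical callers, assuming the MXBean getter names; illustrative, not verbatim:

// Minimum copies: replace the accumulator whenever the current count is smaller.
public int getMinimumNumberOfPartitionCopies() {
    return numberOfPartitionCopies((acc, cnt) -> cnt < acc);
}

// Maximum copies: replace the accumulator whenever the current count is larger.
public int getMaximumNumberOfPartitionCopies() {
    return numberOfPartitionCopies((acc, cnt) -> cnt > acc);
}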
Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap in project ignite by apache.
The class GridDhtPartitionTopologyImpl, method update.
/** {@inheritDoc} */
@SuppressWarnings("MismatchedQueryAndUpdateOfCollection")
@Override public boolean update(@Nullable GridDhtPartitionExchangeId exchId, GridDhtPartitionMap parts, boolean force) {
    if (log.isDebugEnabled()) {
        log.debug("Updating single partition map [grp=" + grp.cacheOrGroupName() +
            ", exchId=" + exchId + ", parts=" + mapString(parts) + ']');
    }

    if (!ctx.discovery().alive(parts.nodeId())) {
        if (log.isDebugEnabled()) {
            log.debug("Received partition update for non-existing node (will ignore) [grp=" + grp.cacheOrGroupName() +
                ", exchId=" + exchId + ", parts=" + parts + ']');
        }

        return false;
    }

    ctx.database().checkpointReadLock();

    try {
        lock.writeLock().lock();

        try {
            if (stopping)
                return false;

            if (!force) {
                if (lastTopChangeVer.initialized() && exchId != null && lastTopChangeVer.compareTo(exchId.topologyVersion()) > 0) {
                    U.warn(log, "Stale exchange id for single partition map update (will ignore) [" +
                        "grp=" + grp.cacheOrGroupName() +
                        ", lastTopChange=" + lastTopChangeVer +
                        ", readyTopVer=" + readyTopVer +
                        ", exch=" + exchId.topologyVersion() + ']');

                    return false;
                }
            }

            if (node2part == null)
                // Create invalid partition map.
                node2part = new GridDhtPartitionFullMap();

            GridDhtPartitionMap cur = node2part.get(parts.nodeId());

            if (force) {
                if (cur != null && cur.topologyVersion().initialized())
                    parts.updateSequence(cur.updateSequence(), cur.topologyVersion());
            }
            else if (isStaleUpdate(cur, parts)) {
                U.warn(log, "Stale update for single partition map update (will ignore) [" +
                    "grp=" + grp.cacheOrGroupName() +
                    ", exchId=" + exchId +
                    ", curMap=" + cur +
                    ", newMap=" + parts + ']');

                return false;
            }

            long updateSeq = this.updateSeq.incrementAndGet();

            node2part.newUpdateSequence(updateSeq);

            boolean changed = false;

            if (cur == null || !cur.equals(parts))
                changed = true;

            node2part.put(parts.nodeId(), parts);

            // During exchange, the diff is calculated after all messages are received and affinity is initialized.
            if (exchId == null && !grp.isReplicated()) {
                if (readyTopVer.initialized() && readyTopVer.compareTo(diffFromAffinityVer) >= 0) {
                    AffinityAssignment affAssignment = grp.affinity().readyAffinity(readyTopVer);

                    // Add new mappings.
                    for (Map.Entry<Integer, GridDhtPartitionState> e : parts.entrySet()) {
                        int p = e.getKey();

                        Set<UUID> diffIds = diffFromAffinity.get(p);

                        if ((e.getValue() == MOVING || e.getValue() == OWNING || e.getValue() == RENTING)
                            && !affAssignment.getIds(p).contains(parts.nodeId())) {
                            if (diffIds == null)
                                diffFromAffinity.put(p, diffIds = U.newHashSet(3));

                            if (diffIds.add(parts.nodeId()))
                                changed = true;
                        }
                        else {
                            if (diffIds != null && diffIds.remove(parts.nodeId())) {
                                changed = true;

                                if (diffIds.isEmpty())
                                    diffFromAffinity.remove(p);
                            }
                        }
                    }

                    // Remove obsolete mappings.
                    if (cur != null) {
                        for (Integer p : F.view(cur.keySet(), F0.notIn(parts.keySet()))) {
                            Set<UUID> ids = diffFromAffinity.get(p);

                            if (ids != null && ids.remove(parts.nodeId())) {
                                changed = true;

                                if (ids.isEmpty())
                                    diffFromAffinity.remove(p);
                            }
                        }
                    }

                    diffFromAffinityVer = readyTopVer;
                }
            }

            if (readyTopVer.initialized() && readyTopVer.equals(lastTopChangeVer)) {
                AffinityAssignment aff = grp.affinity().readyAffinity(readyTopVer);

                if (exchId == null)
                    changed |= checkEvictions(updateSeq, aff);

                updateRebalanceVersion(aff.assignment());
            }

            consistencyCheck();

            if (log.isDebugEnabled())
                log.debug("Partition map after single update [grp=" + grp.cacheOrGroupName() + ", map=" + fullMapString() + ']');

            if (changed && exchId == null)
                ctx.exchange().scheduleResendPartitions();

            return changed;
        }
        finally {
            lock.writeLock().unlock();
        }
    }
    finally {
        ctx.database().checkpointReadUnlock();
    }
}
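The isStaleUpdate check referenced above guards against reordered single-map messages. A hedged sketch of that check, assuming staleness means the incoming map does not advance the (topology version, update sequence) pair of the map already stored for the node; the actual Ignite implementation may differ in details:

// Hedged sketch: an update is stale when a map for the node already exists
// and the incoming one does not move it forward.
private boolean isStaleUpdate(GridDhtPartitionMap currentMap, GridDhtPartitionMap newMap) {
    if (currentMap == null)
        return false;

    int cmp = newMap.topologyVersion().compareTo(currentMap.topologyVersion());

    return cmp < 0 || (cmp == 0 && newMap.updateSequence() <= currentMap.updateSequence());
}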
Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap in project ignite by apache.
The class GridDhtPartitionTopologyImpl, method detectLostPartitions.
/** {@inheritDoc} */
@Override public boolean detectLostPartitions(AffinityTopologyVersion resTopVer, DiscoveryEvent discoEvt) {
    ctx.database().checkpointReadLock();

    try {
        lock.writeLock().lock();

        try {
            if (node2part == null)
                return false;

            int parts = grp.affinity().partitions();

            // Start with every partition marked as lost, then clear the ones that still have an owner.
            Set<Integer> lost = new HashSet<>(parts);

            for (int p = 0; p < parts; p++)
                lost.add(p);

            for (GridDhtPartitionMap partMap : node2part.values()) {
                for (Map.Entry<Integer, GridDhtPartitionState> e : partMap.entrySet()) {
                    if (e.getValue() == OWNING) {
                        lost.remove(e.getKey());

                        if (lost.isEmpty())
                            break;
                    }
                }
            }

            boolean changed = false;

            if (!F.isEmpty(lost)) {
                PartitionLossPolicy plc = grp.config().getPartitionLossPolicy();

                assert plc != null;

                // Update partition state on all nodes.
                for (Integer part : lost) {
                    long updSeq = updateSeq.incrementAndGet();

                    GridDhtLocalPartition locPart = localPartition(part, resTopVer, false, true);

                    if (locPart != null) {
                        boolean marked = plc == PartitionLossPolicy.IGNORE ? locPart.own() : locPart.markLost();

                        if (marked)
                            updateLocal(locPart.id(), locPart.state(), updSeq, resTopVer);

                        changed |= marked;
                    }
                    // Update map for remote node.
                    else if (plc != PartitionLossPolicy.IGNORE) {
                        for (Map.Entry<UUID, GridDhtPartitionMap> e : node2part.entrySet()) {
                            if (e.getKey().equals(ctx.localNodeId()))
                                continue;

                            if (e.getValue().get(part) != EVICTED)
                                e.getValue().put(part, LOST);
                        }
                    }

                    if (grp.eventRecordable(EventType.EVT_CACHE_REBALANCE_PART_DATA_LOST)) {
                        grp.addRebalanceEvent(part, EVT_CACHE_REBALANCE_PART_DATA_LOST, discoEvt.eventNode(),
                            discoEvt.type(), discoEvt.timestamp());
                    }
                }

                if (plc != PartitionLossPolicy.IGNORE)
                    grp.needsRecovery(true);
            }

            return changed;
        }
        finally {
            lock.writeLock().unlock();
        }
    }
    finally {
        ctx.database().checkpointReadUnlock();
    }
}
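The loss policy consulted above is set on the cache configuration. A minimal configuration sketch (the cache name is hypothetical):

// Minimal configuration sketch; "myCache" is a hypothetical cache name.
CacheConfiguration<Integer, String> ccfg = new CacheConfiguration<>("myCache");

// Any policy other than IGNORE makes detectLostPartitions() mark partitions LOST
// instead of re-owning them; READ_ONLY_SAFE additionally rejects writes and fails
// reads of keys in lost partitions until the lost state is reset.
ccfg.setPartitionLossPolicy(PartitionLossPolicy.READ_ONLY_SAFE);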
Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap in project ignite by apache.
The class GridDhtPartitionTopologyImpl, method updateLocal.
/**
 * Updates state of partition in local {@link #node2part} map and recalculates {@link #diffFromAffinity}.
 *
 * @param p Partition.
 * @param state Partition state.
 * @param updateSeq Update sequence.
 * @param affVer Affinity version.
 * @return Update sequence.
 */
@SuppressWarnings("MismatchedQueryAndUpdateOfCollection")
private long updateLocal(int p, GridDhtPartitionState state, long updateSeq, AffinityTopologyVersion affVer) {
    assert lock.isWriteLockedByCurrentThread();

    ClusterNode oldest = discoCache.oldestAliveServerNode();

    assert oldest != null || ctx.kernalContext().clientNode();

    // If this node became the oldest node.
    if (ctx.localNode().equals(oldest) && node2part != null) {
        long seq = node2part.updateSequence();

        if (seq != updateSeq) {
            if (seq > updateSeq) {
                long seq0 = this.updateSeq.get();

                if (seq0 < seq) {
                    // Update global counter if necessary.
                    boolean b = this.updateSeq.compareAndSet(seq0, seq + 1);

                    assert b : "Invalid update sequence [updateSeq=" + updateSeq +
                        ", grp=" + grp.cacheOrGroupName() +
                        ", seq=" + seq +
                        ", curUpdateSeq=" + this.updateSeq.get() +
                        ", node2part=" + node2part.toFullString() + ']';

                    updateSeq = seq + 1;
                }
                else
                    updateSeq = seq;
            }

            node2part.updateSequence(updateSeq);
        }
    }

    if (node2part != null) {
        UUID locNodeId = ctx.localNodeId();

        GridDhtPartitionMap map = node2part.get(locNodeId);

        if (map == null) {
            map = new GridDhtPartitionMap(locNodeId, updateSeq, affVer, GridPartitionStateMap.EMPTY, false);

            node2part.put(locNodeId, map);
        }

        map.updateSequence(updateSeq, affVer);

        map.put(p, state);

        if (!grp.isReplicated() && (state == MOVING || state == OWNING || state == RENTING)) {
            AffinityAssignment assignment = grp.affinity().cachedAffinity(diffFromAffinityVer);

            // Record that the local node holds a partition it is not assigned to by affinity.
            if (!assignment.getIds(p).contains(ctx.localNodeId())) {
                Set<UUID> diffIds = diffFromAffinity.get(p);

                if (diffIds == null)
                    diffFromAffinity.put(p, diffIds = U.newHashSet(3));

                diffIds.add(ctx.localNodeId());
            }
        }
    }

    return updateSeq;
}
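To make the diffFromAffinity bookkeeping concrete, here is a self-contained illustration (plain Java, not Ignite code) of what that map records per partition: the nodes that actually hold the partition but are absent from its ideal affinity assignment.

import java.util.HashSet;
import java.util.Set;
import java.util.UUID;

// Illustration only: compute the "diff from affinity" for one partition.
static Set<UUID> diffFromAffinity(Set<UUID> affinityIds, Set<UUID> actualHolders) {
    Set<UUID> diff = new HashSet<>(actualHolders);

    // Keep only the holders that the affinity function did not assign.
    diff.removeAll(affinityIds);

    return diff;
}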