Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap in project ignite by apache.
The class GridDhtPartitionTopologyImpl, method setOwners.
/**
* {@inheritDoc}
*/
@Override
public Set<UUID> setOwners(int p, Set<UUID> owners, boolean haveHistory, boolean updateSeq) {
    Set<UUID> result = haveHistory ? Collections.<UUID>emptySet() : new HashSet<UUID>();

    ctx.database().checkpointReadLock();

    try {
        lock.writeLock().lock();

        try {
            GridDhtLocalPartition locPart = locParts.get(p);

            if (locPart != null) {
                if (locPart.state() == OWNING && !owners.contains(ctx.localNodeId())) {
                    locPart.moving();

                    if (!haveHistory) {
                        locPart.clearAsync();

                        result.add(ctx.localNodeId());
                    }

                    U.warn(log, "Partition has been scheduled for rebalancing due to outdated update counter " +
                        "[nodeId=" + ctx.localNodeId() + ", grp=" + grp.cacheOrGroupName() +
                        ", partId=" + locPart.id() + ", haveHistory=" + haveHistory + "]");
                }
            }

            for (Map.Entry<UUID, GridDhtPartitionMap> e : node2part.entrySet()) {
                UUID remoteNodeId = e.getKey();

                GridDhtPartitionMap partMap = e.getValue();

                if (!partMap.containsKey(p))
                    continue;

                if (partMap.get(p) == OWNING && !owners.contains(remoteNodeId)) {
                    partMap.put(p, MOVING);

                    if (!haveHistory)
                        result.add(remoteNodeId);

                    partMap.updateSequence(partMap.updateSequence() + 1, partMap.topologyVersion());

                    if (partMap.nodeId().equals(ctx.localNodeId()))
                        this.updateSeq.setIfGreater(partMap.updateSequence());

                    U.warn(log, "Partition has been scheduled for rebalancing due to outdated update counter " +
                        "[nodeId=" + remoteNodeId + ", grp=" + grp.cacheOrGroupName() +
                        ", partId=" + p + ", haveHistory=" + haveHistory + "]");
                }
            }

            if (updateSeq) {
                long updSeq = this.updateSeq.incrementAndGet();

                node2part = new GridDhtPartitionFullMap(node2part, updSeq);
            }
        }
        finally {
            lock.writeLock().unlock();
        }
    }
    finally {
        ctx.database().checkpointReadUnlock();
    }

    return result;
}
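Note the lock ordering above: the database checkpoint read lock is taken before the topology write lock, so a checkpoint never observes a partially applied ownership change. Below is a minimal, self-contained sketch of that nesting discipline using only java.util and java.util.concurrent; the CheckpointedTopology class and its fields are hypothetical names chosen for illustration, not Ignite API.

import java.util.*;
import java.util.concurrent.locks.*;

class CheckpointedTopology {
    // Outer lock: updaters take it in read mode so a checkpoint (write mode) observes a stable state.
    private final ReadWriteLock checkpointLock = new ReentrantReadWriteLock();

    // Inner lock: protects the partition-to-owners map itself.
    private final ReadWriteLock topLock = new ReentrantReadWriteLock();

    private final Map<Integer, Set<UUID>> owners = new HashMap<>();

    /** Replaces the owner set of a partition and returns the nodes that lost ownership. */
    Set<UUID> setOwners(int part, Set<UUID> newOwners) {
        Set<UUID> demoted = new HashSet<>();

        checkpointLock.readLock().lock();

        try {
            topLock.writeLock().lock();

            try {
                for (UUID id : owners.getOrDefault(part, Collections.emptySet())) {
                    if (!newOwners.contains(id))
                        demoted.add(id); // This node would have to rebalance the partition.
                }

                owners.put(part, new HashSet<>(newOwners));
            }
            finally {
                topLock.writeLock().unlock();
            }
        }
        finally {
            checkpointLock.readLock().unlock();
        }

        return demoted;
    }
}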
Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap in project ignite by apache.
The class GridDhtPartitionTopologyImpl, method nodes0.
/**
* @param p Partition.
* @param affAssignment Assignments.
* @param affNodes Nodes assigned for the given partition by affinity.
* @return Nodes responsible for the given partition (primary is first).
*/
@Nullable
private List<ClusterNode> nodes0(int p, AffinityAssignment affAssignment, List<ClusterNode> affNodes) {
    if (grp.isReplicated())
        return affNodes;

    AffinityTopologyVersion topVer = affAssignment.topologyVersion();

    lock.readLock().lock();

    try {
        assert node2part != null && node2part.valid() : "Invalid node-to-partitions map [topVer1=" + topVer +
            ", topVer2=" + this.readyTopVer + ", node=" + ctx.igniteInstanceName() +
            ", grp=" + grp.cacheOrGroupName() + ", node2part=" + node2part + ']';

        List<ClusterNode> nodes = null;

        if (!topVer.equals(diffFromAffinityVer)) {
            LT.warn(log, "Requested topology version does not match calculated diff, will require full iteration to " +
                "calculate mapping [grp=" + grp.cacheOrGroupName() + ", topVer=" + topVer +
                ", diffVer=" + diffFromAffinityVer + "]");

            nodes = new ArrayList<>();

            nodes.addAll(affNodes);

            for (Map.Entry<UUID, GridDhtPartitionMap> entry : node2part.entrySet()) {
                GridDhtPartitionState state = entry.getValue().get(p);

                ClusterNode n = ctx.discovery().node(entry.getKey());

                if (n != null && state != null && (state == MOVING || state == OWNING || state == RENTING) &&
                    !nodes.contains(n) && (topVer.topologyVersion() < 0 || n.order() <= topVer.topologyVersion())) {
                    nodes.add(n);
                }
            }

            return nodes;
        }

        Collection<UUID> diffIds = diffFromAffinity.get(p);

        if (!F.isEmpty(diffIds)) {
            HashSet<UUID> affIds = affAssignment.getIds(p);

            for (UUID nodeId : diffIds) {
                if (affIds.contains(nodeId)) {
                    U.warn(log, "Node from diff is affinity node, skipping it [grp=" + grp.cacheOrGroupName() +
                        ", node=" + nodeId + ']');

                    continue;
                }

                if (hasState(p, nodeId, OWNING, MOVING, RENTING)) {
                    ClusterNode n = ctx.discovery().node(nodeId);

                    if (n != null && (topVer.topologyVersion() < 0 || n.order() <= topVer.topologyVersion())) {
                        if (nodes == null) {
                            nodes = new ArrayList<>(affNodes.size() + diffIds.size());

                            nodes.addAll(affNodes);
                        }

                        nodes.add(n);
                    }
                }
            }
        }

        return nodes;
    }
    finally {
        lock.readLock().unlock();
    }
}
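nodes0 starts from the affinity-assigned nodes (primary first) and appends any extra nodes that the partition map, or the diff-from-affinity index, reports as OWNING, MOVING or RENTING for the partition. A simplified sketch of that merge step is shown below; PartitionNodeResolver, its State enum and the node2part argument are assumptions made for illustration and are not the actual Ignite types.

import java.util.*;

class PartitionNodeResolver {
    enum State { MOVING, OWNING, RENTING, EVICTED }

    // Merge affinity-assigned nodes with extra nodes recorded in the partition map,
    // preserving the affinity order so the primary stays first.
    List<UUID> nodesFor(int part,
                        List<UUID> affNodes,                       // affinity order, primary first
                        Map<UUID, Map<Integer, State>> node2part   // per-node partition states
    ) {
        List<UUID> res = new ArrayList<>(affNodes);

        for (Map.Entry<UUID, Map<Integer, State>> e : node2part.entrySet()) {
            State s = e.getValue().get(part);

            // Append only nodes that currently hold, are receiving, or are releasing the partition.
            if (s != null && s != State.EVICTED && !res.contains(e.getKey()))
                res.add(e.getKey());
        }

        return res;
    }
}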
Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap in project ignite by apache.
The class GridDhtPartitionTopologyImpl, method lostPartitions.
/**
* {@inheritDoc}
*/
@Override
public Collection<Integer> lostPartitions() {
    if (grp.config().getPartitionLossPolicy() == PartitionLossPolicy.IGNORE)
        return Collections.emptySet();

    lock.readLock().lock();

    try {
        Set<Integer> res = null;

        int parts = grp.affinity().partitions();

        for (GridDhtPartitionMap partMap : node2part.values()) {
            for (Map.Entry<Integer, GridDhtPartitionState> e : partMap.entrySet()) {
                if (e.getValue() == LOST) {
                    if (res == null)
                        res = new HashSet<>(parts);

                    res.add(e.getKey());
                }
            }
        }

        return res == null ? Collections.<Integer>emptySet() : res;
    }
    finally {
        lock.readLock().unlock();
    }
}
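The trailing null check is a small allocation optimization: the result HashSet is created only once the first LOST partition is found. A standalone sketch of the same pattern, assuming a plain String-based state map instead of GridDhtPartitionState, might look like this:

import java.util.*;

class LostPartitionsSketch {
    // Collect partition ids reported as LOST by any node; allocate the set lazily.
    static Set<Integer> lostPartitions(Collection<Map<Integer, String>> perNodeStates) {
        Set<Integer> res = null;

        for (Map<Integer, String> states : perNodeStates) {
            for (Map.Entry<Integer, String> e : states.entrySet()) {
                if ("LOST".equals(e.getValue())) {
                    if (res == null)
                        res = new HashSet<>();

                    res.add(e.getKey());
                }
            }
        }

        return res == null ? Collections.<Integer>emptySet() : res;
    }
}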
Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap in project ignite by apache.
The class GridDhtPartitionTopologyImpl, method removeNode.
/**
* Removes node from local {@link #node2part} map and recalculates {@link #diffFromAffinity}.
*
* @param nodeId Node to remove.
*/
private void removeNode(UUID nodeId) {
    assert nodeId != null;
    assert lock.isWriteLockedByCurrentThread();

    ClusterNode oldest = discoCache.oldestAliveServerNode();

    assert oldest != null || ctx.kernalContext().clientNode();

    ClusterNode loc = ctx.localNode();

    if (node2part != null) {
        if (loc.equals(oldest) && !node2part.nodeId().equals(loc.id()))
            node2part = new GridDhtPartitionFullMap(loc.id(), loc.order(), updateSeq.get(), node2part, false);
        else
            node2part = new GridDhtPartitionFullMap(node2part, node2part.updateSequence());

        GridDhtPartitionMap parts = node2part.remove(nodeId);

        if (!grp.isReplicated()) {
            if (parts != null) {
                for (Integer p : parts.keySet()) {
                    Set<UUID> diffIds = diffFromAffinity.get(p);

                    if (diffIds != null)
                        diffIds.remove(nodeId);
                }
            }
        }

        consistencyCheck();
    }
}
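Before mutating anything, removeNode re-creates node2part as a copy (re-owned by the local node if it has become the oldest server), then drops the departed node from both the full map and the per-partition diff index. A reduced sketch of that cleanup step, using plain collections and hypothetical field and class names, is given below.

import java.util.*;

class NodeRemovalSketch {
    private final Map<UUID, Set<Integer>> node2part = new HashMap<>();        // node -> partitions it reported

    private final Map<Integer, Set<UUID>> diffFromAffinity = new HashMap<>(); // partition -> non-affinity holders

    // Remove the departed node's row and unregister it from every per-partition diff entry.
    void removeNode(UUID nodeId) {
        Set<Integer> parts = node2part.remove(nodeId);

        if (parts != null) {
            for (Integer p : parts) {
                Set<UUID> diffIds = diffFromAffinity.get(p);

                if (diffIds != null)
                    diffIds.remove(nodeId);
            }
        }
    }
}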
Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap in project ignite by apache.
The class GridClientPartitionTopology, method update.
/**
* {@inheritDoc}
*/
@SuppressWarnings({"MismatchedQueryAndUpdateOfCollection"})
@Override
public boolean update(
    @Nullable AffinityTopologyVersion exchangeVer,
    GridDhtPartitionFullMap partMap,
    @Nullable CachePartitionFullCountersMap cntrMap,
    Set<Integer> partsToReload,
    @Nullable AffinityTopologyVersion msgTopVer
) {
    if (log.isDebugEnabled())
        log.debug("Updating full partition map [exchVer=" + exchangeVer + ", parts=" + fullMapString() + ']');

    lock.writeLock().lock();

    try {
        if (exchangeVer != null && lastExchangeVer != null && lastExchangeVer.compareTo(exchangeVer) >= 0) {
            if (log.isDebugEnabled())
                log.debug("Stale exchange id for full partition map update (will ignore) [lastExchId=" +
                    lastExchangeVer + ", exchVer=" + exchangeVer + ']');

            return false;
        }

        if (msgTopVer != null && lastExchangeVer != null && lastExchangeVer.compareTo(msgTopVer) > 0) {
            if (log.isDebugEnabled())
                log.debug("Stale topology version for full partition map update message (will ignore) " +
                    "[lastExchId=" + lastExchangeVer + ", topVersion=" + msgTopVer + ']');

            return false;
        }

        boolean fullMapUpdated = (node2part == null);

        if (node2part != null) {
            for (GridDhtPartitionMap part : node2part.values()) {
                GridDhtPartitionMap newPart = partMap.get(part.nodeId());

                if (shouldOverridePartitionMap(part, newPart)) {
                    fullMapUpdated = true;

                    if (log.isDebugEnabled())
                        log.debug("Overriding partition map in full update map [exchId=" + exchangeVer +
                            ", curPart=" + mapString(part) + ", newPart=" + mapString(newPart) + ']');
                }
                else {
                    // If for some nodes current partition has a newer map,
                    // then we keep the newer value.
                    partMap.put(part.nodeId(), part);
                }
            }

            // Check that we have new nodes.
            for (GridDhtPartitionMap part : partMap.values()) {
                if (fullMapUpdated)
                    break;

                fullMapUpdated = !node2part.containsKey(part.nodeId());
            }

            // Remove entry if node left.
            for (Iterator<UUID> it = partMap.keySet().iterator(); it.hasNext(); ) {
                UUID nodeId = it.next();

                if (!cctx.discovery().alive(nodeId)) {
                    if (log.isDebugEnabled())
                        log.debug("Removing left node from full map update [nodeId=" + nodeId +
                            ", partMap=" + partMap + ']');

                    it.remove();
                }
            }
        }

        if (!fullMapUpdated) {
            if (log.isDebugEnabled())
                log.debug("No updates for full partition map (will ignore) [lastExch=" + lastExchangeVer +
                    ", exch=" + exchangeVer + ", curMap=" + node2part + ", newMap=" + partMap + ']');

            return false;
        }

        if (exchangeVer != null)
            lastExchangeVer = exchangeVer;

        node2part = partMap;

        updateSeq.incrementAndGet();

        part2node.clear();

        for (Map.Entry<UUID, GridDhtPartitionMap> e : node2part.entrySet()) {
            for (Map.Entry<Integer, GridDhtPartitionState> e0 : e.getValue().entrySet()) {
                if (e0.getValue() != MOVING && e0.getValue() != OWNING)
                    continue;

                int p = e0.getKey();

                Set<UUID> ids = part2node.get(p);

                if (ids == null)
                    // Initialize HashSet to size 3 in anticipation that there won't be
                    // more than 3 nodes per partition.
                    part2node.put(p, ids = U.newHashSet(3));

                ids.add(e.getKey());
            }
        }

        if (cntrMap != null)
            this.cntrMap = new CachePartitionFullCountersMap(cntrMap);

        consistencyCheck();

        if (log.isDebugEnabled())
            log.debug("Partition map after full update: " + fullMapString());

        return false;
    }
    finally {
        lock.writeLock().unlock();
    }
}
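The merge rule here is: for each node, keep whichever per-node map is newer, as decided by shouldOverridePartitionMap(); rows for nodes that already left the cluster are dropped, and the reverse part2node index is rebuilt from the resulting map. The following is a hedged, simplified sketch of such a "keep the newer row" merge keyed only on an update sequence; FullMapMerge and Row are hypothetical, and the real Ignite decision also considers the map's topology version and other conditions.

import java.util.*;

class FullMapMerge {
    static class Row {
        final long updateSeq;
        final Map<Integer, String> partStates;

        Row(long updateSeq, Map<Integer, String> partStates) {
            this.updateSeq = updateSeq;
            this.partStates = partStates;
        }
    }

    // An incoming row replaces the current one only if its update sequence is strictly newer;
    // otherwise the current (newer) row is kept in the merged full map.
    static Map<UUID, Row> merge(Map<UUID, Row> cur, Map<UUID, Row> incoming) {
        Map<UUID, Row> res = new HashMap<>(incoming);

        for (Map.Entry<UUID, Row> e : cur.entrySet()) {
            Row newRow = incoming.get(e.getKey());

            if (newRow == null || newRow.updateSeq <= e.getValue().updateSeq)
                res.put(e.getKey(), e.getValue());
        }

        return res;
    }
}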