use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap in project ignite by apache.
the class GridClientPartitionTopology method removeNode.
/**
 * @param nodeId Node to remove.
 */
private void removeNode(UUID nodeId) {
    assert nodeId != null;
    assert lock.writeLock().isHeldByCurrentThread();

    ClusterNode loc = cctx.localNode();

    if (node2part != null) {
        if (!node2part.nodeId().equals(loc.id())) {
            updateSeq.setIfGreater(node2part.updateSequence());

            node2part = new GridDhtPartitionFullMap(loc.id(), loc.order(), updateSeq.incrementAndGet(), node2part, false);
        }
        else
            node2part = new GridDhtPartitionFullMap(node2part, node2part.updateSequence());

        GridDhtPartitionMap parts = node2part.remove(nodeId);

        if (parts != null) {
            for (Integer p : parts.keySet()) {
                Set<UUID> nodeIds = part2node.get(p);

                if (nodeIds != null) {
                    nodeIds.remove(nodeId);

                    if (nodeIds.isEmpty())
                        part2node.remove(p);
                }
            }
        }

        consistencyCheck();
    }
}
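As a companion to the snippet above, here is a minimal, hedged sketch of the same cleanup pattern in isolation: remove a node's GridDhtPartitionMap from the full map and keep a reverse partition-to-nodes index consistent. The helper class and its name are hypothetical; only calls visible in the snippet (remove, keySet) are relied on.

import java.util.Map;
import java.util.Set;
import java.util.UUID;

import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionFullMap;
import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap;

/** Hypothetical helper illustrating the node-removal cleanup pattern. */
class PartitionIndexCleaner {
    /**
     * @param node2part Full map (node ID to per-node partition map).
     * @param part2node Reverse index (partition to node IDs).
     * @param nodeId Node that left the topology.
     */
    static void onNodeLeft(GridDhtPartitionFullMap node2part, Map<Integer, Set<UUID>> part2node, UUID nodeId) {
        // Drop the node's per-partition state map.
        GridDhtPartitionMap parts = node2part.remove(nodeId);

        if (parts == null)
            return;

        // Unregister the node from every partition it reported.
        for (Integer p : parts.keySet()) {
            Set<UUID> nodeIds = part2node.get(p);

            if (nodeIds != null) {
                nodeIds.remove(nodeId);

                if (nodeIds.isEmpty())
                    part2node.remove(p);
            }
        }
    }
}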
use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap in project ignite by apache.
the class GridCachePartitionExchangeManager method addFullPartitionsMap.
/**
 * @param m Message.
 * @param dupData Duplicated data map.
 * @param compress {@code True} if partition state data should be checked for duplicates.
 * @param grpId Cache group ID.
 * @param map Map to add.
 * @param affKey Cache affinity key.
 */
private void addFullPartitionsMap(GridDhtPartitionsFullMessage m,
    Map<Object, T2<Integer, GridDhtPartitionFullMap>> dupData,
    boolean compress,
    Integer grpId,
    GridDhtPartitionFullMap map,
    Object affKey) {
    assert map != null;

    Integer dupDataCache = null;

    if (compress && affKey != null && !m.containsGroup(grpId)) {
        T2<Integer, GridDhtPartitionFullMap> state0 = dupData.get(affKey);

        if (state0 != null && state0.get2().partitionStateEquals(map)) {
            GridDhtPartitionFullMap map0 = new GridDhtPartitionFullMap(map.nodeId(), map.nodeOrder(), map.updateSequence());

            for (Map.Entry<UUID, GridDhtPartitionMap> e : map.entrySet())
                map0.put(e.getKey(), e.getValue().emptyCopy());

            map = map0;

            dupDataCache = state0.get1();
        }
        else
            dupData.put(affKey, new T2<>(grpId, map));
    }

    m.addFullPartitionsMap(grpId, map, dupDataCache);
}
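The interesting part of this method is the compression trick: when two cache groups share an affinity key and report identical partition states, the second group's map is sent with empty per-node state maps plus a reference to the first group's data. A hedged, self-contained sketch of just the "empty copy" step is shown below; it relies only on the constructor and emptyCopy() call visible above, and the helper class is hypothetical.

import java.util.Map;
import java.util.UUID;

import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionFullMap;
import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap;

/** Hypothetical helper illustrating the duplicate-data compression step. */
class FullMapCompressor {
    /**
     * @param map Full map whose partition states duplicate another group's data.
     * @return Copy with the same node set but empty per-node state maps.
     */
    static GridDhtPartitionFullMap emptyStateCopy(GridDhtPartitionFullMap map) {
        GridDhtPartitionFullMap copy = new GridDhtPartitionFullMap(map.nodeId(), map.nodeOrder(), map.updateSequence());

        // Keep the node entries but strip the per-partition states; the receiver
        // restores them from the referenced duplicate group.
        for (Map.Entry<UUID, GridDhtPartitionMap> e : map.entrySet())
            copy.put(e.getKey(), e.getValue().emptyCopy());

        return copy;
    }
}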
use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap in project ignite by apache.
the class GridDhtPartitionTopologyImpl method nodes0.
/**
 * @param p Partition.
 * @param affAssignment Assignments.
 * @param affNodes Nodes assigned to the given partition by affinity.
 * @return Nodes responsible for given partition (primary is first).
 */
@Nullable
private List<ClusterNode> nodes0(int p, AffinityAssignment affAssignment, List<ClusterNode> affNodes) {
    if (grp.isReplicated())
        return affNodes;

    AffinityTopologyVersion topVer = affAssignment.topologyVersion();

    lock.readLock().lock();

    try {
        assert node2part != null && node2part.valid() : "Invalid node-to-partitions map [topVer1=" + topVer +
            ", topVer2=" + readyTopVer +
            ", node=" + ctx.igniteInstanceName() +
            ", grp=" + grp.cacheOrGroupName() +
            ", node2part=" + node2part + ']';

        List<ClusterNode> nodes = null;

        AffinityTopologyVersion diffVer = diffFromAffinityVer;

        if (!diffVer.equals(topVer)) {
            if (log.isDebugEnabled()) {
                log.debug("Requested topology version does not match calculated diff, need to check if " +
                    "affinity has changed [grp=" + grp.cacheOrGroupName() + ", topVer=" + topVer +
                    ", diffVer=" + diffVer + "]");
            }

            boolean affChanged;

            if (diffVer.compareTo(topVer) < 0)
                affChanged = ctx.exchange().affinityChanged(diffVer, topVer);
            else
                affChanged = ctx.exchange().affinityChanged(topVer, diffVer);

            if (affChanged) {
                if (log.isDebugEnabled()) {
                    log.debug("Requested topology version does not match calculated diff, will require full iteration to " +
                        "calculate mapping [grp=" + grp.cacheOrGroupName() + ", topVer=" + topVer +
                        ", diffVer=" + diffVer + "]");
                }

                nodes = new ArrayList<>();

                nodes.addAll(affNodes);

                for (Map.Entry<UUID, GridDhtPartitionMap> entry : node2part.entrySet()) {
                    GridDhtPartitionState state = entry.getValue().get(p);

                    ClusterNode n = ctx.discovery().node(entry.getKey());

                    if (n != null && state != null && (state == MOVING || state == OWNING || state == RENTING) &&
                        !nodes.contains(n) && (topVer.topologyVersion() < 0 || n.order() <= topVer.topologyVersion()))
                        nodes.add(n);
                }
            }

            return nodes;
        }

        Collection<UUID> diffIds = diffFromAffinity.get(p);

        if (!F.isEmpty(diffIds)) {
            Collection<UUID> affIds = affAssignment.getIds(p);

            for (UUID nodeId : diffIds) {
                if (affIds.contains(nodeId))
                    continue;

                if (hasState(p, nodeId, OWNING, MOVING, RENTING)) {
                    ClusterNode n = ctx.discovery().node(nodeId);

                    if (n != null && (topVer.topologyVersion() < 0 || n.order() <= topVer.topologyVersion())) {
                        if (nodes == null) {
                            nodes = new ArrayList<>(affNodes.size() + diffIds.size());

                            nodes.addAll(affNodes);
                        }

                        nodes.add(n);
                    }
                }
            }
        }

        return nodes;
    }
    finally {
        lock.readLock().unlock();
    }
}
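To isolate the core of the fallback branch (full iteration over node2part), here is a simplified, hedged sketch that collects the IDs of nodes reporting a partition as OWNING, MOVING or RENTING. Node resolution, topology-version filtering and the diff map are omitted; the helper is hypothetical, and the GridDhtPartitionState import path assumes an older Ignite 2.x package layout and may differ in later versions.

import java.util.ArrayList;
import java.util.Collection;
import java.util.Map;
import java.util.UUID;

import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionState;
import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionFullMap;
import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap;

import static org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionState.MOVING;
import static org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionState.OWNING;
import static org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionState.RENTING;

/** Hypothetical helper illustrating the full-iteration fallback. */
class PartitionOwners {
    /** @return IDs of nodes whose map reports partition {@code p} as OWNING, MOVING or RENTING. */
    static Collection<UUID> candidateNodes(GridDhtPartitionFullMap node2part, int p) {
        Collection<UUID> res = new ArrayList<>();

        for (Map.Entry<UUID, GridDhtPartitionMap> e : node2part.entrySet()) {
            GridDhtPartitionState state = e.getValue().get(p);

            if (state == OWNING || state == MOVING || state == RENTING)
                res.add(e.getKey());
        }

        return res;
    }
}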
use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap in project ignite by apache.
the class GridDhtPartitionTopologyImpl method localPartitionMap.
/** {@inheritDoc} */
@Override public GridDhtPartitionMap localPartitionMap() {
    GridPartitionStateMap map = new GridPartitionStateMap(locParts.length());

    lock.readLock().lock();

    try {
        for (int i = 0; i < locParts.length(); i++) {
            GridDhtLocalPartition part = locParts.get(i);

            if (part == null)
                continue;

            map.put(i, part.state());
        }

        GridDhtPartitionMap locPartMap = node2part != null ? node2part.get(ctx.localNodeId()) : null;

        return new GridDhtPartitionMap(ctx.localNodeId(),
            updateSeq.get(),
            locPartMap != null ? locPartMap.topologyVersion() : readyTopVer,
            map,
            true);
    }
    finally {
        lock.readLock().unlock();
    }
}
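A minimal sketch of the construction used above, assuming the constructor signature visible in the snippet (node ID, update sequence, topology version, state map, flag). The helper, its inputs and the OWNING-only state assignment are illustrative only; import paths may differ between Ignite versions.

import java.util.UUID;

import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap;
import org.apache.ignite.internal.util.GridPartitionStateMap;

import static org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionState.OWNING;

/** Hypothetical builder illustrating how a per-node partition map is assembled. */
class LocalMapExample {
    static GridDhtPartitionMap build(UUID locNodeId, int parts, long updateSeq, AffinityTopologyVersion topVer) {
        GridPartitionStateMap states = new GridPartitionStateMap(parts);

        // For the sake of the example, report every partition as OWNING.
        for (int p = 0; p < parts; p++)
            states.put(p, OWNING);

        // The final flag mirrors the 'true' argument used in localPartitionMap() above.
        return new GridDhtPartitionMap(locNodeId, updateSeq, topVer, states, true);
    }
}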
use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap in project ignite by apache.
the class GridDhtPartitionTopologyImpl method updateLocal.
/**
 * Updates state of partition in local {@link #node2part} map and recalculates {@link #diffFromAffinity}.
 *
 * @param p Partition.
 * @param state Partition state.
 * @param updateSeq Update sequence.
 * @param affVer Affinity version.
 * @return Update sequence.
 */
private long updateLocal(int p, GridDhtPartitionState state, long updateSeq, AffinityTopologyVersion affVer) {
    assert lock.isWriteLockedByCurrentThread();

    ClusterNode oldest = discoCache.oldestAliveServerNode();

    assert oldest != null || ctx.kernalContext().clientNode();

    // If this node became the oldest node.
    if (ctx.localNode().equals(oldest) && node2part != null) {
        long seq = node2part.updateSequence();

        if (seq != updateSeq) {
            if (seq > updateSeq) {
                long seq0 = this.updateSeq.get();

                if (seq0 < seq) {
                    // Update global counter if necessary.
                    boolean b = this.updateSeq.compareAndSet(seq0, seq + 1);

                    assert b : "Invalid update sequence [updateSeq=" + updateSeq +
                        ", grp=" + grp.cacheOrGroupName() +
                        ", seq=" + seq +
                        ", curUpdateSeq=" + this.updateSeq.get() +
                        ", node2part=" + node2part.toFullString() + ']';

                    updateSeq = seq + 1;
                }
                else
                    updateSeq = seq;
            }

            node2part.updateSequence(updateSeq);
        }
    }

    if (node2part != null) {
        UUID locNodeId = ctx.localNodeId();

        GridDhtPartitionMap map = node2part.get(locNodeId);

        if (map == null) {
            map = new GridDhtPartitionMap(locNodeId, updateSeq, affVer, GridPartitionStateMap.EMPTY, false);

            node2part.put(locNodeId, map);
        }
        else
            map.updateSequence(updateSeq, affVer);

        map.put(p, state);

        if (!grp.isReplicated() && (state == MOVING || state == OWNING || state == RENTING)) {
            AffinityAssignment assignment = grp.affinity().cachedAffinity(diffFromAffinityVer);

            if (!assignment.getIds(p).contains(ctx.localNodeId())) {
                Set<UUID> diffIds = diffFromAffinity.get(p);

                if (diffIds == null)
                    diffFromAffinity.put(p, diffIds = U.newHashSet(3));

                diffIds.add(ctx.localNodeId());
            }
        }
    }

    return updateSeq;
}
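The last branch of updateLocal() is the diff-from-affinity bookkeeping: if the local node holds a partition that affinity does not assign to it, the node is remembered in a partition-to-"extra owners" map. A hedged, JDK-only sketch of that bookkeeping is shown below, with the affinity lookup reduced to a plain set of node IDs; all names are hypothetical.

import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.UUID;

/** Hypothetical tracker illustrating the diff-from-affinity bookkeeping. */
class DiffFromAffinityTracker {
    /**
     * @param diffFromAffinity Partition to nodes holding it outside the affinity assignment.
     * @param p Partition.
     * @param nodeId Node reporting the partition.
     * @param affIds Node IDs assigned to the partition by affinity.
     */
    static void recordIfOutsideAffinity(Map<Integer, Set<UUID>> diffFromAffinity, int p, UUID nodeId, Set<UUID> affIds) {
        // Nodes that are part of the affinity assignment are not a "diff".
        if (affIds.contains(nodeId))
            return;

        // Same lazy-init pattern as updateLocal(): small set created on first use.
        diffFromAffinity.computeIfAbsent(p, k -> new HashSet<>(3)).add(nodeId);
    }
}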