Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionFullMap in project ignite by apache.
The class GridCachePartitionExchangeManager, method processFullPartitionUpdate.
/**
 * @param node Node.
 * @param msg Message.
 */
private void processFullPartitionUpdate(ClusterNode node, GridDhtPartitionsFullMessage msg) {
    if (!enterBusy())
        return;

    try {
        if (msg.exchangeId() == null) {
            if (log.isDebugEnabled())
                log.debug("Received full partition update [node=" + node.id() + ", msg=" + msg + ']');

            boolean updated = false;

            for (Map.Entry<Integer, GridDhtPartitionFullMap> entry : msg.partitions().entrySet()) {
                Integer cacheId = entry.getKey();

                GridCacheContext<K, V> cacheCtx = cctx.cacheContext(cacheId);

                if (cacheCtx != null && !cacheCtx.started())
                    // Can safely ignore background exchange.
                    continue;

                GridDhtPartitionTopology top = null;

                if (cacheCtx == null)
                    top = clientTops.get(cacheId);
                else if (!cacheCtx.isLocal())
                    top = cacheCtx.topology();

                if (top != null)
                    updated |= top.update(null, entry.getValue(), null) != null;
            }

            if (!cctx.kernalContext().clientNode() && updated)
                refreshPartitions();
        }
        else
            exchangeFuture(msg.exchangeId(), null, null, null, null).onReceive(node, msg);
    }
    finally {
        leaveBusy();
    }
}
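A GridDhtPartitionFullMap is a per-cache map from node id to that node's GridDhtPartitionMap (partition id to partition state), and GridDhtPartitionsFullMessage carries one such map per cache id. Below is a minimal sketch of walking that structure; it assumes the same imports and fields as the snippet above and uses only accessors that appear in the snippets on this page (msg and the loop variables are placeholders).

// Sketch only: iterate the per-cache full maps carried by a GridDhtPartitionsFullMessage.
for (Map.Entry<Integer, GridDhtPartitionFullMap> cacheEntry : msg.partitions().entrySet()) {
    Integer cacheId = cacheEntry.getKey();
    GridDhtPartitionFullMap fullMap = cacheEntry.getValue();

    // Each full map is keyed by node id; each per-node map is keyed by partition id.
    for (Map.Entry<UUID, GridDhtPartitionMap> nodeEntry : fullMap.entrySet()) {
        for (Map.Entry<Integer, GridDhtPartitionState> partEntry : nodeEntry.getValue().entrySet())
            log.debug("cache=" + cacheId + ", node=" + nodeEntry.getKey() +
                ", part=" + partEntry.getKey() + ", state=" + partEntry.getValue());
    }
}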
Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionFullMap in project ignite by apache.
The class GridDhtPartitionTopologyImpl, method beforeExchange.
/** {@inheritDoc} */
@Override public void beforeExchange(GridDhtPartitionsExchangeFuture exchFut, boolean affReady)
    throws IgniteCheckedException {
    DiscoveryEvent discoEvt = exchFut.discoveryEvent();

    ClusterState newState = exchFut.newClusterState();

    treatAllPartAsLoc = (newState != null && newState == ClusterState.ACTIVE)
        || (cctx.kernalContext().state().active()
            && discoEvt.type() == EventType.EVT_NODE_JOINED
            && discoEvt.eventNode().isLocal()
            && !cctx.kernalContext().clientNode());

    // Wait for rent outside of checkpoint lock.
    waitForRent();

    ClusterNode loc = cctx.localNode();

    cctx.shared().database().checkpointReadLock();

    synchronized (cctx.shared().exchange().interruptLock()) {
        if (Thread.currentThread().isInterrupted())
            throw new IgniteInterruptedCheckedException("Thread is interrupted: " + Thread.currentThread());

        try {
            U.writeLock(lock);
        }
        catch (IgniteInterruptedCheckedException e) {
            cctx.shared().database().checkpointReadUnlock();

            throw e;
        }

        try {
            GridDhtPartitionExchangeId exchId = exchFut.exchangeId();

            if (stopping)
                return;

            assert topVer.equals(exchId.topologyVersion()) :
                "Invalid topology version [topVer=" + topVer + ", exchId=" + exchId + ']';

            if (exchId.isLeft())
                removeNode(exchId.nodeId());

            ClusterNode oldest = discoCache.oldestAliveServerNodeWithCache();

            if (log.isDebugEnabled())
                log.debug("Partition map beforeExchange [exchId=" + exchId + ", fullMap=" + fullMapString() + ']');

            long updateSeq = this.updateSeq.incrementAndGet();

            cntrMap.clear();

            // If this is the oldest node.
            if (oldest != null && (loc.equals(oldest) || exchFut.cacheAddedOnExchange(cctx.cacheId(), cctx.receivedFrom()))) {
                if (node2part == null) {
                    node2part = new GridDhtPartitionFullMap(oldest.id(), oldest.order(), updateSeq);

                    if (log.isDebugEnabled())
                        log.debug("Created brand new full topology map on oldest node [exchId=" + exchId + ", fullMap=" + fullMapString() + ']');
                }
                else if (!node2part.valid()) {
                    node2part = new GridDhtPartitionFullMap(oldest.id(), oldest.order(), updateSeq, node2part, false);

                    if (log.isDebugEnabled())
                        log.debug("Created new full topology map on oldest node [exchId=" + exchId + ", fullMap=" + node2part + ']');
                }
                else if (!node2part.nodeId().equals(loc.id())) {
                    node2part = new GridDhtPartitionFullMap(oldest.id(), oldest.order(), updateSeq, node2part, false);

                    if (log.isDebugEnabled())
                        log.debug("Copied old map into new map on oldest node (previous oldest node left) [exchId=" + exchId + ", fullMap=" + fullMapString() + ']');
                }
            }

            if (affReady)
                initPartitions0(exchFut, updateSeq);
            else {
                List<List<ClusterNode>> aff = cctx.affinity().idealAssignment();

                createPartitions(aff, updateSeq);
            }

            consistencyCheck();

            if (log.isDebugEnabled())
                log.debug("Partition map after beforeExchange [exchId=" + exchId + ", fullMap=" + fullMapString() + ']');
        }
        finally {
            lock.writeLock().unlock();

            cctx.shared().database().checkpointReadUnlock();
        }
    }

    // Wait for evictions.
    waitForRent();
}
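Both beforeExchange above and GridClientPartitionTopology.beforeExchange0 further down follow the same three-branch pattern when the local node is the oldest alive server: create a brand-new GridDhtPartitionFullMap, rebuild an invalidated one, or re-own a map created by a previous (now departed) oldest node. The following is a condensed, hypothetical helper capturing that pattern; it is not part of Ignite and uses only the constructors and accessors visible in the snippets.

// Sketch: (re)create the shared full map on the coordinator, mirroring the branches above.
private GridDhtPartitionFullMap refreshFullMap(@Nullable GridDhtPartitionFullMap cur,
    ClusterNode oldest, ClusterNode loc, long updateSeq) {
    if (cur == null)
        // No map yet: start a brand new one owned by the oldest node.
        return new GridDhtPartitionFullMap(oldest.id(), oldest.order(), updateSeq);

    if (!cur.valid() || !cur.nodeId().equals(loc.id()))
        // Map is invalid or was owned by a previous oldest node: copy it under the new owner.
        return new GridDhtPartitionFullMap(oldest.id(), oldest.order(), updateSeq, cur, false);

    return cur;
}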
Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionFullMap in project ignite by apache.
The class GridDhtPartitionTopologyImpl, method update.
/** {@inheritDoc} */
@SuppressWarnings({"MismatchedQueryAndUpdateOfCollection"})
@Override public GridDhtPartitionMap update(@Nullable GridDhtPartitionExchangeId exchId,
    GridDhtPartitionFullMap partMap,
    @Nullable Map<Integer, T2<Long, Long>> cntrMap) {
    if (log.isDebugEnabled())
        log.debug("Updating full partition map [exchId=" + exchId + ", parts=" + fullMapString() + ']');

    assert partMap != null;

    lock.writeLock().lock();

    try {
        if (stopping)
            return null;

        if (cntrMap != null) {
            // Update the local map of partition counters.
            for (Map.Entry<Integer, T2<Long, Long>> e : cntrMap.entrySet()) {
                T2<Long, Long> cntr = this.cntrMap.get(e.getKey());

                if (cntr == null || cntr.get2() < e.getValue().get2())
                    this.cntrMap.put(e.getKey(), e.getValue());
            }

            // Update counters in local partitions.
            for (int i = 0; i < locParts.length(); i++) {
                GridDhtLocalPartition part = locParts.get(i);

                if (part == null)
                    continue;

                T2<Long, Long> cntr = cntrMap.get(part.id());

                if (cntr != null)
                    part.updateCounter(cntr.get2());
            }
        }

        // Skip stale updates.
        if (exchId != null && lastExchangeId != null && lastExchangeId.compareTo(exchId) >= 0) {
            if (log.isDebugEnabled())
                log.debug("Stale exchange id for full partition map update (will ignore) [lastExchId=" + lastExchangeId + ", exchId=" + exchId + ']');

            return null;
        }

        if (node2part != null && node2part.compareTo(partMap) >= 0) {
            if (log.isDebugEnabled())
                log.debug("Stale partition map for full partition map update (will ignore) [lastExchId=" + lastExchangeId + ", exchId=" + exchId + ", curMap=" + node2part + ", newMap=" + partMap + ']');

            return null;
        }

        long updateSeq = this.updateSeq.incrementAndGet();

        if (exchId != null)
            lastExchangeId = exchId;

        if (node2part != null) {
            for (GridDhtPartitionMap part : node2part.values()) {
                GridDhtPartitionMap newPart = partMap.get(part.nodeId());

                // If the current map has a newer entry for this node, keep the newer value.
                if (newPart != null && (newPart.updateSequence() < part.updateSequence()
                    || (cctx.startTopologyVersion().compareTo(newPart.topologyVersion()) > 0))) {
                    if (log.isDebugEnabled())
                        log.debug("Overriding partition map in full update map [exchId=" + exchId + ", curPart=" + mapString(part) + ", newPart=" + mapString(newPart) + ']');

                    partMap.put(part.nodeId(), part);
                }
            }

            // Remove entries for nodes that have left.
            for (Iterator<UUID> it = partMap.keySet().iterator(); it.hasNext(); ) {
                UUID nodeId = it.next();

                if (!cctx.discovery().alive(nodeId)) {
                    if (log.isDebugEnabled())
                        log.debug("Removing left node from full map update [nodeId=" + nodeId + ", partMap=" + partMap + ']');

                    it.remove();
                }
            }
        }

        node2part = partMap;

        Map<Integer, Set<UUID>> p2n = new HashMap<>(cctx.affinity().partitions(), 1.0f);

        for (Map.Entry<UUID, GridDhtPartitionMap> e : partMap.entrySet()) {
            for (Integer p : e.getValue().keySet()) {
                Set<UUID> ids = p2n.get(p);

                if (ids == null)
                    // Initialize HashSet to size 3 in anticipation that there won't be
                    // more than 3 nodes per partition.
                    p2n.put(p, ids = U.newHashSet(3));

                ids.add(e.getKey());
            }
        }

        part2node = p2n;

        boolean changed = false;

        AffinityTopologyVersion affVer = cctx.affinity().affinityTopologyVersion();

        GridDhtPartitionMap nodeMap = partMap.get(cctx.localNodeId());

        if (nodeMap != null && cctx.shared().database().persistenceEnabled()) {
            for (Map.Entry<Integer, GridDhtPartitionState> e : nodeMap.entrySet()) {
                int p = e.getKey();
                GridDhtPartitionState state = e.getValue();

                if (state == MOVING) {
                    GridDhtLocalPartition locPart = locParts.get(p);

                    assert locPart != null;

                    if (locPart.state() == OWNING) {
                        locPart.moving();

                        changed = true;
                    }

                    if (cntrMap != null) {
                        T2<Long, Long> cntr = cntrMap.get(p);

                        if (cntr != null && cntr.get2() > locPart.updateCounter())
                            locPart.updateCounter(cntr.get2());
                    }
                }
            }
        }

        if (!affVer.equals(AffinityTopologyVersion.NONE) && affVer.compareTo(topVer) >= 0) {
            List<List<ClusterNode>> aff = cctx.affinity().assignments(topVer);

            changed |= checkEvictions(updateSeq, aff);

            updateRebalanceVersion(aff);
        }

        consistencyCheck();

        if (log.isDebugEnabled())
            log.debug("Partition map after full update: " + fullMapString());

        if (changed)
            cctx.shared().exchange().scheduleResendPartitions();

        return changed ? localPartitionMap() : null;
    }
    finally {
        lock.writeLock().unlock();
    }
}
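The two early returns above implement a staleness check: a full map is applied only if it is newer than both the last processed exchange id and the current node2part map. A hypothetical helper distilling that check is sketched below; it relies only on the compareTo calls exercised in the snippet and is not part of the Ignite API.

// Sketch: a full map update is stale if its exchange id or the map itself is not newer.
private static boolean isStaleFullMapUpdate(@Nullable GridDhtPartitionExchangeId lastExchId,
    @Nullable GridDhtPartitionExchangeId exchId,
    @Nullable GridDhtPartitionFullMap curMap,
    GridDhtPartitionFullMap newMap) {
    if (exchId != null && lastExchId != null && lastExchId.compareTo(exchId) >= 0)
        return true; // Exchange id is not newer than the last one processed.

    return curMap != null && curMap.compareTo(newMap) >= 0; // Current map is at least as new.
}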
Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionFullMap in project ignite by apache.
The class GridClientPartitionTopology, method beforeExchange0.
/**
 * @param loc Local node.
 * @param exchFut Exchange future.
 */
private void beforeExchange0(ClusterNode loc, GridDhtPartitionsExchangeFuture exchFut) {
    GridDhtPartitionExchangeId exchId = exchFut.exchangeId();

    assert topVer.equals(exchId.topologyVersion()) :
        "Invalid topology version [topVer=" + topVer + ", exchId=" + exchId + ']';

    if (!exchId.isJoined())
        removeNode(exchId.nodeId());

    // In case a node joins, get the topology as of the time of the joining node.
    ClusterNode oldest = discoCache.oldestAliveServerNodeWithCache();

    assert oldest != null;

    if (log.isDebugEnabled())
        log.debug("Partition map beforeExchange [exchId=" + exchId + ", fullMap=" + fullMapString() + ']');

    long updateSeq = this.updateSeq.incrementAndGet();

    // If this is the oldest node.
    if (oldest.id().equals(loc.id()) || exchFut.dynamicCacheStarted(cacheId)) {
        if (node2part == null) {
            node2part = new GridDhtPartitionFullMap(oldest.id(), oldest.order(), updateSeq);

            if (log.isDebugEnabled())
                log.debug("Created brand new full topology map on oldest node [exchId=" + exchId + ", fullMap=" + fullMapString() + ']');
        }
        else if (!node2part.valid()) {
            node2part = new GridDhtPartitionFullMap(oldest.id(), oldest.order(), updateSeq, node2part, false);

            if (log.isDebugEnabled())
                log.debug("Created new full topology map on oldest node [exchId=" + exchId + ", fullMap=" + node2part + ']');
        }
        else if (!node2part.nodeId().equals(loc.id())) {
            node2part = new GridDhtPartitionFullMap(oldest.id(), oldest.order(), updateSeq, node2part, false);

            if (log.isDebugEnabled())
                log.debug("Copied old map into new map on oldest node (previous oldest node left) [exchId=" + exchId + ", fullMap=" + fullMapString() + ']');
        }
    }

    consistencyCheck();

    if (log.isDebugEnabled())
        log.debug("Partition map after beforeExchange [exchId=" + exchId + ", fullMap=" + fullMapString() + ']');
}
Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionFullMap in project ignite by apache.
The class GridCacheDhtPreloadSelfTest, method checkActiveState.
/**
 * @param grids Grids.
 */
private void checkActiveState(Iterable<Ignite> grids) {
    // Check that nodes don't have non-active information about other nodes.
    for (Ignite g : grids) {
        IgniteCache<Integer, String> c = g.cache(DEFAULT_CACHE_NAME);

        GridDhtCacheAdapter<Integer, String> dht = dht(c);

        GridDhtPartitionFullMap allParts = dht.topology().partitionMap(false);

        for (GridDhtPartitionMap parts : allParts.values()) {
            if (!parts.nodeId().equals(g.cluster().localNode().id())) {
                for (Map.Entry<Integer, GridDhtPartitionState> e : parts.entrySet()) {
                    int p = e.getKey();

                    GridDhtPartitionState state = e.getValue();

                    assert state == OWNING || state == MOVING || state == RENTING :
                        "Invalid state [igniteInstanceName=" + g.name() + ", part=" + p + ", state=" + state + ", parts=" + parts + ']';

                    assert state.active();
                }
            }
        }
    }
}
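In the same test-style spirit, a full map obtained via partitionMap(false) can be summarized per node, for example to count owned partitions. A small hypothetical sketch follows, assuming only the accessors used in checkActiveState above (allParts is the variable from that snippet).

// Sketch: count OWNING partitions per node from a full partition map.
Map<UUID, Integer> owningCnt = new HashMap<>();

for (GridDhtPartitionMap parts : allParts.values()) {
    int cnt = 0;

    for (Map.Entry<Integer, GridDhtPartitionState> e : parts.entrySet()) {
        if (e.getValue() == OWNING)
            cnt++;
    }

    owningCnt.put(parts.nodeId(), cnt);
}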