Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap in project ignite by apache.
The class GridClientPartitionTopology, method update.
/** {@inheritDoc} */
@SuppressWarnings("MismatchedQueryAndUpdateOfCollection")
@Override public boolean update(@Nullable GridDhtPartitionExchangeId exchId, GridDhtPartitionMap parts, boolean force) {
    if (log.isDebugEnabled())
        log.debug("Updating single partition map [exchId=" + exchId + ", parts=" + mapString(parts) + ']');

    if (!cctx.discovery().alive(parts.nodeId())) {
        if (log.isDebugEnabled())
            log.debug("Received partition update for non-existing node (will ignore) [exchId=" + exchId +
                ", parts=" + parts + ']');

        return false;
    }

    lock.writeLock().lock();

    try {
        if (stopping)
            return false;

        if (!force) {
            if (lastExchangeVer != null && exchId != null && lastExchangeVer.compareTo(exchId.topologyVersion()) > 0) {
                if (log.isDebugEnabled())
                    log.debug("Stale exchange id for single partition map update (will ignore) [lastExchVer=" +
                        lastExchangeVer + ", exchId=" + exchId + ']');

                return false;
            }
        }

        if (exchId != null)
            lastExchangeVer = exchId.topologyVersion();

        if (node2part == null)
            // Create invalid partition map.
            node2part = new GridDhtPartitionFullMap();

        GridDhtPartitionMap cur = node2part.get(parts.nodeId());

        if (force) {
            if (cur != null && cur.topologyVersion().initialized())
                parts.updateSequence(cur.updateSequence(), cur.topologyVersion());
        }
        else if (isStaleUpdate(cur, parts)) {
            if (log.isDebugEnabled())
                log.debug("Stale update for single partition map update (will ignore) [exchId=" + exchId +
                    ", curMap=" + cur + ", newMap=" + parts + ']');

            return false;
        }

        long updateSeq = this.updateSeq.incrementAndGet();

        node2part = new GridDhtPartitionFullMap(node2part, updateSeq);

        boolean changed = cur == null || !cur.equals(parts);

        node2part.put(parts.nodeId(), parts);

        // Add new mappings.
        for (Map.Entry<Integer, GridDhtPartitionState> e : parts.entrySet()) {
            int p = e.getKey();

            Set<UUID> ids = part2node.get(p);

            if (e.getValue() == MOVING || e.getValue() == OWNING) {
                if (ids == null)
                    // Initialize HashSet to size 3 in anticipation that there won't be
                    // more than 3 nodes per partition.
                    part2node.put(p, ids = U.newHashSet(3));

                changed |= ids.add(parts.nodeId());
            }
            else {
                if (ids != null)
                    changed |= ids.remove(parts.nodeId());
            }
        }

        // Remove obsolete mappings.
        if (cur != null) {
            for (Integer p : F.view(cur.keySet(), F0.notIn(parts.keySet()))) {
                Set<UUID> ids = part2node.get(p);

                if (ids != null)
                    changed |= ids.remove(parts.nodeId());
            }
        }

        consistencyCheck();

        if (log.isDebugEnabled())
            log.debug("Partition map after single update: " + fullMapString());

        return changed;
    }
    finally {
        lock.writeLock().unlock();
    }
}
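For context, the parts argument consumed by update() is a single-node map built from a node id, an update sequence, a topology version, and per-partition states. A minimal sketch of constructing one (a hypothetical helper, reusing the imports and static constants of the class above; the five-argument GridDhtPartitionMap constructor appears verbatim in affinityFullMap() further down):

// Hypothetical helper: build the single-node map that update() consumes,
// claiming ownedParts as OWNING and movingParts as MOVING.
private GridDhtPartitionMap singleNodeMap(
    UUID nodeId,
    long updateSeq,
    AffinityTopologyVersion topVer,
    Set<Integer> ownedParts,
    Set<Integer> movingParts
) {
    GridDhtPartitionMap map = new GridDhtPartitionMap(nodeId, updateSeq, topVer, new GridPartitionStateMap(), false);

    for (Integer p : ownedParts)
        map.put(p, OWNING); // Fully rebalanced on this node.

    for (Integer p : movingParts)
        map.put(p, MOVING); // Still being rebalanced to this node.

    return map;
}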
Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap in project ignite by apache.
The class GridClientPartitionTopology, method createMovingPartitions.
/**
 * @param aff Affinity.
 */
private void createMovingPartitions(AffinityAssignment aff) {
    for (Map.Entry<UUID, GridDhtPartitionMap> e : node2part.entrySet()) {
        GridDhtPartitionMap map = e.getValue();

        addMoving(map, aff.backupPartitions(e.getKey()));
        addMoving(map, aff.primaryPartitions(e.getKey()));
    }
}
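The addMoving() helper is not part of this excerpt. A plausible sketch of what it does, inferred only from the call sites above (a hedged reconstruction, not the actual Ignite source):

// Hedged reconstruction of addMoving(): mark each assigned partition as MOVING
// in the node's map unless the node already reports a live state for it
// (assumption: existing OWNING/MOVING states must not be downgraded here).
private void addMoving(GridDhtPartitionMap map, Set<Integer> parts) {
    if (F.isEmpty(parts))
        return;

    for (Integer p : parts) {
        GridDhtPartitionState state = map.get(p);

        if (state == null || state == EVICTED)
            map.put(p, MOVING);
    }
}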
Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap in project ignite by apache.
The class GridCommonAbstractTest, method awaitPartitionMapExchange.
/**
 * @param waitEvicts If {@code true}, will wait for evictions to finish.
 * @param waitNode2PartUpdate If {@code true}, will wait for the nodes' node2part info updates to finish.
 * @param nodes Optional nodes. If {@code null}, the method will wait for all nodes; for a non-null
 *      collection, only the given nodes are awaited.
 * @param printPartState If {@code true}, will print the partition state if evictions did not happen.
 * @throws InterruptedException If interrupted.
 */
@SuppressWarnings("BusyWait")
protected void awaitPartitionMapExchange(
    boolean waitEvicts,
    boolean waitNode2PartUpdate,
    @Nullable Collection<ClusterNode> nodes,
    boolean printPartState
) throws InterruptedException {
    long timeout = getPartitionMapExchangeTimeout();

    long startTime = -1;

    Set<String> names = new HashSet<>();

    Ignite crd = null;

    for (Ignite g : G.allGrids()) {
        ClusterNode node = g.cluster().localNode();

        if (crd == null || node.order() < crd.cluster().localNode().order()) {
            crd = g;

            if (node.order() == 1)
                break;
        }
    }

    if (crd == null)
        return;

    AffinityTopologyVersion waitTopVer = ((IgniteKernal)crd).context().discovery().topologyVersionEx();

    if (waitTopVer.topologyVersion() <= 0)
        waitTopVer = new AffinityTopologyVersion(1, 0);

    for (Ignite g : G.allGrids()) {
        if (nodes != null && !nodes.contains(g.cluster().localNode()))
            continue;

        IgniteKernal g0 = (IgniteKernal)g;

        names.add(g0.configuration().getIgniteInstanceName());

        if (startTime != -1) {
            if (startTime != g0.context().discovery().gridStartTime())
                fail("Found nodes from different clusters, probably some test does not stop nodes " +
                    "[allNodes=" + names + ']');
        }
        else
            startTime = g0.context().discovery().gridStartTime();

        if (g.cluster().localNode().isDaemon())
            continue;

        IgniteInternalFuture<?> exchFut = g0.context().cache().context().exchange().affinityReadyFuture(waitTopVer);

        if (exchFut != null && !exchFut.isDone()) {
            try {
                exchFut.get(timeout);
            }
            catch (IgniteCheckedException e) {
                log.error("Failed to wait for exchange [topVer=" + waitTopVer + ", node=" + g0.name() + ']', e);
            }
        }

        for (IgniteCacheProxy<?, ?> c : g0.context().cache().jcaches()) {
            CacheConfiguration cfg = c.context().config();

            if (cfg == null)
                continue;

            if (cfg.getCacheMode() != LOCAL && cfg.getRebalanceMode() != NONE && g.cluster().nodes().size() > 1) {
                AffinityFunction aff = cfg.getAffinity();

                GridDhtCacheAdapter<?, ?> dht = dht(c);

                GridDhtPartitionTopology top = dht.topology();

                for (int p = 0; p < aff.partitions(); p++) {
                    long start = 0;

                    for (int i = 0; ; i++) {
                        boolean match = false;

                        GridCachePartitionExchangeManager<?, ?> exchMgr = dht.context().shared().exchange();

                        AffinityTopologyVersion readyVer = exchMgr.readyAffinityVersion();

                        // Wait until the ready version is finalized; otherwise, there may be
                        // an assertion when printing top.readyTopologyVersion().
                        try {
                            IgniteInternalFuture<?> fut = exchMgr.affinityReadyFuture(readyVer);

                            if (fut != null)
                                fut.get();
                        }
                        catch (IgniteCheckedException e) {
                            throw new IgniteException(e);
                        }

                        if (readyVer.topologyVersion() > 0 && c.context().started()) {
                            // Must map on updated version of topology.
                            Collection<ClusterNode> affNodes =
                                dht.context().affinity().assignment(readyVer).idealAssignment().get(p);

                            int affNodesCnt = affNodes.size();

                            GridDhtTopologyFuture topFut = top.topologyVersionFuture();

                            Collection<ClusterNode> owners = (topFut != null && topFut.isDone()) ?
                                top.owners(p, AffinityTopologyVersion.NONE) :
                                Collections.<ClusterNode>emptyList();

                            int ownerNodesCnt = owners.size();

                            GridDhtLocalPartition loc = top.localPartition(p, readyVer, false);

                            if (affNodesCnt != ownerNodesCnt || !affNodes.containsAll(owners) ||
                                (waitEvicts && loc != null && loc.state() != GridDhtPartitionState.OWNING)) {
                                LT.warn(log(), "Waiting for topology map update [" +
                                    "igniteInstanceName=" + g.name() +
                                    ", cache=" + cfg.getName() +
                                    ", cacheId=" + dht.context().cacheId() +
                                    ", topVer=" + top.readyTopologyVersion() +
                                    ", p=" + p +
                                    ", affNodesCnt=" + affNodesCnt +
                                    ", ownersCnt=" + ownerNodesCnt +
                                    ", affNodes=" + F.nodeIds(affNodes) +
                                    ", owners=" + F.nodeIds(owners) +
                                    ", topFut=" + topFut +
                                    ", locNode=" + g.cluster().localNode() + ']');
                            }
                            else
                                match = true;
                        }
                        else {
                            LT.warn(log(), "Waiting for topology map update [" +
                                "igniteInstanceName=" + g.name() +
                                ", cache=" + cfg.getName() +
                                ", cacheId=" + dht.context().cacheId() +
                                ", topVer=" + top.readyTopologyVersion() +
                                ", started=" + dht.context().started() +
                                ", p=" + p +
                                ", readVer=" + readyVer +
                                ", locNode=" + g.cluster().localNode() + ']');
                        }

                        if (!match) {
                            if (i == 0)
                                start = System.currentTimeMillis();

                            if (System.currentTimeMillis() - start > timeout) {
                                U.dumpThreads(log);

                                if (printPartState)
                                    printPartitionState(c);

                                throw new IgniteException("Timeout of waiting for topology map update [" +
                                    "igniteInstanceName=" + g.name() +
                                    ", cache=" + cfg.getName() +
                                    ", cacheId=" + dht.context().cacheId() +
                                    ", topVer=" + top.readyTopologyVersion() +
                                    ", p=" + p +
                                    ", readVer=" + readyVer +
                                    ", locNode=" + g.cluster().localNode() + ']');
                            }

                            // Busy wait.
                            Thread.sleep(20);

                            continue;
                        }

                        if (i > 0)
                            log().warning("Finished waiting for topology map update [igniteInstanceName=" + g.name() +
                                ", p=" + p + ", duration=" + (System.currentTimeMillis() - start) + "ms]");

                        break;
                    }
                }

                if (waitNode2PartUpdate) {
                    long start = System.currentTimeMillis();

                    boolean failed = true;

                    while (failed) {
                        failed = false;

                        for (GridDhtPartitionMap pMap : top.partitionMap(true).values()) {
                            if (failed)
                                break;

                            for (Map.Entry<Integer, GridDhtPartitionState> entry : pMap.entrySet()) {
                                if (System.currentTimeMillis() - start > timeout) {
                                    U.dumpThreads(log);

                                    throw new IgniteException("Timeout of waiting for partition state update [" +
                                        "igniteInstanceName=" + g.name() +
                                        ", cache=" + cfg.getName() +
                                        ", cacheId=" + dht.context().cacheId() +
                                        ", topVer=" + top.readyTopologyVersion() +
                                        ", locNode=" + g.cluster().localNode() + ']');
                                }

                                if (entry.getValue() != GridDhtPartitionState.OWNING) {
                                    LT.warn(log(), "Waiting for correct partition state, should be OWNING [part=" +
                                        entry.getKey() + ", state=" + entry.getValue() +
                                        ", node=" + g.name() + ", cache=" + c.getName() + ']');

                                    // Busy wait.
                                    Thread.sleep(200);

                                    failed = true;

                                    break;
                                }
                            }
                        }
                    }
                }
            }
        }
    }

    log.info("awaitPartitionMapExchange finished");
}
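A typical call site in a test, using the four-argument signature shown above (the grid count and flag values are illustrative; startGrids() and stopAllGrids() are the usual GridAbstractTest helpers):

// Illustrative test flow: change the topology, then block until the partition
// map exchange settles on every node before asserting anything about ownership.
public void testRebalanceSettles() throws Exception {
    startGrids(3); // Bring up a three-node cluster.

    try {
        // Wait for evictions and node2part convergence on all nodes,
        // printing the partition state if the wait times out.
        awaitPartitionMapExchange(true, true, null, true);
    }
    finally {
        stopAllGrids();
    }
}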
Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap in project ignite by apache.
The class CacheAffinitySharedManager, method affinityFullMap.
/**
 * @param aff Affinity assignment.
 * @return Per-node partition maps in which every affinity owner reports all of its partitions as OWNING.
 */
private Map<UUID, GridDhtPartitionMap> affinityFullMap(AffinityAssignment aff) {
    Map<UUID, GridDhtPartitionMap> map = new HashMap<>();

    for (int p = 0; p < aff.assignment().size(); p++) {
        Collection<UUID> ids = aff.getIds(p);

        for (UUID nodeId : ids) {
            GridDhtPartitionMap partMap = map.get(nodeId);

            if (partMap == null) {
                partMap = new GridDhtPartitionMap(nodeId, 1L, aff.topologyVersion(), new GridPartitionStateMap(), false);

                map.put(nodeId, partMap);
            }

            partMap.put(p, OWNING);
        }
    }

    return map;
}
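As a quick illustration of consuming the result (a hypothetical helper, not part of CacheAffinitySharedManager), here is a sketch that counts how many partitions each affinity owner would report as OWNING:

// Hypothetical consumer: per-node count of partitions reported as OWNING
// by the ideal-affinity map built above.
private Map<UUID, Integer> owningCounts(AffinityAssignment aff) {
    Map<UUID, Integer> cnts = new HashMap<>();

    for (Map.Entry<UUID, GridDhtPartitionMap> e : affinityFullMap(aff).entrySet()) {
        int owning = 0;

        for (Map.Entry<Integer, GridDhtPartitionState> pe : e.getValue().entrySet()) {
            if (pe.getValue() == OWNING)
                owning++;
        }

        cnts.put(e.getKey(), owning);
    }

    return cnts;
}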
Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap in project ignite by apache.
The class CacheGroupMetricsImpl, method clusterPartitionsMapByState.
/**
 * Gets partitions allocation map with a given state.
 *
 * @param state State.
 * @return Partitions allocation map.
 */
private Map<Integer, Set<String>> clusterPartitionsMapByState(GridDhtPartitionState state) {
    GridDhtPartitionFullMap partFullMap = ctx.topology().partitionMap(false);

    if (partFullMap == null)
        return Collections.emptyMap();

    int parts = ctx.topology().partitions();

    Map<Integer, Set<String>> partsMap = new LinkedHashMap<>();

    for (int part = 0; part < parts; part++) {
        Set<String> partNodesSet = new HashSet<>();

        for (Map.Entry<UUID, GridDhtPartitionMap> entry : partFullMap.entrySet()) {
            if (entry.getValue().get(part) == state)
                partNodesSet.add(entry.getKey().toString());
        }

        partsMap.put(part, partNodesSet);
    }

    return partsMap;
}
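This private method backs the per-state allocation-map getters of the cache group metrics. A hedged sketch of such callers (the getter names mirror the cache group metrics MX bean, but treat them as illustrative here):

// Illustrative callers: expose cluster-wide allocation maps per partition state.
public Map<Integer, Set<String>> getOwningPartitionsAllocationMap() {
    return clusterPartitionsMapByState(GridDhtPartitionState.OWNING);
}

public Map<Integer, Set<String>> getMovingPartitionsAllocationMap() {
    return clusterPartitionsMapByState(GridDhtPartitionState.MOVING);
}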