Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.CachePartitionFullCountersMap in project ignite by apache.
The class GridDhtPartitionTopologyImpl, method update.
/**
* {@inheritDoc}
*/
@SuppressWarnings({ "MismatchedQueryAndUpdateOfCollection" })
@Override
public boolean update(@Nullable AffinityTopologyVersion exchangeVer, GridDhtPartitionFullMap partMap, @Nullable CachePartitionFullCountersMap incomeCntrMap, Set<Integer> partsToReload, @Nullable AffinityTopologyVersion msgTopVer) {
if (log.isDebugEnabled()) {
log.debug("Updating full partition map [grp=" + grp.cacheOrGroupName() + ", exchVer=" + exchangeVer + ", fullMap=" + fullMapString() + ']');
}
assert partMap != null;
ctx.database().checkpointReadLock();
try {
lock.writeLock().lock();
try {
if (stopping || !lastTopChangeVer.initialized() || // Ignore a message not related to an exchange while an exchange is in progress.
(exchangeVer == null && !lastTopChangeVer.equals(readyTopVer)))
return false;
if (incomeCntrMap != null) {
// Update local counters in partitions.
for (int i = 0; i < locParts.length(); i++) {
GridDhtLocalPartition part = locParts.get(i);
if (part == null)
continue;
if (part.state() == OWNING || part.state() == MOVING) {
long updCntr = incomeCntrMap.updateCounter(part.id());
if (updCntr != 0 && updCntr > part.updateCounter())
part.updateCounter(updCntr);
}
}
}
if (exchangeVer != null) {
// Ignore if exchange already finished or new exchange started.
if (readyTopVer.compareTo(exchangeVer) > 0 || lastTopChangeVer.compareTo(exchangeVer) > 0) {
U.warn(log, "Stale exchange id for full partition map update (will ignore) [" + "grp=" + grp.cacheOrGroupName() + ", lastTopChange=" + lastTopChangeVer + ", readTopVer=" + readyTopVer + ", exchVer=" + exchangeVer + ']');
return false;
}
}
if (msgTopVer != null && lastTopChangeVer.compareTo(msgTopVer) > 0) {
U.warn(log, "Stale version for full partition map update message (will ignore) [" + "grp=" + grp.cacheOrGroupName() + ", lastTopChange=" + lastTopChangeVer + ", readTopVer=" + readyTopVer + ", msgVer=" + msgTopVer + ']');
return false;
}
boolean fullMapUpdated = (node2part == null);
if (node2part != null) {
for (GridDhtPartitionMap part : node2part.values()) {
GridDhtPartitionMap newPart = partMap.get(part.nodeId());
if (shouldOverridePartitionMap(part, newPart)) {
fullMapUpdated = true;
if (log.isDebugEnabled()) {
log.debug("Overriding partition map in full update map [" + "grp=" + grp.cacheOrGroupName() + ", exchVer=" + exchangeVer + ", curPart=" + mapString(part) + ", newPart=" + mapString(newPart) + ']');
}
if (newPart.nodeId().equals(ctx.localNodeId()))
updateSeq.setIfGreater(newPart.updateSequence());
} else {
// If the current partition map is newer for some node, keep the newer value.
partMap.put(part.nodeId(), part);
}
}
// Check that we have new nodes.
for (GridDhtPartitionMap part : partMap.values()) {
if (fullMapUpdated)
break;
fullMapUpdated = !node2part.containsKey(part.nodeId());
}
// Remove entry if node left.
for (Iterator<UUID> it = partMap.keySet().iterator(); it.hasNext(); ) {
UUID nodeId = it.next();
if (!ctx.discovery().alive(nodeId)) {
if (log.isDebugEnabled())
log.debug("Removing left node from full map update [grp=" + grp.cacheOrGroupName() + ", nodeId=" + nodeId + ", partMap=" + partMap + ']');
it.remove();
}
}
} else {
GridDhtPartitionMap locNodeMap = partMap.get(ctx.localNodeId());
if (locNodeMap != null)
updateSeq.setIfGreater(locNodeMap.updateSequence());
}
if (!fullMapUpdated) {
if (log.isDebugEnabled()) {
log.debug("No updates for full partition map (will ignore) [" + "grp=" + grp.cacheOrGroupName() + ", lastExch=" + lastTopChangeVer + ", exchVer=" + exchangeVer + ", curMap=" + node2part + ", newMap=" + partMap + ']');
}
return false;
}
if (exchangeVer != null) {
assert exchangeVer.compareTo(readyTopVer) >= 0 && exchangeVer.compareTo(lastTopChangeVer) >= 0;
lastTopChangeVer = readyTopVer = exchangeVer;
}
node2part = partMap;
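// Recompute the diff-from-affinity index: nodes that report OWNING, MOVING or RENTING for a partition but are not in its ready affinity assignment.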
if (exchangeVer == null && !grp.isReplicated() && (readyTopVer.initialized() && readyTopVer.compareTo(diffFromAffinityVer) >= 0)) {
AffinityAssignment affAssignment = grp.affinity().readyAffinity(readyTopVer);
for (Map.Entry<UUID, GridDhtPartitionMap> e : partMap.entrySet()) {
for (Map.Entry<Integer, GridDhtPartitionState> e0 : e.getValue().entrySet()) {
int p = e0.getKey();
Set<UUID> diffIds = diffFromAffinity.get(p);
if ((e0.getValue() == MOVING || e0.getValue() == OWNING || e0.getValue() == RENTING) && !affAssignment.getIds(p).contains(e.getKey())) {
if (diffIds == null)
diffFromAffinity.put(p, diffIds = U.newHashSet(3));
diffIds.add(e.getKey());
} else {
if (diffIds != null && diffIds.remove(e.getKey())) {
if (diffIds.isEmpty())
diffFromAffinity.remove(p);
}
}
}
}
diffFromAffinityVer = readyTopVer;
}
boolean changed = false;
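// For persistent caches, align local partition states with the states the full map reports for this node.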
GridDhtPartitionMap nodeMap = partMap.get(ctx.localNodeId());
if (nodeMap != null && grp.persistenceEnabled() && readyTopVer.initialized()) {
for (Map.Entry<Integer, GridDhtPartitionState> e : nodeMap.entrySet()) {
int p = e.getKey();
GridDhtPartitionState state = e.getValue();
if (state == OWNING) {
GridDhtLocalPartition locPart = locParts.get(p);
assert locPart != null : grp.cacheOrGroupName();
if (locPart.state() == MOVING) {
boolean success = locPart.own();
assert success : locPart;
changed |= success;
}
} else if (state == MOVING) {
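// Ensure a local partition exists in MOVING state; if it is marked for reload, clear its data first.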
GridDhtLocalPartition locPart = locParts.get(p);
if (!partsToReload.contains(p)) {
if (locPart == null || locPart.state() == EVICTED)
locPart = createPartition(p);
if (locPart.state() == OWNING) {
locPart.moving();
changed = true;
}
} else {
if (locPart == null || locPart.state() == EVICTED) {
createPartition(p);
changed = true;
} else if (locPart.state() == OWNING || locPart.state() == MOVING) {
if (locPart.state() == OWNING)
locPart.moving();
locPart.clearAsync();
changed = true;
} else if (locPart.state() == RENTING) {
// Try to prevent partition eviction.
if (locPart.reserve()) {
try {
locPart.moving();
locPart.clearAsync();
} finally {
locPart.release();
}
} else // Otherwise just recreate it.
{
assert locPart.state() == EVICTED;
createPartition(p);
}
changed = true;
}
}
}
}
}
long updateSeq = this.updateSeq.incrementAndGet();
if (readyTopVer.initialized() && readyTopVer.equals(lastTopChangeVer)) {
AffinityAssignment aff = grp.affinity().readyAffinity(readyTopVer);
if (exchangeVer == null)
changed |= checkEvictions(updateSeq, aff);
updateRebalanceVersion(aff.assignment());
}
consistencyCheck();
if (log.isDebugEnabled()) {
log.debug("Partition map after full update [grp=" + grp.cacheOrGroupName() + ", map=" + fullMapString() + ']');
}
if (changed)
ctx.exchange().scheduleResendPartitions();
return changed;
} finally {
lock.writeLock().unlock();
}
} finally {
ctx.database().checkpointReadUnlock();
}
}
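For context, the counters lookup above (incomeCntrMap.updateCounter(part.id())) reads the latest known update counter of a partition. The test further below confirms reflectively that CachePartitionFullCountersMap is backed by two long arrays, initialUpdCntrs and updCntrs. The following is an illustrative reduction of that layout, not the real class; the constructor and accessor shapes are assumptions:
// Illustrative reduction of the CachePartitionFullCountersMap layout (sketch only).
class FullCountersMapSketch {
    /** Update counter of each partition at the moment rebalancing started. */
    private final long[] initialUpdCntrs;

    /** Latest known update counter of each partition. */
    private final long[] updCntrs;

    FullCountersMapSketch(int partsCnt) {
        initialUpdCntrs = new long[partsCnt];
        updCntrs = new long[partsCnt];
    }

    /** @return Latest update counter for the given partition. */
    long updateCounter(int partId) {
        return updCntrs[partId];
    }
}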
Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.CachePartitionFullCountersMap in project ignite by apache.
The class CacheExchangeMessageDuplicatedStateTest, method checkFullMessage.
/**
* @param msg Full partitions message to check.
*/
private void checkFullMessage(GridDhtPartitionsFullMessage msg) {
Map<Integer, Integer> dupPartsData = getFieldValue(msg, "dupPartsData");
assertNotNull(dupPartsData);
checkFullMessage(AFF1_CACHE1, AFF1_CACHE2, dupPartsData, msg);
checkFullMessage(AFF4_FILTER_CACHE1, AFF4_FILTER_CACHE2, dupPartsData, msg);
assertFalse(dupPartsData.containsKey(CU.cacheId(AFF3_CACHE1)));
Map<Integer, CachePartitionFullCountersMap> partCntrs = getFieldValue(getFieldValue(msg, "partCntrs2"), "map");
if (partCntrs != null) {
for (CachePartitionFullCountersMap cntrs : partCntrs.values()) {
long[] initialUpdCntrs = getFieldValue(cntrs, "initialUpdCntrs");
long[] updCntrs = getFieldValue(cntrs, "updCntrs");
for (int i = 0; i < initialUpdCntrs.length; i++) {
assertEquals(0, initialUpdCntrs[i]);
assertEquals(0, updCntrs[i]);
}
}
}
}
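The getFieldValue calls above are the reflective helpers from the test utilities (GridTestUtils). As a rough, simplified stand-in for what such a reader does (the real helper also walks superclasses and wraps checked exceptions):
import java.lang.reflect.Field;

// Simplified reflective field reader; illustration only, not the real GridTestUtils API.
final class ReflectionSketch {
    @SuppressWarnings("unchecked")
    static <T> T getFieldValue(Object target, String fieldName) throws Exception {
        Field f = target.getClass().getDeclaredField(fieldName);
        f.setAccessible(true);
        return (T)f.get(target);
    }
}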
Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.CachePartitionFullCountersMap in project ignite by apache.
The class GridCachePartitionExchangeManager, method createPartitionsFullMessage.
/**
* @param compress {@code True} if the message may be compressed (works properly only if prepareMarshall/
* finishUnmarshall methods are called).
* @param newCntrMap {@code True} if {@link CachePartitionFullCountersMap} may be used.
* @param exchId Non-null exchange ID if message is created for exchange.
* @param lastVer Last version.
* @param partHistSuppliers Partition history suppliers map.
* @param partsToReload Partitions to reload map.
* @return Message.
*/
public GridDhtPartitionsFullMessage createPartitionsFullMessage(boolean compress, boolean newCntrMap, @Nullable final GridDhtPartitionExchangeId exchId, @Nullable GridCacheVersion lastVer, @Nullable IgniteDhtPartitionHistorySuppliersMap partHistSuppliers, @Nullable IgniteDhtPartitionsToReloadMap partsToReload) {
final GridDhtPartitionsFullMessage m = new GridDhtPartitionsFullMessage(exchId, lastVer, exchId != null ? exchId.topologyVersion() : AffinityTopologyVersion.NONE, partHistSuppliers, partsToReload);
m.compress(compress);
final Map<Object, T2<Integer, GridDhtPartitionFullMap>> dupData = new HashMap<>();
for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
if (!grp.isLocal()) {
if (exchId != null) {
AffinityTopologyVersion startTopVer = grp.localStartVersion();
if (startTopVer.compareTo(exchId.topologyVersion()) > 0)
continue;
}
GridAffinityAssignmentCache affCache = grp.affinity();
GridDhtPartitionFullMap locMap = grp.topology().partitionMap(true);
if (locMap != null) {
addFullPartitionsMap(m, dupData, compress, grp.groupId(), locMap, affCache.similarAffinityKey());
}
if (exchId != null) {
CachePartitionFullCountersMap cntrsMap = grp.topology().fullUpdateCounters();
if (newCntrMap)
m.addPartitionUpdateCounters(grp.groupId(), cntrsMap);
else {
m.addPartitionUpdateCounters(grp.groupId(), CachePartitionFullCountersMap.toCountersMap(cntrsMap));
}
}
}
}
// It is important that client topologies be added after contexts.
for (GridClientPartitionTopology top : cctx.exchange().clientTopologies()) {
GridDhtPartitionFullMap map = top.partitionMap(true);
if (map != null) {
addFullPartitionsMap(m, dupData, compress, top.groupId(), map, top.similarAffinityKey());
}
if (exchId != null) {
CachePartitionFullCountersMap cntrsMap = top.fullUpdateCounters();
if (newCntrMap)
m.addPartitionUpdateCounters(top.groupId(), cntrsMap);
else
m.addPartitionUpdateCounters(top.groupId(), CachePartitionFullCountersMap.toCountersMap(cntrsMap));
}
}
return m;
}
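A hedged usage sketch of the factory above; the locals exchId, lastVer, partHistSuppliers and partsToReload are assumed to be in scope, and cctx.exchange() is assumed to return this exchange manager:
// Illustrative call; argument meanings follow the javadoc above.
GridDhtPartitionsFullMessage fullMsg = cctx.exchange().createPartitionsFullMessage(
    true,              // compress: allow compression (needs prepareMarshall/finishUnmarshall)
    true,              // newCntrMap: send counters as CachePartitionFullCountersMap
    exchId,            // non-null exchange ID when created for an exchange
    lastVer,           // last version
    partHistSuppliers, // partition history suppliers map
    partsToReload);    // partitions to reload map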
Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.CachePartitionFullCountersMap in project ignite by apache.
The class GridClientPartitionTopology, method update.
/**
* {@inheritDoc}
*/
@SuppressWarnings({ "MismatchedQueryAndUpdateOfCollection" })
@Override
public boolean update(@Nullable AffinityTopologyVersion exchangeVer, GridDhtPartitionFullMap partMap, @Nullable CachePartitionFullCountersMap cntrMap, Set<Integer> partsToReload, @Nullable AffinityTopologyVersion msgTopVer) {
if (log.isDebugEnabled())
log.debug("Updating full partition map [exchVer=" + exchangeVer + ", parts=" + fullMapString() + ']');
lock.writeLock().lock();
try {
if (exchangeVer != null && lastExchangeVer != null && lastExchangeVer.compareTo(exchangeVer) >= 0) {
if (log.isDebugEnabled())
log.debug("Stale exchange id for full partition map update (will ignore) [lastExchId=" + lastExchangeVer + ", exchVer=" + exchangeVer + ']');
return false;
}
if (msgTopVer != null && lastExchangeVer != null && lastExchangeVer.compareTo(msgTopVer) > 0) {
if (log.isDebugEnabled())
log.debug("Stale topology version for full partition map update message (will ignore) " + "[lastExchId=" + lastExchangeVer + ", topVersion=" + msgTopVer + ']');
return false;
}
boolean fullMapUpdated = (node2part == null);
if (node2part != null) {
for (GridDhtPartitionMap part : node2part.values()) {
GridDhtPartitionMap newPart = partMap.get(part.nodeId());
if (shouldOverridePartitionMap(part, newPart)) {
fullMapUpdated = true;
if (log.isDebugEnabled())
log.debug("Overriding partition map in full update map [exchId=" + exchangeVer + ", curPart=" + mapString(part) + ", newPart=" + mapString(newPart) + ']');
} else {
// If the current partition map is newer for some node, keep the newer value.
partMap.put(part.nodeId(), part);
}
}
// Check that we have new nodes.
for (GridDhtPartitionMap part : partMap.values()) {
if (fullMapUpdated)
break;
fullMapUpdated = !node2part.containsKey(part.nodeId());
}
// Remove entry if node left.
for (Iterator<UUID> it = partMap.keySet().iterator(); it.hasNext(); ) {
UUID nodeId = it.next();
if (!cctx.discovery().alive(nodeId)) {
if (log.isDebugEnabled())
log.debug("Removing left node from full map update [nodeId=" + nodeId + ", partMap=" + partMap + ']');
it.remove();
}
}
}
if (!fullMapUpdated) {
if (log.isDebugEnabled())
log.debug("No updates for full partition map (will ignore) [lastExch=" + lastExchangeVer + ", exch=" + exchangeVer + ", curMap=" + node2part + ", newMap=" + partMap + ']');
return false;
}
if (exchangeVer != null)
lastExchangeVer = exchangeVer;
node2part = partMap;
updateSeq.incrementAndGet();
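// Rebuild the reverse index from partition ID to the node IDs that own or are receiving that partition.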
part2node.clear();
for (Map.Entry<UUID, GridDhtPartitionMap> e : node2part.entrySet()) {
for (Map.Entry<Integer, GridDhtPartitionState> e0 : e.getValue().entrySet()) {
if (e0.getValue() != MOVING && e0.getValue() != OWNING)
continue;
int p = e0.getKey();
Set<UUID> ids = part2node.get(p);
if (ids == null)
// Initialize HashSet to size 3 in anticipation that there won't be
// more than 3 nodes per partition.
part2node.put(p, ids = U.newHashSet(3));
ids.add(e.getKey());
}
}
if (cntrMap != null)
this.cntrMap = new CachePartitionFullCountersMap(cntrMap);
consistencyCheck();
if (log.isDebugEnabled())
log.debug("Partition map after full update: " + fullMapString());
return false;
} finally {
lock.writeLock().unlock();
}
}
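The part2node structure rebuilt above serves as a reverse index from partition ID to the IDs of nodes that own or are receiving that partition. A minimal lookup sketch, where partId and nodeId are illustrative locals:
// Nodes currently hosting (OWNING) or rebalancing (MOVING) the given partition.
Set<UUID> ids = part2node.get(partId);

boolean hosted = ids != null && ids.contains(nodeId);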