Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap in project ignite by apache.
The class GridClientPartitionTopology, method detectLostPartitions.
/**
 * {@inheritDoc}
 */
@Override public boolean detectLostPartitions(AffinityTopologyVersion affVer, GridDhtPartitionsExchangeFuture fut) {
    lock.writeLock().lock();

    try {
        if (node2part == null)
            return false;

        final GridClusterStateProcessor state = cctx.kernalContext().state();

        boolean isInMemoryCluster = CU.isInMemoryCluster(
            cctx.kernalContext().discovery().allNodes(),
            cctx.kernalContext().marshallerContext().jdkMarshaller(),
            U.resolveClassLoader(cctx.kernalContext().config()));

        boolean compatibleWithIgnorePlc = isInMemoryCluster
            && state.isBaselineAutoAdjustEnabled()
            && state.baselineAutoAdjustTimeout() == 0L;

        // Calculate how data loss is handled.
        boolean safe = partLossPlc != PartitionLossPolicy.IGNORE || !compatibleWithIgnorePlc;

        boolean changed = false;

        for (int part = 0; part < parts; part++) {
            boolean lost = F.contains(lostParts, part);

            if (!lost) {
                boolean hasOwner = false;

                // Detect if all owners have left.
                for (GridDhtPartitionMap partMap : node2part.values()) {
                    if (partMap.get(part) == OWNING) {
                        hasOwner = true;

                        break;
                    }
                }

                if (!hasOwner) {
                    lost = true;

                    // Do not detect and record lost partitions in IGNORE mode.
                    if (safe) {
                        if (lostParts == null)
                            lostParts = new TreeSet<>();

                        lostParts.add(part);

                        changed = true;
                    }
                }
            }

            if (lost) {
                // Update remote maps according to policy.
                for (Map.Entry<UUID, GridDhtPartitionMap> entry : node2part.entrySet()) {
                    if (entry.getValue().get(part) != null)
                        entry.getValue().put(part, safe ? LOST : OWNING);
                }
            }
        }

        return changed;
    }
    finally {
        lock.writeLock().unlock();
    }
}
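The loop above boils down to a scan over the per-node partition maps: a partition is lost when no node reports it as OWNING. A minimal standalone sketch of that scan, using plain JDK collections instead of Ignite's internal topology types (the enum and class names below are illustrative, not Ignite's API):

import java.util.*;

public class LostPartitionScan {
    enum PartState { OWNING, MOVING, LOST }

    /** Returns the set of partitions for which no node owns a copy. */
    static SortedSet<Integer> detectLost(int parts, Map<UUID, Map<Integer, PartState>> node2part) {
        SortedSet<Integer> lost = new TreeSet<>();

        for (int p = 0; p < parts; p++) {
            boolean hasOwner = false;

            for (Map<Integer, PartState> partMap : node2part.values()) {
                if (partMap.get(p) == PartState.OWNING) {
                    hasOwner = true;

                    break;
                }
            }

            if (!hasOwner)
                lost.add(p);
        }

        return lost;
    }

    public static void main(String[] args) {
        Map<UUID, Map<Integer, PartState>> node2part = new HashMap<>();

        node2part.put(UUID.randomUUID(), Map.of(0, PartState.OWNING, 1, PartState.MOVING));
        node2part.put(UUID.randomUUID(), Map.of(1, PartState.MOVING, 2, PartState.OWNING));

        // Partition 1 has no OWNING copy anywhere, so it is reported as lost.
        System.out.println(detectLost(3, node2part)); // [1]
    }
}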
Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap in project ignite by apache.
The class GridClientPartitionTopology, method updateLocal.
/**
 * Updates value for a single partition.
 *
 * @param p Partition.
 * @param nodeId Node ID.
 * @param state State.
 * @param updateSeq Update sequence.
 */
private void updateLocal(int p, UUID nodeId, GridDhtPartitionState state, long updateSeq) {
    assert lock.isWriteLockedByCurrentThread();
    assert nodeId.equals(cctx.localNodeId());

    // In case a node joins, get the topology at the time of the joining node.
    ClusterNode oldest = discoCache.oldestAliveServerNode();

    // If this node became the oldest node.
    if (cctx.localNode().equals(oldest)) {
        long seq = node2part.updateSequence();

        if (seq != updateSeq) {
            if (seq > updateSeq) {
                if (this.updateSeq.get() < seq) {
                    // Update global counter if necessary.
                    boolean b = this.updateSeq.compareAndSet(this.updateSeq.get(), seq + 1);

                    assert b : "Invalid update sequence [updateSeq=" + updateSeq + ", seq=" + seq +
                        ", curUpdateSeq=" + this.updateSeq.get() + ", node2part=" + node2part.toFullString() + ']';

                    updateSeq = seq + 1;
                }
                else
                    updateSeq = seq;
            }

            node2part.updateSequence(updateSeq);
        }
    }

    GridDhtPartitionMap map = node2part.get(nodeId);

    if (map == null)
        node2part.put(nodeId, map = new GridDhtPartitionMap(nodeId, updateSeq, topVer, GridPartitionStateMap.EMPTY, false));

    map.updateSequence(updateSeq, topVer);

    map.put(p, state);

    Set<UUID> ids = part2node.get(p);

    if (ids == null)
        part2node.put(p, ids = U.newHashSet(3));

    ids.add(nodeId);
}
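The oldest-node branch reconciles three counters: the sequence stored in the full map, the updateSeq argument passed by the caller, and the topology-wide atomic counter. A simplified sketch of that reconciliation, assuming a plain AtomicLong stands in for the global counter (names are illustrative):

import java.util.concurrent.atomic.AtomicLong;

public class UpdateSeqReconcile {
    /** Global update sequence, analogous to the topology-wide counter. */
    static final AtomicLong globalSeq = new AtomicLong();

    /**
     * Reconciles the caller-supplied sequence with the one already stored in the map.
     * Returns the sequence that should be written back to the map.
     */
    static long reconcile(long mapSeq, long callerSeq) {
        if (mapSeq > callerSeq) {
            long cur = globalSeq.get();

            if (cur < mapSeq) {
                // The map is ahead: advance the global counter past it.
                globalSeq.compareAndSet(cur, mapSeq + 1);

                return mapSeq + 1;
            }

            return mapSeq;
        }

        return callerSeq;
    }

    public static void main(String[] args) {
        globalSeq.set(5);

        System.out.println(reconcile(7, 6)); // 8: map was ahead of the caller, counter bumped.
        System.out.println(globalSeq.get()); // 8
        System.out.println(reconcile(3, 9)); // 9: caller already ahead, keep its sequence.
    }
}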
Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap in project ignite by apache.
The class GridClientPartitionTopology, method update.
/**
 * {@inheritDoc}
 */
@Override public boolean update(@Nullable GridDhtPartitionExchangeId exchId, GridDhtPartitionMap parts, boolean force) {
    if (log.isDebugEnabled())
        log.debug("Updating single partition map [exchId=" + exchId + ", parts=" + mapString(parts) + ']');

    if (!cctx.discovery().alive(parts.nodeId())) {
        if (log.isDebugEnabled())
            log.debug("Received partition update for non-existing node (will ignore) [exchId=" + exchId +
                ", parts=" + parts + ']');

        return false;
    }

    lock.writeLock().lock();

    try {
        if (stopping)
            return false;

        if (!force) {
            if (lastExchangeVer != null && exchId != null && lastExchangeVer.compareTo(exchId.topologyVersion()) > 0) {
                if (log.isDebugEnabled())
                    log.debug("Stale exchange id for single partition map update (will ignore) [lastExchVer=" +
                        lastExchangeVer + ", exchId=" + exchId + ']');

                return false;
            }
        }

        if (exchId != null)
            lastExchangeVer = exchId.topologyVersion();

        if (node2part == null)
            // Create invalid partition map.
            node2part = new GridDhtPartitionFullMap();

        GridDhtPartitionMap cur = node2part.get(parts.nodeId());

        if (force) {
            if (cur != null && cur.topologyVersion().initialized())
                parts.updateSequence(cur.updateSequence(), cur.topologyVersion());
        }
        else if (isStaleUpdate(cur, parts)) {
            if (log.isDebugEnabled())
                log.debug("Stale update for single partition map update (will ignore) [exchId=" + exchId +
                    ", curMap=" + cur + ", newMap=" + parts + ']');

            return false;
        }

        long updateSeq = this.updateSeq.incrementAndGet();

        node2part = new GridDhtPartitionFullMap(node2part, updateSeq);

        boolean changed = false;

        if (cur == null || !cur.equals(parts))
            changed = true;

        node2part.put(parts.nodeId(), parts);

        // Add new mappings.
        for (Map.Entry<Integer, GridDhtPartitionState> e : parts.entrySet()) {
            int p = e.getKey();

            Set<UUID> ids = part2node.get(p);

            if (e.getValue() == MOVING || e.getValue() == OWNING) {
                if (ids == null)
                    // Initialize HashSet to size 3 in anticipation that there won't be
                    // more than 3 nodes per partition.
                    part2node.put(p, ids = U.newHashSet(3));

                changed |= ids.add(parts.nodeId());
            }
            else {
                if (ids != null)
                    changed |= ids.remove(parts.nodeId());
            }
        }

        // Remove obsolete mappings.
        if (cur != null) {
            for (Integer p : F.view(cur.keySet(), F0.notIn(parts.keySet()))) {
                Set<UUID> ids = part2node.get(p);

                if (ids != null)
                    changed |= ids.remove(parts.nodeId());
            }
        }

        consistencyCheck();

        if (log.isDebugEnabled())
            log.debug("Partition map after single update: " + fullMapString());

        return changed;
    }
    finally {
        lock.writeLock().unlock();
    }
}
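The "Add new mappings" and "Remove obsolete mappings" blocks keep the reverse index part2node (partition id to set of node ids) consistent with the incoming per-node map. A standalone sketch of that bookkeeping with JDK collections, slightly simplified (it drops the node from every partition it no longer reports, rather than diffing against the previously stored map):

import java.util.*;

public class ReverseIndexUpdate {
    enum PartState { OWNING, MOVING, RENTING, EVICTED }

    /**
     * Applies one node's partition map to the reverse index and
     * reports whether anything changed.
     */
    static boolean apply(UUID nodeId, Map<Integer, PartState> parts, Map<Integer, Set<UUID>> part2node) {
        boolean changed = false;

        // Add the node for partitions it now owns or is rebalancing; drop it otherwise.
        for (Map.Entry<Integer, PartState> e : parts.entrySet()) {
            Set<UUID> ids = part2node.computeIfAbsent(e.getKey(), p -> new HashSet<>(3));

            if (e.getValue() == PartState.OWNING || e.getValue() == PartState.MOVING)
                changed |= ids.add(nodeId);
            else
                changed |= ids.remove(nodeId);
        }

        // Remove the node from partitions it no longer reports at all.
        for (Map.Entry<Integer, Set<UUID>> e : part2node.entrySet()) {
            if (!parts.containsKey(e.getKey()))
                changed |= e.getValue().remove(nodeId);
        }

        return changed;
    }

    public static void main(String[] args) {
        Map<Integer, Set<UUID>> part2node = new HashMap<>();
        UUID node = UUID.randomUUID();

        System.out.println(apply(node, Map.of(0, PartState.OWNING, 1, PartState.MOVING), part2node)); // true
        System.out.println(apply(node, Map.of(0, PartState.RENTING), part2node)); // true: 0 released, 1 stale
        System.out.println(part2node); // both partition sets are now empty
    }
}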
Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap in project ignite by apache.
The class GridCachePartitionExchangeManager, method createPartitionsSingleMessage.
/**
 * Creates partitions single message for selected cache groups.
 *
 * @param exchangeId Exchange ID.
 * @param clientOnlyExchange Client exchange flag.
 * @param sndCounters {@code True} if partition update counters need to be sent.
 * @param newCntrMap {@code True} if it is possible to use {@link CachePartitionPartialCountersMap}.
 * @param exchActions Exchange actions.
 * @param grps Selected cache groups.
 * @return Message.
 */
public GridDhtPartitionsSingleMessage createPartitionsSingleMessage(
    @Nullable GridDhtPartitionExchangeId exchangeId,
    boolean clientOnlyExchange,
    boolean sndCounters,
    boolean newCntrMap,
    ExchangeActions exchActions,
    Collection<CacheGroupContext> grps
) {
    GridDhtPartitionsSingleMessage m = new GridDhtPartitionsSingleMessage(exchangeId,
        clientOnlyExchange,
        cctx.versions().last(),
        true);

    Map<Object, T2<Integer, GridPartitionStateMap>> dupData = new HashMap<>();

    for (CacheGroupContext grp : grps) {
        if (!grp.isLocal() && (exchActions == null || !exchActions.cacheGroupStopping(grp.groupId()))) {
            GridDhtPartitionMap locMap = grp.topology().localPartitionMap();

            addPartitionMap(m, dupData, true, grp.groupId(), locMap, grp.affinity().similarAffinityKey());

            if (sndCounters) {
                CachePartitionPartialCountersMap cntrsMap = grp.topology().localUpdateCounters(true);

                m.addPartitionUpdateCounters(grp.groupId(),
                    newCntrMap ? cntrsMap : CachePartitionPartialCountersMap.toCountersMap(cntrsMap));
            }

            m.addPartitionSizes(grp.groupId(), grp.topology().partitionSizes());
        }
    }

    for (GridClientPartitionTopology top : clientTops.values()) {
        if (m.partitions() != null && m.partitions().containsKey(top.groupId()))
            continue;

        GridDhtPartitionMap locMap = top.localPartitionMap();

        addPartitionMap(m, dupData, true, top.groupId(), locMap, top.similarAffinityKey());

        if (sndCounters) {
            CachePartitionPartialCountersMap cntrsMap = top.localUpdateCounters(true);

            m.addPartitionUpdateCounters(top.groupId(),
                newCntrMap ? cntrsMap : CachePartitionPartialCountersMap.toCountersMap(cntrsMap));
        }

        m.addPartitionSizes(top.groupId(), top.partitionSizes());
    }

    return m;
}
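The dupData map keyed by the similar-affinity key lets cache groups whose partition state maps are identical share a single serialized map instead of repeating it per group in the message. A rough standalone sketch of that deduplication idea (the types and the register method below are illustrative, not addPartitionMap's real signature):

import java.util.*;

public class PartitionMapDedup {
    /**
     * Registers a group's partition map, reusing an already-registered map when
     * another group with the same affinity key has identical content.
     * Returns the group ID whose map should be referenced (itself if registered first).
     */
    static int register(int grpId, Object affKey, Map<Integer, String> partMap,
        Map<Object, AbstractMap.SimpleEntry<Integer, Map<Integer, String>>> dupData) {

        AbstractMap.SimpleEntry<Integer, Map<Integer, String>> prev = dupData.get(affKey);

        if (prev != null && prev.getValue().equals(partMap))
            return prev.getKey(); // Reuse the previously registered group's map.

        dupData.put(affKey, new AbstractMap.SimpleEntry<>(grpId, partMap));

        return grpId;
    }

    public static void main(String[] args) {
        Map<Object, AbstractMap.SimpleEntry<Integer, Map<Integer, String>>> dupData = new HashMap<>();

        Map<Integer, String> map = Map.of(0, "OWNING", 1, "MOVING");

        System.out.println(register(101, "aff-A", map, dupData));                 // 101: first registration
        System.out.println(register(202, "aff-A", new HashMap<>(map), dupData));  // 101: identical map reused
        System.out.println(register(303, "aff-B", Map.of(0, "OWNING"), dupData)); // 303: different key and content
    }
}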
Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap in project ignite by apache.
The class GridCachePartitionExchangeManager, method processSinglePartitionUpdate.
/**
 * @param node Sender cluster node.
 * @param msg Message.
 */
private void processSinglePartitionUpdate(final ClusterNode node, final GridDhtPartitionsSingleMessage msg) {
    if (!enterBusy())
        return;

    try {
        if (msg.exchangeId() == null) {
            if (log.isDebugEnabled())
                log.debug("Received local partition update [nodeId=" + node.id() + ", parts=" + msg + ']');

            boolean updated = false;

            for (Map.Entry<Integer, GridDhtPartitionMap> entry : msg.partitions().entrySet()) {
                Integer grpId = entry.getKey();

                CacheGroupContext grp = cctx.cache().cacheGroup(grpId);

                if (grp != null && !grp.topology().initialized())
                    continue;

                GridDhtPartitionTopology top = null;

                if (grp == null)
                    top = clientTops.get(grpId);
                else if (!grp.isLocal())
                    top = grp.topology();

                if (top != null) {
                    updated |= top.update(null, entry.getValue(), false);

                    cctx.affinity().checkRebalanceState(top, grpId);
                }
            }

            if (updated) {
                if (log.isDebugEnabled())
                    log.debug("Partitions have been scheduled to resend [reason=Single update from " + node.id() + "]");

                scheduleResendPartitions();
            }
        }
        else {
            GridDhtPartitionsExchangeFuture exchFut = exchangeFuture(msg.exchangeId());

            if (log.isTraceEnabled())
                log.trace("Notifying exchange future about single message: " + exchFut);

            if (msg.client()) {
                AffinityTopologyVersion initVer = exchFut.initialVersion();
                AffinityTopologyVersion readyVer = readyAffinityVersion();

                if (initVer.compareTo(readyVer) < 0 && !exchFut.isDone()) {
                    U.warn(log, "Client node tries to connect but its exchange " +
                        "info is cleaned up from exchange history. " +
                        "Consider increasing 'IGNITE_EXCHANGE_HISTORY_SIZE' property " +
                        "or start clients in smaller batches. " +
                        "Current settings and versions: " +
                        "[IGNITE_EXCHANGE_HISTORY_SIZE=" + EXCHANGE_HISTORY_SIZE + ", " +
                        "initVer=" + initVer + ", " +
                        "readyVer=" + readyVer + "].");

                    exchFut.forceClientReconnect(node, msg);

                    return;
                }
            }

            exchFut.onReceiveSingleMessage(node, msg);
        }
    }
    finally {
        leaveBusy();
    }
}
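For client nodes, the exchange future's initial topology version is compared with the latest ready affinity version: if the exchange started on an older version and has not completed, the exchange history no longer covers the client and it is forced to reconnect. A minimal sketch of that check, with a simplified stand-in for AffinityTopologyVersion (the TopVer class below is illustrative):

public class ClientReconnectCheck {
    /** Simplified topology version: a major term plus a minor counter. */
    static final class TopVer implements Comparable<TopVer> {
        final long major;
        final int minor;

        TopVer(long major, int minor) {
            this.major = major;
            this.minor = minor;
        }

        @Override public int compareTo(TopVer o) {
            int cmp = Long.compare(major, o.major);

            return cmp != 0 ? cmp : Integer.compare(minor, o.minor);
        }
    }

    /** True if the client's exchange is older than what the cluster has already made ready. */
    static boolean needsReconnect(TopVer initVer, TopVer readyVer, boolean exchangeDone) {
        return initVer.compareTo(readyVer) < 0 && !exchangeDone;
    }

    public static void main(String[] args) {
        System.out.println(needsReconnect(new TopVer(5, 0), new TopVer(7, 1), false)); // true: history moved on
        System.out.println(needsReconnect(new TopVer(7, 1), new TopVer(7, 1), false)); // false: versions match
    }
}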