Use of org.apache.ignite.internal.processors.affinity.AffinityAssignment in project ignite by apache.
The class GridDhtPartitionTopologyImpl, method update.
/** {@inheritDoc} */
@Override public boolean update(@Nullable GridDhtPartitionExchangeId exchId, GridDhtPartitionMap parts, boolean force) {
    if (log.isDebugEnabled()) {
        log.debug("Updating single partition map [grp=" + grp.cacheOrGroupName() + ", exchId=" + exchId + ", parts=" + mapString(parts) + ']');
    }

    if (!ctx.discovery().alive(parts.nodeId())) {
        if (log.isTraceEnabled()) {
            log.trace("Received partition update for non-existing node (will ignore) [grp=" + grp.cacheOrGroupName() + ", exchId=" + exchId + ", parts=" + parts + ']');
        }

        return false;
    }

    ctx.database().checkpointReadLock();

    try {
        lock.writeLock().lock();

        try {
            if (stopping)
                return false;

            if (!force) {
                if (lastTopChangeVer.initialized() && exchId != null && lastTopChangeVer.compareTo(exchId.topologyVersion()) > 0) {
                    U.warn(log, "Stale exchange id for single partition map update (will ignore) [grp=" + grp.cacheOrGroupName() + ", lastTopChange=" + lastTopChangeVer + ", readyTopVer=" + readyTopVer + ", exch=" + exchId.topologyVersion() + ']');

                    return false;
                }
            }

            if (node2part == null)
                // Create invalid partition map.
                node2part = new GridDhtPartitionFullMap();

            GridDhtPartitionMap cur = node2part.get(parts.nodeId());

            if (force) {
                if (cur != null && cur.topologyVersion().initialized())
                    parts.updateSequence(cur.updateSequence(), cur.topologyVersion());
            }
            else if (isStaleUpdate(cur, parts)) {
                assert cur != null;

                String msg = "Stale update for single partition map update (will ignore) [nodeId=" + parts.nodeId() + ", grp=" + grp.cacheOrGroupName() + ", exchId=" + exchId + ", curMap=" + cur + ", newMap=" + parts + ']';

                // This is the usual situation when the partition maps are equal; just log a trace message.
                if (cur.compareTo(parts) == 0) {
                    if (log.isTraceEnabled())
                        log.trace(msg);
                }
                else
                    U.warn(log, msg);

                return false;
            }

            long updateSeq = this.updateSeq.incrementAndGet();

            node2part.newUpdateSequence(updateSeq);

            boolean changed = false;

            if (cur == null || !cur.equals(parts))
                changed = true;

            node2part.put(parts.nodeId(), parts);

            // During exchange the diff is calculated after all messages are received and affinity is initialized.
            if (exchId == null && !grp.isReplicated()) {
                if (readyTopVer.initialized() && readyTopVer.compareTo(diffFromAffinityVer) >= 0) {
                    AffinityAssignment affAssignment = grp.affinity().readyAffinity(readyTopVer);

                    // Add new mappings.
                    for (Map.Entry<Integer, GridDhtPartitionState> e : parts.entrySet()) {
                        int p = e.getKey();

                        Set<UUID> diffIds = diffFromAffinity.get(p);

                        if ((e.getValue() == MOVING || e.getValue() == OWNING || e.getValue() == RENTING) && !affAssignment.getIds(p).contains(parts.nodeId())) {
                            if (diffIds == null)
                                diffFromAffinity.put(p, diffIds = U.newHashSet(3));

                            if (diffIds.add(parts.nodeId()))
                                changed = true;
                        }
                        else {
                            if (diffIds != null && diffIds.remove(parts.nodeId())) {
                                changed = true;

                                if (diffIds.isEmpty())
                                    diffFromAffinity.remove(p);
                            }
                        }
                    }

                    // Remove obsolete mappings.
                    if (cur != null) {
                        for (Integer p : F.view(cur.keySet(), F0.notIn(parts.keySet()))) {
                            Set<UUID> ids = diffFromAffinity.get(p);

                            if (ids != null && ids.remove(parts.nodeId())) {
                                changed = true;

                                if (ids.isEmpty())
                                    diffFromAffinity.remove(p);
                            }
                        }
                    }

                    diffFromAffinityVer = readyTopVer;
                }
            }

            if (readyTopVer.initialized() && readyTopVer.equals(lastTopChangeVer)) {
                AffinityAssignment aff = grp.affinity().readyAffinity(readyTopVer);

                if (exchId == null)
                    changed |= checkEvictions(updateSeq, aff);

                updateRebalanceVersion(aff.topologyVersion(), aff.assignment());
            }

            consistencyCheck();

            if (log.isDebugEnabled())
                log.debug("Partition map after single update [grp=" + grp.cacheOrGroupName() + ", map=" + fullMapString() + ']');

            if (changed && exchId == null) {
                if (log.isDebugEnabled())
                    log.debug("Partitions have been scheduled to resend [reason=Single map update, grp=" + grp.cacheOrGroupName() + ']');

                ctx.exchange().scheduleResendPartitions();
            }

            return changed;
        }
        finally {
            lock.writeLock().unlock();
        }
    }
    finally {
        ctx.database().checkpointReadUnlock();
    }
}
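The core of the method above is the diff-from-affinity bookkeeping: a node that reports a partition as MOVING, OWNING or RENTING while it is absent from the AffinityAssignment's owner set for that partition gets recorded as a deviation, and the record is dropped again once the report and the affinity agree. Below is a minimal standalone sketch of that bookkeeping; it relies only on the AffinityAssignment.getIds(int) call seen above, while the tracker class and its map are simplified stand-ins for Ignite's actual fields (the import path for GridDhtPartitionState varies between Ignite versions).

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.UUID;

import org.apache.ignite.internal.processors.affinity.AffinityAssignment;
import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState;

import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.MOVING;
import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.OWNING;
import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.RENTING;

/** Simplified stand-in for the diff-from-affinity bookkeeping in GridDhtPartitionTopologyImpl. */
class AffinityDiffTracker {
    /** Partition -> ids of nodes holding it outside the current affinity. */
    private final Map<Integer, Set<UUID>> diffFromAffinity = new HashMap<>();

    /**
     * @param nodeId Node that sent the single partition map.
     * @param parts Reported partition states of that node.
     * @param aff Ready affinity for the current topology version.
     * @return {@code true} if the recorded diff changed.
     */
    boolean apply(UUID nodeId, Map<Integer, GridDhtPartitionState> parts, AffinityAssignment aff) {
        boolean changed = false;

        for (Map.Entry<Integer, GridDhtPartitionState> e : parts.entrySet()) {
            int p = e.getKey();
            GridDhtPartitionState state = e.getValue();

            boolean holds = state == MOVING || state == OWNING || state == RENTING;

            if (holds && !aff.getIds(p).contains(nodeId))
                // Node holds the partition but affinity does not assign it: record the deviation.
                changed |= diffFromAffinity.computeIfAbsent(p, k -> new HashSet<>()).add(nodeId);
            else {
                // Report and affinity agree: drop any stale deviation record.
                Set<UUID> ids = diffFromAffinity.get(p);

                if (ids != null && ids.remove(nodeId)) {
                    changed = true;

                    if (ids.isEmpty())
                        diffFromAffinity.remove(p);
                }
            }
        }

        return changed;
    }
}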
Use of org.apache.ignite.internal.processors.affinity.AffinityAssignment in project ignite by apache.
The class CacheGroupContext, method processAffinityAssignmentRequest0.
/**
 * @param nodeId Node ID.
 * @param req Request.
 */
private void processAffinityAssignmentRequest0(UUID nodeId, final GridDhtAffinityAssignmentRequest req) {
    AffinityTopologyVersion topVer = req.topologyVersion();

    if (log.isDebugEnabled())
        log.debug("Affinity is ready for topology version, will send response [topVer=" + topVer + ", node=" + nodeId + ']');

    AffinityAssignment assignment;
    GridDhtAffinityAssignmentResponse res;

    try {
        assignment = aff.cachedAffinity(topVer);

        res = new GridDhtAffinityAssignmentResponse(req.futureId(), grpId, topVer, assignment.assignment());

        if (aff.centralizedAffinityFunction()) {
            assert assignment.idealAssignment() != null;

            res.idealAffinityAssignment(assignment.idealAssignment());
        }

        if (req.sendPartitionsState())
            res.partitionMap(top.partitionMap(true));
    }
    catch (IllegalStateException err) {
        res = new GridDhtAffinityAssignmentResponse(req.futureId(), grpId, topVer, Collections.emptyList());

        res.affinityAssignmentsError(new IgniteCheckedException("Failed to prepare the required affinity assignment [nodeId=" + nodeId + ", topVer=" + topVer + ']', err));
    }

    try {
        ctx.io().send(nodeId, res, AFFINITY_POOL);
    }
    catch (IgniteCheckedException e) {
        U.error(log, "Failed to send affinity assignment response to remote node [node=" + nodeId + ']', e);
    }
}
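On the receiving side, the useful payload of this response is assignment.assignment(): a List<List<ClusterNode>> whose outer index is the partition number and whose inner list is, by Ignite's convention, ordered with the primary node first followed by the backups. A small sketch of reading that structure follows (an illustrative helper, not part of the Ignite API):

import java.util.List;
import java.util.UUID;

import org.apache.ignite.cluster.ClusterNode;

/** Helpers for reading a per-partition assignment as carried by the response above. */
final class AssignmentReader {
    private AssignmentReader() {}

    /** @return Id of the primary node for the partition, or {@code null} if unassigned. */
    static UUID primary(List<List<ClusterNode>> assignment, int part) {
        List<ClusterNode> nodes = assignment.get(part);

        // By convention the first node in the per-partition list is the primary.
        return nodes.isEmpty() ? null : nodes.get(0).id();
    }

    /** @return Number of backups currently assigned to the partition. */
    static int backups(List<List<ClusterNode>> assignment, int part) {
        return Math.max(0, assignment.get(part).size() - 1);
    }
}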
Use of org.apache.ignite.internal.processors.affinity.AffinityAssignment in project ignite by apache.
The class GridDhtPreloader, method generateAssignments.
/** {@inheritDoc} */
@Override public GridDhtPreloaderAssignments generateAssignments(GridDhtPartitionExchangeId exchId, GridDhtPartitionsExchangeFuture exchFut) {
    assert exchFut == null || exchFut.isDone();

    // No assignments for disabled preloader.
    GridDhtPartitionTopology top = grp.topology();

    if (!grp.rebalanceEnabled())
        return new GridDhtPreloaderAssignments(exchId, top.readyTopologyVersion(), false);

    int partitions = grp.affinity().partitions();

    AffinityTopologyVersion topVer = top.readyTopologyVersion();

    assert exchFut == null || exchFut.context().events().topologyVersion().equals(top.readyTopologyVersion()) ||
        exchFut.context().events().topologyVersion().equals(ctx.exchange().lastAffinityChangedTopologyVersion(top.readyTopologyVersion())) :
        "Topology version mismatch [exchId=" + exchId + ", grp=" + grp.name() + ", topVer=" + top.readyTopologyVersion() + ']';

    GridDhtPreloaderAssignments assignments = new GridDhtPreloaderAssignments(exchId, topVer, exchFut != null && exchFut.affinityReassign());

    AffinityAssignment aff = grp.affinity().cachedAffinity(topVer);

    CachePartitionFullCountersMap countersMap = grp.topology().fullUpdateCounters();

    for (int p = 0; p < partitions; p++) {
        if (ctx.exchange().hasPendingServerExchange()) {
            if (log.isDebugEnabled())
                log.debug("Skipping assignments creation, exchange worker has pending assignments: " + exchId);

            assignments.cancelled(true);

            return assignments;
        }

        // If partition belongs to local node.
        if (aff.get(p).contains(ctx.localNode())) {
            GridDhtLocalPartition part = top.localPartition(p);

            assert part != null;
            assert part.id() == p;

            // Do not rebalance OWNING or LOST partitions.
            if (part.state() == OWNING || part.state() == LOST)
                continue;

            // State should be switched to MOVING during PME.
            if (part.state() != MOVING) {
                throw new AssertionError("Partition has invalid state for rebalance " + aff.topologyVersion() + " " + part);
            }

            ClusterNode histSupplier = null;

            if (grp.persistenceEnabled() && exchFut != null) {
                List<UUID> nodeIds = exchFut.partitionHistorySupplier(grp.groupId(), p, part.initialUpdateCounter());

                if (!F.isEmpty(nodeIds))
                    histSupplier = ctx.discovery().node(nodeIds.get(p % nodeIds.size()));
            }

            if (histSupplier != null && !exchFut.isClearingPartition(grp, p)) {
                assert grp.persistenceEnabled();
                assert remoteOwners(p, topVer).contains(histSupplier) : remoteOwners(p, topVer);

                GridDhtPartitionDemandMessage msg = assignments.get(histSupplier);

                if (msg == null) {
                    assignments.put(histSupplier, msg = new GridDhtPartitionDemandMessage(top.updateSequence(), assignments.topologyVersion(), grp.groupId()));
                }

                // TODO FIXME https://issues.apache.org/jira/browse/IGNITE-11790
                msg.partitions().addHistorical(p, part.initialUpdateCounter(), countersMap.updateCounter(p), partitions);
            }
            else {
                int partId = p;

                List<ClusterNode> picked = remoteOwners(p, topVer, (node, owners) -> {
                    if (owners.size() == 1)
                        return true;

                    return exchFut == null || exchFut.isNodeApplicableForFullRebalance(node.id(), grp.groupId(), partId);
                });

                if (!picked.isEmpty()) {
                    ClusterNode n = picked.get(p % picked.size());

                    GridDhtPartitionDemandMessage msg = assignments.get(n);

                    if (msg == null) {
                        assignments.put(n, msg = new GridDhtPartitionDemandMessage(top.updateSequence(), assignments.topologyVersion(), grp.groupId()));
                    }

                    msg.partitions().addFull(p);
                }
            }
        }
    }

    if (!assignments.isEmpty()) {
        if (exchFut != null && exchFut.rebalanced()) {
            GridDhtPartitionDemandMessage first = assignments.values().iterator().next();

            GridDhtLocalPartition locPart = grp.topology().localPartition(first.partitions().all().iterator().next());

            SB buf = new SB(1024);

            buf.a("Unexpected rebalance on rebalanced cluster: assignments=");
            buf.a(assignments);
            buf.a(", locPart=");

            if (locPart != null)
                locPart.dumpDebugInfo(buf);
            else
                buf.a("NA");

            throw new AssertionError(buf.toString());
        }

        ctx.database().lastCheckpointInapplicableForWalRebalance(grp.groupId());
    }

    return assignments;
}
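Note that both the historical branch (nodeIds.get(p % nodeIds.size())) and the full-rebalance branch (picked.get(p % picked.size())) select a supplier by taking the partition number modulo the candidate count, so the choice is deterministic for a given owner list and demand for different partitions spreads across the available owners. A standalone sketch of that selection follows (a hypothetical helper with simplified inputs):

import java.util.List;

import org.apache.ignite.cluster.ClusterNode;

/** Deterministic supplier selection as used by the assignment generation above. */
final class SupplierPicker {
    private SupplierPicker() {}

    /**
     * @param part Partition being rebalanced.
     * @param owners Candidate owner nodes for the partition (must be non-empty).
     * @return Node that should supply the partition; the same partition always maps to the same owner.
     */
    static ClusterNode pick(int part, List<ClusterNode> owners) {
        // Modulo keeps the choice stable for a given owner list and spreads
        // different partitions across different suppliers.
        return owners.get(part % owners.size());
    }
}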
Use of org.apache.ignite.internal.processors.affinity.AffinityAssignment in project ignite by apache.
The class ClientCachePartitionsRequest, method processCache.
/**
 * Processes a cache and creates a new partition mapping if the cache does not belong to any existing group.
 * @param ctx Connection context.
 * @param groups Cache affinity groups.
 * @param cacheGroupIds Map of known group IDs.
 * @param affinityVer Affinity topology version.
 * @param cacheDesc Cache descriptor.
 * @return {@code null} if the cache was assigned to a known group, or a new client cache partition awareness
 * group if the cache does not belong to any existing one.
 */
private static ClientCachePartitionAwarenessGroup processCache(
    ClientConnectionContext ctx,
    List<ClientCachePartitionAwarenessGroup> groups,
    Map<Integer, ClientCachePartitionAwarenessGroup> cacheGroupIds,
    ClientAffinityTopologyVersion affinityVer,
    DynamicCacheDescriptor cacheDesc
) {
    int cacheGroupId = cacheDesc.groupId();
    int cacheId = cacheDesc.cacheId();

    ClientCachePartitionAwarenessGroup group = cacheGroupIds.get(cacheGroupId);

    if (group != null) {
        // Cache group is found. It means that the cache belongs to one of the cache groups with a known mapping.
        // Just add our cache to this group here.
        group.addCache(cacheDesc);

        return null;
    }

    AffinityAssignment assignment = getCacheAssignment(ctx, affinityVer, cacheId);

    // If the assignment is not available for the cache for the required affinity version, ignore the cache.
    if (assignment == null)
        return null;

    ClientCachePartitionMapping mapping = null;

    if (isApplicable(cacheDesc.cacheConfiguration()))
        mapping = new ClientCachePartitionMapping(cacheId, assignment);

    group = getCompatibleGroup(groups, mapping);

    if (group != null) {
        group.addCache(cacheDesc);

        cacheGroupIds.put(cacheGroupId, group);

        return null;
    }

    CacheObjectBinaryProcessorImpl proc = (CacheObjectBinaryProcessorImpl)ctx.kernalContext().cacheObjects();

    return new ClientCachePartitionAwarenessGroup(proc, mapping, cacheDesc);
}
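The ClientCachePartitionMapping constructed above essentially condenses the AffinityAssignment into a partition-to-node routing table that thin clients use for best-effort affinity. A simplified illustration of that idea follows, assuming only the assignment() structure shown earlier (the class below is illustrative, not Ignite's actual implementation):

import java.util.List;
import java.util.UUID;

import org.apache.ignite.cluster.ClusterNode;
import org.apache.ignite.internal.processors.affinity.AffinityAssignment;

/** Illustrative partition-to-primary routing table derived from an affinity assignment. */
final class PrimaryRoutingTable {
    /** partitionToPrimary[p] is the primary node id for partition p, or null if unassigned. */
    private final UUID[] partitionToPrimary;

    PrimaryRoutingTable(AffinityAssignment assignment) {
        List<List<ClusterNode>> assign = assignment.assignment();

        partitionToPrimary = new UUID[assign.size()];

        for (int p = 0; p < assign.size(); p++) {
            List<ClusterNode> nodes = assign.get(p);

            // First node per partition is the primary by convention.
            partitionToPrimary[p] = nodes.isEmpty() ? null : nodes.get(0).id();
        }
    }

    /** @return Primary node id for the partition, or {@code null}. */
    UUID primaryFor(int part) {
        return partitionToPrimary[part];
    }
}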
Use of org.apache.ignite.internal.processors.affinity.AffinityAssignment in project ignite by apache.
The class CachePartitionStateTest, method partitionState2.
/**
 * @param backups Number of backups.
 * @param crdAffNode If {@code false}, the cache is not created on the coordinator.
 * @throws Exception If failed.
 */
private void partitionState2(int backups, boolean crdAffNode) throws Exception {
    startGrids(3);

    blockSupplySend(DEFAULT_CACHE_NAME);

    ccfg = cacheConfiguration(DEFAULT_CACHE_NAME, backups);

    if (!crdAffNode)
        ccfg.setNodeFilter(new TestCacheNodeExcludingFilter(getTestIgniteInstanceName(0)));

    startGrid(4);

    AffinityAssignment assign0 = grid(1).context().cache().internalCache(DEFAULT_CACHE_NAME).context().affinity().assignment(new AffinityTopologyVersion(4, 0));

    awaitPartitionMapExchange(true, true, null, false);

    checkPartitionsState(assign0, DEFAULT_CACHE_NAME, OWNING);

    checkRebalance(DEFAULT_CACHE_NAME, true);

    if (!crdAffNode)
        ignite(0).cache(DEFAULT_CACHE_NAME);

    checkPartitionsState(assign0, DEFAULT_CACHE_NAME, OWNING);

    checkRebalance(DEFAULT_CACHE_NAME, true);

    stopBlock();

    startGrid(5);

    AffinityAssignment assign1 = grid(1).context().cache().internalCache(DEFAULT_CACHE_NAME).context().affinity().assignment(new AffinityTopologyVersion(5, 1));

    awaitPartitionMapExchange(true, true, null, false);

    checkPartitionsState(assign1, DEFAULT_CACHE_NAME, OWNING);

    checkRebalance(DEFAULT_CACHE_NAME, true);
}
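The assertions in this test revolve around comparing the captured AffinityAssignment against the partition states observed on the topology. A hedged sketch of what such a check could look like is shown below; checkPartitionsState itself is defined elsewhere in the test class, so this helper is hypothetical, and the GridDhtPartitionTopology.partitionState(UUID, int) lookup plus the import paths are assumptions that may differ between Ignite versions.

import java.util.List;

import org.apache.ignite.cluster.ClusterNode;
import org.apache.ignite.internal.IgniteEx;
import org.apache.ignite.internal.processors.affinity.AffinityAssignment;
import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState;
import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology;

import static org.junit.Assert.assertEquals;

/** Hypothetical assertion in the spirit of checkPartitionsState: every partition that the
 * captured affinity assigns to a node must be in the expected state on the topology. */
final class PartitionStateAssert {
    private PartitionStateAssert() {}

    static void assertPartitionStates(IgniteEx ignite, String cacheName, AffinityAssignment assign, GridDhtPartitionState expected) {
        GridDhtPartitionTopology top = ignite.context().cache().internalCache(cacheName).context().topology();

        List<List<ClusterNode>> assignment = assign.assignment();

        for (int p = 0; p < assignment.size(); p++) {
            for (ClusterNode n : assignment.get(p))
                assertEquals("part=" + p + ", node=" + n.id(), expected, top.partitionState(n.id(), p));
        }
    }
}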