Use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.
Class SingleSplitTestTask, method map().
/** {@inheritDoc} */
@NotNull @Override public Map<? extends ComputeJob, ClusterNode> map(List<ClusterNode> subgrid, Integer arg) {
    assert !subgrid.isEmpty() : "Subgrid cannot be empty.";

    Map<ComputeJobAdapter, ClusterNode> jobs = new HashMap<>(subgrid.size());

    taskSes.setAttribute("1st", "1");
    taskSes.setAttribute("2nd", "2");

    Collection<UUID> assigned = new ArrayList<>(subgrid.size());

    for (int i = 0; i < arg; i++) {
        ComputeJobAdapter job = new ComputeJobAdapter(1) {
            /** Injected job session. */
            @TaskSessionResource
            private ComputeTaskSession jobSes;

            /** {@inheritDoc} */
            @Override public Object execute() {
                assert jobSes != null;

                Integer arg = this.<Integer>argument(0);

                assert arg != null;

                return new SingleSplitTestJobTarget().executeLoadTestJob(arg, jobSes);
            }
        };

        ClusterNode node = balancer.getBalancedNode(job, null);

        assert node != null;

        assigned.add(node.id());

        jobs.put(job, node);
    }

    taskSes.setAttribute("nodes", assigned);

    return jobs;
}
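For context, a task following the pattern above can be wired up entirely through the public compute API. The sketch below is a minimal, hypothetical ComputeTaskAdapter (the class name LoadTask, the "nodes" attribute and the trivial job payload are illustrative, not part of the Ignite test suite) showing the same ingredients: an injected load balancer, an injected task session, and explicit per-node job mapping.

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;

import org.apache.ignite.cluster.ClusterNode;
import org.apache.ignite.compute.ComputeJob;
import org.apache.ignite.compute.ComputeJobAdapter;
import org.apache.ignite.compute.ComputeJobResult;
import org.apache.ignite.compute.ComputeLoadBalancer;
import org.apache.ignite.compute.ComputeTaskAdapter;
import org.apache.ignite.compute.ComputeTaskSession;
import org.apache.ignite.resources.LoadBalancerResource;
import org.apache.ignite.resources.TaskSessionResource;

public class LoadTask extends ComputeTaskAdapter<Integer, Integer> {
    /** Injected load balancer. */
    @LoadBalancerResource
    private ComputeLoadBalancer balancer;

    /** Injected task session. */
    @TaskSessionResource
    private ComputeTaskSession taskSes;

    /** {@inheritDoc} */
    @Override public Map<? extends ComputeJob, ClusterNode> map(List<ClusterNode> subgrid, Integer jobCnt) {
        Map<ComputeJob, ClusterNode> jobs = new HashMap<>(jobCnt);
        Collection<UUID> assigned = new ArrayList<>(jobCnt);

        for (int i = 0; i < jobCnt; i++) {
            ComputeJob job = new ComputeJobAdapter() {
                @Override public Object execute() {
                    return 1; // Trivial payload; a real job would do actual work.
                }
            };

            // Let the injected balancer pick a node for this job.
            ClusterNode node = balancer.getBalancedNode(job, null);

            assigned.add(node.id());
            jobs.put(job, node);
        }

        // Make the chosen node IDs visible to all jobs and to reduce().
        taskSes.setAttribute("nodes", assigned);

        return jobs;
    }

    /** {@inheritDoc} */
    @Override public Integer reduce(List<ComputeJobResult> results) {
        int sum = 0;

        for (ComputeJobResult res : results)
            sum += res.<Integer>getData();

        return sum;
    }
}

Assuming a running Ignite instance, it could be launched with something like ignite.compute().execute(LoadTask.class, 100), which returns the sum of the per-job results.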
Use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.
Class GridAffinityAssignmentJolBenchmark, method measure().
/**
 * @param aff Affinity function.
 * @param parts Partition count.
 * @param nodeCnt Node count.
 * @param backups Backup count.
 */
private static void measure(RendezvousAffinityFunction aff, int parts, int nodeCnt, int backups) throws Exception {
    List<ClusterNode> nodes = new ArrayList<>();

    for (int i = 0; i < nodeCnt; i++) {
        ClusterNode node = node(i);

        nodes.add(node);
    }

    AffinityFunctionContext ctx = new GridAffinityFunctionContextImpl(nodes, new ArrayList<>(), new DiscoveryEvent(),
        new AffinityTopologyVersion(), backups);

    List<List<ClusterNode>> assignment = aff.assignPartitions(ctx);

    setOptimization(false);

    GridAffinityAssignmentV2 ga = new GridAffinityAssignmentV2(new AffinityTopologyVersion(1, 0), assignment, new ArrayList<>());

    System.gc();

    long totalSize = GraphLayout.parseInstance(ga).totalSize();

    System.out.println("Optimized, parts " + parts + " nodeCount " + nodeCnt + " backups " + backups + " " + totalSize);

    setOptimization(true);

    GridAffinityAssignmentV2 ga2 = new GridAffinityAssignmentV2(new AffinityTopologyVersion(1, 0), assignment, new ArrayList<>());

    System.gc();

    long totalSize2 = GraphLayout.parseInstance(ga2).totalSize();

    System.out.println("Deoptimized, parts " + parts + " nodeCount " + nodeCnt + " backups " + backups + " " + totalSize2);

    if (totalSize > totalSize2)
        throw new Exception("Optimized AffinityAssignment size " + totalSize + " is more than deoptimized " + totalSize2);
}
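The benchmark relies on JOL (Java Object Layout) to compare the retained heap size of the optimized and deoptimized assignment objects. Below is a minimal standalone sketch of the same measurement idiom, assuming the jol-core dependency is on the classpath; the measured object is an arbitrary list standing in for the affinity structures above.

import java.util.ArrayList;
import java.util.List;

import org.openjdk.jol.info.GraphLayout;

public class JolFootprintDemo {
    public static void main(String[] args) {
        // Any object graph can be measured; a list of boxed integers stands in
        // for the GridAffinityAssignmentV2 instances measured in the benchmark.
        List<Integer> sample = new ArrayList<>();

        for (int i = 0; i < 10_000; i++)
            sample.add(i);

        // Encourage a stable heap before measuring, as the benchmark does.
        System.gc();

        // Total retained size of the object graph reachable from 'sample'.
        long totalSize = GraphLayout.parseInstance(sample).totalSize();

        System.out.println("Retained size, bytes: " + totalSize);
    }
}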
Use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.
Class CacheAffinitySharedManager, method initAffinityBasedOnPartitionsAvailability().
/**
* Initializes current affinity assignment based on partitions availability. Nodes that have most recent data will
* be considered affinity nodes.
*
* @param topVer Topology version.
* @param fut Exchange future.
* @param c Closure converting affinity diff.
* @param initAff {@code True} if need initialize affinity.
* @return Affinity assignment for each registered cache group.
*/
private <T> Map<Integer, Map<Integer, List<T>>> initAffinityBasedOnPartitionsAvailability(
    final AffinityTopologyVersion topVer,
    final GridDhtPartitionsExchangeFuture fut,
    final IgniteClosure<ClusterNode, T> c,
    final boolean initAff
) {
    final boolean enforcedCentralizedAssignment =
        DiscoveryCustomEvent.requiresCentralizedAffinityAssignment(fut.firstEvent());

    final WaitRebalanceInfo waitRebalanceInfo = enforcedCentralizedAssignment ?
        new WaitRebalanceInfo(fut.exchangeId().topologyVersion()) :
        new WaitRebalanceInfo(fut.context().events().lastServerEventVersion());

    final Collection<ClusterNode> evtNodes = fut.context().events().discoveryCache().serverNodes();

    final Map<Integer, Map<Integer, List<T>>> assignment = new ConcurrentHashMap<>();

    forAllRegisteredCacheGroups(new IgniteInClosureX<CacheGroupDescriptor>() {
        @Override public void applyx(CacheGroupDescriptor desc) throws IgniteCheckedException {
            CacheGroupHolder grpHolder = getOrCreateGroupHolder(topVer, desc);

            if (!grpHolder.rebalanceEnabled ||
                (fut.cacheGroupAddedOnExchange(desc.groupId(), desc.receivedFrom()) && !enforcedCentralizedAssignment))
                return;

            AffinityTopologyVersion affTopVer = grpHolder.affinity().lastVersion();

            assert (affTopVer.topologyVersion() > 0 && !affTopVer.equals(topVer)) || enforcedCentralizedAssignment :
                "Invalid affinity version [last=" + affTopVer + ", futVer=" + topVer + ", grp=" + desc.cacheOrGroupName() + ']';

            List<List<ClusterNode>> curAssignment = grpHolder.affinity().assignments(affTopVer);
            List<List<ClusterNode>> newAssignment = grpHolder.affinity().idealAssignmentRaw();

            assert newAssignment != null;

            List<List<ClusterNode>> newAssignment0 = initAff ? new ArrayList<>(newAssignment) : null;

            GridDhtPartitionTopology top = grpHolder.topology(fut.context().events().discoveryCache());

            Map<Integer, List<T>> cacheAssignment = null;

            for (int p = 0; p < newAssignment.size(); p++) {
                List<ClusterNode> newNodes = newAssignment.get(p);
                List<ClusterNode> curNodes = curAssignment.get(p);

                assert evtNodes.containsAll(newNodes) : "Invalid new assignment [grp=" + grpHolder.aff.cacheOrGroupName() +
                    ", nodes=" + newNodes +
                    ", topVer=" + fut.context().events().discoveryCache().version() +
                    ", evts=" + fut.context().events().events() + "]";

                ClusterNode curPrimary = !curNodes.isEmpty() ? curNodes.get(0) : null;
                ClusterNode newPrimary = !newNodes.isEmpty() ? newNodes.get(0) : null;

                List<ClusterNode> newNodes0 = null;

                assert newPrimary == null || evtNodes.contains(newPrimary) :
                    "Invalid new primary [grp=" + desc.cacheOrGroupName() + ", node=" + newPrimary + ", topVer=" + topVer + ']';

                List<ClusterNode> owners = top.owners(p, topVer);

                // It is essential that curPrimary node has partition in OWNING state.
                if (!owners.isEmpty() && !owners.contains(curPrimary))
                    curPrimary = owners.get(0);

                // If new assignment is empty preserve current ownership for alive nodes.
                if (curPrimary != null && newPrimary == null) {
                    newNodes0 = new ArrayList<>(curNodes.size());

                    for (ClusterNode node : curNodes) {
                        if (evtNodes.contains(node))
                            newNodes0.add(node);
                    }
                }
                else if (curPrimary != null && !curPrimary.equals(newPrimary)) {
                    GridDhtPartitionState state = top.partitionState(newPrimary.id(), p);

                    if (evtNodes.contains(curPrimary)) {
                        if (state != OWNING)
                            newNodes0 = latePrimaryAssignment(grpHolder.affinity(), p, curPrimary, newNodes, waitRebalanceInfo);
                    }
                    else {
                        if (state != OWNING) {
                            for (int i = 1; i < curNodes.size(); i++) {
                                ClusterNode curNode = curNodes.get(i);

                                if (top.partitionState(curNode.id(), p) == OWNING && evtNodes.contains(curNode)) {
                                    newNodes0 = latePrimaryAssignment(grpHolder.affinity(), p, curNode, newNodes, waitRebalanceInfo);

                                    break;
                                }
                            }

                            if (newNodes0 == null) {
                                for (ClusterNode owner : owners) {
                                    if (evtNodes.contains(owner)) {
                                        newNodes0 = latePrimaryAssignment(grpHolder.affinity(), p, owner, newNodes, waitRebalanceInfo);

                                        break;
                                    }
                                }
                            }
                        }
                    }
                }

                // This will happen if no primary has changed but some backups still need to be rebalanced.
                if (!owners.isEmpty() && !owners.containsAll(newNodes) && !top.lostPartitions().contains(p))
                    waitRebalanceInfo.add(grpHolder.groupId(), p, newNodes);

                if (newNodes0 != null) {
                    assert evtNodes.containsAll(newNodes0) : "Invalid late assignment [grp=" + grpHolder.aff.cacheOrGroupName() +
                        ", nodes=" + newNodes +
                        ", topVer=" + fut.context().events().discoveryCache().version() +
                        ", evts=" + fut.context().events().events() + "]";

                    if (newAssignment0 != null)
                        newAssignment0.set(p, newNodes0);

                    if (cacheAssignment == null)
                        cacheAssignment = new HashMap<>();

                    List<T> n = new ArrayList<>(newNodes0.size());

                    for (int i = 0; i < newNodes0.size(); i++)
                        n.add(c.apply(newNodes0.get(i)));

                    cacheAssignment.put(p, n);
                }
            }

            if (cacheAssignment != null)
                assignment.put(grpHolder.groupId(), cacheAssignment);

            if (initAff)
                grpHolder.affinity().initialize(topVer, newAssignment0);

            fut.timeBag().finishLocalStage("Affinity recalculation (partitions availability) " +
                "[grp=" + desc.cacheOrGroupName() + "]");
        }
    });

    if (log.isDebugEnabled()) {
        log.debug("Computed new affinity after node left [topVer=" + topVer +
            ", waitGrps=" + groupNames(waitRebalanceInfo.waitGrps.keySet()) + ']');
    }

    synchronized (mux) {
        waitInfo = !waitRebalanceInfo.empty() ? waitRebalanceInfo : null;
    }

    return assignment;
}
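The method is generic in the closure c, which converts each assigned ClusterNode into whatever representation the caller wants to transport (typically a node ID). A minimal sketch of such a closure, assuming only the public IgniteClosure interface; the class name NodeToIdClosure is illustrative, not Ignite code.

import java.util.UUID;

import org.apache.ignite.cluster.ClusterNode;
import org.apache.ignite.lang.IgniteClosure;

public class NodeToIdClosure implements IgniteClosure<ClusterNode, UUID> {
    /** Converts an affinity node into its ID, a compact form suitable for sending in messages. */
    @Override public UUID apply(ClusterNode node) {
        return node.id();
    }
}

With a closure like this, the returned per-partition lists hold node IDs, and a helper such as toNodes() below performs the reverse conversion.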
Use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.
Class CacheAffinitySharedManager, method toNodes().
/**
* @param topVer Topology version.
* @param ids IDs.
* @return Nodes.
*/
private List<ClusterNode> toNodes(AffinityTopologyVersion topVer, List<UUID> ids) {
    List<ClusterNode> nodes = new ArrayList<>(ids.size());

    for (int i = 0; i < ids.size(); i++) {
        UUID id = ids.get(i);

        ClusterNode node = cctx.discovery().node(topVer, id);

        assert node != null : "Failed to get node [id=" + id + ", topVer=" + topVer +
            ", locNode=" + cctx.localNode() + ", allNodes=" + cctx.discovery().nodes(topVer) + ']';

        nodes.add(node);
    }

    return nodes;
}
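The helper above resolves IDs through the internal discovery manager for a specific topology version and asserts that every node is still present. Against the public API, the closest analogue is ClusterGroup#node(UUID), which resolves against the current topology; below is a small sketch (the NodeLookup class and the skip-unknown-IDs policy are illustrative choices, not Ignite code).

import java.util.ArrayList;
import java.util.List;
import java.util.UUID;

import org.apache.ignite.Ignite;
import org.apache.ignite.cluster.ClusterNode;

public class NodeLookup {
    /** Resolves node IDs against the current topology; IDs of nodes that have left are skipped. */
    static List<ClusterNode> toNodes(Ignite ignite, List<UUID> ids) {
        List<ClusterNode> nodes = new ArrayList<>(ids.size());

        for (UUID id : ids) {
            ClusterNode node = ignite.cluster().node(id);

            if (node != null)
                nodes.add(node);
        }

        return nodes;
    }
}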
Use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.
Class CacheAffinitySharedManager, method processClientCachesRequests().
/**
 * Processes cache start/close requests coming from non-affinity (client) nodes. Called from the exchange thread.
 *
 * @param msg Change request.
 */
void processClientCachesRequests(ClientCacheChangeDummyDiscoveryMessage msg) {
    // Get ready exchange version.
    AffinityTopologyVersion topVer = cctx.exchange().readyAffinityVersion();

    DiscoCache discoCache = cctx.discovery().discoCache(topVer);

    ClusterNode node = discoCache.oldestAliveServerNode();

    // Resolve coordinator for specific version.
    boolean crd = node != null && node.isLocal();

    Map<Integer, Boolean> startedCaches = null;
    Set<Integer> closedCaches = null;

    // Check and start caches via dummy message.
    if (msg.startRequests() != null)
        startedCaches = processClientCacheStartRequests(crd, msg, topVer, discoCache);

    // Check and close caches via dummy message.
    if (msg.cachesToClose() != null)
        closedCaches = processCacheCloseRequests(msg, topVer);

    // Schedule change message.
    if (startedCaches != null || closedCaches != null)
        scheduleClientChangeMessage(startedCaches, closedCaches);
}
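The requests handled above originate from cache operations performed on client (non-affinity) nodes. A minimal sketch of the client-side calls that lead to such start and close requests, assuming at least one server node is already up and reachable; the exact message routing is internal and version-dependent.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.configuration.IgniteConfiguration;

public class ClientCacheLifecycle {
    public static void main(String[] args) {
        // Start a client (non-affinity) node; it assumes a server node is already running.
        Ignite client = Ignition.start(new IgniteConfiguration()
            .setIgniteInstanceName("client")
            .setClientMode(true));

        // Starting a cache from a client triggers a cache start request ...
        IgniteCache<Integer, String> cache = client.getOrCreateCache("demo");

        cache.put(1, "value");

        // ... and closing it triggers a cache close request.
        cache.close();

        client.close();
    }
}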