Use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.
The class GroupTrainerBaseProcessorTask, method map.
/**
 * {@inheritDoc}
 */
@Nullable
@Override
public Map<? extends ComputeJob, ClusterNode> map(List<ClusterNode> subgrid, @Nullable Void arg) throws IgniteException {
    Map<ComputeJob, ClusterNode> res = new HashMap<>();

    for (ClusterNode node : subgrid) {
        BaseLocalProcessorJob<K, V, T, R> job = createJob();

        res.put(job, node);
    }

    return res;
}
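The map() above builds the job-to-node mapping by creating one job per node in the subgrid. Below is a minimal, self-contained sketch of the same pattern as a standalone ComputeTaskAdapter; the class name CountJobsTask and the per-node job body are hypothetical and not part of the Ignite source.

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.cluster.ClusterNode;
import org.apache.ignite.compute.ComputeJob;
import org.apache.ignite.compute.ComputeJobAdapter;
import org.apache.ignite.compute.ComputeJobResult;
import org.apache.ignite.compute.ComputeTaskAdapter;

public class CountJobsTask extends ComputeTaskAdapter<Void, Integer> {
    @Override public Map<? extends ComputeJob, ClusterNode> map(List<ClusterNode> subgrid, Void arg) {
        Map<ComputeJob, ClusterNode> res = new HashMap<>();

        // One job per node, the same pattern as in GroupTrainerBaseProcessorTask.map above.
        for (ClusterNode node : subgrid) {
            res.put(new ComputeJobAdapter() {
                @Override public Object execute() {
                    return 1; // Hypothetical per-node work: each job just reports that it ran.
                }
            }, node);
        }

        return res;
    }

    @Override public Integer reduce(List<ComputeJobResult> results) {
        // Sum the per-node results into a single value.
        return results.stream().mapToInt(r -> r.<Integer>getData()).sum();
    }

    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // Executes the task across the cluster and prints how many jobs ran.
            System.out.println(ignite.compute().execute(new CountJobsTask(), null));
        }
    }
}

Ignite calls map() once with the current subgrid, runs each job on its assigned node, and feeds the per-job results into reduce().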
Use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.
The class GridReduceQueryExecutor, method partitionedUnstableDataNodes.
/**
 * Calculates partition mapping for partitioned cache on unstable topology.
 *
 * @param cacheIds Cache IDs.
 * @return Partition mapping or {@code null} if we can't calculate it due to repartitioning and we need to retry.
 */
@SuppressWarnings("unchecked")
private Map<ClusterNode, IntArray> partitionedUnstableDataNodes(List<Integer> cacheIds) {
    // If the main cache is replicated, just replace it with the first partitioned.
    GridCacheContext<?, ?> cctx = findFirstPartitioned(cacheIds);

    final int partsCnt = cctx.affinity().partitions();

    if (cacheIds.size() > 1) {
        // Check correct number of partitions for partitioned caches.
        for (Integer cacheId : cacheIds) {
            GridCacheContext<?, ?> extraCctx = cacheContext(cacheId);

            if (extraCctx.isReplicated() || extraCctx.isLocal())
                continue;

            int parts = extraCctx.affinity().partitions();

            if (parts != partsCnt)
                throw new CacheException("Number of partitions must be the same for correct collocation [cache1=" +
                    cctx.name() + ", parts1=" + partsCnt + ", cache2=" + extraCctx.name() + ", parts2=" + parts + "]");
        }
    }

    Set<ClusterNode>[] partLocs = new Set[partsCnt];

    // Fill partition locations for main cache.
    for (int p = 0; p < partsCnt; p++) {
        List<ClusterNode> owners = cctx.topology().owners(p);

        if (F.isEmpty(owners)) {
            // Handle special case: no mapping is configured for a partition.
            if (F.isEmpty(cctx.affinity().assignment(NONE).get(p))) {
                // Mark unmapped partition.
                partLocs[p] = UNMAPPED_PARTS;

                continue;
            }
            else if (!F.isEmpty(dataNodes(cctx.groupId(), NONE)))
                // Retry.
                return null;

            throw new CacheException("Failed to find data nodes [cache=" + cctx.name() + ", part=" + p + "]");
        }

        partLocs[p] = new HashSet<>(owners);
    }

    if (cacheIds.size() > 1) {
        // We need this for logical collocation between different partitioned caches with the same affinity.
        for (Integer cacheId : cacheIds) {
            GridCacheContext<?, ?> extraCctx = cacheContext(cacheId);

            // This is possible if we have replaced a replicated cache with a partitioned one earlier.
            if (cctx == extraCctx)
                continue;

            if (extraCctx.isReplicated() || extraCctx.isLocal())
                continue;

            for (int p = 0, parts = extraCctx.affinity().partitions(); p < parts; p++) {
                List<ClusterNode> owners = extraCctx.topology().owners(p);

                if (partLocs[p] == UNMAPPED_PARTS)
                    // Skip unmapped partitions.
                    continue;

                if (F.isEmpty(owners)) {
                    if (!F.isEmpty(dataNodes(extraCctx.groupId(), NONE)))
                        // Retry.
                        return null;

                    throw new CacheException("Failed to find data nodes [cache=" + extraCctx.name() + ", part=" + p + "]");
                }

                if (partLocs[p] == null)
                    partLocs[p] = new HashSet<>(owners);
                else {
                    // Intersection of owners.
                    partLocs[p].retainAll(owners);

                    if (partLocs[p].isEmpty())
                        // Intersection is empty -> retry.
                        return null;
                }
            }
        }

        // Filter out nodes where not all replicated caches are loaded.
        for (Integer cacheId : cacheIds) {
            GridCacheContext<?, ?> extraCctx = cacheContext(cacheId);

            if (!extraCctx.isReplicated())
                continue;

            Set<ClusterNode> dataNodes = replicatedUnstableDataNodes(extraCctx);

            if (F.isEmpty(dataNodes))
                // Retry.
                return null;

            for (Set<ClusterNode> partLoc : partLocs) {
                if (partLoc == UNMAPPED_PARTS)
                    // Skip unmapped partition.
                    continue;

                partLoc.retainAll(dataNodes);

                if (partLoc.isEmpty())
                    // Retry.
                    return null;
            }
        }
    }

    // Collect the final partitions mapping.
    Map<ClusterNode, IntArray> res = new HashMap<>();

    // Here partitions in all IntArrays will be sorted in ascending order, this is important.
    for (int p = 0; p < partLocs.length; p++) {
        Set<ClusterNode> pl = partLocs[p];

        // Skip unmapped partitions.
        if (pl == UNMAPPED_PARTS)
            continue;

        assert !F.isEmpty(pl) : pl;

        ClusterNode n = pl.size() == 1 ? F.first(pl) : F.rand(pl);

        IntArray parts = res.get(n);

        if (parts == null)
            res.put(n, parts = new IntArray());

        parts.add(p);
    }

    return res;
}
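The heart of the method is the reduction over partLocs: owner sets are intersected across all partitioned caches (returning null to force a retry when an intersection becomes empty), and then a single node is chosen per partition. Below is a simplified, standalone sketch of that last step, using plain strings in place of ClusterNode and lists in place of IntArray; all names and the sample data are hypothetical.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ThreadLocalRandom;

public class PartitionMappingSketch {
    /**
     * Picks one owner per partition and groups partition IDs by the chosen node.
     * Returns null to signal "retry" when some partition has no common owner left.
     */
    static Map<String, List<Integer>> assign(List<Set<String>> ownersPerPart) {
        Map<String, List<Integer>> res = new HashMap<>();

        for (int p = 0; p < ownersPerPart.size(); p++) {
            Set<String> owners = ownersPerPart.get(p);

            if (owners.isEmpty())
                return null; // No common owner: the caller should retry on the next topology.

            // A single owner is taken as-is; otherwise a random one is picked to spread load,
            // mirroring F.first(pl) / F.rand(pl) in the method above.
            List<String> list = new ArrayList<>(owners);
            String node = list.size() == 1
                ? list.get(0)
                : list.get(ThreadLocalRandom.current().nextInt(list.size()));

            res.computeIfAbsent(node, k -> new ArrayList<>()).add(p);
        }

        return res;
    }

    public static void main(String[] args) {
        // Hypothetical owners of partitions 0..2 after intersecting two caches' ownership.
        List<Set<String>> owners = Arrays.asList(
            new HashSet<>(Arrays.asList("A", "B")),
            new HashSet<>(Collections.singletonList("B")),
            new HashSet<>(Arrays.asList("A", "C")));

        System.out.println(assign(owners));
    }
}

Because partitions are visited in ascending order, each node's partition list comes out sorted, which is the same invariant the comment above the final loop relies on.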
Use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.
The class IgfsSizeSelfTest, method primaryOrBackups.
/**
 * Determine primary and backup node IDs for the given block key.
 *
 * @param key Block key.
 * @return Collection of node IDs.
 */
private Collection<UUID> primaryOrBackups(IgfsBlockKey key) {
    IgniteEx grid = grid(0);

    Collection<UUID> ids = new HashSet<>();

    for (ClusterNode node : grid.cluster().nodes()) {
        if (grid.affinity(grid.igfsx(IGFS_NAME).configuration().getDataCacheConfiguration().getName())
            .isPrimaryOrBackup(node, key))
            ids.add(node.id());
    }

    return ids;
}
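The test relies on the public Affinity API to classify nodes for a key. A minimal sketch of the same calls against an ordinary cache follows; the cache name "myCache" and the integer key are hypothetical.

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.affinity.Affinity;
import org.apache.ignite.cluster.ClusterNode;

public class AffinityCheckSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // Hypothetical cache; any cache name known to the cluster works here.
            ignite.getOrCreateCache("myCache");

            Affinity<Integer> aff = ignite.affinity("myCache");

            // Node that owns the primary copy of the partition for key 42.
            ClusterNode primary = aff.mapKeyToNode(42);

            // isPrimaryOrBackup(...) is the same call the test above makes for every node.
            System.out.println("Primary node: " + primary.id()
                + ", isPrimaryOrBackup: " + aff.isPrimaryOrBackup(primary, 42));
        }
    }
}

isPrimaryOrBackup(node, key) returns true when the node is either the primary or one of the backups for the partition the key maps to.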
Use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.
The class GridMapQueryExecutor, method onMessage.
/**
 * @param nodeId Node ID.
 * @param msg Message.
 */
public void onMessage(UUID nodeId, Object msg) {
    try {
        assert msg != null;

        ClusterNode node = ctx.discovery().node(nodeId);

        if (node == null)
            // Node left, ignore.
            return;

        boolean processed = true;

        if (msg instanceof GridH2QueryRequest)
            onQueryRequest(node, (GridH2QueryRequest)msg);
        else if (msg instanceof GridQueryNextPageRequest)
            onNextPageRequest(node, (GridQueryNextPageRequest)msg);
        else if (msg instanceof GridQueryCancelRequest)
            onCancel(node, (GridQueryCancelRequest)msg);
        else if (msg instanceof GridH2DmlRequest)
            onDmlRequest(node, (GridH2DmlRequest)msg);
        else
            processed = false;

        if (processed && log.isDebugEnabled())
            log.debug("Processed request: " + nodeId + "->" + ctx.localNodeId() + " " + msg);
    }
    catch (Throwable th) {
        U.error(log, "Failed to process message: " + msg, th);
    }
}
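The method resolves the sending node via discovery, ignores messages from nodes that have already left, and dispatches on the concrete message type. The same pattern can be sketched with the public messaging API instead of the internal query messages; the topic name "requests" and the message types here are hypothetical.

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.cluster.ClusterNode;

public class MessageDispatchSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // Listener stays registered for the lifetime of the local node.
            ignite.message().localListen("requests", (nodeId, msg) -> {
                ClusterNode sender = ignite.cluster().node(nodeId);

                if (sender == null)
                    return true; // Sender already left the cluster: ignore, keep listening.

                // Dispatch by message type, mirroring the instanceof chain in onMessage().
                if (msg instanceof String)
                    System.out.println("Text request from " + nodeId + ": " + msg);
                else if (msg instanceof Integer)
                    System.out.println("Numeric request from " + nodeId + ": " + msg);

                return true; // Continue listening.
            });
        }
    }
}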
Use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.
The class GridMergeIndexSorted, method setSources.
/**
 * {@inheritDoc}
 */
@Override
public void setSources(Collection<ClusterNode> nodes, int segmentsCnt) {
    super.setSources(nodes, segmentsCnt);

    streamsMap = U.newHashMap(nodes.size());

    RowStream[] streams = new RowStream[nodes.size() * segmentsCnt];

    int i = 0;

    for (ClusterNode node : nodes) {
        RowStream[] segments = new RowStream[segmentsCnt];

        for (int s = 0; s < segmentsCnt; s++)
            streams[i++] = segments[s] = new RowStream();

        if (streamsMap.put(node.id(), segments) != null)
            throw new IllegalStateException();
    }

    it = new MergeStreamIterator(streams);
}
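setSources() allocates one RowStream per (node, segment) pair, keeps the flat array for the merge iterator, and keys the per-node segment arrays by node ID so incoming result pages can be routed to the right stream. Below is a standalone sketch of just that bookkeeping, with UUIDs in place of ClusterNode and a placeholder RowStream; it is an illustration of the data structure, not Ignite code.

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;

public class SourceStreamsSketch {
    /** Placeholder for the per-source row stream used inside the merge index. */
    static class RowStream {
    }

    public static void main(String[] args) {
        List<UUID> nodes = Arrays.asList(UUID.randomUUID(), UUID.randomUUID());
        int segmentsCnt = 2;

        Map<UUID, RowStream[]> streamsMap = new HashMap<>(nodes.size());
        RowStream[] streams = new RowStream[nodes.size() * segmentsCnt];

        int i = 0;

        for (UUID nodeId : nodes) {
            RowStream[] segments = new RowStream[segmentsCnt];

            // Each node contributes segmentsCnt streams; the flat array feeds the merge iterator,
            // while the map lets pages be routed to the right stream by (nodeId, segment).
            for (int s = 0; s < segmentsCnt; s++)
                streams[i++] = segments[s] = new RowStream();

            if (streamsMap.put(nodeId, segments) != null)
                throw new IllegalStateException("Duplicate node: " + nodeId);
        }

        System.out.println("Total streams: " + streams.length + ", nodes mapped: " + streamsMap.size());
    }
}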