Use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.
Class GridReduceQueryExecutor, method stableDataNodes.
/**
 * @param isReplicatedOnly If we must only have replicated caches.
 * @param topVer Topology version.
 * @param cacheIds Participating cache IDs.
 * @param parts Partitions.
 * @return Data nodes or {@code null} if repartitioning started and we need to retry.
 */
private Map<ClusterNode, IntArray> stableDataNodes(boolean isReplicatedOnly, AffinityTopologyVersion topVer, List<Integer> cacheIds, int[] parts) {
    GridCacheContext<?, ?> cctx = cacheContext(cacheIds.get(0));
    Map<ClusterNode, IntArray> map = stableDataNodesMap(topVer, cctx, parts);
    Set<ClusterNode> nodes = map.keySet();
    if (F.isEmpty(map))
        throw new CacheException("Failed to find data nodes for cache: " + cctx.name());
    for (int i = 1; i < cacheIds.size(); i++) {
        GridCacheContext<?, ?> extraCctx = cacheContext(cacheIds.get(i));
        String extraCacheName = extraCctx.name();
        if (extraCctx.isLocal())
            // No consistency guarantees for local caches.
            continue;
        if (isReplicatedOnly && !extraCctx.isReplicated())
            throw new CacheException("Queries running on replicated cache should not contain JOINs " +
                "with partitioned tables [replicatedCache=" + cctx.name() + ", partitionedCache=" + extraCacheName + "]");
        Set<ClusterNode> extraNodes = stableDataNodesMap(topVer, extraCctx, parts).keySet();
        if (F.isEmpty(extraNodes))
            throw new CacheException("Failed to find data nodes for cache: " + extraCacheName);
        boolean disjoint;
        if (extraCctx.isReplicated()) {
            if (isReplicatedOnly) {
                nodes.retainAll(extraNodes);
                disjoint = map.isEmpty();
            }
            else
                disjoint = !extraNodes.containsAll(nodes);
        }
        else
            disjoint = !extraNodes.equals(nodes);
        if (disjoint) {
            if (isPreloadingActive(cacheIds))
                // Retry.
                return null;
            else
                throw new CacheException("Caches have distinct sets of data nodes [cache1=" + cctx.name() +
                    ", cache2=" + extraCacheName + "]");
        }
    }
    return map;
}
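The disjointness check above asks whether every cache in the query lives on the same set of data nodes. The same question can be asked from application code through the public ClusterGroup API. A minimal sketch, assuming a running node and two placeholder cache names (cacheA, cacheB) that are not taken from the snippet above:

import java.util.HashSet;
import java.util.Set;

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.cluster.ClusterNode;

public class DataNodeIntersection {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // Data nodes currently hosting each cache.
            Set<ClusterNode> nodesA = new HashSet<>(ignite.cluster().forDataNodes("cacheA").nodes());
            Set<ClusterNode> nodesB = new HashSet<>(ignite.cluster().forDataNodes("cacheB").nodes());

            // A distributed join over both caches is only safe when the node sets
            // overlap appropriately (cf. the disjoint flag above).
            nodesA.retainAll(nodesB);

            if (nodesA.isEmpty())
                System.out.println("Caches have disjoint data node sets.");
            else
                System.out.println("Common data nodes: " + nodesA.size());
        }
    }
}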
Use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.
Class GridReduceQueryExecutor, method replicatedUnstableDataNodes.
/**
 * Calculates data nodes for replicated caches on unstable topology.
 *
 * @param cacheIds Cache IDs.
 * @return Collection of all data nodes owning all the caches or {@code null} for retry.
 */
private Collection<ClusterNode> replicatedUnstableDataNodes(List<Integer> cacheIds) {
    int i = 0;
    GridCacheContext<?, ?> cctx = cacheContext(cacheIds.get(i++));
    // The main cache is allowed to be partitioned.
    if (!cctx.isReplicated()) {
        assert cacheIds.size() > 1 : "no extra replicated caches with partitioned main cache";
        // Just replace the main cache with the first extra one.
        cctx = cacheContext(cacheIds.get(i++));
        assert cctx.isReplicated() : "all the extra caches must be replicated here";
    }
    Set<ClusterNode> nodes = replicatedUnstableDataNodes(cctx);
    if (F.isEmpty(nodes))
        // Retry.
        return null;
    for (; i < cacheIds.size(); i++) {
        GridCacheContext<?, ?> extraCctx = cacheContext(cacheIds.get(i));
        if (extraCctx.isLocal())
            continue;
        if (!extraCctx.isReplicated())
            throw new CacheException("Queries running on replicated cache should not contain JOINs " +
                "with tables in partitioned caches [replicatedCache=" + cctx.name() + ", " +
                "partitionedCache=" + extraCctx.name() + "]");
        Set<ClusterNode> extraOwners = replicatedUnstableDataNodes(extraCctx);
        if (F.isEmpty(extraOwners))
            // Retry.
            return null;
        nodes.retainAll(extraOwners);
        if (nodes.isEmpty())
            // Retry.
            return null;
    }
    return nodes;
}
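The exception above enforces that every extra cache in the join is REPLICATED. From the application side, the cache mode can be checked up front with the public configuration API. A rough sketch, assuming the named caches already exist on the cluster (class and method names are placeholders):

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.cache.CacheMode;
import org.apache.ignite.configuration.CacheConfiguration;

public class JoinModeCheck {
    /** Returns true if every named cache is REPLICATED, i.e. a join over them
     *  will not trip the "JOINs with tables in partitioned caches" check above. */
    static boolean allReplicated(Ignite ignite, String... cacheNames) {
        for (String name : cacheNames) {
            IgniteCache<Object, Object> cache = ignite.cache(name); // Assumes the cache exists.

            CacheConfiguration<?, ?> cfg = cache.getConfiguration(CacheConfiguration.class);

            if (cfg.getCacheMode() != CacheMode.REPLICATED)
                return false;
        }

        return true;
    }
}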
Use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.
Class SparseDistributedMatrix, method times.
/**
 * {@inheritDoc}
 */
@Override
public Vector times(Vector vec) {
    if (vec == null)
        throw new IllegalArgumentException("The vector must not be null.");
    if (columnSize() != vec.size())
        throw new CardinalityException(columnSize(), vec.size());
    SparseDistributedMatrix matrixA = this;
    SparseDistributedVector vectorB = (SparseDistributedVector) vec;
    String cacheName = storage().cacheName();
    int rows = this.rowSize();
    SparseDistributedVector vectorC = (SparseDistributedVector) likeVector(rows);
    CacheUtils.bcast(cacheName, () -> {
        Ignite ignite = Ignition.localIgnite();
        Affinity<RowColMatrixKey> affinity = ignite.affinity(cacheName);
        ClusterNode locNode = ignite.cluster().localNode();
        SparseDistributedVectorStorage storageC = vectorC.storage();
        Map<ClusterNode, Collection<RowColMatrixKey>> keysCToNodes = affinity.mapKeysToNodes(storageC.getAllKeys());
        Collection<RowColMatrixKey> locKeys = keysCToNodes.get(locNode);
        if (locKeys == null)
            return;
        // Compute each locally owned entry C[i] = A[i] (row) dot B (vector).
        // TODO: IGNITE:5114, exec in parallel
        locKeys.forEach(key -> {
            int idx = key.index();
            Vector Aik = matrixA.getRow(idx);
            vectorC.set(idx, Aik.times(vectorB).sum());
        });
    });
    return vectorC;
}
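Inside the broadcast closure, the relevant ClusterNode usage is the pattern "map keys to nodes, then process only the keys owned by the local node". A stripped-down sketch of the same pattern using only the core compute and affinity API (the cache name, keys, and class name are placeholders, not the ML internals above):

import java.util.Arrays;
import java.util.Collection;
import java.util.Map;

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.affinity.Affinity;
import org.apache.ignite.cluster.ClusterNode;

public class LocalShareExample {
    static void processLocalShare(Ignite ignite) {
        ignite.compute().broadcast(() -> {
            Ignite loc = Ignition.localIgnite();
            Affinity<Integer> aff = loc.affinity("myCache");
            ClusterNode locNode = loc.cluster().localNode();

            // Same idea as mapKeysToNodes(...).get(locNode) in the snippet above.
            Map<ClusterNode, Collection<Integer>> byNode = aff.mapKeysToNodes(Arrays.asList(1, 2, 3, 4, 5));
            Collection<Integer> locKeys = byNode.get(locNode);

            if (locKeys == null)
                return;

            locKeys.forEach(key ->
                // Node-local work for each owned key goes here.
                System.out.println("Processing key " + key + " on " + locNode.id()));
        });
    }
}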
Use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.
Class GridH2IndexBase, method broadcastSegments.
/**
 * @param qctx Query context.
 * @param cctx Cache context.
 * @param isLocalQry Local query flag.
 * @return Segment keys of the nodes to broadcast to.
 */
private List<SegmentKey> broadcastSegments(GridH2QueryContext qctx, GridCacheContext<?, ?> cctx, boolean isLocalQry) {
    Map<UUID, int[]> partMap = qctx.partitionsMap();
    List<ClusterNode> nodes;
    if (isLocalQry) {
        if (partMap != null && !partMap.containsKey(cctx.localNodeId()))
            // Prevent remote index call for local queries.
            return Collections.emptyList();
        nodes = Collections.singletonList(cctx.localNode());
    }
    else {
        if (partMap == null)
            nodes = new ArrayList<>(CU.affinityNodes(cctx, qctx.topologyVersion()));
        else {
            nodes = new ArrayList<>(partMap.size());
            GridKernalContext ctx = kernalContext();
            for (UUID nodeId : partMap.keySet()) {
                ClusterNode node = ctx.discovery().node(nodeId);
                if (node == null)
                    throw new GridH2RetryException("Failed to find node.");
                nodes.add(node);
            }
        }
        if (F.isEmpty(nodes))
            throw new GridH2RetryException("Failed to collect affinity nodes.");
    }
    int segmentsCount = segmentsCount();
    List<SegmentKey> res = new ArrayList<>(nodes.size() * segmentsCount);
    for (ClusterNode node : nodes) {
        for (int seg = 0; seg < segmentsCount; seg++)
            res.add(new SegmentKey(node, seg));
    }
    return res;
}
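The segment count used above corresponds to the cache's query parallelism setting, which controls how many segments each node's SQL index is split into. A hedged sketch of enumerating (node, segment) pairs with the public API only; SegmentKey is internal, so a plain string stands in for it, and the cache and class names are placeholders:

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

import org.apache.ignite.Ignite;
import org.apache.ignite.cluster.ClusterNode;
import org.apache.ignite.configuration.CacheConfiguration;

public class SegmentEnumeration {
    static List<String> nodeSegmentPairs(Ignite ignite, String cacheName) {
        // Data nodes hosting the cache on the current topology.
        Collection<ClusterNode> nodes = ignite.cluster().forDataNodes(cacheName).nodes();

        // Query parallelism: the number of index segments per node (1 by default).
        int segments = ignite.cache(cacheName)
            .getConfiguration(CacheConfiguration.class)
            .getQueryParallelism();

        List<String> res = new ArrayList<>(nodes.size() * segments);

        for (ClusterNode node : nodes)
            for (int seg = 0; seg < segments; seg++)
                res.add(node.id() + "#" + seg);

        return res;
    }
}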
Use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.
Class GridH2IndexBase, method rangeSegment.
/**
 * @param cctx Cache context.
 * @param qctx Query context.
 * @param affKeyObj Affinity key.
 * @param isLocalQry Local query flag.
 * @return Segment key for the affinity key.
 */
private SegmentKey rangeSegment(GridCacheContext<?, ?> cctx, GridH2QueryContext qctx, Object affKeyObj, boolean isLocalQry) {
    assert affKeyObj != null && affKeyObj != EXPLICIT_NULL : affKeyObj;
    ClusterNode node;
    int partition = cctx.affinity().partition(affKeyObj);
    if (isLocalQry) {
        if (qctx.partitionsMap() != null) {
            // If we have an explicit partitions map, we have to use it to calculate the affinity node.
            UUID nodeId = qctx.nodeForPartition(partition, cctx);
            if (!cctx.localNodeId().equals(nodeId))
                // Prevent remote index call for local queries.
                return null;
        }
        if (!cctx.affinity().primaryByKey(cctx.localNode(), partition, qctx.topologyVersion()))
            return null;
        node = cctx.localNode();
    }
    else {
        if (qctx.partitionsMap() != null) {
            // If we have an explicit partitions map, we have to use it to calculate the affinity node.
            UUID nodeId = qctx.nodeForPartition(partition, cctx);
            node = cctx.discovery().node(nodeId);
        }
        else
            // Get the primary node for the current topology version.
            node = cctx.affinity().primaryByKey(affKeyObj, qctx.topologyVersion());
        if (node == null)
            // Node was not found, probably topology changed and we need to retry the whole query.
            throw new GridH2RetryException("Failed to find node.");
    }
    return new SegmentKey(node, segmentForPartition(partition));
}
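Outside the SQL engine, the "which partition and node own this affinity key" lookup maps directly onto the public Affinity API. A small sketch with placeholder cache and class names; it returns the primary node for the key, or null if none is currently assigned (the situation that triggers the retry exception above):

import org.apache.ignite.Ignite;
import org.apache.ignite.cache.affinity.Affinity;
import org.apache.ignite.cluster.ClusterNode;

public class PrimaryLookup {
    static ClusterNode primaryForKey(Ignite ignite, Object affKey) {
        Affinity<Object> aff = ignite.affinity("myCache");

        // Partition the key maps to (same role as cctx.affinity().partition(affKeyObj) above).
        int part = aff.partition(affKey);

        // Primary node for that partition on the current topology; may be null during rebalancing.
        return aff.mapPartitionToNode(part);
    }
}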