Use of org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion in project ignite by apache.
Class GridCacheAffinityApiSelfTest, method testPrimaryPartitionsOneNode.
/**
 * Checks that primaryPartitions() returns exactly the partitions for which the given node is the primary owner.
 *
 * @throws Exception If failed.
 */
public void testPrimaryPartitionsOneNode() throws Exception {
    AffinityFunctionContext ctx = new GridAffinityFunctionContextImpl(
        new ArrayList<>(grid(0).cluster().nodes()), null, null, new AffinityTopologyVersion(1), 1);

    List<List<ClusterNode>> assignment = affinity().assignPartitions(ctx);

    for (ClusterNode node : grid(0).cluster().nodes()) {
        int[] parts = grid(0).affinity(DEFAULT_CACHE_NAME).primaryPartitions(node);

        assert !F.isEmpty(parts);

        for (int p : parts) {
            Collection<ClusterNode> owners = nodes(assignment, p);

            assert !F.isEmpty(owners);

            // The first node in a partition's assignment is its primary owner.
            ClusterNode primary = F.first(owners);

            assert F.eqNodes(node, primary);
        }
    }
}
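The nodes(assignment, p) helper is not part of the snippet above. A minimal sketch, assuming it simply returns the owner list computed for a partition by the affinity function (the helper's body is inferred, not taken from the Ignite source shown here):

/** Hypothetical helper: owners assigned to the given partition, primary first. */
private Collection<ClusterNode> nodes(List<List<ClusterNode>> assignment, int part) {
    return assignment.get(part);
}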
Use of org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion in project ignite by apache.
Class GridCacheAffinityApiSelfTest, method testBackupPartitions.
/**
 * Checks that backupPartitions() returns partitions for which the given node is a backup (non-primary) owner.
 *
 * @throws Exception If failed.
 */
public void testBackupPartitions() throws Exception {
    // Use the local node of the first grid.
    ClusterNode n0 = grid(0).localNode();

    // Get backup partitions without explicitly specified levels.
    int[] parts = grid(0).affinity(DEFAULT_CACHE_NAME).backupPartitions(n0);

    assert !F.isEmpty(parts);

    AffinityFunctionContext ctx = new GridAffinityFunctionContextImpl(
        new ArrayList<>(grid(0).cluster().nodes()), null, null, new AffinityTopologyVersion(1), 1);

    List<List<ClusterNode>> assignment = affinity().assignPartitions(ctx);

    for (int p : parts) {
        Collection<ClusterNode> owners = new ArrayList<>(nodes(assignment, p));

        assert !F.isEmpty(owners);

        // Remove the primary owner; the node must still be present among the backups.
        Iterator<ClusterNode> iter = owners.iterator();

        iter.next();
        iter.remove();

        assert owners.contains(n0);
    }
}
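Both tests build the affinity context the same way. A sketch with each constructor argument annotated, assuming the GridAffinityFunctionContextImpl parameters are (topology snapshot, previous assignment, discovery event, topology version, backups), as the calls above suggest:

AffinityFunctionContext ctx = new GridAffinityFunctionContextImpl(
    new ArrayList<>(grid(0).cluster().nodes()), // current topology snapshot
    null,                                       // no previous assignment
    null,                                       // no discovery event
    new AffinityTopologyVersion(1),             // affinity topology version 1
    1);                                         // one backup per partition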
Use of org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion in project ignite by apache.
Class GridCacheAffinityApiSelfTest, method testMapPartitionsToNode.
/**
 * Checks that mapPartitionsToNodes() maps each requested partition to its primary node.
 *
 * @throws Exception If failed.
 */
public void testMapPartitionsToNode() throws Exception {
    Map<Integer, ClusterNode> map = grid(0).affinity(DEFAULT_CACHE_NAME).mapPartitionsToNodes(F.asList(0, 1, 5, 19, 12));

    AffinityFunctionContext ctx = new GridAffinityFunctionContextImpl(
        new ArrayList<>(grid(0).cluster().nodes()), null, null, new AffinityTopologyVersion(1), 1);

    AffinityFunction aff = affinity();

    List<List<ClusterNode>> assignment = aff.assignPartitions(ctx);

    for (Map.Entry<Integer, ClusterNode> e : map.entrySet())
        assert F.eqNodes(F.first(nodes(assignment, aff, e.getKey())), e.getValue());
}
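For a single partition the public Affinity API offers mapPartitionToNode(). A minimal usage sketch under the same assumptions as the test above (partition 5 is an arbitrary example, and assignment is the list computed from the affinity function):

// Map one partition to its primary node and compare with the computed assignment.
ClusterNode primary = grid(0).affinity(DEFAULT_CACHE_NAME).mapPartitionToNode(5);

assert F.eqNodes(F.first(assignment.get(5)), primary);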
Use of org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion in project ignite by apache.
Class IgniteCachePartitionLossPolicySelfTest, method checkLostPartition.
/**
 * @param canWrite {@code True} if writes are allowed.
 * @param safe {@code True} if a lost partition should trigger an exception.
 * @throws Exception If failed.
 */
private void checkLostPartition(boolean canWrite, boolean safe) throws Exception {
    assert partLossPlc != null;

    int part = prepareTopology();

    // Wait until all grids (servers and client) reach the same topology version,
    // to make sure every node has received the map with the lost partition.
    GridTestUtils.waitForCondition(() -> {
        AffinityTopologyVersion last = null;

        for (Ignite ig : G.allGrids()) {
            AffinityTopologyVersion ver = ((IgniteEx)ig).context().cache().context().exchange().readyAffinityVersion();

            if (last != null && !last.equals(ver))
                return false;

            last = ver;
        }

        return true;
    }, 10000);

    for (Ignite ig : G.allGrids()) {
        info("Checking node: " + ig.cluster().localNode().id());

        IgniteCache<Integer, Integer> cache = ig.cache(CACHE_NAME);

        verifyCacheOps(canWrite, safe, part, ig);

        // Check that the lost partition can be read and written in recovery mode.
        IgniteCache<Integer, Integer> recoverCache = cache.withPartitionRecover();

        for (int lostPart : recoverCache.lostPartitions()) {
            recoverCache.get(lostPart);
            recoverCache.put(lostPart, lostPart);
        }

        // Check that writing in recovery mode does not clear the partition state.
        verifyCacheOps(canWrite, safe, part, ig);
    }

    // Check that the partition state does not change after a new node is started.
    IgniteEx grd = startGrid(3);

    info("Newly started node: " + grd.cluster().localNode().id());

    for (Ignite ig : G.allGrids())
        verifyCacheOps(canWrite, safe, part, ig);

    ignite(0).resetLostPartitions(Collections.singletonList(CACHE_NAME));

    awaitPartitionMapExchange(true, true, null);

    for (Ignite ig : G.allGrids()) {
        IgniteCache<Integer, Integer> cache = ig.cache(CACHE_NAME);

        assertTrue(cache.lostPartitions().isEmpty());

        int parts = ig.affinity(CACHE_NAME).partitions();

        // After the reset, every partition must be readable and writable again.
        for (int i = 0; i < parts; i++) {
            cache.get(i);
            cache.put(i, i);
        }
    }
}
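The partLossPlc field and prepareTopology() are defined elsewhere in the test class. A minimal configuration sketch, assuming the policy is applied to the cache under test through CacheConfiguration and that partitions are lost by stopping the only owner of a backup-less cache (the concrete policy value and backup count are illustrative, not taken from the test):

// Hypothetical cache configuration for reproducing lost partitions.
CacheConfiguration<Integer, Integer> ccfg = new CacheConfiguration<>(CACHE_NAME);

ccfg.setPartitionLossPolicy(PartitionLossPolicy.READ_WRITE_SAFE); // "safe" policy: ops on lost partitions fail
ccfg.setBackups(0); // no backups, so stopping an owner node loses its partitions

ignite.getOrCreateCache(ccfg);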
Use of org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion in project ignite by apache.
Class CacheScanPartitionQueryFallbackSelfTest, method remotePartition.
/**
 * @param cctx Cache context.
 * @return Partition owned by a remote node, together with that node's ID.
 */
private IgniteBiTuple<Integer, UUID> remotePartition(final GridCacheContext cctx) {
    // Pick any remote node and find a partition for which it is the primary owner.
    ClusterNode node = F.first(cctx.kernalContext().grid().cluster().forRemotes().nodes());

    GridCacheAffinityManager affMgr = cctx.affinity();

    AffinityTopologyVersion topVer = affMgr.affinityTopologyVersion();

    Set<Integer> parts = affMgr.primaryPartitions(node.id(), topVer);

    return new IgniteBiTuple<>(F.first(parts), node.id());
}
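In the scan-query fallback test this helper supplies a partition owned by a remote node. A minimal usage sketch, assuming an IgniteCache<Integer, Integer> named cache over the same cache context (the query setup is not part of the snippet above):

// Hypothetical usage: scan only the remote partition returned by remotePartition().
IgniteBiTuple<Integer, UUID> tup = remotePartition(cctx);

ScanQuery<Integer, Integer> qry = new ScanQuery<Integer, Integer>().setPartition(tup.get1());

List<Cache.Entry<Integer, Integer>> entries = cache.query(qry).getAll();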