Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition in project ignite by apache.
The class IgnitePdsCacheRebalancingAbstractTest, method testPartitionCounterConsistencyOnUnstableTopology.
/**
 * @throws Exception If failed.
*/
@Test
public void testPartitionCounterConsistencyOnUnstableTopology() throws Exception {
    Ignite ig = startGridsMultiThreaded(4);

    ig.cluster().active(true);

    int keys = 0;

    try (IgniteDataStreamer<Object, Object> ds = ig.dataStreamer(CACHE)) {
        ds.allowOverwrite(true);

        for (; keys < 10_000; keys++)
            ds.addData(keys, keys);
    }

    assertPartitionsSame(idleVerify(grid(0), CACHE));

    for (int it = 0; it < SF.applyLB(10, 3); it++) {
        final int it0 = it;

        IgniteInternalFuture fut = GridTestUtils.runAsync(() -> {
            try {
                int dataLoadTimeout = SF.applyLB(500, 250);

                stopGrid(3);

                // Wait for data load.
                U.sleep(dataLoadTimeout);

                startGrid(3);

                // Wait for data load.
                U.sleep(dataLoadTimeout);

                if (it0 % 2 != 0) {
                    stopGrid(2);

                    // Wait for data load.
                    U.sleep(dataLoadTimeout);

                    startGrid(2);
                }

                awaitPartitionMapExchange();
            }
            catch (Exception e) {
                error("Unable to start/stop grid", e);

                throw new RuntimeException(e);
            }
        });

        IgniteCache<Object, Object> cache = ig.cache(CACHE);

        while (!fut.isDone()) {
            int nextKeys = keys + 10;

            for (; keys < nextKeys; keys++)
                cache.put(keys, keys);
        }

        fut.get();

        log.info("Checking data...");

        Map<Integer, Long> cntrs = new HashMap<>();

        for (int g = 0; g < 4; g++) {
            IgniteEx ig0 = grid(g);

            for (GridDhtLocalPartition part : ig0.cachex(CACHE).context().topology().currentLocalPartitions()) {
                if (cntrs.containsKey(part.id()))
                    assertEquals(String.valueOf(part.id()), (long) cntrs.get(part.id()), part.updateCounter());
                else
                    cntrs.put(part.id(), part.updateCounter());
            }

            IgniteCache<Integer, String> ig0cache = ig0.cache(CACHE);

            for (Cache.Entry<Integer, String> entry : ig0cache.query(new ScanQuery<Integer, String>()))
                assertEquals(entry.getKey() + " " + g, entry.getKey(), entry.getValue());
        }

        assertEquals(ig.affinity(CACHE).partitions(), cntrs.size());
    }
}
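The cross-node comparison at the end of the test is the part that actually exercises GridDhtLocalPartition: every copy of a partition must report the same update counter. A minimal sketch of that check, pulled out into a standalone helper, is shown below; the helper name is hypothetical and a GridCommonAbstractTest-style environment (grid(idx), cachex(...)) is assumed, so treat it as an illustration rather than code from the test class.

// Hedged sketch: compare per-partition update counters across server nodes.
// The access chain mirrors the test above; helper name and parameters are assumptions.
private void assertCountersConsistent(String cacheName, int nodeCnt) {
    Map<Integer, Long> cntrs = new HashMap<>();

    for (int g = 0; g < nodeCnt; g++) {
        for (GridDhtLocalPartition part : grid(g).cachex(cacheName).context().topology().currentLocalPartitions()) {
            Long prev = cntrs.putIfAbsent(part.id(), part.updateCounter());

            // Any other copy of the same partition must report the counter seen first.
            if (prev != null)
                assertEquals("part=" + part.id(), (long) prev, part.updateCounter());
        }
    }
}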
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition in project ignite by apache.
The class IgnitePdsSpuriousRebalancingOnNodeJoinTest, method testNoSpuriousRebalancing.
/**
 * Checks that a node join does not trigger spurious rebalancing.
 */
@SuppressWarnings("ConstantConditions")
@Test
public void testNoSpuriousRebalancing() throws Exception {
    try {
        IgniteEx crd = startGrids(2);

        crd.cluster().active(true);
        crd.cluster().baselineAutoAdjustEnabled(false);

        List<Integer> moving = movingKeysAfterJoin(crd, DEFAULT_CACHE_NAME, 10);

        int[] primParts = crd.affinity(DEFAULT_CACHE_NAME).primaryPartitions(crd.localNode());

        Arrays.sort(primParts);

        // This partition will be new primary on joining node.
        int primChangePartId = -1;

        for (int id : moving) {
            if (Arrays.binarySearch(primParts, id) >= 0) {
                primChangePartId = id;

                break;
            }
        }

        assertTrue(primChangePartId != -1);

        startGrid(2);

        // Trigger partition movement.
        resetBaselineTopology();

        awaitPartitionMapExchange();

        GridCacheContext<Object, Object> ctx = crd.cachex(DEFAULT_CACHE_NAME).context();

        AffinityAssignment a0 = ctx.affinity().assignment(new AffinityTopologyVersion(3, 1));

        List<ClusterNode> nodes = a0.get(primChangePartId);

        assertEquals(3, nodes.size());
        assertEquals(crd.configuration().getConsistentId(), nodes.get(0).consistentId());

        awaitPartitionMapExchange();

        for (int k = 0; k < PARTS * 2; k++)
            crd.cache(DEFAULT_CACHE_NAME).put(k, k);

        forceCheckpoint();

        stopGrid(2);

        // Forge the counter on coordinator for switching partition.
        GridDhtLocalPartition part = ctx.topology().localPartition(primChangePartId);

        assertNotNull(part);

        PartitionUpdateCounter cntr0 = part.dataStore().partUpdateCounter();

        assertTrue(cntr0 instanceof PartitionUpdateCounterErrorWrapper);

        PartitionUpdateCounterTrackingImpl delegate = U.field(cntr0, "delegate");

        AtomicLong cntr = U.field(delegate, "cntr");

        cntr.set(cntr.get() - 1);

        TestRecordingCommunicationSpi.spi(crd).record((node, msg) -> msg instanceof GridDhtPartitionDemandMessage);

        startGrid(2);

        awaitPartitionMapExchange();

        // Expecting no rebalancing.
        List<Object> msgs = TestRecordingCommunicationSpi.spi(crd).recordedMessages(true);

        assertTrue("Rebalancing is not expected " + msgs, msgs.isEmpty());
    }
    finally {
        stopAllGrids();
    }
}
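Reading a partition's update counter goes through the same internal chain the test uses before forging it. A minimal sketch, assuming a GridCommonAbstractTest-style environment and a hypothetical helper name, could look like this:

// Hedged sketch: read the update counter of one partition on one node.
// Returns -1 when the node does not host the partition locally.
private long readPartitionCounter(int nodeIdx, String cacheName, int partId) {
    GridDhtLocalPartition part = grid(nodeIdx).cachex(cacheName).context().topology().localPartition(partId);

    return part == null ? -1 : part.updateCounter();
}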
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition in project ignite by apache.
The class PartitionReservationManager, method reservePartitions.
/**
* @param cacheIds Cache IDs.
* @param reqTopVer Topology version from request.
* @param explicitParts Explicit partitions list.
* @param nodeId Node ID.
* @param reqId Request ID.
 * @return Reservation object whose error message is {@code null} on success or describes the cause of failure.
* @throws IgniteCheckedException If failed.
*/
public PartitionReservation reservePartitions(
    @Nullable List<Integer> cacheIds,
    AffinityTopologyVersion reqTopVer,
    final int[] explicitParts,
    UUID nodeId,
    long reqId
) throws IgniteCheckedException {
    try (TraceSurroundings ignored = MTC.support(ctx.tracing().create(SQL_PARTITIONS_RESERVE, MTC.span()))) {
        assert reqTopVer != null;

        AffinityTopologyVersion topVer = ctx.cache().context().exchange().lastAffinityChangedTopologyVersion(reqTopVer);

        if (F.isEmpty(cacheIds))
            return new PartitionReservation(Collections.emptyList());

        Collection<Integer> partIds;

        if (explicitParts == null)
            partIds = null;
        else if (explicitParts.length == 0)
            partIds = Collections.emptyList();
        else {
            partIds = new ArrayList<>(explicitParts.length);

            for (int explicitPart : explicitParts)
                partIds.add(explicitPart);
        }

        List<GridReservable> reserved = new ArrayList<>();

        for (int i = 0; i < cacheIds.size(); i++) {
            GridCacheContext<?, ?> cctx = ctx.cache().context().cacheContext(cacheIds.get(i));

            // Cache was not found, probably was not deployed yet.
            if (cctx == null) {
                return new PartitionReservation(reserved, String.format("Failed to reserve partitions for query (cache is not " +
                    "found on local node) [localNodeId=%s, rmtNodeId=%s, reqId=%s, affTopVer=%s, cacheId=%s]",
                    ctx.localNodeId(), nodeId, reqId, topVer, cacheIds.get(i)));
            }

            if (cctx.isLocal() || !cctx.rebalanceEnabled())
                continue;

            // For replicated cache topology version does not make sense.
            final PartitionReservationKey grpKey = new PartitionReservationKey(cctx.name(), cctx.isReplicated() ? null : topVer);

            GridReservable r = reservations.get(grpKey);

            if (explicitParts == null && r != null) {
                // Try to reserve group partition if any and no explicits.
                if (r != REPLICATED_RESERVABLE) {
                    if (!r.reserve())
                        return new PartitionReservation(reserved, String.format("Failed to reserve partitions for query (group " +
                            "reservation failed) [localNodeId=%s, rmtNodeId=%s, reqId=%s, affTopVer=%s, cacheId=%s, " +
                            "cacheName=%s]", ctx.localNodeId(), nodeId, reqId, topVer, cacheIds.get(i), cctx.name()));

                    reserved.add(r);

                    MTC.span().addLog(() -> "Cache partitions were reserved " + r);
                }
            }
            else {
                // Try to reserve partitions one by one.
                int partsCnt = cctx.affinity().partitions();

                if (cctx.isReplicated()) {
                    // Check all the partitions are in owning state for replicated cache.
                    if (r == null) {
                        // Check only once.
                        for (int p = 0; p < partsCnt; p++) {
                            GridDhtLocalPartition part = partition(cctx, p);

                            // We don't need to reserve partitions because they will not be evicted in replicated caches.
                            GridDhtPartitionState partState = part != null ? part.state() : null;

                            if (partState != OWNING)
                                return new PartitionReservation(reserved, String.format("Failed to reserve partitions for " +
                                    "query (partition of REPLICATED cache is not in OWNING state) [" +
                                    "localNodeId=%s, rmtNodeId=%s, reqId=%s, affTopVer=%s, cacheId=%s, " +
                                    "cacheName=%s, part=%s, partFound=%s, partState=%s]",
                                    ctx.localNodeId(), nodeId, reqId, topVer, cacheIds.get(i), cctx.name(), p, (part != null), partState));
                        }

                        // Mark that we checked this replicated cache.
                        reservations.putIfAbsent(grpKey, REPLICATED_RESERVABLE);

                        MTC.span().addLog(() -> "Cache partitions were reserved [cache=" + cctx.name() +
                            ", partitions=[0.." + partsCnt + ']');
                    }
                }
                else {
                    // Reserve primary partitions for partitioned cache (if no explicit given).
                    if (explicitParts == null)
                        partIds = cctx.affinity().primaryPartitions(ctx.localNodeId(), topVer);

                    int reservedCnt = 0;

                    for (int partId : partIds) {
                        GridDhtLocalPartition part = partition(cctx, partId);

                        GridDhtPartitionState partState = part != null ? part.state() : null;

                        if (partState != OWNING) {
                            if (partState == LOST)
                                failQueryOnLostData(cctx, part);
                            else {
                                return new PartitionReservation(reserved, String.format("Failed to reserve partitions " +
                                    "for query (partition of PARTITIONED cache is not found or not in OWNING " +
                                    "state) [localNodeId=%s, rmtNodeId=%s, reqId=%s, affTopVer=%s, cacheId=%s, " +
                                    "cacheName=%s, part=%s, partFound=%s, partState=%s]",
                                    ctx.localNodeId(), nodeId, reqId, topVer, cacheIds.get(i), cctx.name(), partId, (part != null), partState));
                            }
                        }

                        if (!part.reserve()) {
                            return new PartitionReservation(reserved, String.format("Failed to reserve partitions for query " +
                                "(partition of PARTITIONED cache cannot be reserved) [" +
                                "localNodeId=%s, rmtNodeId=%s, reqId=%s, affTopVer=%s, cacheId=%s, " +
                                "cacheName=%s, part=%s, partFound=%s, partState=%s]",
                                ctx.localNodeId(), nodeId, reqId, topVer, cacheIds.get(i), cctx.name(), partId, true, partState));
                        }

                        reserved.add(part);

                        reservedCnt++;

                        // Double check that we are still in owning state and partition contents are not cleared.
                        partState = part.state();

                        if (partState != OWNING) {
                            if (partState == LOST)
                                failQueryOnLostData(cctx, part);
                            else {
                                return new PartitionReservation(reserved, String.format("Failed to reserve partitions for " +
                                    "query (partition of PARTITIONED cache is not in OWNING state after " +
                                    "reservation) [localNodeId=%s, rmtNodeId=%s, reqId=%s, affTopVer=%s, " +
                                    "cacheId=%s, cacheName=%s, part=%s, partState=%s]",
                                    ctx.localNodeId(), nodeId, reqId, topVer, cacheIds.get(i), cctx.name(), partId, partState));
                            }
                        }
                    }

                    final Collection<Integer> finalPartIds = partIds;

                    MTC.span().addLog(() -> "Cache partitions were reserved [cache=" + cctx.name() +
                        ", partitions=" + finalPartIds + ", topology=" + topVer + ']');

                    if (explicitParts == null && reservedCnt > 0) {
                        // We reserved all the primary partitions for cache, attempt to add group reservation.
                        GridDhtPartitionsReservation grp = new GridDhtPartitionsReservation(topVer, cctx, "SQL");

                        if (grp.register(reserved.subList(reserved.size() - reservedCnt, reserved.size()))) {
                            if (reservations.putIfAbsent(grpKey, grp) != null)
                                throw new IllegalStateException("Reservation already exists.");

                            grp.onPublish(new CI1<GridDhtPartitionsReservation>() {
                                @Override public void apply(GridDhtPartitionsReservation r) {
                                    reservations.remove(grpKey, r);
                                }
                            });
                        }
                    }
                }
            }
        }

        return new PartitionReservation(reserved);
    }
}
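A typical caller checks the returned PartitionReservation for an error before running the query and releases it afterwards. The outline below is a hypothetical usage sketch, not code from Ignite: only reservePartitions() itself comes from the source above, while the error()/release() accessor names and the surrounding request/response helpers (reservationMgr, req, sendRetry, runQuery) are assumptions for illustration.

// Hedged usage sketch of reservePartitions(); all names other than reservePartitions() are assumptions.
PartitionReservation res = reservationMgr.reservePartitions(
    cacheIds,               // caches touched by the query
    req.topologyVersion(),  // topology version from the query request
    req.queryPartitions(),  // explicit partitions, or null
    req.senderNodeId(),
    req.requestId());

try {
    if (res.error() != null) {
        sendRetry(req, res.error()); // reservation failed: ask the reducer to retry on a newer topology

        return;
    }

    runQuery(req);
}
finally {
    res.release();
}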
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition in project ignite by apache.
The class CacheMvccSqlUpdateCountersTest, method checkUpdateCounters.
/**
* Checks update counter value on all nodes.
*
* @param cacheName Cache name.
 * @param p Partition number.
* @param val Expected partition counter value.
*/
private void checkUpdateCounters(String cacheName, int p, long val) {
    for (Ignite node : G.allGrids()) {
        if (!node.configuration().isClientMode()) {
            IgniteCacheProxy cache = (IgniteCacheProxy) node.cache(cacheName);

            GridDhtLocalPartition part = cache.context().topology().localPartition(p);

            if (!cache.context().mvccEnabled() || part == null)
                continue;

            assertEquals("part=" + p, val, part.updateCounter());
        }
    }
}
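A hedged example of how such a helper might be used: run a single-row SQL DML statement, then assert that every server node hosting the key's partition reports the expected counter. The cache name, key, and expected counter value below are illustrative assumptions (a freshly started MVCC cache is assumed, so one committed transaction yields counter 1).

// Hedged usage sketch; assumes an MVCC-enabled cache named "mvccCache" with Integer key/value types.
int key = 1;
int part = grid(0).affinity("mvccCache").partition(key);

grid(0).cache("mvccCache").query(
    new SqlFieldsQuery("INSERT INTO Integer(_key, _val) VALUES(?, ?)").setArgs(key, 1)).getAll();

// One committed transaction is assumed to move the partition counter to 1 on a fresh cache.
checkUpdateCounters("mvccCache", part, 1);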
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition in project ignite by apache.
The class CacheMvccBasicContinuousQueryTest, method getUpdateCounter.
/**
* @param node Node.
* @param key Key.
 * @return Update counter of the partition that the key belongs to.
*/
private long getUpdateCounter(IgniteEx node, Integer key) {
    int partId = node.cachex(DEFAULT_CACHE_NAME).context().affinity().partition(key);

    GridDhtLocalPartition part = node.cachex(DEFAULT_CACHE_NAME).context().dht().topology().localPartition(partId);

    assert part != null;

    return part.updateCounter();
}
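A hedged usage sketch: capture the counter before and after a single put and assert it advanced by one. The node index and key are illustrative assumptions, and the chosen node is assumed to host the key's partition (otherwise the assertion inside getUpdateCounter would trip).

// Hedged usage sketch for getUpdateCounter(); node index and key are assumptions.
IgniteEx node = grid(0);
int key = 42;

long before = getUpdateCounter(node, key);

node.cache(DEFAULT_CACHE_NAME).put(key, key);

assertEquals(before + 1, getUpdateCounter(node, key));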