use of org.apache.ignite.internal.processors.cache.PartitionUpdateCounter in project ignite by apache.
the class IdleVerifyUtility method getUpdateCountersSnapshot.
/**
 * Gathers update counters info.
 * Holds {@link org.apache.ignite.internal.processors.cache.PartitionUpdateCounter#copy} of update counters.
 *
 * @param ign Ignite instance.
 * @param grpIds Group IDs.
 * @return Current group distribution with update counters per partition.
 */
public static Map<Integer, Map<Integer, PartitionUpdateCounter>> getUpdateCountersSnapshot(IgniteEx ign, Set<Integer> grpIds) {
    Map<Integer, Map<Integer, PartitionUpdateCounter>> partsWithCountersPerGrp = new HashMap<>();

    for (Integer grpId : grpIds) {
        CacheGroupContext grpCtx = ign.context().cache().cacheGroup(grpId);

        if (grpCtx == null)
            throw new GridNotIdleException("Group not found: " + grpId + "." +
                " Possible reasons: rebalance in progress or concurrent cache destroy.");

        GridDhtPartitionTopology top = grpCtx.topology();

        Map<Integer, PartitionUpdateCounter> partsWithCounters =
            partsWithCountersPerGrp.computeIfAbsent(grpId, k -> new HashMap<>());

        for (GridDhtLocalPartition part : top.currentLocalPartitions()) {
            if (part.state() != GridDhtPartitionState.OWNING)
                continue;

            @Nullable PartitionUpdateCounter updCntr = part.dataStore().partUpdateCounter();

            partsWithCounters.put(part.id(), updCntr == null ? null : updCntr.copy());
        }
    }

    return partsWithCountersPerGrp;
}
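A common way to use this snapshot is to capture counters before and after a verification step and fail if any counter moved, which would mean the cluster was not idle. Below is a minimal hypothetical sketch of such a comparison (assertCountersUnchanged is not part of IdleVerifyUtility); it relies only on PartitionUpdateCounter#get and treats a missing counter as zero:

private static void assertCountersUnchanged(
    Map<Integer, Map<Integer, PartitionUpdateCounter>> before,
    Map<Integer, Map<Integer, PartitionUpdateCounter>> after
) {
    for (Map.Entry<Integer, Map<Integer, PartitionUpdateCounter>> grp : before.entrySet()) {
        Map<Integer, PartitionUpdateCounter> parts1 = after.get(grp.getKey());

        for (Map.Entry<Integer, PartitionUpdateCounter> part : grp.getValue().entrySet()) {
            PartitionUpdateCounter c0 = part.getValue();
            PartitionUpdateCounter c1 = parts1 == null ? null : parts1.get(part.getKey());

            // Treat absent counters as zero, mirroring the null handling in the snapshot above.
            long v0 = c0 == null ? 0 : c0.get();
            long v1 = c1 == null ? 0 : c1.get();

            if (v0 != v1)
                throw new GridNotIdleException("Update counter changed [grpId=" + grp.getKey() +
                    ", partId=" + part.getKey() + ", before=" + v0 + ", after=" + v1 + ']');
        }
    }
}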
use of org.apache.ignite.internal.processors.cache.PartitionUpdateCounter in project ignite by apache.
the class CachePartitionLostAfterSupplierHasLeftTest method doTestPartitionLostWhileClearing.
/**
 * Test scenario: the last supplier has left while a partition on the demander is cleared before sending the first
 * demand request.
 *
 * @param cnt Nodes count.
 * @param mode Test mode: 0 - reset while clearing, 1 - restart while clearing and activate all, 2 - restart while
 * clearing and activate in wrong order.
 * @throws Exception If failed.
 */
private void doTestPartitionLostWhileClearing(int cnt, int mode) throws Exception {
    IgniteEx crd = startGrids(cnt);

    crd.cluster().baselineAutoAdjustEnabled(false);
    crd.cluster().active(true);

    int partId = -1;
    int idx0 = 0;
    int idx1 = 1;

    // Find a partition having its primary on grid(idx0) and its first backup on grid(idx1).
    for (int p = 0; p < PARTS_CNT; p++) {
        List<ClusterNode> nodes = new ArrayList<>(crd.affinity(DEFAULT_CACHE_NAME).mapPartitionToPrimaryAndBackups(p));

        if (grid(nodes.get(0)) == grid(idx0) && grid(nodes.get(1)) == grid(idx1)) {
            partId = p;

            break;
        }
    }

    assertTrue(partId >= 0);

    final int keysCnt = 10_010;

    List<Integer> keys = partitionKeys(grid(idx0).cache(DEFAULT_CACHE_NAME), partId, keysCnt, 0);

    load(grid(idx0), DEFAULT_CACHE_NAME, keys.subList(0, keysCnt - 10));

    stopGrid(idx1);

    // Updates applied while idx1 is down force it to rebalance on return.
    load(grid(idx0), DEFAULT_CACHE_NAME, keys.subList(keysCnt - 10, keysCnt));

    // Block supply messages from the supplier node.
    TestRecordingCommunicationSpi.spi(grid(0)).blockMessages(new IgniteBiPredicate<ClusterNode, Message>() {
        @Override public boolean apply(ClusterNode clusterNode, Message msg) {
            if (msg instanceof GridDhtPartitionSupplyMessage) {
                GridDhtPartitionSupplyMessage msg0 = (GridDhtPartitionSupplyMessage)msg;

                return msg0.groupId() == CU.cacheId(DEFAULT_CACHE_NAME);
            }

            return false;
        }
    });

    IgniteConfiguration cfg = getConfiguration(getTestIgniteInstanceName(idx1));

    // Block demand messages from the restarting demander node.
    ((TestRecordingCommunicationSpi)cfg.getCommunicationSpi()).blockMessages((node, msg) -> {
        if (msg instanceof GridDhtPartitionDemandMessage) {
            GridDhtPartitionDemandMessage demandMsg = (GridDhtPartitionDemandMessage)msg;

            return CU.cacheId(DEFAULT_CACHE_NAME) == demandMsg.groupId();
        }

        return false;
    });

    IgniteEx g1 = startGrid(optimize(cfg));

    // Stop supplier in the middle of rebalancing.
    stopGrid(idx0);

    TestRecordingCommunicationSpi.spi(g1).stopBlock();

    final GridDhtLocalPartition part = g1.cachex(DEFAULT_CACHE_NAME).context().topology().localPartition(partId);

    assertTrue("Unexpected partition state [p=" + partId +
        ", expected=" + GridDhtPartitionState.LOST +
        ", actual=" + part.state() + ']',
        GridTestUtils.waitForCondition(() -> part.state() == GridDhtPartitionState.LOST, 30_000));

    assertTrue(g1.cachex(DEFAULT_CACHE_NAME).lostPartitions().contains(partId));

    if (mode != 0) {
        stopAllGrids();

        if (mode == 1) {
            crd = startGrids(cnt);

            crd.cluster().active(true);
        }
        else if (mode == 2) {
            crd = startGrid(idx1);

            crd.cluster().active(true);

            startGrid(idx0);
        }
        else
            fail("Mode: " + mode);

        awaitPartitionMapExchange();

        assertPartitionsSame(idleVerify(crd, DEFAULT_CACHE_NAME));
    }
    else {
        // Will own a clearing partition.
        g1.resetLostPartitions(Collections.singletonList(DEFAULT_CACHE_NAME));

        awaitPartitionMapExchange();

        // Expecting partition in OWNING state.
        final PartitionUpdateCounter cntr = counter(partId, DEFAULT_CACHE_NAME, g1.name());

        assertNotNull(cntr);

        // Counter must be reset.
        assertEquals(0, cntr.get());

        // Puts done concurrently with clearing after reset should not be lost.
        g1.cache(DEFAULT_CACHE_NAME).putAll(keys.stream().collect(Collectors.toMap(k -> k, v -> -1)));

        GridTestUtils.waitForCondition(() -> g1.context().cache().context().evict().total() == 0, 30_000);

        for (Integer key : keys)
            assertEquals("key=" + key, -1, g1.cache(DEFAULT_CACHE_NAME).get(key));
    }
}
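The counter(...) helper used throughout these tests comes from Ignite's test framework; the two-argument overloads seen further down presumably default the cache name. A minimal sketch of what such a helper could look like, reusing only calls already shown on this page (the actual implementation may differ):

@Nullable private static PartitionUpdateCounter counter(int partId, String cacheName, String gridName) {
    IgniteEx node = (IgniteEx)Ignition.ignite(gridName);

    // Resolve the local partition on the given node and return its update counter, if any.
    GridDhtLocalPartition part = node.cachex(cacheName).context().topology().localPartition(partId);

    return part == null ? null : part.dataStore().partUpdateCounter();
}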
use of org.apache.ignite.internal.processors.cache.PartitionUpdateCounter in project ignite by apache.
the class TxPartitionCounterStateConsistencyTest method testPartitionConsistencyDuringRebalanceAndConcurrentUpdates_TxDuringPME.
/**
 * Tests tx load concurrent with a PME that does not change the tx topology.
 * In such a scenario a race is possible between tx updates and the counters set during PME.
 * Outdated counters received on PME should be ignored.
 */
@Test
public void testPartitionConsistencyDuringRebalanceAndConcurrentUpdates_TxDuringPME() throws Exception {
    backups = 2;

    Ignite crd = startGrid(0);
    startGrid(1);
    startGrid(2);

    crd.cluster().active(true);

    Ignite client = startClientGrid(CLIENT_GRID_NAME);

    IgniteCache<Object, Object> cache = client.cache(DEFAULT_CACHE_NAME);

    // Put one key per partition.
    try (IgniteDataStreamer<Object, Object> streamer = client.dataStreamer(DEFAULT_CACHE_NAME)) {
        for (int k = 0; k < partitions(); k++)
            streamer.addData(k, 0);
    }

    Integer key0 = primaryKey(grid(1).cache(DEFAULT_CACHE_NAME));
    Integer key = primaryKey(grid(0).cache(DEFAULT_CACHE_NAME));

    TestRecordingCommunicationSpi crdSpi = TestRecordingCommunicationSpi.spi(crd);

    // Hold full messages sent by the coordinator to keep PME from completing.
    crdSpi.blockMessages((node, message) -> {
        if (message instanceof GridDhtPartitionsFullMessage) {
            GridDhtPartitionsFullMessage tmp = (GridDhtPartitionsFullMessage)message;

            return tmp.exchangeId() != null;
        }

        return false;
    });

    // Latch to delay the new node start until locks are mapped.
    CountDownLatch l = new CountDownLatch(1);

    IgniteInternalFuture startNodeFut = GridTestUtils.runAsync(() -> {
        U.awaitQuiet(l);

        try {
            // Start node out of BLT.
            startGrid(SERVER_NODES);
        }
        catch (Exception e) {
            fail(X.getFullStackTrace(e));
        }
    });

    TestRecordingCommunicationSpi cliSpi = TestRecordingCommunicationSpi.spi(client);

    cliSpi.blockMessages((node, message) -> {
        // Block second lock map req.
        return message instanceof GridNearLockRequest && node.order() == crd.cluster().localNode().order();
    });

    IgniteInternalFuture txFut = GridTestUtils.runAsync(() -> {
        try (Transaction tx = client.transactions().txStart()) {
            Map<Integer, Integer> map = new LinkedHashMap<>();

            map.put(key, key); // clientFirst=true in lockAll.
            map.put(key0, key0); // clientFirst=false in lockAll.

            cache.putAll(map);

            tx.commit(); // Will start preparing in the middle of PME.
        }
    });

    IgniteInternalFuture lockFut = GridTestUtils.runAsync(() -> {
        try {
            // Wait until the first lock request is blocked, then trigger PME.
            cliSpi.waitForBlocked();

            l.countDown();

            // Block PME after finish on crd and wait on others.
            crdSpi.waitForBlocked();

            // Start remote lock mapping.
            cliSpi.stopBlock();
        }
        catch (InterruptedException e) {
            fail();
        }
    });

    lockFut.get();

    crdSpi.stopBlock();

    txFut.get();
    startNodeFut.get();

    awaitPartitionMapExchange();

    assertPartitionsSame(idleVerify(crd, DEFAULT_CACHE_NAME));

    // Expect correct reservation counters.
    PartitionUpdateCounter cntr = counter(key, grid(0).name());

    assertNotNull(cntr);

    assertEquals(cntr.toString(), 2, cntr.reserved());
}
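Both tests above drive the same block/wait/release choreography with TestRecordingCommunicationSpi. A condensed sketch of the pattern, assuming a node ignite already configured with the SPI and a test method that declares throws Exception:

TestRecordingCommunicationSpi spi = TestRecordingCommunicationSpi.spi(ignite);

// 1. Install a predicate: matching outbound messages are held instead of being sent.
spi.blockMessages((node, msg) -> msg instanceof GridDhtPartitionsFullMessage);

// 2. Trigger the operation under test, then wait until a matching message is held.
spi.waitForBlocked();

// ... make assertions or run concurrent operations while the message is in flight ...

// 3. Release the held messages and stop further blocking.
spi.stopBlock();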
use of org.apache.ignite.internal.processors.cache.PartitionUpdateCounter in project ignite by apache.
the class TxPartitionCounterStateWithFilterTest method testAssignCountersInTxWithFilter.
/**
 * Tests that a filtered operation (getAndPutIfAbsent on an existing key) does not consume an update counter:
 * two committed writes must produce a counter value of 2 on every owning node.
 */
@Test
public void testAssignCountersInTxWithFilter() {
    for (Ignite ig : G.allGrids()) {
        for (TransactionConcurrency concurrency : TransactionConcurrency.values()) {
            for (TransactionIsolation isolation : TransactionIsolation.values()) {
                try {
                    ignite(0).createCache(cacheConfiguration(cacheMode, backups, CacheAtomicityMode.TRANSACTIONAL));

                    IgniteCache<Integer, Integer> cache = ig.cache(DEFAULT_CACHE_NAME);

                    assertNotNull(cache);

                    int partId = 0;

                    List<Integer> keys = partitionKeys(cache, partId, 2, 0);

                    int key = keys.get(0), val = 0;

                    if (!sameTx)
                        cache.put(key, val);

                    try (Transaction tx = ig.transactions().txStart(concurrency, isolation)) {
                        if (sameTx)
                            cache.put(key, val);

                        // Filtered operation: the key already exists, so nothing is written.
                        Object prev = cache.getAndPutIfAbsent(key, val + 1);

                        assertNotNull(prev);

                        cache.put(keys.get(1), val);

                        tx.commit();
                    }

                    assertEquals(Integer.valueOf(val), cache.get(key));
                    assertEquals(Integer.valueOf(val), cache.get(keys.get(1)));

                    for (Ignite ignite : G.allGrids()) {
                        if (ignite.configuration().isClientMode())
                            continue;

                        PartitionUpdateCounter cntr = counter(partId, ignite.name());

                        if (cntr != null)
                            assertEquals("Expecting counter for node=" + ignite.name(), 2, cntr.get());
                    }
                }
                finally {
                    ignite(0).destroyCache(DEFAULT_CACHE_NAME);
                }
            }
        }
    }
}
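The expected counter value of 2 follows from which operations actually write: a filtered operation such as getAndPutIfAbsent on an existing key assigns no counter. A schematic illustration, assuming a transactional cache and a hypothetical otherKeyInSamePartition mapping to the same partition as key:

cache.put(key, 0);                     // 1st write -> partition counter becomes 1.
cache.getAndPutIfAbsent(key, 1);       // Key exists: operation is filtered, counter unchanged.
cache.put(otherKeyInSamePartition, 0); // 2nd write -> partition counter becomes 2.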
use of org.apache.ignite.internal.processors.cache.PartitionUpdateCounter in project ignite by apache.
the class TxPartitionCounterStateOnePrimaryOneBackupTest method doTestPrepareCommitReorder.
/**
 * Reproduces the problem: if a partition was marked for clearing on rebalance, it must never be selected for
 * historical rebalance, otherwise only partial data defined by the counter range will be loaded.
 *
 * @param skipCheckpoint Skip checkpoint.
 * @throws Exception If failed.
 */
private void doTestPrepareCommitReorder(boolean skipCheckpoint) throws Exception {
    T2<Ignite, List<Ignite>> txTop = runTest(skipCheckpoint).get(PARTITION_ID);

    waitForTopology(SERVERS_CNT);

    awaitPartitionMapExchange();

    IgniteEx client = grid(CLIENT_GRID_NAME);

    assertEquals("Primary does not have all committed transactions", TOTAL, client.cache(DEFAULT_CACHE_NAME).size());

    TestRecordingCommunicationSpi.stopBlockAll();

    String primaryName = txTop.get1().name();
    String backupName = txTop.get2().get(0).name();

    IgniteEx primary = startGrid(primaryName);

    awaitPartitionMapExchange();

    assertPartitionsSame(idleVerify(client, DEFAULT_CACHE_NAME));

    // Check if gaps are closed on rebalance.
    PartitionUpdateCounter cntr = counter(PARTITION_ID, primary.name());

    assertNotNull(cntr);

    assertTrue(cntr.sequential());
    assertEquals(TOTAL, cntr.get());

    stopGrid(backupName);

    awaitPartitionMapExchange();

    assertNotNull(cntr = counter(PARTITION_ID, primary.name()));

    assertEquals(TOTAL, cntr.reserved());

    // Make an update to advance the counter.
    int addCnt = 10;

    loadDataToPartition(PARTITION_ID, primaryName, DEFAULT_CACHE_NAME, addCnt, TOTAL);

    // TODO https://issues.apache.org/jira/browse/IGNITE-11607
    // Historical rebalance is not possible from history containing rebalanced entries.
    // Next rebalance will be full.
    IgniteEx grid0 = startGrid(backupName);

    awaitPartitionMapExchange();

    assertNotNull(cntr = counter(PARTITION_ID, grid0.name()));

    assertEquals(TOTAL + addCnt, cntr.get());
    assertEquals(TOTAL + addCnt, cntr.reserved());

    assertPartitionsSame(idleVerify(client, DEFAULT_CACHE_NAME));
}
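Taken together, these tests exercise a small surface of PartitionUpdateCounter. A summary of the accessors as used on this page; the descriptions reflect the semantics implied by the assertions above, not an authoritative API reference:

long applied = cntr.get();          // Current applied update counter (reset to 0 after resetLostPartitions).
long reserved = cntr.reserved();    // Reserved counter high-water mark, checked after tx reordering.
boolean noGaps = cntr.sequential(); // True when no update gaps remain, e.g. after gaps are closed on rebalance.
PartitionUpdateCounter copy = cntr.copy(); // Detached copy, safe to hold in a snapshot for later comparison.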