Use of org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ in project ignite by apache.
The class CacheMvccAbstractBasicCoordinatorFailoverTest, method checkCoordinatorsLeft.
/**
 * @param num Number of coordinators to stop.
 * @param stopCrdFirst {@code True} to stop all coordinators at once, {@code false} to stop the oldest one last.
 * @throws Exception If failed.
 */
@SuppressWarnings("unchecked")
private void checkCoordinatorsLeft(int num, boolean stopCrdFirst) throws Exception {
    disableScheduledVacuum = true;

    final int DATA_NODES = 3;
    final int NODES = num + DATA_NODES;

    nodeAttr = CRD_ATTR;

    // Do not use startMultithreaded here.
    startGrids(num);

    nodeAttr = null;

    startGridsMultiThreaded(num, DATA_NODES);

    List<Ignite> victims = new ArrayList<>(num);
    List<Ignite> survivors = new ArrayList<>(DATA_NODES);

    for (int i = 0; i < NODES; i++) {
        if (i < num)
            victims.add(grid(i));
        else
            survivors.add(grid(i));
    }

    if (log.isInfoEnabled()) {
        log.info("Nodes to be stopped [" +
            victims.stream().map(n -> n.cluster().localNode().id().toString()).collect(Collectors.joining(", ")) + ']');
        log.info("Nodes not to be stopped [" +
            survivors.stream().map(n -> n.cluster().localNode().id().toString()).collect(Collectors.joining(", ")) + ']');
    }

    Ignite nearNode = survivors.get(0);

    if (persistence)
        nearNode.cluster().active(true);

    CacheConfiguration ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, DATA_NODES - 1, DFLT_PARTITION_COUNT)
        .setNodeFilter(new CoordinatorNodeFilter());

    IgniteCache cache = nearNode.createCache(ccfg);

    try (Transaction tx = nearNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
        for (int key = 0; key < 10; key++)
            cache.put(key, 1);

        tx.commit();
    }

    List<Thread> stopThreads = victims.stream()
        .map(v -> new Thread(() -> stopGrid(v.name())))
        .collect(Collectors.toList());

    // Open a cursor before the coordinators fail: it must keep reading from the same MVCC snapshot afterwards.
    ScanQuery<Object, Object> scan = new ScanQuery<>();

    QueryCursor<Cache.Entry<Object, Object>> cur = survivors.get(0).cache(DEFAULT_CACHE_NAME).query(scan);

    Iterator<Cache.Entry<Object, Object>> it = cur.iterator();

    assertTrue(it.hasNext());
    assertEquals(1, it.next().getValue());

    if (log.isInfoEnabled())
        log.info("Start stopping nodes.");

    // Stop nodes and join threads.
    if (stopCrdFirst) {
        for (Thread t : stopThreads)
            t.start();
    }
    else {
        // We should stop the oldest node last.
        GridCachePartitionExchangeManager exch = ((IgniteEx)survivors.get(1)).context().cache().context().exchange();

        GridDhtTopologyFuture lastFinished = exch.lastFinishedFuture();

        for (int i = 1; i < stopThreads.size(); i++)
            stopThreads.get(i).start();

        // Wait until a new exchange is triggered before stopping the oldest victim.
        while (lastFinished == exch.lastTopologyFuture())
            doSleep(1);

        stopThreads.get(0).start();
    }

    for (Thread t : stopThreads)
        t.join();

    if (log.isInfoEnabled())
        log.info("All nodes stopped.");

    // The cursor opened before the failover still returns the snapshot values.
    assertTrue(it.hasNext());
    assertEquals(1, it.next().getValue());

    for (Ignite node : survivors) {
        for (int key = 0; key < 10; key++)
            assertEquals(1, node.cache(DEFAULT_CACHE_NAME).get(key));
    }

    try (Transaction tx = nearNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
        for (int key = 0; key < 10; key++)
            cache.put(key, 2);

        tx.commit();
    }
    catch (Exception e) {
        stopAllGrids(true);

        fail(X.getFullStackTrace(e));
    }

    for (Ignite node : survivors) {
        for (int key = 0; key < 10; key++)
            assertEquals(2, node.cache(DEFAULT_CACHE_NAME).get(key));
    }

    try (Transaction tx = nearNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
        for (int key = 0; key < 10; key++)
            cache.put(key, 3);

        tx.commit();
    }
    catch (Exception e) {
        stopAllGrids(true);

        fail(X.getFullStackTrace(e));
    }

    for (Ignite node : survivors) {
        for (int key = 0; key < 10; key++)
            assertEquals(3, node.cache(DEFAULT_CACHE_NAME).get(key));
    }

    // Despite the later updates, the old cursor still sees only the initial value.
    while (it.hasNext())
        assertEquals(1, (int)it.next().getValue());

    cur.close();
}
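For orientation, here is a sketch of how such a helper is typically driven from concrete test methods. The test names and arguments below are illustrative assumptions, not necessarily the ones used in the Ignite suite:

/** Hypothetical caller: stop a single coordinator while the data nodes keep working. */
@Test
public void testStopSingleCoordinator() throws Exception {
    checkCoordinatorsLeft(1, true);
}

/** Hypothetical caller: stop two coordinators, the oldest node last. */
@Test
public void testStopCoordinatorsOldestLast() throws Exception {
    checkCoordinatorsLeft(2, false);
}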
Use of org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ in project ignite by apache.
The class CacheMvccBasicContinuousQueryTest, method checkUpdateCountersGapsClosed.
/**
 * @param cacheMode Cache mode.
 * @throws Exception If failed.
 */
private void checkUpdateCountersGapsClosed(CacheMode cacheMode) throws Exception {
    testSpi = true;

    int srvCnt = 4;

    startGridsMultiThreaded(srvCnt);

    IgniteEx nearNode = grid(srvCnt - 1);

    IgniteCache<Object, Object> cache = nearNode.createCache(
        cacheConfiguration(cacheMode, FULL_SYNC, srvCnt - 1, srvCnt)
            .setIndexedTypes(Integer.class, Integer.class));

    IgniteEx primary = grid(0);

    Affinity<Object> aff = nearNode.affinity(cache.getName());

    int[] nearBackupParts = aff.backupPartitions(nearNode.localNode());
    int[] primaryParts = aff.primaryPartitions(primary.localNode());

    Collection<Integer> nearSet = new HashSet<>();

    for (int part : nearBackupParts)
        nearSet.add(part);

    Collection<Integer> primarySet = new HashSet<>();

    for (int part : primaryParts)
        primarySet.add(part);

    // We need backup partitions on the near node.
    nearSet.retainAll(primarySet);

    // Take keys from a partition that is primary on the first node and backed up on the near node.
    List<Integer> keys = singlePartKeys(primary.cache(DEFAULT_CACHE_NAME), 20, nearSet.iterator().next());

    int range = 3;

    ContinuousQuery<Integer, Integer> qry = new ContinuousQuery<>();

    List<CacheEntryEvent> arrivedEvts = new ArrayList<>();

    CountDownLatch latch = new CountDownLatch(range * 2);

    qry.setLocalListener(new CacheEntryUpdatedListener<Integer, Integer>() {
        @Override public void onUpdated(Iterable<CacheEntryEvent<? extends Integer, ? extends Integer>> evts) {
            for (CacheEntryEvent e : evts) {
                arrivedEvts.add(e);

                latch.countDown();
            }
        }
    });

    QueryCursor<Cache.Entry<Integer, Integer>> cur = nearNode.cache(DEFAULT_CACHE_NAME).query(qry);

    // Prevent the first transaction from preparing on backups.
    TestRecordingCommunicationSpi spi = TestRecordingCommunicationSpi.spi(primary);

    spi.blockMessages(new IgniteBiPredicate<ClusterNode, Message>() {
        private final AtomicInteger limiter = new AtomicInteger();

        @Override public boolean apply(ClusterNode node, Message msg) {
            if (msg instanceof GridDhtTxPrepareRequest)
                return limiter.getAndIncrement() < srvCnt - 1;

            return false;
        }
    });

    Transaction txA = primary.transactions().txStart(PESSIMISTIC, REPEATABLE_READ);

    for (int i = 0; i < range; i++)
        primary.cache(DEFAULT_CACHE_NAME).put(keys.get(i), 2);

    txA.commitAsync();

    GridTestUtils.runAsync(() -> {
        try (Transaction tx = primary.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
            for (int i = range; i < range * 2; i++)
                primary.cache(DEFAULT_CACHE_NAME).put(keys.get(i), 1);

            tx.commit();
        }
    }).get();

    GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override public boolean apply() {
            return primary.context().cache().context().tm().activeTransactions().stream()
                .allMatch(tx -> tx.state() == PREPARING);
        }
    }, 3_000);

    GridTestUtils.runAsync(() -> {
        try (Transaction txB = primary.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
            for (int i = range * 2; i < range * 3; i++)
                primary.cache(DEFAULT_CACHE_NAME).put(keys.get(i), 3);

            txB.commit();
        }
    }).get();

    long primaryUpdCntr = getUpdateCounter(primary, keys.get(0));

    assertEquals(range * 3, primaryUpdCntr);

    // Drop the primary node while txA is still stuck in PREPARING.
    stopGrid(primary.name());

    // Wait until all txs are rolled back.
    GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override public boolean apply() {
            boolean allRolledBack = true;

            for (int i = 1; i < srvCnt; i++) {
                boolean rolledBack = grid(i).context().cache().context().tm().activeTransactions().stream()
                    .allMatch(tx -> tx.state() == ROLLED_BACK);

                allRolledBack &= rolledBack;
            }

            return allRolledBack;
        }
    }, 3_000);

    for (int i = 1; i < srvCnt; i++) {
        IgniteCache backupCache = grid(i).cache(DEFAULT_CACHE_NAME);

        int size = backupCache.query(new SqlFieldsQuery("select * from Integer")).getAll().size();

        long backupCntr = getUpdateCounter(grid(i), keys.get(0));

        // Only txA is lost, so backups hold two of the three ranges, yet the counter gap left by txA is closed.
        assertEquals(range * 2, size);
        assertEquals(primaryUpdCntr, backupCntr);
    }

    assertTrue(latch.await(5, SECONDS));

    assertEquals(range * 2, arrivedEvts.size());

    cur.close();
}
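The singlePartKeys helper used above is not shown on this page. A minimal sketch, assuming it collects a requested number of keys that all map to the given partition and that the cache proxy can be unwrapped to its Ignite instance:

/** Plausible sketch of the singlePartKeys helper (assumption: it gathers {@code cnt} keys of one partition). */
private List<Integer> singlePartKeys(IgniteCache<Object, Object> cache, int cnt, int part) {
    // Assumption: the cache proxy supports unwrap(Ignite.class).
    Ignite node = cache.unwrap(Ignite.class);

    Affinity<Object> aff = node.affinity(cache.getName());

    List<Integer> keys = new ArrayList<>(cnt);

    for (int i = 0; keys.size() < cnt; i++) {
        if (aff.partition(i) == part)
            keys.add(i);
    }

    return keys;
}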
Use of org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ in project ignite by apache.
The class CacheMvccTxRecoveryTest, method testUpdateCountersGapIsClosed.
/**
 * @throws Exception If failed.
 */
@Test
public void testUpdateCountersGapIsClosed() throws Exception {
    int srvCnt = 3;

    startGridsMultiThreaded(srvCnt);

    client = true;

    IgniteEx ign = startGrid(srvCnt);

    IgniteCache<Object, Object> cache = ign.getOrCreateCache(basicCcfg().setBackups(2));

    int vid = 1;

    IgniteEx victim = grid(vid);

    ArrayList<Integer> keys = new ArrayList<>();

    Integer part = null;

    Affinity<Object> aff = ign.affinity(DEFAULT_CACHE_NAME);

    // Pick two keys from the same partition for which the victim node is primary.
    for (int i = 0; i < 2000; i++) {
        int p = aff.partition(i);

        if (aff.isPrimary(victim.localNode(), i)) {
            if (part == null)
                part = p;

            if (p == part)
                keys.add(i);

            if (keys.size() == 2)
                break;
        }
    }

    assert keys.size() == 2;

    Transaction txA = ign.transactions().txStart(PESSIMISTIC, REPEATABLE_READ);

    // Prevent the first transaction from preparing on backups.
    ((TestRecordingCommunicationSpi)victim.configuration().getCommunicationSpi())
        .blockMessages(new IgniteBiPredicate<ClusterNode, Message>() {
            final AtomicInteger limiter = new AtomicInteger();

            @Override public boolean apply(ClusterNode node, Message msg) {
                if (msg instanceof GridDhtTxPrepareRequest)
                    return limiter.getAndIncrement() < 2;

                return false;
            }
        });

    cache.query(new SqlFieldsQuery("insert into Integer(_key, _val) values(?, 42)").setArgs(keys.get(0)));

    txA.commitAsync();

    GridCacheVersion aXidVer = ((TransactionProxyImpl)txA).tx().xidVersion();

    assertConditionEventually(() -> txsOnNode(victim, aXidVer).stream().anyMatch(tx -> tx.state() == PREPARING));

    GridTestUtils.runAsync(() -> {
        try (Transaction txB = ign.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
            cache.query(new SqlFieldsQuery("insert into Integer(_key, _val) values(?, 42)").setArgs(keys.get(1)));

            txB.commit();
        }
    }).get();

    long victimUpdCntr = updateCounter(victim.cachex(DEFAULT_CACHE_NAME).context(), keys.get(0));

    List<IgniteEx> backupNodes = grids(srvCnt, i -> i != vid);

    List<IgniteInternalTx> backupTxsA = backupNodes.stream()
        .map(node -> txsOnNode(node, aXidVer))
        .flatMap(Collection::stream)
        .collect(Collectors.toList());

    // Drop the primary node.
    victim.close();

    assertConditionEventually(() -> backupTxsA.stream().allMatch(tx -> tx.state() == ROLLED_BACK));

    backupNodes.stream().map(node -> node.cache(DEFAULT_CACHE_NAME)).forEach(c -> {
        assertEquals(1, c.query(new SqlFieldsQuery("select * from Integer")).getAll().size());
    });

    // The update counter gap left by the rolled-back txA must be closed on every backup.
    backupNodes.forEach(node -> {
        for (Integer k : keys)
            assertEquals(victimUpdCntr, updateCounter(node.cachex(DEFAULT_CACHE_NAME).context(), k));
    });
}
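The assertConditionEventually and txsOnNode helpers referenced above are not part of this excerpt either. Plausible sketches, assuming they wrap GridTestUtils.waitForCondition and the node's transaction manager respectively:

/** Hypothetical helper: fail if the condition does not hold within a fixed timeout. */
private static void assertConditionEventually(GridAbsPredicate cond) throws IgniteInterruptedCheckedException {
    assertTrue(GridTestUtils.waitForCondition(cond, 5_000));
}

/** Hypothetical helper: active transactions on the node that belong to the given near-tx version. */
private static List<IgniteInternalTx> txsOnNode(IgniteEx node, GridCacheVersion xidVer) {
    return node.context().cache().context().tm().activeTransactions().stream()
        .filter(tx -> xidVer.equals(tx.nearXidVersion()))
        .collect(Collectors.toList());
}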
Use of org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ in project ignite by apache.
The class CacheMvccTxRecoveryTest, method checkRecoveryPrimaryFailure.
/**
 * @param endRes Expected transaction outcome.
 * @param mvccCrd {@code True} to fail the node that acts as the MVCC coordinator.
 * @throws Exception If failed.
 */
private void checkRecoveryPrimaryFailure(TxEndResult endRes, boolean mvccCrd) throws Exception {
    int gridCnt = 4;
    int baseCnt = gridCnt - 1;

    boolean commit = endRes == COMMIT;

    startGridsMultiThreaded(baseCnt);

    client = true;

    IgniteEx nearNode = startGrid(baseCnt);

    IgniteCache<Object, Object> cache = nearNode.getOrCreateCache(basicCcfg().setBackups(1));

    Affinity<Object> aff = nearNode.affinity(DEFAULT_CACHE_NAME);

    List<Integer> keys = new ArrayList<>();

    // One key primary on grid(0) with a backup on grid(1), one primary on grid(1) with a backup on grid(2).
    for (int i = 0; i < 100; i++) {
        if (aff.isPrimary(grid(0).localNode(), i) && aff.isBackup(grid(1).localNode(), i)) {
            keys.add(i);

            break;
        }
    }

    for (int i = 0; i < 100; i++) {
        if (aff.isPrimary(grid(1).localNode(), i) && aff.isBackup(grid(2).localNode(), i)) {
            keys.add(i);

            break;
        }
    }

    assert keys.size() == 2;

    int victim, victimBackup;

    if (mvccCrd) {
        victim = 0;
        victimBackup = 1;
    }
    else {
        victim = 1;
        victimBackup = 2;
    }

    TestRecordingCommunicationSpi victimComm = (TestRecordingCommunicationSpi)grid(victim).configuration().getCommunicationSpi();

    if (commit)
        victimComm.blockMessages(GridNearTxFinishResponse.class, nearNode.name());
    else
        victimComm.blockMessages(GridDhtTxPrepareRequest.class, grid(victimBackup).name());

    GridNearTxLocal nearTx = ((TransactionProxyImpl)nearNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)).tx();

    for (Integer k : keys)
        cache.query(new SqlFieldsQuery("insert into Integer(_key, _val) values(?, 42)").setArgs(k));

    List<IgniteInternalTx> txs = IntStream.range(0, baseCnt)
        .filter(i -> i != victim)
        .mapToObj(i -> txsOnNode(grid(i), nearTx.xidVersion()))
        .flatMap(Collection::stream)
        .collect(Collectors.toList());

    IgniteInternalFuture<IgniteInternalTx> commitFut = nearTx.commitAsync();

    if (commit)
        assertConditionEventually(() -> txs.stream().allMatch(tx -> tx.state() == COMMITTED));
    else
        assertConditionEventually(() -> txs.stream().anyMatch(tx -> tx.state() == PREPARED));

    // Drop the victim node.
    grid(victim).close();

    awaitPartitionMapExchange();

    // Recovery must converge the surviving txs to the expected final state.
    assertConditionEventually(() -> txs.stream().allMatch(tx -> tx.state() == (commit ? COMMITTED : ROLLED_BACK)));

    assert victimComm.hasBlockedMessages();

    if (commit) {
        assertConditionEventually(() -> {
            int rowsCnt = G.allGrids().get(0).cache(DEFAULT_CACHE_NAME)
                .query(new SqlFieldsQuery("select * from Integer")).getAll().size();

            return rowsCnt == keys.size();
        });
    }
    else {
        int rowsCnt = G.allGrids().get(0).cache(DEFAULT_CACHE_NAME)
            .query(new SqlFieldsQuery("select * from Integer")).getAll().size();

        assertEquals(0, rowsCnt);
    }

    assertTrue(commitFut.isDone());

    assertPartitionCountersAreConsistent(keys, grids(baseCnt, i -> i != victim));
}
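A sketch of how this helper could be exercised; COMMIT is referenced in the method above, while the ROLLBACK constant and the test names here are illustrative assumptions:

/** Hypothetical caller: primary fails during commit; primary is not the MVCC coordinator. */
@Test
public void testRecoveryCommitPrimaryFailure() throws Exception {
    checkRecoveryPrimaryFailure(COMMIT, false);
}

/** Hypothetical caller: primary fails during rollback; primary is the MVCC coordinator. */
@Test
public void testRecoveryRollbackMvccCrdFailure() throws Exception {
    checkRecoveryPrimaryFailure(ROLLBACK, true);
}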
Use of org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ in project ignite by apache.
The class EvictionPolicyFailureHandlerTest, method testCacheMapDoesNotContainsWrongEntityAfterTransaction.
/**
 * We expect localPeek to produce an exception, while eviction of the entry returns false because the
 * transaction holds a lock on it. After the transaction commits, the entry is evicted.
 *
 * @throws Exception If failed.
 */
@Test
public void testCacheMapDoesNotContainsWrongEntityAfterTransaction() throws Exception {
    LogListener lsnr = LogListener.matches(s -> s.contains("The cache entry cannot be touched")).times(1).build();

    log.registerListener(lsnr);

    IgniteEx node = startGrid(0);
    IgniteEx client = startClientGrid(1);

    GridCacheAdapter<Object, Object> cache = ((IgniteKernal)node).internalCache(DEFAULT_CACHE_NAME);

    cache.put(1, 1);

    CountDownLatch locPeekFinished = new CountDownLatch(1);
    CountDownLatch txStarted = new CountDownLatch(1);
    CountDownLatch txFinished = new CountDownLatch(1);

    GridTestUtils.runAsync(() -> {
        IgniteCache<Object, Object> cache1 = client.cache(DEFAULT_CACHE_NAME);

        IgniteTransactions transactions = client.transactions();

        try (Transaction tx = transactions.txStart(PESSIMISTIC, REPEATABLE_READ)) {
            // Lock the entry so that the eviction attempted by localPeek fails.
            cache1.put(2.1, 2.4);

            txStarted.countDown();

            locPeekFinished.await();

            tx.commit();
        }
        catch (Exception ignore) {
            // No-op.
        }

        txFinished.countDown();
    }, "tx-thread");

    txStarted.await();

    try {
        cache.localPeek(2.1, new CachePeekMode[] {CachePeekMode.ONHEAP});
    }
    catch (Exception ignore) {
        // No-op.
    }

    locPeekFinished.countDown();

    assertTrue(lsnr.check(10_000));

    txFinished.await();

    // After commit the previously locked entry must have been evicted from the cache map.
    assertFalse(cache.map().entrySet(cache.context().cacheId()).stream()
        .anyMatch(e -> Double.valueOf(2.1).equals(e.key().value(null, false))));

    assertEquals(ACTIVE, node.cluster().state());
}
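All of the usages collected on this page share one core pattern. A minimal, self-contained sketch of a pessimistic REPEATABLE_READ transaction; the cache name and configuration are illustrative, not taken from the tests above:

// Minimal sketch: pessimistic REPEATABLE_READ transaction on a transactional cache.
try (Ignite ignite = Ignition.start()) {
    CacheConfiguration<Integer, Integer> ccfg = new CacheConfiguration<Integer, Integer>("tx-cache")
        .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL);

    IgniteCache<Integer, Integer> cache = ignite.getOrCreateCache(ccfg);

    try (Transaction tx = ignite.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
        // In PESSIMISTIC mode the first read acquires the entry lock,
        // so the value cannot change until the transaction finishes.
        Integer prev = cache.get(1);

        cache.put(1, prev == null ? 1 : prev + 1);

        tx.commit();
    }
}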