Use of org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ in project ignite by apache.
From the class TxRollbackOnTimeoutTest, the method testRollbackOnNearNodeLeft:
/**
* Tests that transactions started from a near node that has left the topology are rolled back on the server nodes.
*
* @throws Exception If failed.
*/
@Test
public void testRollbackOnNearNodeLeft() throws Exception {
    Ignite client = startClient();
    Integer pk0 = primaryKey(grid(0).cache(CACHE_NAME));
    Integer pk1 = primaryKey(grid(1).cache(CACHE_NAME));
    CountDownLatch locked = new CountDownLatch(1);
    CountDownLatch blocked = new CountDownLatch(1);
    IgniteInternalFuture<Void> fut = runAsync(new Callable<Void>() {
        @Override
        public Void call() {
            try (Transaction tx0 = client.transactions().txStart()) {
                client.cache(CACHE_NAME).put(pk0, 0);
                locked.countDown();
                awaitQuiet(blocked);
                tx0.commit();
            } catch (Exception ignored) {
                // No-op.
            }
            return null;
        }
    });
    IgniteInternalFuture fut2 = runAsync(new Runnable() {
        @Override
        public void run() {
            try (Transaction tx1 = client.transactions().txStart(PESSIMISTIC, REPEATABLE_READ, 1000, 0)) {
                awaitQuiet(locked);
                client.cache(CACHE_NAME).put(pk1, 1);
                spi(client).blockMessages((node, msg) -> msg instanceof GridNearTxFinishRequest);
                spi(grid(0)).blockMessages((node, msg) -> msg instanceof GridNearLockResponse);
                client.cache(CACHE_NAME).put(pk0, 1);
                fail();
            } catch (Exception e) {
                assertTrue(X.hasCause(e, TransactionTimeoutException.class));
            }
        }
    });
    spi(client).waitForBlocked();
    spi(grid(0)).waitForBlocked();
    fut2.get();
    client.close();
    spi(grid(0)).stopBlock();
    blocked.countDown();
    fut.get();
    for (int i = 0; i < GRID_CNT; i++)
        assertTrue(grid(i).context().cache().context().tm().activeTransactions().isEmpty());
}
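The pattern this test exercises against the public API is a pessimistic REPEATABLE_READ transaction with an explicit timeout: if a key lock cannot be acquired before the timeout expires, the enlisting operation fails with a TransactionTimeoutException somewhere in the cause chain. Below is a minimal standalone sketch of that pattern; the class name, the hasCause helper and the cache/key parameters are illustrative, not taken from the test above.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.transactions.Transaction;
import org.apache.ignite.transactions.TransactionTimeoutException;

import static org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC;
import static org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ;

public class TxTimeoutSketch {
    /** Tries to update a key inside a 1-second pessimistic tx; returns false on lock-wait timeout. */
    public static boolean putWithTimeout(Ignite ignite, IgniteCache<Integer, Integer> cache, int key) {
        try (Transaction tx = ignite.transactions().txStart(PESSIMISTIC, REPEATABLE_READ, 1_000, 0)) {
            cache.put(key, 1); // Acquires the key lock or times out.

            tx.commit();

            return true;
        }
        catch (Exception e) {
            if (hasCause(e, TransactionTimeoutException.class))
                return false; // Lock wait exceeded the configured timeout.

            throw new RuntimeException(e);
        }
    }

    /** Illustrative stand-in for X.hasCause(): walks the cause chain looking for the given type. */
    private static boolean hasCause(Throwable t, Class<? extends Throwable> cls) {
        for (Throwable c = t; c != null; c = c.getCause()) {
            if (cls.isInstance(c))
                return true;
        }

        return false;
    }
}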
Use of org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ in project ignite by apache.
From the class CacheMvccTxRecoveryTest, the method checkRecoveryNearFailure:
/**
*/
private void checkRecoveryNearFailure(TxEndResult endRes, NodeMode nearNodeMode) throws Exception {
    int gridCnt = 4;
    int baseCnt = gridCnt - 1;
    boolean commit = endRes == COMMIT;
    startGridsMultiThreaded(baseCnt);
    // Start the near node either as a client or as a server, depending on the test mode.
    client = nearNodeMode == CLIENT;
    IgniteEx nearNode = startGrid(baseCnt);
    IgniteCache<Object, Object> cache = nearNode.getOrCreateCache(basicCcfg().setBackups(1));
    Affinity<Object> aff = nearNode.affinity(DEFAULT_CACHE_NAME);
    List<Integer> keys = new ArrayList<>();
    for (int i = 0; i < 100; i++) {
        if (aff.isPrimary(grid(0).localNode(), i) && aff.isBackup(grid(1).localNode(), i)) {
            keys.add(i);
            break;
        }
    }
    for (int i = 0; i < 100; i++) {
        if (aff.isPrimary(grid(1).localNode(), i) && aff.isBackup(grid(2).localNode(), i)) {
            keys.add(i);
            break;
        }
    }
    assert keys.size() == 2;
    TestRecordingCommunicationSpi nearComm = (TestRecordingCommunicationSpi) nearNode.configuration().getCommunicationSpi();
    if (!commit)
        nearComm.blockMessages(GridNearTxPrepareRequest.class, grid(1).name());
    GridTestUtils.runAsync(() -> {
        // Run in a separate thread to exclude the tx from the thread-local map.
        GridNearTxLocal nearTx = ((TransactionProxyImpl) nearNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)).tx();
        for (Integer k : keys)
            cache.query(new SqlFieldsQuery("insert into Integer(_key, _val) values(?, 42)").setArgs(k));
        List<IgniteInternalTx> txs = IntStream.range(0, baseCnt)
            .mapToObj(i -> txsOnNode(grid(i), nearTx.xidVersion()))
            .flatMap(Collection::stream)
            .collect(Collectors.toList());
        IgniteInternalFuture<?> prepareFut = nearTx.prepareNearTxLocal();
        if (commit)
            prepareFut.get();
        else
            assertConditionEventually(() -> txs.stream().anyMatch(tx -> tx.state() == PREPARED));
        // Drop the near node.
        nearNode.close();
        assertConditionEventually(() -> txs.stream().allMatch(tx -> tx.state() == (commit ? COMMITTED : ROLLED_BACK)));
        return null;
    }).get();
    if (commit) {
        assertConditionEventually(() -> {
            int rowsCnt = grid(0).cache(DEFAULT_CACHE_NAME).query(new SqlFieldsQuery("select * from Integer")).getAll().size();
            return rowsCnt == keys.size();
        });
    } else {
        int rowsCnt = G.allGrids().get(0).cache(DEFAULT_CACHE_NAME).query(new SqlFieldsQuery("select * from Integer")).getAll().size();
        assertEquals(0, rowsCnt);
    }
    assertPartitionCountersAreConsistent(keys, grids(baseCnt, i -> true));
}
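The key-selection loops at the start of this method are a reusable idiom: candidate keys are probed through the cache Affinity API until one maps to the desired primary and backup nodes. A hedged sketch of that idiom extracted into a standalone helper follows; the helper name and the probing range are illustrative, not part of Ignite.

import org.apache.ignite.Ignite;
import org.apache.ignite.cache.affinity.Affinity;
import org.apache.ignite.cluster.ClusterNode;

public class AffinityKeySketch {
    /** Returns the first small integer key whose primary/backup mapping matches the given nodes, or null. */
    public static Integer findKeyMappedTo(Ignite ignite, String cacheName, ClusterNode primary, ClusterNode backup) {
        Affinity<Object> aff = ignite.affinity(cacheName);

        for (int key = 0; key < 10_000; key++) {
            if (aff.isPrimary(primary, key) && aff.isBackup(backup, key))
                return key;
        }

        return null; // No suitable key in the probed range.
    }
}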
Use of org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ in project ignite by apache.
From the class CacheMvccTxRecoveryTest, the method testCountersNeighborcastServerFailed:
/**
* @throws Exception if failed.
*/
@Test
public void testCountersNeighborcastServerFailed() throws Exception {
    // Reopen https://issues.apache.org/jira/browse/IGNITE-10766 if this test starts failing.
    int srvCnt = 4;
    startGridsMultiThreaded(srvCnt);
    client = true;
    IgniteEx ign = startGrid(srvCnt);
    IgniteCache<Object, Object> cache = ign.getOrCreateCache(basicCcfg().setBackups(2));
    ArrayList<Integer> keys = new ArrayList<>();
    int vid = 3;
    IgniteEx victim = grid(vid);
    Affinity<Object> aff = ign.affinity(DEFAULT_CACHE_NAME);
    for (int i = 0; i < 100; i++) {
        if (aff.isPrimary(victim.localNode(), i) && !aff.isBackup(grid(0).localNode(), i)) {
            keys.add(i);
            break;
        }
    }
    for (int i = 0; i < 100; i++) {
        if (aff.isPrimary(victim.localNode(), i) && !aff.isBackup(grid(1).localNode(), i)) {
            keys.add(i);
            break;
        }
    }
    assert keys.size() == 2 && !keys.contains(99);
    // Prevent prepare on one backup.
    ((TestRecordingCommunicationSpi) victim.configuration().getCommunicationSpi()).blockMessages(GridDhtTxPrepareRequest.class, grid(0).name());
    GridNearTxLocal nearTx = ((TransactionProxyImpl) ign.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)).tx();
    for (Integer k : keys)
        cache.query(new SqlFieldsQuery("insert into Integer(_key, _val) values(?, 42)").setArgs(k));
    List<IgniteInternalTx> txs = IntStream.range(0, srvCnt)
        .mapToObj(this::grid)
        .filter(g -> g != victim)
        .map(g -> txsOnNode(g, nearTx.xidVersion()))
        .flatMap(Collection::stream)
        .collect(Collectors.toList());
    nearTx.commitAsync();
    // Await the tx being partially prepared.
    assertConditionEventually(() -> txs.stream().anyMatch(tx -> tx.state() == PREPARED));
    CountDownLatch latch1 = new CountDownLatch(1);
    CountDownLatch latch2 = new CountDownLatch(1);
    IgniteInternalFuture<Object> backgroundTxFut = GridTestUtils.runAsync(() -> {
        try (Transaction ignored = ign.transactions().txStart()) {
            boolean upd = false;
            for (int i = 100; i < 200; i++) {
                if (!aff.isPrimary(victim.localNode(), i)) {
                    cache.put(i, 11);
                    upd = true;
                    break;
                }
            }
            assert upd;
            latch1.countDown();
            latch2.await(getTestTimeout(), TimeUnit.MILLISECONDS);
        }
        return null;
    });
    latch1.await(getTestTimeout(), TimeUnit.MILLISECONDS);
    // Drop the primary node.
    victim.close();
    // Do all assertions before rebalance kicks in.
    assertConditionEventually(() -> txs.stream().allMatch(tx -> tx.state() == ROLLED_BACK));
    List<IgniteEx> liveNodes = grids(srvCnt, i -> i != vid);
    assertPartitionCountersAreConsistent(keys, liveNodes);
    latch2.countDown();
    backgroundTxFut.get(getTestTimeout());
    assertTrue(liveNodes.stream()
        .map(node -> node.cache(DEFAULT_CACHE_NAME).query(new SqlFieldsQuery("select * from Integer")).getAll())
        .allMatch(Collection::isEmpty));
}
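A recurring technique here is holding back a specific message class with TestRecordingCommunicationSpi so that one backup never receives the prepare request. The SPI and the message class belong to Ignite's internal test framework rather than the public API, and their package locations may differ between versions; the sketch below only shows the shape of the idiom, and the helper and parameter names are ours.

import org.apache.ignite.Ignite;
import org.apache.ignite.internal.TestRecordingCommunicationSpi;
import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxPrepareRequest;

public class BlockPrepareSketch {
    /** Runs 'action' while DHT prepare requests from 'node' to 'dstNodeName' are held back, then releases them. */
    public static void withBlockedPrepare(Ignite node, String dstNodeName, Runnable action) throws InterruptedException {
        // Assumes TestRecordingCommunicationSpi was configured as the node's communication SPI.
        TestRecordingCommunicationSpi spi = (TestRecordingCommunicationSpi) node.configuration().getCommunicationSpi();

        spi.blockMessages(GridDhtTxPrepareRequest.class, dstNodeName);

        try {
            action.run(); // Work that should observe a partially prepared transaction.

            spi.waitForBlocked(); // At least one prepare request was actually intercepted.
        }
        finally {
            spi.stopBlock(); // Deliver the held messages and stop filtering.
        }
    }
}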
Use of org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ in project ignite by apache.
From the class CacheMvccBasicContinuousQueryTest, the method checkUpdateCountersGapIsProcessedSimple:
/**
* @throws Exception if failed.
*/
private void checkUpdateCountersGapIsProcessedSimple(CacheMode cacheMode) throws Exception {
    testSpi = true;
    final int srvCnt = 4;
    final int backups = srvCnt - 1;
    startGridsMultiThreaded(srvCnt);
    client = true;
    IgniteEx nearNode = startGrid(srvCnt);
    IgniteCache<Object, Object> cache = nearNode.createCache(cacheConfiguration(cacheMode, FULL_SYNC, backups, srvCnt).setIndexedTypes(Integer.class, Integer.class));
    IgniteEx primary = grid(0);
    List<Integer> keys = primaryKeys(primary.cache(DEFAULT_CACHE_NAME), 3);
    ContinuousQuery<Integer, Integer> qry = new ContinuousQuery<>();
    List<CacheEntryEvent> arrivedEvts = new ArrayList<>();
    CountDownLatch latch = new CountDownLatch(2);
    qry.setLocalListener(new CacheEntryUpdatedListener<Integer, Integer>() {
        @Override
        public void onUpdated(Iterable<CacheEntryEvent<? extends Integer, ? extends Integer>> evts) {
            for (CacheEntryEvent e : evts) {
                arrivedEvts.add(e);
                latch.countDown();
            }
        }
    });
    QueryCursor<Cache.Entry<Integer, Integer>> cur = nearNode.cache(DEFAULT_CACHE_NAME).query(qry);
    // Initial value.
    cache.query(new SqlFieldsQuery("insert into Integer(_key, _val) values(?, 42)").setArgs(keys.get(0))).getAll();
    // Prevent the first transaction from preparing on the backups.
    TestRecordingCommunicationSpi spi = TestRecordingCommunicationSpi.spi(primary);
    final AtomicInteger dhtPrepMsgLimiter = new AtomicInteger();
    spi.blockMessages(new IgniteBiPredicate<ClusterNode, Message>() {
        @Override
        public boolean apply(ClusterNode node, Message msg) {
            if (msg instanceof GridDhtTxPrepareRequest)
                return dhtPrepMsgLimiter.getAndIncrement() < backups;
            if (msg instanceof GridContinuousMessage)
                return true;
            return false;
        }
    });
    // First tx. Expect it will be prepared only on the primary node and GridDhtTxPrepareRequests to remotes
    // will be swallowed.
    Transaction txA = nearNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ);
    cache.query(new SqlFieldsQuery("insert into Integer(_key, _val) values(?, 42)").setArgs(keys.get(1))).getAll();
    txA.commitAsync();
    // Wait until the first tx changes its status to PREPARING.
    GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override
        public boolean apply() {
            boolean preparing = nearNode.context().cache().context().tm().activeTransactions().stream().allMatch(tx -> tx.state() == PREPARING);
            boolean allPrepsSwallowed = dhtPrepMsgLimiter.get() == backups;
            return preparing && allPrepsSwallowed;
        }
    }, 3_000);
    // Second tx.
    GridTestUtils.runAsync(() -> {
        try (Transaction txB = nearNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
            cache.query(new SqlFieldsQuery("insert into Integer(_key, _val) values(?, 42)").setArgs(keys.get(2)));
            txB.commit();
        }
    }).get();
    long primaryUpdCntr = getUpdateCounter(primary, keys.get(0));
    // There were three updates: the initial insert, the first tx and the second tx.
    assertEquals(3, primaryUpdCntr);
    // Drop the primary node.
    stopGrid(primary.name());
    // Wait until all txs are rolled back.
    GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override
        public boolean apply() {
            boolean allRolledBack = true;
            for (int i = 1; i < srvCnt; i++) {
                boolean rolledBack = grid(i).context().cache().context().tm().activeTransactions().stream().allMatch(tx -> tx.state() == ROLLED_BACK);
                allRolledBack &= rolledBack;
            }
            return allRolledBack;
        }
    }, 3_000);
    for (int i = 1; i < srvCnt; i++) {
        IgniteCache backupCache = grid(i).cache(DEFAULT_CACHE_NAME);
        int size = backupCache.query(new SqlFieldsQuery("select * from Integer")).getAll().size();
        long backupCntr = getUpdateCounter(grid(i), keys.get(0));
        assertEquals(2, size);
        assertEquals(primaryUpdCntr, backupCntr);
    }
    assertTrue(latch.await(3, SECONDS));
    assertEquals(2, arrivedEvts.size());
    assertEquals(keys.get(0), arrivedEvts.get(0).getKey());
    assertEquals(keys.get(2), arrivedEvts.get(1).getKey());
    cur.close();
    nearNode.close();
}
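The continuous-query wiring used above (a ContinuousQuery with a local listener that counts arrived events on a latch) is plain public API and works outside the test framework as well. A minimal sketch follows; the class and method names, the value type and the timeout handling are illustrative.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import javax.cache.event.CacheEntryEvent;
import javax.cache.event.CacheEntryUpdatedListener;

import org.apache.ignite.IgniteCache;
import org.apache.ignite.cache.query.ContinuousQuery;
import org.apache.ignite.cache.query.QueryCursor;

public class ContinuousQuerySketch {
    /** Waits until 'expUpdates' update notifications arrive for the cache, or the timeout elapses. */
    public static boolean awaitUpdates(IgniteCache<Integer, Integer> cache, int expUpdates, long timeoutMs)
        throws InterruptedException {
        CountDownLatch latch = new CountDownLatch(expUpdates);

        ContinuousQuery<Integer, Integer> qry = new ContinuousQuery<>();

        qry.setLocalListener(new CacheEntryUpdatedListener<Integer, Integer>() {
            @Override
            public void onUpdated(Iterable<CacheEntryEvent<? extends Integer, ? extends Integer>> evts) {
                for (CacheEntryEvent<? extends Integer, ? extends Integer> ignored : evts)
                    latch.countDown();
            }
        });

        // The cursor must stay open for notifications to keep arriving; closing it deregisters the listener.
        try (QueryCursor<?> ignored = cache.query(qry)) {
            return latch.await(timeoutMs, TimeUnit.MILLISECONDS);
        }
    }
}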