Use of org.apache.ignite.internal.processors.cache.transactions.TransactionProxyImpl in project ignite by apache.
The class CacheTxFastFinishTest, method checkFastTxFinish.
/**
 * @param tx Transaction.
 * @param commit Commit flag.
 */
protected void checkFastTxFinish(Transaction tx, boolean commit) {
    if (commit)
        tx.commit();
    else
        tx.rollback();

    IgniteInternalTx tx0 = ((TransactionProxyImpl)tx).tx();

    assertNull(prepareFuture(tx0));
    assertTrue(finishFuture(tx0) instanceof GridNearTxFastFinishFuture);
}
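The prepareFuture and finishFuture accessors are defined elsewhere in CacheTxFastFinishTest and are not shown in this snippet; they read the transaction's internal prepare and finish futures. A minimal sketch of how such accessors might look, assuming reflection over the internal transaction via GridTestUtils (the field names prepFut and finishFut are assumptions, not confirmed by this snippet):

/** Hypothetical sketch: reads the internal prepare future via reflection. Field name is an assumption. */
private IgniteInternalFuture<?> prepareFuture(IgniteInternalTx tx) {
    return GridTestUtils.getFieldValue(tx, "prepFut");
}

/** Hypothetical sketch: reads the internal finish future via reflection. Field name is an assumption. */
private IgniteInternalFuture<?> finishFuture(IgniteInternalTx tx) {
    return GridTestUtils.getFieldValue(tx, "finishFut");
}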
Use of org.apache.ignite.internal.processors.cache.transactions.TransactionProxyImpl in project ignite by apache.
The class CacheMvccTxNodeMappingTest, method checkScenario.
/**
 * @param ign Near (originating) node.
 * @param srvCnt Number of server nodes.
 * @param txNodes Expected primary-to-backups transaction node mapping.
 * @param r Closure that performs the transaction's cache operations.
 */
private void checkScenario(IgniteEx ign, int srvCnt, ImmutableMap<UUID, Set<UUID>> txNodes, Runnable r) throws Exception {
    try (Transaction userTx = ign.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
        r.run();

        GridNearTxLocal nearTx = ((TransactionProxyImpl)userTx).tx();

        nearTx.prepareNearTxLocal().get();

        List<IgniteInternalTx> txs = IntStream.range(0, srvCnt)
            .mapToObj(i -> txsOnNode(grid(i), nearTx.nearXidVersion()))
            .flatMap(Collection::stream)
            .collect(Collectors.toList());

        assertFalse(txs.isEmpty());

        txs.forEach(tx -> assertEquals(txNodes, repack(tx.transactionNodes())));
    }
}
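Two helpers used above are defined elsewhere in the test class: txsOnNode, which collects the internal transactions on a node that share a near xid version, and repack, which normalizes the mapping returned by transactionNodes() for comparison. Minimal sketches, assuming transactions are enumerated through the node's internal transaction manager (the exact traversal is an assumption):

/** Hypothetical sketch: internal transactions on the given node with the given near xid version. */
private static List<IgniteInternalTx> txsOnNode(IgniteEx node, GridCacheVersion xidVer) {
    return node.context().cache().context().tm().activeTransactions().stream()
        .filter(tx -> xidVer.equals(tx.nearXidVersion()))
        .collect(Collectors.toList());
}

/** Hypothetical sketch: normalizes the primary-to-backups mapping into comparable Set values. */
private static ImmutableMap<UUID, Set<UUID>> repack(Map<UUID, Collection<UUID>> txNodes) {
    ImmutableMap.Builder<UUID, Set<UUID>> b = ImmutableMap.builder();

    txNodes.forEach((primary, backups) -> b.put(primary, new HashSet<>(backups)));

    return b.build();
}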
Use of org.apache.ignite.internal.processors.cache.transactions.TransactionProxyImpl in project ignite by apache.
The class CacheMvccTxRecoveryTest, method testUpdateCountersGapIsClosed.
/**
 * @throws Exception if failed.
 */
@Test
public void testUpdateCountersGapIsClosed() throws Exception {
    int srvCnt = 3;

    startGridsMultiThreaded(srvCnt);

    client = true;

    IgniteEx ign = startGrid(srvCnt);

    IgniteCache<Object, Object> cache = ign.getOrCreateCache(basicCcfg().setBackups(2));

    int vid = 1;

    IgniteEx victim = grid(vid);

    ArrayList<Integer> keys = new ArrayList<>();

    Integer part = null;

    Affinity<Object> aff = ign.affinity(DEFAULT_CACHE_NAME);

    // Pick two keys that are primary on the victim node and belong to the same partition.
    for (int i = 0; i < 2000; i++) {
        int p = aff.partition(i);

        if (aff.isPrimary(victim.localNode(), i)) {
            if (part == null)
                part = p;

            if (p == part)
                keys.add(i);

            if (keys.size() == 2)
                break;
        }
    }

    assert keys.size() == 2;

    Transaction txA = ign.transactions().txStart(PESSIMISTIC, REPEATABLE_READ);

    // Prevent the first transaction from preparing on the backups.
    ((TestRecordingCommunicationSpi)victim.configuration().getCommunicationSpi())
        .blockMessages(new IgniteBiPredicate<ClusterNode, Message>() {
            final AtomicInteger limiter = new AtomicInteger();

            @Override public boolean apply(ClusterNode node, Message msg) {
                if (msg instanceof GridDhtTxPrepareRequest)
                    return limiter.getAndIncrement() < 2;

                return false;
            }
        });

    cache.query(new SqlFieldsQuery("insert into Integer(_key, _val) values(?, 42)").setArgs(keys.get(0)));

    txA.commitAsync();

    GridCacheVersion aXidVer = ((TransactionProxyImpl)txA).tx().xidVersion();

    assertConditionEventually(() -> txsOnNode(victim, aXidVer).stream().anyMatch(tx -> tx.state() == PREPARING));

    GridTestUtils.runAsync(() -> {
        try (Transaction txB = ign.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
            cache.query(new SqlFieldsQuery("insert into Integer(_key, _val) values(?, 42)").setArgs(keys.get(1)));

            txB.commit();
        }
    }).get();

    long victimUpdCntr = updateCounter(victim.cachex(DEFAULT_CACHE_NAME).context(), keys.get(0));

    List<IgniteEx> backupNodes = grids(srvCnt, i -> i != vid);

    List<IgniteInternalTx> backupTxsA = backupNodes.stream()
        .map(node -> txsOnNode(node, aXidVer))
        .flatMap(Collection::stream)
        .collect(Collectors.toList());

    // Drop the primary node.
    victim.close();

    assertConditionEventually(() -> backupTxsA.stream().allMatch(tx -> tx.state() == ROLLED_BACK));

    backupNodes.stream().map(node -> node.cache(DEFAULT_CACHE_NAME)).forEach(c -> {
        assertEquals(1, c.query(new SqlFieldsQuery("select * from Integer")).getAll().size());
    });

    backupNodes.forEach(node -> {
        for (Integer k : keys)
            assertEquals(victimUpdCntr, updateCounter(node.cachex(DEFAULT_CACHE_NAME).context(), k));
    });
}
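assertConditionEventually and updateCounter are likewise helpers from the test class. Minimal sketches under stated assumptions: the wait is built on GridTestUtils.waitForCondition (the 10-second timeout is an assumption), and the counter is read from the local partition that owns the key (the exact lookup path is an assumption):

/** Hypothetical sketch: polls the condition until it holds or the (assumed) timeout elapses. */
private static void assertConditionEventually(GridAbsPredicate cond) throws IgniteCheckedException {
    assertTrue(GridTestUtils.waitForCondition(cond, 10_000));
}

/** Hypothetical sketch: update counter of the local partition owning the given key. */
private static long updateCounter(GridCacheContext<?, ?> cctx, Object key) {
    return cctx.topology().localPartition(cctx.affinity().partition(key)).updateCounter();
}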
Use of org.apache.ignite.internal.processors.cache.transactions.TransactionProxyImpl in project ignite by apache.
The class CacheMvccTxRecoveryTest, method checkRecoveryPrimaryFailure.
/**
 * @param endRes Expected transaction end result (commit or rollback).
 * @param mvccCrd {@code True} if the failed primary node should also be the MVCC coordinator.
 */
private void checkRecoveryPrimaryFailure(TxEndResult endRes, boolean mvccCrd) throws Exception {
    int gridCnt = 4;
    int baseCnt = gridCnt - 1;

    boolean commit = endRes == COMMIT;

    startGridsMultiThreaded(baseCnt);

    client = true;

    IgniteEx nearNode = startGrid(baseCnt);

    IgniteCache<Object, Object> cache = nearNode.getOrCreateCache(basicCcfg().setBackups(1));

    Affinity<Object> aff = nearNode.affinity(DEFAULT_CACHE_NAME);

    List<Integer> keys = new ArrayList<>();

    // Find a key that is primary on grid(0) and backed up on grid(1).
    for (int i = 0; i < 100; i++) {
        if (aff.isPrimary(grid(0).localNode(), i) && aff.isBackup(grid(1).localNode(), i)) {
            keys.add(i);

            break;
        }
    }

    // Find a key that is primary on grid(1) and backed up on grid(2).
    for (int i = 0; i < 100; i++) {
        if (aff.isPrimary(grid(1).localNode(), i) && aff.isBackup(grid(2).localNode(), i)) {
            keys.add(i);

            break;
        }
    }

    assert keys.size() == 2;

    int victim, victimBackup;

    if (mvccCrd) {
        victim = 0;
        victimBackup = 1;
    }
    else {
        victim = 1;
        victimBackup = 2;
    }

    TestRecordingCommunicationSpi victimComm = (TestRecordingCommunicationSpi)grid(victim).configuration().getCommunicationSpi();

    if (commit)
        victimComm.blockMessages(GridNearTxFinishResponse.class, nearNode.name());
    else
        victimComm.blockMessages(GridDhtTxPrepareRequest.class, grid(victimBackup).name());

    GridNearTxLocal nearTx = ((TransactionProxyImpl)nearNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)).tx();

    for (Integer k : keys)
        cache.query(new SqlFieldsQuery("insert into Integer(_key, _val) values(?, 42)").setArgs(k));

    List<IgniteInternalTx> txs = IntStream.range(0, baseCnt)
        .filter(i -> i != victim)
        .mapToObj(i -> txsOnNode(grid(i), nearTx.xidVersion()))
        .flatMap(Collection::stream)
        .collect(Collectors.toList());

    IgniteInternalFuture<IgniteInternalTx> commitFut = nearTx.commitAsync();

    if (commit)
        assertConditionEventually(() -> txs.stream().allMatch(tx -> tx.state() == COMMITTED));
    else
        assertConditionEventually(() -> txs.stream().anyMatch(tx -> tx.state() == PREPARED));

    // Drop the victim node.
    grid(victim).close();

    awaitPartitionMapExchange();

    assertConditionEventually(() -> txs.stream().allMatch(tx -> tx.state() == (commit ? COMMITTED : ROLLED_BACK)));

    assert victimComm.hasBlockedMessages();

    if (commit) {
        assertConditionEventually(() -> {
            int rowsCnt = G.allGrids().get(0).cache(DEFAULT_CACHE_NAME)
                .query(new SqlFieldsQuery("select * from Integer")).getAll().size();

            return rowsCnt == keys.size();
        });
    }
    else {
        int rowsCnt = G.allGrids().get(0).cache(DEFAULT_CACHE_NAME)
            .query(new SqlFieldsQuery("select * from Integer")).getAll().size();

        assertEquals(0, rowsCnt);
    }

    assertTrue(commitFut.isDone());

    assertPartitionCountersAreConsistent(keys, grids(baseCnt, i -> i != victim));
}
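The grids helper filters started nodes by index; a minimal sketch inferred from the call sites grids(srvCnt, i -> i != vid) and grids(baseCnt, i -> i != victim) above (the signature is an assumption):

/** Hypothetical sketch: grids with indexes below cnt that pass the filter. Inferred from the call sites. */
private List<IgniteEx> grids(int cnt, IntPredicate filter) {
    return IntStream.range(0, cnt).filter(filter).mapToObj(this::grid).collect(Collectors.toList());
}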
Use of org.apache.ignite.internal.processors.cache.transactions.TransactionProxyImpl in project ignite by apache.
The class GridExchangeFreeCellularSwitchComplexOperationsTest, method testComplexOperationsRecoveryOnCellularSwitch.
/**
 * Checks that prepared transactions, regardless of their content, are recovered on a Cellular switch,
 * and that subsequent transactions are committed.
 */
@Test
public void testComplexOperationsRecoveryOnCellularSwitch() throws Exception {
    int nodes = 6;

    startGridsMultiThreaded(nodes);

    blockRecoveryMessages();

    CellularCluster cluster = resolveCellularCluster(nodes, startFrom);

    Ignite orig = cluster.orig;
    Ignite failed = cluster.failed;
    List<Ignite> brokenCellNodes = cluster.brokenCellNodes;
    List<Ignite> aliveCellNodes = cluster.aliveCellNodes;

    int recFutsCnt = 7;

    CountDownLatch prepLatch = new CountDownLatch(recFutsCnt);
    CountDownLatch commitLatch = new CountDownLatch(1);

    Set<Integer> partSet = new GridConcurrentHashSet<>();
    Set<Integer> replSet = new GridConcurrentHashSet<>();

    List<IgniteInternalFuture<?>> futs = new ArrayList<>();

    AtomicInteger cnt = new AtomicInteger();

    BiFunction<Ignite, String, Integer> nextPrimaryKey = (ignite, cacheName) -> {
        int idx = cnt.getAndIncrement();

        return primaryKeys(ignite.getOrCreateCache(cacheName), idx + 1).get(idx);
    };

    BiConsumer<String, Set<Integer>> singlePutEverywhere = (cacheName, globSet) -> {
        try {
            Transaction tx = orig.transactions().txStart(concurrency, isolation);

            Set<Integer> set = new HashSet<>();

            for (Ignite ignite : G.allGrids()) {
                if (ignite.configuration().isClientMode())
                    continue;

                set.add(nextPrimaryKey.apply(ignite, cacheName));
            }

            globSet.addAll(set);

            IgniteCache<Integer, Integer> cache = orig.getOrCreateCache(cacheName);

            for (Integer key : set)
                cache.put(key, key);

            ((TransactionProxyImpl<?, ?>)tx).tx().prepare(true);

            prepLatch.countDown();

            commitLatch.await();

            if (orig != failed)
                ((TransactionProxyImpl<?, ?>)tx).commit();
        }
        catch (Exception e) {
            fail("Should not happen [exception=" + e + "]");
        }
    };

    futs.add(multithreadedAsync(() -> singlePutEverywhere.accept(PART_CACHE_NAME, partSet), 1));
    futs.add(multithreadedAsync(() -> singlePutEverywhere.accept(REPL_CACHE_NAME, replSet), 1));

    Consumer<Integer> putEverywhereToBoth = (putPerTx) -> {
        try {
            Transaction tx = orig.transactions().txStart(concurrency, isolation);

            Set<Integer> pSet = new HashSet<>();
            Set<Integer> rSet = new HashSet<>();

            for (int i = 0; i < putPerTx; i++) {
                for (Ignite ignite : G.allGrids()) {
                    if (ignite.configuration().isClientMode())
                        continue;

                    pSet.add(nextPrimaryKey.apply(ignite, PART_CACHE_NAME));
                    rSet.add(nextPrimaryKey.apply(ignite, REPL_CACHE_NAME));
                }
            }

            partSet.addAll(pSet);
            replSet.addAll(rSet);

            IgniteCache<Integer, Integer> pCache = orig.getOrCreateCache(PART_CACHE_NAME);
            IgniteCache<Integer, Integer> rCache = orig.getOrCreateCache(REPL_CACHE_NAME);

            for (Integer key : pSet)
                pCache.put(key, key);

            for (Integer key : rSet)
                rCache.put(key, key);

            ((TransactionProxyImpl<?, ?>)tx).tx().prepare(true);

            prepLatch.countDown();

            commitLatch.await();

            if (orig != failed)
                ((TransactionProxyImpl<?, ?>)tx).commit();
        }
        catch (Exception e) {
            fail("Should not happen [exception=" + e + "]");
        }
    };

    futs.add(multithreadedAsync(() -> putEverywhereToBoth.accept(1), 1));
    futs.add(multithreadedAsync(() -> putEverywhereToBoth.accept(2), 1));
    futs.add(multithreadedAsync(() -> putEverywhereToBoth.accept(10), 1));

    Consumer<Boolean> singleTxPerCell = (partAtBrokenCell) -> {
        try {
            Transaction tx = orig.transactions().txStart(concurrency, isolation);

            Integer pKey = partAtBrokenCell ?
                nextPrimaryKey.apply(failed, PART_CACHE_NAME) :
                nextPrimaryKey.apply(aliveCellNodes.get(0), PART_CACHE_NAME);

            Integer rKey = partAtBrokenCell ?
                nextPrimaryKey.apply(aliveCellNodes.get(0), REPL_CACHE_NAME) :
                nextPrimaryKey.apply(failed, REPL_CACHE_NAME);

            IgniteCache<Integer, Integer> pCache = orig.getOrCreateCache(PART_CACHE_NAME);
            IgniteCache<Integer, Integer> rCache = orig.getOrCreateCache(REPL_CACHE_NAME);

            pCache.put(pKey, pKey);
            rCache.put(rKey, rKey);

            partSet.add(pKey);
            replSet.add(rKey);

            ((TransactionProxyImpl<?, ?>)tx).tx().prepare(true);

            prepLatch.countDown();

            commitLatch.await();

            if (orig != failed)
                ((TransactionProxyImpl<?, ?>)tx).commit();
        }
        catch (Exception e) {
            fail("Should not happen [exception=" + e + "]");
        }
    };

    futs.add(multithreadedAsync(() -> singleTxPerCell.accept(true), 1));
    futs.add(multithreadedAsync(() -> singleTxPerCell.accept(false), 1));

    prepLatch.await();

    assertEquals(recFutsCnt, futs.size());

    // Stopping node.
    failed.close();

    awaitForSwitchOnNodeLeft(failed);

    Consumer<Ignite> partTxRun = (ignite) -> {
        try {
            IgniteCache<Integer, Integer> cache = ignite.getOrCreateCache(PART_CACHE_NAME);

            try (Transaction tx = ignite.transactions().txStart(concurrency, isolation)) {
                Integer key = nextPrimaryKey.apply(ignite, PART_CACHE_NAME);

                partSet.add(key);

                cache.put(key, key);

                tx.commit();
            }
        }
        catch (Exception e) {
            fail("Should not happen [exception=" + e + "]");
        }
    };

    Consumer<Ignite> replTxRun = (ignite) -> {
        try {
            IgniteCache<Integer, Integer> cache = ignite.getOrCreateCache(REPL_CACHE_NAME);

            try (Transaction tx = ignite.transactions().txStart(concurrency, isolation)) {
                Integer key = nextPrimaryKey.apply(ignite, REPL_CACHE_NAME);

                replSet.add(key);

                cache.put(key, key);

                tx.commit();
            }
        }
        catch (Exception e) {
            fail("Should not happen [exception=" + e + "]");
        }
    };

    for (Ignite brokenCellNode : brokenCellNodes) {
        futs.add(multithreadedAsync(() -> partTxRun.accept(brokenCellNode), 1));
        futs.add(multithreadedAsync(() -> replTxRun.accept(brokenCellNode), 1));
    }

    for (Ignite aliveCellNode : aliveCellNodes) {
        futs.add(multithreadedAsync(() -> partTxRun.accept(aliveCellNode), 1));
        futs.add(multithreadedAsync(() -> replTxRun.accept(aliveCellNode), 1));
    }

    // Allowing recovery.
    for (Ignite ignite : G.allGrids()) {
        TestRecordingCommunicationSpi spi = (TestRecordingCommunicationSpi)ignite.configuration().getCommunicationSpi();

        spi.stopBlock(true, blockedMsg -> true);
    }

    commitLatch.countDown();

    for (IgniteInternalFuture<?> fut : futs)
        fut.get();

    for (Ignite node : G.allGrids()) {
        IgniteCache<Integer, Integer> partCache = node.getOrCreateCache(PART_CACHE_NAME);
        IgniteCache<Integer, Integer> replCache = node.getOrCreateCache(REPL_CACHE_NAME);

        for (Integer key : partSet)
            assertEquals(key, partCache.get(key));

        for (Integer key : replSet)
            assertEquals(key, replCache.get(key));
    }

    // Final check that no transactions remain.
    checkTransactionsCount(null, 0, brokenCellNodes, 0, aliveCellNodes, 0, null);
}
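blockRecoveryMessages, called at the start of the test, postpones transaction recovery until stopBlock later releases the blocked messages. A minimal sketch, assuming recovery is driven by GridCacheTxRecoveryRequest messages (the message type is an assumption based on the later stopBlock call):

/** Hypothetical sketch: block tx recovery requests on all nodes until stopBlock is invoked. */
protected void blockRecoveryMessages() {
    for (Ignite ignite : G.allGrids()) {
        TestRecordingCommunicationSpi spi =
            (TestRecordingCommunicationSpi)ignite.configuration().getCommunicationSpi();

        spi.blockMessages((node, msg) -> msg instanceof GridCacheTxRecoveryRequest);
    }
}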