Use of org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_ASYNC in the apache/ignite project.
The example below is the method testTxDoesntBecomePreparedAfterError of the class TxRecoveryWithConcurrentRollbackTest.
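For context on the mode being exercised: the test assigns a syncMode field, which the test class presumably copies into its cache configuration. A minimal sketch of configuring a cache with FULL_ASYNC write synchronization (the cache name and backup count here are illustrative, not taken from this test):

import org.apache.ignite.configuration.CacheConfiguration;
import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_ASYNC;

CacheConfiguration<Object, Object> ccfg = new CacheConfiguration<Object, Object>("default")
    .setBackups(2)
    // FULL_ASYNC: cache writes return to the caller without waiting for responses from
    // primary or backup nodes, so remote nodes may be updated slightly after the call completes.
    .setWriteSynchronizationMode(FULL_ASYNC);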
/**
 * Start 3 servers,
 * start 2 clients,
 * start two OPTIMISTIC transactions with the same key from different client nodes,
 * try to move both into the PREPARED state,
 * then stop one client node.
 */
@Test
public void testTxDoesntBecomePreparedAfterError() throws Exception {
    backups = 2;
    persistence = true;
    syncMode = FULL_ASYNC;

    final IgniteEx node0 = startGrids(3);

    node0.cluster().state(ACTIVE);

    final IgniteEx client1 = startGrid("client1");
    final IgniteEx client2 = startGrid("client2");

    awaitPartitionMapExchange();

    final IgniteCache<Object, Object> cache = client1.cache(DEFAULT_CACHE_NAME);
    final IgniteCache<Object, Object> cache2 = client2.cache(DEFAULT_CACHE_NAME);

    final Integer pk = primaryKey(node0.cache(DEFAULT_CACHE_NAME));

    // Released once either transaction completes its prepare phase.
    CountDownLatch txPrepareLatch = new CountDownLatch(1);

    GridTestUtils.runMultiThreadedAsync(() -> {
        try (final Transaction tx = client1.transactions().withLabel("tx1").txStart(OPTIMISTIC, READ_COMMITTED, 5000, 1)) {
            cache.put(pk, Boolean.TRUE);

            TransactionProxyImpl p = (TransactionProxyImpl)tx;

            // Prevent the tx rollback on exit from the try-with-resources block;
            // this should instead cause the tx to fail with another timeout.
            spi(client1).blockMessages((node, msg) -> msg instanceof GridNearTxFinishRequest);

            log.info("Test, preparing tx: xid=" + tx.xid() + ", tx=" + tx);

            // Do only the prepare step to try to lock the key; a commit is not needed here.
            p.tx().prepareNearTxLocal();

            p.tx().currentPrepareFuture().listen(fut -> txPrepareLatch.countDown());
        }
        catch (Exception e) {
            // No-op.
        }
    }, 1, "tx1-thread");

    try (final Transaction tx = client2.transactions().withLabel("tx2").txStart(OPTIMISTIC, READ_COMMITTED, 5000, 1)) {
        cache2.put(pk, Boolean.TRUE);

        TransactionProxyImpl p = (TransactionProxyImpl)tx;

        log.info("Test, preparing tx: xid=" + tx.xid() + ", tx=" + tx);

        p.tx().prepareNearTxLocal();

        p.tx().currentPrepareFuture().listen(fut -> txPrepareLatch.countDown());

        txPrepareLatch.await(6, TimeUnit.SECONDS);

        if (txPrepareLatch.getCount() > 0)
            fail("Failed to await for tx prepare.");

        // Wait until a DHT transaction on the primary node reaches the PREPARING state.
        AtomicReference<GridDhtTxLocal> dhtTxLocRef = new AtomicReference<>();

        assertTrue(waitForCondition(() -> {
            dhtTxLocRef.set((GridDhtTxLocal)txs(node0).stream()
                .filter(t -> t.state() == TransactionState.PREPARING)
                .findFirst()
                .orElse(null));

            return dhtTxLocRef.get() != null;
        }, 6_000));

        assertNotNull(dhtTxLocRef.get());

        UUID clientNodeToFail = dhtTxLocRef.get().eventNodeId();

        GridDhtTxPrepareFuture prep = GridTestUtils.getFieldValue(dhtTxLocRef.get(), "prepFut");

        prep.get();

        List<IgniteInternalTx> txs = txs(node0);

        String txsStr = txs.stream().map(Object::toString).collect(Collectors.joining(", "));

        log.info("Transactions check point [count=" + txs.size() + ", txs=" + txsStr + "]");

        // Stop the client node that originated the preparing DHT transaction.
        if (clientNodeToFail.equals(client1.localNode().id()))
            client1.close();
        else if (clientNodeToFail.equals(client2.localNode().id()))
            client2.close();
    }
    catch (Exception e) {
        log.error(e.getMessage(), e);
    }

    U.sleep(500);

    // All three servers must stay alive, and exactly one client transaction should remain.
    assertEquals(3, grid(1).context().discovery().aliveServerNodes().size());

    assertEquals(txs(client1).toString() + ", " + txs(client2).toString(), 1, txs(client1).size() + txs(client2).size());
}
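The txs(...) and spi(...) helpers are not part of this excerpt: spi(node) is the usual TestRecordingCommunicationSpi accessor from Ignite's test framework, and txs(node) presumably collects the node's active internal transactions. A sketch of such a helper, assuming standard Ignite internals (hypothetical, not the exact source of the test class):

private static List<IgniteInternalTx> txs(IgniteEx g) {
    // Snapshot of the active internal transactions tracked by the node's transaction manager.
    return new ArrayList<>(g.context().cache().context().tm().activeTransactions());
}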