Use of org.apache.ignite.transactions.TransactionIsolation.READ_COMMITTED in project ignite by apache.
The class FunctionalTest, method testTransactionsWithLabel.
/**
 * Test transactions with label.
 */
@Test
public void testTransactionsWithLabel() throws Exception {
    try (IgniteEx ignite = (IgniteEx) Ignition.start(Config.getServerConfiguration());
         IgniteClient client = Ignition.startClient(getClientConfiguration())) {
        ClientCache<Integer, String> cache = client.createCache(new ClientCacheConfiguration()
            .setName("cache")
            .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL));

        SystemView<TransactionView> txsView = ignite.context().systemView().view(TXS_MON_LIST);

        cache.put(0, "value1");

        try (ClientTransaction tx = client.transactions().withLabel("label").txStart()) {
            cache.put(0, "value2");

            assertEquals(1, F.size(txsView.iterator()));

            TransactionView txv = txsView.iterator().next();

            assertEquals("label", txv.label());
            assertEquals("value2", cache.get(0));
        }

        assertEquals("value1", cache.get(0));

        try (ClientTransaction tx = client.transactions().withLabel("label1").withLabel("label2").txStart()) {
            cache.put(0, "value2");

            assertEquals(1, F.size(txsView.iterator()));

            TransactionView txv = txsView.iterator().next();

            assertEquals("label2", txv.label());

            tx.commit();
        }

        assertEquals("value2", cache.get(0));

        // Test concurrent transactions with and without a label.
        try (ClientTransaction tx = client.transactions().withLabel("label").txStart(PESSIMISTIC, READ_COMMITTED)) {
            CyclicBarrier barrier = new CyclicBarrier(2);

            cache.put(0, "value3");

            IgniteInternalFuture<?> fut = GridTestUtils.runAsync(() -> {
                try (ClientTransaction tx1 = client.transactions().txStart(PESSIMISTIC, READ_COMMITTED)) {
                    cache.put(1, "value3");

                    barrier.await();

                    assertEquals("value2", cache.get(0));

                    barrier.await();
                }
                catch (InterruptedException | BrokenBarrierException ignore) {
                    // No-op.
                }
            });

            barrier.await();

            assertNull(cache.get(1));

            assertEquals(1, F.size(txsView.iterator(), txv -> txv.label() == null));
            assertEquals(1, F.size(txsView.iterator(), txv -> "label".equals(txv.label())));

            barrier.await();

            fut.get();
        }

        // Test that nested transactions are not possible.
        try (ClientTransaction tx = client.transactions().withLabel("label1").txStart()) {
            try (ClientTransaction tx1 = client.transactions().txStart()) {
                fail();
            }
            catch (ClientException expected) {
                // No-op.
            }

            try (ClientTransaction tx1 = client.transactions().withLabel("label2").txStart()) {
                fail();
            }
            catch (ClientException expected) {
                // No-op.
            }
        }
    }
}
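For reference, here is a minimal, self-contained sketch of the thin-client pattern the test above exercises: a labeled PESSIMISTIC/READ_COMMITTED transaction that is explicitly committed. The server address, cache name and label below are illustrative assumptions, not values taken from the test.

import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.client.ClientCache;
import org.apache.ignite.client.ClientCacheConfiguration;
import org.apache.ignite.client.ClientTransaction;
import org.apache.ignite.client.IgniteClient;
import org.apache.ignite.configuration.ClientConfiguration;
import org.apache.ignite.transactions.TransactionConcurrency;
import org.apache.ignite.transactions.TransactionIsolation;

public class LabeledTxExample {
    public static void main(String[] args) throws Exception {
        // Assumed server address; the test starts an embedded server instead.
        ClientConfiguration clientCfg = new ClientConfiguration().setAddresses("127.0.0.1:10800");

        try (IgniteClient client = Ignition.startClient(clientCfg)) {
            // Transactions require a TRANSACTIONAL cache.
            ClientCache<Integer, String> cache = client.getOrCreateCache(
                new ClientCacheConfiguration()
                    .setName("cache")
                    .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL));

            // The label tags the transaction in monitoring views, which is what the test
            // checks through the transactions system view.
            try (ClientTransaction tx = client.transactions()
                .withLabel("example-label")
                .txStart(TransactionConcurrency.PESSIMISTIC, TransactionIsolation.READ_COMMITTED)) {

                cache.put(0, "value");

                // Without an explicit commit, closing the transaction rolls it back,
                // which is the behavior the first block of the test relies on.
                tx.commit();
            }
        }
    }
}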
Use of org.apache.ignite.transactions.TransactionIsolation.READ_COMMITTED in project ignite by apache.
The class IgniteLogicalRecoveryWithParamsTest, method testPartiallyCommitedTx.
/**
 * Tests concurrent tx with node stop and further recovery.
 */
private void testPartiallyCommitedTx() throws Exception {
    final String cacheName = "recovery";

    int itmsCount = 30_000;

    AtomicBoolean failFileIO = new AtomicBoolean();

    List<Integer> keys;

    CacheConfiguration<Integer, Integer> cfg = new CacheConfiguration<Integer, Integer>(cacheName)
        .setCacheMode(CacheMode.PARTITIONED)
        .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL)
        .setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC)
        .setBackups(backups)
        .setAffinity(new RendezvousAffinityFunction(false, 32));

    try {
        final IgniteEx srv = (IgniteEx) startGridsMultiThreaded(numSrvNodes);

        G.allGrids().forEach(n -> setWalIOFactory(n, failFileIO));

        IgniteEx clnt = startClientGrid("client");

        TestRecordingCommunicationSpi nearComm = TestRecordingCommunicationSpi.spi(clnt);

        srv.cluster().state(ClusterState.ACTIVE);

        final IgniteCache cache = clnt.getOrCreateCache(cfg);

        final CountDownLatch commitStart = new CountDownLatch(1);

        forceCheckpoint();

        nearComm.blockMessages((node, msg) -> msg instanceof GridNearTxPrepareRequest);

        if (singleNodeTx)
            keys = primaryKeys(srv.cache(cacheName), itmsCount, 0);
        else
            keys = IntStream.range(0, itmsCount).boxed().collect(Collectors.toList());

        Thread t = new Thread(() -> {
            try (Transaction tx = clnt.transactions().txStart(PESSIMISTIC, READ_COMMITTED)) {
                keys.forEach(k -> cache.put(k, k));

                commitStart.countDown();

                tx.commit();
            }
        });

        t.start();

        commitStart.await();

        nearComm.waitForBlocked();
        nearComm.stopBlock();

        assertTrue(waitForWalUpdates(G.allGrids().stream()
            .filter(g -> !g.configuration().isClientMode())
            .collect(Collectors.toList())));
    }
    finally {
        failFileIO.set(true);

        stopAllGrids(true);

        assertTrue(G.allGrids().isEmpty());
    }

    final IgniteEx srv = (IgniteEx) startGridsMultiThreaded(numSrvNodes);

    srv.cluster().state(ClusterState.ACTIVE);

    IgniteCache<Integer, Integer> cache = srv.cache(cacheName);

    int cSize = cache.size();

    boolean pr = cache.get(keys.get(0)) == null;

    for (int i : keys) {
        Object res = cache.get(i);

        if (pr != (res == null))
            assertEquals("ethalon=" + pr + ", current=" + res + ", key=" + i, pr, res == null);
    }

    assert (cSize == itmsCount || cSize == 0) : "unexpected cache size: " + cSize;
}
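For reference, here is a minimal sketch of the core pattern the putting thread above uses: enlisting a batch of puts in a single PESSIMISTIC/READ_COMMITTED transaction on the thick API so that they commit atomically, which is the all-or-nothing property the recovery check at the end of the test verifies. The node name, cache name and key count are illustrative assumptions.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.transactions.Transaction;
import org.apache.ignite.transactions.TransactionConcurrency;
import org.apache.ignite.transactions.TransactionIsolation;

public class BatchTxExample {
    public static void main(String[] args) throws Exception {
        // Assumed single embedded node; the test starts several server grids and a client grid.
        try (Ignite ignite = Ignition.start(new IgniteConfiguration().setIgniteInstanceName("node"))) {
            IgniteCache<Integer, Integer> cache = ignite.getOrCreateCache(
                new CacheConfiguration<Integer, Integer>("recovery")
                    .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL));

            // All puts below are enlisted in one PESSIMISTIC / READ_COMMITTED transaction,
            // so after commit either every key is visible or none is.
            try (Transaction tx = ignite.transactions().txStart(
                TransactionConcurrency.PESSIMISTIC, TransactionIsolation.READ_COMMITTED)) {

                for (int k = 0; k < 100; k++)
                    cache.put(k, k);

                tx.commit();
            }
        }
    }
}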
Use of org.apache.ignite.transactions.TransactionIsolation.READ_COMMITTED in project ignite by apache.
The class GridCommandHandlerTest, method testKillHangingRemoteTransactions.
/**
 * Simulates uncommitted backup transactions and tests rolling them back using the control utility.
 */
@Test
public void testKillHangingRemoteTransactions() throws Exception {
    final int cnt = 3;

    startGridsMultiThreaded(cnt);

    Ignite[] clients = new Ignite[] {
        startGrid("client1"),
        startGrid("client2"),
        startGrid("client3"),
        startGrid("client4")
    };

    clients[0].getOrCreateCache(new CacheConfiguration<>(DEFAULT_CACHE_NAME)
        .setBackups(2)
        .setAtomicityMode(TRANSACTIONAL)
        .setWriteSynchronizationMode(FULL_SYNC)
        .setAffinity(new RendezvousAffinityFunction(false, 64)));

    awaitPartitionMapExchange();

    for (Ignite client : clients) {
        assertTrue(client.configuration().isClientMode());
        assertNotNull(client.cache(DEFAULT_CACHE_NAME));
    }

    LongAdder progress = new LongAdder();
    AtomicInteger idx = new AtomicInteger();
    int tc = clients.length;

    CountDownLatch lockLatch = new CountDownLatch(1);
    CountDownLatch commitLatch = new CountDownLatch(1);

    Ignite prim = primaryNode(0L, DEFAULT_CACHE_NAME);

    TestRecordingCommunicationSpi primSpi = TestRecordingCommunicationSpi.spi(prim);

    primSpi.blockMessages(new IgniteBiPredicate<ClusterNode, Message>() {
        @Override public boolean apply(ClusterNode node, Message message) {
            return message instanceof GridDhtTxFinishRequest;
        }
    });

    Set<IgniteUuid> xidSet = new GridConcurrentHashSet<>();

    IgniteInternalFuture<?> fut = multithreadedAsync(new Runnable() {
        @Override public void run() {
            int id = idx.getAndIncrement();

            Ignite client = clients[id];

            try (Transaction tx = client.transactions().txStart(PESSIMISTIC, READ_COMMITTED, 0, 1)) {
                xidSet.add(tx.xid());

                IgniteCache<Long, Long> cache = client.cache(DEFAULT_CACHE_NAME);

                if (id != 0)
                    U.awaitQuiet(lockLatch);

                cache.invoke(0L, new IncrementClosure(), null);

                if (id == 0) {
                    lockLatch.countDown();

                    U.awaitQuiet(commitLatch);

                    // Wait until candidates are enqueued.
                    doSleep(500);
                }

                tx.commit();
            }
            catch (Exception e) {
                assertTrue(X.hasCause(e, TransactionTimeoutException.class));
            }

            progress.increment();
        }
    }, tc, "invoke-thread");

    U.awaitQuiet(lockLatch);

    commitLatch.countDown();

    primSpi.waitForBlocked(clients.length);

    // Unblock finish messages only from clients 2 through 4.
    primSpi.stopBlock(true, blockedMsg -> {
        GridIoMessage iom = blockedMsg.ioMessage();

        Message m = iom.message();

        if (m instanceof GridDhtTxFinishRequest) {
            GridDhtTxFinishRequest r = (GridDhtTxFinishRequest) m;

            return !r.nearNodeId().equals(clients[0].cluster().localNode().id());
        }

        return true;
    });

    // Wait until the queue is stable.
    for (Ignite ignite : G.allGrids()) {
        if (ignite.configuration().isClientMode())
            continue;

        Collection<IgniteInternalTx> txs = ((IgniteEx) ignite).context().cache().context().tm().activeTransactions();

        waitForCondition(new GridAbsPredicate() {
            @Override public boolean apply() {
                for (IgniteInternalTx tx : txs) {
                    if (!tx.local()) {
                        IgniteTxEntry entry = tx.writeEntries().iterator().next();

                        GridCacheEntryEx cached = entry.cached();

                        Collection<GridCacheMvccCandidate> candidates = cached.remoteMvccSnapshot();

                        if (candidates.size() != clients.length)
                            return false;
                    }
                }

                return true;
            }
        }, 10_000);
    }

    CommandHandler h = new CommandHandler();

    // Check listing.
    validate(h, map -> {
        for (int i = 0; i < cnt; i++) {
            IgniteEx grid = grid(i);

            // Skip the primary node.
            if (grid.localNode().id().equals(prim.cluster().localNode().id()))
                continue;

            VisorTxTaskResult res = map.get(grid.localNode());

            List<VisorTxInfo> infos = res.getInfos().stream()
                .filter(info -> xidSet.contains(info.getNearXid()))
                .collect(Collectors.toList());

            // Validate the queue length on backups.
            assertEquals(clients.length, infos.size());
        }
    }, "--tx");

    // Check kill.
    validate(h, map -> {
        // No-op.
    }, "--tx", "--kill");

    // Wait for all remote txs to finish.
    for (Ignite ignite : G.allGrids()) {
        if (ignite.configuration().isClientMode())
            continue;

        Collection<IgniteInternalTx> txs = ((IgniteEx) ignite).context().cache().context().tm().activeTransactions();

        for (IgniteInternalTx tx : txs) {
            if (!tx.local())
                tx.finishFuture().get();
        }
    }

    // Unblock the finish message from client1.
    primSpi.stopBlock(true);

    fut.get();

    Long cur = (Long) clients[0].cache(DEFAULT_CACHE_NAME).get(0L);

    assertEquals(tc - 1, cur.longValue());

    checkUserFutures();
}
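For reference, here is a minimal sketch of the four-argument txStart(concurrency, isolation, timeout, txSize) call the worker threads above use, with the timeout-failure handling reduced to a plain cause-chain walk. The node name, cache name and timeout value are illustrative assumptions; only the txStart signature and the use of TransactionTimeoutException come from the test, which checks the cause chain via X.hasCause.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.transactions.Transaction;
import org.apache.ignite.transactions.TransactionConcurrency;
import org.apache.ignite.transactions.TransactionIsolation;
import org.apache.ignite.transactions.TransactionTimeoutException;

public class TxTimeoutExample {
    public static void main(String[] args) throws Exception {
        try (Ignite ignite = Ignition.start(new IgniteConfiguration().setIgniteInstanceName("node"))) {
            IgniteCache<Long, Long> cache = ignite.getOrCreateCache(
                new CacheConfiguration<Long, Long>("tx-cache")
                    .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL));

            // txStart(concurrency, isolation, timeout, txSize): a 2-second timeout and an
            // expected size of one entry. The test above passes 0 as the timeout.
            try (Transaction tx = ignite.transactions().txStart(
                TransactionConcurrency.PESSIMISTIC, TransactionIsolation.READ_COMMITTED, 2_000, 1)) {

                cache.put(0L, 1L);

                tx.commit();
            }
            catch (Exception e) {
                // If the transaction cannot complete within the timeout, the failure typically
                // carries a TransactionTimeoutException in its cause chain.
                boolean timedOut = false;

                for (Throwable t = e; t != null; t = t.getCause()) {
                    if (t instanceof TransactionTimeoutException)
                        timedOut = true;
                }

                if (!timedOut)
                    throw e;
            }
        }
    }
}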
Use of org.apache.ignite.transactions.TransactionIsolation.READ_COMMITTED in project ignite by apache.
The class TxRecoveryWithConcurrentRollbackTest, method testTxDoesntBecomePreparedAfterError.
/**
 * Start 3 servers,
 * start 2 clients,
 * start two OPTIMISTIC transactions with the same key from different client nodes,
 * try to transfer both to the PREPARED state,
 * stop one client node.
 */
@Test
public void testTxDoesntBecomePreparedAfterError() throws Exception {
    backups = 2;
    persistence = true;
    syncMode = FULL_ASYNC;

    final IgniteEx node0 = startGrids(3);

    node0.cluster().state(ACTIVE);

    final IgniteEx client1 = startGrid("client1");
    final IgniteEx client2 = startGrid("client2");

    awaitPartitionMapExchange();

    final IgniteCache<Object, Object> cache = client1.cache(DEFAULT_CACHE_NAME);
    final IgniteCache<Object, Object> cache2 = client2.cache(DEFAULT_CACHE_NAME);

    final Integer pk = primaryKey(node0.cache(DEFAULT_CACHE_NAME));

    CountDownLatch txPrepareLatch = new CountDownLatch(1);

    GridTestUtils.runMultiThreadedAsync(() -> {
        try (final Transaction tx = client1.transactions().withLabel("tx1").txStart(OPTIMISTIC, READ_COMMITTED, 5000, 1)) {
            cache.put(pk, Boolean.TRUE);

            TransactionProxyImpl p = (TransactionProxyImpl) tx;

            // Block the finish request to prevent tx rollback on exit from the try-with-resources
            // block; this should cause another tx timeout failure.
            spi(client1).blockMessages((node, msg) -> msg instanceof GridNearTxFinishRequest);

            log.info("Test, preparing tx: xid=" + tx.xid() + ", tx=" + tx);

            // Only prepare, to try to lock the key; commit is not needed here.
            p.tx().prepareNearTxLocal();

            p.tx().currentPrepareFuture().listen(fut -> txPrepareLatch.countDown());
        }
        catch (Exception e) {
            // No-op.
        }
    }, 1, "tx1-thread");

    try (final Transaction tx = client2.transactions().withLabel("tx2").txStart(OPTIMISTIC, READ_COMMITTED, 5000, 1)) {
        cache2.put(pk, Boolean.TRUE);

        TransactionProxyImpl p = (TransactionProxyImpl) tx;

        log.info("Test, preparing tx: xid=" + tx.xid() + ", tx=" + tx);

        p.tx().prepareNearTxLocal();

        p.tx().currentPrepareFuture().listen(fut -> txPrepareLatch.countDown());

        txPrepareLatch.await(6, TimeUnit.SECONDS);

        if (txPrepareLatch.getCount() > 0)
            fail("Failed to await tx prepare.");

        AtomicReference<GridDhtTxLocal> dhtTxLocRef = new AtomicReference<>();

        assertTrue(waitForCondition(() -> {
            dhtTxLocRef.set((GridDhtTxLocal) txs(node0).stream()
                .filter(t -> t.state() == TransactionState.PREPARING)
                .findFirst()
                .orElse(null));

            return dhtTxLocRef.get() != null;
        }, 6_000));

        assertNotNull(dhtTxLocRef.get());

        UUID clientNodeToFail = dhtTxLocRef.get().eventNodeId();

        GridDhtTxPrepareFuture prep = GridTestUtils.getFieldValue(dhtTxLocRef.get(), "prepFut");

        prep.get();

        List<IgniteInternalTx> txs = txs(node0);

        String txsStr = txs.stream().map(Object::toString).collect(Collectors.joining(", "));

        log.info("Transactions check point [count=" + txs.size() + ", txs=" + txsStr + "]");

        if (clientNodeToFail.equals(client1.localNode().id()))
            client1.close();
        else if (clientNodeToFail.equals(client2.localNode().id()))
            client2.close();
    }
    catch (Exception e) {
        log.error(e.getMessage(), e);
    }

    U.sleep(500);

    assertEquals(3, grid(1).context().discovery().aliveServerNodes().size());

    assertEquals(txs(client1).toString() + ", " + txs(client2).toString(), 1, txs(client1).size() + txs(client2).size());
}
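For reference, here is a minimal sketch of a labeled OPTIMISTIC/READ_COMMITTED transaction started with the same four-argument txStart signature as in the test above; unlike the test, it simply commits instead of driving the prepare phase by hand through internal APIs. The node name, cache name and key are illustrative assumptions.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.transactions.Transaction;
import org.apache.ignite.transactions.TransactionConcurrency;
import org.apache.ignite.transactions.TransactionIsolation;

public class OptimisticLabeledTxExample {
    public static void main(String[] args) throws Exception {
        try (Ignite ignite = Ignition.start(new IgniteConfiguration().setIgniteInstanceName("node"))) {
            IgniteCache<Integer, Boolean> cache = ignite.getOrCreateCache(
                new CacheConfiguration<Integer, Boolean>("tx-cache")
                    .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL));

            // OPTIMISTIC concurrency, READ_COMMITTED isolation, a 5-second timeout and an
            // expected size of one entry; the label is attached to the transaction and is
            // visible in monitoring views and diagnostics.
            try (Transaction tx = ignite.transactions()
                .withLabel("tx1")
                .txStart(TransactionConcurrency.OPTIMISTIC, TransactionIsolation.READ_COMMITTED, 5_000, 1)) {

                cache.put(1, Boolean.TRUE);

                System.out.println("Committing tx: xid=" + tx.xid() + ", label=" + tx.label());

                tx.commit();
            }
        }
    }
}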