Use of org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxFinishRequest in project ignite by apache.
The class IgniteCacheClientMultiNodeUpdateTopologyLockTest, method testPessimisticTx.
/**
* @throws Exception If failed.
*/
@Test
public void testPessimisticTx() throws Exception {
startGrids(3);
Ignite clientNode = startClientGrid(3);
IgniteCache<Integer, Integer> cache = clientNode.createCache(cacheConfiguration(0, FULL_SYNC));
awaitPartitionMapExchange();
Integer key1 = movingKeysAfterJoin(ignite(1), TEST_CACHE, 1).get(0);
Integer key2 = movingKeysAfterJoin(ignite(2), TEST_CACHE, 1).get(0);
log.info("Start tx [key1=" + key1 + ", key2=" + key2 + ']');
IgniteInternalFuture<?> startFut;
TestRecordingCommunicationSpi spi2 = TestRecordingCommunicationSpi.spi(ignite(2));
final TestRecordingCommunicationSpi clientSpi = TestRecordingCommunicationSpi.spi(clientNode);
final UUID node0Id = ignite(0).cluster().localNode().id();
final UUID node2Id = ignite(2).cluster().localNode().id();
spi2.record(new IgniteBiPredicate<ClusterNode, Message>() {
@Override
public boolean apply(ClusterNode node, Message msg) {
if (!node0Id.equals(node.id()))
return false;
return (msg instanceof GridDhtPartitionsSingleMessage) && ((GridDhtPartitionsSingleMessage) msg).exchangeId() != null;
}
});
clientSpi.blockMessages(new IgniteBiPredicate<ClusterNode, Message>() {
@Override
public boolean apply(final ClusterNode node, final Message msg) {
if (!node2Id.equals(node.id()))
return false;
if (msg instanceof GridNearTxFinishRequest) {
log.info("Delay message [msg=" + msg + ']');
GridTestUtils.runAsync(new Runnable() {
@Override
public void run() {
try {
Thread.sleep(5000);
} catch (InterruptedException e) {
e.printStackTrace();
}
log.info("Send delayed message [msg=" + msg + ']');
clientSpi.stopBlock(true);
}
});
return true;
}
return false;
}
});
try (Transaction tx = clientNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
cache.put(key1, 1);
startFut = GridTestUtils.runAsync(new Callable<Object>() {
@Override
public Object call() throws Exception {
startGrid(4);
return null;
}
}, "start-thread");
spi2.waitForRecorded();
U.sleep(5);
cache.put(key2, 2);
log.info("Commit tx");
tx.commit();
}
assertEquals((Integer) 1, cache.get(key1));
assertEquals((Integer) 2, cache.get(key2));
startFut.get();
assertEquals((Integer) 1, cache.get(key1));
assertEquals((Integer) 2, cache.get(key2));
awaitPartitionMapExchange();
assertEquals((Integer) 1, cache.get(key1));
assertEquals((Integer) 2, cache.get(key2));
}
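Note: the calls to TestRecordingCommunicationSpi.spi(...) above assume that every node was started with the recording SPI installed via the test configuration. A minimal sketch of the usual getConfiguration override, shown here only for context and not part of the original snippet; the surrounding cache settings are omitted as assumptions:
/** A minimal sketch, assuming the standard GridCommonAbstractTest setup. */
@Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
    IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName);
    // Install the recording SPI so tests can later resolve it with TestRecordingCommunicationSpi.spi(node).
    cfg.setCommunicationSpi(new TestRecordingCommunicationSpi());
    return cfg;
}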
Use of org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxFinishRequest in project ignite by apache.
The class IgniteTxCachePrimarySyncTest, method singleKeyCommit.
/**
* @param client Node executing cache operation.
* @param ccfg Cache configuration.
* @param c Cache update closure.
* @throws Exception If failed.
*/
private void singleKeyCommit(Ignite client, final CacheConfiguration<Object, Object> ccfg, IgniteBiInClosure<Integer, IgniteCache<Object, Object>> c) throws Exception {
Ignite ignite = ignite(0);
assertNotSame(ignite, client);
TestRecordingCommunicationSpi commSpiClient = (TestRecordingCommunicationSpi) client.configuration().getCommunicationSpi();
TestRecordingCommunicationSpi commSpi0 = (TestRecordingCommunicationSpi) ignite.configuration().getCommunicationSpi();
IgniteCache<Object, Object> cache = ignite.cache(ccfg.getName());
final Integer key = primaryKey(cache);
cache.remove(key);
waitKeyRemoved(ccfg.getName(), key);
IgniteCache<Object, Object> clientCache = client.cache(ccfg.getName());
commSpiClient.record(GridNearTxFinishRequest.class);
commSpi0.record(GridDhtTxFinishRequest.class);
commSpi0.blockMessages(new IgniteBiPredicate<ClusterNode, Message>() {
@Override
public boolean apply(ClusterNode node, Message msg) {
return msg instanceof GridDhtTxFinishRequest;
}
});
c.apply(key, clientCache);
assertEquals(key, cache.localPeek(key));
U.sleep(50);
boolean nearCache = ((IgniteCacheProxy) clientCache).context().isNear();
for (int i = 1; i < NODES; i++) {
Ignite node = ignite(i);
if (nearCache && node == client && !node.affinity(ccfg.getName()).isPrimaryOrBackup(node.cluster().localNode(), key))
assertEquals("Invalid value for node: " + i, key, ignite(i).cache(DEFAULT_CACHE_NAME).localPeek(key));
else
assertNull("Invalid value for node: " + i, ignite(i).cache(DEFAULT_CACHE_NAME).localPeek(key));
}
commSpi0.stopBlock(true);
waitKeyUpdated(ignite, ccfg.getBackups() + 1, ccfg.getName(), key);
List<Object> msgs = commSpiClient.recordedMessages(true);
assertEquals(1, msgs.size());
GridNearTxFinishRequest req = (GridNearTxFinishRequest) msgs.get(0);
assertEquals(PRIMARY_SYNC, req.syncMode());
msgs = commSpi0.recordedMessages(true);
assertEquals(ccfg.getBackups(), msgs.size());
clientCache.remove(key);
waitKeyRemoved(ccfg.getName(), key);
c.apply(key, clientCache);
waitKeyUpdated(ignite, ccfg.getBackups() + 1, ccfg.getName(), key);
}
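The update closure c is supplied by the calling test; a minimal sketch of one possible closure, assuming a plain single-key put (the real callers may also use explicit transactions or invoke):
// Hypothetical closure for illustration; the value equals the key, matching the localPeek assertions above.
IgniteBiInClosure<Integer, IgniteCache<Object, Object>> putClosure =
    new IgniteBiInClosure<Integer, IgniteCache<Object, Object>>() {
        @Override public void apply(Integer key, IgniteCache<Object, Object> cache) {
            cache.put(key, key);
        }
    };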
Use of org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxFinishRequest in project ignite by apache.
The class TxPartitionCounterStateAbstractTest, method runOnPartition.
/**
* Runs a scenario.
*
* @param partId Partition id.
* @param part2Sup Optional second partition supplier.
* @param backups Backups count.
* @param nodesCnt Nodes count.
* @param clo Callback closure which produces {@link TxCallback}.
* @param sizes Sizes of key ranges, one range per transaction.
* @return Transaction topology: partition id mapped to its primary node and backup nodes.
* @throws Exception If failed.
*/
protected Map<Integer, T2<Ignite, List<Ignite>>> runOnPartition(int partId, @Nullable Supplier<Integer> part2Sup, int backups, int nodesCnt, IgniteClosure<Map<Integer, T2<Ignite, List<Ignite>>>, TxCallback> clo, int[] sizes) throws Exception {
this.backups = backups;
IgniteEx crd = startGrids(nodesCnt);
crd.cluster().active(true);
configureBaselineAutoAdjust();
assertEquals(0, crd.cache(DEFAULT_CACHE_NAME).size());
int[][] ranges = new int[sizes.length][2];
int totalKeys = 0;
for (int i = 0; i < sizes.length; i++) {
int size = sizes[i];
ranges[i] = new int[] { totalKeys, size };
totalKeys += size;
}
IgniteEx client = startClientGrid(CLIENT_GRID_NAME);
// Preload one key to partition to enable historical rebalance.
List<Integer> preloadKeys = loadDataToPartition(partId, "client", DEFAULT_CACHE_NAME, PRELOAD_KEYS_CNT, 0);
forceCheckpoint();
assertPartitionsSame(idleVerify(client, DEFAULT_CACHE_NAME));
List<Integer> keys = partitionKeys(crd.cache(DEFAULT_CACHE_NAME), partId, totalKeys, PRELOAD_KEYS_CNT);
assertFalse(preloadKeys.get(0).equals(keys.get(0)));
Ignite prim = primaryNode(keys.get(0), DEFAULT_CACHE_NAME);
List<Ignite> backupz = backups == 0 ? null : backups == 1 ? Collections.singletonList(backupNode(keys.get(0), DEFAULT_CACHE_NAME)) : backupNodes(keys.get(0), DEFAULT_CACHE_NAME);
final TestRecordingCommunicationSpi clientWrappedSpi = spi(client);
Map<IgniteUuid, GridCacheVersion> futMap = new ConcurrentHashMap<>();
Map<GridCacheVersion, GridCacheVersion> nearToLocVerMap = new ConcurrentHashMap<>();
Map<Integer, T2<Ignite, List<Ignite>>> txTop = new HashMap<>();
txTop.put(partId, new T2<>(prim, backupz));
List<Integer> keysPart2 = part2Sup == null ? null : partitionKeys(crd.cache(DEFAULT_CACHE_NAME), part2Sup.get(), sizes.length, 0);
log.info("TX: topology [part1=" + partId + ", primary=" + prim.name() + ", backups=" + F.transform(backupz, Ignite::name));
if (part2Sup != null) {
int partId2 = part2Sup.get();
Ignite prim2 = primaryNode(keysPart2.get(0), DEFAULT_CACHE_NAME);
assertNotSame(prim, prim2);
List<Ignite> backupz2 = backupNodes(keysPart2.get(0), DEFAULT_CACHE_NAME);
txTop.put(partId2, new T2<>(prim2, backupz2));
log.info("TX: topology [part2=" + partId2 + ", primary=" + prim2.name() + ", backups=" + F.transform(backupz2, Ignite::name));
}
TxCallback cb = clo.apply(txTop);
clientWrappedSpi.blockMessages((node, msg) -> {
if (msg instanceof GridNearTxPrepareRequest) {
IgniteEx to = IgnitionEx.gridxx(node.id());
GridNearTxPrepareRequest req = (GridNearTxPrepareRequest) msg;
if (!req.last())
return false;
futMap.put(req.futureId(), req.version());
return cb.beforePrimaryPrepare(to, req.version().asIgniteUuid(), createSendFuture(clientWrappedSpi, msg));
} else if (msg instanceof GridNearTxFinishRequest) {
IgniteEx to = IgnitionEx.gridxx(node.id());
GridNearTxFinishRequest req = (GridNearTxFinishRequest) msg;
futMap.put(req.futureId(), req.version());
IgniteInternalTx tx = findTx(to, req.version(), true);
assertNotNull(tx);
return cb.beforePrimaryFinish(to, tx, createSendFuture(clientWrappedSpi, msg));
}
return false;
});
spi(prim).blockMessages(createPrimaryMessagePredicate(spi(prim), futMap, nearToLocVerMap, cb));
if (part2Sup != null) {
Ignite prim2 = txTop.get(part2Sup.get()).get1();
spi(prim2).blockMessages(createPrimaryMessagePredicate(spi(prim2), futMap, nearToLocVerMap, cb));
}
if (backupz != null) {
for (Ignite backup : backupz) spi(backup).blockMessages(createBackupMessagePredicate(spi(backup), futMap, cb));
if (part2Sup != null) {
for (Ignite backup : txTop.get(part2Sup.get()).get2()) spi(backup).blockMessages(createBackupMessagePredicate(spi(backup), futMap, cb));
}
}
assertNotNull(client.cache(DEFAULT_CACHE_NAME));
AtomicInteger idx = new AtomicInteger();
CyclicBarrier b = new CyclicBarrier(sizes.length);
IgniteInternalFuture<Long> fut = runMultiThreadedAsync(() -> {
int txIdx = idx.getAndIncrement();
int[] range = ranges[txIdx];
String lb = "t" + txIdx;
try (Transaction tx = client.transactions().withLabel(lb).txStart()) {
cb.onTxStart(tx, txIdx);
// Wait should always succeed.
U.awaitQuiet(b);
for (Integer key : keys.subList(range[0], range[0] + range[1])) client.cache(DEFAULT_CACHE_NAME).put(key, 0);
if (keysPart2 != null) {
// Force 2PC.
client.cache(DEFAULT_CACHE_NAME).put(keysPart2.get(txIdx), 0);
}
tx.commit();
} catch (Throwable ignored) {
// No-op.
}
}, sizes.length, "tx-thread");
try {
// TODO verify all created futures.
fut.get(TEST_TIMEOUT);
} catch (IgniteCheckedException e) {
Throwable err = testFailed.get();
if (err != null)
log.error("Test execution failed", err);
fail("Test is timed out");
}
return txTop;
}
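The TxCallback implementations produced by clo decide whether a blocked prepare or finish message stays queued in the recording SPI (returning true keeps it blocked). A minimal sketch of the release side, assuming the callback kept a reference to the client SPI; this helper is illustrative and not part of the original class:
// Hypothetical helper for illustration: waits until the near finish request is actually
// blocked by the recording SPI, then releases it so the transaction can complete.
private void releaseBlockedNearFinish(TestRecordingCommunicationSpi clientWrappedSpi) throws Exception {
    clientWrappedSpi.waitForBlocked();
    clientWrappedSpi.stopBlock(true);
}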
Use of org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxFinishRequest in project ignite by apache.
The class TxRecoveryWithConcurrentRollbackTest, method testTxDoesntBecomePreparedAfterError.
/**
* Start 3 servers,
* start 2 clients,
* start two OPTIMISTIC transactions with the same key from different client nodes,
* try to transfer both to the PREPARED state,
* stop one client node.
*/
@Test
public void testTxDoesntBecomePreparedAfterError() throws Exception {
backups = 2;
persistence = true;
syncMode = FULL_ASYNC;
final IgniteEx node0 = startGrids(3);
node0.cluster().state(ACTIVE);
final IgniteEx client1 = startGrid("client1");
final IgniteEx client2 = startGrid("client2");
awaitPartitionMapExchange();
final IgniteCache<Object, Object> cache = client1.cache(DEFAULT_CACHE_NAME);
final IgniteCache<Object, Object> cache2 = client2.cache(DEFAULT_CACHE_NAME);
final Integer pk = primaryKey(node0.cache(DEFAULT_CACHE_NAME));
CountDownLatch txPrepareLatch = new CountDownLatch(1);
GridTestUtils.runMultiThreadedAsync(() -> {
try (final Transaction tx = client1.transactions().withLabel("tx1").txStart(OPTIMISTIC, READ_COMMITTED, 5000, 1)) {
cache.put(pk, Boolean.TRUE);
TransactionProxyImpl p = (TransactionProxyImpl) tx;
// Block the near finish request to prevent tx rollback on exit from the try-with-resources block; this should cause another tx timeout failure.
spi(client1).blockMessages((node, msg) -> msg instanceof GridNearTxFinishRequest);
log.info("Test, preparing tx: xid=" + tx.xid() + ", tx=" + tx);
// Doing only prepare to try to lock the key, commit is not needed here.
p.tx().prepareNearTxLocal();
p.tx().currentPrepareFuture().listen(fut -> txPrepareLatch.countDown());
} catch (Exception e) {
// No-op.
}
}, 1, "tx1-thread");
try (final Transaction tx = client2.transactions().withLabel("tx2").txStart(OPTIMISTIC, READ_COMMITTED, 5000, 1)) {
cache2.put(pk, Boolean.TRUE);
TransactionProxyImpl p = (TransactionProxyImpl) tx;
log.info("Test, preparing tx: xid=" + tx.xid() + ", tx=" + tx);
p.tx().prepareNearTxLocal();
p.tx().currentPrepareFuture().listen(fut -> txPrepareLatch.countDown());
txPrepareLatch.await(6, TimeUnit.SECONDS);
if (txPrepareLatch.getCount() > 0)
fail("Failed to await for tx prepare.");
AtomicReference<GridDhtTxLocal> dhtTxLocRef = new AtomicReference<>();
assertTrue(waitForCondition(() -> {
dhtTxLocRef.set((GridDhtTxLocal) txs(node0).stream().filter(t -> t.state() == TransactionState.PREPARING).findFirst().orElse(null));
return dhtTxLocRef.get() != null;
}, 6_000));
assertNotNull(dhtTxLocRef.get());
UUID clientNodeToFail = dhtTxLocRef.get().eventNodeId();
GridDhtTxPrepareFuture prep = GridTestUtils.getFieldValue(dhtTxLocRef.get(), "prepFut");
prep.get();
List<IgniteInternalTx> txs = txs(node0);
String txsStr = txs.stream().map(Object::toString).collect(Collectors.joining(", "));
log.info("Transactions check point [count=" + txs.size() + ", txs=" + txsStr + "]");
if (clientNodeToFail.equals(client1.localNode().id()))
client1.close();
else if (clientNodeToFail.equals(client2.localNode().id()))
client2.close();
} catch (Exception e) {
log.error(e.getMessage(), e);
}
U.sleep(500);
assertEquals(3, grid(1).context().discovery().aliveServerNodes().size());
assertEquals(txs(client1).toString() + ", " + txs(client2).toString(), 1, txs(client1).size() + txs(client2).size());
}
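The txs(...) helper used above is not part of this snippet; a plausible reconstruction, assuming it simply snapshots the active transactions from the node's transaction manager:
// Hypothetical reconstruction for illustration; the real helper may differ.
private List<IgniteInternalTx> txs(IgniteEx node) {
    return new ArrayList<>(node.context().cache().context().tm().activeTransactions());
}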
Use of org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxFinishRequest in project ignite by apache.
The class TxRollbackAsyncTest, method testRollbackDelayFinishRequest.
/**
* Tests rollback with concurrent commit.
*/
@Test
public void testRollbackDelayFinishRequest() throws Exception {
final Ignite client = startClient();
final Ignite prim = primaryNode(0, CACHE_NAME);
final TestRecordingCommunicationSpi spi = (TestRecordingCommunicationSpi) client.configuration().getCommunicationSpi();
final AtomicReference<Transaction> txRef = new AtomicReference<>();
// Block commit request to primary node.
spi.blockMessages(new IgniteBiPredicate<ClusterNode, Message>() {
@Override
public boolean apply(ClusterNode node, Message msg) {
if (msg instanceof GridNearTxFinishRequest) {
GridNearTxFinishRequest r = (GridNearTxFinishRequest) msg;
return r.commit() && node.equals(prim.cluster().localNode());
}
return false;
}
});
final IgniteInternalFuture<Void> rollbackFut = runAsync(new Callable<Void>() {
@Override
public Void call() throws Exception {
spi.waitForBlocked();
final IgniteFuture<?> fut = txRef.get().rollbackAsync();
doSleep(500);
spi.stopBlock(true);
fut.get();
return null;
}
}, "tx-rollback-thread");
try (final Transaction tx = client.transactions().txStart()) {
txRef.set(tx);
client.cache(CACHE_NAME).put(0, 0);
tx.commit();
} catch (CacheException e) {
assertTrue(X.hasCause(e, TransactionRollbackException.class));
}
rollbackFut.get();
doSleep(500);
checkFutures();
}
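checkFutures() is defined elsewhere in the test hierarchy; a plausible sketch of such a leak check, assuming it inspects the MVCC manager for leftover cache futures on every node (the real helper may assert more):
// Hypothetical futures leak check for illustration.
private void checkNoLeakedFutures() {
    for (Ignite ignite : G.allGrids()) {
        IgniteEx ig = (IgniteEx) ignite;
        // Any future still registered after the test indicates a leaked tx or lock future.
        Collection<GridCacheFuture<?>> futs = ig.context().cache().context().mvcc().activeFutures();
        assertTrue("Unexpected active futures: " + futs, futs.isEmpty());
    }
}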