Use of org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC in project ignite by apache.
The class GridCommandHandlerTest, method testKillHangingRemoteTransactions.
/**
 * Simulates uncommitted backup transactions and tests rolling them back with the control utility.
 */
@Test
public void testKillHangingRemoteTransactions() throws Exception {
    final int cnt = 3;

    startGridsMultiThreaded(cnt);

    Ignite[] clients = new Ignite[] {
        startGrid("client1"),
        startGrid("client2"),
        startGrid("client3"),
        startGrid("client4")
    };

    clients[0].getOrCreateCache(new CacheConfiguration<>(DEFAULT_CACHE_NAME)
        .setBackups(2)
        .setAtomicityMode(TRANSACTIONAL)
        .setWriteSynchronizationMode(FULL_SYNC)
        .setAffinity(new RendezvousAffinityFunction(false, 64)));

    awaitPartitionMapExchange();

    for (Ignite client : clients) {
        assertTrue(client.configuration().isClientMode());
        assertNotNull(client.cache(DEFAULT_CACHE_NAME));
    }

    LongAdder progress = new LongAdder();
    AtomicInteger idx = new AtomicInteger();
    int tc = clients.length;

    CountDownLatch lockLatch = new CountDownLatch(1);
    CountDownLatch commitLatch = new CountDownLatch(1);

    Ignite prim = primaryNode(0L, DEFAULT_CACHE_NAME);

    TestRecordingCommunicationSpi primSpi = TestRecordingCommunicationSpi.spi(prim);

    // Block all DHT finish requests on the primary node.
    primSpi.blockMessages(new IgniteBiPredicate<ClusterNode, Message>() {
        @Override public boolean apply(ClusterNode node, Message message) {
            return message instanceof GridDhtTxFinishRequest;
        }
    });

    Set<IgniteUuid> xidSet = new GridConcurrentHashSet<>();

    IgniteInternalFuture<?> fut = multithreadedAsync(new Runnable() {
        @Override public void run() {
            int id = idx.getAndIncrement();

            Ignite client = clients[id];

            try (Transaction tx = client.transactions().txStart(PESSIMISTIC, READ_COMMITTED, 0, 1)) {
                xidSet.add(tx.xid());

                IgniteCache<Long, Long> cache = client.cache(DEFAULT_CACHE_NAME);

                if (id != 0)
                    U.awaitQuiet(lockLatch);

                cache.invoke(0L, new IncrementClosure(), null);

                if (id == 0) {
                    lockLatch.countDown();

                    U.awaitQuiet(commitLatch);

                    // Wait until lock candidates are enqueued.
                    doSleep(500);
                }

                tx.commit();
            }
            catch (Exception e) {
                assertTrue(X.hasCause(e, TransactionTimeoutException.class));
            }

            progress.increment();
        }
    }, tc, "invoke-thread");

    U.awaitQuiet(lockLatch);

    commitLatch.countDown();

    primSpi.waitForBlocked(clients.length);

    // Unblock finish messages only for clients 2 through 4.
    primSpi.stopBlock(true, blockedMsg -> {
        GridIoMessage iom = blockedMsg.ioMessage();

        Message m = iom.message();

        if (m instanceof GridDhtTxFinishRequest) {
            GridDhtTxFinishRequest r = (GridDhtTxFinishRequest)m;

            return !r.nearNodeId().equals(clients[0].cluster().localNode().id());
        }

        return true;
    });

    // Wait until the lock queue is stable on every server node.
    for (Ignite ignite : G.allGrids()) {
        if (ignite.configuration().isClientMode())
            continue;

        Collection<IgniteInternalTx> txs = ((IgniteEx)ignite).context().cache().context().tm().activeTransactions();

        waitForCondition(new GridAbsPredicate() {
            @Override public boolean apply() {
                for (IgniteInternalTx tx : txs) {
                    if (!tx.local()) {
                        IgniteTxEntry entry = tx.writeEntries().iterator().next();

                        GridCacheEntryEx cached = entry.cached();

                        Collection<GridCacheMvccCandidate> candidates = cached.remoteMvccSnapshot();

                        if (candidates.size() != clients.length)
                            return false;
                    }
                }

                return true;
            }
        }, 10_000);
    }

    CommandHandler h = new CommandHandler();

    // Check listing.
    validate(h, map -> {
        for (int i = 0; i < cnt; i++) {
            IgniteEx grid = grid(i);

            // Skip the primary node.
            if (grid.localNode().id().equals(prim.cluster().localNode().id()))
                continue;

            VisorTxTaskResult res = map.get(grid.localNode());

            List<VisorTxInfo> infos = res.getInfos().stream()
                .filter(info -> xidSet.contains(info.getNearXid()))
                .collect(Collectors.toList());

            // Validate the queue length on backups.
            assertEquals(clients.length, infos.size());
        }
    }, "--tx");

    // Check kill.
    validate(h, map -> {
        // No-op.
    }, "--tx", "--kill");

    // Wait for all remote txs to finish.
    for (Ignite ignite : G.allGrids()) {
        if (ignite.configuration().isClientMode())
            continue;

        Collection<IgniteInternalTx> txs = ((IgniteEx)ignite).context().cache().context().tm().activeTransactions();

        for (IgniteInternalTx tx : txs) {
            if (!tx.local())
                tx.finishFuture().get();
        }
    }

    // Unblock the finish message from client1.
    primSpi.stopBlock(true);

    fut.get();

    Long cur = (Long)clients[0].cache(DEFAULT_CACHE_NAME).get(0L);

    // client1's transaction was killed, so only tc - 1 increments are applied.
    assertEquals(tc - 1, cur.longValue());

    checkUserFutures();
}
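In the test above, FULL_SYNC makes every write and commit wait for acknowledgements from all participating remote nodes (primary and backups), which is what lets the blocked GridDhtTxFinishRequest messages hang the committing clients. As a standalone illustration, here is a minimal sketch of the same cache shape with a pessimistic transaction; the cache name, the 500 ms timeout, and the wrapper class are illustrative choices, not part of the test:

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.cache.CacheWriteSynchronizationMode;
import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.transactions.Transaction;
import org.apache.ignite.transactions.TransactionConcurrency;
import org.apache.ignite.transactions.TransactionIsolation;

public class FullSyncTxExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // Same cache shape as in the test: 2 backups, transactional, FULL_SYNC.
            CacheConfiguration<Long, Long> ccfg = new CacheConfiguration<Long, Long>("txCache")
                .setBackups(2)
                .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL)
                .setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC)
                .setAffinity(new RendezvousAffinityFunction(false, 64));

            IgniteCache<Long, Long> cache = ignite.getOrCreateCache(ccfg);

            // A pessimistic transaction with a 500 ms timeout (the test uses 0, i.e. no
            // timeout), so a hung lock owner fails fast here instead of hanging forever.
            try (Transaction tx = ignite.transactions().txStart(
                TransactionConcurrency.PESSIMISTIC, TransactionIsolation.READ_COMMITTED, 500, 1)) {
                Long prev = cache.get(0L);

                cache.put(0L, prev == null ? 1L : prev + 1);

                tx.commit();
            }
        }
    }
}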
Use of org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC in project ignite by apache.
The class CacheMvccSqlTxQueriesAbstractTest, method testUpdateExplicitPartitionsWithoutReducer.
/**
 * @throws Exception If failed.
 */
@Test
public void testUpdateExplicitPartitionsWithoutReducer() throws Exception {
    ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, 10).setIndexedTypes(Integer.class, Integer.class);

    Ignite ignite = startGridsMultiThreaded(4);

    awaitPartitionMapExchange();

    IgniteCache<Object, Object> cache = ignite.cache(DEFAULT_CACHE_NAME);

    Affinity<Object> affinity = internalCache0(cache).affinity();

    int keysCnt = 10, retryCnt = 0;

    Integer test = 0;

    Map<Integer, Integer> vals = new LinkedHashMap<>();

    // Collect keysCnt keys that map to partition 1 or 2.
    while (vals.size() < keysCnt) {
        int partition = affinity.partition(test);

        if (partition == 1 || partition == 2)
            vals.put(test, 0);
        else
            assertTrue("Maximum retry number exceeded", ++retryCnt < 1000);

        test++;
    }

    cache.putAll(vals);

    SqlFieldsQuery qry = new SqlFieldsQuery("UPDATE Integer set _val=2").setPartitions(1, 2);

    List<List<?>> all = cache.query(qry).getAll();

    // The single result row holds the number of updated entries.
    assertEquals(Long.valueOf(keysCnt), all.stream().findFirst().orElseThrow(AssertionError::new).get(0));

    List<List<?>> rows = cache.query(new SqlFieldsQuery("SELECT _val FROM Integer")).getAll();

    assertEquals(keysCnt, rows.size());

    assertTrue(rows.stream().map(r -> r.get(0)).map(Integer.class::cast).allMatch(v -> v == 2));
}
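The key API here is SqlFieldsQuery.setPartitions, which pins SQL execution, DML included, to an explicit set of partitions so entries elsewhere are never touched. A minimal standalone sketch of that pattern, assuming the ignite-indexing module is on the classpath; the cache name and key range are illustrative:

import java.util.List;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.SqlFieldsQuery;
import org.apache.ignite.configuration.CacheConfiguration;

public class PartitionPinnedSqlExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            IgniteCache<Integer, Integer> cache = ignite.getOrCreateCache(
                new CacheConfiguration<Integer, Integer>("intCache")
                    .setIndexedTypes(Integer.class, Integer.class));

            for (int i = 0; i < 100; i++)
                cache.put(i, 0);

            // setPartitions limits both the scan and the update to the listed
            // partitions; keys hashed to other partitions keep their old value.
            SqlFieldsQuery update = new SqlFieldsQuery("UPDATE Integer SET _val = 2")
                .setPartitions(1, 2);

            // The single result row holds the number of updated entries.
            List<List<?>> res = cache.query(update).getAll();

            System.out.println("Updated: " + res.get(0).get(0));
        }
    }
}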
Use of org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC in project ignite by apache.
The class CacheMvccBasicContinuousQueryTest, method checkUpdateCountersGapIsProcessedSimple.
/**
 * @throws Exception If failed.
 */
private void checkUpdateCountersGapIsProcessedSimple(CacheMode cacheMode) throws Exception {
    testSpi = true;

    final int srvCnt = 4;

    final int backups = srvCnt - 1;

    startGridsMultiThreaded(srvCnt);

    client = true;

    IgniteEx nearNode = startGrid(srvCnt);

    IgniteCache<Object, Object> cache = nearNode.createCache(
        cacheConfiguration(cacheMode, FULL_SYNC, backups, srvCnt).setIndexedTypes(Integer.class, Integer.class));

    IgniteEx primary = grid(0);

    List<Integer> keys = primaryKeys(primary.cache(DEFAULT_CACHE_NAME), 3);

    ContinuousQuery<Integer, Integer> qry = new ContinuousQuery<>();

    List<CacheEntryEvent> arrivedEvts = new ArrayList<>();

    CountDownLatch latch = new CountDownLatch(2);

    qry.setLocalListener(new CacheEntryUpdatedListener<Integer, Integer>() {
        @Override public void onUpdated(Iterable<CacheEntryEvent<? extends Integer, ? extends Integer>> evts) {
            for (CacheEntryEvent e : evts) {
                arrivedEvts.add(e);

                latch.countDown();
            }
        }
    });

    QueryCursor<Cache.Entry<Integer, Integer>> cur = nearNode.cache(DEFAULT_CACHE_NAME).query(qry);

    // Initial value.
    cache.query(new SqlFieldsQuery("insert into Integer(_key, _val) values(?, 42)").setArgs(keys.get(0))).getAll();

    // Prevent the first transaction from preparing on backups.
    TestRecordingCommunicationSpi spi = TestRecordingCommunicationSpi.spi(primary);

    final AtomicInteger dhtPrepMsgLimiter = new AtomicInteger();

    spi.blockMessages(new IgniteBiPredicate<ClusterNode, Message>() {
        @Override public boolean apply(ClusterNode node, Message msg) {
            if (msg instanceof GridDhtTxPrepareRequest)
                return dhtPrepMsgLimiter.getAndIncrement() < backups;

            if (msg instanceof GridContinuousMessage)
                return true;

            return false;
        }
    });

    // First tx: expect it to be prepared only on the primary node, with the
    // GridDhtTxPrepareRequests to remote nodes swallowed.
    Transaction txA = nearNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ);

    cache.query(new SqlFieldsQuery("insert into Integer(_key, _val) values(?, 42)").setArgs(keys.get(1))).getAll();

    txA.commitAsync();

    // Wait until the first tx changes its state to PREPARING.
    GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override public boolean apply() {
            boolean preparing = nearNode.context().cache().context().tm().activeTransactions().stream()
                .allMatch(tx -> tx.state() == PREPARING);

            boolean allPrepsSwallowed = dhtPrepMsgLimiter.get() == backups;

            return preparing && allPrepsSwallowed;
        }
    }, 3_000);

    // Second tx.
    GridTestUtils.runAsync(() -> {
        try (Transaction txB = nearNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
            cache.query(new SqlFieldsQuery("insert into Integer(_key, _val) values(?, 42)").setArgs(keys.get(2)));

            txB.commit();
        }
    }).get();

    long primaryUpdCntr = getUpdateCounter(primary, keys.get(0));

    // There were three updates: the initial insert and the two transactions.
    assertEquals(3, primaryUpdCntr);

    // Drop the primary node.
    stopGrid(primary.name());

    // Wait until all txs are rolled back.
    GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override public boolean apply() {
            boolean allRolledBack = true;

            for (int i = 1; i < srvCnt; i++) {
                boolean rolledBack = grid(i).context().cache().context().tm().activeTransactions().stream()
                    .allMatch(tx -> tx.state() == ROLLED_BACK);

                allRolledBack &= rolledBack;
            }

            return allRolledBack;
        }
    }, 3_000);

    // The first tx never prepared on backups, so only the initial insert and the second tx survive.
    for (int i = 1; i < srvCnt; i++) {
        IgniteCache backupCache = grid(i).cache(DEFAULT_CACHE_NAME);

        int size = backupCache.query(new SqlFieldsQuery("select * from Integer")).getAll().size();

        long backupCntr = getUpdateCounter(grid(i), keys.get(0));

        assertEquals(2, size);
        assertEquals(primaryUpdCntr, backupCntr);
    }

    assertTrue(latch.await(3, SECONDS));

    assertEquals(2, arrivedEvts.size());
    assertEquals(keys.get(0), arrivedEvts.get(0).getKey());
    assertEquals(keys.get(2), arrivedEvts.get(1).getKey());

    cur.close();

    nearNode.close();
}
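The continuous-query wiring used above follows a general pattern: register a local listener through ContinuousQuery, get back a QueryCursor, and close the cursor to deregister the listener. A minimal sketch of just that pattern, with an illustrative cache name and a lambda in place of the test's anonymous listener:

import javax.cache.Cache;
import javax.cache.event.CacheEntryEvent;
import javax.cache.event.CacheEntryUpdatedListener;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.ContinuousQuery;
import org.apache.ignite.cache.query.QueryCursor;

public class ContinuousQueryExample {
    public static void main(String[] args) throws Exception {
        try (Ignite ignite = Ignition.start()) {
            IgniteCache<Integer, Integer> cache = ignite.getOrCreateCache("cqCache");

            ContinuousQuery<Integer, Integer> qry = new ContinuousQuery<>();

            // The local listener runs on the node that registered the query.
            qry.setLocalListener((CacheEntryUpdatedListener<Integer, Integer>)evts -> {
                for (CacheEntryEvent<? extends Integer, ? extends Integer> e : evts)
                    System.out.println("Updated: " + e.getKey() + " -> " + e.getValue());
            });

            // Registering the query returns a cursor; closing it deregisters the listener.
            try (QueryCursor<Cache.Entry<Integer, Integer>> cur = cache.query(qry)) {
                cache.put(1, 42);
                cache.put(2, 43);

                Thread.sleep(1_000); // Give notifications time to arrive.
            }
        }
    }
}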