Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxPrepareRequest in project ignite by apache.
The class TxPartitionCounterStateAbstractTest, method createPrimaryMessagePredicate.
/**
 * @param wrappedPrimSpi Wrapped primary SPI.
 * @param futMap Future map.
 * @param nearToLocVerMap Near to local version map.
 * @param cb Callback.
 */
private IgniteBiPredicate<ClusterNode, Message> createPrimaryMessagePredicate(
    TestRecordingCommunicationSpi wrappedPrimSpi,
    Map<IgniteUuid, GridCacheVersion> futMap,
    Map<GridCacheVersion, GridCacheVersion> nearToLocVerMap,
    TxCallback cb
) {
    return (node, msg) -> {
        if (msg instanceof GridDhtTxPrepareRequest) {
            IgniteEx to = IgnitionEx.gridxx(node.id());

            GridDhtTxPrepareRequest req = (GridDhtTxPrepareRequest)msg;

            // React only to the last prepare batch of the transaction.
            if (!req.last())
                return false;

            futMap.put(req.futureId(), req.nearXidVersion());
            nearToLocVerMap.put(req.version(), req.nearXidVersion());

            IgniteEx from = fromNode(wrappedPrimSpi);

            IgniteInternalTx primTx = findTx(from, req.nearXidVersion(), true);

            return cb.beforeBackupPrepare(from, to, primTx, createSendFuture(wrappedPrimSpi, msg));
        }
        else if (msg instanceof GridDhtTxFinishRequest) {
            IgniteEx to = IgnitionEx.gridxx(node.id());

            GridDhtTxFinishRequest req = (GridDhtTxFinishRequest)msg;

            GridCacheVersion nearVer = nearToLocVerMap.get(req.version());

            futMap.put(req.futureId(), nearVer);

            IgniteEx from = fromNode(wrappedPrimSpi);

            IgniteInternalTx primTx = findTx(from, nearVer, true);
            IgniteInternalTx backupTx = findTx(to, nearVer, false);

            return cb.beforeBackupFinish(from, to, primTx, backupTx, nearVer.asIgniteUuid(),
                createSendFuture(wrappedPrimSpi, msg));
        }
        else if (msg instanceof GridNearTxPrepareResponse) {
            GridNearTxPrepareResponse resp = (GridNearTxPrepareResponse)msg;

            IgniteEx from = fromNode(wrappedPrimSpi);

            GridCacheVersion ver = futMap.get(resp.futureId());

            IgniteInternalTx primTx = findTx(from, ver, true);

            return cb.afterPrimaryPrepare(from, primTx, ver.asIgniteUuid(), createSendFuture(wrappedPrimSpi, msg));
        }
        else if (msg instanceof GridNearTxFinishResponse) {
            GridNearTxFinishResponse resp = (GridNearTxFinishResponse)msg;

            IgniteEx from = fromNode(wrappedPrimSpi);

            IgniteUuid nearVer = futMap.get(resp.futureId()).asIgniteUuid();

            return cb.afterPrimaryFinish(from, nearVer, createSendFuture(wrappedPrimSpi, msg));
        }

        return false;
    };
}
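For context, a minimal sketch of how such a predicate is typically installed and later released. The grid(0) node and the futMap/nearToLocVerMap/cb arguments are placeholders standing in for whatever the surrounding test sets up; spi(), blockMessages(), waitForBlocked() and stopBlock() are the real TestRecordingCommunicationSpi API:

// Record and filter messages leaving the primary node.
TestRecordingCommunicationSpi primSpi = TestRecordingCommunicationSpi.spi(grid(0));

// Messages for which the predicate returns true are held back instead of being sent.
primSpi.blockMessages(createPrimaryMessagePredicate(primSpi, futMap, nearToLocVerMap, cb));

// ... start transactions; prepare/finish messages matched above are now held ...

// Wait until at least one message is blocked, then let everything through.
primSpi.waitForBlocked();
primSpi.stopBlock();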
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxPrepareRequest in project ignite by apache.
The class CacheMvccBasicContinuousQueryTest, method checkUpdateCountersGapsClosed.
/**
 * @throws Exception If failed.
 */
private void checkUpdateCountersGapsClosed(CacheMode cacheMode) throws Exception {
    testSpi = true;

    int srvCnt = 4;

    startGridsMultiThreaded(srvCnt);

    IgniteEx nearNode = grid(srvCnt - 1);

    IgniteCache<Object, Object> cache = nearNode.createCache(
        cacheConfiguration(cacheMode, FULL_SYNC, srvCnt - 1, srvCnt)
            .setIndexedTypes(Integer.class, Integer.class));

    IgniteEx primary = grid(0);

    Affinity<Object> aff = nearNode.affinity(cache.getName());

    int[] nearBackupParts = aff.backupPartitions(nearNode.localNode());
    int[] primaryParts = aff.primaryPartitions(primary.localNode());

    Collection<Integer> nearSet = new HashSet<>();

    for (int part : nearBackupParts)
        nearSet.add(part);

    Collection<Integer> primarySet = new HashSet<>();

    for (int part : primaryParts)
        primarySet.add(part);

    // We need backup partitions on the near node.
    nearSet.retainAll(primarySet);

    List<Integer> keys = singlePartKeys(primary.cache(DEFAULT_CACHE_NAME), 20, nearSet.iterator().next());

    int range = 3;

    ContinuousQuery<Integer, Integer> qry = new ContinuousQuery<>();

    List<CacheEntryEvent> arrivedEvts = new ArrayList<>();

    CountDownLatch latch = new CountDownLatch(range * 2);

    qry.setLocalListener(new CacheEntryUpdatedListener<Integer, Integer>() {
        @Override public void onUpdated(Iterable<CacheEntryEvent<? extends Integer, ? extends Integer>> evts) {
            for (CacheEntryEvent e : evts) {
                arrivedEvts.add(e);

                latch.countDown();
            }
        }
    });

    QueryCursor<Cache.Entry<Integer, Integer>> cur = nearNode.cache(DEFAULT_CACHE_NAME).query(qry);

    // Prevent the first transaction from preparing on the backups.
    TestRecordingCommunicationSpi spi = TestRecordingCommunicationSpi.spi(primary);

    spi.blockMessages(new IgniteBiPredicate<ClusterNode, Message>() {
        private final AtomicInteger limiter = new AtomicInteger();

        @Override public boolean apply(ClusterNode node, Message msg) {
            if (msg instanceof GridDhtTxPrepareRequest)
                return limiter.getAndIncrement() < srvCnt - 1;

            return false;
        }
    });

    Transaction txA = primary.transactions().txStart(PESSIMISTIC, REPEATABLE_READ);

    for (int i = 0; i < range; i++)
        primary.cache(DEFAULT_CACHE_NAME).put(keys.get(i), 2);

    txA.commitAsync();

    GridTestUtils.runAsync(() -> {
        try (Transaction tx = primary.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
            for (int i = range; i < range * 2; i++)
                primary.cache(DEFAULT_CACHE_NAME).put(keys.get(i), 1);

            tx.commit();
        }
    }).get();

    GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override public boolean apply() {
            return primary.context().cache().context().tm().activeTransactions().stream()
                .allMatch(tx -> tx.state() == PREPARING);
        }
    }, 3_000);

    GridTestUtils.runAsync(() -> {
        try (Transaction txB = primary.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
            for (int i = range * 2; i < range * 3; i++)
                primary.cache(DEFAULT_CACHE_NAME).put(keys.get(i), 3);

            txB.commit();
        }
    }).get();

    long primaryUpdCntr = getUpdateCounter(primary, keys.get(0));

    assertEquals(range * 3, primaryUpdCntr);

    // Drop the primary node.
    stopGrid(primary.name());

    // Wait until all txs are rolled back.
    GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override public boolean apply() {
            boolean allRolledBack = true;

            for (int i = 1; i < srvCnt; i++) {
                boolean rolledBack = grid(i).context().cache().context().tm().activeTransactions().stream()
                    .allMatch(tx -> tx.state() == ROLLED_BACK);

                allRolledBack &= rolledBack;
            }

            return allRolledBack;
        }
    }, 3_000);

    for (int i = 1; i < srvCnt; i++) {
        IgniteCache backupCache = grid(i).cache(DEFAULT_CACHE_NAME);

        int size = backupCache.query(new SqlFieldsQuery("select * from Integer")).getAll().size();

        long backupCntr = getUpdateCounter(grid(i), keys.get(0));

        assertEquals(range * 2, size);
        assertEquals(primaryUpdCntr, backupCntr);
    }

    assertTrue(latch.await(5, SECONDS));

    assertEquals(range * 2, arrivedEvts.size());

    cur.close();
}
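The getUpdateCounter(node, key) helper is not part of this snippet. A plausible sketch, assuming the internal partition topology API (the real test defines its own version):

private long getUpdateCounter(IgniteEx node, Integer key) {
    // Resolve the partition that the key maps to.
    int partId = node.affinity(DEFAULT_CACHE_NAME).partition(key);

    // Read the update counter of the node-local copy of that partition.
    GridDhtLocalPartition part = node.cachex(DEFAULT_CACHE_NAME).context().topology().localPartition(partId);

    assert part != null;

    return part.updateCounter();
}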
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxPrepareRequest in project ignite by apache.
The class CacheMvccTxRecoveryTest, method testUpdateCountersGapIsClosed.
/**
 * @throws Exception If failed.
 */
@Test
public void testUpdateCountersGapIsClosed() throws Exception {
    int srvCnt = 3;

    startGridsMultiThreaded(srvCnt);

    client = true;

    IgniteEx ign = startGrid(srvCnt);

    IgniteCache<Object, Object> cache = ign.getOrCreateCache(basicCcfg().setBackups(2));

    int vid = 1;

    IgniteEx victim = grid(vid);

    ArrayList<Integer> keys = new ArrayList<>();

    Integer part = null;

    Affinity<Object> aff = ign.affinity(DEFAULT_CACHE_NAME);

    // Pick two keys from the same partition for which the victim node is primary.
    for (int i = 0; i < 2000; i++) {
        int p = aff.partition(i);

        if (aff.isPrimary(victim.localNode(), i)) {
            if (part == null)
                part = p;

            if (p == part)
                keys.add(i);

            if (keys.size() == 2)
                break;
        }
    }

    assert keys.size() == 2;

    Transaction txA = ign.transactions().txStart(PESSIMISTIC, REPEATABLE_READ);

    // Prevent the first transaction from preparing on the backups.
    ((TestRecordingCommunicationSpi)victim.configuration().getCommunicationSpi())
        .blockMessages(new IgniteBiPredicate<ClusterNode, Message>() {
            final AtomicInteger limiter = new AtomicInteger();

            @Override public boolean apply(ClusterNode node, Message msg) {
                if (msg instanceof GridDhtTxPrepareRequest)
                    return limiter.getAndIncrement() < 2;

                return false;
            }
        });

    cache.query(new SqlFieldsQuery("insert into Integer(_key, _val) values(?, 42)").setArgs(keys.get(0)));

    txA.commitAsync();

    GridCacheVersion aXidVer = ((TransactionProxyImpl)txA).tx().xidVersion();

    assertConditionEventually(() -> txsOnNode(victim, aXidVer).stream().anyMatch(tx -> tx.state() == PREPARING));

    GridTestUtils.runAsync(() -> {
        try (Transaction txB = ign.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
            cache.query(new SqlFieldsQuery("insert into Integer(_key, _val) values(?, 42)").setArgs(keys.get(1)));

            txB.commit();
        }
    }).get();

    long victimUpdCntr = updateCounter(victim.cachex(DEFAULT_CACHE_NAME).context(), keys.get(0));

    List<IgniteEx> backupNodes = grids(srvCnt, i -> i != vid);

    List<IgniteInternalTx> backupTxsA = backupNodes.stream()
        .map(node -> txsOnNode(node, aXidVer))
        .flatMap(Collection::stream)
        .collect(Collectors.toList());

    // Drop the primary node.
    victim.close();

    assertConditionEventually(() -> backupTxsA.stream().allMatch(tx -> tx.state() == ROLLED_BACK));

    backupNodes.stream().map(node -> node.cache(DEFAULT_CACHE_NAME)).forEach(c -> {
        assertEquals(1, c.query(new SqlFieldsQuery("select * from Integer")).getAll().size());
    });

    backupNodes.forEach(node -> {
        for (Integer k : keys)
            assertEquals(victimUpdCntr, updateCounter(node.cachex(DEFAULT_CACHE_NAME).context(), k));
    });
}
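The txsOnNode helper gathers the transactions bound to a given xid version. A plausible reconstruction (hypothetical; the real test defines its own):

private static List<IgniteInternalTx> txsOnNode(IgniteEx node, GridCacheVersion xidVer) {
    // All transactions currently active on the node that belong to the given near xid version.
    return node.context().cache().context().tm().activeTransactions().stream()
        .filter(tx -> xidVer.equals(tx.nearXidVersion()))
        .collect(Collectors.toList());
}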
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxPrepareRequest in project ignite by apache.
The class GridExchangeFreeCellularSwitchTxCountersTest, method blockPrepareMessages.
/**
 * Blocks all tx prepare requests addressed to the given node.
 *
 * @param igniteTo Node whose incoming prepare requests should be blocked.
 * @param prepMsgLatch Latch counted down for every observed prepare request.
 * @param blockedMsgCnt Counter of the messages actually blocked.
 */
protected void blockPrepareMessages(Ignite igniteTo, CountDownLatch prepMsgLatch, AtomicInteger blockedMsgCnt) {
    for (Ignite ignite : G.allGrids()) {
        TestRecordingCommunicationSpi spi = (TestRecordingCommunicationSpi)ignite.configuration().getCommunicationSpi();

        spi.blockMessages(new IgniteBiPredicate<ClusterNode, Message>() {
            @Override public boolean apply(ClusterNode node, Message msg) {
                if (msg instanceof GridDhtTxPrepareRequest) {
                    IgniteEx to = IgnitionEx.gridxx(node.id());

                    assert prepMsgLatch.getCount() > 0;

                    boolean block = to.equals(igniteTo);

                    if (block)
                        blockedMsgCnt.incrementAndGet();

                    prepMsgLatch.countDown();

                    return block;
                }

                return false;
            }
        });
    }
}
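A minimal usage sketch under assumed wiring; the latch count and target node are illustrative, not taken from the original test:

// Expect two prepare requests to be observed cluster-wide (illustrative count).
CountDownLatch prepMsgLatch = new CountDownLatch(2);
AtomicInteger blockedMsgCnt = new AtomicInteger();

blockPrepareMessages(grid(1), prepMsgLatch, blockedMsgCnt);

// ... start transactions whose backups include grid(1) ...

assertTrue(prepMsgLatch.await(10, TimeUnit.SECONDS));

// Release the held messages on every node once the checks are done.
for (Ignite ignite : G.allGrids())
    ((TestRecordingCommunicationSpi)ignite.configuration().getCommunicationSpi()).stopBlock();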
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxPrepareRequest in project ignite by apache.
The class CacheMvccBasicContinuousQueryTest, method checkUpdateCountersGapIsProcessedSimple.
/**
 * @throws Exception If failed.
 */
private void checkUpdateCountersGapIsProcessedSimple(CacheMode cacheMode) throws Exception {
    testSpi = true;

    final int srvCnt = 4;
    final int backups = srvCnt - 1;

    startGridsMultiThreaded(srvCnt);

    client = true;

    IgniteEx nearNode = startGrid(srvCnt);

    IgniteCache<Object, Object> cache = nearNode.createCache(
        cacheConfiguration(cacheMode, FULL_SYNC, backups, srvCnt)
            .setIndexedTypes(Integer.class, Integer.class));

    IgniteEx primary = grid(0);

    List<Integer> keys = primaryKeys(primary.cache(DEFAULT_CACHE_NAME), 3);

    ContinuousQuery<Integer, Integer> qry = new ContinuousQuery<>();

    List<CacheEntryEvent> arrivedEvts = new ArrayList<>();

    CountDownLatch latch = new CountDownLatch(2);

    qry.setLocalListener(new CacheEntryUpdatedListener<Integer, Integer>() {
        @Override public void onUpdated(Iterable<CacheEntryEvent<? extends Integer, ? extends Integer>> evts) {
            for (CacheEntryEvent e : evts) {
                arrivedEvts.add(e);

                latch.countDown();
            }
        }
    });

    QueryCursor<Cache.Entry<Integer, Integer>> cur = nearNode.cache(DEFAULT_CACHE_NAME).query(qry);

    // Initial value.
    cache.query(new SqlFieldsQuery("insert into Integer(_key, _val) values(?, 42)").setArgs(keys.get(0))).getAll();

    // Prevent the first transaction from preparing on the backups; also swallow
    // continuous query notifications sent from the primary.
    TestRecordingCommunicationSpi spi = TestRecordingCommunicationSpi.spi(primary);

    final AtomicInteger dhtPrepMsgLimiter = new AtomicInteger();

    spi.blockMessages(new IgniteBiPredicate<ClusterNode, Message>() {
        @Override public boolean apply(ClusterNode node, Message msg) {
            if (msg instanceof GridDhtTxPrepareRequest)
                return dhtPrepMsgLimiter.getAndIncrement() < backups;

            if (msg instanceof GridContinuousMessage)
                return true;

            return false;
        }
    });

    // First tx: expect it to be prepared only on the primary node, with the
    // GridDhtTxPrepareRequests to the remotes swallowed.
    Transaction txA = nearNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ);

    cache.query(new SqlFieldsQuery("insert into Integer(_key, _val) values(?, 42)").setArgs(keys.get(1))).getAll();

    txA.commitAsync();

    // Wait until the first tx changes its status to PREPARING.
    GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override public boolean apply() {
            boolean preparing = nearNode.context().cache().context().tm().activeTransactions().stream()
                .allMatch(tx -> tx.state() == PREPARING);

            boolean allPrepsSwallowed = dhtPrepMsgLimiter.get() == backups;

            return preparing && allPrepsSwallowed;
        }
    }, 3_000);

    // Second tx.
    GridTestUtils.runAsync(() -> {
        try (Transaction txB = nearNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
            cache.query(new SqlFieldsQuery("insert into Integer(_key, _val) values(?, 42)").setArgs(keys.get(2)));

            txB.commit();
        }
    }).get();

    long primaryUpdCntr = getUpdateCounter(primary, keys.get(0));

    // There were three updates: init, first and second.
    assertEquals(3, primaryUpdCntr);

    // Drop the primary node.
    stopGrid(primary.name());

    // Wait until all txs are rolled back.
    GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override public boolean apply() {
            boolean allRolledBack = true;

            for (int i = 1; i < srvCnt; i++) {
                boolean rolledBack = grid(i).context().cache().context().tm().activeTransactions().stream()
                    .allMatch(tx -> tx.state() == ROLLED_BACK);

                allRolledBack &= rolledBack;
            }

            return allRolledBack;
        }
    }, 3_000);

    for (int i = 1; i < srvCnt; i++) {
        IgniteCache backupCache = grid(i).cache(DEFAULT_CACHE_NAME);

        int size = backupCache.query(new SqlFieldsQuery("select * from Integer")).getAll().size();

        long backupCntr = getUpdateCounter(grid(i), keys.get(0));

        assertEquals(2, size);
        assertEquals(primaryUpdCntr, backupCntr);
    }

    assertTrue(latch.await(3, SECONDS));

    assertEquals(2, arrivedEvts.size());
    assertEquals(keys.get(0), arrivedEvts.get(0).getKey());
    assertEquals(keys.get(2), arrivedEvts.get(1).getKey());

    cur.close();

    nearNode.close();
}
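The cacheConfiguration(...) factory comes from the MVCC test base class. A plausible sketch of what it is assumed to produce (hypothetical reconstruction, not the project's exact code):

private CacheConfiguration<Object, Object> cacheConfiguration(
    CacheMode cacheMode,
    CacheWriteSynchronizationMode syncMode,
    int backups,
    int parts
) {
    return new CacheConfiguration<Object, Object>(DEFAULT_CACHE_NAME)
        // MVCC tests run with snapshot-based transactional atomicity.
        .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT)
        .setCacheMode(cacheMode)
        .setWriteSynchronizationMode(syncMode)
        .setBackups(backups)
        .setAffinity(new RendezvousAffinityFunction(false, parts));
}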