use of org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC in project ignite by apache.
the class OpenCensusTxTracingConfigurationTest method testTxTraceDoesNotIncludeCommunicationTracesInCaseOfEmptyIncludedScopes.
/**
* Ensure that TX traces don't include COMMUNICATION sub-traces when the set of included scopes is empty.
*
* @throws Exception If failed.
*/
@Test
public void testTxTraceDoesNotIncludeCommunicationTracesInCaseOfEmptyIncludedScopes() throws Exception {
IgniteEx client = startGrid("client");
client.tracingConfiguration().set(new TracingConfigurationCoordinates.Builder(TX).build(), new TracingConfigurationParameters.Builder().withSamplingRate(SAMPLING_RATE_ALWAYS).build());
Transaction tx = client.transactions().txStart(PESSIMISTIC, SERIALIZABLE);
client.cache(DEFAULT_CACHE_NAME).put(1, 1);
tx.commit();
handler().flush();
SpanId parentSpanId = handler().allSpans().filter(span -> SpanType.TX_NEAR_PREPARE.spanName().equals(span.getName())).collect(Collectors.toList()).get(0).getContext().getSpanId();
java.util.List<SpanData> gotSpans = handler().allSpans().filter(span -> parentSpanId.equals(span.getParentSpanId()) && SpanType.COMMUNICATION_SOCKET_WRITE.spanName().equals(span.getName())).collect(Collectors.toList());
assertTrue(gotSpans.isEmpty());
}
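For contrast, here is a minimal sketch (not part of the test above) of how COMMUNICATION sub-traces could be enabled for TX spans, assuming the withIncludedScopes builder method and the Scope enum from the org.apache.ignite.spi.tracing package:
client.tracingConfiguration().set(
    new TracingConfigurationCoordinates.Builder(TX).build(),
    new TracingConfigurationParameters.Builder()
        .withSamplingRate(SAMPLING_RATE_ALWAYS)
        // Assumption: including the COMMUNICATION scope should make
        // COMMUNICATION_SOCKET_WRITE sub-spans appear under TX_NEAR_PREPARE.
        .withIncludedScopes(java.util.Collections.singleton(Scope.COMMUNICATION))
        .build());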
use of org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC in project ignite by apache.
the class OpenCensusTxTracingConfigurationTest method testTxConfigurationSamplingRateHalfSamplesSomethingAboutHalfTransactions.
/**
* Ensure that specifying a sampling rate between 0 and 1 within the TX scope traces some but not all transactions.
* Because of the probabilistic nature of sampling, it is not possible to check that a 0.5 sampling rate
* will result in exactly half of the transactions being traced.
*
* @throws Exception If failed.
*/
@Test
public void testTxConfigurationSamplingRateHalfSamplesSomethingAboutHalfTransactions() throws Exception {
IgniteEx client = startGrid("client");
client.tracingConfiguration().set(new TracingConfigurationCoordinates.Builder(TX).build(), new TracingConfigurationParameters.Builder().withSamplingRate(0.5).build());
final int txAmount = 100;
for (int i = 0; i < txAmount; i++) client.transactions().txStart(PESSIMISTIC, SERIALIZABLE).commit();
handler().flush();
java.util.List<SpanData> gotSpans = handler().allSpans().filter(span -> SpanType.TX.spanName().equals(span.getName())).collect(Collectors.toList());
// Because of the probabilistic nature of sampling, it's not possible to check that a 0.5 sampling rate will end with
// exactly half of {@code txAmount} transactions being sampled,
// so we just check that some but not all transactions were traced.
assertTrue(!gotSpans.isEmpty() && gotSpans.size() < txAmount);
}
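A related sketch, assuming a SAMPLING_RATE_NEVER constant exists alongside SAMPLING_RATE_ALWAYS: with a zero sampling rate no TX spans should be reported at all, the opposite boundary of the 0.5 rate exercised above.
client.tracingConfiguration().set(
    new TracingConfigurationCoordinates.Builder(TX).build(),
    new TracingConfigurationParameters.Builder().withSamplingRate(SAMPLING_RATE_NEVER).build());
client.transactions().txStart(PESSIMISTIC, SERIALIZABLE).commit();
handler().flush();
// With sampling disabled, no TX root spans are expected.
assertTrue(handler().allSpans().noneMatch(span -> SpanType.TX.spanName().equals(span.getName())));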
use of org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC in project ignite by apache.
the class CacheMvccTxRecoveryTest method checkRecoveryNearFailure.
/**
* Checks transaction recovery after a near node failure.
*
* @param endRes Expected transaction outcome (commit or rollback).
* @param nearNodeMode Whether the near node is started as a client or a server node.
* @throws Exception If failed.
*/
private void checkRecoveryNearFailure(TxEndResult endRes, NodeMode nearNodeMode) throws Exception {
int gridCnt = 4;
int baseCnt = gridCnt - 1;
boolean commit = endRes == COMMIT;
startGridsMultiThreaded(baseCnt);
// Start the near node either as a client or as a server, depending on the requested mode.
client = nearNodeMode == CLIENT;
IgniteEx nearNode = startGrid(baseCnt);
IgniteCache<Object, Object> cache = nearNode.getOrCreateCache(basicCcfg().setBackups(1));
Affinity<Object> aff = nearNode.affinity(DEFAULT_CACHE_NAME);
List<Integer> keys = new ArrayList<>();
for (int i = 0; i < 100; i++) {
if (aff.isPrimary(grid(0).localNode(), i) && aff.isBackup(grid(1).localNode(), i)) {
keys.add(i);
break;
}
}
for (int i = 0; i < 100; i++) {
if (aff.isPrimary(grid(1).localNode(), i) && aff.isBackup(grid(2).localNode(), i)) {
keys.add(i);
break;
}
}
assert keys.size() == 2;
TestRecordingCommunicationSpi nearComm = (TestRecordingCommunicationSpi) nearNode.configuration().getCommunicationSpi();
if (!commit)
nearComm.blockMessages(GridNearTxPrepareRequest.class, grid(1).name());
GridTestUtils.runAsync(() -> {
// run in separate thread to exclude tx from thread-local map
GridNearTxLocal nearTx = ((TransactionProxyImpl) nearNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)).tx();
for (Integer k : keys) cache.query(new SqlFieldsQuery("insert into Integer(_key, _val) values(?, 42)").setArgs(k));
List<IgniteInternalTx> txs = IntStream.range(0, baseCnt).mapToObj(i -> txsOnNode(grid(i), nearTx.xidVersion())).flatMap(Collection::stream).collect(Collectors.toList());
IgniteInternalFuture<?> prepareFut = nearTx.prepareNearTxLocal();
if (commit)
prepareFut.get();
else
assertConditionEventually(() -> txs.stream().anyMatch(tx -> tx.state() == PREPARED));
// drop near
nearNode.close();
assertConditionEventually(() -> txs.stream().allMatch(tx -> tx.state() == (commit ? COMMITTED : ROLLED_BACK)));
return null;
}).get();
if (commit) {
assertConditionEventually(() -> {
int rowsCnt = grid(0).cache(DEFAULT_CACHE_NAME).query(new SqlFieldsQuery("select * from Integer")).getAll().size();
return rowsCnt == keys.size();
});
} else {
int rowsCnt = G.allGrids().get(0).cache(DEFAULT_CACHE_NAME).query(new SqlFieldsQuery("select * from Integer")).getAll().size();
assertEquals(0, rowsCnt);
}
assertPartitionCountersAreConsistent(keys, grids(baseCnt, i -> true));
}
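Hedged sketches of the helpers referenced above (txsOnNode and assertConditionEventually); the actual implementations in CacheMvccTxRecoveryTest may differ, these only illustrate the assumed contract:
private static List<IgniteInternalTx> txsOnNode(IgniteEx node, GridCacheVersion xidVer) {
    // Collect the active transactions on the node that belong to the given near tx version.
    return node.context().cache().context().tm().activeTransactions().stream()
        .filter(tx -> xidVer.equals(tx.nearXidVersion()))
        .collect(Collectors.toList());
}
private static void assertConditionEventually(GridAbsPredicate cond) throws Exception {
    // Poll the condition until it holds or the (assumed) timeout expires.
    assertTrue(GridTestUtils.waitForCondition(cond, 10_000));
}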
use of org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC in project ignite by apache.
the class CacheMvccTxRecoveryTest method testCountersNeighborcastServerFailed.
/**
* @throws Exception if failed.
*/
@Test
public void testCountersNeighborcastServerFailed() throws Exception {
// Reopen https://issues.apache.org/jira/browse/IGNITE-10766 if this test starts failing.
int srvCnt = 4;
startGridsMultiThreaded(srvCnt);
client = true;
IgniteEx ign = startGrid(srvCnt);
IgniteCache<Object, Object> cache = ign.getOrCreateCache(basicCcfg().setBackups(2));
ArrayList<Integer> keys = new ArrayList<>();
int vid = 3;
IgniteEx victim = grid(vid);
Affinity<Object> aff = ign.affinity(DEFAULT_CACHE_NAME);
for (int i = 0; i < 100; i++) {
if (aff.isPrimary(victim.localNode(), i) && !aff.isBackup(grid(0).localNode(), i)) {
keys.add(i);
break;
}
}
for (int i = 0; i < 100; i++) {
if (aff.isPrimary(victim.localNode(), i) && !aff.isBackup(grid(1).localNode(), i)) {
keys.add(i);
break;
}
}
assert keys.size() == 2 && !keys.contains(99);
// prevent prepare on one backup
((TestRecordingCommunicationSpi) victim.configuration().getCommunicationSpi()).blockMessages(GridDhtTxPrepareRequest.class, grid(0).name());
GridNearTxLocal nearTx = ((TransactionProxyImpl) ign.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)).tx();
for (Integer k : keys) cache.query(new SqlFieldsQuery("insert into Integer(_key, _val) values(?, 42)").setArgs(k));
List<IgniteInternalTx> txs = IntStream.range(0, srvCnt).mapToObj(this::grid).filter(g -> g != victim).map(g -> txsOnNode(g, nearTx.xidVersion())).flatMap(Collection::stream).collect(Collectors.toList());
nearTx.commitAsync();
// await tx partially prepared
assertConditionEventually(() -> txs.stream().anyMatch(tx -> tx.state() == PREPARED));
CountDownLatch latch1 = new CountDownLatch(1);
CountDownLatch latch2 = new CountDownLatch(1);
IgniteInternalFuture<Object> backgroundTxFut = GridTestUtils.runAsync(() -> {
try (Transaction ignored = ign.transactions().txStart()) {
boolean upd = false;
for (int i = 100; i < 200; i++) {
if (!aff.isPrimary(victim.localNode(), i)) {
cache.put(i, 11);
upd = true;
break;
}
}
assert upd;
latch1.countDown();
latch2.await(getTestTimeout(), TimeUnit.MILLISECONDS);
}
return null;
});
latch1.await(getTestTimeout(), TimeUnit.MILLISECONDS);
// drop primary
victim.close();
// do all assertions before rebalance
assertConditionEventually(() -> txs.stream().allMatch(tx -> tx.state() == ROLLED_BACK));
List<IgniteEx> liveNodes = grids(srvCnt, i -> i != vid);
assertPartitionCountersAreConsistent(keys, liveNodes);
latch2.countDown();
backgroundTxFut.get(getTestTimeout());
assertTrue(liveNodes.stream().map(node -> node.cache(DEFAULT_CACHE_NAME).query(new SqlFieldsQuery("select * from Integer")).getAll()).allMatch(Collection::isEmpty));
}
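A hedged sketch of the grids(...) helper used above to collect the surviving nodes; the real helper in the test base class may be implemented differently:
private List<IgniteEx> grids(int cnt, java.util.function.IntPredicate filter) {
    // Collect the grid instances whose index passes the filter (e.g. everything except the stopped victim).
    return IntStream.range(0, cnt).filter(filter).mapToObj(this::grid).collect(Collectors.toList());
}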
use of org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC in project ignite by apache.
the class CacheMvccBasicContinuousQueryTest method checkUpdateCountersGapIsProcessedSimple.
/**
* @throws Exception if failed.
*/
private void checkUpdateCountersGapIsProcessedSimple(CacheMode cacheMode) throws Exception {
testSpi = true;
final int srvCnt = 4;
final int backups = srvCnt - 1;
startGridsMultiThreaded(srvCnt);
client = true;
IgniteEx nearNode = startGrid(srvCnt);
IgniteCache<Object, Object> cache = nearNode.createCache(cacheConfiguration(cacheMode, FULL_SYNC, backups, srvCnt).setIndexedTypes(Integer.class, Integer.class));
IgniteEx primary = grid(0);
List<Integer> keys = primaryKeys(primary.cache(DEFAULT_CACHE_NAME), 3);
ContinuousQuery<Integer, Integer> qry = new ContinuousQuery<>();
List<CacheEntryEvent> arrivedEvts = new ArrayList<>();
CountDownLatch latch = new CountDownLatch(2);
qry.setLocalListener(new CacheEntryUpdatedListener<Integer, Integer>() {
@Override
public void onUpdated(Iterable<CacheEntryEvent<? extends Integer, ? extends Integer>> evts) {
for (CacheEntryEvent e : evts) {
arrivedEvts.add(e);
latch.countDown();
}
}
});
QueryCursor<Cache.Entry<Integer, Integer>> cur = nearNode.cache(DEFAULT_CACHE_NAME).query(qry);
// Initial value.
cache.query(new SqlFieldsQuery("insert into Integer(_key, _val) values(?, 42)").setArgs(keys.get(0))).getAll();
// prevent first transaction prepare on backups
TestRecordingCommunicationSpi spi = TestRecordingCommunicationSpi.spi(primary);
final AtomicInteger dhtPrepMsgLimiter = new AtomicInteger();
spi.blockMessages(new IgniteBiPredicate<ClusterNode, Message>() {
@Override
public boolean apply(ClusterNode node, Message msg) {
if (msg instanceof GridDhtTxPrepareRequest)
return dhtPrepMsgLimiter.getAndIncrement() < backups;
if (msg instanceof GridContinuousMessage)
return true;
return false;
}
});
// First tx. It is expected to be prepared only on the primary node, while GridDhtTxPrepareRequests to remote nodes
// will be swallowed.
Transaction txA = nearNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ);
cache.query(new SqlFieldsQuery("insert into Integer(_key, _val) values(?, 42)").setArgs(keys.get(1))).getAll();
txA.commitAsync();
// Wait until the first tx changes its state to PREPARING.
GridTestUtils.waitForCondition(new GridAbsPredicate() {
@Override
public boolean apply() {
boolean preparing = nearNode.context().cache().context().tm().activeTransactions().stream().allMatch(tx -> tx.state() == PREPARING);
boolean allPrepsSwallowed = dhtPrepMsgLimiter.get() == backups;
return preparing && allPrepsSwallowed;
}
}, 3_000);
// Second tx.
GridTestUtils.runAsync(() -> {
try (Transaction txB = nearNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
cache.query(new SqlFieldsQuery("insert into Integer(_key, _val) values(?, 42)").setArgs(keys.get(2)));
txB.commit();
}
}).get();
long primaryUpdCntr = getUpdateCounter(primary, keys.get(0));
// There were three updates: init, first and second.
assertEquals(3, primaryUpdCntr);
// drop primary
stopGrid(primary.name());
// Wait until all txs are rolled back.
GridTestUtils.waitForCondition(new GridAbsPredicate() {
@Override
public boolean apply() {
boolean allRolledBack = true;
for (int i = 1; i < srvCnt; i++) {
boolean rolledBack = grid(i).context().cache().context().tm().activeTransactions().stream().allMatch(tx -> tx.state() == ROLLED_BACK);
allRolledBack &= rolledBack;
}
return allRolledBack;
}
}, 3_000);
for (int i = 1; i < srvCnt; i++) {
IgniteCache backupCache = grid(i).cache(DEFAULT_CACHE_NAME);
int size = backupCache.query(new SqlFieldsQuery("select * from Integer")).getAll().size();
long backupCntr = getUpdateCounter(grid(i), keys.get(0));
assertEquals(2, size);
assertEquals(primaryUpdCntr, backupCntr);
}
assertTrue(latch.await(3, SECONDS));
assertEquals(2, arrivedEvts.size());
assertEquals(keys.get(0), arrivedEvts.get(0).getKey());
assertEquals(keys.get(2), arrivedEvts.get(1).getKey());
cur.close();
nearNode.close();
}
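A hedged usage sketch showing how concrete tests could drive the check above for both cache modes; the actual @Test method names in CacheMvccBasicContinuousQueryTest may differ:
@Test
public void testUpdateCountersGapIsProcessedSimplePartitioned() throws Exception {
    checkUpdateCountersGapIsProcessedSimple(CacheMode.PARTITIONED);
}
@Test
public void testUpdateCountersGapIsProcessedSimpleReplicated() throws Exception {
    checkUpdateCountersGapIsProcessedSimple(CacheMode.REPLICATED);
}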