use of org.apache.ignite.internal.IgniteFutureTimeoutCheckedException in project ignite by apache.
the class ClientSlowDiscoveryTransactionRemapTest method testTransactionRemap.
/**
*/
@Test
public void testTransactionRemap() throws Exception {
    TestTransactionEngine engine = new TestTransactionEngine<>(clnt.cache(CACHE_NAME));

    IgniteInternalFuture<?> txFut = GridTestUtils.runAsync(() -> {
        try (Transaction tx = clnt.transactions().txStart(concurrency, isolation)) {
            operation.apply(engine);

            tx.commit();
        }
    });

    try {
        txFut.get(1, TimeUnit.SECONDS);
    }
    catch (IgniteFutureTimeoutCheckedException te) {
        // Expected.
    }
    finally {
        clientDiscoSpiBlock.countDown();
    }

    // After the second client join is resumed, the transaction should successfully await the new affinity and commit.
    txFut.get();

    // Check consistency after transaction commit.
    engine.consistencyCheck();
}
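The snippet above treats the timeout as the expected outcome: while clientDiscoSpiBlock keeps the second client's join blocked, the transaction cannot remap and commit, so the bounded get() only confirms that it is still pending before the latch is released. Below is a minimal, self-contained sketch of that pattern, with a hypothetical helper name and a plain CountDownLatch standing in for the test's discovery block; it assumes only GridTestUtils.runAsync and IgniteInternalFuture.get(timeout, unit), which the snippet itself uses.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import org.apache.ignite.internal.IgniteFutureTimeoutCheckedException;
import org.apache.ignite.internal.IgniteInternalFuture;
import org.apache.ignite.testframework.GridTestUtils;

class ExpectedTimeoutPattern {
    /** Hypothetical helper: asserts that {@code op} stays blocked until {@code block} is released. */
    static void assertBlockedUntilReleased(Runnable op, CountDownLatch block) throws Exception {
        IgniteInternalFuture<?> fut = GridTestUtils.runAsync(op);

        try {
            // The short bounded wait is expected to time out while the latch is closed.
            fut.get(1, TimeUnit.SECONDS);

            throw new AssertionError("Operation completed although it was expected to stay blocked.");
        }
        catch (IgniteFutureTimeoutCheckedException expected) {
            // Expected: the operation is still waiting.
        }
        finally {
            block.countDown();
        }

        // Once the block is released the operation must complete normally.
        fut.get();
    }
}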
use of org.apache.ignite.internal.IgniteFutureTimeoutCheckedException in project ignite by apache.
the class IgniteTxOriginatingNodeFailureAbstractSelfTest method testTxOriginatingNodeFails.
/**
* @param keys Keys to update.
* @param partial Flag indicating whether to simulate partial prepared state.
* @throws Exception If failed.
*/
protected void testTxOriginatingNodeFails(Collection<Integer> keys, final boolean partial) throws Exception {
    assertFalse(keys.isEmpty());

    final Collection<IgniteKernal> grids = new ArrayList<>();

    ClusterNode txNode = grid(originatingNode()).localNode();

    for (int i = 1; i < gridCount(); i++)
        grids.add((IgniteKernal)grid(i));

    final Map<Integer, String> map = new HashMap<>();

    final String initVal = "initialValue";

    for (Integer key : keys) {
        grid(originatingNode()).cache(DEFAULT_CACHE_NAME).put(key, initVal);

        map.put(key, String.valueOf(key));
    }

    Map<Integer, Collection<ClusterNode>> nodeMap = new HashMap<>();

    info("Node being checked: " + grid(1).localNode().id());

    for (Integer key : keys) {
        Collection<ClusterNode> nodes = new ArrayList<>();

        nodes.addAll(grid(1).affinity(DEFAULT_CACHE_NAME).mapKeyToPrimaryAndBackups(key));

        nodes.remove(txNode);

        nodeMap.put(key, nodes);
    }

    info("Starting optimistic tx [values=" + map + ", topVer=" + grid(1).context().discovery().topologyVersion() + ']');

    if (partial)
        ignoreMessages(grid(1).localNode().id(), ignoreMessageClass());

    final Ignite txIgniteNode = G.ignite(txNode.id());

    GridTestUtils.runAsync(new Callable<Object>() {
        @Override public Object call() throws Exception {
            IgniteCache<Integer, String> cache = txIgniteNode.cache(DEFAULT_CACHE_NAME);

            assertNotNull(cache);

            TransactionProxyImpl tx = (TransactionProxyImpl)txIgniteNode.transactions().txStart();

            GridNearTxLocal txEx = tx.tx();

            assertTrue(txEx.optimistic());

            cache.putAll(map);

            try {
                txEx.prepareNearTxLocal().get(3, TimeUnit.SECONDS);
            }
            catch (IgniteFutureTimeoutCheckedException ignored) {
                info("Failed to wait for prepare future completion: " + partial);
            }

            return null;
        }
    }).get();

    info("Stopping originating node " + txNode);

    G.stop(G.ignite(txNode.id()).name(), true);

    info("Stopped grid, waiting for transactions to complete.");

    boolean txFinished = GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override public boolean apply() {
            for (IgniteKernal g : grids) {
                GridCacheSharedContext<Object, Object> ctx = g.context().cache().context();

                int txNum = ctx.tm().idMapSize();

                if (txNum != 0)
                    return false;
            }

            return true;
        }
    }, 10000);

    assertTrue(txFinished);

    info("Transactions finished.");

    for (Map.Entry<Integer, Collection<ClusterNode>> e : nodeMap.entrySet()) {
        final Integer key = e.getKey();

        final String val = map.get(key);

        assertFalse(e.getValue().isEmpty());

        for (ClusterNode node : e.getValue()) {
            compute(G.ignite(node.id()).cluster().forNode(node)).call(new IgniteCallable<Void>() {
                /** */
                @IgniteInstanceResource
                private Ignite ignite;

                @Override public Void call() throws Exception {
                    IgniteCache<Integer, String> cache = ignite.cache(DEFAULT_CACHE_NAME);

                    assertNotNull(cache);

                    assertEquals(partial ? initVal : val, cache.localPeek(key));

                    return null;
                }
            });
        }
    }

    for (Map.Entry<Integer, String> e : map.entrySet()) {
        for (Ignite g : G.allGrids()) {
            UUID locNodeId = g.cluster().localNode().id();

            assertEquals("Check failed for node: " + locNodeId, partial ? initVal : e.getValue(),
                g.cache(DEFAULT_CACHE_NAME).get(e.getKey()));
        }
    }
}
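Beyond the bounded prepare wait, where the IgniteFutureTimeoutCheckedException is deliberately swallowed to leave the transaction partially prepared while prepare messages are dropped, the test relies on GridTestUtils.waitForCondition to confirm that recovery eventually clears every transaction on the surviving nodes. Below is a compact sketch of that second check, with a hypothetical helper name and a lambda in place of the anonymous GridAbsPredicate; it uses only calls that already appear in the snippet.

import java.util.Collection;

import org.apache.ignite.internal.IgniteKernal;
import org.apache.ignite.testframework.GridTestUtils;

class TxCleanupCheck {
    /** Hypothetical helper: waits until no node in {@code grids} has transactions left in its tx manager. */
    static boolean allTxFinished(Collection<IgniteKernal> grids, long timeoutMs) throws Exception {
        return GridTestUtils.waitForCondition(() -> {
            for (IgniteKernal g : grids) {
                // Same check as in the test: the id map of the transaction manager must be empty.
                if (g.context().cache().context().tm().idMapSize() != 0)
                    return false;
            }

            return true;
        }, timeoutMs);
    }
}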
use of org.apache.ignite.internal.IgniteFutureTimeoutCheckedException in project ignite by apache.
the class GridCacheColocatedDebugTest method testConcurrentCheckThreadChain.
/**
* Covers the scenario where thread-chain lock acquisition for XID 1 should continue during an unsuccessful attempt
* to acquire a lock on a certain key for XID 2 (XID 1, whose chain is not yet complete, becomes the owner of this key instead).
*
* @throws Exception If failed.
*/
protected void testConcurrentCheckThreadChain(TransactionConcurrency txConcurrency) throws Exception {
    storeEnabled = false;

    startGrid(0);

    try {
        final AtomicLong iterCnt = new AtomicLong();

        int commonKey = 1000;
        int otherKeyPickVariance = 10;
        int otherKeysCnt = 5;
        int maxIterCnt = MAX_ITER_CNT * 10;

        IgniteInternalFuture<?> fut = multithreadedAsync(new Runnable() {
            @Override public void run() {
                long threadId = Thread.currentThread().getId();

                long itNum;

                while ((itNum = iterCnt.getAndIncrement()) < maxIterCnt) {
                    Map<Integer, String> vals = U.newLinkedHashMap(otherKeysCnt * 2 + 1);

                    for (int i = 0; i < otherKeysCnt; i++) {
                        int key = ThreadLocalRandom.current().nextInt(otherKeyPickVariance * i, otherKeyPickVariance * (i + 1));

                        vals.put(key, String.valueOf(key) + threadId);
                    }

                    vals.put(commonKey, String.valueOf(commonKey) + threadId);

                    for (int i = 0; i < otherKeysCnt; i++) {
                        int key = ThreadLocalRandom.current().nextInt(commonKey + otherKeyPickVariance * (i + 1), otherKeyPickVariance * (i + 2) + commonKey);

                        vals.put(key, String.valueOf(key) + threadId);
                    }

                    try (Transaction tx = grid(0).transactions().txStart(txConcurrency, READ_COMMITTED)) {
                        jcache(0).putAll(vals);

                        tx.commit();
                    }

                    if (itNum > 0 && itNum % 5000 == 0)
                        info(">>> " + itNum + " iterations completed.");
                }
            }
        }, THREAD_CNT);

        while (true) {
            long prevIterCnt = iterCnt.get();

            try {
                fut.get(5_000);

                break;
            }
            catch (IgniteFutureTimeoutCheckedException ignored) {
                // If no iterations were completed during the bounded wait, dump active transactions and fail.
                if (iterCnt.get() == prevIterCnt) {
                    Collection<IgniteInternalTx> hangingTxes = ignite(0).context().cache().context().tm().activeTransactions();

                    fail(hangingTxes.toString());
                }
            }
        }
    }
    finally {
        stopAllGrids();
    }
}
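In this test the timeout is neither expected nor immediately fatal: each IgniteFutureTimeoutCheckedException only triggers a progress check, and the run is declared hung (with the active transactions dumped) only when the iteration counter has stopped advancing between two consecutive waits. Below is a hedged sketch of that loop as a reusable helper; the name and the two suppliers are hypothetical and stand in for iterCnt and tm().activeTransactions().

import java.util.concurrent.TimeUnit;
import java.util.function.LongSupplier;
import java.util.function.Supplier;

import org.apache.ignite.internal.IgniteFutureTimeoutCheckedException;
import org.apache.ignite.internal.IgniteInternalFuture;

class ProgressAwareWait {
    /** Hypothetical helper: waits for {@code fut}, failing only if no progress is observed between bounded waits. */
    static void awaitWithProgress(IgniteInternalFuture<?> fut, LongSupplier progress,
        Supplier<String> diagnostics) throws Exception {
        while (true) {
            long before = progress.getAsLong();

            try {
                fut.get(5, TimeUnit.SECONDS);

                return;
            }
            catch (IgniteFutureTimeoutCheckedException ignored) {
                // Slow progress is tolerated; a complete stall is treated as a hang.
                if (progress.getAsLong() == before)
                    throw new AssertionError("Workload stalled: " + diagnostics.get());
            }
        }
    }
}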
use of org.apache.ignite.internal.IgniteFutureTimeoutCheckedException in project ignite by apache.
the class IgniteCacheCrossCacheTxFailoverTest method crossCacheTxFailover.
/**
* @param cacheMode Cache mode.
* @param sameAff If {@code false}, different numbers of partitions are used for the two caches.
* @param concurrency Transaction concurrency.
* @param isolation Transaction isolation.
* @throws Exception If failed.
*/
private void crossCacheTxFailover(CacheMode cacheMode, boolean sameAff, final TransactionConcurrency concurrency, final TransactionIsolation isolation) throws Exception {
    IgniteKernal ignite0 = (IgniteKernal)ignite(0);

    final AtomicBoolean stop = new AtomicBoolean();

    try {
        ignite0.createCache(cacheConfiguration(CACHE1, cacheMode, 256));
        ignite0.createCache(cacheConfiguration(CACHE2, cacheMode, sameAff ? 256 : 128));

        awaitCacheOnClient(grid(GRID_CNT - 1), CACHE2);

        final AtomicInteger threadIdx = new AtomicInteger();

        IgniteInternalFuture<?> fut = runMultiThreadedAsync(new Callable<Void>() {
            @Override public Void call() throws Exception {
                int idx = threadIdx.getAndIncrement();

                Ignite ignite = ignite(idx % GRID_CNT);

                log.info("Started update thread [node=" + ignite.name() +
                    ", client=" + ignite.configuration().isClientMode() + ']');

                IgniteCache<TestKey, TestValue> cache1 = ignite.cache(CACHE1);
                IgniteCache<TestKey, TestValue> cache2 = ignite.cache(CACHE2);

                assertNotSame(cache1, cache2);

                IgniteTransactions txs = ignite.transactions();

                ThreadLocalRandom rnd = ThreadLocalRandom.current();

                long iter = 0;

                while (!stop.get()) {
                    boolean sameKey = rnd.nextBoolean();

                    try {
                        try (Transaction tx = txs.txStart(concurrency, isolation)) {
                            if (sameKey) {
                                TestKey key = new TestKey(rnd.nextLong(KEY_RANGE));

                                cacheOperation(rnd, cache1, key);
                                cacheOperation(rnd, cache2, key);
                            }
                            else {
                                TestKey key1 = new TestKey(rnd.nextLong(KEY_RANGE));
                                TestKey key2 = new TestKey(key1.key() + 1);

                                cacheOperation(rnd, cache1, key1);
                                cacheOperation(rnd, cache2, key2);
                            }

                            tx.commit();
                        }
                    }
                    catch (CacheException | IgniteException e) {
                        log.info("Update error: " + e);
                    }

                    if (iter++ % 500 == 0)
                        log.info("Iteration: " + iter);
                }

                return null;
            }

            /**
             * @param rnd Random.
             * @param cache Cache.
             * @param key Key.
             */
            private void cacheOperation(ThreadLocalRandom rnd, IgniteCache<TestKey, TestValue> cache, TestKey key) {
                switch (rnd.nextInt(4)) {
                    case 0:
                        cache.put(key, new TestValue(rnd.nextLong()));

                        break;

                    case 1:
                        cache.remove(key);

                        break;

                    case 2:
                        cache.invoke(key, new TestEntryProcessor(rnd.nextBoolean() ? 1L : null));

                        break;

                    case 3:
                        cache.get(key);

                        break;

                    default:
                        assert false;
                }
            }
        }, 10, "tx-thread");

        long stopTime = System.currentTimeMillis() + SF.applyLB((int)TEST_TIME, 20_000);

        long topVer = ignite0.cluster().topologyVersion();

        boolean failed = false;

        while (System.currentTimeMillis() < stopTime) {
            log.info("Start node.");

            IgniteKernal ignite = (IgniteKernal)startGrid(GRID_CNT);

            assertFalse(ignite.configuration().isClientMode());

            topVer++;

            IgniteInternalFuture<?> affFut = ignite.context().cache().context().exchange()
                .affinityReadyFuture(new AffinityTopologyVersion(topVer));

            try {
                if (affFut != null)
                    affFut.get(30_000);
            }
            catch (IgniteFutureTimeoutCheckedException ignored) {
                log.error("Failed to wait for affinity future after start: " + topVer);

                failed = true;

                break;
            }

            Thread.sleep(500);

            log.info("Stop node.");

            stopGrid(GRID_CNT);

            topVer++;

            affFut = ignite0.context().cache().context().exchange()
                .affinityReadyFuture(new AffinityTopologyVersion(topVer));

            try {
                if (affFut != null)
                    affFut.get(30_000);
            }
            catch (IgniteFutureTimeoutCheckedException ignored) {
                log.error("Failed to wait for affinity future after stop: " + topVer);

                failed = true;

                break;
            }
        }

        stop.set(true);

        fut.get();

        assertFalse("Test failed, see log for details.", failed);
    }
    finally {
        stop.set(true);

        ignite0.destroyCache(CACHE1);
        ignite0.destroyCache(CACHE2);

        AffinityTopologyVersion topVer = ignite0.context().cache().context().exchange().lastTopologyFuture().get();

        for (Ignite ignite : G.allGrids())
            ((IgniteKernal)ignite).context().cache().context().exchange().affinityReadyFuture(topVer).get();

        awaitPartitionMapExchange();
    }
}
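Here, by contrast, a timeout on the affinity-ready future is treated as a hard failure of the failover scenario: the loop records it and stops instead of retrying. Below is a minimal sketch of that shape with a hypothetical helper name; as the test's own null check suggests, affinityReadyFuture() may return null when the requested topology version is already ready.

import org.apache.ignite.internal.IgniteFutureTimeoutCheckedException;
import org.apache.ignite.internal.IgniteInternalFuture;

class BoundedAffinityWait {
    /** Hypothetical helper: returns {@code true} if the future completes within the bound, {@code false} on timeout. */
    static boolean readyWithin(IgniteInternalFuture<?> fut, long timeoutMs) throws Exception {
        if (fut == null)
            return true; // Affinity for this topology version is already ready.

        try {
            fut.get(timeoutMs);

            return true;
        }
        catch (IgniteFutureTimeoutCheckedException ignored) {
            return false; // Only the timeout is swallowed; other errors propagate.
        }
    }
}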
use of org.apache.ignite.internal.IgniteFutureTimeoutCheckedException in project ignite by apache.
the class BlockedEvictionsTest method testCacheGroupDestroy_Volatile.
/**
* @throws Exception If failed.
*/
@Test
public void testCacheGroupDestroy_Volatile() throws Exception {
    AtomicReference<IgniteInternalFuture> ref = new AtomicReference<>();

    testOperationDuringEviction(false, 1, new Runnable() {
        @Override public void run() {
            IgniteInternalFuture fut = runAsync(new Runnable() {
                @Override public void run() {
                    grid(0).destroyCache(DEFAULT_CACHE_NAME);
                }
            });

            doSleep(500);

            // Cache stop should be blocked by concurrent unfinished eviction.
            assertFalse(fut.isDone());

            ref.set(fut);
        }
    });

    try {
        ref.get().get(10_000);
    }
    catch (IgniteFutureTimeoutCheckedException e) {
        fail(X.getFullStackTrace(e));
    }

    PartitionsEvictManager mgr = grid(0).context().cache().context().evict();

    // Group eviction context should be cleaned up once the cache group is destroyed.
    Map evictionGroupsMap = U.field(mgr, "evictionGroupsMap");

    assertEquals("Group context must be cleaned up", 0, evictionGroupsMap.size());

    grid(0).getOrCreateCache(cacheConfiguration());

    assertEquals(0, evictionGroupsMap.size());

    assertPartitionsSame(idleVerify(grid(0), DEFAULT_CACHE_NAME));
}