Use of org.infinispan.distribution.MagicKey in project infinispan by infinispan.
The class Updater, method testTransactional.
/**
 * More complex - init some state. Start a new transaction, and midway trigger a rehash. Then complete transaction
 * and test results.
 */
@Test
public void testTransactional() throws Throwable {
   final List<MagicKey> keys = init();
   final CountDownLatch l = new CountDownLatch(1);
   final AtomicBoolean rollback = new AtomicBoolean(false);
   Future<Void> future = fork(() -> {
      try {
         // start a transaction on c1.
         TransactionManager t1 = TestingUtil.getTransactionManager(c1);
         t1.begin();
         c1.put(keys.get(0), "transactionally_replaced");
         Transaction tx = t1.getTransaction();
         tx.enlistResource(new XAResourceAdapter() {
            public int prepare(Xid id) {
               // this would be called *after* the cache prepares.
               try {
                  log.debug("Unblocking commit");
                  l.await();
               } catch (InterruptedException e) {
                  Thread.currentThread().interrupt();
               }
               return XAResource.XA_OK;
            }
         });
         t1.commit();
      } catch (Exception e) {
         log.error("Error committing transaction", e);
         rollback.set(true);
         throw new RuntimeException(e);
      }
   });
   log.info("Invoking rehash event");
   performRehashEvent(true);
   l.countDown();
   future.get(30, TimeUnit.SECONDS);
   // ownership can only be verified after the rehashing has completed
   waitForRehashCompletion();
   log.info("Rehash complete");
   // only check for these values if tx was not rolled back
   if (!rollback.get()) {
      // the ownership of k1 might change during the tx and a cache might end up with it in L1
      assertOwnershipAndNonOwnership(keys.get(0), true);
      assertOwnershipAndNonOwnership(keys.get(1), false);
      assertOwnershipAndNonOwnership(keys.get(2), false);
      assertOwnershipAndNonOwnership(keys.get(3), false);
      // checking the values will bring the keys to L1, so we want to do it after checking ownership
      assertOnAllCaches(keys.get(0), "transactionally_replaced");
      assertOnAllCaches(keys.get(1), "v0");
      assertOnAllCaches(keys.get(2), "v0");
      assertOnAllCaches(keys.get(3), "v0");
   }
}
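
The key mechanism above is the extra XAResource enlisted in the transaction: its prepare() runs after the cache's own prepare and blocks on the latch, so the commit is held open while the main thread triggers the rehash. Below is a minimal sketch of that pattern, assuming the same XAResourceAdapter no-op helper plus a JTA TransactionManager tm, a cache and a key; triggerRehash() is an illustrative placeholder for performRehashEvent(true) above.

// Sketch only: hold a transaction open at prepare time until a latch is released,
// so a topology change can be injected mid-commit. tm, cache, key and triggerRehash()
// are illustrative names, not part of the test above.
CountDownLatch commitGate = new CountDownLatch(1);
ExecutorService exec = Executors.newSingleThreadExecutor();
Future<?> txFuture = exec.submit(() -> {
   tm.begin();
   cache.put(key, "new-value");
   tm.getTransaction().enlistResource(new XAResourceAdapter() {
      @Override
      public int prepare(Xid xid) {
         try {
            commitGate.await();   // runs after the cache's own prepare
         } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
         }
         return XAResource.XA_OK;
      }
   });
   tm.commit();                   // blocks inside prepare() until the gate opens
   return null;
});
// Meanwhile, on the main thread: change the topology, then let the commit finish.
triggerRehash();
commitGate.countDown();
txFuture.get(30, TimeUnit.SECONDS);
exec.shutdown();
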
Use of org.infinispan.distribution.MagicKey in project infinispan by infinispan.
The class StaleLocksWithLockOnlyTxDuringStateTransferTest, method testSync.
public void testSync() throws Throwable {
   final StateSequencer sequencer = new StateSequencer();
   sequencer.logicalThread("st", "st:block_get_transactions", "st:resume_get_transactions",
         "st:block_ch_update_on_0", "st:block_ch_update_on_1", "st:resume_ch_update_on_0",
         "st:resume_ch_update_on_1");
   sequencer.logicalThread("tx", "tx:before_lock", "tx:block_remote_lock", "tx:resume_remote_lock",
         "tx:after_commit");
   // The lock will be acquired after rebalance has started, but before cache0 starts sending
   // the transaction data to cache1
   sequencer.order("st:block_get_transactions", "tx:before_lock", "tx:block_remote_lock",
         "st:resume_get_transactions");
   // The tx will be committed (1PC) after cache1 has received all the state, but before the topology is updated
   sequencer.order("st:block_ch_update_on_1", "tx:resume_remote_lock", "tx:after_commit",
         "st:resume_ch_update_on_0");
   ConfigurationBuilder cfg = TestCacheManagerFactory.getDefaultCacheConfiguration(true);
   cfg.clustering().cacheMode(CacheMode.DIST_SYNC)
      .stateTransfer().awaitInitialTransfer(false)
      .transaction().lockingMode(LockingMode.PESSIMISTIC);
   manager(0).defineConfiguration(CACHE_NAME, cfg.build());
   manager(1).defineConfiguration(CACHE_NAME, cfg.build());
   AdvancedCache<Object, Object> cache0 = advancedCache(0, CACHE_NAME);
   TransactionManager tm0 = cache0.getTransactionManager();
   DistributionManager dm0 = cache0.getDistributionManager();
   int initialTopologyId = dm0.getCacheTopology().getTopologyId();
   int rebalanceTopologyId = initialTopologyId + 1;
   final int finalTopologyId = rebalanceTopologyId + 3;
   // Block state request commands on cache0 until the lock command has been sent to cache1
   advanceOnComponentMethod(sequencer, cache0, StateProvider.class,
         matchMethodCall("getTransactionsForSegments").build())
         .before("st:block_get_transactions", "st:resume_get_transactions");
   // Block the final topology update until the tx has finished
   advanceOnGlobalComponentMethod(sequencer, manager(0), LocalTopologyManager.class,
         matchMethodCall("handleTopologyUpdate")
               .withMatcher(0, CoreMatchers.equalTo(CACHE_NAME))
               .withMatcher(1, new CacheTopologyMatcher(finalTopologyId)).build())
         .before("st:block_ch_update_on_0", "st:resume_ch_update_on_0");
   advanceOnGlobalComponentMethod(sequencer, manager(1), LocalTopologyManager.class,
         matchMethodCall("handleTopologyUpdate")
               .withMatcher(0, CoreMatchers.equalTo(CACHE_NAME))
               .withMatcher(1, new CacheTopologyMatcher(finalTopologyId)).build())
         .before("st:block_ch_update_on_1", "st:resume_ch_update_on_1");
   // Start cache 1, but the state request will be blocked on cache 0
   AdvancedCache<Object, Object> cache1 = advancedCache(1, CACHE_NAME);
   // Block the remote lock command on cache 1
   advanceOnInboundRpc(sequencer, cache(1, CACHE_NAME),
         matchCommand(LockControlCommand.class).matchCount(0).withCache(CACHE_NAME).build())
         .before("tx:block_remote_lock", "tx:resume_remote_lock");
   // Wait for the rebalance to start
   sequencer.advance("tx:before_lock");
   assertEquals(rebalanceTopologyId, dm0.getCacheTopology().getTopologyId());
   // Start a transaction on cache 0
   MagicKey key = new MagicKey("testkey", cache0);
   tm0.begin();
   cache0.lock(key);
   tm0.commit();
   // Let the rebalance finish
   sequencer.advance("tx:after_commit");
   TestingUtil.waitForNoRebalance(caches(CACHE_NAME));
   assertEquals(finalTopologyId, dm0.getCacheTopology().getTopologyId());
   // Check for stale locks
   final TransactionTable tt0 = TestingUtil.extractComponent(cache0, TransactionTable.class);
   final TransactionTable tt1 = TestingUtil.extractComponent(cache1, TransactionTable.class);
   eventually(() -> tt0.getLocalTxCount() == 0 && tt1.getRemoteTxCount() == 0);
   sequencer.stop();
}
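
The StateSequencer used above is what makes this interleaving deterministic: each logicalThread() call declares an ordered list of named states, order() adds cross-thread constraints, and advance() blocks until every state ordered before the given one has been entered (the st:* states in the test are entered automatically by the advanceOn* hooks). A minimal, self-driving sketch of the same idea follows; the state names are illustrative.

// Two logical threads; "worker:step" must happen between "main:before" and "main:after".
StateSequencer sequencer = new StateSequencer();
sequencer.logicalThread("main", "main:before", "main:after");
sequencer.logicalThread("worker", "worker:step");
sequencer.order("main:before", "worker:step", "main:after");
Future<Object> worker = fork(() -> {
   sequencer.advance("worker:step");   // waits until "main:before" has been entered
   return null;
});
sequencer.advance("main:before");
sequencer.advance("main:after");       // waits until "worker:step" has been entered
worker.get(10, TimeUnit.SECONDS);
sequencer.stop();
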
Use of org.infinispan.distribution.MagicKey in project infinispan by infinispan.
The class StaleTxWithCommitDuringStateTransferTest, method doTest.
private void doTest(final boolean commit) throws Throwable {
   ConfigurationBuilder cfg = TestCacheManagerFactory.getDefaultCacheConfiguration(true);
   cfg.clustering().cacheMode(CacheMode.DIST_SYNC)
      .stateTransfer().awaitInitialTransfer(false)
      .transaction().lockingMode(LockingMode.PESSIMISTIC);
   manager(0).defineConfiguration(CACHE_NAME, cfg.build());
   manager(1).defineConfiguration(CACHE_NAME, cfg.build());
   final CheckPoint checkpoint = new CheckPoint();
   final AdvancedCache<Object, Object> cache0 = advancedCache(0, CACHE_NAME);
   final TransactionManager tm0 = cache0.getTransactionManager();
   // Block state request commands on cache 0
   StateProvider stateProvider = TestingUtil.extractComponent(cache0, StateProvider.class);
   StateProvider spyProvider = spy(stateProvider);
   doAnswer(invocation -> {
      Object[] arguments = invocation.getArguments();
      Address source = (Address) arguments[0];
      int topologyId = (Integer) arguments[1];
      CompletionStage<?> result = (CompletionStage<?>) invocation.callRealMethod();
      return result.thenApply(transactions -> {
         try {
            checkpoint.trigger("post_get_transactions_" + topologyId + "_from_" + source);
            checkpoint.awaitStrict("resume_get_transactions_" + topologyId + "_from_" + source, 10, SECONDS);
            return transactions;
         } catch (InterruptedException | TimeoutException e) {
            throw new TestException(e);
         }
      });
   }).when(spyProvider).getTransactionsForSegments(any(Address.class), anyInt(), any());
   TestingUtil.replaceComponent(cache0, StateProvider.class, spyProvider, true);
   // Start a transaction on cache 0, which will block on cache 1
   MagicKey key = new MagicKey("testkey", cache0);
   tm0.begin();
   cache0.put(key, "v0");
   final Transaction tx = tm0.suspend();
   // Start cache 1, but the tx data request will be blocked on cache 0
   DistributionManager dm0 = cache0.getDistributionManager();
   int initialTopologyId = dm0.getCacheTopology().getTopologyId();
   int rebalanceTopologyId = initialTopologyId + 1;
   AdvancedCache<Object, Object> cache1 = advancedCache(1, CACHE_NAME);
   checkpoint.awaitStrict("post_get_transactions_" + rebalanceTopologyId + "_from_" + address(1), 10, SECONDS);
   // The commit/rollback command should be invoked on cache 1 and it should block until the tx is created there
   Future<Object> future = fork(() -> {
      tm0.resume(tx);
      if (commit) {
         tm0.commit();
      } else {
         tm0.rollback();
      }
      return null;
   });
   // Check that the commit/rollback command is blocked on cache 1
   try {
      future.get(1, SECONDS);
      fail("Commit/Rollback command should have been blocked");
   } catch (TimeoutException e) {
      // expected
   }
   // Let cache 1 receive the tx from cache 0.
   checkpoint.trigger("resume_get_transactions_" + rebalanceTopologyId + "_from_" + address(1));
   TestingUtil.waitForNoRebalance(caches(CACHE_NAME));
   // Wait for the tx to finish
   future.get(10, SECONDS);
   // Check the key on all caches
   if (commit) {
      assertEquals("v0", TestingUtil.extractComponent(cache0, InternalDataContainer.class).get(key).getValue());
      assertEquals("v0", TestingUtil.extractComponent(cache1, InternalDataContainer.class).get(key).getValue());
   } else {
      assertNull(TestingUtil.extractComponent(cache0, InternalDataContainer.class).get(key));
      assertNull(TestingUtil.extractComponent(cache1, InternalDataContainer.class).get(key));
   }
   // Check for stale locks
   final TransactionTable tt0 = TestingUtil.extractComponent(cache0, TransactionTable.class);
   final TransactionTable tt1 = TestingUtil.extractComponent(cache1, TransactionTable.class);
   eventually(() -> tt0.getLocalTxCount() == 0 && tt1.getRemoteTxCount() == 0);
}
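
Two test utilities carry this scenario: the Mockito spy wraps the real StateProvider so the gate can be attached to the CompletionStage returned by getTransactionsForSegments (via thenApply, so the pause happens when the transactions are actually produced rather than when the method returns), and the CheckPoint provides the named trigger/awaitStrict handshake between that callback and the test thread. The handshake on its own, as a minimal sketch with illustrative event names:

// One side triggers a named event, the other awaits it with a timeout.
CheckPoint checkpoint = new CheckPoint();
Future<Object> worker = fork(() -> {
   checkpoint.trigger("worker_ready");                 // signal the test thread
   checkpoint.awaitStrict("test_done", 10, SECONDS);   // park until released
   return null;
});
checkpoint.awaitStrict("worker_ready", 10, SECONDS);
// ... perform the assertions that must run while the worker is parked ...
checkpoint.trigger("test_done");
worker.get(10, SECONDS);
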
Use of org.infinispan.distribution.MagicKey in project infinispan by infinispan.
The class StaleLocksWithCommitDuringStateTransferTest, method doTestSuspect.
/**
 * Check that the transaction commit/rollback recovers if the remote node dies during the RPC
 */
private void doTestSuspect(boolean commit) throws Exception {
   MagicKey k1 = new MagicKey("k1", c1);
   MagicKey k2 = new MagicKey("k2", c2);
   tm(c1).begin();
   c1.put(k1, "v1");
   c1.put(k2, "v2");
   // We split the transaction commit in two phases by calling the TransactionCoordinator methods directly
   TransactionTable txTable = TestingUtil.extractComponent(c1, TransactionTable.class);
   TransactionCoordinator txCoordinator = TestingUtil.extractComponent(c1, TransactionCoordinator.class);
   // Execute the prepare on both nodes
   LocalTransaction localTx = txTable.getLocalTransaction(tm(c1).getTransaction());
   CompletionStages.join(txCoordinator.prepare(localTx));
   // Delay the commit on the remote node. Can't use blockNewTransactions because we don't want a
   // StateTransferInProgressException
   AsyncInterceptorChain c2ic = c2.getAdvancedCache().getAsyncInterceptorChain();
   c2ic.addInterceptorBefore(new DelayCommandInterceptor(), StateTransferInterceptor.class);
   // Schedule the remote node to stop on another thread since the main thread will be busy with the commit call
   Thread worker = new Thread("RehasherSim,StaleLocksWithCommitDuringStateTransferTest") {
      @Override
      public void run() {
         try {
            // should be much larger than the lock acquisition timeout
            Thread.sleep(1000);
            manager(c2).stop();
            // stLock.unblockNewTransactions(1000);
         } catch (InterruptedException e) {
            log.errorf(e, "Error stopping cache");
         }
      }
   };
   worker.start();
   try {
      // finally commit or rollback the transaction
      if (commit) {
         CompletionStages.join(txCoordinator.commit(localTx, false));
      } else {
         CompletionStages.join(txCoordinator.rollback(localTx));
      }
      // make the transaction manager forget about our tx so that we don't get rollback exceptions in the log
      tm(c1).suspend();
   } finally {
      // don't leak threads
      worker.join();
   }
   // test that we don't leak locks
   assertEventuallyNotLocked(c1, k1);
   assertEventuallyNotLocked(c1, k2);
}
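
The DelayCommandInterceptor referenced above is defined elsewhere in the test class and is not shown here. A plausible minimal stand-in, assuming it extends Infinispan's DDAsyncInterceptor base and simply holds the remote CommitCommand long enough for the worker thread to stop the node, might look like the following; the class shape and delay value are illustrative, not the project's actual implementation.

// Hypothetical sketch of a delaying interceptor; not the real DelayCommandInterceptor.
static class DelayCommandInterceptor extends DDAsyncInterceptor {
   @Override
   public Object visitCommitCommand(TxInvocationContext ctx, CommitCommand command) throws Throwable {
      // Hold the remote commit so that manager(c2).stop() runs while the RPC is still in flight.
      Thread.sleep(3000);
      return invokeNext(ctx, command);
   }
}
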
Use of org.infinispan.distribution.MagicKey in project infinispan by infinispan.
The class GetWithForceWriteLockRetryTest, method testRetryAfterLeave.
public void testRetryAfterLeave() throws Exception {
   EmbeddedCacheManager cm1 = manager(0);
   Cache<Object, Object> c1 = cm1.getCache();
   EmbeddedCacheManager cm2 = manager(1);
   Cache c2 = cm2.getCache();
   EmbeddedCacheManager cm3 = manager(2);
   Cache c3 = cm3.getCache();
   DelayInterceptor di3 = new DelayInterceptor(LockControlCommand.class, c3);
   extractInterceptorChain(c3).addInterceptorBefore(di3, PessimisticLockingInterceptor.class);
   Object key = new MagicKey(c3);
   TransactionManager tm1 = tm(c1);
   Future<Object> f = fork(() -> {
      log.tracef("Initiating a transaction on backup owner %s", c2);
      tm1.begin();
      try {
         c1.getAdvancedCache().withFlags(Flag.FORCE_WRITE_LOCK).get(key);
      } finally {
         // Even if the remote lock failed, this will remove the transaction
         tm1.commit();
      }
      return null;
   });
   // The lock control command is replicated to cache c3, and it blocks in the DelayInterceptor
   di3.waitUntilBlocked(1);
   // Kill c3
   killMember(2);
   waitForNoRebalance(c1, c2);
   // Check that the lock succeeded
   f.get(10, SECONDS);
   // Unblock the remote command on c3 - shouldn't make any difference
   di3.unblock(1);
}