Use of org.infinispan.distribution.MagicKey in project infinispan by infinispan.
The class BasicSingleLockReplOptTest, method testMultipleLocksInSameTx.
public void testMultipleLocksInSameTx() throws Exception {
   final Object k1 = new MagicKey("k1", cache(0));
   final Object k2 = new MagicKey("k2", cache(0));
   tm(0).begin();
   cache(0).put(k1, "v");
   cache(0).put(k2, "v");
   EmbeddedTransaction dtm = (EmbeddedTransaction) tm(0).getTransaction();
   dtm.runPrepare();
   assert lockManager(0).isLocked(k1);
   assert lockManager(0).isLocked(k2);
   assert !lockManager(1).isLocked(k1);
   assert !lockManager(1).isLocked(k2);
   assert !lockManager(2).isLocked(k1);
   assert !lockManager(2).isLocked(k2);
   dtm.runCommit(false);
   assertNotLocked(k1);
   assertNotLocked(k2);
   assertValue(k1, false);
   assertValue(k2, false);
}
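A MagicKey built as new MagicKey("k1", cache(0)) hashes to a segment whose primary owner is cache(0), so the test can predict exactly which node must hold the lock. EmbeddedTransaction then exposes the two phases of two-phase commit separately via runPrepare() and runCommit(boolean). The sketch below shows the same pattern for a single key; it assumes the MultipleCacheManagersTest helpers used above (cache(int), tm(int), lockManager(int), assertNotLocked), and the helper name is illustrative, not part of BasicSingleLockReplOptTest.

// Illustrative helper: lock a single pinned key via the prepare phase only,
// verify the lock lives on the primary owner, then finish the commit.
private void lockSingleKeyOnPrimary() throws Exception {
   final Object key = new MagicKey("single", cache(0));   // primary owner: cache(0)
   tm(0).begin();
   cache(0).put(key, "value");
   EmbeddedTransaction tx = (EmbeddedTransaction) tm(0).getTransaction();
   tx.runPrepare();                        // phase one: acquires the lock on the primary owner
   assert lockManager(0).isLocked(key);    // locked where the key is pinned
   assert !lockManager(1).isLocked(key);   // other nodes hold no lock for it
   tx.runCommit(false);                    // phase two: commit (false = do not force a rollback)
   assertNotLocked(key);
}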
Use of org.infinispan.distribution.MagicKey in project infinispan by infinispan.
The class BasicSingleLockReplOptTest, method testSecondTxCannotPrepare.
public void testSecondTxCannotPrepare() throws Exception {
   Object k0 = new MagicKey("k0", cache(0));
   tm(0).begin();
   cache(0).put(k0, "v");
   EmbeddedTransaction dtm = (EmbeddedTransaction) tm(0).getTransaction();
   dtm.runPrepare();
   tm(0).suspend();
   assert checkTxCount(0, 1, 0);
   assert checkTxCount(1, 0, 1);
   assert checkTxCount(2, 0, 1);
   tm(0).begin();
   cache(0).put(k0, "other");
   try {
      tm(0).commit();
      assert false;
   } catch (Throwable e) {
      // ignore
   }
   eventually(() -> checkTxCount(0, 1, 0) && checkTxCount(1, 0, 1) && checkTxCount(2, 0, 1));
   tm(1).begin();
   cache(1).put(k0, "other");
   try {
      tm(1).commit();
      assert false;
   } catch (Throwable e) {
      // expected
   }
   eventually(() -> checkTxCount(0, 1, 0) && checkTxCount(1, 0, 1) && checkTxCount(2, 0, 1));
   tm(0).resume(dtm);
   dtm.runCommit(false);
   assertValue(k0, false);
   eventually(() -> noPendingTransactions(0) && noPendingTransactions(1) && noPendingTransactions(2));
}
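The pattern above (prepare, suspend, let a competing writer fail, resume, commit) can be condensed into a small helper for the conflicting writer. Below is a sketch assuming the same test helpers (cache(int), tm(int)); the helper name is illustrative and not part of BasicSingleLockReplOptTest.

// Illustrative helper: attempt a conflicting write from the given node while another,
// already-prepared transaction still holds the lock on the key; the commit must fail.
private void assertConflictingWriteFails(int node, Object key) throws Exception {
   tm(node).begin();
   cache(node).put(key, "conflicting-value");
   try {
      tm(node).commit();
      assert false : "commit must fail while the key is locked by a prepared transaction";
   } catch (Throwable expected) {
      // the prepare cannot acquire the lock, so the transaction rolls back
   }
}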
Use of org.infinispan.distribution.MagicKey in project infinispan by infinispan.
The class InfinispanNodeFailureTest, method killedNodeDoesNotBreakReplaceCommand.
public void killedNodeDoesNotBreakReplaceCommand() throws Exception {
   defineConfigurationOnAllManagers(TEST_CACHE, new ConfigurationBuilder().read(manager(0).getDefaultCacheConfiguration()));
   waitForClusterToForm(TEST_CACHE);
   waitForNoRebalance(caches(TEST_CACHE));
   final Object replaceKey = new MagicKey("X", cache(0, TEST_CACHE));
   final Object putKey = new MagicKey("Z", cache(1, TEST_CACHE));
   cache(0, TEST_CACHE).put(replaceKey, INITIAL_VALUE);
   // prepare the node to notify us when the put command is in progress, so we can kill the third node
   final CountDownLatch beforeKill = new CountDownLatch(1);
   final CountDownLatch afterKill = new CountDownLatch(1);
   advancedCache(1, TEST_CACHE).getAsyncInterceptorChain().addInterceptor(new BaseCustomAsyncInterceptor() {
      @Override
      public Object visitLockControlCommand(TxInvocationContext ctx, LockControlCommand command) throws Throwable {
         return invokeNextAndFinally(ctx, command, (rCtx, rCommand, rv, t) -> {
            LockControlCommand cmd = (LockControlCommand) rCommand;
            if (putKey.equals(cmd.getSingleKey())) {
               // notify the main thread that it can start killing the third node
               beforeKill.countDown();
               // wait for completion and proceed
               afterKill.await(10, TimeUnit.SECONDS);
            }
         });
      }
   }, 1);
   // execute the replace command in a separate thread so we can do something else meanwhile
   Future<Boolean> firstResult = fork(() -> {
      try {
         tm(0, TEST_CACHE).begin();
         // this should replace and lock replaceKey so other transactions cannot pass this barrier
         boolean result = cache(0, TEST_CACHE).replace(replaceKey, INITIAL_VALUE, REPLACING_VALUE);
         // issue the put command so it is retried while node-c is being killed
         cache(0, TEST_CACHE).put(putKey, "some-value");
         // apply the new view
         viewLatch.countDown();
         tm(0, TEST_CACHE).commit();
         return result;
      } catch (Throwable t) {
         return null;
      }
   });
   // wait for the replace command to complete, then kill the third node
   assertTrue(beforeKill.await(10, TimeUnit.SECONDS));
   // kill node-c; do not wait for the rehash, it is important to continue with the put retry before the new view is received
   killMember(2, TEST_CACHE, false);
   afterKill.countDown();
   tm(1, TEST_CACHE).begin();
   // this replace should never succeed, because the first node has already replaced and locked the value;
   // if the replace lock were lost during the put retry, the same value could be replaced again, which is the bug this test guards against
   boolean secondResult = cache(1, TEST_CACHE).replace(replaceKey, INITIAL_VALUE, REPLACING_VALUE);
   tm(1, TEST_CACHE).commit();
   // check that the first node did not fail
   assertEquals(Boolean.TRUE, firstResult.get());
   assertEquals(REPLACING_VALUE, cache(0, TEST_CACHE).get(replaceKey));
   assertEquals(REPLACING_VALUE, cache(1, TEST_CACHE).get(replaceKey));
   // check that the second node's state is consistent: the second replace must return FALSE in a read-committed pessimistic cache
   assertEquals(false, secondResult);
}
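The anonymous interceptor above is a reusable pattern: pause a specific command for a specific key between two latches so the test can act while the command is in flight. Below is a sketch of the same idea as a named class, assuming the same types already used above (BaseCustomAsyncInterceptor, LockControlCommand, TxInvocationContext, CountDownLatch, TimeUnit); the class and field names are illustrative, not part of InfinispanNodeFailureTest.

// Illustrative interceptor: after the lock command for the watched key completes,
// signal the test and wait until it allows the invocation to continue.
static final class KeyLatchInterceptor extends BaseCustomAsyncInterceptor {
   private final Object watchedKey;
   private final CountDownLatch reached;   // released when the watched key's lock command arrives
   private final CountDownLatch proceed;   // released by the test when the command may continue

   KeyLatchInterceptor(Object watchedKey, CountDownLatch reached, CountDownLatch proceed) {
      this.watchedKey = watchedKey;
      this.reached = reached;
      this.proceed = proceed;
   }

   @Override
   public Object visitLockControlCommand(TxInvocationContext ctx, LockControlCommand command) throws Throwable {
      return invokeNextAndFinally(ctx, command, (rCtx, rCommand, rv, t) -> {
         if (watchedKey.equals(((LockControlCommand) rCommand).getSingleKey())) {
            reached.countDown();
            proceed.await(10, TimeUnit.SECONDS);
         }
      });
   }
}

It would be registered exactly like the anonymous version above, e.g. advancedCache(1, TEST_CACHE).getAsyncInterceptorChain().addInterceptor(new KeyLatchInterceptor(putKey, beforeKill, afterKill), 1);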
Use of org.infinispan.distribution.MagicKey in project infinispan by infinispan.
The class NoLockLostOnLongTxTest, method testLongTx.
@Test(dataProvider = "long-tx-test")
public void testLongTx(LongTxTestParameter testParameter) throws Exception {
   String cacheName = testParameter.cacheName();
   defineConfigurationOnAllManagers(cacheName, testParameter.config());
   AdvancedCache<MagicKey, String> cache = this.<MagicKey, String>cache(0, cacheName).getAdvancedCache();
   AdvancedCache<MagicKey, String> owner = this.<MagicKey, String>cache(1, cacheName).getAdvancedCache();
   TransactionTable ownerTxTable = owner.getComponentRegistry().getTransactionTable();
   TransactionTable cacheTxTable = cache.getComponentRegistry().getTransactionTable();
   Method cleanupMethod = extractCleanupMethod();
   final MagicKey key = new MagicKey("key", owner);
   EmbeddedTransactionManager tm = (EmbeddedTransactionManager) cache.getTransactionManager();
   tm.begin();
   cache.put(key, "a");
   testParameter.beforeAdvanceTime(tm);
   // get the local gtx; it should be the same as the remote one
   GlobalTransaction gtx = cacheTxTable.getGlobalTransaction(tm.getTransaction());
   assertTrue("RemoteTransaction must exist after key is locked!", ownerTxTable.containRemoteTx(gtx));
   // completedTxTimeout is 10,000 ms; we advance by 11,000 ms
   timeService.advance(COMPLETED_TX_TIMEOUT + 1000);
   // check if the remote-tx is eligible for timeout
   RemoteTransaction rtx = ownerTxTable.getRemoteTransaction(gtx);
   assertNotNull("RemoteTransaction must exist after key is locked!", rtx);
   assertTrue("RemoteTransaction is not eligible for timeout.", rtx.getCreationTime() - getCreationTimeCutoff() < 0);
   // instead of waiting for the reaper, invoke the cleanup method directly
   cleanupMethod.invoke(ownerTxTable);
   // it should keep the tx, since the transaction is still running
   assertTrue("RemoteTransaction should be live after cleanup.", ownerTxTable.containRemoteTx(gtx));
   testParameter.afterAdvanceTime(tm);
   assertEquals("Wrong value in originator", "a", cache.get(key));
   assertEquals("Wrong value in owner", "a", owner.get(key));
}
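The timeService field used above is a controlled clock, which lets the test jump past the completed-transaction timeout without sleeping. Its wiring is not shown in the excerpt; the sketch below is only an assumption about how such a clock is commonly installed in Infinispan test classes (ControlledTimeService and TestingUtil.replaceComponent are test utilities from infinispan-core), not the actual createCacheManagers() of NoLockLostOnLongTxTest.

// Hypothetical wiring sketch: install a ControlledTimeService in every cache manager so that
// timeService.advance(...) moves the clock seen by the transaction reaper.
protected final ControlledTimeService timeService = new ControlledTimeService();

@Override
protected void createCacheManagers() throws Throwable {
   for (int i = 0; i < 2; i++) {
      addClusterEnabledCacheManager(new ConfigurationBuilder());
   }
   for (EmbeddedCacheManager manager : cacheManagers) {
      // replace the real TimeService component with the controlled one (rewire = true)
      TestingUtil.replaceComponent(manager, TimeService.class, timeService, true);
   }
   waitForClusterToForm();
}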
Use of org.infinispan.distribution.MagicKey in project infinispan by infinispan.
The class OptimisticPartialCommitTest, method testNonOwnerBecomesOwnerDuringCommit.
public void testNonOwnerBecomesOwnerDuringCommit() throws Exception {
   final Object k1 = new MagicKey("k1", cache(1), cache(2));
   final Object k2 = new MagicKey("k2", cache(2), cache(3));
   cache(0).put(k1, "v1_0");
   cache(0).put(k2, "v2_0");
   // commit on cache 0 -> send commit to 1, 2, 3 -> block commit on 2 -> wait for the commit on 1 to finish
   // -> kill 3 -> rebalance -> 1 applies state from 2 -> 2 resends commit to 1 -> 1 commits again (including k2)
   // Without the fix, the second commit is ignored and k2 is not updated
   StateSequencer ss = new StateSequencer();
   ss.logicalThread("main", "after_commit_on_1", "before_kill_3", "after_state_applied_on_1", "before_commit_on_2", "after_commit_on_2");
   advanceOnInterceptor(ss, cache(1), StateTransferInterceptor.class, matchCommand(VersionedCommitCommand.class).matchCount(0).build()).after("after_commit_on_1");
   advanceOnInterceptor(ss, cache(2), StateTransferInterceptor.class, matchCommand(VersionedCommitCommand.class).matchCount(0).build()).before("before_commit_on_2").after("after_commit_on_2");
   InvocationMatcher stateAppliedOn0Matcher = matchMethodCall("handleRebalancePhaseConfirm").withParam(1, address(1)).build();
   advanceOnGlobalComponentMethod(ss, manager(0), ClusterTopologyManager.class, stateAppliedOn0Matcher).after("after_state_applied_on_1");
   Future<Object> txFuture = fork(() -> {
      tm(0).begin();
      try {
         cache(0).put(k1, "v1_1");
         cache(0).put(k2, "v2_1");
      } finally {
         tm(0).commit();
      }
      return null;
   });
   ss.advance("before_kill_3");
   controlledCHFactory.setOwnerIndexes(new int[][] { { 1, 2 }, { 2, 1 } });
   manager(3).stop();
   cacheManagers.remove(3);
   txFuture.get(30, TimeUnit.SECONDS);
   assertEquals("v1_1", cache(1).get(k1));
   assertEquals("v2_1", cache(1).get(k2));
   assertEquals("v1_1", cache(2).get(k1));
   assertEquals("v2_1", cache(2).get(k2));
}
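For reference, StateSequencer is the coordination primitive that makes the interleaving above deterministic: the states declared in one logicalThread(...) call are totally ordered, and advance(state) blocks until every earlier state has completed. Below is a sketch of that mechanism in isolation, assuming only the fork(...) helper and the StateSequencer methods already used above; the method and state names are illustrative.

// Illustrative sketch: three states in one logical thread force the forked task and the
// test thread to interleave in a fixed order, without sleeps.
private void demoStateSequencer() throws Exception {
   StateSequencer seq = new StateSequencer();
   seq.logicalThread("demo", "work_started", "main_acted", "work_finished");
   Future<Void> worker = fork(() -> {
      seq.advance("work_started");    // first state: nothing to wait for
      // ... the forked task does the first half of its work here ...
      seq.advance("work_finished");   // blocks until the test thread has advanced "main_acted"
      return null;
   });
   seq.advance("main_acted");         // blocks until the worker has reached "work_started"
   // ... the test thread acts here, strictly between the worker's two phases ...
   worker.get(30, TimeUnit.SECONDS);
}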