Use of org.apache.geode.internal.cache.TXManagerImpl in project geode by apache.
Class TXJUnitTest, method testSuspendResume.
@Test
public void testSuspendResume() {
  TXManagerImpl txMgrImpl = (TXManagerImpl) this.txMgr;
  assertTrue(!this.txMgr.exists());
  assertEquals(null, txMgrImpl.internalSuspend());
  TXStateProxy txProxy = null;
  txMgrImpl.internalResume(txProxy);
  assertTrue(!this.txMgr.exists());
  this.txMgr.begin();
  TransactionId origId = this.txMgr.getTransactionId();
  assertTrue(this.txMgr.exists());
  {
    TXStateProxy tx = txMgrImpl.internalSuspend();
    assertTrue(!this.txMgr.exists());
    this.txMgr.begin();
    try {
      txMgrImpl.internalResume(tx);
      fail("expected IllegalStateException");
    } catch (IllegalStateException expected) {
    }
    this.txMgr.rollback();
    assertTrue(!this.txMgr.exists());
    txMgrImpl.internalResume(tx);
  }
  assertTrue(this.txMgr.exists());
  assertEquals(origId, this.txMgr.getTransactionId());
  this.txMgr.rollback();
}
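The test above exercises TXManagerImpl's thread-binding API. As a minimal sketch (not part of the test), the same suspend/resume bracket looks like the snippet below; the Cache parameter and the sketch's method name are illustrative assumptions, while every TXManagerImpl call it uses appears in the test above.

// Minimal sketch, assuming a started Cache; uses org.apache.geode.cache.Cache,
// org.apache.geode.internal.cache.TXManagerImpl and org.apache.geode.internal.cache.TXStateProxy.
static void suspendResumeSketch(Cache cache) {
  TXManagerImpl txMgr = (TXManagerImpl) cache.getCacheTransactionManager();
  txMgr.begin();
  TXStateProxy suspended = txMgr.internalSuspend(); // detach the transaction from this thread
  // ... non-transactional work runs here ...
  txMgr.internalResume(suspended); // re-bind it; throws IllegalStateException if another tx is active
  txMgr.commit();
}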
Use of org.apache.geode.internal.cache.TXManagerImpl in project geode by apache.
Class BaseCommand, method execute.
@Override
public void execute(Message clientMessage, ServerConnection serverConnection) {
  // Read the request and update the statistics
  long start = DistributionStats.getStatTime();
  if (EntryLogger.isEnabled() && serverConnection != null) {
    EntryLogger.setSource(serverConnection.getMembershipID(), "c2s");
  }
  boolean shouldMasquerade = shouldMasqueradeForTx(clientMessage, serverConnection);
  try {
    if (shouldMasquerade) {
      InternalCache cache = serverConnection.getCache();
      InternalDistributedMember member = (InternalDistributedMember) serverConnection.getProxyID().getDistributedMember();
      TXManagerImpl txMgr = cache.getTxManager();
      TXStateProxy tx = null;
      try {
        tx = txMgr.masqueradeAs(clientMessage, member, false);
        cmdExecute(clientMessage, serverConnection, start);
        tx.updateProxyServer(txMgr.getMemberId());
      } finally {
        txMgr.unmasquerade(tx);
      }
    } else {
      cmdExecute(clientMessage, serverConnection, start);
    }
  } catch (TransactionException | CopyException | SerializationException | CacheWriterException | CacheLoaderException | GemFireSecurityException | PartitionOfflineException | MessageTooLargeException e) {
    handleExceptionNoDisconnect(clientMessage, serverConnection, e);
  } catch (EOFException eof) {
    BaseCommand.handleEOFException(clientMessage, serverConnection, eof);
  } catch (InterruptedIOException e) {
    // Solaris only
    BaseCommand.handleInterruptedIOException(serverConnection, e);
  } catch (IOException e) {
    BaseCommand.handleIOException(clientMessage, serverConnection, e);
  } catch (DistributedSystemDisconnectedException e) {
    BaseCommand.handleShutdownException(clientMessage, serverConnection, e);
  } catch (VirtualMachineError err) {
    SystemFailure.initiateFailure(err);
    // If this ever returns, rethrow the error. We're poisoned now, so don't let this thread continue.
    throw err;
  } catch (Throwable e) {
    BaseCommand.handleThrowable(clientMessage, serverConnection, e);
  } finally {
    EntryLogger.clearSource();
  }
}
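Stripped of the command plumbing, the masquerade bracket above reduces to the sketch below. The clientMessage and serverConnection parameters mirror the method above, while doWork is a hypothetical stand-in for cmdExecute; the broad throws clause is only there to cover any checked exceptions the real calls may declare.

// Sketch of the masquerade bracket around server-side work done on behalf of a client transaction.
static void masqueradeSketch(Message clientMessage, ServerConnection serverConnection,
    Runnable doWork) throws Exception {
  InternalCache cache = serverConnection.getCache();
  InternalDistributedMember member =
      (InternalDistributedMember) serverConnection.getProxyID().getDistributedMember();
  TXManagerImpl txMgr = cache.getTxManager();
  TXStateProxy tx = null;
  try {
    tx = txMgr.masqueradeAs(clientMessage, member, false); // bind the client's tx to this thread
    doWork.run();                                          // stands in for cmdExecute(...)
    tx.updateProxyServer(txMgr.getMemberId());             // record which server now hosts the tx
  } finally {
    txMgr.unmasquerade(tx);                                // always unbind, even on failure
  }
}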
Use of org.apache.geode.internal.cache.TXManagerImpl in project geode by apache.
Class CommitCommand, method cmdExecute.
@Override
public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException {
  serverConnection.setAsTrue(REQUIRES_RESPONSE);
  TXManagerImpl txMgr = (TXManagerImpl) serverConnection.getCache().getCacheTransactionManager();
  InternalDistributedMember client = (InternalDistributedMember) serverConnection.getProxyID().getDistributedMember();
  int uniqId = clientMessage.getTransactionId();
  TXId txId = new TXId(client, uniqId);
  TXCommitMessage commitMsg = null;
  if (txMgr.isHostedTxRecentlyCompleted(txId)) {
    commitMsg = txMgr.getRecentlyCompletedMessage(txId);
    if (logger.isDebugEnabled()) {
      logger.debug("TX: returning a recently committed txMessage for tx: {}", txId);
    }
    if (!txMgr.isExceptionToken(commitMsg)) {
      writeCommitResponse(commitMsg, clientMessage, serverConnection);
      // fixes bug 46529
      commitMsg.setClientVersion(null);
      serverConnection.setAsTrue(RESPONDED);
    } else {
      sendException(clientMessage, serverConnection, txMgr.getExceptionForToken(commitMsg, txId));
    }
    txMgr.removeHostedTXState(txId);
    return;
  }
  // fixes bug 43350
  boolean wasInProgress = txMgr.setInProgress(true);
  final TXStateProxy txProxy = txMgr.getTXState();
  Assert.assertTrue(txProxy != null);
  if (logger.isDebugEnabled()) {
    logger.debug("TX: committing client tx: {}", txId);
  }
  try {
    txId = txProxy.getTxId();
    txProxy.setCommitOnBehalfOfRemoteStub(true);
    txMgr.commit();
    commitMsg = txProxy.getCommitMessage();
    writeCommitResponse(commitMsg, clientMessage, serverConnection);
    serverConnection.setAsTrue(RESPONDED);
  } catch (Exception e) {
    sendException(clientMessage, serverConnection, e);
  } finally {
    if (txId != null) {
      txMgr.removeHostedTXState(txId);
    }
    if (!wasInProgress) {
      txMgr.setInProgress(false);
    }
    if (commitMsg != null) {
      // fixes bug 46529
      commitMsg.setClientVersion(null);
    }
  }
}
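The first half of the command is a duplicate-commit guard: if this member already completed the transaction, the stored outcome is replayed instead of committing again. A condensed sketch of just that guard, assuming a TXManagerImpl and a TXId built as in the command above, follows; the Object return value is an illustrative stand-in for the writeCommitResponse/sendException branches.

// Sketch of the duplicate-commit guard; returns the stored outcome, or null if none exists.
static Object recentlyCompletedOutcome(TXManagerImpl txMgr, TXId txId) {
  if (!txMgr.isHostedTxRecentlyCompleted(txId)) {
    return null;                                       // this tx was not committed here recently
  }
  TXCommitMessage commitMsg = txMgr.getRecentlyCompletedMessage(txId);
  Object outcome = txMgr.isExceptionToken(commitMsg)
      ? txMgr.getExceptionForToken(commitMsg, txId)    // replay the stored failure
      : commitMsg;                                     // reuse the stored commit result
  txMgr.removeHostedTXState(txId);                     // the hosted state is no longer needed
  return outcome;
}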
Use of org.apache.geode.internal.cache.TXManagerImpl in project geode by apache.
Class RegionReliabilityTestCase, method testCommitDistributionException.
@Test
public void testCommitDistributionException() throws Exception {
  if (getRegionScope().isGlobal())
    return; // skip test under Global
  if (getRegionScope().isDistributedNoAck())
    return; // skip test under DistributedNoAck
  final String name = this.getUniqueName();
  final String roleA = name + "-A";
  final String[] requiredRoles = { roleA };
  Set requiredRolesSet = new HashSet();
  for (int i = 0; i < requiredRoles.length; i++) {
    requiredRolesSet.add(InternalRole.getRole(requiredRoles[i]));
  }
  assertEquals(requiredRoles.length, requiredRolesSet.size());
  // connect controller to system...
  Properties config = new Properties();
  config.setProperty(ROLES, "");
  getSystem(config);
  GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
  RegionMembershipListener listener = new RegionMembershipListenerAdapter() {
    public void afterRemoteRegionDeparture(RegionEvent event) {
      synchronized (detectedDeparture_testCommitDistributionException) {
        detectedDeparture_testCommitDistributionException[0] = Boolean.TRUE;
        detectedDeparture_testCommitDistributionException.notify();
      }
    }
  };
  // create region in controller...
  MembershipAttributes ra = new MembershipAttributes(requiredRoles, LossAction.NO_ACCESS, ResumptionAction.NONE);
  AttributesFactory fac = new AttributesFactory();
  fac.setMembershipAttributes(ra);
  fac.setScope(getRegionScope());
  fac.addCacheListener(listener);
  RegionAttributes attr = fac.create();
  Region region = createRootRegion(name, attr);
  // use vm1 to create role
  Host.getHost(0).getVM(1).invoke(new CacheSerializableRunnable("Create Region") {
    public void run2() throws CacheException {
      createConnection(new String[] { roleA });
      AttributesFactory fac = new AttributesFactory();
      fac.setScope(getRegionScope());
      RegionAttributes attr = fac.create();
      createRootRegion(name, attr);
    }
  });
  // define the afterReleaseLocalLocks callback
  SerializableRunnableIF removeRequiredRole = new SerializableRunnableIF() {
    public void run() {
      Host.getHost(0).getVM(1).invoke(new SerializableRunnable("Close Region") {
        public void run() {
          getRootRegion(name).close();
        }
      });
      try {
        synchronized (detectedDeparture_testCommitDistributionException) {
          while (detectedDeparture_testCommitDistributionException[0] == Boolean.FALSE) {
            detectedDeparture_testCommitDistributionException.wait();
          }
        }
      } catch (InterruptedException e) {
        fail("interrupted");
      }
    }
  };
  // define the add and remove expected exceptions
  final String expectedExceptions = "org.apache.geode.internal.cache.CommitReplyException";
  SerializableRunnable addExpectedExceptions = new CacheSerializableRunnable("addExpectedExceptions") {
    public void run2() throws CacheException {
      getCache().getLogger().info("<ExpectedException action=add>" + expectedExceptions + "</ExpectedException>");
    }
  };
  SerializableRunnable removeExpectedExceptions = new CacheSerializableRunnable("removeExpectedExceptions") {
    public void run2() throws CacheException {
      getCache().getLogger().info("<ExpectedException action=remove>" + expectedExceptions + "</ExpectedException>");
    }
  };
  // perform the actual test...
  CacheTransactionManager ctm = cache.getCacheTransactionManager();
  ctm.begin();
  TXStateInterface txStateProxy = ((TXManagerImpl) ctm).getTXState();
  ((TXStateProxyImpl) txStateProxy).forceLocalBootstrap();
  TXState txState = (TXState) ((TXStateProxyImpl) txStateProxy).getRealDeal(null, null);
  txState.setBeforeSend(() -> {
    try {
      removeRequiredRole.run();
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
  });
  // now start a transaction and commit it
  region.put("KEY", "VAL");
  addExpectedExceptions.run();
  Host.getHost(0).getVM(1).invoke(addExpectedExceptions);
  try {
    ctm.commit();
    fail("Should have thrown CommitDistributionException");
  } catch (CommitDistributionException e) {
    // pass
  } finally {
    removeExpectedExceptions.run();
    Host.getHost(0).getVM(1).invoke(removeExpectedExceptions);
  }
}
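The interesting TXManagerImpl step in this test is reaching through the transaction proxy to the real TXState so a callback can be injected before the commit messages go out. A minimal sketch of that step is below; it assumes an active transaction on the given CacheTransactionManager and that setBeforeSend accepts a Runnable, as the lambda in the test suggests.

// Sketch: install a beforeSend hook on the real TXState behind the current transaction.
static void installBeforeSendHook(CacheTransactionManager ctm, Runnable hook) {
  TXStateProxy proxy = ((TXManagerImpl) ctm).getTXState();
  ((TXStateProxyImpl) proxy).forceLocalBootstrap();  // ensure a local TXState backs the proxy
  TXState txState = (TXState) ((TXStateProxyImpl) proxy).getRealDeal(null, null);
  txState.setBeforeSend(hook);                       // runs just before commit distribution starts
}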
Use of org.apache.geode.internal.cache.TXManagerImpl in project geode by apache.
Class TXDistributedDUnitTest, method testHighAvailabilityFeatures.
// GEODE-635: eats and logs exceptions, retry loops
@Category(FlakyTest.class)
@Test
public void testHighAvailabilityFeatures() throws Exception {
IgnoredException.addIgnoredException("DistributedSystemDisconnectedException");
final String rgnName = getUniqueName();
AttributesFactory factory = new AttributesFactory();
factory.setScope(Scope.DISTRIBUTED_ACK);
factory.setEarlyAck(false);
Region rgn = getCache().createRegion(rgnName, factory.create());
Invoke.invokeInEveryVM(new SerializableRunnable("testHighAvailabilityFeatures: initial region configuration") {
public void run() {
try {
AttributesFactory factory2 = new AttributesFactory();
factory2.setScope(Scope.DISTRIBUTED_ACK);
factory2.setEarlyAck(false);
factory2.setDataPolicy(DataPolicy.REPLICATE);
getCache().createRegion(rgnName, factory2.create());
} catch (CacheException e) {
Assert.fail("While creating region", e);
}
}
});
// create entries
rgn.put("key0", "val0_0");
rgn.put("key1", "val1_0");
Host host = Host.getHost(0);
// This test assumes that there are at least three VMs; the origin and two recipients
assertTrue(host.getVMCount() >= 3);
final VM originVM = host.getVM(0);
// Test that there is no commit after a partial commit message
// send (only sent to a minority of the recipients)
originVM.invoke(new SerializableRunnable("Flakey DuringIndividualSend Transaction") {
public void run() {
final Region rgn1 = getCache().getRegion(rgnName);
assertNotNull(rgn1);
try {
final CacheTransactionManager txMgr2 = getCache().getCacheTransactionManager();
final CacheTransactionManager txMgrImpl = txMgr2;
txMgr2.begin();
// 1. setup an internal callback on originVM that will call
// disconnectFromDS() on the 2nd duringIndividualSend
// call.
((TXStateProxyImpl) ((TXManagerImpl) txMgrImpl).getTXState()).forceLocalBootstrap();
TXState txState = (TXState) ((TXStateProxyImpl) ((TXManagerImpl) txMgrImpl).getTXState()).getRealDeal(null, null);
txState.setDuringIndividualSend(new Runnable() {
private int numCalled = 0;
public synchronized void run() {
++numCalled;
rgn1.getCache().getLogger().info("setDuringIndividualSend Runnable called " + numCalled + " times");
if (numCalled > 1) {
MembershipManagerHelper.crashDistributedSystem(getSystem());
}
}
});
rgn1.put("key0", "val0_1");
rgn1.put("key1", "val1_1");
// 2. commit a transaction in originVM, it will disconnect from the DS
txMgr2.commit();
} catch (VirtualMachineError e) {
SystemFailure.initiateFailure(e);
throw e;
} catch (Throwable e) {
rgn1.getCache().getLogger().warning("Ignoring Exception", e);
} finally {
// Allow this VM to re-connect to the DS upon getCache() call
closeCache();
}
}
});
// 3. verify on all VMs that the transaction was not committed
final SerializableRunnable noChangeValidator = new SerializableRunnable("testHighAvailabilityFeatures: validate no change in Region") {
public void run() {
Region rgn1 = getCache().getRegion(rgnName);
if (rgn1 == null) {
// Expect a null region from originVM
try {
AttributesFactory factory2 = new AttributesFactory();
factory2.setScope(Scope.DISTRIBUTED_ACK);
factory2.setEarlyAck(false);
factory2.setDataPolicy(DataPolicy.REPLICATE);
rgn1 = getCache().createRegion(rgnName, factory2.create());
} catch (CacheException e) {
Assert.fail("While creating region", e);
}
}
Region.Entry re = rgn1.getEntry("key0");
assertNotNull(re);
assertEquals("val0_0", re.getValue());
re = rgn1.getEntry("key1");
assertNotNull(re);
assertEquals("val1_0", re.getValue());
}
};
Invoke.invokeInEveryVM(noChangeValidator);
// Test that there is no commit after sending to all recipients
// but prior to sending the "commit process" message
originVM.invoke(new SerializableRunnable("Flakey AfterIndividualSend Transaction") {
public void run() {
final Region rgn1 = getCache().getRegion(rgnName);
assertNotNull(rgn1);
try {
final CacheTransactionManager txMgr2 = getCache().getCacheTransactionManager();
final CacheTransactionManager txMgrImpl = txMgr2;
txMgr2.begin();
// 1. setup an internal callback on originVM that will call
// disconnectFromDS() on AfterIndividualSend
((TXStateProxyImpl) ((TXManagerImpl) txMgrImpl).getTXState()).forceLocalBootstrap();
TXState txState = (TXState) ((TXStateProxyImpl) ((TXManagerImpl) txMgrImpl).getTXState()).getRealDeal(null, null);
txState.setAfterIndividualSend(new Runnable() {
public synchronized void run() {
MembershipManagerHelper.crashDistributedSystem(getSystem());
}
});
rgn1.put("key0", "val0_2");
rgn1.put("key1", "val1_2");
// 2. commit a transaction in originVM, it will disconnect from the DS
txMgr2.commit();
} catch (VirtualMachineError e) {
SystemFailure.initiateFailure(e);
throw e;
} catch (Throwable e) {
rgn1.getCache().getLogger().warning("Ignoring Exception", e);
} finally {
// Allow this VM to re-connect to the DS upon getCache() call
closeCache();
}
}
});
// 3. verify on all VMs, including the origin, that the transaction was not committed
Invoke.invokeInEveryVM(noChangeValidator);
// Test commit success upon a single commit process message received.
originVM.invoke(new SerializableRunnable("Flakey DuringIndividualCommitProcess Transaction") {
public void run() {
final Region rgn1 = getCache().getRegion(rgnName);
assertNotNull(rgn1);
try {
final CacheTransactionManager txMgr2 = getCache().getCacheTransactionManager();
final CacheTransactionManager txMgrImpl = txMgr2;
txMgr2.begin();
((TXStateProxyImpl) ((TXManagerImpl) txMgrImpl).getTXState()).forceLocalBootstrap();
TXState txState = (TXState) ((TXStateProxyImpl) ((TXManagerImpl) txMgrImpl).getTXState()).getRealDeal(null, null);
// 1. setup an internal callback on originVM that will call
// disconnectFromDS() on the 2nd internalDuringIndividualCommitProcess
// call.
txState.setDuringIndividualCommitProcess(new Runnable() {
private int numCalled = 0;
public synchronized void run() {
++numCalled;
rgn1.getCache().getLogger().info("setDuringIndividualCommitProcess Runnable called " + numCalled + " times");
if (numCalled > 1) {
MembershipManagerHelper.crashDistributedSystem(getSystem());
}
}
});
rgn1.put("key0", "val0_3");
rgn1.put("key1", "val1_3");
// 2. commit a transaction in originVM, it will disconnect from the DS
txMgr2.commit();
} catch (VirtualMachineError e) {
SystemFailure.initiateFailure(e);
throw e;
} catch (Throwable e) {
rgn1.getCache().getLogger().warning("Ignoring Exception", e);
} finally {
// Allow this VM to re-connect to the DS upon getCache() call
closeCache();
}
}
});
// 3. verify on all VMs that the transaction was committed (including the origin, due to GII)
SerializableRunnable nonSoloChangeValidator1 = new SerializableRunnable("testHighAvailabilityFeatures: validate v1 non-solo Region changes") {
public void run() {
Region rgn1 = getCache().getRegion(rgnName);
if (rgn1 == null) {
// Expect a null region from originVM
try {
AttributesFactory factory2 = new AttributesFactory();
factory2.setScope(Scope.DISTRIBUTED_ACK);
factory2.setEarlyAck(false);
factory2.setDataPolicy(DataPolicy.REPLICATE);
rgn1 = getCache().createRegion(rgnName, factory2.create());
} catch (CacheException e) {
Assert.fail("While creating region", e);
}
}
long giveUp = System.currentTimeMillis() + 10000;
while (giveUp > System.currentTimeMillis()) {
try {
Region.Entry re = rgn1.getEntry("key0");
assertNotNull(re);
assertEquals("val0_3", re.getValue());
re = rgn1.getEntry("key1");
assertNotNull(re);
assertEquals("val1_3", re.getValue());
break;
} catch (AssertionError e) {
if (giveUp > System.currentTimeMillis()) {
throw e;
}
}
}
}
};
Invoke.invokeInEveryVM(nonSoloChangeValidator1);
// Verify successful solo region commit after duringIndividualSend
// (same as afterIndividualSend).
// Create a region that only exists on the origin and another VM
final String soloRegionName = getUniqueName() + "_solo";
SerializableRunnable createSoloRegion = new SerializableRunnable("testHighAvailabilityFeatures: solo region configuration") {
public void run() {
try {
AttributesFactory factory2 = new AttributesFactory();
factory2.setScope(Scope.DISTRIBUTED_ACK);
factory2.setEarlyAck(false);
factory2.setDataPolicy(DataPolicy.REPLICATE);
Region rgn1 = getCache().createRegion(soloRegionName, factory2.create());
rgn1.put("soloKey0", "soloVal0_0");
rgn1.put("soloKey1", "soloVal1_0");
} catch (CacheException e) {
Assert.fail("While creating region", e);
}
}
};
final VM soloRegionVM = host.getVM(1);
originVM.invoke(createSoloRegion);
soloRegionVM.invoke(createSoloRegion);
originVM.invoke(new SerializableRunnable("Flakey solo region DuringIndividualSend Transaction") {
public void run() {
final Region soloRgn = getCache().getRegion(soloRegionName);
assertNotNull(soloRgn);
try {
final CacheTransactionManager txMgr2 = getCache().getCacheTransactionManager();
final CacheTransactionManager txMgrImpl = txMgr2;
txMgr2.begin();
// 1. setup an internal callback on originVM that will call
// disconnectFromDS() on the 2nd duringIndividualSend
// call.
((TXStateProxyImpl) ((TXManagerImpl) txMgrImpl).getTXState()).forceLocalBootstrap();
TXState txState = (TXState) ((TXStateProxyImpl) ((TXManagerImpl) txMgrImpl).getTXState()).getRealDeal(null, null);
txState.setDuringIndividualSend(new Runnable() {
private int numCalled = 0;
public synchronized void run() {
++numCalled;
soloRgn.getCache().getLogger().info("setDuringIndividualSend Runnable called " + numCalled + " times");
if (numCalled > 1) {
MembershipManagerHelper.crashDistributedSystem(getSystem());
}
}
});
soloRgn.put("soloKey0", "soloVal0_1");
soloRgn.put("soloKey1", "soloVal1_1");
// 2. commit a transaction in originVM, it will disconnect from the DS
txMgr2.commit();
} catch (VirtualMachineError e) {
SystemFailure.initiateFailure(e);
throw e;
} catch (Throwable e) {
soloRgn.getCache().getLogger().warning("Ignoring Exception", e);
} finally {
// Allow this VM to re-connect to the DS upon getCache() call
closeCache();
}
}
});
// 3. verify on the soloRegionVM that the transaction was committed
final SerializableRunnable soloRegionCommitValidator1 = new SerializableRunnable("testHighAvailabilityFeatures: validate successful v1 commit in solo Region") {
public void run() {
Region soloRgn = getCache().getRegion(soloRegionName);
if (soloRgn == null) {
// Expect a null region from originVM
try {
AttributesFactory factory2 = new AttributesFactory();
factory2.setScope(Scope.DISTRIBUTED_ACK);
factory2.setEarlyAck(false);
factory2.setDataPolicy(DataPolicy.REPLICATE);
soloRgn = getCache().createRegion(soloRegionName, factory2.create());
} catch (CacheException e) {
Assert.fail("While creating region ", e);
}
}
Region.Entry re = soloRgn.getEntry("soloKey0");
assertNotNull(re);
assertEquals("soloVal0_1", re.getValue());
re = soloRgn.getEntry("soloKey1");
assertNotNull(re);
assertEquals("soloVal1_1", re.getValue());
}
};
originVM.invoke(soloRegionCommitValidator1);
soloRegionVM.invoke(soloRegionCommitValidator1);
// verify no change in nonSolo region, re-establish region in originVM
Invoke.invokeInEveryVM(nonSoloChangeValidator1);
// Verify no commit for failed send (afterIndividualSend) for solo
// Region combined with non-solo Region
originVM.invoke(new SerializableRunnable("Flakey mixed (solo+non-solo) region DuringIndividualSend Transaction") {
public void run() {
final Region rgn1 = getCache().getRegion(rgnName);
assertNotNull(rgn1);
final Region soloRgn = getCache().getRegion(soloRegionName);
assertNotNull(soloRgn);
try {
final CacheTransactionManager txMgr2 = getCache().getCacheTransactionManager();
final CacheTransactionManager txMgrImpl = txMgr2;
txMgr2.begin();
// 1. setup an internal callback on originVM that will call
// disconnectFromDS() on the afterIndividualSend
// call.
((TXStateProxyImpl) ((TXManagerImpl) txMgrImpl).getTXState()).forceLocalBootstrap();
TXState txState = (TXState) ((TXStateProxyImpl) ((TXManagerImpl) txMgrImpl).getTXState()).getRealDeal(null, null);
txState.setAfterIndividualSend(new Runnable() {
public synchronized void run() {
MembershipManagerHelper.crashDistributedSystem(getSystem());
}
});
rgn1.put("key0", "val0_4");
rgn1.put("key1", "val1_4");
soloRgn.put("soloKey0", "soloVal0_2");
soloRgn.put("soloKey1", "soloVal1_2");
// 2. commit a transaction in originVM, it will disconnect from the DS
txMgr2.commit();
} catch (VirtualMachineError e) {
SystemFailure.initiateFailure(e);
throw e;
} catch (Throwable e) {
rgn1.getCache().getLogger().warning("Ignoring Exception", e);
} finally {
// Allow this VM to re-connect to the DS upon getCache() call
closeCache();
}
}
});
// Origin and Solo Region VM should be the same as last validation
originVM.invoke(soloRegionCommitValidator1);
soloRegionVM.invoke(soloRegionCommitValidator1);
Invoke.invokeInEveryVM(nonSoloChangeValidator1);
// Verify commit after sending a single
// (duringIndividualCommitProcess) commit process for solo Region
// combined with non-solo Region
originVM.invoke(new SerializableRunnable("Flakey mixed (solo+non-solo) region DuringIndividualCommitProcess Transaction") {
public void run() {
final Region rgn1 = getCache().getRegion(rgnName);
assertNotNull(rgn1);
final Region soloRgn = getCache().getRegion(soloRegionName);
assertNotNull(soloRgn);
try {
final CacheTransactionManager txMgr2 = getCache().getCacheTransactionManager();
final CacheTransactionManager txMgrImpl = txMgr2;
txMgr2.begin();
// 1. setup an internal callback on originVM that will call
// disconnectFromDS() on the afterIndividualSend
// call.
((TXStateProxyImpl) ((TXManagerImpl) txMgrImpl).getTXState()).forceLocalBootstrap();
TXState txState = (TXState) ((TXStateProxyImpl) ((TXManagerImpl) txMgrImpl).getTXState()).getRealDeal(null, null);
txState.setAfterIndividualSend(new Runnable() {
private int numCalled = 0;
public synchronized void run() {
++numCalled;
rgn1.getCache().getLogger().info("setDuringIndividualCommitProcess Runnable called " + numCalled + " times");
if (numCalled > 1) {
MembershipManagerHelper.crashDistributedSystem(getSystem());
}
}
});
rgn1.put("key0", "val0_5");
rgn1.put("key1", "val1_5");
soloRgn.put("soloKey0", "soloVal0_3");
soloRgn.put("soloKey1", "soloVal1_3");
// 2. commit a transaction in originVM, it will disconnect from the DS
txMgr2.commit();
} catch (VirtualMachineError e) {
SystemFailure.initiateFailure(e);
throw e;
} catch (Throwable e) {
rgn1.getCache().getLogger().warning("Ignoring Exception", e);
} finally {
// Allow this VM to re-connect to the DS upon getCache() call
closeCache();
}
}
});
final SerializableRunnable soloRegionCommitValidator2 = new SerializableRunnable("testHighAvailabilityFeatures: validate successful v2 commit in solo Region") {
public void run() {
Region soloRgn = getCache().getRegion(soloRegionName);
if (soloRgn == null) {
// Expect a null region from originVM
try {
AttributesFactory factory2 = new AttributesFactory();
factory2.setScope(Scope.DISTRIBUTED_ACK);
factory2.setEarlyAck(false);
factory2.setDataPolicy(DataPolicy.REPLICATE);
soloRgn = getCache().createRegion(soloRegionName, factory2.create());
} catch (CacheException e) {
Assert.fail("While creating region ", e);
}
}
Region.Entry re = soloRgn.getEntry("soloKey0");
assertNotNull(re);
assertEquals("soloVal0_3", re.getValue());
re = soloRgn.getEntry("soloKey1");
assertNotNull(re);
assertEquals("soloVal1_3", re.getValue());
}
};
originVM.invoke(soloRegionCommitValidator2);
soloRegionVM.invoke(soloRegionCommitValidator2);
SerializableRunnable nonSoloChangeValidator2 = new SerializableRunnable("testHighAvailabilityFeatures: validate v2 non-solo Region changes") {
public void run() {
Region rgn1 = getCache().getRegion(rgnName);
if (rgn1 == null) {
// Expect a null region from originVM
try {
AttributesFactory factory2 = new AttributesFactory();
factory2.setScope(Scope.DISTRIBUTED_ACK);
factory2.setEarlyAck(false);
factory2.setDataPolicy(DataPolicy.REPLICATE);
rgn1 = getCache().createRegion(rgnName, factory2.create());
} catch (CacheException e) {
Assert.fail("While creating region", e);
}
}
Region.Entry re = rgn1.getEntry("key0");
assertNotNull(re);
assertEquals("val0_5", re.getValue());
re = rgn1.getEntry("key1");
assertNotNull(re);
assertEquals("val1_5", re.getValue());
}
};
Invoke.invokeInEveryVM(nonSoloChangeValidator2);
}
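Each originVM block above repeats the same crash-injection recipe; only the hook and the point of failure differ. A condensed sketch of that recipe is shown below, assuming an active transaction on the given CacheTransactionManager and that MembershipManagerHelper.crashDistributedSystem accepts the DistributedSystem passed in, as the calls in the test indicate.

// Sketch: crash the member the second time the commit-send hook fires.
// setAfterIndividualSend or setDuringIndividualCommitProcess can be installed instead of
// setDuringIndividualSend to fail at a different point in the commit protocol.
static void installCrashOnSecondSend(CacheTransactionManager txMgr, DistributedSystem system) {
  ((TXStateProxyImpl) ((TXManagerImpl) txMgr).getTXState()).forceLocalBootstrap();
  TXState txState =
      (TXState) ((TXStateProxyImpl) ((TXManagerImpl) txMgr).getTXState()).getRealDeal(null, null);
  txState.setDuringIndividualSend(new Runnable() {
    private int numCalled = 0;
    public synchronized void run() {
      if (++numCalled > 1) {
        MembershipManagerHelper.crashDistributedSystem(system); // simulate member failure mid-commit
      }
    }
  });
}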