use of org.apache.geode.cache.CacheTransactionManager in project geode by apache.
the class LRUEvictionControllerDUnitTest method testReplicationAndTransactions.
/**
 * Creates two regions: a "feeder" region that performs transactions which are replicated to a
 * region with an entry LRU limit of one. Asserts that the LRU rules are observed.
 *
 * @throws Exception
 */
@Test
public void testReplicationAndTransactions() throws Exception {
  final String r1 = this.getUniqueName() + "-1";
  final String r2 = this.getUniqueName() + "-2";
  final String r3 = this.getUniqueName() + "-3";
  VM feeder = Host.getHost(0).getVM(3);
  VM repl = Host.getHost(0).getVM(2);
  final int maxEntries = 1;
  final int numEntries = 10000;
  final int txBatchSize = 10;
  // need at least one batch
  assertTrue(numEntries > txBatchSize);
  CacheSerializableRunnable createRegion =
      new CacheSerializableRunnable("Create Replicate Region") {
        public void run2() throws CacheException {
          AttributesFactory factory = new AttributesFactory();
          factory.setOffHeap(isOffHeapEnabled());
          factory.setEvictionAttributes(
              EvictionAttributes.createLRUEntryAttributes(maxEntries, EvictionAction.OVERFLOW_TO_DISK));
          factory.setDataPolicy(DataPolicy.REPLICATE);
          File[] diskDirs = new File[1];
          diskDirs[0] = new File("overflowDir/" + OSProcess.getId());
          diskDirs[0].mkdirs();
          factory.setDiskStoreName(getCache().createDiskStoreFactory().setDiskDirs(diskDirs)
              .create("LRUEvictionControllerDUnitTest").getName());
          factory.setDiskSynchronous(true);
          factory.setScope(Scope.DISTRIBUTED_ACK);
          RegionAttributes a = factory.create();
          createRegion(r1, a);
          createRegion(r2, a);
          createRegion(r3, a);
        }
      };
  feeder.invoke(createRegion);
  repl.invoke(createRegion);
  feeder.invoke(new CacheSerializableRunnable(
      "put " + numEntries + " entries and assert " + maxEntries + " max entries") {
    public void run2() throws CacheException {
      Cache c = getCache();
      CacheTransactionManager txm = c.getCacheTransactionManager();
      Region reg1 = getRootRegion().getSubregion(r1);
      assertNotNull(reg1);
      Region reg2 = getRootRegion().getSubregion(r2);
      assertNotNull(reg2);
      Region reg3 = getRootRegion().getSubregion(r3);
      assertNotNull(reg3);
      boolean startTx = false;
      final Region[] r = { reg1, reg2, reg3 };
      for (int i = 0; i < numEntries; i++) {
        if (i % txBatchSize == 0) {
          txm.begin();
          startTx = true;
        }
        reg1.create("r1-key-" + i, "r1-value-" + i);
        reg2.create("r2-key-" + i, "r2-value-" + i);
        reg3.create("r3-key-" + i, "r3-value-" + i);
        if (i % txBatchSize == (txBatchSize - 1)) {
          txm.commit();
          try {
            // allow stats to get a sample in
            Thread.sleep(20);
          } catch (InterruptedException ie) {
            fail("interrupted");
          }
          startTx = false;
        }
      }
      if (startTx) {
        txm.commit();
      }
      for (int i = 0; i < r.length; i++) {
        assertEquals(numEntries, r[i].size());
        {
          LocalRegion lr = (LocalRegion) r[i];
          assertEquals(maxEntries, lr.getEvictionController().getLRUHelper().getStats().getLimit());
          assertEquals(maxEntries, lr.getEvictionController().getLRUHelper().getStats().getCounter());
        }
      }
    }
  });
  repl.invoke(new CacheSerializableRunnable("Replicate asserts " + maxEntries + " max entries") {
    public void run2() throws CacheException {
      getCache();
      Region reg1 = getRootRegion().getSubregion(r1);
      Region reg2 = getRootRegion().getSubregion(r2);
      Region reg3 = getRootRegion().getSubregion(r3);
      final Region[] r = { reg1, reg2, reg3 };
      for (int i = 0; i < r.length; i++) {
        assertNotNull(r[i]);
        assertEquals(numEntries, r[i].size());
        {
          LocalRegion lr = (LocalRegion) r[i];
          assertEquals(maxEntries, lr.getEvictionController().getLRUHelper().getStats().getLimit());
          assertEquals(maxEntries, lr.getEvictionController().getLRUHelper().getStats().getCounter());
        }
      }
    }
  });
}
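For reference, the core begin/commit batching pattern used by the feeder above can be sketched as a standalone single-member program. This is a minimal sketch, not the test itself: the class name, region name, batch size, and entry count are illustrative, and eviction overflows to Geode's default disk store instead of the explicitly created disk store used in the test.

import java.util.Properties;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.CacheTransactionManager;
import org.apache.geode.cache.EvictionAction;
import org.apache.geode.cache.EvictionAttributes;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class TxBatchingSketch {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty("mcast-port", "0"); // run as a standalone (loner) member
    // Single embedded member; the DUnit test spreads this across a feeder VM and a replica VM.
    Cache cache = new CacheFactory(props).create();
    // Replicated region with an entry-count LRU of 1 that overflows evicted values to disk.
    Region<String, String> region = cache.<String, String>createRegionFactory(RegionShortcut.REPLICATE)
        .setEvictionAttributes(
            EvictionAttributes.createLRUEntryAttributes(1, EvictionAction.OVERFLOW_TO_DISK))
        .create("txBatching");
    CacheTransactionManager txm = cache.getCacheTransactionManager();
    int txBatchSize = 10; // illustrative
    int numEntries = 100; // illustrative
    for (int i = 0; i < numEntries; i++) {
      if (i % txBatchSize == 0) {
        txm.begin(); // start a new batch
      }
      region.put("key-" + i, "value-" + i);
      if (i % txBatchSize == txBatchSize - 1) {
        txm.commit(); // commit the batch; the LRU keeps only one entry in memory
      }
    }
    if (txm.exists()) {
      txm.commit(); // commit a trailing partial batch
    }
    cache.close();
  }
}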
use of org.apache.geode.cache.CacheTransactionManager in project geode by apache.
the class DiskRegCacheXmlJUnitTest method closeCache.
/** Close the cache */
private synchronized void closeCache() {
  if (cache != null) {
    try {
      if (!cache.isClosed()) {
        CacheTransactionManager txMgr = cache.getCacheTransactionManager();
        if (txMgr != null && txMgr.exists()) {
          // make sure we clean up this thread's txid stored in a thread local
          txMgr.rollback();
        }
        cache.close();
      }
    } finally {
      cache = null;
    }
  }
}
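The same defensive cleanup can be written outside the test: roll back any transaction still attached to the calling thread before closing the cache. This is a minimal sketch; the CacheCloser helper and its cache field are assumptions for illustration, not part of the Geode API.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheTransactionManager;

final class CacheCloser {
  private Cache cache; // set elsewhere by the owner of this helper

  synchronized void closeQuietly() {
    if (cache == null) {
      return;
    }
    try {
      if (!cache.isClosed()) {
        CacheTransactionManager txMgr = cache.getCacheTransactionManager();
        // exists() reports whether the calling thread has an in-progress transaction;
        // rolling it back clears the thread-local transaction state before close.
        if (txMgr != null && txMgr.exists()) {
          txMgr.rollback();
        }
        cache.close();
      }
    } finally {
      cache = null;
    }
  }
}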
use of org.apache.geode.cache.CacheTransactionManager in project geode by apache.
the class NestedTransactionFunction method execute.
public void execute(FunctionContext context) {
  Cache cache = CacheFactory.getAnyInstance();
  ArrayList args = (ArrayList) context.getArguments();
  TXId txId = null;
  int action = 0;
  try {
    txId = (TXId) args.get(0);
    action = (Integer) args.get(1);
  } catch (ClassCastException e) {
    logger.info(
        "CommitFunction should be invoked with a TransactionId as an argument i.e. setArguments(txId).execute(function)");
    throw e;
  }
  CacheTransactionManager txMgr = cache.getCacheTransactionManager();
  Boolean result = false;
  final boolean isDebugEnabled = logger.isDebugEnabled();
  if (txMgr.tryResume(txId)) {
    if (isDebugEnabled) {
      logger.debug("CommitFunction: resumed transaction: {}", txId);
    }
    if (action == COMMIT) {
      if (isDebugEnabled) {
        logger.debug("CommitFunction: committing transaction: {}", txId);
      }
      txMgr.commit();
    } else if (action == ROLLBACK) {
      if (isDebugEnabled) {
        logger.debug("CommitFunction: rolling back transaction: {}", txId);
      }
      txMgr.rollback();
    } else {
      throw new IllegalStateException("unknown transaction termination action");
    }
    result = true;
  }
  if (isDebugEnabled) {
    logger.debug("CommitFunction: for transaction: {} sending result: {}", txId, result);
  }
  context.getResultSender().lastResult(result);
}
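NestedTransactionFunction resumes a transaction that some other thread began and then suspended. The initiating side of that handshake, reduced to a single member, might look roughly like the sketch below; in the real function path the TransactionId is shipped to the member hosting the transaction through FunctionService rather than resumed in-process. The class name, region name, and timeout are illustrative.

import java.util.Properties;
import java.util.concurrent.TimeUnit;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.CacheTransactionManager;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.TransactionId;

public class SuspendResumeSketch {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty("mcast-port", "0"); // standalone (loner) member
    Cache cache = new CacheFactory(props).create();
    Region<String, String> region =
        cache.<String, String>createRegionFactory(RegionShortcut.REPLICATE).create("orders");
    CacheTransactionManager txMgr = cache.getCacheTransactionManager();

    txMgr.begin();
    region.put("order-1", "pending");
    // Detach the transaction from this thread; it can be finished later by
    // whichever thread resumes it with the returned id.
    TransactionId txId = txMgr.suspend();

    // Later: tryResume returns false if the transaction is unknown or still
    // attached elsewhere, which is why the function above only commits inside the if.
    if (txMgr.tryResume(txId, 5, TimeUnit.SECONDS)) {
      txMgr.commit();
    }
    cache.close();
  }
}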
use of org.apache.geode.cache.CacheTransactionManager in project geode by apache.
the class CallbackArgDUnitTest method doCommitOtherVm.
private void doCommitOtherVm() {
  VM vm = getOtherVm();
  vm.invoke(new CacheSerializableRunnable("create root") {
    public void run2() throws CacheException {
      AttributesFactory af = new AttributesFactory();
      CacheListener cl1 = new CacheListenerAdapter() {
        public void afterCreate(EntryEvent e) {
          assertEquals(callbackArg, e.getCallbackArgument());
        }
      };
      af.addCacheListener(cl1);
      af.setScope(Scope.DISTRIBUTED_ACK);
      Region r1 = createRootRegion("r1", af.create());
      Region r2 = r1.createSubregion("r2", af.create());
      Region r3 = r2.createSubregion("r3", af.create());
      CacheTransactionManager ctm = getCache().getCacheTransactionManager();
      TransactionListener tl1 = new TransactionListenerAdapter() {
        public void afterCommit(TransactionEvent e) {
          assertEquals(6, e.getEvents().size());
          Iterator it = e.getEvents().iterator();
          while (it.hasNext()) {
            EntryEvent ee = (EntryEvent) it.next();
            assertEquals(callbackArg, ee.getCallbackArgument());
            assertEquals(true, ee.isCallbackArgumentAvailable());
          }
        }
      };
      ctm.addListener(tl1);
      ctm.begin();
      r2.put("b", "value1", callbackArg);
      r3.put("c", "value2", callbackArg);
      r1.put("a", "value3", callbackArg);
      r1.put("a2", "value4", callbackArg);
      r3.put("c2", "value5", callbackArg);
      r2.put("b2", "value6", callbackArg);
      ctm.commit();
    }
  });
}
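The CacheTransactionManager piece of this test, a TransactionListener observing per-entry callback arguments after commit, can be sketched on a single member as follows. This is a minimal sketch: the class name, region name, and callback argument are illustrative.

import java.util.Properties;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.CacheTransactionManager;
import org.apache.geode.cache.EntryEvent;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.TransactionEvent;
import org.apache.geode.cache.util.TransactionListenerAdapter;

public class TxListenerSketch {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty("mcast-port", "0"); // standalone (loner) member
    Cache cache = new CacheFactory(props).create();
    Region<String, String> region =
        cache.<String, String>createRegionFactory(RegionShortcut.REPLICATE).create("audited");
    CacheTransactionManager ctm = cache.getCacheTransactionManager();

    final Object callbackArg = "audit-tag"; // illustrative callback argument
    ctm.addListener(new TransactionListenerAdapter() {
      @Override
      public void afterCommit(TransactionEvent e) {
        // One EntryEvent per transactional operation.
        for (Object o : e.getEvents()) {
          EntryEvent<?, ?> ee = (EntryEvent<?, ?>) o;
          System.out.println(ee.getKey() + " committed with callback arg " + ee.getCallbackArgument());
        }
      }
    });

    ctm.begin();
    // The three-argument put carries the callback argument through to the listener events.
    region.put("a", "value1", callbackArg);
    region.put("b", "value2", callbackArg);
    ctm.commit();
    cache.close();
  }
}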
use of org.apache.geode.cache.CacheTransactionManager in project geode by apache.
the class RegionReliabilityTestCase method testCommitDistributionException.
@Test
public void testCommitDistributionException() throws Exception {
  if (getRegionScope().isGlobal())
    // skip test under Global
    return;
  if (getRegionScope().isDistributedNoAck())
    // skip test under DistributedNoAck
    return;
  final String name = this.getUniqueName();
  final String roleA = name + "-A";
  final String[] requiredRoles = { roleA };
  Set requiredRolesSet = new HashSet();
  for (int i = 0; i < requiredRoles.length; i++) {
    requiredRolesSet.add(InternalRole.getRole(requiredRoles[i]));
  }
  assertEquals(requiredRoles.length, requiredRolesSet.size());
  // connect controller to system...
  Properties config = new Properties();
  config.setProperty(ROLES, "");
  getSystem(config);
  GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
  RegionMembershipListener listener = new RegionMembershipListenerAdapter() {
    public void afterRemoteRegionDeparture(RegionEvent event) {
      synchronized (detectedDeparture_testCommitDistributionException) {
        detectedDeparture_testCommitDistributionException[0] = Boolean.TRUE;
        detectedDeparture_testCommitDistributionException.notify();
      }
    }
  };
  // create region in controller...
  MembershipAttributes ra =
      new MembershipAttributes(requiredRoles, LossAction.NO_ACCESS, ResumptionAction.NONE);
  AttributesFactory fac = new AttributesFactory();
  fac.setMembershipAttributes(ra);
  fac.setScope(getRegionScope());
  fac.addCacheListener(listener);
  RegionAttributes attr = fac.create();
  Region region = createRootRegion(name, attr);
  // use vm1 to create role
  Host.getHost(0).getVM(1).invoke(new CacheSerializableRunnable("Create Region") {
    public void run2() throws CacheException {
      createConnection(new String[] { roleA });
      AttributesFactory fac = new AttributesFactory();
      fac.setScope(getRegionScope());
      RegionAttributes attr = fac.create();
      createRootRegion(name, attr);
    }
  });
  // define the afterReleaseLocalLocks callback
  SerializableRunnableIF removeRequiredRole = new SerializableRunnableIF() {
    public void run() {
      Host.getHost(0).getVM(1).invoke(new SerializableRunnable("Close Region") {
        public void run() {
          getRootRegion(name).close();
        }
      });
      try {
        synchronized (detectedDeparture_testCommitDistributionException) {
          while (detectedDeparture_testCommitDistributionException[0] == Boolean.FALSE) {
            detectedDeparture_testCommitDistributionException.wait();
          }
        }
      } catch (InterruptedException e) {
        fail("interrupted");
      }
    }
  };
  // define the add and remove expected exceptions
  final String expectedExceptions = "org.apache.geode.internal.cache.CommitReplyException";
  SerializableRunnable addExpectedExceptions =
      new CacheSerializableRunnable("addExpectedExceptions") {
        public void run2() throws CacheException {
          getCache().getLogger().info(
              "<ExpectedException action=add>" + expectedExceptions + "</ExpectedException>");
        }
      };
  SerializableRunnable removeExpectedExceptions =
      new CacheSerializableRunnable("removeExpectedExceptions") {
        public void run2() throws CacheException {
          getCache().getLogger().info(
              "<ExpectedException action=remove>" + expectedExceptions + "</ExpectedException>");
        }
      };
  // perform the actual test...
  CacheTransactionManager ctm = cache.getCacheTransactionManager();
  ctm.begin();
  TXStateInterface txStateProxy = ((TXManagerImpl) ctm).getTXState();
  ((TXStateProxyImpl) txStateProxy).forceLocalBootstrap();
  TXState txState = (TXState) ((TXStateProxyImpl) txStateProxy).getRealDeal(null, null);
  txState.setBeforeSend(() -> {
    try {
      removeRequiredRole.run();
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
  });
  // now start a transaction and commit it
  region.put("KEY", "VAL");
  addExpectedExceptions.run();
  Host.getHost(0).getVM(1).invoke(addExpectedExceptions);
  try {
    ctm.commit();
    fail("Should have thrown CommitDistributionException");
  } catch (CommitDistributionException e) {
    // pass
  } finally {
    removeExpectedExceptions.run();
    Host.getHost(0).getVM(1).invoke(removeExpectedExceptions);
  }
}
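Stripped of the DUnit plumbing and internal TXState hooks, application code typically guards commit() as in the sketch below. CommitConflictException and CommitDistributionException are real exceptions in org.apache.geode.cache; the class name, region name, and handling strategy are illustrative, and on a single loner member neither exception would actually be raised.

import java.util.Properties;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.CacheTransactionManager;
import org.apache.geode.cache.CommitConflictException;
import org.apache.geode.cache.CommitDistributionException;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class CommitFailureHandlingSketch {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty("mcast-port", "0"); // standalone (loner) member
    Cache cache = new CacheFactory(props).create();
    Region<String, String> region =
        cache.<String, String>createRegionFactory(RegionShortcut.REPLICATE).create("reliable");
    CacheTransactionManager ctm = cache.getCacheTransactionManager();

    ctm.begin();
    region.put("KEY", "VAL");
    try {
      ctm.commit();
    } catch (CommitConflictException e) {
      // Another transaction committed conflicting changes first; this transaction
      // does not take effect, so the usual response is to retry the whole unit of work.
      System.err.println("commit conflict: " + e.getMessage());
    } catch (CommitDistributionException e) {
      // The commit could not be acknowledged by all members holding required roles
      // (the situation the test above provokes by closing the role-holding region).
      System.err.println("commit not fully distributed: " + e.getMessage());
    }
    cache.close();
  }
}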