Use of org.apache.geode.cache.CacheTransactionManager in project geode by apache.
From the class DistTXDebugDUnitTest, method testTXDestroy_invalidate.
@Test
public void testTXDestroy_invalidate() throws Exception {
  createCacheInAllVms();
  Object[] prAttrs = new Object[] {"pregion1", 1, null, 3, null, Boolean.FALSE, Boolean.FALSE};
  createPartitionedRegion(prAttrs);
  Object[] rrAttrs = new Object[] {"rregion1", Boolean.FALSE};
  createReplicatedRegion(rrAttrs);
  SerializableCallable TxOps = new SerializableCallable("TxOps") {
    @Override
    public Object call() throws CacheException {
      PartitionedRegion pr1 = (PartitionedRegion) basicGetCache().getRegion("pregion1");
      Region rr1 = basicGetCache().getRegion("rregion1");
      // put some data (non tx ops)
      for (int i = 1; i <= 6; i++) {
        DummyKeyBasedRoutingResolver dummy = new DummyKeyBasedRoutingResolver(i);
        LogWriterUtils.getLogWriter().info(" calling non-tx put");
        pr1.put(dummy, "1_entry__" + i);
        rr1.put(dummy, "1_entry__" + i);
      }
      CacheTransactionManager ctx = basicGetCache().getCacheTransactionManager();
      ctx.setDistributed(true);
      // destroy data in tx and commit
      ctx.begin();
      for (int i = 1; i <= 3; i++) {
        DummyKeyBasedRoutingResolver dummy = new DummyKeyBasedRoutingResolver(i);
        LogWriterUtils.getLogWriter().info(" calling pr1.destroy in tx key=" + dummy);
        pr1.destroy(dummy);
        LogWriterUtils.getLogWriter().info(" calling rr1.destroy in tx key=" + i);
        rr1.destroy(dummy);
      }
      for (int i = 4; i <= 6; i++) {
        DummyKeyBasedRoutingResolver dummy = new DummyKeyBasedRoutingResolver(i);
        LogWriterUtils.getLogWriter().info(" calling pr1.invalidate in tx key=" + dummy);
        pr1.invalidate(dummy);
        LogWriterUtils.getLogWriter().info(" calling rr1.invalidate in tx key=" + i);
        rr1.invalidate(dummy);
      }
      ctx.commit();
      // verify the data
      for (int i = 1; i <= 6; i++) {
        DummyKeyBasedRoutingResolver dummy = new DummyKeyBasedRoutingResolver(i);
        LogWriterUtils.getLogWriter().info(" calling pr1.get");
        assertEquals(null, pr1.get(dummy));
        LogWriterUtils.getLogWriter().info(" calling rr1.get");
        assertEquals(null, rr1.get(dummy));
      }
      return null;
    }
  };
  accessor.invoke(TxOps);
  // verify data size on all replicas
  SerializableCallable verifySize = new SerializableCallable("getOps") {
    @Override
    public Object call() throws CacheException {
      PartitionedRegion pr1 = (PartitionedRegion) basicGetCache().getRegion("pregion1");
      Region rr1 = basicGetCache().getRegion("rregion1");
      LogWriterUtils.getLogWriter().info(" calling pr1.getLocalSize " + pr1.getLocalSize());
      assertEquals(2, pr1.getLocalSize());
      LogWriterUtils.getLogWriter().info(" calling rr1.size " + rr1.size());
      assertEquals(3, rr1.size());
      return null;
    }
  };
  dataStore1.invoke(verifySize);
  dataStore2.invoke(verifySize);
  dataStore3.invoke(verifySize);
  accessor.invoke(() -> DistTXDebugDUnitTest.destroyPR("pregion1"));
}
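
The size assertions above rest on the different footprints of destroy and invalidate inside a transaction: a committed destroy removes the entry outright, while a committed invalidate keeps the key and nulls its value, so invalidated keys still count toward Region.size() (which is why rregion1 still reports a size of 3 after three destroys and three invalidates). The following is a minimal standalone sketch of that difference, not part of the test above; the single-member cache, the default (non-distributed) transaction, and the class and region names are illustrative assumptions.

import java.util.Properties;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.CacheTransactionManager;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.distributed.ConfigurationProperties;

public class DestroyVsInvalidateSketch {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty(ConfigurationProperties.MCAST_PORT, "0");
    props.setProperty(ConfigurationProperties.LOCATORS, "");
    Cache cache = new CacheFactory(props).create();
    Region<Integer, String> region =
        cache.<Integer, String>createRegionFactory(RegionShortcut.REPLICATE).create("example");
    region.put(1, "entry_1");
    region.put(2, "entry_2");

    CacheTransactionManager txMgr = cache.getCacheTransactionManager();
    txMgr.begin();
    region.destroy(1); // entry is removed entirely once the transaction commits
    region.invalidate(2); // key survives the commit, value becomes null
    txMgr.commit();

    assert !region.containsKey(1); // destroyed: key is gone
    assert region.containsKey(2); // invalidated: key still present
    assert region.get(2) == null; // but its value reads back as null
    assert region.size() == 1; // only the invalidated entry still counts toward size
    cache.close();
  }
}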
Use of org.apache.geode.cache.CacheTransactionManager in project geode by apache.
From the class DistTXPersistentDebugDUnitTest, method testBasicDistributedTX.
@Test
public void testBasicDistributedTX() throws Exception {
  createCacheInAllVms();
  final String regionName = "persistentCustomerPRRegion";
  Object[] attrs = new Object[] {regionName};
  createPersistentPR(attrs);
  SerializableCallable TxOps = new SerializableCallable() {
    @Override
    public Object call() throws Exception {
      CacheTransactionManager mgr = basicGetCache().getCacheTransactionManager();
      mgr.setDistributed(true);
      LogWriterUtils.getLogWriter().fine("SJ:TX BEGIN");
      mgr.begin();
      Region<CustId, Customer> prRegion = basicGetCache().getRegion(regionName);
      CustId custIdOne = new CustId(1);
      Customer customerOne = new Customer("name1", "addr1");
      LogWriterUtils.getLogWriter().fine("SJ:TX PUT 1");
      prRegion.put(custIdOne, customerOne);
      CustId custIdTwo = new CustId(2);
      Customer customerTwo = new Customer("name2", "addr2");
      LogWriterUtils.getLogWriter().fine("SJ:TX PUT 2");
      prRegion.put(custIdTwo, customerTwo);
      LogWriterUtils.getLogWriter().fine("SJ:TX COMMIT");
      mgr.commit();
      return null;
    }
  };
  dataStore2.invoke(TxOps);
}
Use of org.apache.geode.cache.CacheTransactionManager in project geode by apache.
From the class DistTXReleasesOffHeapOnCloseJUnitTest, method createCache.
@Override
protected void createCache() {
  Properties props = new Properties();
  props.setProperty(MCAST_PORT, "0");
  props.setProperty(LOCATORS, "");
  props.setProperty(ConfigurationProperties.OFF_HEAP_MEMORY_SIZE, "1m");
  props.put(ConfigurationProperties.DISTRIBUTED_TRANSACTIONS, "true");
  cache = new CacheFactory(props).create();
  CacheTransactionManager txmgr = cache.getCacheTransactionManager();
  assert (txmgr.isDistributed());
}
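
Here the transaction manager is distributed from the start because the cache was created with the distributed-transactions property set to "true"; no per-manager setDistributed(true) call is needed, unlike the DUnit tests above. Below is a minimal sketch of driving a transaction against a cache configured that way; the single standalone member, the class name, and the region name "example" are illustrative assumptions, not part of the test.

import java.util.Properties;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.CacheTransactionManager;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.distributed.ConfigurationProperties;

public class DistributedTxViaPropertySketch {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty(ConfigurationProperties.MCAST_PORT, "0");
    props.setProperty(ConfigurationProperties.LOCATORS, "");
    // Same cache-wide switch used in createCache() above.
    props.setProperty(ConfigurationProperties.DISTRIBUTED_TRANSACTIONS, "true");
    Cache cache = new CacheFactory(props).create();
    Region<String, String> region =
        cache.<String, String>createRegionFactory(RegionShortcut.REPLICATE).create("example");

    CacheTransactionManager txMgr = cache.getCacheTransactionManager();
    assert txMgr.isDistributed(); // already distributed; no setDistributed(true) call needed
    txMgr.begin();
    region.put("k1", "v1");
    txMgr.commit();

    cache.close();
  }
}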
Use of org.apache.geode.cache.CacheTransactionManager in project geode by apache.
From the class DistTXDebugDUnitTest, method testTXPRRR2_putall.
@Test
public void testTXPRRR2_putall() throws Exception {
  createCacheInAllVms();
  Object[] prAttrs = new Object[] {"pregion1", 1, null, 3, null, Boolean.FALSE, Boolean.FALSE};
  createPartitionedRegion(prAttrs);
  Object[] rrAttrs = new Object[] {"rregion1", Boolean.FALSE};
  createReplicatedRegion(rrAttrs);
  SerializableCallable TxOps = new SerializableCallable("TxOps") {
    @Override
    public Object call() throws CacheException {
      PartitionedRegion pr1 = (PartitionedRegion) basicGetCache().getRegion("pregion1");
      Region rr1 = basicGetCache().getRegion("rregion1");
      CacheTransactionManager ctx = basicGetCache().getCacheTransactionManager();
      ctx.setDistributed(true);
      ctx.begin();
      HashMap<DummyKeyBasedRoutingResolver, String> phm =
          new HashMap<DummyKeyBasedRoutingResolver, String>();
      HashMap<Integer, String> rhm = new HashMap<Integer, String>();
      for (int i = 1; i <= 3; i++) {
        DummyKeyBasedRoutingResolver dummy = new DummyKeyBasedRoutingResolver(i);
        phm.put(dummy, "2_entry__" + i);
        rhm.put(i, "2_entry__" + i);
      }
      pr1.putAll(phm);
      rr1.putAll(rhm);
      ctx.commit();
      // verify the data
      for (int i = 1; i <= 3; i++) {
        DummyKeyBasedRoutingResolver dummy = new DummyKeyBasedRoutingResolver(i);
        LogWriterUtils.getLogWriter().info(" calling pr.get " + pr1.get(dummy));
        assertEquals("2_entry__" + i, pr1.get(dummy));
        LogWriterUtils.getLogWriter().info(" calling rr.get " + rr1.get(new Integer(i)));
        assertEquals("2_entry__" + i, rr1.get(new Integer(i)));
      }
      return null;
    }
  };
  accessor.invoke(TxOps);
  // verify data size on all replicas
  SerializableCallable verifySize = new SerializableCallable("getOps") {
    @Override
    public Object call() throws CacheException {
      Region rr1 = basicGetCache().getRegion("rregion1");
      LogWriterUtils.getLogWriter().info(" calling rr.getLocalSize " + rr1.size());
      assertEquals(3, rr1.size());
      PartitionedRegion pr1 = (PartitionedRegion) basicGetCache().getRegion("pregion1");
      LogWriterUtils.getLogWriter().info(" calling pr.getLocalSize " + pr1.getLocalSize());
      assertEquals(2, pr1.getLocalSize());
      return null;
    }
  };
  dataStore1.invoke(verifySize);
  dataStore2.invoke(verifySize);
  dataStore3.invoke(verifySize);
  // accessor.invoke(TxOps);
}
Use of org.apache.geode.cache.CacheTransactionManager in project geode by apache.
From the class DistributedTransactionDUnitTest, method testMultipleOpsOnSameKeyInTx.
@Test
public void testMultipleOpsOnSameKeyInTx() throws Exception {
  Host host = Host.getHost(0);
  VM server1 = host.getVM(0);
  VM server2 = host.getVM(1);
  VM server3 = host.getVM(2);
  createPR(new VM[] {server1, server2, server3});
  execute(server1, new SerializableCallable() {
    @Override
    public Object call() throws Exception {
      CacheTransactionManager mgr = getGemfireCache().getTxManager();
      mgr.setDistributed(true);
      mgr.begin();
      mgr.commit();
      mgr.begin();
      Region<CustId, Customer> custRegion = getCache().getRegion(CUSTOMER_PR);
      CustId custId = new CustId(1);
      Customer expectedCustomer = custRegion.get(custId);
      assertNull(expectedCustomer);
      // Perform a put
      CustId custIdOne = new CustId(1);
      Customer customerOne = new Customer("name1", "addr1");
      custRegion.put(custIdOne, customerOne);
      // Rollback the transaction
      mgr.rollback();
      mgr.begin();
      // Verify that the entry is rolled back
      expectedCustomer = custRegion.get(custId);
      assertNull(expectedCustomer);
      // Add more data
      CustId custIdTwo = new CustId(2);
      Customer customerTwo = new Customer("name2", "addr2");
      CustId custIdThree = new CustId(3);
      Customer customerThree = new Customer("name3", "addr3");
      custRegion.put(custIdTwo, customerTwo);
      custRegion.put(custIdThree, customerThree);
      mgr.commit();
      mgr.begin();
      // Verify data
      assertEquals(2, custRegion.size());
      assertTrue(custRegion.containsKey(custIdTwo));
      assertTrue(custRegion.containsKey(custIdThree));
      assertEquals(customerTwo, custRegion.get(custIdTwo));
      assertEquals(customerThree, custRegion.get(custIdThree));
      // Update the values for the same keys multiple times
      custRegion.put(custIdOne, new Customer("name1_mod1", "addr1_mod1"));
      custRegion.put(custIdTwo, new Customer("name2_mod1", "addr2_mod1"));
      custRegion.put(custIdOne, new Customer("name1_mod2", "addr1_mod2"));
      custRegion.put(custIdOne, new Customer("name1_mod3", "addr1_mod3"));
      custRegion.put(custIdTwo, new Customer("name2_mod2", "addr2_mod2"));
      assertEquals(3, custRegion.size());
      mgr.commit();
      assertEquals(3, custRegion.size());
      Customer c = custRegion.get(custIdOne);
      return null;
    }
  });
}
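
The test above writes custIdOne and custIdTwo several times inside one transaction before committing; only the last write for each key is visible afterwards, and the repeated puts still yield a single entry per key (hence the size of 3). A minimal standalone sketch of that last-write-wins behaviour follows; the single-member cache, the default (non-distributed) transaction, and the class and region names are illustrative assumptions, not part of the test.

import java.util.Properties;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.CacheTransactionManager;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.distributed.ConfigurationProperties;

public class LastWriteWinsInTxSketch {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty(ConfigurationProperties.MCAST_PORT, "0");
    props.setProperty(ConfigurationProperties.LOCATORS, "");
    Cache cache = new CacheFactory(props).create();
    Region<Integer, String> region =
        cache.<Integer, String>createRegionFactory(RegionShortcut.REPLICATE).create("example");

    CacheTransactionManager txMgr = cache.getCacheTransactionManager();
    txMgr.begin();
    region.put(1, "name1_mod1");
    region.put(1, "name1_mod2");
    region.put(1, "name1_mod3"); // last write in the transaction
    txMgr.commit();

    assert "name1_mod3".equals(region.get(1)); // only the final value survives the commit
    assert region.size() == 1; // repeated puts to one key still create one entry
    cache.close();
  }
}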