Use of org.apache.geode.cache.RegionFactory in project geode by apache: class DistributedNoAckRegionCCEDUnitTest, method testOneHopKnownIssues.
@Test
public void testOneHopKnownIssues() {
  Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  VM vm1 = host.getVM(1);
  VM vm2 = host.getVM(2);
  // this VM, but treat as a remote for uniformity
  VM vm3 = host.getVM(3);
  // create an empty region in vm0, a DataPolicy.NORMAL region in vm1 and a replicated region
  // in vm2, then perform the same operations on the same key in each and make sure that all
  // of the regions stay consistent
  final String name = this.getUniqueName() + "-CC";
  SerializableRunnable createRegion = new SerializableRunnable("Create Region") {
    public void run() {
      try {
        final RegionFactory f;
        int vmNumber = VM.getCurrentVMNum();
        switch (vmNumber) {
          case 0:
            f = getCache().createRegionFactory(
                getRegionAttributes(RegionShortcut.REPLICATE_PROXY.toString()));
            break;
          case 1:
            f = getCache().createRegionFactory(
                getRegionAttributes(RegionShortcut.REPLICATE.toString()));
            f.setDataPolicy(DataPolicy.NORMAL);
            break;
          default:
            f = getCache().createRegionFactory(getRegionAttributes());
            break;
        }
        CCRegion = (LocalRegion) f.create(name);
      } catch (CacheException ex) {
        Assert.fail("While creating region", ex);
      }
    }
  };
  vm0.invoke(createRegion); // empty
  vm1.invoke(createRegion); // normal
  vm2.invoke(createRegion); // replicate
  // case 1: entry already invalid on vm2 (replicate) is invalidated by vm0 (empty)
  final String invalidationKey = "invalidationKey";
  final String destroyKey = "destroyKey";
  SerializableRunnable test =
      new SerializableRunnable("case 1: second invalidation not applied or distributed") {
        public void run() {
          CCRegion.put(invalidationKey, "initialValue");
          int invalidationCount = CCRegion.getCachePerfStats().getInvalidates();
          CCRegion.invalidate(invalidationKey);
          CCRegion.invalidate(invalidationKey);
          assertEquals(invalidationCount + 1, CCRegion.getCachePerfStats().getInvalidates());
          // also test destroy() while we're at it. It should throw an exception
          int destroyCount = CCRegion.getCachePerfStats().getDestroys();
          CCRegion.destroy(invalidationKey);
          try {
            CCRegion.destroy(invalidationKey);
            fail("expected an EntryNotFoundException");
          } catch (EntryNotFoundException e) {
            // expected
          }
          assertEquals(destroyCount + 1, CCRegion.getCachePerfStats().getDestroys());
        }
      };
  vm0.invoke(test);
  // now do the same with the dataPolicy=NORMAL region
  test.setName("case 2: second invalidation not applied or distributed");
  vm1.invoke(test);
}
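For reference, the core Region operations exercised above (a put, a repeated invalidate, and a repeated destroy) can also be driven through the public RegionFactory API in a single JVM, without the DUnit harness or the internal CachePerfStats. This is a minimal sketch rather than code from the Geode test suite: the cache properties, class name, region name and key are illustrative, and the defensive catch around the second invalidate only reflects the expectation asserted by the test above.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.EntryNotFoundException;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class InvalidateDestroyExample {
  public static void main(String[] args) {
    // Single-member peer cache; multicast is disabled so no other members are discovered.
    Cache cache = new CacheFactory().set("mcast-port", "0").create();
    try {
      Region<String, String> region =
          cache.<String, String>createRegionFactory(RegionShortcut.REPLICATE).create("example");

      region.put("invalidationKey", "initialValue");
      region.invalidate("invalidationKey");
      // The test above asserts that a second invalidation is neither applied nor distributed.
      // Catch EntryNotFoundException defensively in case the local member reports the entry
      // as already invalid.
      try {
        region.invalidate("invalidationKey");
      } catch (EntryNotFoundException alreadyInvalid) {
        // entry was already invalid locally
      }

      region.destroy("invalidationKey");
      try {
        region.destroy("invalidationKey");
      } catch (EntryNotFoundException expected) {
        // a second destroy of the same key throws, as asserted in the test above
      }
    } finally {
      cache.close();
    }
  }
}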
Use of org.apache.geode.cache.RegionFactory in project geode by apache: class DistributedAckRegionCCEDUnitTest, method testEntryVersionRollover.
@Test
public void testEntryVersionRollover() throws Exception {
  assumeTrue(getClass() == DistributedAckRegionCCEDUnitTest.class);
  final String name = this.getUniqueName() + "-CC";
  final int numEntries = 1;
  SerializableRunnable createRegion = new SerializableRunnable("Create Region") {
    public void run() {
      try {
        RegionFactory f = getCache().createRegionFactory(getRegionAttributes());
        CCRegion = (LocalRegion) f.create(name);
        for (int i = 0; i < numEntries; i++) {
          CCRegion.put("cckey" + i, "ccvalue");
        }
        assertEquals("expected no conflated events", 0,
            CCRegion.getCachePerfStats().getConflatedEventsCount());
      } catch (CacheException ex) {
        org.apache.geode.test.dunit.Assert.fail("While creating region", ex);
      }
    }
  };
  VM vm0 = Host.getHost(0).getVM(0);
  vm0.invoke(createRegion);
  try {
    createRegion.run();
    VersionTag tag = new VMVersionTag();
    // set the version to the max - it should make the system think there's a rollover and reject
    // the change. Then apply it to the cache as if it is a replayed client operation. That should
    // cause the cache to apply the op locally
    tag.setEntryVersion(0xFFFFFF);
    tag.setDistributedSystemId(1);
    tag.setRegionVersion(CCRegion.getVersionVector().getNextVersion());
    VersionTagHolder holder = new VersionTagHolder(tag);
    ClientProxyMembershipID id = ClientProxyMembershipID
        .getNewProxyMembership(CCRegion.getDistributionManager().getSystem());
    CCRegion.basicBridgePut("cckey0", "newvalue", null, true, null, id, true, holder);
    vm0.invoke(new SerializableRunnable("check conflation count") {
      public void run() {
        assertEquals("expected one conflated event", 1,
            CCRegion.getCachePerfStats().getConflatedEventsCount());
      }
    });
  } finally {
    disconnectAllFromDS();
  }
}
Use of org.apache.geode.cache.RegionFactory in project geode by apache: class DistributedAckRegionCCEDUnitTest, method testAggressiveTombstoneReaping.
/**
 * Test for bug #46087 and #46089 where the waiting thread pool is flooded with threads performing
 * distributed-GC. This could be moved to a JUnit test class.
 */
@Test
public void testAggressiveTombstoneReaping() {
  assumeTrue(getClass() == DistributedAckRegionCCEDUnitTest.class);
  final String name = this.getUniqueName() + "-CC";
  final int saveExpiredTombstoneLimit = TombstoneService.EXPIRED_TOMBSTONE_LIMIT;
  final long saveTombstoneTimeout = TombstoneService.REPLICATE_TOMBSTONE_TIMEOUT;
  TombstoneService.EXPIRED_TOMBSTONE_LIMIT = 50;
  TombstoneService.REPLICATE_TOMBSTONE_TIMEOUT = 500;
  try {
    // create some destroyed entries so the GC service is populated
    RegionFactory f = getCache().createRegionFactory(getRegionAttributes());
    CCRegion = (LocalRegion) f.create(name);
    final long initialCount = CCRegion.getCachePerfStats().getTombstoneGCCount();
    for (int i = 0; i < 100; i++) {
      CCRegion.put("cckey" + i, "ccvalue" + i);
      CCRegion.destroy("cckey" + i);
    }
    // now simulate a low free-memory condition
    TombstoneService.FORCE_GC_MEMORY_EVENTS = true;
    WaitCriterion waitForGC = new WaitCriterion() {
      public boolean done() {
        return CCRegion.getCachePerfStats().getTombstoneGCCount() > initialCount;
      }

      public String description() {
        return "waiting for GC to occur";
      }
    };
    Wait.waitForCriterion(waitForGC, 20000, 1000, true);
    Wait.pause(5000);
    long gcCount = CCRegion.getCachePerfStats().getTombstoneGCCount();
    assertTrue("expected a few GCs, but not " + (gcCount - initialCount),
        gcCount < (initialCount + 20));
  } catch (CacheException ex) {
    org.apache.geode.test.dunit.Assert.fail("While creating region", ex);
  } finally {
    TombstoneService.EXPIRED_TOMBSTONE_LIMIT = saveExpiredTombstoneLimit;
    TombstoneService.FORCE_GC_MEMORY_EVENTS = false;
    TombstoneService.REPLICATE_TOMBSTONE_TIMEOUT = saveTombstoneTimeout;
  }
}
Use of org.apache.geode.cache.RegionFactory in project geode by apache: class QueryTestUtils, method createPartitionRegion.
public void createPartitionRegion(String name, Class constraint) {
  ExpirationAttributes expiration = ExpirationAttributes.DEFAULT;
  PartitionAttributesFactory paf = new PartitionAttributesFactory();
  RegionFactory factory =
      cache.createRegionFactory(RegionShortcut.PARTITION).setPartitionAttributes(paf.create());
  if (constraint != null) {
    factory.setValueConstraint(constraint);
  }
  factory.create(name);
}
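The helper above depends on the surrounding QueryTestUtils state (its cache field). A self-contained sketch of the same partitioned-region setup using only the public API might look as follows; the cache properties, bucket count, value constraint, region name and class name are illustrative assumptions rather than values taken from QueryTestUtils.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.PartitionAttributesFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionFactory;
import org.apache.geode.cache.RegionShortcut;

public class PartitionRegionExample {
  public static void main(String[] args) {
    Cache cache = new CacheFactory().set("mcast-port", "0").create();
    try {
      // Partition attributes are optional with RegionShortcut.PARTITION; they are set here to
      // mirror the helper above. The bucket count is an illustrative choice.
      PartitionAttributesFactory<String, String> paf = new PartitionAttributesFactory<>();
      paf.setTotalNumBuckets(40);

      RegionFactory<String, String> factory =
          cache.<String, String>createRegionFactory(RegionShortcut.PARTITION)
              .setPartitionAttributes(paf.create());
      factory.setValueConstraint(String.class); // optional, as in the helper above

      Region<String, String> region = factory.create("examplePartitionRegion");
      region.put("key1", "value1");
    } finally {
      cache.close();
    }
  }
}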
Use of org.apache.geode.cache.RegionFactory in project geode by apache: class QueryWithBucketParameterIntegrationTest, method setUp.
@Before
public void setUp() throws Exception {
  String regionName = "pr1";
  int totalBuckets = 40;
  int numValues = 80;
  CacheUtils.startCache();
  Cache cache = CacheUtils.getCache();
  PartitionAttributesFactory pAFactory =
      getPartitionAttributesFactoryWithPartitionResolver(totalBuckets);
  RegionFactory rf = cache.createRegionFactory(RegionShortcut.PARTITION);
  rf.setPartitionAttributes(pAFactory.create());
  PartitionedRegion pr1 = (PartitionedRegion) rf.create(regionName);
  populateRegion(pr1, numValues);
  QueryService qs = pr1.getCache().getQueryService();
  String query = "select distinct e1.value from /pr1 e1";
  queryExecutor = (DefaultQuery) CacheUtils.getQueryService().newQuery(query);
  Set<Integer> set = createAndPopulateSet(totalBuckets);
  lds = new LocalDataSet(pr1, set);
}
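DefaultQuery and LocalDataSet used above are internal classes that let the test execute the query against a chosen set of bucket ids. For comparison, here is a rough single-member sketch of running the same OQL string through the public QueryService API, which executes against all buckets; the region contents, class names and the ExampleValue type are stand-ins for whatever populateRegion() creates.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.query.Query;
import org.apache.geode.cache.query.QueryService;
import org.apache.geode.cache.query.SelectResults;

public class QueryExample {

  // Minimal stand-in for the domain objects stored by populateRegion(); the query below
  // projects its public "value" field.
  public static class ExampleValue implements java.io.Serializable {
    public final int value;

    public ExampleValue(int value) {
      this.value = value;
    }
  }

  public static void main(String[] args) throws Exception {
    Cache cache = new CacheFactory().set("mcast-port", "0").create();
    try {
      Region<String, ExampleValue> pr1 =
          cache.<String, ExampleValue>createRegionFactory(RegionShortcut.PARTITION).create("pr1");
      for (int i = 0; i < 80; i++) {
        pr1.put("key" + i, new ExampleValue(i));
      }

      QueryService qs = cache.getQueryService();
      Query query = qs.newQuery("select distinct e1.value from /pr1 e1");
      // Query.execute() runs against all buckets; the test above wraps the region in a
      // LocalDataSet to restrict execution to a chosen set of bucket ids.
      SelectResults<?> results = (SelectResults<?>) query.execute();
      System.out.println("distinct values: " + results.size());
    } finally {
      cache.close();
    }
  }
}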