Use of org.apache.geode.cache.PartitionAttributesFactory in project geode by apache.
The class MemoryThresholdsOffHeapDUnitTest, method prRemotePutRejection:
private void prRemotePutRejection(boolean cacheClose, boolean localDestroy, final boolean useTx) throws Exception {
  final Host host = Host.getHost(0);
  final VM accessor = host.getVM(0);
  final VM[] servers = new VM[3];
  servers[0] = host.getVM(1);
  servers[1] = host.getVM(2);
  servers[2] = host.getVM(3);
  final String regionName = "offHeapPRRemotePutRejection";
  final int redundancy = 1;
  startCacheServer(servers[0], 0f, 90f, regionName, true /* createPR */,
      false /* notifyBySubscription */, redundancy);
  startCacheServer(servers[1], 0f, 90f, regionName, true /* createPR */,
      false /* notifyBySubscription */, redundancy);
  startCacheServer(servers[2], 0f, 90f, regionName, true /* createPR */,
      false /* notifyBySubscription */, redundancy);
  accessor.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      getSystem(getOffHeapProperties());
      getCache();
      AttributesFactory factory = new AttributesFactory();
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(redundancy);
      paf.setLocalMaxMemory(0);
      paf.setTotalNumBuckets(11);
      factory.setPartitionAttributes(paf.create());
      factory.setOffHeap(true);
      createRegion(regionName, factory.create());
      return null;
    }
  });
  doPuts(accessor, regionName, false, false);
  final Range r1 = Range.DEFAULT;
  doPutAlls(accessor, regionName, false, false, r1);
  servers[0].invoke(addExpectedException);
  servers[1].invoke(addExpectedException);
  servers[2].invoke(addExpectedException);
  setUsageAboveCriticalThreshold(servers[0], regionName);
  final Set<InternalDistributedMember> criticalMembers =
      (Set) servers[0].invoke(new SerializableCallable() {
        public Object call() throws Exception {
          final PartitionedRegion pr = (PartitionedRegion) getRootRegion().getSubregion(regionName);
          final int hashKey = PartitionedRegionHelper.getHashKey(pr, null, "oh5", null, null);
          return pr.getRegionAdvisor().getBucketOwners(hashKey);
        }
      });
  accessor.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      final PartitionedRegion pr = (PartitionedRegion) getRootRegion().getSubregion(regionName);
      WaitCriterion wc = new WaitCriterion() {
        public String description() {
          return "remote bucket not marked sick";
        }

        public boolean done() {
          boolean keyFoundOnSickMember = false;
          boolean caughtException = false;
          for (int i = 0; i < 20; i++) {
            Integer key = Integer.valueOf(i);
            int hKey = PartitionedRegionHelper.getHashKey(pr, null, key, null, null);
            Set<InternalDistributedMember> owners = pr.getRegionAdvisor().getBucketOwners(hKey);
            final boolean hasCriticalOwners = owners.removeAll(criticalMembers);
            if (hasCriticalOwners) {
              keyFoundOnSickMember = true;
              try {
                if (useTx)
                  getCache().getCacheTransactionManager().begin();
                pr.getCache().getLogger().fine("SWAP:putting in tx:" + useTx);
                pr.put(key, "value");
                if (useTx)
                  getCache().getCacheTransactionManager().commit();
              } catch (LowMemoryException ex) {
                caughtException = true;
                if (useTx)
                  getCache().getCacheTransactionManager().rollback();
              }
            } else {
              // puts on healthy member should continue
              pr.put(key, "value");
            }
          }
          return keyFoundOnSickMember && caughtException;
        }
      };
      Wait.waitForCriterion(wc, 10000, 10, true);
      return null;
    }
  });
  {
    Range r2 = new Range(r1, r1.width() + 1);
    doPutAlls(accessor, regionName, false, true, r2);
  }
  // Find all VMs that have a critical region
  SerializableCallable getMyId = new SerializableCallable() {
    public Object call() throws Exception {
      return ((GemFireCacheImpl) getCache()).getMyId();
    }
  };
  final Set<VM> criticalServers = new HashSet<VM>();
  for (final VM server : servers) {
    DistributedMember member = (DistributedMember) server.invoke(getMyId);
    if (criticalMembers.contains(member)) {
      criticalServers.add(server);
    }
  }
  if (localDestroy) {
    // local destroy the region on sick members
    for (final VM vm : criticalServers) {
      vm.invoke(new SerializableCallable("local destroy sick member") {
        public Object call() throws Exception {
          Region r = getRootRegion().getSubregion(regionName);
          LogWriterUtils.getLogWriter().info("PRLocalDestroy");
          r.localDestroyRegion();
          return null;
        }
      });
    }
  } else if (cacheClose) {
    // close cache on sick members
    for (final VM vm : criticalServers) {
      vm.invoke(new SerializableCallable("close cache sick member") {
        public Object call() throws Exception {
          getCache().close();
          return null;
        }
      });
    }
  } else {
    setUsageBelowEviction(servers[0], regionName);
    servers[0].invoke(removeExpectedException);
    servers[1].invoke(removeExpectedException);
    servers[2].invoke(removeExpectedException);
  }
  // do put all in a loop to allow distribution of message
  accessor.invoke(new SerializableCallable("Put in a loop") {
    public Object call() throws Exception {
      final Region r = getRootRegion().getSubregion(regionName);
      WaitCriterion wc = new WaitCriterion() {
        public String description() {
          return "pr should have gone un-critical";
        }

        public boolean done() {
          boolean done = true;
          for (int i = 0; i < 20; i++) {
            try {
              r.put(i, "value");
            } catch (LowMemoryException e) {
              // expected
              done = false;
            }
          }
          return done;
        }
      };
      Wait.waitForCriterion(wc, 10000, 10, true);
      return null;
    }
  });
  doPutAlls(accessor, regionName, false, false, r1);
}
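The accessor configuration above is the heart of this example: setLocalMaxMemory(0) makes the member a pure accessor that hosts no buckets, while the three servers (started with a 90% critical threshold) hold the off-heap bucket data. A minimal standalone sketch of the same PartitionAttributesFactory usage outside the DUnit harness might look as follows; the class name, the off-heap-memory-size value, and the final put are illustrative assumptions, not taken from the test.

  import java.util.Properties;

  import org.apache.geode.cache.AttributesFactory;
  import org.apache.geode.cache.Cache;
  import org.apache.geode.cache.CacheFactory;
  import org.apache.geode.cache.PartitionAttributesFactory;
  import org.apache.geode.cache.Region;

  public class OffHeapAccessorSketch {
    public static void main(String[] args) {
      Properties props = new Properties();
      // Off-heap regions require off-heap memory to be configured up front (size is illustrative).
      props.setProperty("off-heap-memory-size", "64m");
      Cache cache = new CacheFactory(props).create();

      PartitionAttributesFactory<Integer, String> paf = new PartitionAttributesFactory<Integer, String>();
      paf.setRedundantCopies(1);   // one redundant copy of every bucket
      paf.setLocalMaxMemory(0);    // accessor: routes operations but hosts no bucket data
      paf.setTotalNumBuckets(11);  // must match the data stores' bucket count

      AttributesFactory<Integer, String> factory = new AttributesFactory<Integer, String>();
      factory.setPartitionAttributes(paf.create());
      factory.setOffHeap(true);    // values are stored in off-heap memory on the data stores

      Region<Integer, String> region =
          cache.createRegion("offHeapPRRemotePutRejection", factory.create());
      // Needs at least one data-store member (localMaxMemory > 0) running in the same
      // distributed system; otherwise there is nowhere to create the bucket.
      region.put(1, "value");
      cache.close();
    }
  }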
Use of org.apache.geode.cache.PartitionAttributesFactory in project geode by apache.
The class PartitionRegionHelperDUnitTest, method testMoveSingleBucket:
@Test
public void testMoveSingleBucket() {
  Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  VM vm1 = host.getVM(1);
  VM vm2 = host.getVM(2);
  SerializableCallable createPrRegion = new SerializableCallable("createRegion") {
    public Object call() {
      Cache cache = getCache();
      AttributesFactory attr = new AttributesFactory();
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(1);
      paf.setTotalNumBuckets(12);
      PartitionAttributes prAttr = paf.create();
      attr.setPartitionAttributes(prAttr);
      cache.createRegion("region1", attr.create());
      return cache.getDistributedSystem().getDistributedMember();
    }
  };
  final DistributedMember member0 = (DistributedMember) vm0.invoke(createPrRegion);
  final DistributedMember member1 = (DistributedMember) vm1.invoke(createPrRegion);
  // populate the region with data so we have some buckets
  vm0.invoke(new SerializableRunnable("create data") {
    public void run() {
      for (int i = 0; i < 8; i++) {
        Region<Object, Object> region = getCache().getRegion("region1");
        region.put(i, "one");
      }
    }
  });
  // Create VM 2 later so that it doesn't have any buckets
  final DistributedMember member2 = (DistributedMember) vm2.invoke(createPrRegion);
  // Try some explicit moves
  vm0.invoke(new SerializableRunnable("create data") {
    public void run() {
      Region<Object, Object> region = getCache().getRegion("region1");
      assertHasMembers(PartitionRegionHelper.getAllMembersForKey(region, 1), member0, member1);
      // Try to move a bucket to a member that already has the bucket
      try {
        PartitionRegionHelper.moveBucketByKey(region, member0, member1, 1);
        fail("Should have received an exception");
      } catch (IllegalStateException expected) {
        System.err.println(expected);
      }
      assertHasMembers(PartitionRegionHelper.getAllMembersForKey(region, 1), member0, member1);
      // Try to move the bucket from a member that doesn't have the bucket
      try {
        PartitionRegionHelper.moveBucketByKey(region, member2, member2, 1);
        fail("Should have received an exception");
      } catch (IllegalStateException expected) {
        System.err.println(expected);
      }
      assertHasMembers(PartitionRegionHelper.getAllMembersForKey(region, 1), member0, member1);
      // Try to move the bucket from an invalid member
      try {
        PartitionRegionHelper.moveBucketByKey(region, member2, new InternalDistributedMember("localhost", 5), 1);
        fail("Should have received an exception");
      } catch (IllegalStateException expected) {
        System.err.println(expected);
      }
      assertHasMembers(PartitionRegionHelper.getAllMembersForKey(region, 1), member0, member1);
      // Try to move a bucket that doesn't exist
      try {
        PartitionRegionHelper.moveBucketByKey(region, member0, member2, 10);
        fail("Should have received an exception");
      } catch (IllegalStateException expected) {
        System.err.println(expected);
      }
      assertHasMembers(PartitionRegionHelper.getAllMembersForKey(region, 10));
      // Do some successful moves.
      PartitionRegionHelper.moveBucketByKey(region, member0, member2, 1);
      assertHasMembers(PartitionRegionHelper.getAllMembersForKey(region, 1), member1, member2);
      PartitionRegionHelper.moveBucketByKey(region, member2, member0, 1);
      assertHasMembers(PartitionRegionHelper.getAllMembersForKey(region, 1), member0, member1);
      PartitionRegionHelper.moveBucketByKey(region, member0, member2, 2);
      PartitionRegionHelper.moveBucketByKey(region, member1, member2, 3);
      PartitionRegionHelper.moveBucketByKey(region, member1, member2, 4);
      assertHasMembers(PartitionRegionHelper.getAllMembersForKey(region, 2), member1, member2);
      assertHasMembers(PartitionRegionHelper.getAllMembersForKey(region, 3), member0, member2);
      assertHasMembers(PartitionRegionHelper.getAllMembersForKey(region, 4), member0, member2);
    }
  });
}
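assertHasMembers is a private helper of PartitionRegionHelperDUnitTest and is not reproduced on this page. A plausible reconstruction (hypothetical, assuming org.junit.Assert.assertEquals plus java.util.Arrays and java.util.HashSet) is sketched below; the real helper may differ.

  // Hypothetical reconstruction of the helper used above: the set of members hosting the
  // key's bucket must be exactly the expected members, in any order.
  private static void assertHasMembers(Set<DistributedMember> actual, DistributedMember... expected) {
    Set<DistributedMember> expectedSet = new HashSet<DistributedMember>(Arrays.asList(expected));
    assertEquals(expectedSet, new HashSet<DistributedMember>(actual));
  }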
Use of org.apache.geode.cache.PartitionAttributesFactory in project geode by apache.
The class MemoryThresholdsDUnitTest, method startCacheServer:
/**
 * Starts up a CacheServer.
 *
 * @return a {@link ServerPorts} containing the CacheServer ports.
 */
private ServerPorts startCacheServer(VM server, final float evictionThreshold,
    final float criticalThreshold, final String regionName, final boolean createPR,
    final boolean notifyBySubscription, final int prRedundancy) throws Exception {
  return (ServerPorts) server.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      getSystem(getServerProperties());
      GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
      InternalResourceManager irm = cache.getInternalResourceManager();
      HeapMemoryMonitor hmm = irm.getHeapMonitor();
      hmm.setTestMaxMemoryBytes(1000);
      HeapMemoryMonitor.setTestBytesUsedForThresholdSet(500);
      irm.setEvictionHeapPercentage(evictionThreshold);
      irm.setCriticalHeapPercentage(criticalThreshold);
      AttributesFactory factory = new AttributesFactory();
      if (createPR) {
        PartitionAttributesFactory paf = new PartitionAttributesFactory();
        paf.setRedundantCopies(prRedundancy);
        paf.setTotalNumBuckets(11);
        factory.setPartitionAttributes(paf.create());
      } else {
        factory.setScope(Scope.DISTRIBUTED_ACK);
        factory.setDataPolicy(DataPolicy.REPLICATE);
      }
      Region region = createRegion(regionName, factory.create());
      if (createPR) {
        assertTrue(region instanceof PartitionedRegion);
      } else {
        assertTrue(region instanceof DistributedRegion);
      }
      CacheServer cacheServer = getCache().addCacheServer();
      int port = AvailablePortHelper.getRandomAvailableTCPPorts(1)[0];
      cacheServer.setPort(port);
      cacheServer.setNotifyBySubscription(notifyBySubscription);
      cacheServer.start();
      return new ServerPorts(port);
    }
  });
}
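startCacheServer drives the heap monitor through internal test hooks (setTestMaxMemoryBytes, setTestBytesUsedForThresholdSet). In application code the equivalent thresholds would be configured through the public ResourceManager API; a rough standalone sketch, with illustrative class name and threshold values, might be:

  import org.apache.geode.cache.Cache;
  import org.apache.geode.cache.CacheFactory;
  import org.apache.geode.cache.control.ResourceManager;
  import org.apache.geode.cache.server.CacheServer;

  public class ServerThresholdSketch {
    public static void main(String[] args) throws Exception {
      Cache cache = new CacheFactory().create();

      ResourceManager rm = cache.getResourceManager();
      rm.setEvictionHeapPercentage(80.0f);  // start evicting heap-LRU regions at 80% heap usage
      rm.setCriticalHeapPercentage(90.0f);  // reject memory-consuming operations above 90%

      CacheServer server = cache.addCacheServer();
      server.setPort(0);  // 0 = ephemeral port, similar to AvailablePortHelper in the test
      server.start();
    }
  }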
Use of org.apache.geode.cache.PartitionAttributesFactory in project geode by apache.
The class MemoryThresholdsDUnitTest, method prRemotePutRejection:
private void prRemotePutRejection(boolean cacheClose, boolean localDestroy, final boolean useTx) throws Exception {
  final Host host = Host.getHost(0);
  final VM accessor = host.getVM(0);
  final VM server1 = host.getVM(1);
  final VM server2 = host.getVM(2);
  final VM server3 = host.getVM(3);
  final String regionName = "testPrRejection";
  final int redundancy = 1;
  final ServerPorts ports1 = startCacheServer(server1, 80f, 90f, regionName, true /* createPR */,
      false /* notifyBySubscription */, redundancy);
  ServerPorts ports2 = startCacheServer(server2, 80f, 90f, regionName, true /* createPR */,
      false /* notifyBySubscription */, redundancy);
  ServerPorts ports3 = startCacheServer(server3, 80f, 90f, regionName, true /* createPR */,
      false /* notifyBySubscription */, redundancy);
  accessor.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      getSystem(getServerProperties());
      getCache();
      AttributesFactory factory = new AttributesFactory();
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(redundancy);
      paf.setLocalMaxMemory(0);
      paf.setTotalNumBuckets(11);
      factory.setPartitionAttributes(paf.create());
      createRegion(regionName, factory.create());
      return null;
    }
  });
  doPuts(accessor, regionName, false, false);
  final Range r1 = Range.DEFAULT;
  doPutAlls(accessor, regionName, false, false, r1);
  SerializableCallable getMyId = new SerializableCallable() {
    public Object call() throws Exception {
      return ((GemFireCacheImpl) getCache()).getMyId();
    }
  };
  final DistributedMember server1Id = (DistributedMember) server1.invoke(getMyId);
  setUsageAboveCriticalThreshold(server1);
  accessor.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      final PartitionedRegion pr = (PartitionedRegion) getRootRegion().getSubregion(regionName);
      final String regionPath = getRootRegion().getSubregion(regionName).getFullPath();
      // server1 is sick, look for a key on server1, and attempt put again
      WaitCriterion wc = new WaitCriterion() {
        public String description() {
          return "remote bucket not marked sick";
        }

        public boolean done() {
          boolean keyFoundOnSickMember = false;
          boolean caughtException = false;
          for (int i = 0; i < 20; i++) {
            Integer key = Integer.valueOf(i);
            int hKey = PartitionedRegionHelper.getHashKey(pr, null, key, null, null);
            Set<InternalDistributedMember> owners = pr.getRegionAdvisor().getBucketOwners(hKey);
            if (owners.contains(server1Id)) {
              keyFoundOnSickMember = true;
              try {
                if (useTx)
                  getCache().getCacheTransactionManager().begin();
                pr.getCache().getLogger().fine("SWAP:putting in tx:" + useTx);
                pr.put(key, "value");
                if (useTx)
                  getCache().getCacheTransactionManager().commit();
              } catch (LowMemoryException ex) {
                caughtException = true;
                if (useTx)
                  getCache().getCacheTransactionManager().rollback();
              }
            } else {
              // puts on healthy member should continue
              pr.put(key, "value");
            }
          }
          return keyFoundOnSickMember && caughtException;
        }
      };
      Wait.waitForCriterion(wc, 30000, 10, true);
      return null;
    }
  });
  {
    Range r2 = new Range(r1, r1.width() + 1);
    doPutAlls(accessor, regionName, false, true, r2);
  }
  if (localDestroy) {
    // local destroy the region on sick member
    server1.invoke(new SerializableCallable("local destroy sick member") {
      public Object call() throws Exception {
        Region r = getRootRegion().getSubregion(regionName);
        LogWriterUtils.getLogWriter().info("PRLocalDestroy");
        r.localDestroyRegion();
        return null;
      }
    });
  } else if (cacheClose) {
    // close cache on sick member
    server1.invoke(new SerializableCallable("close cache sick member") {
      public Object call() throws Exception {
        getCache().close();
        return null;
      }
    });
  } else {
    setUsageBelowEviction(server1);
  }
  // do put all in a loop to allow distribution of message
  accessor.invoke(new SerializableCallable("Put in a loop") {
    public Object call() throws Exception {
      final Region r = getRootRegion().getSubregion(regionName);
      WaitCriterion wc = new WaitCriterion() {
        public String description() {
          return "pr should have gone un-critical";
        }

        public boolean done() {
          boolean done = true;
          for (int i = 0; i < 20; i++) {
            try {
              r.put(i, "value");
            } catch (LowMemoryException e) {
              // expected
              done = false;
            }
          }
          return done;
        }
      };
      Wait.waitForCriterion(wc, 30000, 10, true);
      return null;
    }
  });
  doPutAlls(accessor, regionName, false, false, r1);
}
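Both prRemotePutRejection variants check the same contract: a put that targets a bucket hosted on a critical member fails with LowMemoryException, while puts routed to healthy members keep succeeding. Application code reacting to that rejection might look roughly like the sketch below; the class and method names and the decision to merely log are illustrative assumptions.

  import org.apache.geode.cache.LowMemoryException;
  import org.apache.geode.cache.Region;

  public final class LowMemoryAwarePut {
    private LowMemoryAwarePut() {}

    /** Attempts a put and reports which members were critical if the operation is rejected. */
    public static <K, V> boolean tryPut(Region<K, V> region, K key, V value) {
      try {
        region.put(key, value);
        return true;
      } catch (LowMemoryException e) {
        // The exception names the members that crossed critical-heap-percentage.
        System.err.println("put rejected, critical members: " + e.getCriticalMembers());
        return false;
      }
    }
  }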
Use of org.apache.geode.cache.PartitionAttributesFactory in project geode by apache.
The class MemoryThresholdsDUnitTest, method createPR:
private CacheSerializableRunnable createPR(final String rName, final boolean accessor,
    final int fakeHeapMaxSize, final float criticalHeapThresh) {
  return new CacheSerializableRunnable("create PR accessor") {
    @Override
    public void run2() throws CacheException {
      // Assert some level of connectivity
      InternalDistributedSystem ds = getSystem();
      assertTrue(ds.getDistributionManager().getNormalDistributionManagerIds().size() >= 2);
      // below critical by 50%
      final long fakeHeapUsage = Math.round(fakeHeapMaxSize * (criticalHeapThresh - 0.5f));
      InternalResourceManager irm = (InternalResourceManager) getCache().getResourceManager();
      HeapMemoryMonitor hmm = irm.getHeapMonitor();
      assertTrue(fakeHeapMaxSize > 0);
      hmm.setTestMaxMemoryBytes(fakeHeapMaxSize);
      HeapMemoryMonitor.setTestBytesUsedForThresholdSet(fakeHeapUsage);
      irm.setCriticalHeapPercentage((criticalHeapThresh * 100.0f));
      assertFalse(hmm.getState().isCritical());
      AttributesFactory<Integer, String> af = new AttributesFactory<Integer, String>();
      if (!accessor) {
        af.setCacheLoader(new CacheLoader<Integer, String>() {
          final AtomicInteger numLoaderInvocations = new AtomicInteger();

          public String load(LoaderHelper<Integer, String> helper) throws CacheLoaderException {
            Integer expectedInvocations = (Integer) helper.getArgument();
            final int actualInvocations = this.numLoaderInvocations.getAndIncrement();
            if (expectedInvocations.intValue() != actualInvocations) {
              throw new CacheLoaderException("Expected " + expectedInvocations
                  + " invocations, actual is " + actualInvocations);
            }
            return helper.getKey().toString();
          }

          public void close() {}
        });
        af.setPartitionAttributes(new PartitionAttributesFactory().create());
      } else {
        af.setPartitionAttributes(new PartitionAttributesFactory().setLocalMaxMemory(0).create());
      }
      getCache().createRegion(rName, af.create());
    }
  };
}
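The only difference between the two branches above is localMaxMemory: the data store keeps the default, while the accessor passes 0 through PartitionAttributesFactory so it hosts no buckets. A hypothetical usage sketch from within the same DUnit test class (VM indices, region name, and threshold arguments are illustrative, not taken from the source) could be:

  // Hypothetical usage inside the same DUnit test class.
  Host host = Host.getHost(0);
  VM dataStore = host.getVM(0);
  VM accessor = host.getVM(1);
  final int fakeHeapMaxSize = 1000;        // bytes reported through the heap monitor test hook
  final float criticalHeapThresh = 0.90f;  // 90% critical threshold
  dataStore.invoke(createPR("testRegion", false /* data store */, fakeHeapMaxSize, criticalHeapThresh));
  accessor.invoke(createPR("testRegion", true /* accessor */, fakeHeapMaxSize, criticalHeapThresh));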