Use of org.apache.geode.internal.cache.GemFireCacheImpl in project geode by apache.
From the class MemoryThresholdsOffHeapDUnitTest, method prRemotePutRejection:
private void prRemotePutRejection(boolean cacheClose, boolean localDestroy, final boolean useTx) throws Exception {
  final Host host = Host.getHost(0);
  final VM accessor = host.getVM(0);
  final VM[] servers = new VM[3];
  servers[0] = host.getVM(1);
  servers[1] = host.getVM(2);
  servers[2] = host.getVM(3);
  final String regionName = "offHeapPRRemotePutRejection";
  final int redundancy = 1;
  startCacheServer(servers[0], 0f, 90f, regionName, true, /* createPR */
      false, /* notifyBySubscription */
      redundancy);
  startCacheServer(servers[1], 0f, 90f, regionName, true, /* createPR */
      false, /* notifyBySubscription */
      redundancy);
  startCacheServer(servers[2], 0f, 90f, regionName, true, /* createPR */
      false, /* notifyBySubscription */
      redundancy);
  accessor.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      getSystem(getOffHeapProperties());
      getCache();
      AttributesFactory factory = new AttributesFactory();
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(redundancy);
      paf.setLocalMaxMemory(0);
      paf.setTotalNumBuckets(11);
      factory.setPartitionAttributes(paf.create());
      factory.setOffHeap(true);
      createRegion(regionName, factory.create());
      return null;
    }
  });
  doPuts(accessor, regionName, false, false);
  final Range r1 = Range.DEFAULT;
  doPutAlls(accessor, regionName, false, false, r1);
  servers[0].invoke(addExpectedException);
  servers[1].invoke(addExpectedException);
  servers[2].invoke(addExpectedException);
  setUsageAboveCriticalThreshold(servers[0], regionName);
  final Set<InternalDistributedMember> criticalMembers = (Set) servers[0].invoke(new SerializableCallable() {
    public Object call() throws Exception {
      final PartitionedRegion pr = (PartitionedRegion) getRootRegion().getSubregion(regionName);
      final int hashKey = PartitionedRegionHelper.getHashKey(pr, null, "oh5", null, null);
      return pr.getRegionAdvisor().getBucketOwners(hashKey);
    }
  });
  accessor.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      final PartitionedRegion pr = (PartitionedRegion) getRootRegion().getSubregion(regionName);
      WaitCriterion wc = new WaitCriterion() {
        public String description() {
          return "remote bucket not marked sick";
        }
        public boolean done() {
          boolean keyFoundOnSickMember = false;
          boolean caughtException = false;
          for (int i = 0; i < 20; i++) {
            Integer key = Integer.valueOf(i);
            int hKey = PartitionedRegionHelper.getHashKey(pr, null, key, null, null);
            Set<InternalDistributedMember> owners = pr.getRegionAdvisor().getBucketOwners(hKey);
            final boolean hasCriticalOwners = owners.removeAll(criticalMembers);
            if (hasCriticalOwners) {
              keyFoundOnSickMember = true;
              try {
                if (useTx)
                  getCache().getCacheTransactionManager().begin();
                pr.getCache().getLogger().fine("SWAP:putting in tx:" + useTx);
                pr.put(key, "value");
                if (useTx)
                  getCache().getCacheTransactionManager().commit();
              } catch (LowMemoryException ex) {
                caughtException = true;
                if (useTx)
                  getCache().getCacheTransactionManager().rollback();
              }
            } else {
              // puts on healthy member should continue
              pr.put(key, "value");
            }
          }
          return keyFoundOnSickMember && caughtException;
        }
      };
      Wait.waitForCriterion(wc, 10000, 10, true);
      return null;
    }
  });
  {
    Range r2 = new Range(r1, r1.width() + 1);
    doPutAlls(accessor, regionName, false, true, r2);
  }
  // Find all VMs that have a critical region
  SerializableCallable getMyId = new SerializableCallable() {
    public Object call() throws Exception {
      return ((GemFireCacheImpl) getCache()).getMyId();
    }
  };
  final Set<VM> criticalServers = new HashSet<VM>();
  for (final VM server : servers) {
    DistributedMember member = (DistributedMember) server.invoke(getMyId);
    if (criticalMembers.contains(member)) {
      criticalServers.add(server);
    }
  }
  if (localDestroy) {
    // local destroy the region on sick members
    for (final VM vm : criticalServers) {
      vm.invoke(new SerializableCallable("local destroy sick member") {
        public Object call() throws Exception {
          Region r = getRootRegion().getSubregion(regionName);
          LogWriterUtils.getLogWriter().info("PRLocalDestroy");
          r.localDestroyRegion();
          return null;
        }
      });
    }
  } else if (cacheClose) {
    // close cache on sick members
    for (final VM vm : criticalServers) {
      vm.invoke(new SerializableCallable("close cache sick member") {
        public Object call() throws Exception {
          getCache().close();
          return null;
        }
      });
    }
  } else {
    setUsageBelowEviction(servers[0], regionName);
    servers[0].invoke(removeExpectedException);
    servers[1].invoke(removeExpectedException);
    servers[2].invoke(removeExpectedException);
  }
  // do put all in a loop to allow distribution of message
  accessor.invoke(new SerializableCallable("Put in a loop") {
    public Object call() throws Exception {
      final Region r = getRootRegion().getSubregion(regionName);
      WaitCriterion wc = new WaitCriterion() {
        public String description() {
          return "pr should have gone un-critical";
        }
        public boolean done() {
          boolean done = true;
          for (int i = 0; i < 20; i++) {
            try {
              r.put(i, "value");
            } catch (LowMemoryException e) {
              // expected
              done = false;
            }
          }
          return done;
        }
      };
      Wait.waitForCriterion(wc, 10000, 10, true);
      return null;
    }
  });
  doPutAlls(accessor, regionName, false, false, r1);
}
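The wait criterion above hinges on one internal lookup: asking the region advisor which members host the bucket a key maps to, and checking whether any of them is in the critical set. A minimal sketch of that check, using the same internal calls as the test and assuming "pr", "key" and "criticalMembers" are obtained as shown above:

// Sketch only; pr, key and criticalMembers are assumed to come from the test above.
int bucketId = PartitionedRegionHelper.getHashKey(pr, null, key, null, null);
Set<InternalDistributedMember> owners = pr.getRegionAdvisor().getBucketOwners(bucketId);
// removeAll returns true when at least one owner is in the critical set,
// i.e. the key is hosted on a member that is above its critical threshold.
boolean hostedOnCriticalMember = owners.removeAll(criticalMembers);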
Use of org.apache.geode.internal.cache.GemFireCacheImpl in project geode by apache.
From the class ResourceManagerDUnitTest, method testGetInternalPRDetails:
/**
 * Creates partitioned regions in multiple vms and fully exercises the internal-only
 * getInternalPRDetails API on ResourceManager.
 */
@Test
public void testGetInternalPRDetails() {
  // two regions
  final String[] regionPath = new String[] { getUniqueName() + "-PR-0", getUniqueName() + "-PR-1" };
  // numBuckets config for the two regions
  final int[] numBuckets = new int[] { 100, 90 };
  // redundantCopies config for the two regions
  final int[] redundantCopies = new int[] { 1, 0 };
  // localMaxMemory config to use for three members
  final int[] localMaxMemory = new int[] { 50, 100, 0 };
  // bucketKeys to use for making three buckets in first PR
  final Integer[] bucketKeys = new Integer[] { Integer.valueOf(0), Integer.valueOf(42), Integer.valueOf(76) };
  assertEquals(0, bucketKeys[0].hashCode());
  assertEquals(42, bucketKeys[1].hashCode());
  assertEquals(76, bucketKeys[2].hashCode());
  createRegion(Host.getHost(0).getVM(0), regionPath[0], localMaxMemory[0], numBuckets[0], redundantCopies[0]);
  createRegion(Host.getHost(0).getVM(1), regionPath[0], localMaxMemory[1], numBuckets[0], redundantCopies[0]);
  createRegion(Host.getHost(0).getVM(2), regionPath[0], localMaxMemory[2], numBuckets[0], redundantCopies[0]);
  createRegion(Host.getHost(0).getVM(0), regionPath[1], localMaxMemory[0], numBuckets[1], redundantCopies[1]);
  // 2 MB in size
  final byte[] value = new byte[1024 * 1024 * 2];
  createBuckets(0, regionPath[0], bucketKeys, value);
  // identify the members and their config values
  final InternalDistributedMember[] members = new InternalDistributedMember[3];
  final long[] memberSizes = new long[members.length];
  final int[] memberBucketCounts = new int[members.length];
  final int[] memberPrimaryCounts = new int[members.length];
  for (int i = 0; i < members.length; i++) {
    final int vm = i;
    members[vm] = (InternalDistributedMember) Host.getHost(0).getVM(vm).invoke(new SerializableCallable() {
      public Object call() {
        return getSystem().getDistributedMember();
      }
    });
    memberSizes[vm] = ((Long) Host.getHost(0).getVM(vm).invoke(new SerializableCallable() {
      public Object call() {
        PartitionedRegion pr = (PartitionedRegion) getCache().getRegion(regionPath[0]);
        PartitionedRegionDataStore ds = pr.getDataStore();
        if (ds == null) {
          return Long.valueOf(0);
        } else {
          return Long.valueOf(getSize(ds));
        }
      }
    })).longValue();
    memberBucketCounts[vm] = ((Integer) Host.getHost(0).getVM(vm).invoke(new SerializableCallable() {
      public Object call() {
        PartitionedRegion pr = (PartitionedRegion) getCache().getRegion(regionPath[0]);
        PartitionedRegionDataStore ds = pr.getDataStore();
        if (ds == null) {
          return new Integer(0);
        } else {
          return new Integer(ds.getBucketsManaged());
        }
      }
    })).intValue();
    memberPrimaryCounts[vm] = ((Integer) Host.getHost(0).getVM(vm).invoke(new SerializableCallable() {
      public Object call() {
        PartitionedRegion pr = (PartitionedRegion) getCache().getRegion(regionPath[0]);
        PartitionedRegionDataStore ds = pr.getDataStore();
        if (ds == null) {
          return new Integer(0);
        } else {
          return new Integer(ds.getNumberOfPrimaryBucketsManaged());
        }
      }
    })).intValue();
  }
  // test everything here
  for (int i = 0; i < localMaxMemory.length; i++) {
    final int vm = i;
    Host.getHost(0).getVM(vm).invoke(new SerializableRunnable() {
      public void run() {
        Set<InternalPRInfo> detailsSet = new HashSet<InternalPRInfo>();
        GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
        for (PartitionedRegion pr : cache.getPartitionedRegions()) {
          InternalPRInfo info = pr.getRedundancyProvider().buildPartitionedRegionInfo(true, cache.getInternalResourceManager().getLoadProbe());
          detailsSet.add(info);
        }
        if (vm == 0) {
          assertEquals(2, detailsSet.size());
        } else {
          assertEquals(1, detailsSet.size());
        }
        // iterate over each InternalPRDetails
        for (Iterator<InternalPRInfo> prIter = detailsSet.iterator(); prIter.hasNext(); ) {
          InternalPRInfo details = prIter.next();
          // NOTE: getRegionPath() contains the Region.SEPARATOR + regionPath
          assertTrue("Unknown regionPath=" + details.getRegionPath(), details.getRegionPath().contains(regionPath[0]) || details.getRegionPath().contains(regionPath[1]));
          if (details.getRegionPath().contains(regionPath[0])) {
            assertEquals(numBuckets[0], details.getConfiguredBucketCount());
            assertEquals(0, details.getLowRedundancyBucketCount());
            assertEquals(redundantCopies[0], details.getConfiguredRedundantCopies());
            assertEquals(redundantCopies[0], details.getActualRedundantCopies());
            assertNull(details.getColocatedWith());
            Set<InternalPartitionDetails> memberDetails = details.getInternalPartitionDetails();
            assertNotNull(memberDetails);
            assertEquals(localMaxMemory.length - 1, memberDetails.size());
            // iterate over each InternalPartitionDetails (datastores only)
            for (Iterator<InternalPartitionDetails> mbrIter = memberDetails.iterator(); mbrIter.hasNext(); ) {
              InternalPartitionDetails mbrDetails = mbrIter.next();
              assertNotNull(mbrDetails);
              DistributedMember mbr = mbrDetails.getDistributedMember();
              assertNotNull(mbr);
              int membersIdx = -1;
              for (int idx = 0; idx < members.length; idx++) {
                if (mbr.equals(members[idx])) {
                  membersIdx = idx;
                }
              }
              assertEquals(localMaxMemory[membersIdx] * 1024 * 1024, mbrDetails.getConfiguredMaxMemory());
              assertEquals(memberSizes[membersIdx], mbrDetails.getSize());
              assertEquals(memberBucketCounts[membersIdx], mbrDetails.getBucketCount());
              assertEquals(memberPrimaryCounts[membersIdx], mbrDetails.getPrimaryCount());
              PRLoad load = mbrDetails.getPRLoad();
              assertNotNull(load);
              assertEquals((float) localMaxMemory[membersIdx], load.getWeight(), 0);
              int totalBucketBytes = 0;
              int primaryCount = 0;
              for (int bid = 0; bid < numBuckets[0]; bid++) {
                long bucketBytes = mbrDetails.getBucketSize(bid);
                assertTrue(bucketBytes >= 0);
                totalBucketBytes += bucketBytes;
                // validate against the PRLoad
                assertEquals((float) bucketBytes, load.getReadLoad(bid), 0);
                if (load.getWriteLoad(bid) > 0) {
                  // found a primary
                  primaryCount++;
                }
              }
              // assertIndexDetailsEquals(memberSizes[membersIdx] * (1024* 1024),
              // totalBucketBytes);
              assertEquals(memberPrimaryCounts[membersIdx], primaryCount);
              if (mbr.equals(getSystem().getDistributedMember())) {
                // PartitionMemberDetails represents the local member
                PartitionedRegion pr = (PartitionedRegion) getCache().getRegion(details.getRegionPath());
                assertEquals(pr.getLocalMaxMemory() * (1024L * 1024L), mbrDetails.getConfiguredMaxMemory());
                PartitionedRegionDataStore ds = pr.getDataStore();
                assertNotNull(ds);
                assertEquals(getSize(ds), mbrDetails.getSize());
                assertEquals(ds.getBucketsManaged(), mbrDetails.getBucketCount());
                assertEquals(ds.getNumberOfPrimaryBucketsManaged(), mbrDetails.getPrimaryCount());
              }
            }
          } else {
            // found the other PR which has only one datastore and we know
            // this system memberId is the only entry in mbrDetails
            assertEquals(numBuckets[1], details.getConfiguredBucketCount());
            assertEquals(0, details.getLowRedundancyBucketCount());
            assertEquals(redundantCopies[1], details.getConfiguredRedundantCopies());
            assertEquals(redundantCopies[1], details.getActualRedundantCopies());
            assertNull(details.getColocatedWith());
            Set<PartitionMemberInfo> memberDetails = details.getPartitionMemberInfo();
            assertNotNull(memberDetails);
            assertEquals(1, memberDetails.size());
            PartitionMemberInfo mbrDetails = memberDetails.iterator().next();
            assertEquals(getSystem().getDistributedMember(), mbrDetails.getDistributedMember());
            PartitionedRegion pr = (PartitionedRegion) getCache().getRegion(details.getRegionPath());
            assertEquals(pr.getLocalMaxMemory() * (1024L * 1024L), mbrDetails.getConfiguredMaxMemory());
            PartitionedRegionDataStore ds = pr.getDataStore();
            assertNotNull(ds);
            assertEquals(getSize(ds), mbrDetails.getSize());
            assertEquals(ds.getBucketsManaged(), mbrDetails.getBucketCount());
            assertEquals(ds.getNumberOfPrimaryBucketsManaged(), mbrDetails.getPrimaryCount());
          }
        }
      }
    });
  }
  destroyRegions(0, regionPath);
}
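Stripped of the assertions, the GemFireCacheImpl-specific part of the test above reduces to the following internal-only sequence (a sketch using exactly the calls from the test; InternalPRInfo and the flag passed to buildPartitionedRegionInfo are internal API, not part of the public ResourceManager interface):

GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
for (PartitionedRegion pr : cache.getPartitionedRegions()) {
  // build the internal view of this PR using the resource manager's load probe;
  // the boolean flag and the probe are passed exactly as in the test above
  InternalPRInfo info = pr.getRedundancyProvider()
      .buildPartitionedRegionInfo(true, cache.getInternalResourceManager().getLoadProbe());
  // info exposes bucket counts, redundancy figures and per-member partition details
}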
Use of org.apache.geode.internal.cache.GemFireCacheImpl in project geode by apache.
From the class MemoryThresholdsDUnitTest, method testEventDelivery:
/**
 * Make sure appropriate events are delivered when moving between states.
 *
 * @throws Exception
 */
// GEODE-427: random ports, time sensitive, waitForCriterions
@Category(FlakyTest.class)
@Test
public void testEventDelivery() throws Exception {
  final Host host = Host.getHost(0);
  final VM server1 = host.getVM(0);
  final VM server2 = host.getVM(1);
  final String regionName = "testEventDelivery";
  ServerPorts ports1 = startCacheServer(server1, 0f, 0f, regionName, false, /* createPR */
      false, /* notifyBySubscription */
      0);
  ServerPorts ports2 = startCacheServer(server2, 80f, 90f, regionName, false, /* createPR */
      false, /* notifyBySubscription */
      0);
  registerLoggingTestMemoryThresholdListener(server1);
  registerTestMemoryThresholdListener(server2);
  // NORMAL -> CRITICAL
  server2.invoke(new SerializableCallable("NORMAL->CRITICAL") {
    public Object call() throws Exception {
      GemFireCacheImpl gfCache = (GemFireCacheImpl) getCache();
      getCache().getLoggerI18n().fine(addExpectedExString);
      gfCache.getInternalResourceManager().getHeapMonitor().updateStateAndSendEvent(950);
      getCache().getLoggerI18n().fine(removeExpectedExString);
      return null;
    }
  });
  verifyListenerValue(server2, MemoryState.CRITICAL, 1, true);
  verifyListenerValue(server2, MemoryState.EVICTION, 1, true);
  verifyListenerValue(server2, MemoryState.NORMAL, 0, true);
  // make sure we get two events on remote server
  verifyListenerValue(server1, MemoryState.CRITICAL, 1, true);
  verifyListenerValue(server1, MemoryState.EVICTION, 1, true);
  verifyListenerValue(server1, MemoryState.NORMAL, 0, true);
  // CRITICAL -> EVICTION
  server2.invoke(new SerializableCallable("CRITICAL->EVICTION") {
    public Object call() throws Exception {
      GemFireCacheImpl gfCache = (GemFireCacheImpl) getCache();
      getCache().getLoggerI18n().fine(addExpectedBelow);
      gfCache.getInternalResourceManager().getHeapMonitor().updateStateAndSendEvent(850);
      getCache().getLoggerI18n().fine(removeExpectedBelow);
      return null;
    }
  });
  verifyListenerValue(server2, MemoryState.CRITICAL, 1, true);
  verifyListenerValue(server2, MemoryState.EVICTION, 2, true);
  verifyListenerValue(server2, MemoryState.NORMAL, 0, true);
  verifyListenerValue(server1, MemoryState.CRITICAL, 1, true);
  verifyListenerValue(server1, MemoryState.EVICTION, 2, true);
  verifyListenerValue(server1, MemoryState.NORMAL, 0, true);
  // EVICTION -> EVICTION
  server2.invoke(new SerializableCallable("EVICTION->EVICTION") {
    public Object call() throws Exception {
      GemFireCacheImpl gfCache = (GemFireCacheImpl) getCache();
      gfCache.getInternalResourceManager().getHeapMonitor().updateStateAndSendEvent(840);
      return null;
    }
  });
  verifyListenerValue(server2, MemoryState.CRITICAL, 1, true);
  verifyListenerValue(server2, MemoryState.EVICTION, 2, true);
  verifyListenerValue(server2, MemoryState.NORMAL, 0, true);
  verifyListenerValue(server1, MemoryState.CRITICAL, 1, true);
  verifyListenerValue(server1, MemoryState.EVICTION, 2, true);
  verifyListenerValue(server1, MemoryState.NORMAL, 0, true);
  // EVICTION -> NORMAL
  server2.invoke(new SerializableCallable("EVICTION->NORMAL") {
    public Object call() throws Exception {
      GemFireCacheImpl gfCache = (GemFireCacheImpl) getCache();
      gfCache.getInternalResourceManager().getHeapMonitor().updateStateAndSendEvent(750);
      return null;
    }
  });
  verifyListenerValue(server2, MemoryState.CRITICAL, 1, true);
  verifyListenerValue(server2, MemoryState.EVICTION, 2, true);
  verifyListenerValue(server2, MemoryState.NORMAL, 1, true);
  verifyListenerValue(server1, MemoryState.CRITICAL, 1, true);
  verifyListenerValue(server1, MemoryState.EVICTION, 2, true);
  verifyListenerValue(server1, MemoryState.NORMAL, 1, true);
  LogWriterUtils.getLogWriter().info("before NORMAL->CRITICAL->NORMAL");
  // NORMAL -> CRITICAL -> NORMAL
  server2.invoke(new SerializableCallable("NORMAL->CRITICAL->NORMAL") {
    public Object call() throws Exception {
      GemFireCacheImpl gfCache = (GemFireCacheImpl) getCache();
      gfCache.getInternalResourceManager().getHeapMonitor().updateStateAndSendEvent(950);
      gfCache.getInternalResourceManager().getHeapMonitor().updateStateAndSendEvent(750);
      return null;
    }
  });
  LogWriterUtils.getLogWriter().info("after NORMAL->CRITICAL->NORMAL");
  verifyListenerValue(server2, MemoryState.CRITICAL, 2, true);
  verifyListenerValue(server2, MemoryState.EVICTION, 3, true);
  verifyListenerValue(server2, MemoryState.NORMAL, 2, true);
  verifyListenerValue(server1, MemoryState.CRITICAL, 2, true);
  verifyListenerValue(server1, MemoryState.EVICTION, 3, true);
  verifyListenerValue(server1, MemoryState.NORMAL, 2, true);
  // NORMAL -> EVICTION
  server2.invoke(new SerializableCallable("NORMAL->EVICTION") {
    public Object call() throws Exception {
      GemFireCacheImpl gfCache = (GemFireCacheImpl) getCache();
      gfCache.getInternalResourceManager().getHeapMonitor().updateStateAndSendEvent(850);
      return null;
    }
  });
  verifyListenerValue(server2, MemoryState.CRITICAL, 2, true);
  verifyListenerValue(server2, MemoryState.EVICTION, 4, true);
  verifyListenerValue(server2, MemoryState.NORMAL, 2, true);
  verifyListenerValue(server1, MemoryState.CRITICAL, 2, true);
  verifyListenerValue(server1, MemoryState.EVICTION, 4, true);
  verifyListenerValue(server1, MemoryState.NORMAL, 2, true);
}
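The byte values passed to updateStateAndSendEvent only make sense against the artificial heap configured by startCacheServer (shown next): the heap monitor's maximum is pinned to 1000 bytes, and server2 was started with an 80% eviction and 90% critical threshold. Under those assumptions the transitions above work out as:

// eviction threshold: 80% of 1000 test bytes = 800 bytes
// critical threshold: 90% of 1000 test bytes = 900 bytes
// updateStateAndSendEvent(950) -> usage above 900 -> CRITICAL
// updateStateAndSendEvent(850) -> usage between 800 and 900 -> EVICTION
// updateStateAndSendEvent(840) -> still in the eviction band, no state change
// updateStateAndSendEvent(750) -> usage below 800 -> NORMAL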
Use of org.apache.geode.internal.cache.GemFireCacheImpl in project geode by apache.
From the class MemoryThresholdsDUnitTest, method startCacheServer:
/**
 * Starts up a CacheServer.
 *
 * @return a {@link ServerPorts} containing the CacheServer ports.
 */
private ServerPorts startCacheServer(VM server, final float evictionThreshold, final float criticalThreshold, final String regionName, final boolean createPR, final boolean notifyBySubscription, final int prRedundancy) throws Exception {
  return (ServerPorts) server.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      getSystem(getServerProperties());
      GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
      InternalResourceManager irm = cache.getInternalResourceManager();
      HeapMemoryMonitor hmm = irm.getHeapMonitor();
      hmm.setTestMaxMemoryBytes(1000);
      HeapMemoryMonitor.setTestBytesUsedForThresholdSet(500);
      irm.setEvictionHeapPercentage(evictionThreshold);
      irm.setCriticalHeapPercentage(criticalThreshold);
      AttributesFactory factory = new AttributesFactory();
      if (createPR) {
        PartitionAttributesFactory paf = new PartitionAttributesFactory();
        paf.setRedundantCopies(prRedundancy);
        paf.setTotalNumBuckets(11);
        factory.setPartitionAttributes(paf.create());
      } else {
        factory.setScope(Scope.DISTRIBUTED_ACK);
        factory.setDataPolicy(DataPolicy.REPLICATE);
      }
      Region region = createRegion(regionName, factory.create());
      if (createPR) {
        assertTrue(region instanceof PartitionedRegion);
      } else {
        assertTrue(region instanceof DistributedRegion);
      }
      CacheServer cacheServer = getCache().addCacheServer();
      int port = AvailablePortHelper.getRandomAvailableTCPPorts(1)[0];
      cacheServer.setPort(port);
      cacheServer.setNotifyBySubscription(notifyBySubscription);
      cacheServer.start();
      return new ServerPorts(port);
    }
  });
}
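setTestMaxMemoryBytes and setTestBytesUsedForThresholdSet appear to be test hooks that let the suite cross thresholds with small, deterministic byte counts instead of real heap usage. A hedged example of calling this helper, mirroring how testEventDelivery above uses it:

// Example invocation matching testEventDelivery: 80% eviction / 90% critical thresholds,
// replicated region (createPR = false), no subscription notification, no PR redundancy.
ServerPorts ports = startCacheServer(server2, 80f, 90f, regionName, false, /* createPR */
    false, /* notifyBySubscription */
    0);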
Use of org.apache.geode.internal.cache.GemFireCacheImpl in project geode by apache.
From the class MemoryThresholdsDUnitTest, method prRemotePutRejection:
private void prRemotePutRejection(boolean cacheClose, boolean localDestroy, final boolean useTx) throws Exception {
  final Host host = Host.getHost(0);
  final VM accessor = host.getVM(0);
  final VM server1 = host.getVM(1);
  final VM server2 = host.getVM(2);
  final VM server3 = host.getVM(3);
  final String regionName = "testPrRejection";
  final int redundancy = 1;
  final ServerPorts ports1 = startCacheServer(server1, 80f, 90f, regionName, true, /* createPR */
      false, /* notifyBySubscription */
      redundancy);
  ServerPorts ports2 = startCacheServer(server2, 80f, 90f, regionName, true, /* createPR */
      false, /* notifyBySubscription */
      redundancy);
  ServerPorts ports3 = startCacheServer(server3, 80f, 90f, regionName, true, /* createPR */
      false, /* notifyBySubscription */
      redundancy);
  accessor.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      getSystem(getServerProperties());
      getCache();
      AttributesFactory factory = new AttributesFactory();
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(redundancy);
      paf.setLocalMaxMemory(0);
      paf.setTotalNumBuckets(11);
      factory.setPartitionAttributes(paf.create());
      createRegion(regionName, factory.create());
      return null;
    }
  });
  doPuts(accessor, regionName, false, false);
  final Range r1 = Range.DEFAULT;
  doPutAlls(accessor, regionName, false, false, r1);
  SerializableCallable getMyId = new SerializableCallable() {
    public Object call() throws Exception {
      return ((GemFireCacheImpl) getCache()).getMyId();
    }
  };
  final DistributedMember server1Id = (DistributedMember) server1.invoke(getMyId);
  setUsageAboveCriticalThreshold(server1);
  accessor.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      final PartitionedRegion pr = (PartitionedRegion) getRootRegion().getSubregion(regionName);
      final String regionPath = getRootRegion().getSubregion(regionName).getFullPath();
      // server1 is sick, look for a key on server1, and attempt put again
      WaitCriterion wc = new WaitCriterion() {
        public String description() {
          return "remote bucket not marked sick";
        }
        public boolean done() {
          boolean keyFoundOnSickMember = false;
          boolean caughtException = false;
          for (int i = 0; i < 20; i++) {
            Integer key = Integer.valueOf(i);
            int hKey = PartitionedRegionHelper.getHashKey(pr, null, key, null, null);
            Set<InternalDistributedMember> owners = pr.getRegionAdvisor().getBucketOwners(hKey);
            if (owners.contains(server1Id)) {
              keyFoundOnSickMember = true;
              try {
                if (useTx)
                  getCache().getCacheTransactionManager().begin();
                pr.getCache().getLogger().fine("SWAP:putting in tx:" + useTx);
                pr.put(key, "value");
                if (useTx)
                  getCache().getCacheTransactionManager().commit();
              } catch (LowMemoryException ex) {
                caughtException = true;
                if (useTx)
                  getCache().getCacheTransactionManager().rollback();
              }
            } else {
              // puts on healthy member should continue
              pr.put(key, "value");
            }
          }
          return keyFoundOnSickMember && caughtException;
        }
      };
      Wait.waitForCriterion(wc, 30000, 10, true);
      return null;
    }
  });
  {
    Range r2 = new Range(r1, r1.width() + 1);
    doPutAlls(accessor, regionName, false, true, r2);
  }
  if (localDestroy) {
    // local destroy the region on sick member
    server1.invoke(new SerializableCallable("local destroy sick member") {
      public Object call() throws Exception {
        Region r = getRootRegion().getSubregion(regionName);
        LogWriterUtils.getLogWriter().info("PRLocalDestroy");
        r.localDestroyRegion();
        return null;
      }
    });
  } else if (cacheClose) {
    // close cache on sick member
    server1.invoke(new SerializableCallable("close cache sick member") {
      public Object call() throws Exception {
        getCache().close();
        return null;
      }
    });
  } else {
    setUsageBelowEviction(server1);
  }
  // do put all in a loop to allow distribution of message
  accessor.invoke(new SerializableCallable("Put in a loop") {
    public Object call() throws Exception {
      final Region r = getRootRegion().getSubregion(regionName);
      WaitCriterion wc = new WaitCriterion() {
        public String description() {
          return "pr should have gone un-critical";
        }
        public boolean done() {
          boolean done = true;
          for (int i = 0; i < 20; i++) {
            try {
              r.put(i, "value");
            } catch (LowMemoryException e) {
              // expected
              done = false;
            }
          }
          return done;
        }
      };
      Wait.waitForCriterion(wc, 30000, 10, true);
      return null;
    }
  });
  doPutAlls(accessor, regionName, false, false, r1);
}