use of org.apache.geode.internal.cache.DistributedRegion in project geode by apache.
the class SerialGatewaySenderEventProcessor method enqueueEvent.
/**
* Add the input object to the event queue
*/
@Override
public void enqueueEvent(EnumListenerEvent operation, EntryEvent event, Object substituteValue) throws IOException, CacheException {
// There is a case where the event is serialized for processing. The region is not
// serialized along with the event since it is a transient field. I created an
// intermediate object (GatewayEventImpl) to avoid this since the region name is
// used in the sendBatch method, and it can't be null. See EntryEventImpl for details.
GatewaySenderEventImpl senderEvent = null;
boolean isPrimary = sender.isPrimary();
if (!isPrimary) {
// Re-check the primary flag under the lock so that this sender does not fail
// over while we're processing an event as a secondaryEvent.
synchronized (unprocessedEventsLock) {
// Test whether this gateway is the primary.
if (sender.isPrimary()) {
isPrimary = true;
} else {
// If it is not, create an uninitialized GatewayEventImpl and
// put it into the map of unprocessed events.
senderEvent = new GatewaySenderEventImpl(operation, event, substituteValue, false); // OFFHEAP ok
handleSecondaryEvent(senderEvent);
}
}
}
if (isPrimary) {
Region region = event.getRegion();
boolean isPDXRegion = (region instanceof DistributedRegion && region.getName().equals(PeerTypeRegistration.REGION_NAME));
if (!isPDXRegion) {
waitForFailoverCompletion();
}
// If it is, create and enqueue an initialized GatewayEventImpl
senderEvent = new GatewaySenderEventImpl(operation, event, substituteValue); // OFFHEAP ok
boolean queuedEvent = false;
try {
queuedEvent = queuePrimaryEvent(senderEvent);
} finally {
// If the event was not queued, release it here; otherwise it is released later,
// when the event is accessed through the region queue.
if (!queuedEvent) {
GatewaySenderEventImpl.release(senderEvent);
}
}
}
}
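The finally block above guards against leaking the off-heap memory held by the event when the enqueue fails. A minimal standalone sketch of that release-on-failure pattern, assuming a hypothetical queue with a boolean put method (only GatewaySenderEventImpl and its release call are taken from the snippet above):
GatewaySenderEventImpl ev = new GatewaySenderEventImpl(operation, event, substituteValue);
boolean queued = false;
try {
  // hypothetical enqueue call; queuePrimaryEvent plays this role in the real processor
  queued = queue.put(ev);
} finally {
  if (!queued) {
    // the event never reached the queue, so nothing else will ever release it
    GatewaySenderEventImpl.release(ev);
  }
}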
use of org.apache.geode.internal.cache.DistributedRegion in project geode by apache.
the class MemoryThresholdsDUnitTest method testDRLoadRejection.
/**
* Test that DistributedRegion cacheLoader and netLoad are passed through to the calling thread if
* the local VM is in a critical state. Once the VM has moved to a safe state, test that they are
* allowed.
*
* @throws Exception
*/
@Test
public void testDRLoadRejection() throws Exception {
final Host host = Host.getHost(0);
final VM replicate1 = host.getVM(2);
final VM replicate2 = host.getVM(3);
final String rName = getUniqueName();
final float criticalHeapThresh = 0.90f;
final int fakeHeapMaxSize = 1000;
// Make sure the desired VMs will have a fresh DS.
AsyncInvocation d1 = replicate1.invokeAsync(() -> disconnectFromDS());
AsyncInvocation d2 = replicate2.invokeAsync(() -> disconnectFromDS());
d1.join();
assertFalse(d1.exceptionOccurred());
d2.join();
assertFalse(d2.exceptionOccurred());
CacheSerializableRunnable establishConnectivity = new CacheSerializableRunnable("establishcConnectivity") {
@Override
public void run2() throws CacheException {
getSystem();
}
};
replicate1.invoke(establishConnectivity);
replicate2.invoke(establishConnectivity);
CacheSerializableRunnable createRegion = new CacheSerializableRunnable("create DistributedRegion") {
@Override
public void run2() throws CacheException {
// Assert some level of connectivity
InternalDistributedSystem ds = getSystem();
assertTrue(ds.getDistributionManager().getNormalDistributionManagerIds().size() >= 1);
final long fakeHeapUsage = Math.round(fakeHeapMaxSize * (criticalHeapThresh - 0.5f)); // below critical by 50%
InternalResourceManager irm = (InternalResourceManager) getCache().getResourceManager();
HeapMemoryMonitor hmm = irm.getHeapMonitor();
assertTrue(fakeHeapMaxSize > 0);
hmm.setTestMaxMemoryBytes(fakeHeapMaxSize);
HeapMemoryMonitor.setTestBytesUsedForThresholdSet(fakeHeapUsage);
irm.setCriticalHeapPercentage((criticalHeapThresh * 100.0f));
AttributesFactory<Integer, String> af = new AttributesFactory<Integer, String>();
af.setScope(Scope.DISTRIBUTED_ACK);
af.setDataPolicy(DataPolicy.REPLICATE);
getCache().createRegion(rName, af.create());
}
};
replicate1.invoke(createRegion);
replicate2.invoke(createRegion);
replicate1.invoke(addExpectedException);
replicate2.invoke(addExpectedException);
final Integer expected = (Integer) replicate1.invoke(new SerializableCallable("test Local DistributedRegion Load") {
public Object call() throws Exception {
Region<Integer, String> r = getCache().getRegion(rName);
AttributesMutator<Integer, String> am = r.getAttributesMutator();
am.setCacheLoader(new CacheLoader<Integer, String>() {
final AtomicInteger numLoaderInvocations = new AtomicInteger();
public String load(LoaderHelper<Integer, String> helper) throws CacheLoaderException {
Integer expectedInvocations = (Integer) helper.getArgument();
final int actualInvocations = this.numLoaderInvocations.getAndIncrement();
if (expectedInvocations.intValue() != actualInvocations) {
throw new CacheLoaderException("Expected " + expectedInvocations + " invocations, actual is " + actualInvocations);
}
return helper.getKey().toString();
}
public void close() {
}
});
int expectedInvocations = 0;
HeapMemoryMonitor hmm = ((InternalResourceManager) getCache().getResourceManager()).getHeapMonitor();
assertFalse(hmm.getState().isCritical());
{
Integer k = new Integer(1);
assertEquals(k.toString(), r.get(k, new Integer(expectedInvocations++)));
}
long newfakeHeapUsage = Math.round(fakeHeapMaxSize * (criticalHeapThresh + 0.1f)); // usage above critical by 10%
assertTrue(newfakeHeapUsage > 0);
assertTrue(newfakeHeapUsage <= fakeHeapMaxSize);
hmm.updateStateAndSendEvent(newfakeHeapUsage);
assertTrue(hmm.getState().isCritical());
{
Integer k = new Integer(2);
assertEquals(k.toString(), r.get(k, new Integer(expectedInvocations++)));
}
newfakeHeapUsage = Math.round(fakeHeapMaxSize * (criticalHeapThresh - 0.3f)); // below critical by 30%
assertTrue(fakeHeapMaxSize > 0);
getCache().getLoggerI18n().fine(addExpectedBelow);
hmm.updateStateAndSendEvent(newfakeHeapUsage);
getCache().getLoggerI18n().fine(removeExpectedBelow);
assertFalse(hmm.getState().isCritical());
{
Integer k = new Integer(3);
assertEquals(k.toString(), r.get(k, new Integer(expectedInvocations++)));
}
return new Integer(expectedInvocations);
}
});
final CacheSerializableRunnable validateData1 = new CacheSerializableRunnable("Validate data 1") {
@Override
public void run2() throws CacheException {
Region<Integer, String> r = getCache().getRegion(rName);
Integer i1 = new Integer(1);
assertTrue(r.containsKey(i1));
assertNotNull(r.getEntry(i1));
Integer i2 = new Integer(2);
assertFalse(r.containsKey(i2));
assertNull(r.getEntry(i2));
Integer i3 = new Integer(3);
assertTrue(r.containsKey(i3));
assertNotNull(r.getEntry(i3));
}
};
replicate1.invoke(validateData1);
replicate2.invoke(validateData1);
replicate2.invoke(new SerializableCallable("test DistributedRegion netLoad") {
public Object call() throws Exception {
Region<Integer, String> r = getCache().getRegion(rName);
HeapMemoryMonitor hmm = ((InternalResourceManager) getCache().getResourceManager()).getHeapMonitor();
assertFalse(hmm.getState().isCritical());
int expectedInvocations = expected.intValue();
{
Integer k = new Integer(4);
assertEquals(k.toString(), r.get(k, new Integer(expectedInvocations++)));
assertFalse(hmm.getState().isCritical());
assertTrue(r.containsKey(k));
}
// Place in a critical state for the next test
long newfakeHeapUsage = Math.round(fakeHeapMaxSize * (criticalHeapThresh + 0.1f)); // usage above critical by 10%
assertTrue(newfakeHeapUsage > 0);
assertTrue(newfakeHeapUsage <= fakeHeapMaxSize);
hmm.updateStateAndSendEvent(newfakeHeapUsage);
assertTrue(hmm.getState().isCritical());
{
Integer k = new Integer(5);
assertEquals(k.toString(), r.get(k, new Integer(expectedInvocations++)));
assertTrue(hmm.getState().isCritical());
assertFalse(r.containsKey(k));
}
newfakeHeapUsage = Math.round(fakeHeapMaxSize * (criticalHeapThresh - 0.3f)); // below critical by 30%
assertTrue(fakeHeapMaxSize > 0);
getCache().getLoggerI18n().fine(addExpectedBelow);
hmm.updateStateAndSendEvent(newfakeHeapUsage);
getCache().getLoggerI18n().fine(removeExpectedBelow);
assertFalse(hmm.getState().isCritical());
{
Integer k = new Integer(6);
assertEquals(k.toString(), r.get(k, new Integer(expectedInvocations++)));
assertFalse(hmm.getState().isCritical());
assertTrue(r.containsKey(k));
}
return new Integer(expectedInvocations);
}
});
replicate1.invoke(removeExpectedException);
replicate2.invoke(removeExpectedException);
final CacheSerializableRunnable validateData2 = new CacheSerializableRunnable("Validate data 2") {
@Override
public void run2() throws CacheException {
Region<Integer, String> r = getCache().getRegion(rName);
Integer i4 = new Integer(4);
assertTrue(r.containsKey(i4));
assertNotNull(r.getEntry(i4));
Integer i5 = new Integer(5);
assertFalse(r.containsKey(i5));
assertNull(r.getEntry(i5));
Integer i6 = new Integer(6);
assertTrue(r.containsKey(i6));
assertNotNull(r.getEntry(i6));
}
};
replicate1.invoke(validateData2);
replicate2.invoke(validateData2);
}
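The heap-threshold choreography in the test above boils down to a few HeapMemoryMonitor calls. A condensed sketch, using only calls that appear in the test itself (the concrete byte values are illustrative):
InternalResourceManager irm = (InternalResourceManager) getCache().getResourceManager();
HeapMemoryMonitor hmm = irm.getHeapMonitor();
hmm.setTestMaxMemoryBytes(1000);                        // pretend the heap max is 1000 bytes
HeapMemoryMonitor.setTestBytesUsedForThresholdSet(400); // start well below critical
irm.setCriticalHeapPercentage(90f);                     // critical at 90% of the fake max
hmm.updateStateAndSendEvent(950);                       // 95% used -> critical
assertTrue(hmm.getState().isCritical());
hmm.updateStateAndSendEvent(600);                       // 60% used -> back to normal
assertFalse(hmm.getState().isCritical());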
use of org.apache.geode.internal.cache.DistributedRegion in project geode by apache.
the class MemoryThresholdsDUnitTest method doDistributedRegionRemotePutRejection.
/**
* test that puts in a server are rejected when a remote VM crosses critical threshold
*
* @throws Exception
*/
private void doDistributedRegionRemotePutRejection(boolean localDestroy, boolean cacheClose) throws Exception {
final Host host = Host.getHost(0);
final VM server1 = host.getVM(0);
final VM server2 = host.getVM(1);
final String regionName = "rejectRemoteOp";
ServerPorts ports1 = startCacheServer(server1, 0f, 0f, regionName, false, /* createPR */
false, /* notifyBySubscription */
0);
ServerPorts ports2 = startCacheServer(server2, 0f, 90f, regionName, false, /* createPR */
false, /* notifyBySubscription */
0);
registerTestMemoryThresholdListener(server1);
registerTestMemoryThresholdListener(server2);
doPuts(server1, regionName, false, /* catchRejectedException */
false);
doPutAlls(server1, regionName, false, /* catchRejectedException */
false, /* catchLowMemoryException */
Range.DEFAULT);
// make server2 critical
setUsageAboveCriticalThreshold(server2);
verifyListenerValue(server1, MemoryState.CRITICAL, 1, true);
verifyListenerValue(server2, MemoryState.CRITICAL, 1, false);
// make sure that local server1 puts are rejected
doPuts(server1, regionName, false, /* catchRejectedException */
true);
Range r1 = new Range(Range.DEFAULT, Range.DEFAULT.width() + 1);
doPutAlls(server1, regionName, false, /* catchRejectedException */
true, /* catchLowMemoryException */
r1);
if (localDestroy) {
// local destroy the region on sick member
server2.invoke(new SerializableCallable("local destroy") {
public Object call() throws Exception {
Region r = getRootRegion().getSubregion(regionName);
r.localDestroyRegion();
return null;
}
});
} else if (cacheClose) {
server2.invoke(new SerializableCallable() {
public Object call() throws Exception {
getCache().close();
return null;
}
});
} else {
setUsageBelowEviction(server2);
}
// wait for remote region destroyed message to be processed
server1.invoke(new SerializableCallable() {
public Object call() throws Exception {
WaitCriterion wc = new WaitCriterion() {
public String description() {
return "remote localRegionDestroyed message not received";
}
public boolean done() {
DistributedRegion dr = (DistributedRegion) getRootRegion().getSubregion(regionName);
return dr.getMemoryThresholdReachedMembers().size() == 0;
}
};
Wait.waitForCriterion(wc, 30000, 10, true);
return null;
}
});
// make sure puts succeed
doPuts(server1, regionName, false, /* catchRejectedException */
false);
Range r2 = new Range(r1, r1.width() + 1);
doPutAlls(server1, regionName, false, /* catchRejectedException */
false, /* catchLowMemoryException */
r2);
}
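The two boolean parameters select how the sick member recovers: a local region destroy, a cache close, or (when both are false) simply dropping back below the eviction threshold. A hedged usage sketch; the individual calls below are illustrative, not taken from the source:
doDistributedRegionRemotePutRejection(true, false);   // sick member locally destroys the region
doDistributedRegionRemotePutRejection(false, true);   // sick member closes its cache
doDistributedRegionRemotePutRejection(false, false);  // sick member's usage drops below eviction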
use of org.apache.geode.internal.cache.DistributedRegion in project geode by apache.
the class MemoryThresholdsOffHeapDUnitTest method startCacheServer.
private int startCacheServer(VM server, final float evictionThreshold, final float criticalThreshold, final String regionName, final boolean createPR, final boolean notifyBySubscription, final int prRedundancy) throws Exception {
return (Integer) server.invoke(new SerializableCallable() {
public Object call() throws Exception {
getSystem(getOffHeapProperties());
GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
InternalResourceManager irm = cache.getInternalResourceManager();
irm.setEvictionOffHeapPercentage(evictionThreshold);
irm.setCriticalOffHeapPercentage(criticalThreshold);
AttributesFactory factory = new AttributesFactory();
if (createPR) {
PartitionAttributesFactory paf = new PartitionAttributesFactory();
paf.setRedundantCopies(prRedundancy);
paf.setTotalNumBuckets(11);
factory.setPartitionAttributes(paf.create());
factory.setOffHeap(true);
} else {
factory.setScope(Scope.DISTRIBUTED_ACK);
factory.setDataPolicy(DataPolicy.REPLICATE);
factory.setOffHeap(true);
}
Region region = createRegion(regionName, factory.create());
if (createPR) {
assertTrue(region instanceof PartitionedRegion);
} else {
assertTrue(region instanceof DistributedRegion);
}
CacheServer cacheServer = getCache().addCacheServer();
cacheServer.setPort(0);
cacheServer.setNotifyBySubscription(notifyBySubscription);
cacheServer.start();
return cacheServer.getPort();
}
});
}
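A hedged usage sketch for the helper above, showing how the createPR flag switches between a replicated off-heap region and a partitioned one; the VM variables and region name are illustrative:
// replicated region, no eviction threshold, critical at 90% off-heap
int replicatePort = startCacheServer(server1, 0f, 90f, "offHeapRegion",
    false /* createPR */, false /* notifyBySubscription */, 0);
// partitioned region with one redundant copy, eviction at 80%, critical at 90%
int prPort = startCacheServer(server2, 80f, 90f, "offHeapRegion",
    true /* createPR */, false /* notifyBySubscription */, 1);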
use of org.apache.geode.internal.cache.DistributedRegion in project geode by apache.
the class MemoryThresholdsOffHeapDUnitTest method testDRLoadRejection.
/**
* Test that DistributedRegion cacheLoader and netLoad are passed through to the calling thread if
* the local VM is in a critical state. Once the VM has moved to a safe state, test that they are
* allowed.
*/
// GEODE-438: test pollution, async actions, time sensitive, waitForCriterion,
// TODO: consider disconnect DS in setup
@Category(FlakyTest.class)
@Test
public void testDRLoadRejection() throws Exception {
final Host host = Host.getHost(0);
final VM replicate1 = host.getVM(1);
final VM replicate2 = host.getVM(2);
final String rName = getUniqueName();
// Make sure the desired VMs will have a fresh DS.
AsyncInvocation d1 = replicate1.invokeAsync(() -> disconnectFromDS());
AsyncInvocation d2 = replicate2.invokeAsync(() -> disconnectFromDS());
d1.join();
assertFalse(d1.exceptionOccurred());
d2.join();
assertFalse(d2.exceptionOccurred());
CacheSerializableRunnable establishConnectivity = new CacheSerializableRunnable("establishcConnectivity") {
@SuppressWarnings("synthetic-access")
@Override
public void run2() throws CacheException {
getSystem(getOffHeapProperties());
}
};
replicate1.invoke(establishConnectivity);
replicate2.invoke(establishConnectivity);
CacheSerializableRunnable createRegion = new CacheSerializableRunnable("create DistributedRegion") {
@Override
public void run2() throws CacheException {
// Assert some level of connectivity
InternalDistributedSystem ds = getSystem(getOffHeapProperties());
assertTrue(ds.getDistributionManager().getNormalDistributionManagerIds().size() >= 1);
InternalResourceManager irm = (InternalResourceManager) getCache().getResourceManager();
irm.setCriticalOffHeapPercentage(90f);
AttributesFactory af = new AttributesFactory();
af.setScope(Scope.DISTRIBUTED_ACK);
af.setDataPolicy(DataPolicy.REPLICATE);
af.setOffHeap(true);
Region region = getCache().createRegion(rName, af.create());
}
};
replicate1.invoke(createRegion);
replicate2.invoke(createRegion);
replicate1.invoke(addExpectedException);
replicate2.invoke(addExpectedException);
final Integer expected = (Integer) replicate1.invoke(new SerializableCallable("test Local DistributedRegion Load") {
public Object call() throws Exception {
final DistributedRegion r = (DistributedRegion) getCache().getRegion(rName);
AttributesMutator<Integer, String> am = r.getAttributesMutator();
am.setCacheLoader(new CacheLoader<Integer, String>() {
final AtomicInteger numLoaderInvocations = new AtomicInteger(0);
public String load(LoaderHelper<Integer, String> helper) throws CacheLoaderException {
Integer expectedInvocations = (Integer) helper.getArgument();
final int actualInvocations = this.numLoaderInvocations.getAndIncrement();
if (expectedInvocations.intValue() != actualInvocations) {
throw new CacheLoaderException("Expected " + expectedInvocations + " invocations, actual is " + actualInvocations);
}
return helper.getKey().toString();
}
public void close() {
}
});
int expectedInvocations = 0;
final OffHeapMemoryMonitor ohmm = ((InternalResourceManager) getCache().getResourceManager()).getOffHeapMonitor();
assertFalse(ohmm.getState().isCritical());
{
Integer k = new Integer(1);
assertEquals(k.toString(), r.get(k, new Integer(expectedInvocations++)));
}
r.put("oh1", new byte[838860]);
r.put("oh3", new byte[157287]);
WaitCriterion wc = new WaitCriterion() {
public String description() {
return "expected region " + r + " to set memoryThreshold";
}
public boolean done() {
return r.memoryThresholdReached.get();
}
};
Wait.waitForCriterion(wc, 30 * 1000, 10, true);
{
Integer k = new Integer(2);
assertEquals(k.toString(), r.get(k, new Integer(expectedInvocations++)));
}
r.destroy("oh3");
wc = new WaitCriterion() {
public String description() {
return "expected region " + r + " to unset memoryThreshold";
}
public boolean done() {
return !r.memoryThresholdReached.get();
}
};
Wait.waitForCriterion(wc, 30 * 1000, 10, true);
{
Integer k = new Integer(3);
assertEquals(k.toString(), r.get(k, new Integer(expectedInvocations++)));
}
return new Integer(expectedInvocations);
}
});
final CacheSerializableRunnable validateData1 = new CacheSerializableRunnable("Validate data 1") {
@Override
public void run2() throws CacheException {
Region r = getCache().getRegion(rName);
Integer i1 = new Integer(1);
assertTrue(r.containsKey(i1));
assertNotNull(r.getEntry(i1));
Integer i2 = new Integer(2);
assertFalse(r.containsKey(i2));
assertNull(r.getEntry(i2));
Integer i3 = new Integer(3);
assertTrue(r.containsKey(i3));
assertNotNull(r.getEntry(i3));
}
};
replicate1.invoke(validateData1);
replicate2.invoke(validateData1);
replicate2.invoke(new SerializableCallable("test DistributedRegion netLoad") {
public Object call() throws Exception {
final DistributedRegion r = (DistributedRegion) getCache().getRegion(rName);
final OffHeapMemoryMonitor ohmm = ((InternalResourceManager) getCache().getResourceManager()).getOffHeapMonitor();
assertFalse(ohmm.getState().isCritical());
int expectedInvocations = expected.intValue();
{
Integer k = new Integer(4);
assertEquals(k.toString(), r.get(k, new Integer(expectedInvocations++)));
}
// Place in a critical state for the next test
r.put("oh3", new byte[157287]);
WaitCriterion wc = new WaitCriterion() {
public String description() {
return "expected region " + r + " to set memoryThreshold";
}
public boolean done() {
return r.memoryThresholdReached.get();
}
};
Wait.waitForCriterion(wc, 30 * 1000, 10, true);
{
Integer k = new Integer(5);
assertEquals(k.toString(), r.get(k, new Integer(expectedInvocations++)));
}
r.destroy("oh3");
wc = new WaitCriterion() {
public String description() {
return "expected region " + r + " to unset memoryThreshold";
}
public boolean done() {
return !r.memoryThresholdReached.get();
}
};
Wait.waitForCriterion(wc, 30 * 1000, 10, true);
{
Integer k = new Integer(6);
assertEquals(k.toString(), r.get(k, new Integer(expectedInvocations++)));
}
return new Integer(expectedInvocations);
}
});
replicate1.invoke(removeExpectedException);
replicate2.invoke(removeExpectedException);
final CacheSerializableRunnable validateData2 = new CacheSerializableRunnable("Validate data 2") {
@Override
public void run2() throws CacheException {
Region<Integer, String> r = getCache().getRegion(rName);
Integer i4 = new Integer(4);
assertTrue(r.containsKey(i4));
assertNotNull(r.getEntry(i4));
Integer i5 = new Integer(5);
assertFalse(r.containsKey(i5));
assertNull(r.getEntry(i5));
Integer i6 = new Integer(6);
assertTrue(r.containsKey(i6));
assertNotNull(r.getEntry(i6));
}
};
replicate1.invoke(validateData2);
replicate2.invoke(validateData2);
}
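Both callables above repeat the same poll-until-flag pattern on DistributedRegion.memoryThresholdReached. A small helper sketch that captures it, assuming the same Wait and WaitCriterion test utilities used in the snippet (the helper name itself is illustrative):
private static void waitForMemoryThreshold(final DistributedRegion r, final boolean expected) {
  WaitCriterion wc = new WaitCriterion() {
    public String description() {
      return "expected memoryThresholdReached == " + expected + " on region " + r;
    }
    public boolean done() {
      return r.memoryThresholdReached.get() == expected;
    }
  };
  Wait.waitForCriterion(wc, 30 * 1000, 10, true); // 30s timeout, 10ms poll, throw on failure
}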