Use of org.apache.geode.test.dunit.SerializableRunnable in project geode by apache.
Example from the class SearchAndLoadDUnitTest, method testOneHopNetWriteRemoteWriter.
/** same as the previous test but the cache writer is in a third, non-replicated, vm */
@Test
public void testOneHopNetWriteRemoteWriter() throws CacheException, InterruptedException {
Host host = Host.getHost(0);
VM vm0 = host.getVM(0);
VM vm1 = host.getVM(1);
VM vm2 = host.getVM(2);
final String name = this.getUniqueName() + "Region";
final String objectName = "Object7";
final Integer value = new Integer(483);
final Integer updateValue = new Integer(484);
vm0.invoke(new SerializableRunnable("Create replicate Region") {
public void run() {
try {
AttributesFactory factory = new AttributesFactory();
factory.setScope(Scope.DISTRIBUTED_ACK);
factory.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
createRegion(name, factory.create());
} catch (CacheException ex) {
Assert.fail("While creating empty region", ex);
}
}
});
vm1.invoke(new SerializableRunnable("Create empty Region") {
public void run() {
try {
AttributesFactory factory = new AttributesFactory();
factory.setScope(Scope.DISTRIBUTED_ACK);
factory.setDataPolicy(DataPolicy.EMPTY);
createRegion(name, factory.create());
} catch (CacheException ex) {
Assert.fail("While creating empty region", ex);
}
}
});
vm2.invoke(new SerializableRunnable("Create empty region with cacheWriter") {
public void run() {
netWriteInvoked = false;
operationWasCreate = false;
originWasRemote = false;
writerInvocationCount = 0;
try {
AttributesFactory factory = new AttributesFactory();
factory.setScope(Scope.DISTRIBUTED_ACK);
factory.setDataPolicy(DataPolicy.EMPTY);
factory.setCacheWriter(new CacheWriter() {
public void beforeCreate(EntryEvent e) throws CacheWriterException {
e.getRegion().getCache().getLogger().info("cache writer beforeCreate invoked for " + e);
netWriteInvoked = true;
operationWasCreate = true;
originWasRemote = e.isOriginRemote();
writerInvocationCount++;
return;
}
public void beforeUpdate(EntryEvent e) throws CacheWriterException {
e.getRegion().getCache().getLogger().info("cache writer beforeUpdate invoked for " + e);
netWriteInvoked = true;
operationWasCreate = false;
originWasRemote = e.isOriginRemote();
writerInvocationCount++;
return;
}
public void beforeDestroy(EntryEvent e) throws CacheWriterException {
}
public void beforeRegionDestroy(RegionEvent e) throws CacheWriterException {
}
public void beforeRegionClear(RegionEvent e) throws CacheWriterException {
}
public void close() {
}
});
createRegion(name, factory.create());
} catch (CacheException ex) {
Assert.fail("While creating replicated region", ex);
}
}
});
vm1.invoke(new SerializableRunnable("do a put that should be proxied in the other vm and invoke its cache writer") {
public void run() {
try {
getRootRegion().getSubregion(name).put(objectName, value);
} catch (CacheWriterException cwe) {
// ignored; the writer's behavior is verified in vm2 in the next step
} catch (TimeoutException te) {
// ignored
}
}
});
vm2.invoke(new SerializableRunnable("ensure that cache writer was invoked with correct settings in event") {
public void run() {
assertTrue("expected cache writer to be invoked", netWriteInvoked);
assertTrue("expected originRemote to be true", originWasRemote);
assertTrue("expected event to be create", operationWasCreate);
assertEquals("expected only one cache writer invocation", 1, writerInvocationCount);
// reset the flags for the next step, which updates the same key
netWriteInvoked = false;
writerInvocationCount = 0;
}
});
vm1.invoke(new SerializableRunnable("do an update that should be proxied in the other vm and invoke its cache writer") {
public void run() {
try {
getRootRegion().getSubregion(name).put(objectName, updateValue);
} catch (CacheWriterException cwe) {
// ignored; the writer's behavior is verified in vm2 in the next step
} catch (TimeoutException te) {
// ignored
}
}
});
vm2.invoke(new SerializableRunnable("ensure that cache writer was invoked with correct settings in event") {
public void run() {
assertTrue("expected cache writer to be invoked", netWriteInvoked);
assertTrue("expected originRemote to be true", originWasRemote);
assertTrue("expected event to be create", operationWasCreate);
assertEquals("expected only one cache writer invocation", 1, writerInvocationCount);
}
});
}
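A note on the pattern above: each dunit VM is a separate JVM, so static fields such as netWriteInvoked are per-VM. The flags are set by the cache writer running in vm2 and therefore have to be asserted by a later invoke on that same vm2. Below is a minimal sketch of that set-then-assert idiom, assuming a dunit test class with a hypothetical static boolean flag named callbackFired; Host, VM, and SerializableRunnable are used exactly as in the method above.
// Minimal sketch: set per-VM static state in one invoke, then assert it in a second
// invoke on the same VM, because statics are not shared between dunit VMs.
final VM vm = Host.getHost(0).getVM(2);
vm.invoke(new SerializableRunnable("set flag") {
  public void run() {
    callbackFired = true; // hypothetical static boolean on the test class
  }
});
vm.invoke(new SerializableRunnable("assert flag in the same VM") {
  public void run() {
    assertTrue("flag should have been set by the earlier invoke", callbackFired);
  }
});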
Use of org.apache.geode.test.dunit.SerializableRunnable in project geode by apache.
Example from the class SearchAndLoadDUnitTest, method testLocalLoad.
@Test
public void testLocalLoad() throws CacheException, InterruptedException {
Host host = Host.getHost(0);
VM vm0 = host.getVM(0);
VM vm1 = host.getVM(1);
final String name = this.getUniqueName() + "-ACK";
final String objectName = "C";
final Integer value = new Integer(44);
remoteLoaderInvoked = false;
loaderInvoked = false;
vm0.invoke(new SerializableRunnable("Create ACK Region") {
public void run() {
remoteLoaderInvoked = false;
loaderInvoked = false;
try {
AttributesFactory factory = new AttributesFactory();
factory.setScope(Scope.DISTRIBUTED_ACK);
factory.setEarlyAck(false);
factory.setCacheLoader(new CacheLoader() {
public Object load(LoaderHelper helper) {
loaderInvoked = true;
return value;
}
public void close() {
}
});
Region region = createRegion(name, factory.create());
region.create(objectName, null);
} catch (CacheException ex) {
Assert.fail("While creating ACK region", ex);
}
}
});
vm1.invoke(new SerializableRunnable("Create ACK Region") {
public void run() {
remoteLoaderInvoked = false;
loaderInvoked = false;
try {
AttributesFactory factory = new AttributesFactory();
factory.setScope(Scope.DISTRIBUTED_ACK);
factory.setEarlyAck(false);
factory.setCacheLoader(new CacheLoader() {
public Object load(LoaderHelper helper) {
remoteLoaderInvoked = true;
return value;
}
public void close() {
}
});
createRegion(name, factory.create());
} catch (CacheException ex) {
Assert.fail("While creating ACK region", ex);
}
}
});
vm0.invoke(new SerializableRunnable("Get a value from local loader") {
public void run() {
try {
Object result = getRootRegion().getSubregion(name).get(objectName);
assertEquals(value, result);
assertTrue(loaderInvoked);
assertFalse(remoteLoaderInvoked);
} catch (CacheLoaderException cle) {
// ignored
} catch (TimeoutException te) {
// ignored
}
}
});
}
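The anonymous SerializableRunnable classes above can often be written more compactly as lambdas via the SerializableRunnableIF functional interface (the same interface used in the RegionReliabilityTestCase example below), assuming the VM.invoke overload that accepts it. A sketch of the final check from testLocalLoad in that form; name, objectName, value, loaderInvoked and remoteLoaderInvoked are the same variables used above.
// Sketch (assumes vm0.invoke(SerializableRunnableIF)): the "get and verify loader flags"
// step written as a lambda instead of an anonymous SerializableRunnable.
vm0.invoke(() -> {
  Object result = getRootRegion().getSubregion(name).get(objectName);
  assertEquals(value, result);
  assertTrue("expected the local loader to be invoked", loaderInvoked);
  assertFalse("expected the remote loader not to be invoked", remoteLoaderInvoked);
});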
Use of org.apache.geode.test.dunit.SerializableRunnable in project geode by apache.
Example from the class SlowRecDUnitTest, method doTestPartialMessage.
private void doTestPartialMessage() throws Exception {
final AttributesFactory factory = new AttributesFactory();
factory.setScope(Scope.DISTRIBUTED_NO_ACK);
factory.setEnableAsyncConflation(true);
final Region r = createRootRegion("slowrec", factory.create());
final DM dm = getSystem().getDistributionManager();
final DMStats stats = dm.getStats();
// capture the initial queued-message count before vm0 connects
long initialQueuedMsgs = stats.getAsyncQueuedMsgs();
// create receiver in vm0 with queuing enabled
final Properties p = new Properties();
p.setProperty(ASYNC_DISTRIBUTION_TIMEOUT, String.valueOf(1000 * 4)); // 4 seconds
p.setProperty(ASYNC_QUEUE_TIMEOUT, "86400000"); // max value
p.setProperty(ASYNC_MAX_QUEUE_SIZE, "1024"); // max value
getOtherVm().invoke(new CacheSerializableRunnable("Create other vm") {
public void run2() throws CacheException {
getSystem(p);
AttributesFactory af = new AttributesFactory();
af.setScope(Scope.DISTRIBUTED_NO_ACK);
af.setDataPolicy(DataPolicy.REPLICATE);
doTestPartialMessage_Listener = new ControlListener();
af.setCacheListener(doTestPartialMessage_Listener);
createRootRegion("slowrec", af.create());
}
});
// put vm0 cache listener into wait
LogWriterUtils.getLogWriter().info("[testPartialMessage] about to put vm0 into wait");
final int millisToWait = 1000 * 60 * 5; // 5 minutes
r.put(KEY_WAIT, new Integer(millisToWait));
// build up queue size
LogWriterUtils.getLogWriter().info("[testPartialMessage] building up queue size...");
final Object key = "key";
final int socketBufferSize = getSystem().getConfig().getSocketBufferSize();
final int VALUE_SIZE = socketBufferSize * 3; // 1024 * 20; // 20 KB
final byte[] value = new byte[VALUE_SIZE];
int count = 0;
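// keep putting until at least one message has been queued asynchronously, i.e. the slow receiver queue has kicked in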
while (stats.getAsyncQueuedMsgs() == initialQueuedMsgs) {
count++;
r.put(key, value, new Integer(count));
}
final int partialId = count;
assertEquals(0, stats.getAsyncConflatedMsgs());
LogWriterUtils.getLogWriter().info("[testPartialMessage] After " + count + " puts of size " + VALUE_SIZE + " slowrec mode kicked in with queue size=" + stats.getAsyncQueueSize());
Wait.pause(2000);
// conflate 10 times
while (stats.getAsyncConflatedMsgs() < 10) {
count++;
r.put(key, value, new Integer(count));
if (count == partialId + 1) {
assertEquals(initialQueuedMsgs + 2, stats.getAsyncQueuedMsgs());
assertEquals(0, stats.getAsyncConflatedMsgs());
} else if (count == partialId + 2) {
assertEquals(initialQueuedMsgs + 2, stats.getAsyncQueuedMsgs());
assertEquals(1, stats.getAsyncConflatedMsgs());
}
}
final int conflateId = count;
final int[] expectedArgs = { partialId, conflateId };
// send notify to vm0
LogWriterUtils.getLogWriter().info("[testPartialMessage] wake up vm0");
getOtherVm().invoke(new SerializableRunnable("Wake up other vm") {
public void run() {
synchronized (doTestPartialMessage_Listener.CONTROL_LOCK) {
doTestPartialMessage_Listener.CONTROL_LOCK.notify();
}
}
});
// wait for queue to be flushed
LogWriterUtils.getLogWriter().info("[testPartialMessage] wait for vm0");
getOtherVm().invoke(new SerializableRunnable("Wait for other vm") {
public void run() {
try {
synchronized (doTestPartialMessage_Listener.CONTROL_LOCK) {
boolean done = false;
while (!done) {
if (doTestPartialMessage_Listener.callbackArguments.size() > 0) {
CallbackWrapper last = (CallbackWrapper) doTestPartialMessage_Listener.callbackArguments.getLast();
Integer lastId = (Integer) last.callbackArgument;
if (lastId.intValue() == conflateId) {
done = true;
} else {
doTestPartialMessage_Listener.CONTROL_LOCK.wait(millisToWait);
}
} else {
doTestPartialMessage_Listener.CONTROL_LOCK.wait(millisToWait);
}
}
}
} catch (InterruptedException ignore) {
fail("interrupted");
}
}
});
// assert values on both listeners
LogWriterUtils.getLogWriter().info("[testPartialMessage] assert callback arguments");
getOtherVm().invoke(new SerializableRunnable("Assert callback arguments") {
public void run() {
synchronized (doTestPartialMessage_Listener.CONTROL_LOCK) {
LogWriterUtils.getLogWriter().info("[testPartialMessage] " + "doTestPartialMessage_Listener.callbackArguments=" + doTestPartialMessage_Listener.callbackArguments);
assertEquals(doTestPartialMessage_Listener.callbackArguments.size(), doTestPartialMessage_Listener.callbackTypes.size());
int i = 0;
Iterator argIter = doTestPartialMessage_Listener.callbackArguments.iterator();
Iterator typeIter = doTestPartialMessage_Listener.callbackTypes.iterator();
while (argIter.hasNext()) {
CallbackWrapper wrapper = (CallbackWrapper) argIter.next();
Integer arg = (Integer) wrapper.callbackArgument;
// advance the type iterator in step with the argument iterator; the type value itself is not checked here
typeIter.next();
if (arg.intValue() < partialId) {
continue;
}
assertEquals(new Integer(expectedArgs[i]), arg);
// assertIndexDetailsEquals(CALLBACK_UPDATE_INTEGER, type);
i++;
}
}
}
});
}
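The cross-VM coordination above works because doTestPartialMessage_Listener is a static field living in the other VM: one invoke waits on its CONTROL_LOCK while another invoke (and the cache listener itself) notifies it, and both run in the same remote JVM, so they see the same static state. A minimal sketch of that idiom, with a hypothetical static LOCK object and ready flag on the test class:
// Sketch: both invokes execute in the same remote VM, so they share static state.
// LOCK and ready are hypothetical statics; the bounded wait mirrors millisToWait above.
getOtherVm().invoke(new SerializableRunnable("signal") {
  public void run() {
    synchronized (LOCK) {
      ready = true;
      LOCK.notify();
    }
  }
});
getOtherVm().invoke(new SerializableRunnable("wait for the signal") {
  public void run() {
    try {
      synchronized (LOCK) {
        while (!ready) {
          LOCK.wait(60 * 1000);
        }
      }
    } catch (InterruptedException ignore) {
      fail("interrupted");
    }
  }
});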
Use of org.apache.geode.test.dunit.SerializableRunnable in project geode by apache.
Example from the class RegionReliabilityTestCase, method testCommitDistributionException.
@Test
public void testCommitDistributionException() throws Exception {
if (getRegionScope().isGlobal())
return; // skip test under Global scope
if (getRegionScope().isDistributedNoAck())
return; // skip test under DistributedNoAck scope
final String name = this.getUniqueName();
final String roleA = name + "-A";
final String[] requiredRoles = { roleA };
Set requiredRolesSet = new HashSet();
for (int i = 0; i < requiredRoles.length; i++) {
requiredRolesSet.add(InternalRole.getRole(requiredRoles[i]));
}
assertEquals(requiredRoles.length, requiredRolesSet.size());
// connect controller to system...
Properties config = new Properties();
config.setProperty(ROLES, "");
getSystem(config);
GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
RegionMembershipListener listener = new RegionMembershipListenerAdapter() {
public void afterRemoteRegionDeparture(RegionEvent event) {
synchronized (detectedDeparture_testCommitDistributionException) {
detectedDeparture_testCommitDistributionException[0] = Boolean.TRUE;
detectedDeparture_testCommitDistributionException.notify();
}
}
};
// create region in controller...
MembershipAttributes ra = new MembershipAttributes(requiredRoles, LossAction.NO_ACCESS, ResumptionAction.NONE);
AttributesFactory fac = new AttributesFactory();
fac.setMembershipAttributes(ra);
fac.setScope(getRegionScope());
fac.addCacheListener(listener);
RegionAttributes attr = fac.create();
Region region = createRootRegion(name, attr);
// use vm1 to create role
Host.getHost(0).getVM(1).invoke(new CacheSerializableRunnable("Create Region") {
public void run2() throws CacheException {
createConnection(new String[] { roleA });
AttributesFactory fac = new AttributesFactory();
fac.setScope(getRegionScope());
RegionAttributes attr = fac.create();
createRootRegion(name, attr);
}
});
// define a callback that closes the role-holding region in vm1 and waits until the departure is detected
SerializableRunnableIF removeRequiredRole = new SerializableRunnableIF() {
public void run() {
Host.getHost(0).getVM(1).invoke(new SerializableRunnable("Close Region") {
public void run() {
getRootRegion(name).close();
}
});
try {
synchronized (detectedDeparture_testCommitDistributionException) {
while (detectedDeparture_testCommitDistributionException[0] == Boolean.FALSE) {
detectedDeparture_testCommitDistributionException.wait();
}
}
} catch (InterruptedException e) {
fail("interrupted");
}
}
};
// define runnables that add and remove the ExpectedException log markers
final String expectedExceptions = "org.apache.geode.internal.cache.CommitReplyException";
SerializableRunnable addExpectedExceptions = new CacheSerializableRunnable("addExpectedExceptions") {
public void run2() throws CacheException {
getCache().getLogger().info("<ExpectedException action=add>" + expectedExceptions + "</ExpectedException>");
}
};
SerializableRunnable removeExpectedExceptions = new CacheSerializableRunnable("removeExpectedExceptions") {
public void run2() throws CacheException {
getCache().getLogger().info("<ExpectedException action=remove>" + expectedExceptions + "</ExpectedException>");
}
};
// perform the actual test...
CacheTransactionManager ctm = cache.getCacheTransactionManager();
ctm.begin();
TXStateInterface txStateProxy = ((TXManagerImpl) ctm).getTXState();
((TXStateProxyImpl) txStateProxy).forceLocalBootstrap();
TXState txState = (TXState) ((TXStateProxyImpl) txStateProxy).getRealDeal(null, null);
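// install a beforeSend hook so the required role departs just before the commit message is sent; the commit below is then expected to fail with CommitDistributionException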
txState.setBeforeSend(() -> {
try {
removeRequiredRole.run();
} catch (Exception e) {
throw new RuntimeException(e);
}
});
// do a put in the transaction started above, then attempt the commit
region.put("KEY", "VAL");
addExpectedExceptions.run();
Host.getHost(0).getVM(1).invoke(addExpectedExceptions);
try {
ctm.commit();
fail("Should have thrown CommitDistributionException");
} catch (CommitDistributionException e) {
// pass
} finally {
removeExpectedExceptions.run();
Host.getHost(0).getVM(1).invoke(removeExpectedExceptions);
}
}
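The addExpectedExceptions and removeExpectedExceptions runnables above use the ExpectedException log markers so that the dunit framework's log checking does not flag the anticipated CommitReplyException. A minimal sketch of that bracketing pattern, to be executed in every VM whose log may contain the exception; the body of the try block is a placeholder.
// Sketch: bracket an operation that is expected to log an exception with add/remove markers.
final String expected = "org.apache.geode.internal.cache.CommitReplyException";
getCache().getLogger().info("<ExpectedException action=add>" + expected + "</ExpectedException>");
try {
  // ... run the operation expected to log the exception, e.g. the commit above ...
} finally {
  getCache().getLogger().info("<ExpectedException action=remove>" + expected + "</ExpectedException>");
}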
Use of org.apache.geode.test.dunit.SerializableRunnable in project geode by apache.
Example from the class RequiredRolesDUnitTest, method testIsRoleInRegionMembership.
/**
* Tests RequiredRoles.isRoleInRegionMembership().
*/
@Test
public void testIsRoleInRegionMembership() throws Exception {
final String name = this.getUniqueName();
final int vm0 = 0;
final int vm1 = 1;
final int vm2 = 2;
final int vm3 = 3;
final String roleA = name + "-A";
final String roleC = name + "-C";
final String roleD = name + "-D";
// assign roles to 4 vms...
final String[] requiredRoles = { roleA, roleC, roleD };
final String[] rolesProp = { "", roleA, roleA, roleC + "," + roleD };
final String[][] vmRoles = new String[][] { {}, { roleA }, { roleA }, { roleC, roleD } };
for (int i = 0; i < vmRoles.length; i++) {
final int vm = i;
Host.getHost(0).getVM(vm).invoke(new SerializableRunnable() {
public void run() {
Properties config = new Properties();
config.setProperty(ROLES, rolesProp[vm]);
getSystem(config);
}
});
}
// connect controller to system...
Properties config = new Properties();
config.setProperty(ROLES, "");
getSystem(config);
// create region in controller...
MembershipAttributes ra = new MembershipAttributes(requiredRoles, LossAction.FULL_ACCESS, ResumptionAction.NONE);
AttributesFactory fac = new AttributesFactory();
fac.setMembershipAttributes(ra);
fac.setScope(Scope.DISTRIBUTED_ACK);
RegionAttributes attr = fac.create();
Region region = createRootRegion(name, attr);
// wait for memberTimeout to expire
waitForMemberTimeout();
// assert each role is missing
final Set requiredRolesSet = region.getAttributes().getMembershipAttributes().getRequiredRoles();
for (Iterator iter = requiredRolesSet.iterator(); iter.hasNext(); ) {
Role role = (Role) iter.next();
assertFalse(RequiredRoles.isRoleInRegionMembership(region, role));
}
SerializableRunnable create = new CacheSerializableRunnable("Create Region") {
public void run2() throws CacheException {
AttributesFactory fac = new AttributesFactory();
fac.setScope(Scope.DISTRIBUTED_ACK);
RegionAttributes attr = fac.create();
createRootRegion(name, attr);
}
};
// create region in vm0... no gain, since vm0 fills no required role
Host.getHost(0).getVM(vm0).invoke(create);
for (Iterator iter = requiredRolesSet.iterator(); iter.hasNext(); ) {
Role role = (Role) iter.next();
assertFalse(RequiredRoles.isRoleInRegionMembership(region, role));
}
// create region in vm1... gain for 1st instance of redundant role
Host.getHost(0).getVM(vm1).invoke(create);
for (int i = 0; i < vmRoles[vm1].length; i++) {
Role role = InternalRole.getRole(vmRoles[vm1][i]);
assertTrue(RequiredRoles.isRoleInRegionMembership(region, role));
}
// create region in vm2... no gain for 2nd instance of redundant role
Host.getHost(0).getVM(vm2).invoke(create);
for (int i = 0; i < vmRoles[vm2].length; i++) {
Role role = InternalRole.getRole(vmRoles[vm2][i]);
assertTrue(RequiredRoles.isRoleInRegionMembership(region, role));
}
// create region in vm3... gain for 2 roles
Host.getHost(0).getVM(vm3).invoke(create);
for (int i = 0; i < vmRoles[vm3].length; i++) {
Role role = InternalRole.getRole(vmRoles[vm3][i]);
assertTrue(RequiredRoles.isRoleInRegionMembership(region, role));
}
SerializableRunnable destroy = new CacheSerializableRunnable("Destroy Region") {
public void run2() throws CacheException {
Region region = getRootRegion(name);
region.localDestroyRegion();
}
};
// destroy region in vm0... no loss of any role
Host.getHost(0).getVM(vm0).invoke(destroy);
for (Iterator iter = requiredRolesSet.iterator(); iter.hasNext(); ) {
Role role = (Role) iter.next();
assertTrue(RequiredRoles.isRoleInRegionMembership(region, role));
}
// destroy region in vm1... nothing happens in 1st removal of redundant role
Host.getHost(0).getVM(vm1).invoke(destroy);
for (Iterator iter = requiredRolesSet.iterator(); iter.hasNext(); ) {
Role role = (Role) iter.next();
assertTrue(RequiredRoles.isRoleInRegionMembership(region, role));
}
// destroy region in vm2... 2nd removal of redundant role is loss
Host.getHost(0).getVM(vm2).invoke(destroy);
for (int i = 0; i < vmRoles[vm2].length; i++) {
Role role = InternalRole.getRole(vmRoles[vm2][i]);
assertFalse(RequiredRoles.isRoleInRegionMembership(region, role));
}
// destroy region in vm3... the remaining two roles are now lost
Host.getHost(0).getVM(vm3).invoke(destroy);
for (Iterator iter = requiredRolesSet.iterator(); iter.hasNext(); ) {
Role role = (Role) iter.next();
assertFalse(RequiredRoles.isRoleInRegionMembership(region, role));
}
}
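A detail worth noting in this example: a single serializable task such as the create and destroy runnables above is invoked in several VMs in turn; the instance is serialized to each VM on invoke, so one definition drives all of them. A minimal sketch of that reuse, with a hypothetical region name:
// Sketch: define one task and run it in each VM; "exampleRegion" is a hypothetical name.
SerializableRunnable createRegion = new CacheSerializableRunnable("Create Region") {
  public void run2() throws CacheException {
    AttributesFactory fac = new AttributesFactory();
    fac.setScope(Scope.DISTRIBUTED_ACK);
    createRootRegion("exampleRegion", fac.create());
  }
};
for (int i = 0; i < 4; i++) {
  Host.getHost(0).getVM(i).invoke(createRegion);
}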