Use of org.apache.geode.LogWriter in project geode by apache.
In class TestFunction, method execute2.
public void execute2(FunctionContext context) {
  if (context instanceof RegionFunctionContext) {
    RegionFunctionContext rfContext = (RegionFunctionContext) context;
    rfContext.getDataSet().getCache().getLogger()
        .info("Executing function : TestFunction2.execute " + rfContext);
    if (rfContext.getArguments() instanceof Boolean) {
      /* return rfContext.getArguments(); */
      if (hasResult()) {
        rfContext.getResultSender().lastResult((Serializable) rfContext.getArguments());
      } else {
        rfContext.getDataSet().getCache().getLogger()
            .info("Executing function : TestFunction2.execute " + rfContext);
        // Loop until the region is destroyed, simulating a long-running function.
        while (!rfContext.getDataSet().isDestroyed()) {
          rfContext.getDataSet().getCache().getLogger().info("For Bug43513 ");
          try {
            Thread.sleep(100);
          } catch (InterruptedException ie) {
            Thread.currentThread().interrupt();
            return;
          }
        }
      }
    } else if (rfContext.getArguments() instanceof String) {
      String key = (String) rfContext.getArguments();
      if (key.equals("TestingTimeOut")) {
        // PRFunctionExecutionDUnitTest#testRemoteMultiKeyExecution_timeout
        try {
          Thread.sleep(2000);
        } catch (InterruptedException e) {
          rfContext.getDataSet().getCache().getLogger()
              .warning("Got Exception : Thread Interrupted " + e);
        }
      }
      if (PartitionRegionHelper.isPartitionedRegion(rfContext.getDataSet())) {
        /* return (Serializable) PartitionRegionHelper.getLocalDataForContext(rfContext).get(key); */
        rfContext.getResultSender()
            .lastResult((Serializable) PartitionRegionHelper.getLocalDataForContext(rfContext).get(key));
      } else {
        rfContext.getResultSender().lastResult((Serializable) rfContext.getDataSet().get(key));
      }
      /* return (Serializable) rfContext.getDataSet().get(key); */
    } else if (rfContext.getArguments() instanceof Set) {
      Set origKeys = (Set) rfContext.getArguments();
      ArrayList vals = new ArrayList();
      for (Object key : origKeys) {
        Object val = PartitionRegionHelper.getLocalDataForContext(rfContext).get(key);
        if (val != null) {
          vals.add(val);
        }
      }
      rfContext.getResultSender().lastResult(vals);
      /* return vals; */
    } else if (rfContext.getArguments() instanceof HashMap) {
      HashMap putData = (HashMap) rfContext.getArguments();
      for (Iterator i = putData.entrySet().iterator(); i.hasNext();) {
        Map.Entry me = (Map.Entry) i.next();
        rfContext.getDataSet().put(me.getKey(), me.getValue());
      }
      rfContext.getResultSender().lastResult(Boolean.TRUE);
    } else {
      rfContext.getResultSender().lastResult(Boolean.FALSE);
    }
  } else {
    if (hasResult()) {
      context.getResultSender().lastResult(Boolean.FALSE);
    } else {
      DistributedSystem ds = InternalDistributedSystem.getAnyInstance();
      LogWriter logger = ds.getLogWriter();
      logger.info("Executing in TestFunction on Server : " + ds.getDistributedMember()
          + " with Context : " + context);
      // Spin until the distributed system disconnects, simulating a hung function.
      while (ds.isConnected()) {
        logger.fine("Just executing function in infinite loop for Bug43513");
        try {
          Thread.sleep(250);
        } catch (InterruptedException e) {
          return;
        }
      }
    }
  }
}
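For context, a minimal caller-side sketch of how a region function such as execute2 is typically invoked through Geode's FunctionService; the region instance, the Boolean argument, and the class name ExecuteTestFunction are illustrative assumptions, not part of the original test.

import org.apache.geode.cache.Region;
import org.apache.geode.cache.execute.Execution;
import org.apache.geode.cache.execute.Function;
import org.apache.geode.cache.execute.FunctionService;
import org.apache.geode.cache.execute.ResultCollector;

public class ExecuteTestFunction {
  public static void invoke(Region<String, String> region, Function function) {
    // onRegion routes the call to the members hosting the region's data.
    Execution execution = FunctionService.onRegion(region);
    // A Boolean argument steers execute2 into its first branch.
    ResultCollector<?, ?> collector = execution.setArguments(Boolean.TRUE).execute(function);
    System.out.println("Function result: " + collector.getResult());
  }
}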
Use of org.apache.geode.LogWriter in project geode by apache.
In class TestFunction, method executeFunctionRunningForLongTime.
public void executeFunctionRunningForLongTime(FunctionContext context) {
  DistributedSystem ds = InternalDistributedSystem.getAnyInstance();
  LogWriter logger = ds.getLogWriter();
  try {
    Thread.sleep(2000);
  } catch (InterruptedException e) {
    logger.info("Exception in executeFunctionRunningForLongTime", e);
  }
  context.getResultSender().lastResult("Ran executeFunctionRunningForLongTime for 10000000");
}
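Since this function deliberately runs long, a caller can bound its wait using the timed ResultCollector.getResult overload. A minimal sketch, assuming an on-member execution and a 5-second budget; the class name TimedFunctionCall is hypothetical.

import java.util.concurrent.TimeUnit;
import org.apache.geode.cache.execute.Execution;
import org.apache.geode.cache.execute.Function;
import org.apache.geode.cache.execute.FunctionException;
import org.apache.geode.cache.execute.FunctionService;
import org.apache.geode.cache.execute.ResultCollector;
import org.apache.geode.distributed.DistributedMember;

public class TimedFunctionCall {
  public static Object callWithTimeout(DistributedMember member, Function function)
      throws InterruptedException {
    Execution execution = FunctionService.onMember(member);
    ResultCollector<?, ?> collector = execution.execute(function);
    try {
      // Wait at most 5 seconds for the long-running function to finish.
      return collector.getResult(5, TimeUnit.SECONDS);
    } catch (FunctionException e) {
      // The result did not arrive in time, or the function failed remotely.
      return null;
    }
  }
}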
Use of org.apache.geode.LogWriter in project geode by apache.
In class TestFunction, method executeFunctionReexecuteException.
private synchronized void executeFunctionReexecuteException(FunctionContext context) {
  retryCountForExecuteFunctionReexecuteException++;
  DistributedSystem ds = InternalDistributedSystem.getAnyInstance();
  LogWriter logger = ds.getLogWriter();
  logger.fine("Executing executeException in TestFunction on Member : " + ds.getDistributedMember()
      + " with Context : " + context);
  if (retryCountForExecuteFunctionReexecuteException >= 5) {
    logger.fine("Tried Function Execution 5 times. Now Returning after 5 attempts");
    context.getResultSender().lastResult(Integer.valueOf(retryCountForExecuteFunctionReexecuteException));
    retryCountForExecuteFunctionReexecuteException = 0;
    return;
  }
  if (context.getArguments() instanceof Boolean) {
    logger.fine("MyFunctionExecutionException is intentionally thrown");
    throw new FunctionInvocationTargetException(
        new MyFunctionExecutionException("I have been thrown from TestFunction"));
  }
}
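A FunctionInvocationTargetException only triggers the automatic re-execution exercised here when the function declares itself highly available. A minimal sketch of that contract; the class name RetriableFunction is an assumption.

import org.apache.geode.cache.execute.Function;
import org.apache.geode.cache.execute.FunctionContext;

public class RetriableFunction implements Function {
  @Override
  public void execute(FunctionContext context) {
    // Throwing FunctionInvocationTargetException from here asks Geode to
    // re-execute the function, as TestFunction does above.
    context.getResultSender().lastResult(Boolean.TRUE);
  }

  @Override
  public String getId() {
    return "RetriableFunction";
  }

  @Override
  public boolean isHA() {
    // true marks the function as safe to re-execute after a failure.
    return true;
  }

  @Override
  public boolean hasResult() {
    // Re-execution requires a result-bearing function.
    return true;
  }
}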
Use of org.apache.geode.LogWriter in project geode by apache.
In class Bug39356DUnitTest, method testCrashWhileCreatingABucket.
/**
 * This tests the case where the VM forcing other VMs to create a bucket crashes while creating
 * the bucket.
 */
@Test
public void testCrashWhileCreatingABucket() {
  Host host = Host.getHost(0);
  final VM vm0 = host.getVM(0);
  final VM vm1 = host.getVM(1);
  final VM vm2 = host.getVM(2);
  SerializableRunnable createParReg = new SerializableRunnable("Create parReg") {
    public void run() {
      DistributionMessageObserver.setInstance(new MyRegionObserver(vm0));
      Cache cache = getCache();
      AttributesFactory af = new AttributesFactory();
      PartitionAttributesFactory pf = new PartitionAttributesFactory();
      pf.setRedundantCopies(1);
      pf.setRecoveryDelay(0);
      af.setDataPolicy(DataPolicy.PARTITION);
      af.setPartitionAttributes(pf.create());
      cache.createRegion(REGION_NAME, af.create());
    }
  };
  vm1.invoke(createParReg);
  vm2.invoke(createParReg);
  SerializableRunnable createParRegAccessor = new SerializableRunnable("Create parReg") {
    public void run() {
      Cache cache = getCache();
      AttributesFactory af = new AttributesFactory();
      PartitionAttributesFactory pf = new PartitionAttributesFactory();
      pf.setRedundantCopies(1);
      pf.setLocalMaxMemory(0);
      af.setDataPolicy(DataPolicy.PARTITION);
      af.setPartitionAttributes(pf.create());
      Region r = cache.createRegion(REGION_NAME, af.create());
      // trigger the creation of a bucket, which should trigger the destruction of this VM.
      try {
        r.put("ping", "pong");
        fail("Should have gotten a CancelException");
      } catch (CancelException e) {
        // this is ok, we expect our observer to close this cache.
      }
    }
  };
  vm0.invoke(createParRegAccessor);
  SerializableRunnable verifyBuckets = new SerializableRunnable("Verify buckets") {
    public void run() {
      LogWriter log = org.apache.geode.test.dunit.LogWriterUtils.getLogWriter();
      Cache cache = getCache();
      PartitionedRegion r = (PartitionedRegion) cache.getRegion(REGION_NAME);
      for (int i = 0; i < r.getAttributes().getPartitionAttributes().getTotalNumBuckets(); i++) {
        List owners = null;
        while (owners == null) {
          try {
            owners = r.getBucketOwnersForValidation(i);
          } catch (ForceReattemptException e) {
            log.info(Bug39356DUnitTest.class + " verify buckets Caught a ForceReattemptException");
            Wait.pause(1000);
          }
        }
        if (owners.isEmpty()) {
          log.info("skipping bucket " + i + " because it has no data");
          continue;
        }
        assertEquals("Expecting bucket " + i + " to have two copies", 2, owners.size());
        log.info("bucket " + i + " had two copies");
      }
    }
  };
  vm1.invoke(verifyBuckets);
  vm2.invoke(verifyBuckets);
}
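MyRegionObserver is referenced above but not shown in this excerpt. A hedged sketch of the shape such an observer could take, using Geode's internal DistributionMessageObserver hook; note that the hook signature varies across Geode versions, and the ManageBucketMessage check and disconnect action are assumptions about how the crash is simulated, not the original implementation.

import org.apache.geode.distributed.internal.DistributionManager;
import org.apache.geode.distributed.internal.DistributionMessage;
import org.apache.geode.distributed.internal.DistributionMessageObserver;
import org.apache.geode.internal.cache.partitioned.ManageBucketMessage;

public class CrashingBucketObserver extends DistributionMessageObserver {
  @Override
  public void beforeProcessMessage(DistributionManager dm, DistributionMessage message) {
    // Simulate a crash of this member while a bucket is being created.
    if (message instanceof ManageBucketMessage) {
      dm.getSystem().disconnect();
    }
  }
}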
Use of org.apache.geode.LogWriter in project geode by apache.
In class InterestResultPolicyDUnitTest, method verifyResult.
/**
 * Verifies the number of entries (including values) created on the test region at the end of a
 * {@link Region#registerInterest} call, depending on the type of {@link InterestResultPolicy}
 * registered for the region.
 *
 * @param interestPolicy the {@link InterestResultPolicy} registered for the region
 * @param totalKeysRegistered the total number of keys registered for interest (not used by this
 *        verification)
 */
public static void verifyResult(Object interestPolicy, Object totalKeysRegistered) {
  Region region1 = cache.getRegion(Region.SEPARATOR + REGION_NAME);
  int entriesSize = region1.entrySet(false).size();
  int keysSize = region1.keySet().size();
  int valuesSize = region1.values().size();
  InterestResultPolicy policy = (InterestResultPolicy) interestPolicy;
  LogWriter logger = cache.getLogger();
  logger.fine("policy = " + policy + " ==> entries = " + entriesSize + " ; keys = " + keysSize
      + " ; values = " + valuesSize);
  if (policy.isNone()) {
    // Nothing should be created in the client cache.
    assertEquals(0, entriesSize);
    assertEquals(0, keysSize);
    assertEquals(0, valuesSize);
  } else if (policy.isKeys()) {
    // All keys should be created, with null values.
    assertEquals(PREPOPULATED_ENTRIES, entriesSize);
    assertEquals(PREPOPULATED_ENTRIES, keysSize);
    assertEquals(0, valuesSize);
  } else if (policy.isKeysValues()) {
    // All keys and their values should be created.
    assertEquals(PREPOPULATED_ENTRIES, entriesSize);
    assertEquals(PREPOPULATED_ENTRIES, keysSize);
    assertEquals(PREPOPULATED_ENTRIES, valuesSize);
  }
}
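For reference, a client-side sketch of how the three policies verified above are registered. This assumes a client region whose pool has subscriptions enabled; the class name RegisterInterestExamples is illustrative.

import org.apache.geode.cache.InterestResultPolicy;
import org.apache.geode.cache.Region;

public class RegisterInterestExamples {
  public static void registerAll(Region<String, String> region) {
    // NONE: fetch nothing at registration time; only future updates arrive.
    region.registerInterest("ALL_KEYS", InterestResultPolicy.NONE);
    // KEYS: create all matching keys locally, leaving their values null.
    region.registerInterest("ALL_KEYS", InterestResultPolicy.KEYS);
    // KEYS_VALUES: create all matching keys along with their current values.
    region.registerInterest("ALL_KEYS", InterestResultPolicy.KEYS_VALUES);
  }
}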