Use of org.apache.geode.cache.execute.ResultCollector in project geode by apache.
Class PRFunctionExecutionDUnitTest, method bug41118().
public static void bug41118() {
  InternalDistributedSystem ds = new PRFunctionExecutionDUnitTest().getSystem();
  assertNotNull(ds);
  ds.disconnect();
  Properties props = new Properties();
  props.setProperty(MCAST_PORT, "0");
  ds = (InternalDistributedSystem) DistributedSystem.connect(props);
  DM dm = ds.getDistributionManager();
  assertEquals("Distributed System is not loner", true, dm instanceof LonerDistributionManager);
  Cache cache = CacheFactory.create(ds);
  AttributesFactory factory = new AttributesFactory();
  factory.setDataPolicy(DataPolicy.PARTITION);
  assertNotNull(cache);
  Region region = cache.createRegion("PartitonedRegion", factory.create());
  for (int i = 0; i < 20; i++) {
    region.put("KEY_" + i, "VALUE_" + i);
  }
  Set<String> keysForGet = new HashSet<String>();
  keysForGet.add("KEY_4");
  keysForGet.add("KEY_9");
  keysForGet.add("KEY_7");
  try {
    Execution execution =
        FunctionService.onRegion(region).withFilter(keysForGet).setArguments(Boolean.TRUE);
    ResultCollector rc = execution.execute(new FunctionAdapter() {
      @Override
      public void execute(FunctionContext fc) {
        RegionFunctionContext context = (RegionFunctionContext) fc;
        Set keys = context.getFilter();
        Set keysTillSecondLast = new HashSet();
        int setSize = keys.size();
        Iterator keysIterator = keys.iterator();
        for (int i = 0; i < (setSize - 1); i++) {
          keysTillSecondLast.add(keysIterator.next());
        }
        for (Object k : keysTillSecondLast) {
          context.getResultSender().sendResult(
              (Serializable) PartitionRegionHelper.getLocalDataForContext(context).get(k));
        }
        Object lastResult = keysIterator.next();
        context.getResultSender().lastResult(
            (Serializable) PartitionRegionHelper.getLocalDataForContext(context).get(lastResult));
      }

      @Override
      public String getId() {
        return getClass().getName();
      }
    });
    rc.getResult();
    ds.disconnect();
  } catch (Exception e) {
    LogWriterUtils.getLogWriter().info("Exception Occurred : " + e.getMessage());
    e.printStackTrace();
    Assert.fail("Test failed", e);
  }
}
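The function above streams values back with sendResult() and lastResult(); on the calling side those values arrive at a ResultCollector. Below is a minimal sketch of a custom collector, assuming only the five methods of the public org.apache.geode.cache.execute.ResultCollector interface (addResult, endResults, getResult with and without a timeout, clearResults). The class name ListResultCollector is illustrative, not part of the test above.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;

import org.apache.geode.cache.execute.FunctionException;
import org.apache.geode.cache.execute.ResultCollector;
import org.apache.geode.distributed.DistributedMember;

// Collects every object streamed back via ResultSender into a single list.
public class ListResultCollector implements ResultCollector<Object, List<Object>> {

  private final List<Object> results = new ArrayList<>();
  private boolean complete = false;

  @Override
  public synchronized void addResult(DistributedMember member, Object result) {
    // Called once per sendResult()/lastResult() from each executing member.
    results.add(result);
  }

  @Override
  public synchronized void endResults() {
    // Called after the last member has delivered its lastResult().
    complete = true;
    notifyAll();
  }

  @Override
  public synchronized List<Object> getResult() throws FunctionException {
    // Block until endResults() signals that all members have finished.
    while (!complete) {
      try {
        wait();
      } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
        throw new FunctionException(ie);
      }
    }
    return results;
  }

  @Override
  public synchronized List<Object> getResult(long timeout, TimeUnit unit)
      throws FunctionException, InterruptedException {
    long remaining = unit.toMillis(timeout);
    long deadline = System.currentTimeMillis() + remaining;
    while (!complete && remaining > 0) {
      wait(remaining);
      remaining = deadline - System.currentTimeMillis();
    }
    return results;  // may be partial if the timeout expired
  }

  @Override
  public synchronized void clearResults() {
    results.clear();
    complete = false;
  }
}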
Use of org.apache.geode.cache.execute.ResultCollector in project geode by apache.
Class PRFunctionExecutionDUnitTest, method testExecutionOnAllNodes_byInstance().
/**
 * Ensure that the execution happens on all nodes of the PR as a whole.
 */
@Test
public void testExecutionOnAllNodes_byInstance() throws Exception {
  final String rName = getUniqueName();
  Host host = Host.getHost(0);
  final VM datastore0 = host.getVM(0);
  final VM datastore1 = host.getVM(1);
  final VM datastore2 = host.getVM(2);
  final VM accessor = host.getVM(3);
  getCache();
  accessor.invoke(new SerializableCallable("Create PR") {
    public Object call() throws Exception {
      RegionAttributes ra = PartitionedRegionTestHelper.createRegionAttrsForPR(0, 0);
      AttributesFactory raf = new AttributesFactory(ra);
      PartitionAttributesImpl pa = new PartitionAttributesImpl();
      pa.setAll(ra.getPartitionAttributes());
      pa.setTotalNumBuckets(17);
      raf.setPartitionAttributes(pa);
      getCache().createRegion(rName, raf.create());
      return Boolean.TRUE;
    }
  });
  SerializableCallable dataStoreCreate =
      new SerializableCallable("Create PR with Function Factory") {
        public Object call() throws Exception {
          RegionAttributes ra = PartitionedRegionTestHelper.createRegionAttrsForPR(0, 10);
          AttributesFactory raf = new AttributesFactory(ra);
          PartitionAttributesImpl pa = new PartitionAttributesImpl();
          pa.setAll(ra.getPartitionAttributes());
          pa.setTotalNumBuckets(17);
          raf.setPartitionAttributes(pa);
          getCache().createRegion(rName, raf.create());
          // Function function = new TestFunction(true,"TestFunction2");
          Function function = new TestFunction(true, TestFunction.TEST_FUNCTION2);
          FunctionService.registerFunction(function);
          return Boolean.TRUE;
        }
      };
  datastore0.invoke(dataStoreCreate);
  datastore1.invoke(dataStoreCreate);
  datastore2.invoke(dataStoreCreate);
  Object o = accessor.invoke(new SerializableCallable("Create data, invoke exectuable") {
    public Object call() throws Exception {
      PartitionedRegion pr = (PartitionedRegion) getCache().getRegion(rName);
      DistributedSystem.setThreadsSocketPolicy(false);
      final HashSet testKeys = new HashSet();
      for (int i = (pr.getTotalNumberOfBuckets() * 3); i > 0; i--) {
        testKeys.add("execKey-" + i);
      }
      int j = 0;
      for (Iterator i = testKeys.iterator(); i.hasNext(); ) {
        Integer val = new Integer(j++);
        pr.put(i.next(), val);
      }
      // Assert there is data in each bucket
      for (int bid = 0; bid < pr.getTotalNumberOfBuckets(); bid++) {
        assertTrue(pr.getBucketKeys(bid).size() > 0);
      }
      Function function = new TestFunction(true, TestFunction.TEST_FUNCTION2);
      FunctionService.registerFunction(function);
      Execution dataSet = FunctionService.onRegion(pr);
      ResultCollector rc1 = dataSet.setArguments(Boolean.TRUE).execute(function);
      List l = ((List) rc1.getResult());
      assertEquals(3, l.size());
      for (Object result : l) {
        assertEquals(Boolean.TRUE, result);
      }
      return Boolean.TRUE;
    }
  });
  assertEquals(Boolean.TRUE, o);
}
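The test above relies on the default collector returned by execute(). A minimal sketch follows, assuming Execution.withCollector(...), of how a caller-supplied collector (for example the ListResultCollector sketched earlier) could be plugged in instead. AllNodesInvoker, invokeOnAllNodes and functionId are illustrative names, not part of the test.

import java.util.List;

import org.apache.geode.cache.Region;
import org.apache.geode.cache.execute.Execution;
import org.apache.geode.cache.execute.FunctionService;
import org.apache.geode.cache.execute.ResultCollector;

// Runs a registered function on every node hosting the region, aggregating results
// through a caller-supplied ResultCollector instead of the default one.
public class AllNodesInvoker {

  public static List invokeOnAllNodes(Region region, String functionId,
      ResultCollector customCollector) {
    Execution execution = FunctionService.onRegion(region)
        .setArguments(Boolean.TRUE)
        .withCollector(customCollector);
    ResultCollector rc = execution.execute(functionId);
    // Blocks until every member has sent its lastResult().
    return (List) rc.getResult();
  }
}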
Use of org.apache.geode.cache.execute.ResultCollector in project geode by apache.
Class PRFunctionExecutionDUnitTest, method testLocalSingleKeyExecution_byName_FunctionInvocationTargetException().
/**
 * Test local execution by a datastore where the function throws a
 * FunctionInvocationTargetException. Because the function is HA, the system should retry the
 * execution; after the fifth attempt the function returns a result instead of throwing.
 */
@Test
public void testLocalSingleKeyExecution_byName_FunctionInvocationTargetException() throws Exception {
  final String rName = getUniqueName();
  Host host = Host.getHost(0);
  final VM datastore = host.getVM(3);
  getCache();
  datastore.invoke(new SerializableCallable("Create PR with Function Factory") {
    public Object call() throws Exception {
      RegionAttributes ra = PartitionedRegionTestHelper.createRegionAttrsForPR(0, 10);
      AttributesFactory raf = new AttributesFactory(ra);
      PartitionAttributesImpl pa = new PartitionAttributesImpl();
      pa.setAll(ra.getPartitionAttributes());
      raf.setPartitionAttributes(pa);
      Region pr = getCache().createRegion(rName, raf.create());
      final String testKey = "execKey";
      final Set testKeysSet = new HashSet();
      testKeysSet.add(testKey);
      Function function = new TestFunction(true, TestFunction.TEST_FUNCTION_REEXECUTE_EXCEPTION);
      FunctionService.registerFunction(function);
      Execution dataSet = FunctionService.onRegion(pr);
      pr.put(testKey, new Integer(1));
      try {
        ResultCollector rs1 =
            dataSet.withFilter(testKeysSet).setArguments(Boolean.TRUE).execute(function.getId());
        List list = (ArrayList) rs1.getResult();
        assertEquals(list.get(0), 5);
      } catch (Throwable e) {
        e.printStackTrace();
        Assert.fail("This is not expected Exception", e);
      }
      return Boolean.TRUE;
    }
  });
}
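TestFunction.TEST_FUNCTION_REEXECUTE_EXCEPTION is not shown on this page. Below is a minimal sketch of a function with the same shape, assuming the public FunctionInvocationTargetException and the FunctionAdapter base class already used above; the class name and retry threshold are illustrative, not the actual TestFunction implementation.

import java.util.concurrent.atomic.AtomicInteger;

import org.apache.geode.cache.execute.FunctionAdapter;
import org.apache.geode.cache.execute.FunctionContext;
import org.apache.geode.cache.execute.FunctionInvocationTargetException;

// Fails its first four invocations; because isHA() returns true, the function service
// retries the execution, and the fifth attempt returns the attempt count as the result.
public class RetryUntilFifthAttemptFunction extends FunctionAdapter {

  private static final AtomicInteger attempts = new AtomicInteger();

  @Override
  public void execute(FunctionContext context) {
    int attempt = attempts.incrementAndGet();
    if (attempt < 5) {
      throw new FunctionInvocationTargetException("simulated target failure, attempt " + attempt);
    }
    context.getResultSender().lastResult(Integer.valueOf(attempt));
  }

  @Override
  public String getId() {
    return getClass().getName();
  }

  @Override
  public boolean isHA() {
    // Required so the function service retries on FunctionInvocationTargetException.
    return true;
  }

  @Override
  public boolean hasResult() {
    return true;
  }
}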
Use of org.apache.geode.cache.execute.ResultCollector in project geode by apache.
Class PRClientServerRegionFunctionExecutionSingleHopDUnitTest, method serverSingleKeyExecution().
public static void serverSingleKeyExecution(Boolean isByName) {
  Region region = cache.getRegion(PartitionedRegionName);
  assertNotNull(region);
  final String testKey = "execKey";
  final Set testKeysSet = new HashSet();
  testKeysSet.add(testKey);
  DistributedSystem.setThreadsSocketPolicy(false);
  Function function = new TestFunction(true, TEST_FUNCTION2);
  FunctionService.registerFunction(function);
  Execution dataSet = FunctionService.onRegion(region);
  try {
    execute(dataSet, testKeysSet, Boolean.TRUE, function, isByName);
  } catch (Exception expected) {
    assertTrue(expected.getMessage().contains("No target node found for KEY = " + testKey)
        || expected.getMessage().startsWith("Server could not send the reply")
        || expected.getMessage().startsWith("Unexpected exception during"));
  }
  region.put(testKey, new Integer(1));
  try {
    ResultCollector rs = execute(dataSet, testKeysSet, Boolean.TRUE, function, isByName);
    assertEquals(Boolean.TRUE, ((List) rs.getResult()).get(0));
    ResultCollector rs2 = execute(dataSet, testKeysSet, testKey, function, isByName);
    assertEquals(new Integer(1), ((List) rs2.getResult()).get(0));
    HashMap putData = new HashMap();
    putData.put(testKey + "1", new Integer(2));
    putData.put(testKey + "2", new Integer(3));
    ResultCollector rs1 = execute(dataSet, testKeysSet, putData, function, isByName);
    assertEquals(Boolean.TRUE, ((List) rs1.getResult()).get(0));
    assertEquals(new Integer(2), region.get(testKey + "1"));
    assertEquals(new Integer(3), region.get(testKey + "2"));
  } catch (Exception ex) {
    ex.printStackTrace();
    LogWriterUtils.getLogWriter().info("Exception : ", ex);
    Assert.fail("Test failed after the put operation", ex);
  }
}
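The execute(...) helper called above is defined elsewhere in the test class and is not shown on this page. A minimal sketch of what such a helper might look like follows, assuming it simply switches between execution by registered id and by instance; ExecutionHelper and its signature are a hypothetical reconstruction, not the actual test code.

import java.io.Serializable;
import java.util.Set;

import org.apache.geode.cache.execute.Execution;
import org.apache.geode.cache.execute.Function;
import org.apache.geode.cache.execute.ResultCollector;

public class ExecutionHelper {

  // Applies the filter and arguments, then runs the function either by its
  // registered id (isByName == true) or by instance.
  public static ResultCollector execute(Execution dataSet, Set filter, Serializable args,
      Function function, Boolean isByName) throws Exception {
    Execution execution = dataSet.withFilter(filter).setArguments(args);
    return isByName ? execution.execute(function.getId()) : execution.execute(function);
  }
}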
Use of org.apache.geode.cache.execute.ResultCollector in project geode by apache.
Class PRClientServerRegionFunctionExecutionSingleHopDUnitTest, method serverMultiKeyExecutionSocketTimeOut().
public static void serverMultiKeyExecutionSocketTimeOut(Boolean isByName) {
  Region region = cache.getRegion(PartitionedRegionName);
  assertNotNull(region);
  final HashSet testKeysSet = new HashSet();
  for (int i = (totalNumBuckets.intValue() * 2); i > 0; i--) {
    testKeysSet.add("execKey-" + i);
  }
  DistributedSystem.setThreadsSocketPolicy(false);
  Function function = new TestFunction(true, TestFunction.TEST_FUNCTION_SOCKET_TIMEOUT);
  FunctionService.registerFunction(function);
  Execution dataSet = FunctionService.onRegion(region);
  try {
    int j = 0;
    HashSet origVals = new HashSet();
    for (Iterator i = testKeysSet.iterator(); i.hasNext(); ) {
      Integer val = new Integer(j++);
      origVals.add(val);
      region.put(i.next(), val);
    }
    List l = null;
    ResultCollector rc1 = execute(dataSet, testKeysSet, Boolean.TRUE, function, isByName);
    l = ((List) rc1.getResult());
    LogWriterUtils.getLogWriter().info("Result size : " + l.size());
    assertEquals(3, l.size());
    for (Iterator i = l.iterator(); i.hasNext(); ) {
      assertEquals(Boolean.TRUE, i.next());
    }
  } catch (Exception e) {
    Assert.fail("Test failed after the function execution", e);
  }
}
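This test exercises a slow, socket-timeout prone function and blocks on getResult() with no bound. A minimal sketch follows, assuming the two-argument ResultCollector.getResult(timeout, unit) overload, of how a caller could bound the wait instead; TimedResultFetch, fetchWithTimeout and the 60-second value are illustrative choices, not part of the test.

import java.util.List;
import java.util.Set;
import java.util.concurrent.TimeUnit;

import org.apache.geode.cache.Region;
import org.apache.geode.cache.execute.Function;
import org.apache.geode.cache.execute.FunctionService;
import org.apache.geode.cache.execute.ResultCollector;

public class TimedResultFetch {

  // Waits at most 60 seconds for all members to finish instead of blocking indefinitely.
  public static List fetchWithTimeout(Region region, Set filterKeys, Function function)
      throws InterruptedException {
    ResultCollector rc = FunctionService.onRegion(region)
        .withFilter(filterKeys)
        .setArguments(Boolean.TRUE)
        .execute(function);
    // getResult throws FunctionException if the execution fails on any member.
    return (List) rc.getResult(60, TimeUnit.SECONDS);
  }
}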