Use of org.apache.geode.internal.cache.PartitionAttributesImpl in project geode by apache.
The class PRFunctionExecutionDUnitTest, method testRemoteSingleKeyExecution_byName.
/**
* Test remote execution by a pure accessor which doesn't have the function factory present.
*/
@Test
public void testRemoteSingleKeyExecution_byName() throws Exception {
  final String rName = getUniqueName();
  Host host = Host.getHost(0);
  final VM accessor = host.getVM(2);
  final VM datastore = host.getVM(3);
  getCache();
  accessor.invoke(new SerializableCallable("Create PR") {
    public Object call() throws Exception {
      RegionAttributes ra = PartitionedRegionTestHelper.createRegionAttrsForPR(0, 0);
      getCache().createRegion(rName, ra);
      return Boolean.TRUE;
    }
  });
  datastore.invoke(new SerializableCallable("Create PR with Function Factory") {
    public Object call() throws Exception {
      RegionAttributes ra = PartitionedRegionTestHelper.createRegionAttrsForPR(0, 10);
      AttributesFactory raf = new AttributesFactory(ra);
      PartitionAttributesImpl pa = new PartitionAttributesImpl();
      pa.setAll(ra.getPartitionAttributes());
      raf.setPartitionAttributes(pa);
      getCache().createRegion(rName, raf.create());
      Function function = new TestFunction(true, TEST_FUNCTION2);
      FunctionService.registerFunction(function);
      return Boolean.TRUE;
    }
  });
  accessor.invoke(new SerializableCallable("Create data, invoke executable") {
    public Object call() throws Exception {
      PartitionedRegion pr = (PartitionedRegion) getCache().getRegion(rName);
      final String testKey = "execKey";
      final Set testKeysSet = new HashSet();
      testKeysSet.add(testKey);
      DistributedSystem.setThreadsSocketPolicy(false);
      Function function = new TestFunction(true, TEST_FUNCTION2);
      FunctionService.registerFunction(function);
      Execution dataSet = FunctionService.onRegion(pr);
      try {
        dataSet.withFilter(testKeysSet).setArguments(Boolean.TRUE).execute(function.getId());
      } catch (Exception expected) {
        // With no data in the region, execution should throw
        assertTrue(expected.getMessage().contains("No target node found for KEY = " + testKey));
      }
      pr.put(testKey, new Integer(1));
      ResultCollector rs1 = dataSet.withFilter(testKeysSet).setArguments(Boolean.TRUE).execute(function.getId());
      assertEquals(Boolean.TRUE, ((List) rs1.getResult()).get(0));
      ResultCollector rs2 = dataSet.withFilter(testKeysSet).setArguments(testKey).execute(function.getId());
      assertEquals(new Integer(1), ((List) rs2.getResult()).get(0));
      HashMap putData = new HashMap();
      putData.put(testKey + "1", new Integer(2));
      putData.put(testKey + "2", new Integer(3));
      ResultCollector rs3 = dataSet.withFilter(testKeysSet).setArguments(putData).execute(function.getId());
      assertEquals(Boolean.TRUE, ((List) rs3.getResult()).get(0));
      assertEquals(new Integer(2), pr.get(testKey + "1"));
      assertEquals(new Integer(3), pr.get(testKey + "2"));
      return Boolean.TRUE;
    }
  });
}
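For readers who want to reproduce this setup outside the DUnit test harness: the test reaches for the internal PartitionAttributesImpl only to copy the attributes produced by the test helper. In application code the same accessor/datastore split is normally expressed with the public PartitionAttributesFactory. A minimal sketch, assuming a Cache already exists; the class, method, and region names below are illustrative and not part of the test:

import org.apache.geode.cache.AttributesFactory;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.PartitionAttributesFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionAttributes;

public class PartitionedRegionSetupSketch {

  // localMaxMemory = 0 creates a pure accessor (hosts no buckets);
  // a non-zero value creates a datastore that can host buckets.
  static Region<Object, Object> createPartitionedRegion(Cache cache, String name,
      int redundantCopies, int localMaxMemory) {
    PartitionAttributesFactory<Object, Object> paf = new PartitionAttributesFactory<>();
    paf.setRedundantCopies(redundantCopies);
    paf.setLocalMaxMemory(localMaxMemory);
    AttributesFactory<Object, Object> af = new AttributesFactory<>();
    af.setPartitionAttributes(paf.create());
    RegionAttributes<Object, Object> attrs = af.create();
    return cache.createRegion(name, attrs);
  }
}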
Use of org.apache.geode.internal.cache.PartitionAttributesImpl in project geode by apache.
The class PRFunctionExecutionDUnitTest, method testMultiKeyExecutionOnASingleBucket_byName.
/**
* Ensure that the execution is limited to a single bucket; put another way, verify that the
* routing logic works correctly so that there is no extra execution.
*/
@Test
public void testMultiKeyExecutionOnASingleBucket_byName() throws Exception {
  final String rName = getUniqueName();
  Host host = Host.getHost(0);
  final VM datastore0 = host.getVM(0);
  final VM datastore1 = host.getVM(1);
  final VM datastore2 = host.getVM(2);
  final VM datastore3 = host.getVM(3);
  getCache();
  SerializableCallable dataStoreCreate = new SerializableCallable("Create PR with Function Factory") {
    public Object call() throws Exception {
      RegionAttributes ra = PartitionedRegionTestHelper.createRegionAttrsForPR(0, 10);
      AttributesFactory raf = new AttributesFactory(ra);
      PartitionAttributesImpl pa = new PartitionAttributesImpl();
      pa.setAll(ra.getPartitionAttributes());
      pa.setTotalNumBuckets(17);
      raf.setPartitionAttributes(pa);
      getCache().createRegion(rName, raf.create());
      Function function = new TestFunction(true, TEST_FUNCTION2);
      FunctionService.registerFunction(function);
      return Boolean.TRUE;
    }
  };
  datastore0.invoke(dataStoreCreate);
  datastore1.invoke(dataStoreCreate);
  datastore2.invoke(dataStoreCreate);
  datastore3.invoke(dataStoreCreate);
  Object o = datastore3.invoke(new SerializableCallable("Create data, invoke executable") {
    public Object call() throws Exception {
      PartitionedRegion pr = (PartitionedRegion) getCache().getRegion(rName);
      DistributedSystem.setThreadsSocketPolicy(false);
      final HashSet testKeys = new HashSet();
      for (int i = (pr.getTotalNumberOfBuckets() * 3); i > 0; i--) {
        testKeys.add("execKey-" + i);
      }
      int j = 0;
      for (Iterator i = testKeys.iterator(); i.hasNext();) {
        Integer val = new Integer(j++);
        pr.put(i.next(), val);
      }
      // Assert there is data in each bucket
      for (int bid = 0; bid < pr.getTotalNumberOfBuckets(); bid++) {
        assertTrue(pr.getBucketKeys(bid).size() > 0);
      }
      for (Iterator kiter = testKeys.iterator(); kiter.hasNext();) {
        Set singleKeySet = Collections.singleton(kiter.next());
        Function function = new TestFunction(true, TEST_FUNCTION2);
        FunctionService.registerFunction(function);
        Execution dataSet = FunctionService.onRegion(pr);
        ResultCollector rc1 = dataSet.withFilter(singleKeySet).setArguments(Boolean.TRUE).execute(function.getId());
        List l = ((List) rc1.getResult());
        assertEquals(1, l.size());
        assertEquals(Boolean.TRUE, l.iterator().next());
        // DefaultResultCollector rc2 = new DefaultResultCollector();
        ResultCollector rc2 = dataSet.withFilter(singleKeySet).setArguments(new HashSet(singleKeySet)).execute(function.getId());
        List l2 = ((List) rc2.getResult());
        assertEquals(1, l2.size());
        List subList = (List) l2.iterator().next();
        assertEquals(1, subList.size());
        assertEquals(pr.get(singleKeySet.iterator().next()), subList.iterator().next());
      }
      return Boolean.TRUE;
    }
  });
  assertEquals(Boolean.TRUE, o);
}
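The TestFunction class registered above is part of Geode's test tree and its body is not shown on this page. Purely as an assumption reconstructed from the assertions in these tests (echo a Boolean argument, look up a single key, look up a set of keys, or apply a map of puts), a function with similar behavior might look like the following sketch; it is not the actual TEST_FUNCTION2 implementation:

import java.util.ArrayList;
import java.util.Map;
import java.util.Set;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.execute.Function;
import org.apache.geode.cache.execute.FunctionContext;
import org.apache.geode.cache.execute.RegionFunctionContext;
import org.apache.geode.cache.partition.PartitionRegionHelper;

public class EchoingRegionFunction implements Function {

  public void execute(FunctionContext context) {
    RegionFunctionContext rfc = (RegionFunctionContext) context;
    Region localData = PartitionRegionHelper.getLocalDataForContext(rfc);
    Object args = rfc.getArguments();
    if (args instanceof Boolean) {
      // Echo the boolean back to the caller.
      context.getResultSender().lastResult(args);
    } else if (args instanceof Map) {
      // Apply the puts, then acknowledge.
      localData.putAll((Map) args);
      context.getResultSender().lastResult(Boolean.TRUE);
    } else if (args instanceof Set) {
      // Return the values for the requested keys as a single list.
      ArrayList values = new ArrayList();
      for (Object key : (Set) args) {
        values.add(localData.get(key));
      }
      context.getResultSender().lastResult(values);
    } else {
      // A single key: return its value.
      context.getResultSender().lastResult(localData.get(args));
    }
  }

  public String getId() {
    return "EchoingRegionFunction";
  }

  public boolean hasResult() {
    return true;
  }

  public boolean isHA() {
    return false;
  }

  public boolean optimizeForWrite() {
    return true; // the function writes to the region when given a map of puts
  }
}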
Use of org.apache.geode.internal.cache.PartitionAttributesImpl in project geode by apache.
The class PRFunctionExecutionDUnitTest, method testRemoteMultiKeyExecutionHA_Disconnect.
@Test
public void testRemoteMultiKeyExecutionHA_Disconnect() throws Exception {
  final String rName = getUniqueName();
  Host host = Host.getHost(0);
  final VM accessor = host.getVM(3);
  final VM datastore0 = host.getVM(0);
  final VM datastore1 = host.getVM(1);
  final VM datastore2 = host.getVM(2);
  accessor.invoke(new SerializableCallable("Create PR") {
    public Object call() throws Exception {
      cache = getCache();
      RegionAttributes ra = PartitionedRegionTestHelper.createRegionAttrsForPR(1, 0);
      cache.createRegion(rName, ra);
      regionName = rName;
      return Boolean.TRUE;
    }
  });
  SerializableCallable dataStoreCreate = new SerializableCallable("Create PR with Function Factory") {
    public Object call() throws Exception {
      RegionAttributes ra = PartitionedRegionTestHelper.createRegionAttrsForPR(1, 10);
      AttributesFactory raf = new AttributesFactory(ra);
      PartitionAttributesImpl pa = new PartitionAttributesImpl();
      pa.setAll(ra.getPartitionAttributes());
      raf.setPartitionAttributes(pa);
      getCache().createRegion(rName, raf.create());
      Function function = new TestFunction(true, TestFunction.TEST_FUNCTION_HA);
      FunctionService.registerFunction(function);
      return Boolean.TRUE;
    }
  };
  datastore0.invoke(dataStoreCreate);
  datastore1.invoke(dataStoreCreate);
  datastore2.invoke(dataStoreCreate);
  Object o = accessor.invoke(new SerializableCallable("Create data") {
    public Object call() throws Exception {
      PartitionedRegion pr = (PartitionedRegion) cache.getRegion(regionName);
      final HashSet testKeysSet = new HashSet();
      for (int i = (pr.getTotalNumberOfBuckets() * 2); i > 0; i--) {
        testKeysSet.add("execKey-" + i);
      }
      int j = 0;
      HashSet origVals = new HashSet();
      for (Iterator i = testKeysSet.iterator(); i.hasNext();) {
        Integer val = new Integer(j++);
        origVals.add(val);
        pr.put(i.next(), val);
      }
      return Boolean.TRUE;
    }
  });
  assertEquals(Boolean.TRUE, o);
  int AsyncInvocationArrSize = 1;
  AsyncInvocation[] async = new AsyncInvocation[AsyncInvocationArrSize];
  async[0] = accessor.invokeAsync(() -> PRFunctionExecutionDUnitTest.executeFunction());
  o = datastore0.invoke(new SerializableCallable("disconnect") {
    public Object call() throws Exception {
      long startTime = System.currentTimeMillis();
      // done() never returns true, so this simply waits out the full 3 seconds
      // before disconnecting the datastore while the function is still running.
      WaitCriterion wc = new WaitCriterion() {
        String excuse;

        public boolean done() {
          return false;
        }

        public String description() {
          return excuse;
        }
      };
      Wait.waitForCriterion(wc, 3000, 200, false);
      long endTime = System.currentTimeMillis();
      getCache().getLogger().fine("Time wait for Cache Close = " + (endTime - startTime));
      getCache().getDistributedSystem().disconnect();
      return Boolean.TRUE;
    }
  });
  assertEquals(Boolean.TRUE, o);
  ThreadUtils.join(async[0], 60 * 1000);
  if (async[0].getException() != null) {
    Assert.fail("Unexpected exception occurred: ", async[0].getException());
  }
  List l = (List) async[0].getReturnValue();
  assertEquals(2, l.size());
}
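Neither TEST_FUNCTION_HA nor the executeFunction() helper invoked asynchronously above is shown on this page. As a hedged sketch of the two properties an HA function needs — isHA() returning true so Geode retries the execution after the datastore disconnects, and isPossibleDuplicate() so the function body can recognize a retry — consider the following; the class name and id are illustrative:

import org.apache.geode.cache.execute.Function;
import org.apache.geode.cache.execute.FunctionContext;

public class HighlyAvailableFunction implements Function {

  public void execute(FunctionContext context) {
    if (context.isPossibleDuplicate()) {
      // This member is re-executing the function because another member failed mid-run;
      // the work below must therefore be safe to repeat.
    }
    context.getResultSender().lastResult(Boolean.TRUE);
  }

  public String getId() {
    return "HighlyAvailableFunction";
  }

  public boolean hasResult() {
    return true; // HA re-execution requires hasResult() to be true as well
  }

  public boolean isHA() {
    return true;
  }

  public boolean optimizeForWrite() {
    return false;
  }
}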
Use of org.apache.geode.internal.cache.PartitionAttributesImpl in project geode by apache.
The class PRFunctionExecutionDUnitTest, method testBucketFilter_1.
/**
* Test bucketFilter functionality
*/
@Test
public void testBucketFilter_1() throws Exception {
  final String rName = getUniqueName();
  Host host = Host.getHost(0);
  final VM accessor = host.getVM(3);
  final VM datastore0 = host.getVM(0);
  final VM datastore1 = host.getVM(1);
  final VM datastore2 = host.getVM(2);
  getCache();
  accessor.invoke(new SerializableCallable("Create PR") {
    public Object call() throws Exception {
      PartitionResolver resolver = new BucketFilterPRResolver();
      RegionAttributes ra = PartitionedRegionTestHelper.createRegionAttrsForPR(0, 0, resolver);
      getCache().createRegion(rName, ra);
      return Boolean.TRUE;
    }
  });
  SerializableCallable dataStoreCreate = new SerializableCallable("Create PR with Function Factory") {
    public Object call() throws Exception {
      PartitionResolver resolver = new BucketFilterPRResolver();
      RegionAttributes ra = PartitionedRegionTestHelper.createRegionAttrsForPR(0, 10, resolver);
      AttributesFactory raf = new AttributesFactory(ra);
      PartitionAttributesImpl pa = new PartitionAttributesImpl();
      pa.setAll(ra.getPartitionAttributes());
      raf.setPartitionAttributes(pa);
      getCache().createRegion(rName, raf.create());
      Function function = new TestFunction(true, TestFunction.TEST_FUNCTION_BUCKET_FILTER);
      FunctionService.registerFunction(function);
      return Boolean.TRUE;
    }
  };
  datastore0.invoke(dataStoreCreate);
  datastore1.invoke(dataStoreCreate);
  datastore2.invoke(dataStoreCreate);
  Object o = accessor.invoke(new SerializableCallable("Create data") {
    public Object call() throws Exception {
      PartitionedRegion pr = (PartitionedRegion) getCache().getRegion(rName);
      for (int i = 0; i < 50; ++i) {
        pr.put(i, i);
      }
      return Boolean.TRUE;
    }
  });
  assertEquals(Boolean.TRUE, o);
  o = accessor.invoke(new SerializableCallable("Execute function single filter") {
    public Object call() throws Exception {
      PartitionedRegion pr = (PartitionedRegion) getCache().getRegion(rName);
      Function function = new TestFunction(true, TestFunction.TEST_FUNCTION_BUCKET_FILTER);
      FunctionService.registerFunction(function);
      InternalExecution dataSet = (InternalExecution) FunctionService.onRegion(pr);
      Set<Integer> bucketSet = new HashSet<Integer>();
      bucketSet.add(2);
      ResultCollector<Integer, List<Integer>> rc = dataSet.withBucketFilter(bucketSet).execute(function);
      List<Integer> results = rc.getResult();
      assertEquals(bucketSet.size(), results.size());
      for (Integer bucket : results) {
        bucketSet.remove(bucket);
      }
      assertTrue(bucketSet.isEmpty());
      return Boolean.TRUE;
    }
  });
  assertEquals(Boolean.TRUE, o);
  o = accessor.invoke(new SerializableCallable("Execute function multiple filter") {
    public Object call() throws Exception {
      PartitionedRegion pr = (PartitionedRegion) getCache().getRegion(rName);
      Function function = new TestFunction(true, TestFunction.TEST_FUNCTION_BUCKET_FILTER);
      FunctionService.registerFunction(function);
      InternalExecution dataSet = (InternalExecution) FunctionService.onRegion(pr);
      Set<Integer> bucketSet = new HashSet<Integer>();
      bucketSet.add(2);
      bucketSet.add(3);
      ResultCollector<Integer, List<Integer>> rc = dataSet.withBucketFilter(bucketSet).execute(function);
      List<Integer> results = rc.getResult();
      assertEquals(bucketSet.size(), results.size());
      for (Integer bucket : results) {
        bucketSet.remove(bucket);
      }
      assertTrue(bucketSet.isEmpty());
      return Boolean.TRUE;
    }
  });
  assertEquals(Boolean.TRUE, o);
  o = accessor.invoke(new SerializableCallable("Execute function multiple filter") {
    public Object call() throws Exception {
      PartitionedRegion pr = (PartitionedRegion) getCache().getRegion(rName);
      Function function = new TestFunction(true, TestFunction.TEST_FUNCTION_BUCKET_FILTER);
      FunctionService.registerFunction(function);
      InternalExecution dataSet = (InternalExecution) FunctionService.onRegion(pr);
      Set<Integer> bucketSet = new HashSet<Integer>();
      bucketSet.add(1);
      bucketSet.add(2);
      bucketSet.add(3);
      bucketSet.add(0);
      bucketSet.add(4);
      ResultCollector<Integer, List<Integer>> rc = dataSet.withBucketFilter(bucketSet).execute(function);
      List<Integer> results = rc.getResult();
      assertEquals(bucketSet.size(), results.size());
      for (Integer bucket : results) {
        bucketSet.remove(bucket);
      }
      getCache().getLogger().info("results buckets=" + results);
      getCache().getLogger().info("bucketset=" + bucketSet);
      assertTrue(bucketSet.isEmpty());
      return Boolean.TRUE;
    }
  });
  assertEquals(Boolean.TRUE, o);
}
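The BucketFilterPRResolver used above is an inner class of the test and is not reproduced on this page. Assuming its intent is simply to group the integer keys 0..49 into a small number of buckets so that withBucketFilter() has something meaningful to select, a resolver along these lines would do; the divisor of 10 and the class name are assumptions, not the test's actual routing logic:

import org.apache.geode.cache.EntryOperation;
import org.apache.geode.cache.PartitionResolver;

public class GroupingPartitionResolver implements PartitionResolver<Integer, Integer> {

  public Object getRoutingObject(EntryOperation<Integer, Integer> opDetails) {
    // Keys 0-9 share one routing object, 10-19 the next, and so on,
    // so consecutive keys are co-located in the same bucket.
    return opDetails.getKey() / 10;
  }

  public String getName() {
    return "GroupingPartitionResolver";
  }

  public void close() {}
}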
Use of org.apache.geode.internal.cache.PartitionAttributesImpl in project geode by apache.
The class PRFunctionExecutionDUnitTest, method testLocalMultiKeyExecution_byName.
/**
* Test ability to execute a multi-key function by a local data store.
* ResultCollector = DefaultResultCollector; haveResult = true.
*/
@Test
public void testLocalMultiKeyExecution_byName() throws Exception {
  final String rName = getUniqueName();
  Host host = Host.getHost(0);
  VM localOnly = host.getVM(3);
  getCache();
  Object o = localOnly.invoke(new SerializableCallable("Create PR, validate local execution") {
    public Object call() throws Exception {
      RegionAttributes ra = PartitionedRegionTestHelper.createRegionAttrsForPR(0, 10);
      AttributesFactory raf = new AttributesFactory(ra);
      PartitionAttributesImpl pa = new PartitionAttributesImpl();
      pa.setAll(ra.getPartitionAttributes());
      raf.setPartitionAttributes(pa);
      PartitionedRegion pr = (PartitionedRegion) getCache().createRegion(rName, raf.create());
      final String testKey = "execKey";
      DistributedSystem.setThreadsSocketPolicy(false);
      // Function function = new TestFunction(true,"TestFunction2");
      Function function = new TestFunction(true, TestFunction.TEST_FUNCTION2);
      FunctionService.registerFunction(function);
      Execution dataSet = FunctionService.onRegion(pr);
      final HashSet testKeysSet = new HashSet();
      testKeysSet.add(testKey);
      try {
        dataSet.withFilter(testKeysSet).setArguments(Boolean.TRUE).execute(function.getId());
      } catch (Exception expected) {
        // With no data in the region, execution should throw
        assertTrue(expected.getMessage().contains("No target node found for KEY = " + testKey));
      }
      final HashSet testKeys = new HashSet();
      for (int i = (pr.getTotalNumberOfBuckets() * 2); i > 0; i--) {
        testKeys.add("execKey-" + i);
      }
      int j = 0;
      HashSet origVals = new HashSet();
      for (Iterator i = testKeys.iterator(); i.hasNext();) {
        Integer val = new Integer(j++);
        origVals.add(val);
        pr.put(i.next(), val);
      }
      ResultCollector rc1 = dataSet.withFilter(testKeys).setArguments(Boolean.TRUE).execute(function.getId());
      List l = ((List) rc1.getResult());
      // assertIndexDetailsEquals(pr.getTotalNumberOfBuckets(), l.size());
      assertEquals(1, l.size());
      for (Iterator i = l.iterator(); i.hasNext();) {
        assertEquals(Boolean.TRUE, i.next());
      }
      // DefaultResultCollector rc2 = new DefaultResultCollector();
      ResultCollector rc2 = dataSet.withFilter(testKeys).setArguments(testKeys).execute(function.getId());
      List l2 = ((List) rc2.getResult());
      assertEquals(1, l2.size());
      HashSet foundVals = new HashSet();
      for (Iterator i = l2.iterator(); i.hasNext();) {
        ArrayList subL = (ArrayList) i.next();
        assertTrue(subL.size() > 0);
        for (Iterator subI = subL.iterator(); subI.hasNext();) {
          assertTrue(foundVals.add(subI.next()));
        }
      }
      assertEquals(origVals, foundVals);
      return Boolean.TRUE;
    }
  });
  assertEquals(Boolean.TRUE, o);
}
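The Javadoc above mentions the DefaultResultCollector. As a rough sketch of how results are aggregated — and why a purely local, single-member execution yields a result list of size one — a minimal collector might look like the following; this is not the Geode-internal DefaultResultCollector:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
import org.apache.geode.cache.execute.FunctionException;
import org.apache.geode.cache.execute.ResultCollector;
import org.apache.geode.distributed.DistributedMember;

public class ListResultCollector implements ResultCollector<Object, List<Object>> {

  private final List<Object> results = new ArrayList<>();

  public synchronized void addResult(DistributedMember memberId, Object result) {
    results.add(result); // one entry per responding member (or per result that member sends)
  }

  public synchronized List<Object> getResult() throws FunctionException {
    return results;
  }

  public synchronized List<Object> getResult(long timeout, TimeUnit unit) throws FunctionException {
    return results; // this sketch ignores the timeout
  }

  public synchronized void endResults() {}

  public synchronized void clearResults() {
    results.clear();
  }
}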