Example use of org.apache.geode.cache.execute.Execution in the Apache Geode project,
from class PRFunctionExecutionWithResultSenderDUnitTest, method testExecutionOnAllNodes_byName.
/**
 * Verifies that a registered function ({@code TEST_FUNCTION9}) executed by name on a
 * partitioned region runs on all four datastore nodes and that every node returns
 * {@code Boolean.TRUE}.
 *
 * <p>Setup: four datastore VMs each create the same PR (redundancy 0, 17 total buckets)
 * and register the function. One VM then seeds three entries per bucket, executes the
 * function region-wide, and asserts on the collected results.
 *
 * @throws Exception if region creation or remote invocation fails
 */
@Test
public void testExecutionOnAllNodes_byName() throws Exception {
  final String rName = getUniqueName();
  Host host = Host.getHost(0);
  final VM datastore0 = host.getVM(0);
  final VM datastore1 = host.getVM(1);
  final VM datastore2 = host.getVM(2);
  final VM datastore3 = host.getVM(3);
  getCache();
  // Creates the PR on the invoking VM and registers the test function there.
  SerializableCallable dataStoreCreate = new SerializableCallable("Create PR with Function Factory") {
    public Object call() throws Exception {
      RegionAttributes ra = PartitionedRegionTestHelper.createRegionAttrsForPR(0, 10);
      AttributesFactory raf = new AttributesFactory(ra);
      PartitionAttributesImpl pa = new PartitionAttributesImpl();
      pa.setAll(ra.getPartitionAttributes());
      // 17 buckets so that the 3x-bucket key count below guarantees data in every bucket.
      pa.setTotalNumBuckets(17);
      raf.setPartitionAttributes(pa);
      getCache().createRegion(rName, raf.create());
      Function function = new TestFunction(true, TestFunction.TEST_FUNCTION9);
      FunctionService.registerFunction(function);
      return Boolean.TRUE;
    }
  };
  datastore0.invoke(dataStoreCreate);
  datastore1.invoke(dataStoreCreate);
  datastore2.invoke(dataStoreCreate);
  datastore3.invoke(dataStoreCreate);
  Object o = datastore3.invoke(new SerializableCallable("Create data, invoke exectuable") {
    public Object call() throws Exception {
      PartitionedRegion pr = (PartitionedRegion) getCache().getRegion(rName);
      DistributedSystem.setThreadsSocketPolicy(false);
      // Seed 3 keys per bucket so every bucket is guaranteed to hold data.
      final HashSet testKeys = new HashSet();
      for (int i = (pr.getTotalNumberOfBuckets() * 3); i > 0; i--) {
        testKeys.add("execKey-" + i);
      }
      int j = 0;
      for (Iterator i = testKeys.iterator(); i.hasNext(); ) {
        Integer val = Integer.valueOf(j++);
        pr.put(i.next(), val);
      }
      // Assert there is data in each bucket
      for (int bid = 0; bid < pr.getTotalNumberOfBuckets(); bid++) {
        assertTrue(pr.getBucketKeys(bid).size() > 0);
      }
      Function function = new TestFunction(true, TestFunction.TEST_FUNCTION9);
      FunctionService.registerFunction(function);
      Execution dataSet = FunctionService.onRegion(pr);
      // Execute by registered id; expect one Boolean.TRUE per datastore node.
      ResultCollector rc1 = dataSet.setArguments(Boolean.TRUE).execute(function.getId());
      List l = ((List) rc1.getResult());
      LogWriterUtils.getLogWriter().info("PRFunctionExecutionDUnitTest#testExecutionOnAllNodes_byName : Result size :" + l.size() + " Result : " + l);
      assertEquals(4, l.size());
      // BUG FIX: the original loop called l.iterator().next() on each pass, which
      // always re-created the iterator and so only ever inspected the FIRST element.
      // Iterate the list itself so all four node results are verified.
      for (Object result : l) {
        assertEquals(Boolean.TRUE, result);
      }
      return Boolean.TRUE;
    }
  });
  assertEquals(Boolean.TRUE, o);
}
Example use of org.apache.geode.cache.execute.Execution in the Apache Geode project,
from the test class containing helper TimeKeeper, method partitionedRegionTest.
/**
 * Compares the latency of plain {@code Region.get()} calls against single-key
 * function execution ({@code FunctionService.onRegion(...).withFilter(...)}) over a
 * partitioned region, logging the elapsed time of each approach.
 *
 * <p>Phases: (1) seed the region with Date keys on vm0; (2) warm up both code paths;
 * (3) time the get() loop; (4) time the function-execution loop.
 *
 * @param prName      name of the partitioned region, which must already exist on vm0
 * @param noOfEntries number of passes over the 12 calendar months when seeding data
 *                    (so roughly {@code noOfEntries * 12} keys are created)
 */
public void partitionedRegionTest(final String prName, final int noOfEntries) {
/*
 * Do put() operations through VM with PR having both Accessor and Datastore
 */
vm0.invoke(new CacheSerializableRunnable("doPutCreateInvalidateOperations1") {
public void run2() throws CacheException {
Calendar cal = Calendar.getInstance();
final Region pr = cache.getRegion(prName);
if (pr == null) {
fail(prName + " not created");
}
int size = 0;
size = pr.size();
// The region must start empty; the keys recorded in listOfKeys drive later phases.
assertEquals("Size doesnt return expected value", 0, size);
assertEquals("isEmpty doesnt return proper state of the PartitionedRegion", true, pr.isEmpty());
assertEquals(0, pr.keySet().size());
int entries = noOfEntries;
while (entries > 0) {
// One Date key per month; year and day are randomized to spread keys out.
// NOTE(review): random year/day pairs could theoretically collide, producing
// fewer distinct keys than puts — confirm this is acceptable for the timing test.
for (int i = 0; i <= 11; i++) {
int yr = (new Integer((int) (Math.random() * 2100))).intValue();
int month = i;
int date = (new Integer((int) (Math.random() * 30))).intValue();
cal.set(yr, month, date);
Object key = cal.getTime();
listOfKeys.add(key);
assertNotNull(pr);
// pr.put(key, Integer.toString(i));
pr.put(key, valueArray);
// assertIndexDetailsEquals(valueArray, pr.get(key));
}
entries--;
}
}
});
vm0.invoke(new CacheSerializableRunnable("verifyKeysonVM0") {
public void run2() throws CacheException {
// Calendar cal = Calendar.getInstance();
final PartitionedRegion pr = (PartitionedRegion) cache.getRegion(prName);
if (pr == null) {
fail(prName + " not created");
}
// Sanity check: every seeded key must be locatable in the PR.
Iterator itr = listOfKeys.iterator();
while (itr.hasNext()) {
assertTrue(searchForKey(pr, (Date) itr.next()));
}
// Intitial warm up phase ..Do a get of all the keys
// Iterate over the key and try to get all the values repetitively
itr = listOfKeys.iterator();
ArrayList vals = new ArrayList();
while (itr.hasNext()) {
Object val = pr.get(itr.next());
assertNotNull(val);
vals.add(val);
// assertTrue(searchForKey(pr, (Date)itr.next()));
}
// Call the execute method for each key
PerformanceTestFunction function = new PerformanceTestFunction();
FunctionService.registerFunction(function);
DefaultResultCollector drc = new DefaultResultCollector();
// final Set allKeysSet = new HashSet();
// Single-element filter set, reused (cleared) for each key in turn.
final Set singleKeySet = new HashSet();
Execution dataSet = FunctionService.onRegion(pr);
vals.clear();
ArrayList list = new ArrayList();
itr = listOfKeys.iterator();
while (itr.hasNext()) {
singleKeySet.add(itr.next());
dataSet = dataSet.withFilter(singleKeySet);
try {
ResultCollector rc = dataSet.execute(function.getId());
list = (ArrayList) rc.getResult();
} catch (Exception ex) {
LogWriterUtils.getLogWriter().info("Exception Occurred :" + ex.getMessage());
Assert.fail("Test failed", ex);
}
Object val = list.get(0);
assertNotNull(val);
vals.add(val);
singleKeySet.clear();
// assertTrue(searchForKey(pr, (Date)itr.next()));
}
assertEquals(vals.size(), listOfKeys.size());
// END: warmup
// Now start the performance count
// Timed phase 1: plain Region.get() over every key.
itr = listOfKeys.iterator();
TimeKeeper t = new TimeKeeper();
vals.clear();
t.start();
// ArrayList vals = new ArrayList();
while (itr.hasNext()) {
Object val = pr.get(itr.next());
assertNotNull(val);
vals.add(val);
// assertTrue(searchForKey(pr, (Date)itr.next()));
}
t.stop();
LogWriterUtils.getLogWriter().info("Time taken to iterate over " + vals.size() + " no. of keys: " + t.getTimeInMs() + " ms");
// Call the execute method for each key and see if this takes more time
// Timed phase 2: single-key function execution over every key.
vals.clear();
t = new TimeKeeper();
t.start();
// ArrayList list = new ArrayList();
itr = listOfKeys.iterator();
while (itr.hasNext()) {
singleKeySet.add(itr.next());
dataSet = dataSet.withFilter(singleKeySet);
try {
ResultCollector rc = dataSet.execute(function.getId());
list = (ArrayList) rc.getResult();
} catch (Exception expected) {
// No data should cause exec to throw
// NOTE(review): exceptions are swallowed here; if execute() throws, 'list'
// still holds the PREVIOUS iteration's result, so the assertions below can
// pass spuriously — confirm this is the intended best-effort behavior.
}
Object val = list.get(0);
assertNotNull(val);
vals.add(val);
singleKeySet.clear();
}
t.stop();
assertEquals(vals.size(), listOfKeys.size());
LogWriterUtils.getLogWriter().info("Time taken to iterate over " + vals.size() + " no. of keys using FunctionExecution: " + t.getTimeInMs() + " ms");
}
});
}
Example use of org.apache.geode.cache.execute.Execution in the Apache Geode project,
from class ExportLogsCommandTest, method sizeOKOnMember_sizeChecksEnabled_doesNotThrow.
/**
 * With size checks enabled, an estimated export that fits the configured limits must
 * complete {@code checkIfExportLogsOverflowsDisk} without throwing (per the test name;
 * the method would throw on overflow).
 */
@Test
public void sizeOKOnMember_sizeChecksEnabled_doesNotThrow() throws Exception {
  // Collaborators are plain mocks; only their identities matter for this path.
  final Cache cache = mock(Cache.class);
  final DistributedMember member = mock(DistributedMember.class);
  final Execution executor = mock(Execution.class);
  final ExportLogsCommand command = createExportLogsCommand(cache, member, executor);
  // Same argument values as before: member name, disk figure of 10 MB, and two
  // limits just around one MEGABYTE. Completing normally is the assertion.
  command.checkIfExportLogsOverflowsDisk("clusterMember", 10 * MEGABYTE, MEGABYTE - 1024, MEGABYTE);
}
Example use of org.apache.geode.cache.execute.Execution in the Apache Geode project,
from class DiskStoreCommandsJUnitTest, method testGetDiskStoreDescription.
/**
 * Happy path for {@code getDiskStoreDescription}: the command forwards the disk-store
 * name to the function executor, runs {@code DescribeDiskStoreFunction}, and returns
 * the single {@code DiskStoreDetails} produced by the result collector.
 */
@Test
public void testGetDiskStoreDescription() {
  final String diskStoreName = "mockDiskStore";
  final String memberId = "mockMember";
  // jMock collaborators; the string labels name the mocks in failure output.
  final InternalCache cache = mockContext.mock(InternalCache.class, "InternalCache");
  final DistributedMember member = mockContext.mock(DistributedMember.class, "DistributedMember");
  final Execution executor = mockContext.mock(Execution.class, "Function Executor");
  final ResultCollector collector = mockContext.mock(ResultCollector.class, "ResultCollector");
  final DiskStoreDetails expectedDetails = createDiskStoreDetails(memberId, diskStoreName);
  // Script the interaction: member is matched by id (name is null), the executor is
  // primed with the disk-store name, and the collector yields the expected details.
  mockContext.checking(new Expectations() {
    {
      oneOf(member).getName();
      will(returnValue(null));
      oneOf(member).getId();
      will(returnValue(memberId));
      oneOf(executor).setArguments(with(equal(diskStoreName)));
      will(returnValue(executor));
      oneOf(executor).execute(with(aNonNull(DescribeDiskStoreFunction.class)));
      will(returnValue(collector));
      oneOf(collector).getResult();
      will(returnValue(Arrays.asList(expectedDetails)));
    }
  });
  final DiskStoreCommands commands = createDiskStoreCommands(cache, member, executor);
  final DiskStoreDetails actualDetails = commands.getDiskStoreDescription(memberId, diskStoreName);
  assertNotNull(actualDetails);
  assertEquals(expectedDetails, actualDetails);
}
Example use of org.apache.geode.cache.execute.Execution in the Apache Geode project,
from class DiskStoreCommandsJUnitTest, method testGetDiskStoreDescriptionThrowsRuntimeException.
/**
 * Failure path for {@code getDiskStoreDescription}: when the function executor throws a
 * {@code RuntimeException}, the command propagates it unchanged. The try/catch re-throws
 * after checking the message so {@code @Test(expected = ...)} still sees the exception.
 */
@Test(expected = RuntimeException.class)
public void testGetDiskStoreDescriptionThrowsRuntimeException() {
  final String diskStoreName = "mockDiskStore";
  final String memberId = "mockMember";
  // jMock collaborators; the string labels name the mocks in failure output.
  final InternalCache cache = mockContext.mock(InternalCache.class, "InternalCache");
  final DistributedMember member = mockContext.mock(DistributedMember.class, "DistributedMember");
  final Execution executor = mockContext.mock(Execution.class, "Function Executor");
  // Script the interaction up to execute(), which blows up with the expected exception.
  mockContext.checking(new Expectations() {
    {
      oneOf(member).getName();
      will(returnValue(null));
      oneOf(member).getId();
      will(returnValue(memberId));
      oneOf(executor).setArguments(with(equal(diskStoreName)));
      will(returnValue(executor));
      oneOf(executor).execute(with(aNonNull(DescribeDiskStoreFunction.class)));
      will(throwException(new RuntimeException("expected")));
    }
  });
  final DiskStoreCommands commands = createDiskStoreCommands(cache, member, executor);
  try {
    commands.getDiskStoreDescription(memberId, diskStoreName);
  } catch (RuntimeException expected) {
    // Verify it is our scripted exception, then let the annotation handle it.
    assertEquals("expected", expected.getMessage());
    throw expected;
  }
}
Aggregations