Use of org.apache.geode.internal.cache.PartitionedRegion in project geode by apache, from the class PRFunctionExecutionWithResultSenderDUnitTest, method testExecutionOnAllNodes_byName.
/**
 * Verifies that a function registered and executed by name on a partitioned region runs on
 * all four datastore members, and that every member contributes a {@code Boolean.TRUE} result
 * through the result sender.
 */
@Test
public void testExecutionOnAllNodes_byName() throws Exception {
  final String rName = getUniqueName();
  Host host = Host.getHost(0);
  final VM datastore0 = host.getVM(0);
  final VM datastore1 = host.getVM(1);
  final VM datastore2 = host.getVM(2);
  final VM datastore3 = host.getVM(3);
  getCache();

  // Creates the PR (17 buckets, redundancy 0) and registers TEST_FUNCTION9 on each datastore.
  SerializableCallable dataStoreCreate =
      new SerializableCallable("Create PR with Function Factory") {
        public Object call() throws Exception {
          RegionAttributes ra = PartitionedRegionTestHelper.createRegionAttrsForPR(0, 10);
          AttributesFactory raf = new AttributesFactory(ra);
          PartitionAttributesImpl pa = new PartitionAttributesImpl();
          pa.setAll(ra.getPartitionAttributes());
          pa.setTotalNumBuckets(17);
          raf.setPartitionAttributes(pa);
          getCache().createRegion(rName, raf.create());
          Function function = new TestFunction(true, TestFunction.TEST_FUNCTION9);
          FunctionService.registerFunction(function);
          return Boolean.TRUE;
        }
      };
  datastore0.invoke(dataStoreCreate);
  datastore1.invoke(dataStoreCreate);
  datastore2.invoke(dataStoreCreate);
  datastore3.invoke(dataStoreCreate);

  Object o = datastore3.invoke(new SerializableCallable("Create data, invoke exectuable") {
    public Object call() throws Exception {
      PartitionedRegion pr = (PartitionedRegion) getCache().getRegion(rName);
      DistributedSystem.setThreadsSocketPolicy(false);
      // Three keys per bucket is enough to make it overwhelmingly likely every bucket
      // receives at least one entry; verified explicitly below.
      final HashSet testKeys = new HashSet();
      for (int i = (pr.getTotalNumberOfBuckets() * 3); i > 0; i--) {
        testKeys.add("execKey-" + i);
      }
      int j = 0;
      for (Iterator i = testKeys.iterator(); i.hasNext();) {
        pr.put(i.next(), Integer.valueOf(j++));
      }
      // Assert there is data in each bucket
      for (int bid = 0; bid < pr.getTotalNumberOfBuckets(); bid++) {
        assertTrue(pr.getBucketKeys(bid).size() > 0);
      }
      Function function = new TestFunction(true, TestFunction.TEST_FUNCTION9);
      FunctionService.registerFunction(function);
      Execution dataSet = FunctionService.onRegion(pr);
      ResultCollector rc1 = dataSet.setArguments(Boolean.TRUE).execute(function.getId());
      List l = ((List) rc1.getResult());
      LogWriterUtils.getLogWriter().info(
          "PRFunctionExecutionDUnitTest#testExecutionOnAllNodes_byName : Result size :"
              + l.size() + " Result : " + l);
      assertEquals(4, l.size());
      // BUG FIX: the original called l.iterator().next() inside the loop, creating a NEW
      // iterator each pass and therefore only ever checking the FIRST element. Iterate
      // once so every member's result is actually verified.
      for (Iterator itr = l.iterator(); itr.hasNext();) {
        assertEquals(Boolean.TRUE, itr.next());
      }
      return Boolean.TRUE;
    }
  });
  assertEquals(Boolean.TRUE, o);
}
Use of org.apache.geode.internal.cache.PartitionedRegion in project geode by apache, from the class TimeKeeper, method partitionedRegionTest.
/**
 * Micro-benchmark comparing plain {@code Region.get()} against per-key function execution on
 * a partitioned region.
 *
 * @param prName      name of the pre-created partitioned region
 * @param noOfEntries number of put() passes over the 12-month key space
 */
public void partitionedRegionTest(final String prName, final int noOfEntries) {
  /*
   * Do put() operations through VM with PR having both Accessor and Datastore
   */
  vm0.invoke(new CacheSerializableRunnable("doPutCreateInvalidateOperations1") {
    public void run2() throws CacheException {
      Calendar cal = Calendar.getInstance();
      final Region pr = cache.getRegion(prName);
      if (pr == null) {
        fail(prName + " not created");
      }
      // Region must start empty before we populate it.
      int size = 0;
      size = pr.size();
      assertEquals("Size doesnt return expected value", 0, size);
      assertEquals("isEmpty doesnt return proper state of the PartitionedRegion", true,
          pr.isEmpty());
      assertEquals(0, pr.keySet().size());
      int entries = noOfEntries;
      while (entries > 0) {
        // One Date key per month; random year/day spreads keys across buckets.
        for (int i = 0; i <= 11; i++) {
          int yr = (new Integer((int) (Math.random() * 2100))).intValue();
          int month = i;
          int date = (new Integer((int) (Math.random() * 30))).intValue();
          cal.set(yr, month, date);
          Object key = cal.getTime();
          listOfKeys.add(key);
          assertNotNull(pr);
          pr.put(key, valueArray);
        }
        entries--;
      }
    }
  });
  vm0.invoke(new CacheSerializableRunnable("verifyKeysonVM0") {
    public void run2() throws CacheException {
      final PartitionedRegion pr = (PartitionedRegion) cache.getRegion(prName);
      if (pr == null) {
        fail(prName + " not created");
      }
      // Every key we put must be locatable in the PR.
      Iterator itr = listOfKeys.iterator();
      while (itr.hasNext()) {
        assertTrue(searchForKey(pr, (Date) itr.next()));
      }
      // Initial warm up phase: do a get of all the keys.
      itr = listOfKeys.iterator();
      ArrayList vals = new ArrayList();
      while (itr.hasNext()) {
        Object val = pr.get(itr.next());
        assertNotNull(val);
        vals.add(val);
      }
      // Warm up the function-execution path: call execute once per key.
      PerformanceTestFunction function = new PerformanceTestFunction();
      FunctionService.registerFunction(function);
      DefaultResultCollector drc = new DefaultResultCollector();
      final Set singleKeySet = new HashSet();
      Execution dataSet = FunctionService.onRegion(pr);
      vals.clear();
      ArrayList list = new ArrayList();
      itr = listOfKeys.iterator();
      while (itr.hasNext()) {
        singleKeySet.add(itr.next());
        dataSet = dataSet.withFilter(singleKeySet);
        try {
          ResultCollector rc = dataSet.execute(function.getId());
          list = (ArrayList) rc.getResult();
        } catch (Exception ex) {
          LogWriterUtils.getLogWriter().info("Exception Occurred :" + ex.getMessage());
          Assert.fail("Test failed", ex);
        }
        Object val = list.get(0);
        assertNotNull(val);
        vals.add(val);
        singleKeySet.clear();
      }
      // JUnit convention: expected value first.
      assertEquals(listOfKeys.size(), vals.size());
      // END: warmup

      // Now start the performance count: timed plain get() over all keys.
      itr = listOfKeys.iterator();
      TimeKeeper t = new TimeKeeper();
      vals.clear();
      t.start();
      while (itr.hasNext()) {
        Object val = pr.get(itr.next());
        assertNotNull(val);
        vals.add(val);
      }
      t.stop();
      LogWriterUtils.getLogWriter().info("Time taken to iterate over " + vals.size()
          + " no. of keys: " + t.getTimeInMs() + " ms");

      // Timed per-key function execution over the same keys.
      vals.clear();
      t = new TimeKeeper();
      t.start();
      itr = listOfKeys.iterator();
      while (itr.hasNext()) {
        singleKeySet.add(itr.next());
        dataSet = dataSet.withFilter(singleKeySet);
        try {
          ResultCollector rc = dataSet.execute(function.getId());
          list = (ArrayList) rc.getResult();
        } catch (Exception ex) {
          // BUG FIX: the original swallowed the exception and then read list.get(0),
          // which still held the PREVIOUS iteration's result — silently masking
          // failures. Fail loudly, consistent with the warmup loop above.
          LogWriterUtils.getLogWriter().info("Exception Occurred :" + ex.getMessage());
          Assert.fail("Test failed", ex);
        }
        Object val = list.get(0);
        assertNotNull(val);
        vals.add(val);
        singleKeySet.clear();
      }
      t.stop();
      assertEquals(listOfKeys.size(), vals.size());
      LogWriterUtils.getLogWriter().info("Time taken to iterate over " + vals.size()
          + " no. of keys using FunctionExecution: " + t.getTimeInMs() + " ms");
    }
  });
}
Use of org.apache.geode.internal.cache.PartitionedRegion in project geode by apache, from the class Bug39356DUnitTest, method testCrashWhileCreatingABucket.
/**
 * This tests the case where the VM forcing other VMs to create a bucket crashes while creating
 * the bucket.
 */
@Test
public void testCrashWhileCreatingABucket() {
  Host host = Host.getHost(0);
  final VM vm0 = host.getVM(0);
  final VM vm1 = host.getVM(1);
  final VM vm2 = host.getVM(2);

  // Datastore creation: redundancy 1, immediate recovery, with an observer that will
  // kill vm0 during bucket creation.
  SerializableRunnable createDataStore = new SerializableRunnable("Create parReg") {
    public void run() {
      DistributionMessageObserver.setInstance(new MyRegionObserver(vm0));
      Cache cache = getCache();
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(1);
      paf.setRecoveryDelay(0);
      AttributesFactory attrFactory = new AttributesFactory();
      attrFactory.setDataPolicy(DataPolicy.PARTITION);
      attrFactory.setPartitionAttributes(paf.create());
      cache.createRegion(REGION_NAME, attrFactory.create());
    }
  };
  vm1.invoke(createDataStore);
  vm2.invoke(createDataStore);

  // Accessor (localMaxMemory 0) whose first put triggers remote bucket creation — the
  // observer is expected to close this cache mid-flight.
  SerializableRunnable createAccessor = new SerializableRunnable("Create parReg") {
    public void run() {
      Cache cache = getCache();
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(1);
      paf.setLocalMaxMemory(0);
      AttributesFactory attrFactory = new AttributesFactory();
      attrFactory.setDataPolicy(DataPolicy.PARTITION);
      attrFactory.setPartitionAttributes(paf.create());
      Region r = cache.createRegion(REGION_NAME, attrFactory.create());
      // trigger the creation of a bucket, which should trigger the destruction of this VM.
      try {
        r.put("ping", "pong");
        fail("Should have gotten a CancelException");
      } catch (CancelException e) {
        // this is ok, we expect our observer to close this cache.
      }
    }
  };
  vm0.invoke(createAccessor);

  // After the crash, every non-empty bucket must still satisfy redundancy (two owners).
  SerializableRunnable verifyBuckets = new SerializableRunnable("Verify buckets") {
    public void run() {
      LogWriter log = org.apache.geode.test.dunit.LogWriterUtils.getLogWriter();
      Cache cache = getCache();
      PartitionedRegion region = (PartitionedRegion) cache.getRegion(REGION_NAME);
      int totalBuckets = region.getAttributes().getPartitionAttributes().getTotalNumBuckets();
      for (int bucketId = 0; bucketId < totalBuckets; bucketId++) {
        List owners = null;
        // Retry until bucket metadata settles after the forced crash.
        while (owners == null) {
          try {
            owners = region.getBucketOwnersForValidation(bucketId);
          } catch (ForceReattemptException e) {
            log.info(Bug39356DUnitTest.class + " verify buckets Caught a ForceReattemptException");
            Wait.pause(1000);
          }
        }
        if (owners.isEmpty()) {
          log.info("skipping bucket " + bucketId + " because it has no data");
          continue;
        }
        assertEquals("Expecting bucket " + bucketId + " to have two copies", 2, owners.size());
        log.info("bucket " + bucketId + " had two copies");
      }
    }
  };
  vm1.invoke(verifyBuckets);
  vm2.invoke(verifyBuckets);
}
Use of org.apache.geode.internal.cache.PartitionedRegion in project geode by apache, from the class FetchEntriesMessageJUnitTest, method testProcessChunk.
/**
 * Feeds a hand-built reply chunk into FetchEntriesResponse.processChunk and checks that both
 * entries are decoded, with a version tag recorded only for the key that carried one.
 */
@Test
public void testProcessChunk() throws Exception {
  cache = Fakes.cache();
  PartitionedRegion mockRegion = mock(PartitionedRegion.class);
  InternalDistributedSystem system = cache.getInternalDistributedSystem();
  FetchEntriesResponse resp = new FetchEntriesResponse(system, mockRegion, null, 0);

  // Build a reply message around a dummy serialized chunk.
  HeapDataOutputStream chunkData = createDummyChunk();
  FetchEntriesReplyMessage replyMsg =
      new FetchEntriesReplyMessage(null, 0, 0, chunkData, 0, 0, 0, false, false);
  replyMsg.chunk = chunkData.toByteArray();

  resp.processChunk(replyMsg);

  // No RVV was sent, both key/value pairs decoded, and only the tagged key has a version.
  assertNull(resp.returnRVV);
  assertEquals(2, resp.returnValue.size());
  assertTrue(resp.returnValue.get("keyWithOutVersionTag").equals("valueWithOutVersionTag"));
  assertTrue(resp.returnValue.get("keyWithVersionTag").equals("valueWithVersionTag"));
  assertNull(resp.returnVersions.get("keyWithOutVersionTag"));
  assertNotNull(resp.returnVersions.get("keyWithVersionTag"));
}
Use of org.apache.geode.internal.cache.PartitionedRegion in project geode by apache, from the class ShutdownAllDUnitTest, method getBucketList.
/**
 * Returns the sorted set of bucket ids hosted locally by the named region in the given VM,
 * or null if the region there is not a partitioned region.
 *
 * @param vm         the dunit VM to query
 * @param regionName name of the region to inspect
 */
protected Set<Integer> getBucketList(VM vm, final String regionName) {
  SerializableCallable fetchBucketIds = new SerializableCallable("get buckets") {
    public Object call() throws Exception {
      Region region = getCache().getRegion(regionName);
      // Only partitioned regions have local bucket ids to report.
      if (!(region instanceof PartitionedRegion)) {
        return null;
      }
      PartitionedRegion partitioned = (PartitionedRegion) region;
      return new TreeSet<Integer>(partitioned.getDataStore().getAllLocalBucketIds());
    }
  };
  return (Set<Integer>) vm.invoke(fetchBucketIds);
}
Aggregations