use of org.apache.geode.cache.PartitionAttributesFactory in project geode by apache.
the class ConcurrentIndexOperationsOnOverflowRegionDUnitTest method testAsyncIndexInitDuringEntryDestroyAndQueryOnRR.
// GEODE-1828
@Category(FlakyTest.class)
@Test
public void testAsyncIndexInitDuringEntryDestroyAndQueryOnRR() {
  Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  hooked = false;
  name = "PartionedPortfoliosPR";
  // Create an overflow-to-disk replicated region with synchronous index maintenance.
  vm0.invoke(new CacheSerializableRunnable("Create local region with synchronous index maintenance") {
    @Override
    public void run2() throws CacheException {
      Cache cache = getCache();
      Region partitionRegion = null;
      IndexManager.testHook = null;
      try {
        DiskStore ds = cache.findDiskStore("disk");
        if (ds == null) {
          ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("disk");
        }
        AttributesFactory attr = new AttributesFactory();
        attr.setValueConstraint(PortfolioData.class);
        attr.setIndexMaintenanceSynchronous(true);
        EvictionAttributesImpl evicAttr =
            new EvictionAttributesImpl().setAction(EvictionAction.OVERFLOW_TO_DISK);
        evicAttr.setAlgorithm(EvictionAlgorithm.LRU_ENTRY).setMaximum(1);
        attr.setEvictionAttributes(evicAttr);
        attr.setDataPolicy(DataPolicy.REPLICATE);
        // attr.setPartitionAttributes(new
        // PartitionAttributesFactory().setTotalNumBuckets(1).create());
        attr.setDiskStoreName("disk");
        RegionFactory regionFactory = cache.createRegionFactory(attr.create());
        partitionRegion = regionFactory.create(name);
      } catch (IllegalStateException ex) {
        LogWriterUtils.getLogWriter().warning("Creation caught IllegalStateException", ex);
      }
      assertNotNull("Region " + name + " not in cache", cache.getRegion(name));
      assertNotNull("Region ref null", partitionRegion);
      assertTrue("Region ref claims to be destroyed", !partitionRegion.isDestroyed());
      // Create indexes.
      try {
        Index index = cache.getQueryService().createIndex("statusIndex", "p.ID", "/" + name + " p");
        assertNotNull(index);
      } catch (Exception e1) {
        e1.printStackTrace();
        fail("Index creation failed");
      }
    }
  });
  // Start changing values in the region; without the fix this deadlocks.
  AsyncInvocation asyncInv1 = vm0.invokeAsync(new CacheSerializableRunnable("Change value in region") {
    @Override
    public void run2() throws CacheException {
      Cache cache = getCache();
      // Do puts in the region.
      Region r = cache.getRegion(name);
      for (int i = 0; i < 100; i++) {
        r.put(i, new PortfolioData(i));
      }
      assertNull(IndexManager.testHook);
      IndexManager.testHook = new IndexManagerTestHook();
      // Destroy one of the values.
      cache.getLogger().fine("Destroying the value");
      r.destroy(1);
      IndexManager.testHook = null;
    }
  });
  AsyncInvocation asyncInv2 = vm0.invokeAsync(new CacheSerializableRunnable("Run query on region") {
    @Override
    public void run2() throws CacheException {
      Cache cache = getCache();
      Query statusQuery =
          cache.getQueryService().newQuery("select * from /" + name + " p where p.ID > -1");
      while (!hooked) {
        Wait.pause(100);
      }
      try {
        cache.getLogger().fine("Querying the region");
        SelectResults results = (SelectResults) statusQuery.execute();
        assertEquals(100, results.size());
      } catch (Exception e) {
        e.printStackTrace();
      }
    }
  });
  // If either join takes more than 30 seconds, it's a deadlock.
  ThreadUtils.join(asyncInv2, 30 * 1000);
  ThreadUtils.join(asyncInv1, 30 * 1000);
}
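For comparison, the overflow eviction configured above through the internal EvictionAttributesImpl can also be expressed with the public EvictionAttributes factory; a minimal standalone sketch (the region name, disk directory, and inline disk-store creation are illustrative, not taken from the test):

import java.io.File;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.EvictionAction;
import org.apache.geode.cache.EvictionAttributes;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class OverflowRegionSketch {
  public static void main(String[] args) {
    Cache cache = new CacheFactory().create();
    // Disk store backing the overflowed entries (directory is illustrative).
    cache.createDiskStoreFactory().setDiskDirs(new File[] { new File(".") }).create("disk");
    // Replicated region that keeps at most one entry in memory and overflows the rest to disk.
    Region<Integer, Object> region = cache.<Integer, Object>createRegionFactory(RegionShortcut.REPLICATE)
        .setEvictionAttributes(
            EvictionAttributes.createLRUEntryAttributes(1, EvictionAction.OVERFLOW_TO_DISK))
        .setDiskStoreName("disk")
        .create("PortfoliosOverflow"); // illustrative region name
    region.put(1, "value");
    cache.close();
  }
}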
use of org.apache.geode.cache.PartitionAttributesFactory in project geode by apache.
the class ConcurrentIndexOperationsOnOverflowRegionDUnitTest method testAsyncIndexInitDuringEntryDestroyAndQueryOnOnNonOverflowPR.
@Test
public void testAsyncIndexInitDuringEntryDestroyAndQueryOnOnNonOverflowPR() {
  Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  hooked = false;
  name = "PartionedPortfoliosPR";
  // Create a non-overflow partitioned region.
  vm0.invoke(new CacheSerializableRunnable("Create local region with synchronous index maintenance") {
    @Override
    public void run2() throws CacheException {
      Cache cache = getCache();
      Region partitionRegion = null;
      IndexManager.testHook = null;
      try {
        AttributesFactory attr = new AttributesFactory();
        attr.setValueConstraint(PortfolioData.class);
        attr.setIndexMaintenanceSynchronous(true);
        attr.setDataPolicy(DataPolicy.PARTITION);
        attr.setPartitionAttributes(new PartitionAttributesFactory().setTotalNumBuckets(1).create());
        RegionFactory regionFactory = cache.createRegionFactory(attr.create());
        partitionRegion = regionFactory.create(name);
      } catch (IllegalStateException ex) {
        LogWriterUtils.getLogWriter().warning("Creation caught IllegalStateException", ex);
      }
      assertNotNull("Region " + name + " not in cache", cache.getRegion(name));
      assertNotNull("Region ref null", partitionRegion);
      assertTrue("Region ref claims to be destroyed", !partitionRegion.isDestroyed());
      // Create indexes.
      try {
        Index index = cache.getQueryService().createIndex("statusIndex", "p.ID", "/" + name + " p");
        assertNotNull(index);
      } catch (Exception e1) {
        e1.printStackTrace();
        fail("Index creation failed");
      }
    }
  });
  // Start changing values in the region; without the fix this deadlocks.
  AsyncInvocation asyncInv1 = vm0.invokeAsync(new CacheSerializableRunnable("Change value in region") {
    @Override
    public void run2() throws CacheException {
      // Do puts in the region.
      Region r = getCache().getRegion(name);
      for (int i = 0; i < 100; i++) {
        r.put(i, new PortfolioData(i));
      }
      assertNull(IndexManager.testHook);
      IndexManager.testHook = new IndexManagerNoWaitTestHook();
      // Destroy one of the values.
      getCache().getLogger().fine("Destroying the value");
      r.destroy(1);
      IndexManager.testHook = null;
    }
  });
  AsyncInvocation asyncInv2 = vm0.invokeAsync(new CacheSerializableRunnable("Run query on region") {
    @Override
    public void run2() throws CacheException {
      Query statusQuery =
          getCache().getQueryService().newQuery("select * from /" + name + " p where p.ID > -1");
      while (!hooked) {
        Wait.pause(10);
      }
      try {
        getCache().getLogger().fine("Querying the region");
        SelectResults results = (SelectResults) statusQuery.execute();
        assertEquals(100, results.size());
      } catch (Exception e) {
        e.printStackTrace();
      }
    }
  });
  // If either join takes more than 30 seconds, it's a deadlock.
  ThreadUtils.join(asyncInv2, 30 * 1000);
  ThreadUtils.join(asyncInv1, 30 * 1000);
}
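The PartitionAttributesFactory call chain above reduces to a small pattern when written against the public RegionFactory API; a minimal sketch (the region name is illustrative):

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.PartitionAttributesFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class SingleBucketPrSketch {
  public static void main(String[] args) {
    Cache cache = new CacheFactory().create();
    // A partitioned region with a single bucket, as in the test above: every
    // entry lands in the same bucket, which concentrates index maintenance
    // and querying on one BucketRegion.
    Region<Integer, Object> pr = cache.<Integer, Object>createRegionFactory(RegionShortcut.PARTITION)
        .setPartitionAttributes(new PartitionAttributesFactory<Integer, Object>()
            .setTotalNumBuckets(1)
            .create())
        .create("Portfolios"); // illustrative region name
    cache.close();
  }
}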
use of org.apache.geode.cache.PartitionAttributesFactory in project geode by apache.
the class PRQueryDUnitHelper method getCacheSerializableRunnableForCacheClose.
/**
 * This function recreates the cache on the VM (via getCache()) and then recreates the PR,
 * waiting for redundancy recovery to finish.
 *
 * @return cacheSerializable object
 *
 * NOTE: Closing of the cache must be done from the test case rather than in
 * PRQueryDUnitHelper.
 */
public CacheSerializableRunnable getCacheSerializableRunnableForCacheClose(final String regionName, final int redundancy, final Class constraint) {
  SerializableRunnable PrRegion = new CacheSerializableRunnable("cacheClose") {
    @Override
    public void run2() throws CacheException {
      final String expectedCacheClosedException = CacheClosedException.class.getName();
      final String expectedReplyException = ReplyException.class.getName();
      getCache().getLogger().info("<ExpectedException action=add>" + expectedCacheClosedException + "</ExpectedException>");
      getCache().getLogger().info("<ExpectedException action=add>" + expectedReplyException + "</ExpectedException>");
      Cache cache = getCache();
      org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("PROperationWithQueryDUnitTest#getCacheSerializableRunnableForCacheClose: Recreating the cache ");
      AttributesFactory attr = new AttributesFactory();
      attr.setValueConstraint(constraint);
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      PartitionAttributes prAttr = paf.setRedundantCopies(redundancy).create();
      attr.setPartitionAttributes(prAttr);
      final CountDownLatch cdl = new CountDownLatch(1);
      ResourceObserverAdapter observer = new InternalResourceManager.ResourceObserverAdapter() {
        @Override
        public void recoveryFinished(Region region) {
          cdl.countDown();
        }
      };
      InternalResourceManager.setResourceObserver(observer);
      try {
        cache.createRegion(regionName, attr.create());
        // Wait for redundancy recovery to finish.
        cdl.await();
      } catch (InterruptedException e) {
        Assert.fail("interrupted", e);
      } finally {
        InternalResourceManager.setResourceObserver(null);
      }
      org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("PROperationWithQueryDUnitTest#getCacheSerializableRunnableForCacheClose: cache recreated on VM ");
      getCache().getLogger().info("<ExpectedException action=remove>" + expectedReplyException + "</ExpectedException>");
      getCache().getLogger().info("<ExpectedException action=remove>" + expectedCacheClosedException + "</ExpectedException>");
    }
  };
  return (CacheSerializableRunnable) PrRegion;
}
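A hypothetical call site for this runnable, sketched under two assumptions: the enclosing test extends JUnit4CacheTestCase (so getCache() resolves inside the anonymous runnable) and PRQueryDUnitHelper offers a no-arg constructor:

PRQueryDUnitHelper helper = new PRQueryDUnitHelper(); // assumed no-arg constructor
VM datastore = Host.getHost(0).getVM(1);
// Per the NOTE above, the test case itself closes the cache first ...
datastore.invoke(new CacheSerializableRunnable("closeCache") {
  @Override
  public void run2() throws CacheException {
    getCache().close();
  }
});
// ... and then this runnable recreates the cache and the PR and waits for recovery.
datastore.invoke(helper.getCacheSerializableRunnableForCacheClose("Portfolios", 1, Portfolio.class));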
use of org.apache.geode.cache.PartitionAttributesFactory in project geode by apache.
the class PRQueryDUnitTest method testQueryResultsFromMembers.
@Test
public void testQueryResultsFromMembers() throws Exception {
  final String rName = getUniqueName();
  Host host = Host.getHost(0);
  final VM datastore1 = host.getVM(2);
  final VM datastore2 = host.getVM(3);
  final int totalBuckets = 10;
  final int redCop = 0;
  CacheSerializableRunnable createPR = new CacheSerializableRunnable("Create PR") {
    public void run2() throws CacheException {
      AttributesFactory attr = new AttributesFactory();
      PartitionAttributes prAttr = new PartitionAttributesFactory().setRedundantCopies(redCop).setTotalNumBuckets(totalBuckets).create();
      attr.setPartitionAttributes(prAttr);
      getCache().createRegion(rName, attr.create());
    }
  };
  datastore1.invoke(createPR);
  datastore2.invoke(createPR);
  AttributesFactory attr = new AttributesFactory();
  PartitionAttributes prAttr = new PartitionAttributesFactory().setRedundantCopies(redCop).setTotalNumBuckets(totalBuckets).create();
  attr.setPartitionAttributes(prAttr);
  PartitionedRegion pr = (PartitionedRegion) getCache().createRegion(rName, attr.create());
  // Populate the region so entries spread across the buckets.
  int numEntries = 100;
  for (int i = 1; i <= numEntries; i++) {
    pr.put(new Integer(i), new Portfolio(i));
  }
  int[] limit = new int[] { 10, 15, 30, 0, 1, 9 };
  String[] queries = new String[] {
      "select * from " + pr.getFullPath() + " LIMIT " + limit[0],
      "select * from " + pr.getFullPath() + " LIMIT " + limit[1],
      "select * from " + pr.getFullPath() + " LIMIT " + limit[2],
      "select * from " + pr.getFullPath() + " LIMIT " + limit[3],
      "select * from " + pr.getFullPath() + " LIMIT " + limit[4],
      "select * from " + pr.getFullPath() + " where ID > 10 LIMIT " + limit[5] };
  try {
    for (int q = 0; q < queries.length; q++) {
      Object[] params = new Object[0];
      final DefaultQuery query = (DefaultQuery) getCache().getQueryService().newQuery(queries[q]);
      final SelectResults results = query.getSimpleSelect().getEmptyResultSet(params, getCache(), query);
      // TODO assert this is the correct set of bucket Ids,
      final HashSet<Integer> buckets = new HashSet<Integer>();
      for (int i = 0; i < totalBuckets; i++) {
        buckets.add(new Integer(i));
      }
      final PartitionedRegionQueryEvaluator qe = new PartitionedRegionQueryEvaluator(pr.getSystem(), pr, query, params, results, buckets);
      class MyTestHook implements PartitionedRegionQueryEvaluator.TestHook {
        public HashMap resultsPerMember = new HashMap();

        public void hook(int spot) throws RuntimeException {
          if (spot == 3) {
            for (Object mr : qe.getResultsPerMember().entrySet()) {
              Map.Entry e = (Map.Entry) mr;
              Collection<Collection> results = (Collection<Collection>) e.getValue();
              for (Collection<Object> r : results) {
                if (this.resultsPerMember.containsKey(e.getKey())) {
                  this.resultsPerMember.put(e.getKey(),
                      new Integer(r.size() + ((Integer) this.resultsPerMember.get(e.getKey())).intValue()));
                } else {
                  this.resultsPerMember.put(e.getKey(), new Integer(r.size()));
                }
              }
            }
          }
        }
      }
      final MyTestHook th = new MyTestHook();
      qe.queryBuckets(th);
      for (Object r : th.resultsPerMember.entrySet()) {
        Map.Entry e = (Map.Entry) r;
        Integer res = (Integer) e.getValue();
        LogWriterUtils.getLogWriter().info("PRQueryDUnitTest#testQueryResultsFromMembers : \n" + "Query [" + queries[q] + "] Member : " + e.getKey() + " results size :" + res.intValue());
        assertEquals("Query [" + queries[q] + "]: The results returned by the member do not match the query limit size : Member : " + e.getKey(), limit[q], res.intValue());
      }
    }
  } finally {
    getCache().close();
  }
}
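Evaluator internals aside, the LIMIT behavior asserted here is what the public QueryService exposes; a minimal sketch (the region name is illustrative, and the region is assumed to be created and populated elsewhere):

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.query.Query;
import org.apache.geode.cache.query.SelectResults;

public class LimitQuerySketch {
  public static void main(String[] args) throws Exception {
    Cache cache = new CacheFactory().create();
    // Assumes a populated region at /Portfolios.
    Query query = cache.getQueryService().newQuery("select * from /Portfolios LIMIT 10");
    SelectResults<?> results = (SelectResults<?>) query.execute();
    // LIMIT caps the size of the aggregated result set, regardless of how
    // many members contributed partial results.
    System.out.println("result size: " + results.size());
    cache.close();
  }
}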
use of org.apache.geode.cache.PartitionAttributesFactory in project geode by apache.
the class PRQueryDUnitHelper method getCacheSerializableRunnableForColocatedChildCreate.
/**
 * This function creates the colocated child region of a colocated pair of PRs, given the
 * redundancy parameter of the parent (creation of the parent region itself is skipped here).
 *
 * @param regionName
 * @param redundancy
 * @param constraint
 * @param isPersistent
 * @return cacheSerializable object
 */
public CacheSerializableRunnable getCacheSerializableRunnableForColocatedChildCreate(final String regionName, final int redundancy, final Class constraint, boolean isPersistent) {
  final String childRegionName = regionName + "Child";
  final String diskName = "disk";
  SerializableRunnable createPrRegion;
  createPrRegion = new CacheSerializableRunnable(regionName + "-ChildRegion") {
    @Override
    public void run2() throws CacheException {
      Cache cache = getCache();
      Region partitionedregion = null;
      Region childRegion = null;
      AttributesFactory attr = new AttributesFactory();
      attr.setValueConstraint(constraint);
      if (isPersistent) {
        DiskStore ds = cache.findDiskStore(diskName);
        if (ds == null) {
          // ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs())
          ds = cache.createDiskStoreFactory().setDiskDirs(org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase.getDiskDirs()).create(diskName);
        }
        attr.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
        attr.setDiskStoreName(diskName);
      } else {
        attr.setDataPolicy(DataPolicy.PARTITION);
        attr.setDiskStoreName(null);
      }
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(redundancy);
      attr.setPartitionAttributes(paf.create());
      // skip parent region creation
      // partitionedregion = cache.createRegion(regionName, attr.create());
      // child region
      attr.setValueConstraint(constraint);
      paf.setColocatedWith(regionName);
      attr.setPartitionAttributes(paf.create());
      childRegion = cache.createRegion(childRegionName, attr.create());
    }
  };
  return (CacheSerializableRunnable) createPrRegion;
}
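Putting the colocation pieces together outside the helper: a minimal sketch that creates both the parent and the colocated child (region names are illustrative; colocated regions must agree on redundancy and bucket count, and the parent must exist before the child):

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.PartitionAttributesFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class ColocationSketch {
  public static void main(String[] args) {
    Cache cache = new CacheFactory().create();
    // Parent PR; must be created before any region colocated with it.
    Region<Object, Object> parent = cache.createRegionFactory(RegionShortcut.PARTITION)
        .setPartitionAttributes(new PartitionAttributesFactory<Object, Object>()
            .setRedundantCopies(1)
            .create())
        .create("Portfolios");
    // Child PR: same redundancy, colocated with the parent (the helper above
    // likewise passes the parent's region name to setColocatedWith).
    Region<Object, Object> child = cache.createRegionFactory(RegionShortcut.PARTITION)
        .setPartitionAttributes(new PartitionAttributesFactory<Object, Object>()
            .setRedundantCopies(1)
            .setColocatedWith("Portfolios")
            .create())
        .create("PortfoliosChild");
    cache.close();
  }
}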