use of org.apache.geode.cache.query.QueryService in project geode by apache.
the class CopyOnReadIndexDUnitTest method helpTestPRQueryOnLocalNode.
// The test sets up a partition region across 2 servers
// It does puts in each server, checking instance counts of Portfolio objects
// Querying the data will result in deserialization of the Portfolio objects
// In cases where an index is present, the objects will be deserialized in the cache
public void helpTestPRQueryOnLocalNode(final String queryString, final int numPortfolios, final int numExpectedResults, final boolean hasIndex) throws Exception {
final int numPortfoliosPerVM = numPortfolios / 2;
resetInstanceCount(vm0);
resetInstanceCount(vm1);
createPartitionRegion(vm0, "portfolios");
createPartitionRegion(vm1, "portfolios");
if (hasIndex) {
vm0.invoke(new SerializableCallable() {
public Object call() throws Exception {
QueryTestUtils utils = new QueryTestUtils();
utils.createIndex("idIndex", "p.ID", "/portfolios p");
return null;
}
});
}
vm0.invoke(new SerializableCallable() {
public Object call() throws Exception {
Region region = getCache().getRegion("/portfolios");
for (int i = 0; i < numPortfoliosPerVM; i++) {
Portfolio p = new Portfolio(i);
p.status = "testStatus";
p.positions = new HashMap();
p.positions.put("" + i, new Position("" + i, 20));
region.put("key " + i, p);
}
if (hasIndex) {
// operations we have done on this vm consist of:
// numPortfoliosPerVM instances of Portfolio created for put operations
// Due to the index, we have deserialized all of the entries this vm currently hosts
Index index = getCache().getQueryService().getIndex(region, "idIndex");
Wait.waitForCriterion(verifyPortfolioCount((int) index.getStatistics().getNumberOfValues() + numPortfoliosPerVM), 5000, 200, true);
} else {
// operations we have done on this vm consist of:
// numPortfoliosPerVM instances of Portfolio created for put operations
// We do not have an index, so we have not deserialized any values
Wait.waitForCriterion(verifyPortfolioCount(numPortfoliosPerVM), 5000, 200, true);
}
return null;
}
});
vm1.invoke(new SerializableCallable() {
public Object call() throws Exception {
Region region = getCache().getRegion("/portfolios");
for (int i = numPortfoliosPerVM; i < numPortfolios; i++) {
Portfolio p = new Portfolio(i);
p.status = "testStatus";
p.positions = new HashMap();
p.positions.put("" + i, new Position("" + i, 20));
region.put("key " + i, p);
}
// PR indexes are created across nodes, unlike replicated region indexes
if (hasIndex) {
// operations we have done on this vm consist of:
// numPortfoliosPerVM instances of Portfolio created for put operations
// Due to the index, we have deserialized all of the entries this vm currently hosts
Index index = getCache().getQueryService().getIndex(region, "idIndex");
if (index == null) {
QueryTestUtils utils = new QueryTestUtils();
index = utils.createIndex("idIndex", "p.ID", "/portfolios p");
}
Wait.waitForCriterion(verifyPortfolioCount((int) index.getStatistics().getNumberOfValues() + numPortfoliosPerVM), 5000, 200, true);
} else {
// operations we have done on this vm consist of:
// numPortfoliosPerVM instances of Portfolio created for put operations
// We do not have an index, so we have not deserialized any values
Wait.waitForCriterion(verifyPortfolioCount(numPortfoliosPerVM), 5000, 200, true);
}
return null;
}
});
vm0.invoke(new SerializableCallable() {
public Object call() throws Exception {
Region region = getCache().getRegion("/portfolios");
QueryService qs = getCache().getQueryService();
Query query = qs.newQuery(queryString);
SelectResults results = (SelectResults) query.execute();
assertEquals("Failed:" + queryString, numExpectedResults, results.size());
for (Object o : results) {
if (o instanceof Portfolio) {
Portfolio p = (Portfolio) o;
p.status = "discardStatus";
} else {
Struct struct = (Struct) o;
Portfolio p = (Portfolio) struct.getFieldValues()[0];
p.status = "discardStatus";
}
}
if (hasIndex) {
// operations we have done on this vm consist of:
// numPortfoliosPerVM instances of Portfolio created for put operations
// Due to the index, we have deserialized all of the entries this vm currently hosts
// Since we have deserialized and cached these values, we just need to add the number of
// results that were copied due to copy-on-read
Index index = getCache().getQueryService().getIndex(region, "idIndex");
Wait.waitForCriterion(verifyPortfolioCount((int) index.getStatistics().getNumberOfValues() + numPortfoliosPerVM + numExpectedResults), 5000, 200, true);
} else {
// operations we have done on this vm consist of:
// numPortfoliosPerVM instances of Portfolio created for put operations
// Due to the query, we deserialized the number of entries this vm currently hosts
// We had to deserialize the results from the other data nodes when we iterated through
// the results, as well as our own
Wait.waitForCriterion(verifyPortfolioCount((int) ((PartitionedRegion) region).getLocalSize() + numExpectedResults + numPortfoliosPerVM), 5000, 200, true);
}
return null;
}
});
vm1.invoke(new SerializableCallable() {
public Object call() throws Exception {
Region region = getCache().getRegion("/portfolios");
if (hasIndex) {
// After vm0 executed the query, we already had the values deserialized in our cache
// So it's the same total as before
Wait.waitForCriterion(verifyPortfolioCount((int) ((PartitionedRegion) region).getLocalSize() + numPortfoliosPerVM), 5000, 200, true);
} else {
// After vm0 executed the query, we had to deserialize the values in our vm
Wait.waitForCriterion(verifyPortfolioCount((int) ((PartitionedRegion) region).getLocalSize() + numPortfoliosPerVM), 5000, 200, true);
}
return null;
}
});
vm0.invoke(new SerializableCallable() {
public Object call() throws Exception {
Region region = getCache().getRegion("/portfolios");
QueryService qs = getCache().getQueryService();
Query query = qs.newQuery(queryString);
SelectResults results = (SelectResults) query.execute();
assertEquals(numExpectedResults, results.size());
for (Object o : results) {
if (o instanceof Portfolio) {
Portfolio p = (Portfolio) o;
assertEquals("status should not have been changed", "testStatus", p.status);
} else {
Struct struct = (Struct) o;
Portfolio p = (Portfolio) struct.getFieldValues()[0];
assertEquals("status should not have been changed", "testStatus", p.status);
}
}
if (hasIndex) {
// operations we have done on this vm consist of:
// numPortfoliosPerVM instances of Portfolio created for put operations
// Due to the index, we have deserialized all of the entries this vm currently hosts
// This is the second query; because we have deserialized and cached these values, we just
// need to add the number of results a second time
Index index = getCache().getQueryService().getIndex(region, "idIndex");
Wait.waitForCriterion(verifyPortfolioCount((int) index.getStatistics().getNumberOfValues() + numExpectedResults + numExpectedResults + numPortfoliosPerVM), 5000, 200, true);
} else {
// operations we have done on this vm consist of:
// numPortfoliosPerVM instances of Portfolio created for put operations
// This is the second query; because we have no index, we have to again deserialize all
// the values that this vm is hosting, and add the number of results a second time
Wait.waitForCriterion(verifyPortfolioCount((int) (((PartitionedRegion) region).getLocalSize() + ((PartitionedRegion) region).getLocalSize() + numExpectedResults + numExpectedResults + numPortfoliosPerVM)), 5000, 200, true);
}
return null;
}
});
destroyRegion("portfolios", vm0);
}
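The expected-count assertions above all go through a verifyPortfolioCount helper defined elsewhere in CopyOnReadIndexDUnitTest. A minimal sketch of what such a WaitCriterion factory can look like, assuming the test's Portfolio class tracks constructions in a static instanceCount field (that field name is an assumption here, not confirmed from the source):
// A minimal sketch, not the actual Geode implementation: polls the assumed
// static Portfolio.instanceCount field until it matches the expected count.
private WaitCriterion verifyPortfolioCount(final int expected) {
  return new WaitCriterion() {
    public boolean done() {
      // Wait until the number of Portfolio objects created in this VM
      // matches the expected deserialization count.
      return expected == Portfolio.instanceCount;
    }

    public String description() {
      return "expected " + expected + " portfolio instances but found " + Portfolio.instanceCount;
    }
  };
}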
use of org.apache.geode.cache.query.QueryService in project geode by apache.
the class CompactRangeIndexJUnitTest method testNullMapKeyCompactRangeIndexCreateIndexLater.
/**
* Tests adding entries to a compact range index where the key of an indexed map field is null.
*/
@Test
public void testNullMapKeyCompactRangeIndexCreateIndexLater() throws Exception {
Region region = utils.getCache().getRegion("exampleRegion");
// create objects
int numObjects = 10;
for (int i = 1; i <= numObjects; i++) {
Portfolio p = new Portfolio(i);
p.status = null;
p.getPositions().put(null, "something");
region.put("KEY-" + i, p);
}
index = utils.createIndex("indexName", "positions[*]", "/exampleRegion");
// execute query and check result size
QueryService qs = utils.getCache().getQueryService();
SelectResults results = (SelectResults) qs.newQuery("Select * from /exampleRegion r where r.positions[null] = 'something'").execute();
assertEquals("Null matched Results expected", numObjects, results.size());
}
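The method name implies a sibling scenario in which the index is created before the puts, so each null map key is indexed as the entry arrives rather than when the index is built. A hedged sketch of that ordering, reusing the identifiers above (the actual companion test in Geode may differ):
@Test
public void testNullMapKeyCompactRangeIndexCreateIndexFirst() throws Exception {
  // Hypothetical companion scenario: the index exists before any entries arrive.
  Region region = utils.getCache().getRegion("exampleRegion");
  index = utils.createIndex("indexName", "positions[*]", "/exampleRegion");
  int numObjects = 10;
  for (int i = 1; i <= numObjects; i++) {
    Portfolio p = new Portfolio(i);
    p.status = null;
    p.getPositions().put(null, "something");
    region.put("KEY-" + i, p);
  }
  QueryService qs = utils.getCache().getQueryService();
  SelectResults results = (SelectResults) qs
      .newQuery("Select * from /exampleRegion r where r.positions[null] = 'something'").execute();
  assertEquals("Null matched Results expected", numObjects, results.size());
}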
use of org.apache.geode.cache.query.QueryService in project geode by apache.
the class CompactRangeIndexJUnitTest method testInvalidTokens.
@Test
public void testInvalidTokens() throws Exception {
final Region r = utils.getCache().getRegion("/exampleRegion");
r.put("0", new Portfolio(0));
r.invalidate("0");
index = utils.createIndex("compact range index", "p.status", "/exampleRegion p");
QueryService qs = utils.getCache().getQueryService();
SelectResults results = (SelectResults) qs.newQuery("Select * from /exampleRegion r where r.status='active'").execute();
// the invalidate should have removed the entry from the query results
assertEquals(0, results.size());
results = (SelectResults) qs.newQuery("Select * from /exampleRegion r where r.status!='inactive'").execute();
assertEquals(0, results.size());
CompactRangeIndex cindex = (CompactRangeIndex) index;
MemoryIndexStore indexStore = (MemoryIndexStore) cindex.getIndexStorage();
CloseableIterator iterator = indexStore.get(QueryService.UNDEFINED);
int count = 0;
while (iterator.hasNext()) {
count++;
iterator.next();
}
assertEquals("incorrect number of entries in collection", 0, count);
}
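CloseableIterator extends Iterator with a close method that releases index-store resources, so a production-quality version of the counting loop should close it. A small variant of the loop above with the close handled, reusing the indexStore local from the test:
// Counting loop with the iterator closed when done; indexStore is the same
// MemoryIndexStore obtained from cindex.getIndexStorage() above.
CloseableIterator iterator = indexStore.get(QueryService.UNDEFINED);
int count = 0;
try {
  while (iterator.hasNext()) {
    iterator.next();
    count++;
  }
} finally {
  iterator.close();
}
assertEquals("incorrect number of entries in collection", 0, count);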
use of org.apache.geode.cache.query.QueryService in project geode by apache.
the class PRQueryDUnitHelper method getCacheSerializableRunnableForPROrderByQueryWithLimit.
public CacheSerializableRunnable getCacheSerializableRunnableForPROrderByQueryWithLimit(final String regionName, final String localRegion) {
SerializableRunnable PrRegion = new CacheSerializableRunnable("PRQuery") {
public void run2() throws CacheException {
Cache cache = getCache();
// Querying the localRegion and the PR region
String[] queries = new String[] {
    "status as st from /REGION_NAME order by status",
    "p.status from /REGION_NAME p order by p.status",
    "p.position1.secId, p.ID from /REGION_NAME p order by p.position1.secId, p.ID desc",
    "key from /REGION_NAME.keys key order by key.status, key.ID",
    "key.ID from /REGION_NAME.keys key order by key.ID",
    "key.ID, key.status from /REGION_NAME.keys key order by key.status, key.ID asc",
    "key.ID, key.status from /REGION_NAME.keys key order by key.status desc, key.ID",
    "p.status, p.ID from /REGION_NAME p order by p.status asc, p.ID",
    "p.ID from /REGION_NAME p, p.positions.values order by p.ID",
    "* from /REGION_NAME p, p.positions.values val order by p.ID, val.secId",
    "p.iD, p.status from /REGION_NAME p order by p.iD",
    "iD, status from /REGION_NAME order by iD",
    "* from /REGION_NAME p order by p.getID()",
    "* from /REGION_NAME p order by p.getP1().secId, p.ID desc, p.ID",
    " p.position1.secId , p.ID as st from /REGION_NAME p order by p.position1.secId, p.ID",
    "e.key.ID, e.value.status from /REGION_NAME.entrySet e order by e.key.ID, e.value.status desc",
    "e.key from /REGION_NAME.entrySet e order by e.key.ID, e.key.pkid desc",
    "p, pos from /REGION_NAME p, p.positions.values pos order by p.ID, pos.secId desc",
    "p, pos from /REGION_NAME p, p.positions.values pos order by pos.secId, p.ID",
    "status , ID as ied from /REGION_NAME where ID > 0 order by status, ID desc",
    "p.status as st, p.ID as id from /REGION_NAME p where ID > 0 and status = 'inactive' order by p.status, p.ID desc",
    "p.position1.secId as st, p.ID as ied from /REGION_NAME p where p.ID > 0 and p.position1.secId != 'IBM' order by p.position1.secId, p.ID",
    " key.status as st, key.ID from /REGION_NAME.keys key where key.ID > 5 order by key.status, key.ID desc",
    " key.ID, key.status as st from /REGION_NAME.keys key where key.status = 'inactive' order by key.status desc, key.ID" };
Object[][] r = new Object[queries.length][2];
Region local = cache.getRegion(localRegion);
Region region = cache.getRegion(regionName);
assertNotNull(region);
final String[] expectedExceptions = new String[] {
    RegionDestroyedException.class.getName(),
    ReplyException.class.getName(),
    CacheClosedException.class.getName(),
    ForceReattemptException.class.getName(),
    QueryInvocationTargetException.class.getName() };
for (final String expectedException : expectedExceptions) {
getCache().getLogger().info("<ExpectedException action=add>" + expectedException + "</ExpectedException>");
}
String distinct = "<TRACE>SELECT DISTINCT ";
QueryService qs = getCache().getQueryService();
Object[] params;
try {
for (int l = 1; l <= 3; l++) {
String[] rq = new String[queries.length];
for (int j = 0; j < queries.length; j++) {
String qStr = null;
synchronized (region) {
// Execute on local region.
qStr = (distinct + queries[j].replace("REGION_NAME", localRegion));
qStr += (" LIMIT " + (l * l));
rq[j] = qStr;
SelectResults sr = (SelectResults) qs.newQuery(qStr).execute();
r[j][0] = sr;
if (sr.asList().size() > l * l) {
fail("The resultset size exceeds limit size. Limit size=" + l * l + ", result size =" + sr.asList().size());
}
// Execute on remote region.
qStr = (distinct + queries[j].replace("REGION_NAME", regionName));
qStr += (" LIMIT " + (l * l));
rq[j] = qStr;
SelectResults srr = (SelectResults) qs.newQuery(qStr).execute();
r[j][1] = srr;
if (srr.size() > l * l) {
fail("The resultset size exceeds limit size. Limit size=" + l * l + ", result size =" + srr.asList().size());
}
// assertIndexDetailsEquals("The resultset size is not same as limit size.", l*l,
// srr.asList().size());
// getCache().getLogger().info("Finished executing PR query: " + qStr);
}
}
StructSetOrResultsSet ssORrs = new StructSetOrResultsSet();
ssORrs.CompareQueryResultsWithoutAndWithIndexes(r, queries.length, true, rq);
}
org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Queries Executed successfully on Local region & PR Region");
} catch (QueryInvocationTargetException e) {
// no, this is not okay; rethrow as a test failure
throw new TestException("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception", e);
} catch (QueryException e) {
org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().error("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught QueryException while querying" + e, e);
throw new TestException("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception", e);
} catch (RegionDestroyedException rde) {
org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a RegionDestroyedException while querying as expected ", rde);
} catch (CancelException cce) {
org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a CancelException while querying as expected ", cce);
} finally {
for (final String expectedException : expectedExceptions) {
getCache().getLogger().info("<ExpectedException action=remove>" + expectedException + "</ExpectedException>");
}
}
}
};
return (CacheSerializableRunnable) PrRegion;
}
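A hedged usage sketch: the helper returns a runnable that a DUnit test typically dispatches onto a member VM after both regions exist and hold data. The constructor call, the vm0 handle, and the region names below are illustrative, not taken from the source:
// Illustrative only: vm0 and the region names come from the calling test,
// not from this helper itself.
PRQueryDUnitHelper helper = new PRQueryDUnitHelper();
CacheSerializableRunnable limitQueryTask =
    helper.getCacheSerializableRunnableForPROrderByQueryWithLimit("portfolios", "localPortfolios");
// Dispatch onto a data-hosting member once both regions are created and populated.
vm0.invoke(limitQueryTask);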
use of org.apache.geode.cache.query.QueryService in project geode by apache.
the class PRQueryDUnitHelper method getCacheSerializableRunnableForPROrderByQueryAndCompareResults.
public CacheSerializableRunnable getCacheSerializableRunnableForPROrderByQueryAndCompareResults(final String regionName, final String localRegion) {
SerializableRunnable PrRegion = new CacheSerializableRunnable("PRQuery") {
public void run2() throws CacheException {
Cache cache = getCache();
// Querying the localRegion and the PR region
String[] queries = new String[] {
    "p.status from /REGION_NAME p order by p.status",
    "* from /REGION_NAME order by status, ID desc",
    "status, ID from /REGION_NAME order by status",
    "p.status, p.ID from /REGION_NAME p order by p.status",
    "p.position1.secId, p.ID from /REGION_NAME p order by p.position1.secId",
    "key from /REGION_NAME.keys key order by key.status",
    "key.ID from /REGION_NAME.keys key order by key.ID",
    "key.ID, key.status from /REGION_NAME.keys key order by key.status",
    "key.ID, key.status from /REGION_NAME.keys key order by key.status, key.ID",
    "key.ID, key.status from /REGION_NAME.keys key order by key.status desc, key.ID",
    "key.ID, key.status from /REGION_NAME.keys key order by key.status, key.ID desc",
    "p.status, p.ID from /REGION_NAME p order by p.status asc, p.ID",
    "* from /REGION_NAME p order by p.status, p.ID",
    "p.ID from /REGION_NAME p, p.positions.values order by p.ID",
    "* from /REGION_NAME p, p.positions.values order by p.ID",
    "p.ID, p.status from /REGION_NAME p, p.positions.values order by p.status",
    "pos.secId from /REGION_NAME p, p.positions.values pos order by pos.secId",
    "p.ID, pos.secId from /REGION_NAME p, p.positions.values pos order by pos.secId",
    "* from /REGION_NAME p order by p.iD",
    "p.iD from /REGION_NAME p order by p.iD",
    "p.iD, p.status from /REGION_NAME p order by p.iD",
    "iD, status from /REGION_NAME order by iD",
    "* from /REGION_NAME p order by p.getID()",
    "p.getID() from /REGION_NAME p order by p.getID()",
    "* from /REGION_NAME p order by p.names[1]",
    "* from /REGION_NAME p order by p.getP1().secId",
    "* from /REGION_NAME p order by p.getP1().getSecId()",
    "* from /REGION_NAME p order by p.position1.secId",
    "p.ID, p.position1.secId from /REGION_NAME p order by p.position1.secId",
    "p.position1.secId, p.ID from /REGION_NAME p order by p.position1.secId",
    "e.key.ID from /REGION_NAME.entries e order by e.key.ID",
    "e.key.ID, e.value.status from /REGION_NAME.entries e order by e.key.ID",
    "e.key.ID, e.value.status from /REGION_NAME.entrySet e order by e.key.ID, e.value.status desc",
    "e.key, e.value from /REGION_NAME.entrySet e order by e.key.ID, e.value.status desc",
    "e.key from /REGION_NAME.entrySet e order by e.key.ID, e.key.pkid desc",
    "p, pos from /REGION_NAME p, p.positions.values pos order by p.ID",
    "p, pos from /REGION_NAME p, p.positions.values pos order by pos.secId",
    "p, pos from /REGION_NAME p, p.positions.values pos order by p.ID, pos.secId" };
Object[][] r = new Object[queries.length][2];
Region local = cache.getRegion(localRegion);
Region region = cache.getRegion(regionName);
assertNotNull(region);
final String[] expectedExceptions = new String[] {
    RegionDestroyedException.class.getName(),
    ReplyException.class.getName(),
    CacheClosedException.class.getName(),
    ForceReattemptException.class.getName(),
    QueryInvocationTargetException.class.getName() };
for (final String expectedException : expectedExceptions) {
getCache().getLogger().info("<ExpectedException action=add>" + expectedException + "</ExpectedException>");
}
String distinct = "SELECT DISTINCT ";
QueryService qs = getCache().getQueryService();
Object[] params;
try {
for (int j = 0; j < queries.length; j++) {
String qStr = null;
synchronized (region) {
// Execute on local region.
qStr = (distinct + queries[j].replace("REGION_NAME", localRegion));
r[j][0] = qs.newQuery(qStr).execute();
// Execute on remote region.
qStr = (distinct + queries[j].replace("REGION_NAME", regionName));
r[j][1] = qs.newQuery(qStr).execute();
}
}
org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Queries Executed successfully on Local region & PR Region");
StructSetOrResultsSet ssORrs = new StructSetOrResultsSet();
ssORrs.CompareQueryResultsWithoutAndWithIndexes(r, queries.length, queries);
} catch (QueryInvocationTargetException e) {
// no, this is not okay; rethrow as a test failure
throw new TestException("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception", e);
} catch (QueryException e) {
org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().error("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught QueryException while querying" + e, e);
throw new TestException("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception", e);
} catch (RegionDestroyedException rde) {
org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a RegionDestroyedException while querying as expected ", rde);
} catch (CancelException cce) {
org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a CancelException while querying as expected ", cce);
} finally {
for (final String expectedException : expectedExceptions) {
getCache().getLogger().info("<ExpectedException action=remove>" + expectedException + "</ExpectedException>");
}
}
}
};
return (CacheSerializableRunnable) PrRegion;
}
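For clarity, the REGION_NAME substitution in both helpers composes full OQL statements like the following worked example (region names illustrative):
// Worked example of the query-string composition used in both helpers above.
String template = "p.status from /REGION_NAME p order by p.status";
String distinct = "SELECT DISTINCT ";
String localQuery = distinct + template.replace("REGION_NAME", "localPortfolios");
// -> "SELECT DISTINCT p.status from /localPortfolios p order by p.status"
String prQuery = distinct + template.replace("REGION_NAME", "portfolios");
// -> "SELECT DISTINCT p.status from /portfolios p order by p.status"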