Use of org.apache.geode.cache.query.data.Portfolio in project geode by apache.
The class AsynchIndexMaintenanceJUnitTest, method testIndexMaintenanceBasedOnThresholdAsZero.
@Test
public void testIndexMaintenanceBasedOnThresholdAsZero() throws Exception {
  System.getProperties().put(DistributionConfig.GEMFIRE_PREFIX + "AsynchIndexMaintenanceThreshold", "0");
  System.getProperties().put(DistributionConfig.GEMFIRE_PREFIX + "AsynchIndexMaintenanceInterval", "60000");
  final Index ri = (Index) qs.createIndex("statusIndex", IndexType.FUNCTIONAL, "p.getID", "/portfolio p");
  for (int i = 0; i < 3; ++i) {
    region.put("" + (i + 1), new Portfolio(i + 1));
    idSet.add((i + 1) + "");
  }
  WaitCriterion ev = new WaitCriterion() {
    public boolean done() {
      return (getIndexSize(ri) == 3);
    }

    public String description() {
      return "valueToEntries map never became size 3";
    }
  };
  Wait.waitForCriterion(ev, 10 * 1000, 200, true);
}
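The snippets on this page exercise only a few members of the Portfolio test data class: the int ID behind getID() indexed above, a String status, a positions map, and an int constructor. The following is a minimal stand-in inferred from that usage, not the actual geode source; the field types and the alternating status assignment are assumptions.

// Hypothetical, trimmed-down stand-in for org.apache.geode.cache.query.data.Portfolio,
// restricted to the members these tests touch; the real class carries more fields.
import java.util.HashMap;

public class Portfolio {

  public int ID;                              // indexed via "p.getID" above
  public String status;                       // queried as 'active' / 'inactive' below
  public HashMap positions = new HashMap();   // raw map, mirroring the raw usage in the tests

  public Portfolio(int id) {
    this.ID = id;
    // Assumption: alternate the status so both query branches have matches.
    this.status = (id % 2 == 0) ? "active" : "inactive";
  }

  public int getID() {
    return ID;
  }

  public HashMap getPositions() {
    return positions;
  }
}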
Use of org.apache.geode.cache.query.data.Portfolio in project geode by apache.
The class CompactRangeIndexIndexMapJUnitTest, method createPortfolios.
private void createPortfolios(Region region, int num) {
  for (int i = 0; i < num; i++) {
    Portfolio p = new Portfolio(i);
    p.positions = new HashMap();
    p.positions.put("VMW", new Position("VMW", Position.cnt * 1000));
    p.positions.put("IBM", new Position("IBM", Position.cnt * 1000));
    p.positions.put("VMW_2", new Position("VMW", Position.cnt * 1000));
    region.put("" + i, p);
  }
}
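The helper above expects an already-created region. A self-contained sketch of that setup through the public Geode API could look like the following; the cache settings, the region name, and the LOCAL shortcut are illustrative assumptions, since the test obtains its region from a shared utility.

// Sketch only: create a cache and a local region, then populate it the same way
// createPortfolios(region, 10) does above.
import java.util.HashMap;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.query.data.Portfolio;
import org.apache.geode.cache.query.data.Position;

public class PortfolioRegionSetup {

  public static void main(String[] args) {
    Cache cache = new CacheFactory().set("mcast-port", "0").create();
    Region<String, Portfolio> region =
        cache.<String, Portfolio>createRegionFactory(RegionShortcut.LOCAL).create("exampleRegion");
    for (int i = 0; i < 10; i++) {
      Portfolio p = new Portfolio(i);
      p.positions = new HashMap();
      p.positions.put("VMW", new Position("VMW", Position.cnt * 1000));
      region.put(String.valueOf(i), p);
    }
    cache.close();
  }
}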
Use of org.apache.geode.cache.query.data.Portfolio in project geode by apache.
The class CompactRangeIndexJUnitTest, method testNullMapKeyCompactRangeIndexCreateIndexLater.
/**
 * Tests adding entries to a compact range index where the key of an indexed map field is null.
 */
@Test
public void testNullMapKeyCompactRangeIndexCreateIndexLater() throws Exception {
  Region region = utils.getCache().getRegion("exampleRegion");
  // create objects
  int numObjects = 10;
  for (int i = 1; i <= numObjects; i++) {
    Portfolio p = new Portfolio(i);
    p.status = null;
    p.getPositions().put(null, "something");
    region.put("KEY-" + i, p);
  }
  index = utils.createIndex("indexName", "positions[*]", "/exampleRegion");
  // execute query and check result size
  QueryService qs = utils.getCache().getQueryService();
  SelectResults results = (SelectResults) qs
      .newQuery("Select * from /exampleRegion r where r.position[null] = something").execute();
  assertEquals("Null matched Results expected", numObjects, results.size());
}
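Roughly the same index and query can be expressed against the public QueryService API alone. In the sketch below the plural field name (positions) and the quoted string literal are assumptions about the more common OQL form; the test above depends on its own utility class and query text.

// Sketch only, assuming a cache whose /exampleRegion is populated as above.
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.query.Index;
import org.apache.geode.cache.query.QueryService;
import org.apache.geode.cache.query.SelectResults;

public class MapKeyIndexSketch {

  public static int countNullKeyMatches(Cache cache) throws Exception {
    QueryService qs = cache.getQueryService();
    // Index every map key of the positions field, which here includes the null key.
    Index idx = qs.createIndex("indexName", "positions[*]", "/exampleRegion");
    SelectResults<?> results = (SelectResults<?>) qs
        .newQuery("select * from /exampleRegion r where r.positions[null] = 'something'")
        .execute();
    return results.size();
  }
}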
Use of org.apache.geode.cache.query.data.Portfolio in project geode by apache.
The class CompactRangeIndexJUnitTest, method testInvalidTokens.
@Test
public void testInvalidTokens() throws Exception {
  final Region r = utils.getCache().getRegion("/exampleRegion");
  r.put("0", new Portfolio(0));
  r.invalidate("0");
  index = utils.createIndex("compact range index", "p.status", "/exampleRegion p");
  QueryService qs = utils.getCache().getQueryService();
  SelectResults results = (SelectResults) qs
      .newQuery("Select * from /exampleRegion r where r.status='active'").execute();
  // the remove should have happened
  assertEquals(0, results.size());
  results = (SelectResults) qs
      .newQuery("Select * from /exampleRegion r where r.status!='inactive'").execute();
  assertEquals(0, results.size());
  CompactRangeIndex cindex = (CompactRangeIndex) index;
  MemoryIndexStore indexStore = (MemoryIndexStore) cindex.getIndexStorage();
  CloseableIterator iterator = indexStore.get(QueryService.UNDEFINED);
  int count = 0;
  while (iterator.hasNext()) {
    count++;
    iterator.next();
  }
  assertEquals("incorrect number of entries in collection", 0, count);
}
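The test above reaches into CompactRangeIndex internals to inspect the index store. The observable behavior can also be shown with public API only, as in this sketch; the region name is assumed to exist and to be otherwise empty. An invalidated entry keeps its key but loses its value, so the status query should return no rows.

// Public-API-only sketch of the invalidate scenario above.
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.query.QueryService;
import org.apache.geode.cache.query.SelectResults;
import org.apache.geode.cache.query.data.Portfolio;

public class InvalidateQuerySketch {

  public static int activeCountAfterInvalidate(Cache cache) throws Exception {
    Region<String, Portfolio> region = cache.getRegion("exampleRegion");
    region.put("0", new Portfolio(0));
    region.invalidate("0"); // the entry remains, its value becomes an invalid token
    QueryService qs = cache.getQueryService();
    SelectResults<?> results = (SelectResults<?>) qs
        .newQuery("select * from /exampleRegion r where r.status = 'active'").execute();
    return results.size(); // expected to be 0, as asserted in the test above
  }
}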
Use of org.apache.geode.cache.query.data.Portfolio in project geode by apache.
The class PRQueryDUnitTest, method testQueryResultsFromMembers.
@Test
public void testQueryResultsFromMembers() throws Exception {
  final String rName = getUniqueName();
  Host host = Host.getHost(0);
  final VM datastore1 = host.getVM(2);
  final VM datastore2 = host.getVM(3);
  final int totalBuckets = 10;
  final int redCop = 0;
  CacheSerializableRunnable createPR = new CacheSerializableRunnable("Create PR") {
    public void run2() throws CacheException {
      AttributesFactory attr = new AttributesFactory();
      PartitionAttributes prAttr = new PartitionAttributesFactory().setRedundantCopies(redCop)
          .setTotalNumBuckets(totalBuckets).create();
      attr.setPartitionAttributes(prAttr);
      getCache().createRegion(rName, attr.create());
    }
  };
  datastore1.invoke(createPR);
  datastore2.invoke(createPR);
  AttributesFactory attr = new AttributesFactory();
  PartitionAttributes prAttr = new PartitionAttributesFactory().setRedundantCopies(redCop)
      .setTotalNumBuckets(totalBuckets).create();
  attr.setPartitionAttributes(prAttr);
  PartitionedRegion pr = (PartitionedRegion) getCache().createRegion(rName, attr.create());
  // Create bucket zero, one and two
  int numEntries = 100;
  for (int i = 1; i <= numEntries; i++) {
    pr.put(new Integer(i), new Portfolio(i));
  }
  int[] limit = new int[] { 10, 15, 30, 0, 1, 9 };
  String[] queries = new String[] {
      "select * from " + pr.getFullPath() + " LIMIT " + limit[0],
      "select * from " + pr.getFullPath() + " LIMIT " + limit[1],
      "select * from " + pr.getFullPath() + " LIMIT " + limit[2],
      "select * from " + pr.getFullPath() + " LIMIT " + limit[3],
      "select * from " + pr.getFullPath() + " LIMIT " + limit[4],
      "select * from " + pr.getFullPath() + " where ID > 10 LIMIT " + limit[5] };
  try {
    for (int q = 0; q < queries.length; q++) {
      Object[] params = new Object[0];
      final DefaultQuery query = (DefaultQuery) getCache().getQueryService().newQuery(queries[q]);
      final SelectResults results = query.getSimpleSelect().getEmptyResultSet(params, getCache(), query);
      // TODO assert this is the correct set of bucket Ids,
      final HashSet<Integer> buckets = new HashSet<Integer>();
      for (int i = 0; i < totalBuckets; i++) {
        buckets.add(new Integer(i));
      }
      final PartitionedRegionQueryEvaluator qe =
          new PartitionedRegionQueryEvaluator(pr.getSystem(), pr, query, params, results, buckets);
      class MyTestHook implements PartitionedRegionQueryEvaluator.TestHook {
        public HashMap resultsPerMember = new HashMap();

        public void hook(int spot) throws RuntimeException {
          int size = 0;
          if (spot == 3) {
            for (Object mr : qe.getResultsPerMember().entrySet()) {
              Map.Entry e = (Map.Entry) mr;
              Collection<Collection> results = (Collection<Collection>) e.getValue();
              for (Collection<Object> r : results) {
                if (this.resultsPerMember.containsKey(e.getKey())) {
                  this.resultsPerMember.put(e.getKey(),
                      new Integer(r.size() + ((Integer) this.resultsPerMember.get(e.getKey())).intValue()));
                } else {
                  this.resultsPerMember.put(e.getKey(), new Integer(r.size()));
                }
              }
            }
          }
        }
      }
      final MyTestHook th = new MyTestHook();
      qe.queryBuckets(th);
      for (Object r : th.resultsPerMember.entrySet()) {
        Map.Entry e = (Map.Entry) r;
        Integer res = (Integer) e.getValue();
        LogWriterUtils.getLogWriter().info("PRQueryDUnitTest#testQueryResultsFromMembers : \n"
            + "Query [" + queries[q] + "] Member : " + e.getKey() + " results size :" + res.intValue());
        assertEquals("Query [" + queries[q] + "]: The results returned by the member does not match the query limit size : Member : "
            + e.getKey(), limit[q], res.intValue());
      }
    }
  } finally {
    getCache().close();
  }
}
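The distributed test above drives PartitionedRegionQueryEvaluator and its internal test hook directly to count per-member results. The underlying behavior it verifies, that a LIMIT query over a partitioned region returns at most the limit, can be sketched with the public API alone; the region name, entry count, and single-member setup below are assumptions for illustration.

// Sketch only: populate a partitioned region and run a LIMIT query against it.
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.query.SelectResults;
import org.apache.geode.cache.query.data.Portfolio;

public class PartitionedLimitQuerySketch {

  public static void main(String[] args) throws Exception {
    Cache cache = new CacheFactory().set("mcast-port", "0").create();
    Region<Integer, Portfolio> pr =
        cache.<Integer, Portfolio>createRegionFactory(RegionShortcut.PARTITION).create("portfolios");
    for (int i = 1; i <= 100; i++) {
      pr.put(i, new Portfolio(i));
    }
    SelectResults<?> results = (SelectResults<?>) cache.getQueryService()
        .newQuery("select * from /portfolios LIMIT 10").execute();
    System.out.println("returned " + results.size() + " of 100 entries"); // expect 10
    cache.close();
  }
}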