use of org.apache.geode.cache.query.QueryTestUtils in project geode by apache.
the class CopyOnReadIndexDUnitTest method helpTestPRQueryOnLocalNode.
// The test sets up a partition region across 2 servers
// It does puts in each server, checking instance counts of portfolio objects
// Querying the data will result in deserialization of portfolio objects.
// In cases where index is present, the objects will be deserialized in the cache
public void helpTestPRQueryOnLocalNode(final String queryString, final int numPortfolios, final int numExpectedResults, final boolean hasIndex) throws Exception {
final int numPortfoliosPerVM = numPortfolios / 2;
resetInstanceCount(vm0);
resetInstanceCount(vm1);
createPartitionRegion(vm0, "portfolios");
createPartitionRegion(vm1, "portfolios");
if (hasIndex) {
vm0.invoke(new SerializableCallable() {
public Object call() throws Exception {
QueryTestUtils utils = new QueryTestUtils();
utils.createIndex("idIndex", "p.ID", "/portfolios p");
return null;
}
});
}
vm0.invoke(new SerializableCallable() {
public Object call() throws Exception {
Region region = getCache().getRegion("/portfolios");
for (int i = 0; i < numPortfoliosPerVM; i++) {
Portfolio p = new Portfolio(i);
p.status = "testStatus";
p.positions = new HashMap();
p.positions.put("" + i, new Position("" + i, 20));
region.put("key " + i, p);
}
if (hasIndex) {
// operations we have done on this vm consist of:
// numPortfoliosPerVM instances of Portfolio created for the put operations
// Due to the index, we have deserialized all of the entries this vm currently hosts
Index index = getCache().getQueryService().getIndex(region, "idIndex");
Wait.waitForCriterion(verifyPortfolioCount((int) index.getStatistics().getNumberOfValues() + numPortfoliosPerVM), 5000, 200, true);
} else {
// operations we have done on this vm consist of:
// numPortfoliosPerVM instances of Portfolio created for the put operations
// We do not have an index, so we have not deserialized any values
Wait.waitForCriterion(verifyPortfolioCount(numPortfoliosPerVM), 5000, 200, true);
}
return null;
}
});
vm1.invoke(new SerializableCallable() {
public Object call() throws Exception {
Region region = getCache().getRegion("/portfolios");
for (int i = numPortfoliosPerVM; i < numPortfolios; i++) {
Portfolio p = new Portfolio(i);
p.status = "testStatus";
p.positions = new HashMap();
p.positions.put("" + i, new Position("" + i, 20));
region.put("key " + i, p);
}
// PR indexes are created on all data nodes, unlike replicated region indexes
if (hasIndex) {
// operations we have done on this vm consist of:
// numPortfoliosPerVM instances of Portfolio created for the put operations
// Due to the index, we have deserialized all of the entries this vm currently hosts
Index index = getCache().getQueryService().getIndex(region, "idIndex");
if (index == null) {
QueryTestUtils utils = new QueryTestUtils();
index = utils.createIndex("idIndex", "p.ID", "/portfolios p");
}
Wait.waitForCriterion(verifyPortfolioCount((int) index.getStatistics().getNumberOfValues() + numPortfoliosPerVM), 5000, 200, true);
} else {
// operations we have done on this vm consist of:
// numPortfoliosPerVM instances of Portfolio created for put operation
// We do not have an index, so we have not deserialized any values
Wait.waitForCriterion(verifyPortfolioCount(numPortfoliosPerVM), 5000, 200, true);
}
return null;
}
});
vm0.invoke(new SerializableCallable() {
public Object call() throws Exception {
Region region = getCache().getRegion("/portfolios");
QueryService qs = getCache().getQueryService();
Query query = qs.newQuery(queryString);
SelectResults results = (SelectResults) query.execute();
Iterator it = results.iterator();
assertEquals("Failed:" + queryString, numExpectedResults, results.size());
for (Object o : results) {
if (o instanceof Portfolio) {
Portfolio p = (Portfolio) o;
p.status = "discardStatus";
} else {
Struct struct = (Struct) o;
Portfolio p = (Portfolio) struct.getFieldValues()[0];
p.status = "discardStatus";
}
}
if (hasIndex) {
// operations we have done on this vm consist of:
// numPortfoliosPerVM instances of Portfolio created for the put operations
// Due to the index, we have deserialized all of the entries this vm currently hosts
// Since we have deserialized and cached these values, we just need to add the number of
// results that were copied due to copy on read
Index index = getCache().getQueryService().getIndex(region, "idIndex");
Wait.waitForCriterion(verifyPortfolioCount((int) index.getStatistics().getNumberOfValues() + numPortfoliosPerVM + numExpectedResults), 5000, 200, true);
} else {
// operations we have done on this vm consist of:
// numPortfoliosPerVM instances of Portfolio created for the put operations
// Due to the query, we deserialized the number of entries this vm currently hosts
// We had to deserialize the results from the other data nodes when we iterated through
// the results, as well as our own
Wait.waitForCriterion(verifyPortfolioCount((int) ((PartitionedRegion) region).getLocalSize() + numExpectedResults + numPortfoliosPerVM), 5000, 200, true);
}
return null;
}
});
vm1.invoke(new SerializableCallable() {
public Object call() throws Exception {
Region region = getCache().getRegion("/portfolios");
if (hasIndex) {
// After vm0 executed the query, we already had the values deserialized in our cache
// So it's the same total as before
Wait.waitForCriterion(verifyPortfolioCount((int) ((PartitionedRegion) region).getLocalSize() + numPortfoliosPerVM), 5000, 200, true);
} else {
// After vm0 executed the query, we had to deserialize the values in our vm
Wait.waitForCriterion(verifyPortfolioCount((int) ((PartitionedRegion) region).getLocalSize() + numPortfoliosPerVM), 5000, 200, true);
}
return null;
}
});
vm0.invoke(new SerializableCallable() {
public Object call() throws Exception {
Region region = getCache().getRegion("/portfolios");
QueryService qs = getCache().getQueryService();
Query query = qs.newQuery(queryString);
SelectResults results = (SelectResults) query.execute();
assertEquals(numExpectedResults, results.size());
for (Object o : results) {
if (o instanceof Portfolio) {
Portfolio p = (Portfolio) o;
assertEquals("status should not have been changed", "testStatus", p.status);
} else {
Struct struct = (Struct) o;
Portfolio p = (Portfolio) struct.getFieldValues()[0];
assertEquals("status should not have been changed", "testStatus", p.status);
}
}
if (hasIndex) {
// operations we have done on this vm consist of:
// numPortfoliosPerVM instances of Portfolio created for the put operations
// Due to the index, we have deserialized all of the entries this vm currently hosts
// This is the second query; because we have deserialized and cached these values, we just
// need to add the number of results a second time
Index index = getCache().getQueryService().getIndex(region, "idIndex");
Wait.waitForCriterion(verifyPortfolioCount((int) index.getStatistics().getNumberOfValues() + numExpectedResults + numExpectedResults + numPortfoliosPerVM), 5000, 200, true);
} else {
// operations we have done on this vm consist of:
// numPortfoliosPerVM instances of Portfolio created for the put operations
// Because we have no index, each query has to deserialize all of the values this vm is hosting
// This is the second query, so those locally hosted values are deserialized a second time
// and the number of copied results is added a second time
Wait.waitForCriterion(verifyPortfolioCount((int) (((PartitionedRegion) region).getLocalSize() + ((PartitionedRegion) region).getLocalSize() + numExpectedResults + numExpectedResults + numPortfoliosPerVM)), 5000, 200, true);
}
return null;
}
});
destroyRegion("portfolio", vm0);
}
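The instance-count checks above go through a verifyPortfolioCount helper that is not part of this snippet. A minimal sketch of what such a WaitCriterion could look like, assuming the test Portfolio class maintains a static instance counter (the instanceCount field below is an assumption, not shown on this page):
// Hypothetical sketch: poll an assumed static counter on the test Portfolio class
// until it matches the expected number of deserialized instances
private WaitCriterion verifyPortfolioCount(final int expected) {
  return new WaitCriterion() {
    private int actual;

    public boolean done() {
      actual = Portfolio.instanceCount; // assumed static counter incremented in the Portfolio constructor
      return actual == expected;
    }

    public String description() {
      return "expected " + expected + " Portfolio instances but found " + actual;
    }
  };
}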
use of org.apache.geode.cache.query.QueryTestUtils in project geode by apache.
the class CompactRangeIndexJUnitTest method setUp.
@Before
public void setUp() {
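// index_elemarray_threshold caps how many values a CompactRangeIndex keeps in its
// element array per key before switching to a concurrent set; 3 keeps that switch easy to reach in tests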
System.setProperty("index_elemarray_threshold", "3");
utils = new QueryTestUtils();
Properties props = new Properties();
props.setProperty(MCAST_PORT, "0");
utils.initializeQueryMap();
utils.createCache(props);
utils.createReplicateRegion("exampleRegion");
}
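With the cache and exampleRegion from setUp in place, a typical follow-up test would create the index through the same utils instance and query the region. A minimal sketch, reusing only the QueryTestUtils calls shown on this page plus the standard Geode query API; the query text and data are illustrative, not taken from the actual test class:
@Test
public void queriesUseCompactRangeIndex() throws Exception {
  // same createIndex call pattern as in the DUnit snippets above
  utils.createIndex("idIndex", "p.ID", "/exampleRegion p");

  // populate the region through the standard Geode API
  Cache cache = CacheFactory.getAnyInstance();
  Region<String, Portfolio> region = cache.getRegion("exampleRegion");
  for (int i = 0; i < 10; i++) {
    region.put("key " + i, new Portfolio(i));
  }

  // run a query the index can service and check the result size
  SelectResults results = (SelectResults) cache.getQueryService()
      .newQuery("select * from /exampleRegion p where p.ID > 4").execute();
  assertEquals(5, results.size());
}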
use of org.apache.geode.cache.query.QueryTestUtils in project geode by apache.
the class CopyOnReadIndexDUnitTest method testTransactionsOnReplicatedRegion.
// tests different queries with a transaction for replicated region
@Test
public void testTransactionsOnReplicatedRegion() throws Exception {
QueryTestUtils utils = new QueryTestUtils();
configureServers();
helpTestTransactionsOnReplicatedRegion(utils.queries.get("545"), 100, 100, true);
helpTestTransactionsOnReplicatedRegion(utils.queries.get("546"), 100, 100, true);
helpTestTransactionsOnReplicatedRegion(utils.queries.get("543"), 100, 100, true);
helpTestTransactionsOnReplicatedRegion(utils.queries.get("544"), 100, 100, true);
helpTestTransactionsOnReplicatedRegion("select * from /portfolios p where p.ID = 1", 100, 1, true);
helpTestTransactionsOnReplicatedRegion(utils.queries.get("545"), 100, 100, false);
helpTestTransactionsOnReplicatedRegion(utils.queries.get("546"), 100, 100, false);
helpTestTransactionsOnReplicatedRegion(utils.queries.get("543"), 100, 100, false);
helpTestTransactionsOnReplicatedRegion(utils.queries.get("544"), 100, 100, false);
helpTestTransactionsOnReplicatedRegion("select * from /portfolios p where p.ID = 1", 100, 1, false);
}
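All of these assertions rest on copy-on-read: once it is enabled, query results are copies of the cached values, so setting status to "discardStatus" on a result cannot leak back into the region. A standalone, single-node sketch of that behaviour using only the public Geode API (the region name and data below are illustrative):
// minimal copy-on-read illustration, outside the DUnit harness
Cache cache = new CacheFactory().set("mcast-port", "0").create();
cache.setCopyOnRead(true); // queries and gets now return copies of cached values

Region<String, Portfolio> region =
    cache.<String, Portfolio>createRegionFactory(RegionShortcut.REPLICATE).create("portfolios");
Portfolio stored = new Portfolio(1);
stored.status = "testStatus";
region.put("key 1", stored);

SelectResults results = (SelectResults) cache.getQueryService()
    .newQuery("select * from /portfolios p where p.ID = 1").execute();
Portfolio copy = (Portfolio) results.iterator().next();
copy.status = "discardStatus"; // mutate the copy only

// the cached value is untouched, which is exactly what the asserts in these helpers verify
assertEquals("testStatus", region.get("key 1").status);
cache.close();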
use of org.apache.geode.cache.query.QueryTestUtils in project geode by apache.
the class CopyOnReadIndexDUnitTest method helpTestTransactionsOnReplicatedRegion.
public void helpTestTransactionsOnReplicatedRegion(final String queryString, final int numPortfolios, final int numExpectedResults, final boolean hasIndex) throws Exception {
resetInstanceCount(vm0);
resetInstanceCount(vm1);
resetInstanceCount(vm2);
createReplicatedRegion(vm0, "portfolios");
createReplicatedRegion(vm1, "portfolios");
createReplicatedRegion(vm2, "portfolios");
// counts
if (hasIndex) {
vm0.invoke(new SerializableCallable() {
public Object call() throws Exception {
QueryTestUtils utils = new QueryTestUtils();
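// createHashIndex builds a hash index, which is geared toward equality predicates such as p.ID = 1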
utils.createHashIndex("idIndex", "p.ID", "/portfolios p");
return null;
}
});
// intentionally do not create the index on vm1, to check different scenarios
vm2.invoke(new SerializableCallable() {
public Object call() throws Exception {
QueryTestUtils utils = new QueryTestUtils();
utils.createHashIndex("idIndex", "p.ID", "/portfolios p");
return null;
}
});
}
vm0.invoke(new SerializableCallable() {
public Object call() throws Exception {
Region region = getCache().getRegion("/portfolios");
for (int i = 0; i < numPortfolios; i++) {
Portfolio p = new Portfolio(i);
p.status = "testStatus";
p.positions = new HashMap();
p.positions.put("" + i, new Position("" + i, 20));
region.put("key " + i, p);
}
// We should have the same number of Portfolio instances as we created with the puts
Wait.waitForCriterion(verifyPortfolioCount(numPortfolios), 5000, 200, true);
return null;
}
});
vm1.invoke(new SerializableCallable() {
public Object call() throws Exception {
// At this point, we should only have serialized values in this vm
Region region = getCache().getRegion("/portfolios");
Wait.waitForCriterion(verifyPortfolioCount(0), 0, 200, true);
return null;
}
});
vm2.invoke(new SerializableCallable() {
public Object call() throws Exception {
// If an index was created on vm2, the values should already be deserialized at this point
Region region = getCache().getRegion("/portfolios");
if (hasIndex) {
Wait.waitForCriterion(verifyPortfolioCount(numPortfolios), 0, 200, true);
} else {
Wait.waitForCriterion(verifyPortfolioCount(0), 0, 200, true);
}
return null;
}
});
// start transaction
// execute query
// modify results
// check instance count
vm0.invoke(new SerializableCallable() {
public Object call() throws Exception {
Region region = getCache().getRegion("/portfolios");
CacheTransactionManager txManager = region.getCache().getCacheTransactionManager();
try {
txManager.begin();
QueryService qs = getCache().getQueryService();
Query query = qs.newQuery(queryString);
SelectResults results = (SelectResults) query.execute();
assertEquals(numExpectedResults, results.size());
for (Object o : results) {
if (o instanceof Portfolio) {
Portfolio p = (Portfolio) o;
p.status = "discardStatus";
} else {
Struct struct = (Struct) o;
Portfolio p = (Portfolio) struct.getFieldValues()[0];
p.status = "discardStatus";
}
}
txManager.commit();
} catch (CommitConflictException conflict) {
Assert.fail("commit conflict exception", conflict);
}
// We created numPortfolios Portfolio instances for the puts in the previous callable
// and have now copied the query results on top of that
Wait.waitForCriterion(verifyPortfolioCount(numExpectedResults + numPortfolios), 0, 200, true);
return null;
}
});
// Check objects in cache on vm1
vm1.invoke(new SerializableCallable() {
public Object call() throws Exception {
Region region = getCache().getRegion("/portfolios");
QueryService qs = getCache().getQueryService();
Query query = qs.newQuery(queryString);
SelectResults results = (SelectResults) query.execute();
assertEquals(numExpectedResults, results.size());
for (Object o : results) {
if (o instanceof Portfolio) {
Portfolio p = (Portfolio) o;
assertEquals("status should not have been changed", "testStatus", p.status);
p.status = "discardStatus";
} else {
Struct struct = (Struct) o;
Portfolio p = (Portfolio) struct.getFieldValues()[0];
assertEquals("status should not have been changed", "testStatus", p.status);
p.status = "discardStatus";
}
}
// first the query must deserialize the portfolios stored in the replicated region
// then we do a copy on read of these deserialized objects for the final result set
Wait.waitForCriterion(verifyPortfolioCount(numExpectedResults + numPortfolios), 0, 200, true);
results = (SelectResults) query.execute();
assertEquals(numExpectedResults, results.size());
for (Object o : results) {
if (o instanceof Portfolio) {
Portfolio p = (Portfolio) o;
assertEquals("status should not have been changed", "testStatus", p.status);
} else {
Struct struct = (Struct) o;
Portfolio p = (Portfolio) struct.getFieldValues()[0];
assertEquals("status should not have been changed", "testStatus", p.status);
}
}
// we never created an index on vm1
// so each query here has to deserialize the values from the region again
Wait.waitForCriterion(verifyPortfolioCount(numPortfolios * 2 + numExpectedResults * 2), 0, 200, true);
return null;
}
});
// Check objects in cache on vm2
vm2.invoke(new SerializableCallable() {
public Object call() throws Exception {
Region region = getCache().getRegion("/portfolios");
QueryService qs = getCache().getQueryService();
Query query = qs.newQuery(queryString);
SelectResults results = (SelectResults) query.execute();
assertEquals(numExpectedResults, results.size());
for (Object o : results) {
if (o instanceof Portfolio) {
Portfolio p = (Portfolio) o;
assertEquals("status should not have been changed", "testStatus", p.status);
p.status = "discardStatus";
} else {
Struct struct = (Struct) o;
Portfolio p = (Portfolio) struct.getFieldValues()[0];
assertEquals("status should not have been changed", "testStatus", p.status);
p.status = "discardStatus";
}
}
// with or without index, the values had to have been deserialized at one point
Wait.waitForCriterion(verifyPortfolioCount(numPortfolios + numExpectedResults), 0, 200, true);
results = (SelectResults) query.execute();
assertEquals(numExpectedResults, results.size());
for (Object o : results) {
if (o instanceof Portfolio) {
Portfolio p = (Portfolio) o;
assertEquals("status should not have been changed", "testStatus", p.status);
} else {
Struct struct = (Struct) o;
Portfolio p = (Portfolio) struct.getFieldValues()[0];
assertEquals("status should not have been changed", "testStatus", p.status);
}
}
if (hasIndex) {
// we have an index, so the values are already deserialized
// the total is now the original deserialization amount (numPortfolios)
// plus the results copied by the two queries
Wait.waitForCriterion(verifyPortfolioCount(numPortfolios + numExpectedResults * 2), 0, 200, true);
} else {
// no index was created since hasIndex is false
// so each query has to deserialize the values from the region again
Wait.waitForCriterion(verifyPortfolioCount(numPortfolios * 2 + numExpectedResults * 2), 0, 200, true);
}
return null;
}
});
// Check objects in cache on vm0
vm0.invoke(new SerializableCallable() {
public Object call() throws Exception {
Region region = getCache().getRegion("/portfolios");
QueryService qs = getCache().getQueryService();
Query query = qs.newQuery(queryString);
SelectResults results = (SelectResults) query.execute();
assertEquals(numExpectedResults, results.size());
for (Object o : results) {
if (o instanceof Portfolio) {
Portfolio p = (Portfolio) o;
assertEquals("status should not have been changed", "testStatus", p.status);
} else {
Struct struct = (Struct) o;
Portfolio p = (Portfolio) struct.getFieldValues()[0];
assertEquals("status should not have been changed", "testStatus", p.status);
}
}
// with or without an index, the values we put into the region were already deserialized
Wait.waitForCriterion(verifyPortfolioCount(numExpectedResults * 2 + numPortfolios), 0, 200, true);
return null;
}
});
destroyRegion("portfolio", vm0);
}
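resetInstanceCount is invoked on each VM before the regions are populated but is not shown here. A sketch under the same assumption as the verifyPortfolioCount sketch above, namely a static instance counter on the test Portfolio class:
// Hypothetical sketch: clear the assumed static counter on the given VM so the
// waitForCriterion checks start from a known baseline
private void resetInstanceCount(final VM vm) {
  vm.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      Portfolio.instanceCount = 0; // assumed static counter, see the verifyPortfolioCount sketch
      return null;
    }
  });
}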
use of org.apache.geode.cache.query.QueryTestUtils in project geode by apache.
the class CopyOnReadIndexDUnitTest method testPRQueryOnLocalNode.
// test different queries against partitioned region
@Test
public void testPRQueryOnLocalNode() throws Exception {
QueryTestUtils utils = new QueryTestUtils();
configureServers();
helpTestPRQueryOnLocalNode(utils.queries.get("545"), 100, 100, true);
helpTestPRQueryOnLocalNode(utils.queries.get("546"), 100, 100, true);
helpTestPRQueryOnLocalNode(utils.queries.get("543"), 100, 100, true);
helpTestPRQueryOnLocalNode(utils.queries.get("544"), 100, 100, true);
helpTestPRQueryOnLocalNode("select * from /portfolios p where p.ID = 1", 100, 1, true);
helpTestPRQueryOnLocalNode(utils.queries.get("545"), 100, 100, false);
helpTestPRQueryOnLocalNode(utils.queries.get("546"), 100, 100, false);
helpTestPRQueryOnLocalNode(utils.queries.get("543"), 100, 100, false);
helpTestPRQueryOnLocalNode(utils.queries.get("544"), 100, 100, false);
helpTestPRQueryOnLocalNode("select * from /portfolios p where p.ID = 1", 100, 1, false);
}
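configureServers is called before every scenario but is not shown on this page. Given that the class exercises copy-on-read, a plausible sketch is that it enables copy-on-read on each server VM; the body below is an assumption, not the actual helper:
// Hypothetical sketch: turn on copy-on-read in every server VM so query results
// are copies and the result mutations in the helpers cannot corrupt region values
private void configureServers() throws Exception {
  SerializableCallable setCopyOnRead = new SerializableCallable() {
    public Object call() throws Exception {
      getCache().setCopyOnRead(true);
      return null;
    }
  };
  vm0.invoke(setCopyOnRead);
  vm1.invoke(setCopyOnRead);
  vm2.invoke(setCopyOnRead);
}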