Usage of org.apache.geode.cache.query.data.Position in the Apache Geode project.
From class CopyOnReadIndexDUnitTest, method helpTestPRQueryOnLocalNode.
// This test sets up a partition region across 2 servers.
// It does puts in each server, checking instance counts of Portfolio objects.
// Querying the data will result in deserialization of Portfolio objects.
// In cases where an index is present, the objects will be deserialized into the cache.
/**
 * Exercises a PR query on the local node (vm0) and verifies how many Portfolio
 * instances get deserialized in each VM, both with and without an index.
 * Copy-on-read is verified by mutating result objects after the first query and
 * asserting the cached values are untouched on the second query.
 *
 * @param queryString OQL query executed from vm0
 * @param numPortfolios total number of portfolios put across the two VMs
 * @param numExpectedResults expected size of the query result set
 * @param hasIndex whether an "idIndex" on p.ID is created before the puts
 * @throws Exception if a remote invocation or query execution fails
 */
public void helpTestPRQueryOnLocalNode(final String queryString, final int numPortfolios,
    final int numExpectedResults, final boolean hasIndex) throws Exception {
  final int numPortfoliosPerVM = numPortfolios / 2;
  resetInstanceCount(vm0);
  resetInstanceCount(vm1);
  createPartitionRegion(vm0, "portfolios");
  createPartitionRegion(vm1, "portfolios");
  if (hasIndex) {
    vm0.invoke(new SerializableCallable() {
      public Object call() throws Exception {
        QueryTestUtils utils = new QueryTestUtils();
        utils.createIndex("idIndex", "p.ID", "/portfolios p");
        return null;
      }
    });
  }
  vm0.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      Region region = getCache().getRegion("/portfolios");
      for (int i = 0; i < numPortfoliosPerVM; i++) {
        Portfolio p = new Portfolio(i);
        p.status = "testStatus";
        p.positions = new HashMap();
        p.positions.put("" + i, new Position("" + i, 20));
        region.put("key " + i, p);
      }
      if (hasIndex) {
        // Operations we have done on this vm consist of:
        // numPortfoliosPerVM instances of Portfolio created for the put operations.
        // Due to the index, we have deserialized all of the entries this vm currently hosts.
        Index index = getCache().getQueryService().getIndex(region, "idIndex");
        Wait.waitForCriterion(verifyPortfolioCount(
            (int) index.getStatistics().getNumberOfValues() + numPortfoliosPerVM), 5000, 200, true);
      } else {
        // Operations we have done on this vm consist of:
        // numPortfoliosPerVM instances of Portfolio created for the put operations.
        // We do not have an index, so we have not deserialized any values.
        Wait.waitForCriterion(verifyPortfolioCount(numPortfoliosPerVM), 5000, 200, true);
      }
      return null;
    }
  });
  vm1.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      Region region = getCache().getRegion("/portfolios");
      for (int i = numPortfoliosPerVM; i < numPortfolios; i++) {
        Portfolio p = new Portfolio(i);
        p.status = "testStatus";
        p.positions = new HashMap();
        p.positions.put("" + i, new Position("" + i, 20));
        region.put("key " + i, p);
      }
      // PR indexes are created across nodes, unlike replicated-region indexes,
      // so the index may already exist here even though it was created on vm0.
      if (hasIndex) {
        // Operations we have done on this vm consist of:
        // numPortfoliosPerVM instances of Portfolio created for the put operations.
        // Due to the index, we have deserialized all of the entries this vm currently hosts.
        Index index = getCache().getQueryService().getIndex(region, "idIndex");
        if (index == null) {
          QueryTestUtils utils = new QueryTestUtils();
          index = utils.createIndex("idIndex", "p.ID", "/portfolios p");
        }
        Wait.waitForCriterion(verifyPortfolioCount(
            (int) index.getStatistics().getNumberOfValues() + numPortfoliosPerVM), 5000, 200, true);
      } else {
        // Operations we have done on this vm consist of:
        // numPortfoliosPerVM instances of Portfolio created for the put operations.
        // We do not have an index, so we have not deserialized any values.
        Wait.waitForCriterion(verifyPortfolioCount(numPortfoliosPerVM), 5000, 200, true);
      }
      return null;
    }
  });
  vm0.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      Region region = getCache().getRegion("/portfolios");
      QueryService qs = getCache().getQueryService();
      Query query = qs.newQuery(queryString);
      SelectResults results = (SelectResults) query.execute();
      assertEquals("Failed:" + queryString, numExpectedResults, results.size());
      // Mutate the returned objects; with copy-on-read this must not leak into the cache.
      for (Object o : results) {
        if (o instanceof Portfolio) {
          Portfolio p = (Portfolio) o;
          p.status = "discardStatus";
        } else {
          Struct struct = (Struct) o;
          Portfolio p = (Portfolio) struct.getFieldValues()[0];
          p.status = "discardStatus";
        }
      }
      if (hasIndex) {
        // Operations we have done on this vm consist of:
        // numPortfoliosPerVM instances of Portfolio created for the put operations.
        // Due to the index, we have deserialized all of the entries this vm currently hosts.
        // Since we have deserialized and cached these values, we just need to add the number of
        // results we did a copy of due to copy on read.
        Index index = getCache().getQueryService().getIndex(region, "idIndex");
        Wait.waitForCriterion(verifyPortfolioCount(
            (int) index.getStatistics().getNumberOfValues() + numPortfoliosPerVM
                + numExpectedResults), 5000, 200, true);
      } else {
        // Operations we have done on this vm consist of:
        // numPortfoliosPerVM instances of Portfolio created for the put operations.
        // Due to the query we deserialized the number of entries this vm currently hosts.
        // We had to deserialize the results from the other data nodes when we iterated through
        // the results as well as our own.
        Wait.waitForCriterion(verifyPortfolioCount(
            (int) ((PartitionedRegion) region).getLocalSize() + numExpectedResults
                + numPortfoliosPerVM), 5000, 200, true);
      }
      return null;
    }
  });
  vm1.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      Region region = getCache().getRegion("/portfolios");
      if (hasIndex) {
        // After vm0 executed the query, we already had the values deserialized in our cache,
        // so it's the same total as before.
        Wait.waitForCriterion(verifyPortfolioCount(
            (int) ((PartitionedRegion) region).getLocalSize() + numPortfoliosPerVM), 5000, 200,
            true);
      } else {
        // After vm0 executed the query, we had to deserialize the values in our vm.
        Wait.waitForCriterion(verifyPortfolioCount(
            (int) ((PartitionedRegion) region).getLocalSize() + numPortfoliosPerVM), 5000, 200,
            true);
      }
      return null;
    }
  });
  vm0.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      Region region = getCache().getRegion("/portfolios");
      QueryService qs = getCache().getQueryService();
      Query query = qs.newQuery(queryString);
      SelectResults results = (SelectResults) query.execute();
      assertEquals(numExpectedResults, results.size());
      // The mutations made after the first query must NOT be visible: copy-on-read
      // means we mutated copies, so the cached values still carry "testStatus".
      for (Object o : results) {
        if (o instanceof Portfolio) {
          Portfolio p = (Portfolio) o;
          assertEquals("status should not have been changed", "testStatus", p.status);
        } else {
          Struct struct = (Struct) o;
          Portfolio p = (Portfolio) struct.getFieldValues()[0];
          assertEquals("status should not have been changed", "testStatus", p.status);
        }
      }
      if (hasIndex) {
        // Operations we have done on this vm consist of:
        // numPortfoliosPerVM instances of Portfolio created for the put operations.
        // Due to the index, we have deserialized all of the entries this vm currently hosts.
        // This is the second query; because we have deserialized and cached these values, we just
        // need to add the number of results a second time.
        Index index = getCache().getQueryService().getIndex(region, "idIndex");
        Wait.waitForCriterion(verifyPortfolioCount(
            (int) index.getStatistics().getNumberOfValues() + numExpectedResults
                + numExpectedResults + numPortfoliosPerVM), 5000, 200, true);
      } else {
        // Operations we have done on this vm consist of:
        // numPortfoliosPerVM instances of Portfolio created for the put operations.
        // This is the second query; because we have no index, we have to again deserialize all
        // the values that this vm is hosting, and add the number of results a second time.
        Wait.waitForCriterion(verifyPortfolioCount(
            (int) (((PartitionedRegion) region).getLocalSize()
                + ((PartitionedRegion) region).getLocalSize() + numExpectedResults
                + numExpectedResults + numPortfoliosPerVM)), 5000, 200, true);
      }
      return null;
    }
  });
  // FIX: the region created above is named "portfolios"; the original passed
  // "portfolio" here, so the region was never destroyed and leaked across tests.
  destroyRegion("portfolios", vm0);
}
Usage of org.apache.geode.cache.query.data.Position in the Apache Geode project.
From class CopyOnReadIndexJUnitTest, method createData.
/**
 * Populates {@code region} with Portfolio entries.
 *
 * @param region the region to populate
 * @param numObjects number of distinct index-key groups to create
 * @param objectsAndResultsMultiplier number of similar objects to put into the cache per
 *          index key, so that query results are satisfied by multiple entries
 */
private void createData(Region region, int numObjects, int objectsAndResultsMultiplier) {
  int totalEntries = numObjects * objectsAndResultsMultiplier;
  for (int regionKey = 0; regionKey < totalEntries; regionKey++) {
    // Each group of objectsAndResultsMultiplier consecutive entries shares one indexKey.
    int indexKey = regionKey / objectsAndResultsMultiplier;
    Portfolio portfolio = new Portfolio(regionKey);
    portfolio.indexKey = indexKey;
    portfolio.status = "testStatus";
    portfolio.positions = new HashMap();
    // NOTE(review): the position map key is the constant "1" (not the loop index),
    // so each portfolio holds exactly one position — presumably intentional; confirm.
    portfolio.positions.put("" + 1, new Position("" + indexKey, indexKey));
    region.put("key-" + regionKey, portfolio);
  }
}
Usage of org.apache.geode.cache.query.data.Position in the Apache Geode project.
From class CompactRangeIndexIndexMapJUnitTest, method createPortfolios.
/**
 * Puts {@code num} Portfolio entries into {@code region}, each carrying three positions.
 *
 * @param region the region to populate; keys are the stringified index
 * @param num how many portfolios to create
 */
private void createPortfolios(Region region, int num) {
  for (int idx = 0; idx < num; idx++) {
    Portfolio portfolio = new Portfolio(idx);
    HashMap positionMap = new HashMap();
    // Positions are created in a fixed order; Position.cnt is read for each amount.
    // Note the "VMW_2" map key deliberately reuses the "VMW" secId.
    positionMap.put("VMW", new Position("VMW", Position.cnt * 1000));
    positionMap.put("IBM", new Position("IBM", Position.cnt * 1000));
    positionMap.put("VMW_2", new Position("VMW", Position.cnt * 1000));
    portfolio.positions = positionMap;
    region.put("" + idx, portfolio);
  }
}
Usage of org.apache.geode.cache.query.data.Position in the Apache Geode project.
From class QueryDataInconsistencyDUnitTest, method testRangeIndex.
/**
 * Verifies that a range index returns consistent results while a concurrent put on the
 * same entry is parked inside the index-update code path by {@code IndexManagerTestHook}.
 * The query thread first queries while the region entry is mid-update (before the index
 * is modified), then again after the update has progressed, and checks the results differ.
 */
@Test
public void testRangeIndex() {
// Create caches
Properties props = new Properties();
server.invoke(() -> PRClientServerTestBase.createCacheInVm(props));
server.invoke(new CacheSerializableRunnable("create indexes") {
@Override
public void run2() throws CacheException {
cache = CacheFactory.getAnyInstance();
Region region = cache.createRegionFactory(RegionShortcut.REPLICATE).create(repRegionName);
// Clear any hook left over from a previous test before loading data.
IndexManager.testHook = null;
// Create common Portfolios and NewPortfolios
// Reset the shared position counter so secIds are generated deterministically.
Position.cnt = 0;
for (int j = cnt; j < cntDest; j++) {
Portfolio p = new Portfolio(j);
CacheFactory.getAnyInstance().getLogger().fine("Shobhit: portfolio " + j + " : " + p);
region.put(new Integer(j), p);
}
QueryService qs = CacheFactory.getAnyInstance().getQueryService();
try {
// NOTE(review): 12 assumes the portfolios for [cnt, cntDest) yield exactly 12
// distinct position secIds — derived from the Portfolio test fixture; confirm.
Index index = qs.createIndex("posIndex", "pos.secId", "/" + repRegionName + " p, p.positions.values pos");
assertEquals(12, index.getStatistics().getNumberOfKeys());
} catch (Exception e) {
fail("Index creation failed");
}
}
});
// Invoke update from client and stop in updateIndex
// first before updating the RegionEntry and second after updating
// the RegionEntry.
AsyncInvocation putThread = server.invokeAsync(new CacheSerializableRunnable("update a Region Entry") {
@Override
public void run2() throws CacheException {
Region repRegion = CacheFactory.getAnyInstance().getRegion(repRegionName);
// Install the hook so the put below blocks inside the index-update path.
IndexManager.testHook = new IndexManagerTestHook();
Portfolio newPort = new Portfolio(cntDest + 1);
CacheFactory.getAnyInstance().getLogger().fine("Shobhit: New Portfolio" + newPort);
repRegion.put(new Integer("1"), newPort);
// above call must be hooked in BEFORE_UPDATE_OP call.
}
});
server.invoke(new CacheSerializableRunnable("query on server") {
@Override
public void run2() throws CacheException {
QueryService qs = CacheFactory.getAnyInstance().getQueryService();
Position pos1 = null;
// Busy-wait until the async put thread is parked in the hook.
// NOTE(review): assumes 'hooked' is a volatile flag toggled by the hook — confirm.
while (!hooked) {
Wait.pause(100);
}
try {
// Query while the RegionEntry is being updated but the index is not yet modified;
// the index must still serve the pre-update position.
Object rs = qs.newQuery("<trace> select pos from /" + repRegionName + " p, p.positions.values pos where pos.secId = 'APPL' AND p.ID = 1").execute();
CacheFactory.getAnyInstance().getLogger().fine("Shobhit: " + rs);
assertTrue(rs instanceof SelectResults);
pos1 = (Position) ((SelectResults) rs).iterator().next();
if (!pos1.secId.equals("APPL")) {
fail("Query thread did not verify index results even when RE is under update");
// NOTE(review): unreachable — fail() throws before this line executes.
IndexManager.testHook = null;
}
} catch (Exception e) {
e.printStackTrace();
Assert.fail("Query execution failed on server.", e);
IndexManager.testHook = null;
} finally {
// Let client put go further.
hooked = false;
}
// Wait for the put thread to hit the second (post-update) hook point.
while (!hooked) {
Wait.pause(100);
}
try {
// Query again after the RegionEntry value was replaced; the result must now
// reflect the new Portfolio's positions, i.e. differ from pos1.
Object rs = qs.newQuery("<trace> select pos from /" + repRegionName + " p, p.positions.values pos where pos.secId = 'APPL' AND p.ID = 1").execute();
CacheFactory.getAnyInstance().getLogger().fine("Shobhit: " + rs);
assertTrue(rs instanceof SelectResults);
if (((SelectResults) rs).size() > 0) {
Position pos2 = (Position) ((SelectResults) rs).iterator().next();
if (pos2.equals(pos1)) {
fail("Query thread did not verify index results even when RE is under update and " + "RegionEntry value has been modified before releasing the lock");
}
}
} catch (Exception e) {
e.printStackTrace();
fail("Query execution failed on server.");
} finally {
// Let client put go further.
hooked = false;
IndexManager.testHook = null;
}
}
});
// Wait (up to 200 ms per the harness join semantics) for the async put to complete.
ThreadUtils.join(putThread, 200);
}
Usage of org.apache.geode.cache.query.data.Position in the Apache Geode project.
From class QueryDataInconsistencyDUnitTest, method testRangeIndexWithIndexAndQueryFromCluaseMisMatch2.
/**
 * Same scenario as testRangeIndex, but the query's FROM clause ("p, p.collectionHolderMap.values
 * coll, p.positions.values pos") deliberately does not match the index's FROM clause
 * ("p, p.positions.values pos"), exercising index use under a from-clause mismatch while a
 * concurrent put on the same entry is parked in the index-update hook.
 * (Method name typo "CluaseMisMatch" kept — renaming would break external references.)
 */
@Test
public void testRangeIndexWithIndexAndQueryFromCluaseMisMatch2() {
// Create caches
Properties props = new Properties();
server.invoke(() -> PRClientServerTestBase.createCacheInVm(props));
server.invoke(new CacheSerializableRunnable("create indexes") {
@Override
public void run2() throws CacheException {
cache = CacheFactory.getAnyInstance();
Region region = cache.createRegionFactory(RegionShortcut.REPLICATE).create(repRegionName);
// Clear any hook left over from a previous test before loading data.
IndexManager.testHook = null;
// Create common Portfolios and NewPortfolios
// Reset the shared position counter so secIds are generated deterministically.
Position.cnt = 0;
for (int j = cnt; j < cntDest; j++) {
region.put(new Integer(j), new Portfolio(j));
}
QueryService qs = CacheFactory.getAnyInstance().getQueryService();
try {
// NOTE(review): 12 assumes the portfolios for [cnt, cntDest) yield exactly 12
// distinct position secIds — derived from the Portfolio test fixture; confirm.
Index index = qs.createIndex("posIndex", "pos.secId", "/" + repRegionName + " p, p.positions.values pos");
assertEquals(12, index.getStatistics().getNumberOfKeys());
} catch (Exception e) {
fail("Index creation failed");
}
}
});
// Invoke update from client and stop in updateIndex
// first before updating the RegionEntry and second after updating
// the RegionEntry.
AsyncInvocation putThread = server.invokeAsync(new CacheSerializableRunnable("update a Region Entry") {
@Override
public void run2() throws CacheException {
Region repRegion = CacheFactory.getAnyInstance().getRegion(repRegionName);
// Install the hook so the put below blocks inside the index-update path.
IndexManager.testHook = new IndexManagerTestHook();
// This portfolio with same ID must have different positions.
repRegion.put(new Integer("1"), new Portfolio(1));
// above call must be hooked in BEFORE_UPDATE_OP call.
}
});
server.invoke(new CacheSerializableRunnable("query on server") {
@Override
public void run2() throws CacheException {
QueryService qs = CacheFactory.getAnyInstance().getQueryService();
Position pos1 = null;
// Busy-wait until the async put thread is parked in the hook.
// NOTE(review): assumes 'hooked' is a volatile flag toggled by the hook — confirm.
while (!hooked) {
Wait.pause(100);
}
try {
// Query while the RegionEntry is mid-update; the index must still serve the
// pre-update position despite the from-clause mismatch.
Object rs = qs.newQuery("<trace> select pos from /" + repRegionName + " p, p.collectionHolderMap.values coll, p.positions.values pos where pos.secId = 'APPL' AND p.ID = 1").execute();
CacheFactory.getAnyInstance().getLogger().fine("Shobhit: " + rs);
assertTrue(rs instanceof SelectResults);
pos1 = (Position) ((SelectResults) rs).iterator().next();
if (!pos1.secId.equals("APPL")) {
fail("Query thread did not verify index results even when RE is under update");
// NOTE(review): unreachable — fail() throws before this line executes.
IndexManager.testHook = null;
}
} catch (Exception e) {
e.printStackTrace();
Assert.fail("Query execution failed on server.", e);
IndexManager.testHook = null;
} finally {
// Let client put go further.
hooked = false;
}
// Wait for the put thread to hit the second (post-update) hook point.
while (!hooked) {
Wait.pause(100);
}
try {
// Query again after the entry value was replaced; the result must now differ.
Object rs = qs.newQuery("select pos from /" + repRegionName + " p, p.collectionHolderMap.values coll, p.positions.values pos where pos.secId = 'APPL' AND p.ID = 1").execute();
assertTrue(rs instanceof SelectResults);
if (((SelectResults) rs).size() > 0) {
Position pos2 = (Position) ((SelectResults) rs).iterator().next();
if (pos2.equals(pos1)) {
fail("Query thread did not verify index results even when RE is under update and " + "RegionEntry value has been modified before releasing the lock");
}
}
} catch (Exception e) {
e.printStackTrace();
fail("Query execution failed on server.");
} finally {
IndexManager.testHook = null;
// Let client put go further.
hooked = false;
}
}
});
// Wait (up to 200 ms per the harness join semantics) for the async put to complete.
ThreadUtils.join(putThread, 200);
}
Aggregations