Use of org.apache.geode.cache.query.functional.StructSetOrResultsSet in project geode by Apache: class QueryIndexDUnitTest, method createIndexAndUpdatesOnOverflowRegionsAndValidateResults.
@Test
public void createIndexAndUpdatesOnOverflowRegionsAndValidateResults() throws Exception {
Host host = Host.getHost(0);
VM[] vms = new VM[] { host.getVM(0), host.getVM(1) };
// Create and load regions on all vms.
for (int i = 0; i < vms.length; i++) {
int finalI = i;
vms[i].invoke(() -> QueryIndexDUnitTest.createAndLoadOverFlowRegions("testOfValid3" + "vm" + finalI, new Boolean(true), new Boolean(true)));
}
vms[0].invoke(new CacheSerializableRunnable("Execute query validate results") {
public void run2() throws CacheException {
Cache cache = basicGetCache();
String[] regionNames = new String[] { "replicateOverFlowRegion", "replicatePersistentOverFlowRegion", "prOverFlowRegion", "prPersistentOverFlowRegion" };
QueryService qs = cache.getQueryService();
Region region = null;
String[] qString = new String[] {
    "SELECT * FROM /REGION_NAME pf WHERE pf.ID = 1",
    "SELECT ID FROM /REGION_NAME pf WHERE pf.ID = 1",
    "SELECT * FROM /REGION_NAME pf WHERE pf.ID > 5",
    "SELECT ID FROM /REGION_NAME pf WHERE pf.ID > 5",
    "SELECT * FROM /REGION_NAME.keys key WHERE key.ID = 1",
    "SELECT ID FROM /REGION_NAME.keys key WHERE key.ID = 1",
    "SELECT * FROM /REGION_NAME.keys key WHERE key.ID > 5",
    "SELECT ID FROM /REGION_NAME.keys key WHERE key.ID > 5" };
// Execute Query without index.
SelectResults[] srWithoutIndex = new SelectResults[qString.length * regionNames.length];
String[] queryString = new String[qString.length * regionNames.length];
int r = 0;
try {
for (int q = 0; q < qString.length; q++) {
for (int i = 0; i < regionNames.length; i++) {
String queryStr = qString[q].replace("REGION_NAME", regionNames[i]);
Query query = qs.newQuery(queryStr);
queryString[r] = queryStr;
srWithoutIndex[r] = (SelectResults) query.execute();
r++;
}
}
} catch (Exception ex) {
logger.info("Failed to Execute query", ex);
fail("Failed to Execute query.");
}
// Create index.
try {
for (int i = 0; i < regionNames.length; i++) {
region = cache.getRegion(regionNames[i]);
String indexName = "idIndex" + regionNames[i];
cache.getLogger().fine("createIndexOnOverFlowRegions() checking for index: " + indexName);
try {
if (qs.getIndex(region, indexName) == null) {
cache.getLogger().fine("createIndexOnOverFlowRegions() Index doesn't exist, creating index: " + indexName);
Index i1 = qs.createIndex(indexName, "pf.ID", "/" + regionNames[i] + " pf");
}
indexName = "keyIdIndex" + regionNames[i];
if (qs.getIndex(region, indexName) == null) {
cache.getLogger().fine("createIndexOnOverFlowRegions() Index doesn't exist, creating index: " + indexName);
Index i2 = qs.createIndex(indexName, "key.ID", "/" + regionNames[i] + ".keys key");
}
} catch (IndexNameConflictException ice) {
// Ignore. The pr may have created the index through
// remote index create message from peer.
}
}
} catch (Exception ex) {
logger.info("Failed to create index", ex);
fail("Failed to create index.");
}
int numObjects = 50;
for (int i = 0; i < regionNames.length; i++) {
region = cache.getRegion(regionNames[i]);
for (int cnt = 0; cnt < numObjects; cnt++) {
region.put(new Portfolio(cnt), new Portfolio(cnt));
}
}
for (int i = 0; i < regionNames.length; i++) {
region = cache.getRegion(regionNames[i]);
String indexName = "idIndex" + regionNames[i];
Index i1, i2;
if ((i1 = qs.getIndex(region, indexName)) != null) {
assertEquals("Unexpected number of keys in the index ", numObjects, i1.getStatistics().getNumberOfKeys());
assertEquals("Unexpected number of values in the index ", numObjects, i1.getStatistics().getNumberOfValues());
}
indexName = "keyIdIndex" + regionNames[i];
if ((i2 = qs.getIndex(region, indexName)) != null) {
assertEquals("Unexpected number of keys in the index ", numObjects, i2.getStatistics().getNumberOfKeys());
assertEquals("Unexpected number of values in the index ", numObjects, i2.getStatistics().getNumberOfValues());
}
}
// Execute Query with index.
SelectResults[] srWithIndex = new SelectResults[qString.length * regionNames.length];
try {
r = 0;
for (int q = 0; q < qString.length; q++) {
for (int i = 0; i < regionNames.length; i++) {
QueryObserverImpl observer = new QueryObserverImpl();
QueryObserverHolder.setInstance(observer);
String queryStr = qString[q].replace("REGION_NAME", regionNames[i]);
Query query = qs.newQuery(queryStr);
srWithIndex[r++] = (SelectResults) query.execute();
if (!observer.isIndexesUsed) {
fail("Index not used for query. " + queryStr);
}
}
}
} catch (Exception ex) {
logger.info("Failed to Execute query", ex);
fail("Failed to Execute query.");
}
// Compare results with and without index.
StructSetOrResultsSet ssORrs = new StructSetOrResultsSet();
SelectResults[][] sr = new SelectResults[1][2];
for (int i = 0; i < srWithIndex.length; i++) {
sr[0][0] = srWithoutIndex[i];
sr[0][1] = srWithIndex[i];
logger.info("Comparing the result for the query : " + queryString[i] + " Index in ResultSet is: " + i);
ssORrs.CompareQueryResultsWithoutAndWithIndexes(sr, 1, queryString);
}
}
});
}
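The pattern above recurs throughout these tests: execute each query before any index exists, create the indexes, execute again while a QueryObserver confirms the index was consulted, and then hand both result sets to StructSetOrResultsSet for comparison. Below is a minimal sketch of that comparison step, lifted out of the DUnit harness; it assumes a Cache with a loaded region is already available, and the region name, index name, and indexed expression are illustrative, not taken from the test.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.query.Query;
import org.apache.geode.cache.query.QueryService;
import org.apache.geode.cache.query.SelectResults;
import org.apache.geode.cache.query.functional.StructSetOrResultsSet;

public class IndexComparisonSketch {
  // Runs one query with and without an index and verifies both produce the same results.
  static void compareWithAndWithoutIndex(Cache cache, String queryStr) throws Exception {
    QueryService qs = cache.getQueryService();
    Query query = qs.newQuery(queryStr);
    // Execute once before any index exists on the queried field.
    SelectResults withoutIndex = (SelectResults) query.execute();
    // Create the index (name, expression, and from-clause are illustrative).
    qs.createIndex("idIndex", "pf.ID", "/exampleRegion pf");
    // Execute the same query again, now index-backed.
    SelectResults withIndex = (SelectResults) query.execute();
    // StructSetOrResultsSet expects one row per query:
    // column 0 = result without index, column 1 = result with index.
    SelectResults[][] results = new SelectResults[1][2];
    results[0][0] = withoutIndex;
    results[0][1] = withIndex;
    new StructSetOrResultsSet().CompareQueryResultsWithoutAndWithIndexes(results, 1, new String[] { queryStr });
  }
}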
Use of org.apache.geode.cache.query.functional.StructSetOrResultsSet in project geode by Apache: class QueryIndexDUnitTest, method createIndexOnOverflowRegionsAndValidateResults.
@Test
public void createIndexOnOverflowRegionsAndValidateResults() throws Exception {
Host host = Host.getHost(0);
VM[] vms = new VM[] { host.getVM(0), host.getVM(1) };
// Create and load regions on all vms.
for (int i = 0; i < vms.length; i++) {
int finalI = i;
vms[i].invoke(() -> QueryIndexDUnitTest.createAndLoadOverFlowRegions("testOfValid" + "vm" + finalI, new Boolean(true), new Boolean(false)));
}
vms[0].invoke(new CacheSerializableRunnable("Execute query validate results") {
public void run2() throws CacheException {
Cache cache = basicGetCache();
String[] regionNames = new String[] { "replicateOverFlowRegion", "replicatePersistentOverFlowRegion", "prOverFlowRegion", "prPersistentOverFlowRegion" };
QueryService qs = cache.getQueryService();
Region region = null;
int numObjects = 10;
// The index should get updated accordingly.
for (int i = 0; i < regionNames.length; i++) {
region = cache.getRegion(regionNames[i]);
for (int cnt = 1; cnt < numObjects; cnt++) {
region.put(new Portfolio(cnt), new Portfolio(cnt));
}
}
String[] qString = new String[] {
    "SELECT * FROM /REGION_NAME pf WHERE pf.ID = 1",
    "SELECT ID FROM /REGION_NAME pf WHERE pf.ID = 1",
    "SELECT * FROM /REGION_NAME pf WHERE pf.ID > 5",
    "SELECT ID FROM /REGION_NAME pf WHERE pf.ID > 5",
    "SELECT * FROM /REGION_NAME.keys key WHERE key.ID = 1",
    "SELECT ID FROM /REGION_NAME.keys key WHERE key.ID = 1",
    "SELECT * FROM /REGION_NAME.keys key WHERE key.ID > 5",
    "SELECT ID FROM /REGION_NAME.keys key WHERE key.ID > 5" };
// Execute Query without index.
SelectResults[] srWithoutIndex = new SelectResults[qString.length * regionNames.length];
String[] queryString = new String[qString.length * regionNames.length];
int r = 0;
try {
for (int q = 0; q < qString.length; q++) {
for (int i = 0; i < regionNames.length; i++) {
String queryStr = qString[q].replace("REGION_NAME", regionNames[i]);
Query query = qs.newQuery(queryStr);
queryString[r] = queryStr;
srWithoutIndex[r] = (SelectResults) query.execute();
r++;
}
}
} catch (Exception ex) {
LogWriterUtils.getLogWriter().info("Failed to Execute query", ex);
fail("Failed to Execute query.");
}
// Create index.
try {
for (int i = 0; i < regionNames.length; i++) {
region = cache.getRegion(regionNames[i]);
String indexName = "idIndex" + regionNames[i];
cache.getLogger().fine("createIndexOnOverFlowRegions() checking for index: " + indexName);
try {
if (qs.getIndex(region, indexName) == null) {
cache.getLogger().fine("createIndexOnOverFlowRegions() Index doesn't exist, creating index: " + indexName);
Index i1 = qs.createIndex(indexName, "pf.ID", "/" + regionNames[i] + " pf");
}
indexName = "keyIdIndex" + regionNames[i];
if (qs.getIndex(region, indexName) == null) {
cache.getLogger().fine("createIndexOnOverFlowRegions() Index doesn't exist, creating index: " + indexName);
Index i2 = qs.createIndex(indexName, "key.ID", "/" + regionNames[i] + ".keys key");
}
} catch (IndexNameConflictException ice) {
// Ignore. The pr may have created the index through
// remote index create message from peer.
}
}
} catch (Exception ex) {
logger.info("Failed to create index", ex);
fail("Failed to create index.");
}
// Execute Query with index.
SelectResults[] srWithIndex = new SelectResults[qString.length * regionNames.length];
try {
r = 0;
for (int q = 0; q < qString.length; q++) {
for (int i = 0; i < regionNames.length; i++) {
QueryObserverImpl observer = new QueryObserverImpl();
QueryObserverHolder.setInstance(observer);
String queryStr = qString[q].replace("REGION_NAME", regionNames[i]);
Query query = qs.newQuery(queryStr);
srWithIndex[r++] = (SelectResults) query.execute();
if (!observer.isIndexesUsed) {
fail("Index not used for query. " + queryStr);
}
}
}
} catch (Exception ex) {
LogWriterUtils.getLogWriter().info("Failed to Execute query", ex);
fail("Failed to Execute query.");
}
// Compare results with and without index.
StructSetOrResultsSet ssORrs = new StructSetOrResultsSet();
SelectResults[][] sr = new SelectResults[1][2];
for (int i = 0; i < srWithIndex.length; i++) {
sr[0][0] = srWithoutIndex[i];
sr[0][1] = srWithIndex[i];
logger.info("Comparing the result for the query : " + queryString[i] + " Index in ResultSet is: " + i);
ssORrs.CompareQueryResultsWithoutAndWithIndexes(sr, 1, queryString);
}
// The index should get updated accordingly.
for (int i = 0; i < regionNames.length; i++) {
region = cache.getRegion(regionNames[i]);
for (int cnt = 1; cnt < numObjects; cnt++) {
if (cnt % 2 == 0) {
region.destroy(new Portfolio(cnt));
}
}
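// Note: with numObjects == 10 the following loop does not execute, so nothing is re-added here.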
for (int cnt = 10; cnt < numObjects; cnt++) {
if (cnt % 2 == 0) {
region.put(new Portfolio(cnt), new Portfolio(cnt));
}
}
}
// Execute Query with index.
srWithIndex = new SelectResults[qString.length * regionNames.length];
try {
r = 0;
for (int q = 0; q < qString.length; q++) {
for (int i = 0; i < regionNames.length; i++) {
QueryObserverImpl observer = new QueryObserverImpl();
QueryObserverHolder.setInstance(observer);
String queryStr = qString[q].replace("REGION_NAME", regionNames[i]);
Query query = qs.newQuery(queryStr);
srWithIndex[r++] = (SelectResults) query.execute();
if (!observer.isIndexesUsed) {
fail("Index not used for query. " + queryStr);
}
}
}
} catch (Exception ex) {
logger.info("Failed to Execute query", ex);
fail("Failed to Execute query.");
}
}
});
}
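QueryObserverImpl, used above to assert that a query actually hit an index, is a test-local helper whose definition is not part of this snippet. The following is a representative sketch of how such an observer is commonly written against the QueryObserverAdapter hooks; the field name isIndexesUsed mirrors the usage above, but the body is a reconstruction, not the project's exact class.

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.apache.geode.cache.query.Index;
import org.apache.geode.cache.query.internal.QueryObserverAdapter;

// Records whether the query engine consulted an index during execution.
class QueryObserverImpl extends QueryObserverAdapter {
  boolean isIndexesUsed = false;
  List<String> indexesUsed = new ArrayList<>();

  @Override
  public void beforeIndexLookup(Index index, int oper, Object key) {
    indexesUsed.add(index.getName());
  }

  @Override
  public void afterIndexLookup(Collection results) {
    if (results != null) {
      isIndexesUsed = true;
    }
  }
}

The tests install the observer with QueryObserverHolder.setInstance(observer) before each execution and then check observer.isIndexesUsed afterwards.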
Use of org.apache.geode.cache.query.functional.StructSetOrResultsSet in project geode by Apache: class PdxLocalQueryDUnitTest, method testLocalPdxQueries.
@Test
public void testLocalPdxQueries() throws Exception {
final Host host = Host.getHost(0);
final VM server1 = host.getVM(1);
final VM client = host.getVM(2);
final int numberOfEntries = 10;
final String name = "/" + regionName;
final String name2 = "/" + regionName2;
final String[] queries = {
    "select * from " + name + " where position1 = $1",
    "select * from " + name + " where aDay = $1",
    "select distinct * from " + name + " p where p.status = 'inactive'", // numberOfEntries
    "select distinct p.status from " + name + " p where p.status = 'inactive'", // 1
    "select p from " + name + " p where p.status = 'inactive'", // numberOfEntries
    "select * from " + name + " p, p.positions.values v where v.secId = 'IBM'", // 4
    "select v from " + name + " p, p.positions.values v where v.secId = 'IBM'", // 4
    "select p.status from " + name + " p where p.status = 'inactive'", // numberOfEntries
    "select distinct * from " + name + " p where p.status = 'inactive' order by p.ID", // numberOfEntries
    "select * from " + name + " p where p.status = 'inactive' or p.ID > 0", // 19
    "select * from " + name + " p where p.status = 'inactive' and p.ID >= 0", // numberOfEntries
    "select * from " + name + " p where p.status in set ('inactive', 'active')", // numberOfEntries*2
    "select * from " + name + " p where p.ID > 0 and p.ID < 10", // 9
    "select v from " + name + " p, p.positions.values v where p.status = 'inactive'", // numberOfEntries*2
    "select v.secId from " + name + " p, p.positions.values v where p.status = 'inactive'", // numberOfEntries*2
    "select distinct p from " + name + " p, p.positions.values v where p.status = 'inactive' and v.pid >= 0", // numberOfEntries
    "select distinct p from " + name + " p, p.positions.values v where p.status = 'inactive' or v.pid > 0", // numberOfEntries*2
    "select distinct * from " + name + " p, p.positions.values v where p.status = 'inactive'", // numberOfEntries*2
    "select * from " + name + ".values v where v.status = 'inactive'", // numberOfEntries
    "select v from " + name + " v where v in (select p from " + name + " p where p.ID > 0)", // 19
    "select v from " + name + " v where v.status in (select distinct p.status from " + name + " p where p.status = 'inactive')", // numberOfEntries
    "select * from " + name + " r1, " + name2 + " r2 where r1.status = r2.status", // 200
    "select * from " + name + " r1, " + name2 + " r2 where r1.status = r2.status and r1.status = 'active'", // 100
    "select r2.status from " + name + " r1, " + name2 + " r2 where r1.status = r2.status and r1.status = 'active'", // 100
    "select distinct r2.status from " + name + " r1, " + name2 + " r2 where r1.status = r2.status and r1.status = 'active'", // 1
    "select * from " + name + " v where v.status = ELEMENT (select distinct p.status from " + name + " p where p.status = 'inactive')" // numberOfEntries
};
final int[] results = { 2, 3, numberOfEntries, 1, numberOfEntries, 4, 4, numberOfEntries, numberOfEntries, 19, numberOfEntries, numberOfEntries * 2, 9, numberOfEntries * 2, numberOfEntries * 2, numberOfEntries, numberOfEntries * 2, numberOfEntries * 2, numberOfEntries, 19, numberOfEntries, 200, 100, 100, 1, numberOfEntries };
// Start server1
final int port1 = (Integer) server1.invoke(new SerializableCallable("Create Server1") {
@Override
public Object call() throws Exception {
Region r1 = getCache().createRegionFactory(RegionShortcut.REPLICATE).create(regionName);
Region r2 = getCache().createRegionFactory(RegionShortcut.REPLICATE).create(regionName2);
for (int i = 0; i < numberOfEntries; i++) {
PortfolioPdx p = new PortfolioPdx(i);
r1.put("key-" + i, p);
r2.put("key-" + i, p);
}
CacheServer server = getCache().addCacheServer();
int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
server.setPort(port);
server.start();
return port;
}
});
// client loads pdx objects on server
client.invoke(new SerializableCallable("Create client") {
@Override
public Object call() throws Exception {
ClientCacheFactory cf = new ClientCacheFactory();
cf.addPoolServer(NetworkUtils.getServerHostName(server1.getHost()), port1);
ClientCache cache = getClientCache(cf);
Region region = cache.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY).create(regionName);
Region region2 = cache.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY).create(regionName2);
for (int i = numberOfEntries; i < numberOfEntries * 2; i++) {
PortfolioPdx p = new PortfolioPdx(i);
region.put("key-" + i, p);
region2.put("key-" + i, p);
}
return null;
}
});
// query locally on server1 to verify pdx objects are not deserialized
server1.invoke(new SerializableCallable("query locally on server1") {
@Override
public Object call() throws Exception {
GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
QueryService qs = null;
SelectResults sr = null;
// Execute query locally
try {
qs = getCache().getQueryService();
} catch (Exception e) {
Assert.fail("Failed to get QueryService.", e);
}
PositionPdx pos = new PositionPdx("IBM", 100);
PortfolioPdx.Day pDay = new PortfolioPdx(1).aDay;
for (int i = 0; i < queries.length; i++) {
try {
if (i == 0) {
sr = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pos });
} else if (i == 1) {
sr = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pDay });
} else {
sr = (SelectResults) qs.newQuery(queries[i]).execute();
}
assertTrue("Size of resultset should be greater than 0 for query: " + queries[i], sr.size() > 0);
assertEquals("Expected and actual results do not match for query: " + queries[i], results[i], sr.size());
} catch (Exception e) {
Assert.fail("Failed executing query " + queries[i], e);
}
}
int extra = 0;
if (cache.getLogger().fineEnabled()) {
extra = 20;
}
assertEquals(numberOfEntries * 6 + 1 + extra, PortfolioPdx.numInstance);
// set readSerialized and query
((GemFireCacheImpl) getCache()).setReadSerialized(true);
PdxInstanceFactory out = PdxInstanceFactoryImpl.newCreator("org.apache.geode.cache.query.data.PositionPdx", false);
out.writeLong("avg20DaysVol", 0);
out.writeString("bondRating", "");
out.writeDouble("convRatio", 0);
out.writeString("country", "");
out.writeDouble("delta", 0);
out.writeLong("industry", 0);
out.writeLong("issuer", 0);
out.writeDouble("mktValue", pos.getMktValue());
out.writeDouble("qty", 0);
out.writeString("secId", pos.secId);
out.writeString("secIdIndexed", pos.secIdIndexed);
out.writeString("secLinks", "");
out.writeDouble("sharesOutstanding", pos.getSharesOutstanding());
out.writeString("underlyer", "");
out.writeLong("volatility", 0);
out.writeInt("pid", pos.getPid());
out.writeInt("portfolioId", 0);
// Identity Field.
out.markIdentityField("secId");
PdxInstance pi = out.create();
PdxInstanceEnum pdxEnum = new PdxInstanceEnum(pDay);
for (int i = 0; i < queries.length; i++) {
try {
if (i == 0) {
sr = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pi });
} else if (i == 1) {
sr = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pdxEnum });
} else {
sr = (SelectResults) qs.newQuery(queries[i]).execute();
}
assertTrue("Size of resultset should be greater than 0 for query: " + queries[i], sr.size() > 0);
// in case of PortfolioPdx
if (queries[i].indexOf("distinct") == -1) {
if (i == 0 || i == 1) {
assertEquals("Expected and actual results do not match for query: " + queries[i], 1, sr.size());
} else {
assertEquals("Expected and actual results do not match for query: " + queries[i], results[i], sr.size());
}
}
} catch (Exception e) {
Assert.fail("Failed executing query " + queries[i], e);
}
}
// reset readSerialized and query
((GemFireCacheImpl) getCache()).setReadSerialized(false);
return null;
}
});
// query from client
client.invoke(new SerializableCallable("Create client") {
@Override
public Object call() throws Exception {
ClientCacheFactory cf = new ClientCacheFactory();
cf.addPoolServer(NetworkUtils.getServerHostName(server1.getHost()), port1);
ClientCache cache = getClientCache(cf);
QueryService qs = null;
SelectResults sr = null;
// Execute query remotely
try {
qs = cache.getQueryService();
} catch (Exception e) {
Assert.fail("Failed to get QueryService.", e);
}
PositionPdx pos = new PositionPdx("IBM", 100);
PortfolioPdx.Day pDay = new PortfolioPdx(1).aDay;
for (int i = 0; i < queries.length; i++) {
try {
if (i == 0) {
sr = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pos });
} else if (i == 1) {
sr = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pDay });
} else {
sr = (SelectResults) qs.newQuery(queries[i]).execute();
}
assertTrue("Size of resultset should be greater than 0 for query: " + queries[i], sr.size() > 0);
assertEquals("Expected and actual results do not match for query: " + queries[i], results[i], sr.size());
for (Object result : sr) {
if (result instanceof Struct) {
Object[] r = ((Struct) result).getFieldValues();
for (int j = 0; j < r.length; j++) {
if (r[j] instanceof PdxInstance || r[j] instanceof PdxString) {
fail("Result object should be a domain object and not an instance of " + r[j].getClass() + " for query: " + queries[i]);
}
}
} else if (result instanceof PdxInstance || result instanceof PdxString) {
fail("Result object should be a domain object and not an instance of " + result.getClass() + " for query: " + queries[i]);
}
}
} catch (Exception e) {
Assert.fail("Failed executing query " + queries[i], e);
}
}
return null;
}
});
// query locally on server1
server1.invoke(new SerializableCallable("query locally on server1") {
@Override
public Object call() throws Exception {
GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
QueryService qs = null;
SelectResults[][] sr = new SelectResults[queries.length][2];
// Execute query locally
try {
qs = getCache().getQueryService();
} catch (Exception e) {
Assert.fail("Failed to get QueryService.", e);
}
int cnt = PositionPdx.cnt;
PositionPdx pos = new PositionPdx("IBM", 100);
PortfolioPdx.Day pDay = new PortfolioPdx(1).aDay;
for (int i = 0; i < queries.length; i++) {
try {
if (i == 0) {
sr[i][0] = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pos });
} else if (i == 1) {
sr[i][0] = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pDay });
} else {
sr[i][0] = (SelectResults) qs.newQuery(queries[i]).execute();
}
assertTrue("Size of resultset should be greater than 0 for query: " + queries[i], sr[i][0].size() > 0);
assertEquals("Expected and actual results do not match for query: " + queries[i], results[i], sr[i][0].size());
for (Object result : sr[i][0]) {
if (result instanceof Struct) {
Object[] r = ((Struct) result).getFieldValues();
for (int j = 0; j < r.length; j++) {
if (r[j] instanceof PdxInstance || r[j] instanceof PdxString) {
fail("Result object should be a domain object and not an instance of " + r[j].getClass() + " for query: " + queries[i]);
}
}
} else if (result instanceof PdxInstance || result instanceof PdxString) {
fail("Result object should be a domain object and not an instance of " + result.getClass() + " for query: " + queries[i]);
}
}
} catch (Exception e) {
Assert.fail("Failed executing query " + queries[i], e);
}
}
// create index
qs.createIndex("statusIndex", "status", name);
qs.createIndex("IDIndex", "ID", name);
qs.createIndex("pIdIndex", "pos.getPid()", name + " p, p.positions.values pos");
qs.createIndex("secIdIndex", "pos.secId", name + " p, p.positions.values pos");
for (int i = 0; i < queries.length; i++) {
try {
if (i == 0) {
sr[i][1] = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pos });
} else if (i == 1) {
sr[i][1] = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pDay });
} else {
sr[i][1] = (SelectResults) qs.newQuery(queries[i]).execute();
}
assertTrue("Size of resultset should be greater than 0 for query: " + queries[i], sr[i][1].size() > 0);
assertEquals("Expected and actual results do not match for query: " + queries[i], results[i], sr[i][1].size());
for (Object result : sr[i][1]) {
if (result instanceof Struct) {
Object[] r = ((Struct) result).getFieldValues();
for (int j = 0; j < r.length; j++) {
if (r[j] instanceof PdxInstance || r[j] instanceof PdxString) {
fail("Result object should be a domain object and not an instance of " + r[j].getClass() + " for query: " + queries[i]);
}
}
} else if (result instanceof PdxInstance || result instanceof PdxString) {
fail("Result object should be a domain object and not an instance of " + result.getClass() + " for query: " + queries[i]);
}
}
} catch (Exception e) {
Assert.fail("Failed executing query " + queries[i], e);
}
}
StructSetOrResultsSet ssOrrs = new StructSetOrResultsSet();
ssOrrs.CompareQueryResultsWithoutAndWithIndexes(sr, queries.length, queries);
return null;
}
});
this.closeClient(client);
this.closeClient(server1);
}
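The snippet above switches the server into read-serialized mode through the internal GemFireCacheImpl.setReadSerialized hook so that query results come back as PdxInstance objects instead of deserialized PortfolioPdx/PositionPdx domain objects, and then switches it back. Outside of test code, that behavior is normally configured when the cache is created; here is a minimal sketch using the public API, where the cache settings shown are the only assumption.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;

public class ReadSerializedCacheSketch {
  public static void main(String[] args) {
    // With PDX read-serialized enabled, queries return PdxInstance values
    // rather than deserializing them into domain classes.
    Cache cache = new CacheFactory().setPdxReadSerialized(true).create();
    // ... create regions and run queries ...
    cache.close();
  }
}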
Use of org.apache.geode.cache.query.functional.StructSetOrResultsSet in project geode by Apache: class QueryIndexDUnitTest, method createIndexOnOverflowRegionsAndValidateResultsUsingParams.
@Test
public void createIndexOnOverflowRegionsAndValidateResultsUsingParams() throws Exception {
Host host = Host.getHost(0);
VM[] vms = new VM[] { host.getVM(0), host.getVM(1) };
// Create and load regions on all vms.
for (int i = 0; i < vms.length; i++) {
int finalI = i;
vms[i].invoke(() -> QueryIndexDUnitTest.createAndLoadOverFlowRegions("testOfValidUseParams" + "vm" + finalI, new Boolean(true), new Boolean(false)));
}
vms[0].invoke(new CacheSerializableRunnable("Execute query validate results") {
public void run2() throws CacheException {
Cache cache = basicGetCache();
String[] regionNames = new String[] { "replicateOverFlowRegion", "replicatePersistentOverFlowRegion", "prOverFlowRegion", "prPersistentOverFlowRegion" };
QueryService qs = cache.getQueryService();
Region region = null;
int numObjects = 10;
// The index should get updated accordingly.
for (int i = 0; i < regionNames.length; i++) {
region = cache.getRegion(regionNames[i]);
for (int cnt = 1; cnt < numObjects; cnt++) {
region.put(new Portfolio(cnt), new Integer(cnt + 100));
}
}
String[] qString = new String[] {
    "SELECT * FROM /REGION_NAME pf WHERE pf = $1",
    "SELECT * FROM /REGION_NAME pf WHERE pf > $1",
    "SELECT * FROM /REGION_NAME.values pf WHERE pf < $1",
    "SELECT * FROM /REGION_NAME.keys k WHERE k.ID = $1",
    "SELECT key.ID FROM /REGION_NAME.keys key WHERE key.ID = $1",
    "SELECT ID, status FROM /REGION_NAME.keys WHERE ID = $1",
    "SELECT k.ID, k.status FROM /REGION_NAME.keys k WHERE k.ID = $1 and k.status = $2",
    "SELECT * FROM /REGION_NAME.keys key WHERE key.ID > $1",
    "SELECT key.ID FROM /REGION_NAME.keys key WHERE key.ID > $1 and key.status = $2" };
// Execute Query without index.
SelectResults[] srWithoutIndex = new SelectResults[qString.length * regionNames.length];
String[] queryString = new String[qString.length * regionNames.length];
int r = 0;
try {
for (int q = 0; q < qString.length; q++) {
for (int i = 0; i < regionNames.length; i++) {
String queryStr = qString[q].replace("REGION_NAME", regionNames[i]);
Query query = qs.newQuery(queryStr);
queryString[r] = queryStr;
srWithoutIndex[r] = (SelectResults) query.execute(new Object[] { new Integer(5), "active" });
r++;
}
}
} catch (Exception ex) {
logger.info("Failed to Execute query", ex);
fail("Failed to Execute query.");
}
// Create index.
String indexName = "";
try {
for (int i = 0; i < regionNames.length; i++) {
region = cache.getRegion(regionNames[i]);
indexName = "idIndex" + regionNames[i];
cache.getLogger().fine("createIndexOnOverFlowRegions() checking for index: " + indexName);
try {
if (qs.getIndex(region, indexName) == null) {
cache.getLogger().fine("createIndexOnOverFlowRegions() Index doesn't exist, creating index: " + indexName);
Index i1 = qs.createIndex(indexName, "pf", "/" + regionNames[i] + " pf");
}
indexName = "valueIndex" + regionNames[i];
if (qs.getIndex(region, indexName) == null) {
cache.getLogger().fine("createIndexOnOverFlowRegions() Index doesn't exist, creating index: " + indexName);
Index i1 = qs.createIndex(indexName, "pf", "/" + regionNames[i] + ".values pf");
}
indexName = "keyIdIndex" + regionNames[i];
if (qs.getIndex(region, indexName) == null) {
cache.getLogger().fine("createIndexOnOverFlowRegions() Index doesn't exist, creating index: " + indexName);
Index i2 = qs.createIndex(indexName, "key.ID", "/" + regionNames[i] + ".keys key");
}
} catch (IndexNameConflictException ice) {
// Ignore. The pr may have created the index through
// remote index create message from peer.
}
}
} catch (Exception ex) {
LogWriterUtils.getLogWriter().info("Failed to create index", ex);
fail("Failed to create index." + indexName);
}
// Execute Query with index.
SelectResults[] srWithIndex = new SelectResults[qString.length * regionNames.length];
try {
r = 0;
for (int q = 0; q < qString.length; q++) {
for (int i = 0; i < regionNames.length; i++) {
QueryObserverImpl observer = new QueryObserverImpl();
QueryObserverHolder.setInstance(observer);
String queryStr = qString[q].replace("REGION_NAME", regionNames[i]);
Query query = qs.newQuery(queryStr);
srWithIndex[r++] = (SelectResults) query.execute(new Object[] { new Integer(5), "active" });
if (!observer.isIndexesUsed) {
fail("Index not used for query. " + queryStr);
}
}
}
} catch (Exception ex) {
logger.info("Failed to Execute query", ex);
fail("Failed to Execute query.");
}
// Compare results with and without index.
StructSetOrResultsSet ssORrs = new StructSetOrResultsSet();
SelectResults[][] sr = new SelectResults[1][2];
for (int i = 0; i < srWithIndex.length; i++) {
sr[0][0] = srWithoutIndex[i];
sr[0][1] = srWithIndex[i];
logger.info("Comparing the result for the query : " + queryString[i] + " Index in ResultSet is: " + i);
ssORrs.CompareQueryResultsWithoutAndWithIndexes(sr, 1, queryString);
}
// The index should get updated accordingly.
for (int i = 0; i < regionNames.length; i++) {
region = cache.getRegion(regionNames[i]);
for (int cnt = 1; cnt < numObjects; cnt++) {
if (cnt % 2 == 0) {
region.destroy(new Portfolio(cnt));
}
}
// Add destroyed entries
for (int cnt = 1; cnt < numObjects; cnt++) {
if (cnt % 2 == 0) {
region.put(new Portfolio(cnt), new Integer(cnt + 100));
}
}
}
// Execute Query with index.
srWithIndex = new SelectResults[qString.length * regionNames.length];
try {
r = 0;
for (int q = 0; q < qString.length; q++) {
for (int i = 0; i < regionNames.length; i++) {
QueryObserverImpl observer = new QueryObserverImpl();
QueryObserverHolder.setInstance(observer);
String queryStr = qString[q].replace("REGION_NAME", regionNames[i]);
Query query = qs.newQuery(queryStr);
srWithIndex[r++] = (SelectResults) query.execute(new Object[] { new Integer(5), "active" });
if (!observer.isIndexesUsed) {
fail("Index not used for query. " + queryStr);
}
}
}
} catch (Exception ex) {
logger.info("Failed to Execute query", ex);
fail("Failed to Execute query.");
}
// Compare results with and without index.
for (int i = 0; i < srWithIndex.length; i++) {
sr[0][0] = srWithoutIndex[i];
sr[0][1] = srWithIndex[i];
logger.info("Comparing the result for the query : " + queryString[i] + " Index in ResultSet is: " + i);
ssORrs.CompareQueryResultsWithoutAndWithIndexes(sr, 1, queryString);
}
}
});
}
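createAndLoadOverFlowRegions is a test helper whose body is not shown in these snippets; the region names used above (replicateOverFlowRegion, prPersistentOverFlowRegion, and so on) indicate replicated and partitioned regions whose entries overflow to disk under eviction. Below is a minimal sketch of one way such a region could be configured with the public eviction API; the region name, entry limit, and region shortcut are illustrative assumptions, not the helper's actual settings.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.EvictionAction;
import org.apache.geode.cache.EvictionAttributes;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class OverflowRegionSketch {
  public static void main(String[] args) {
    Cache cache = new CacheFactory().create();
    // Keep at most 100 entries in memory; evict least-recently-used entries to disk.
    Region<Object, Object> region = cache.<Object, Object>createRegionFactory(RegionShortcut.REPLICATE)
        .setEvictionAttributes(
            EvictionAttributes.createLRUEntryAttributes(100, EvictionAction.OVERFLOW_TO_DISK))
        .create("replicateOverFlowRegion");
    region.put("key-0", "value-0");
    cache.close();
  }
}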
Use of org.apache.geode.cache.query.functional.StructSetOrResultsSet in project geode by Apache: class IndexMaintainceJUnitTest, method test008RangeAndCompactRangeIndex.
/**
* Test to compare range and compact index. They should return the same results.
*/
@Test
public void test008RangeAndCompactRangeIndex() {
try {
// CacheUtils.restartCache();
if (!IndexMaintainceJUnitTest.isInitDone) {
init();
}
qs.removeIndexes();
String[] queryStr = new String[] {
    "Select status from /portfolio pf where status='active'",
    "Select pf.ID from /portfolio pf where pf.ID > 2 and pf.ID < 100",
    "Select * from /portfolio pf where pf.position1.secId > '2'" };
String[] queryFields = new String[] { "status", "ID", "position1.secId" };
for (int i = 0; i < queryStr.length; i++) {
// Clear indexes if any.
qs.removeIndexes();
// initialize region.
region.clear();
for (int k = 0; k < 10; k++) {
region.put("" + k, new Portfolio(k));
}
for (int j = 0; j < 1; j++) {
// Update Region.
for (int k = 0; k < (j * 100); k++) {
region.put("" + k, new Portfolio(k));
}
// Create compact index.
IndexManager.TEST_RANGEINDEX_ONLY = false;
index = (IndexProtocol) qs.createIndex(queryFields[i] + "Index", IndexType.FUNCTIONAL, queryFields[i], "/portfolio");
// Execute Query.
SelectResults[][] rs = new SelectResults[1][2];
Query query = qs.newQuery(queryStr[i]);
rs[0][0] = (SelectResults) query.execute();
// remove compact index.
qs.removeIndexes();
// Create Range Index.
IndexManager.TEST_RANGEINDEX_ONLY = true;
index = (IndexProtocol) qs.createIndex(queryFields[i] + "rIndex", IndexType.FUNCTIONAL, queryFields[i], "/portfolio");
query = qs.newQuery(queryStr[i]);
rs[0][1] = (SelectResults) query.execute();
CacheUtils.log("#### rs1 size is : " + (rs[0][0]).size() + " rs2 size is : " + (rs[0][1]).size());
StructSetOrResultsSet ssORrs = new StructSetOrResultsSet();
ssORrs.CompareQueryResultsWithoutAndWithIndexes(rs, 1, queryStr);
}
}
} catch (Exception e) {
e.printStackTrace();
fail("Test failed due to exception=" + e);
} finally {
IndexManager.TEST_RANGEINDEX_ONLY = false;
IndexMaintainceJUnitTest.isInitDone = false;
CacheUtils.restartCache();
}
}