Use of org.apache.geode.cache.query.data.PortfolioPdx in the Apache Geode project — class PdxCopyOnReadQueryJUnitTest, method testCopyOnReadPdxSerialization:
// Verifies that with copy-on-read enabled and PdxReadSerialized disabled,
// PortfolioPdx values round-trip through PDX serialization such that a
// query result set can still be matched (via "in $1") against the same
// objects stored in a second region.
@Test
public void testCopyOnReadPdxSerialization() throws Exception {
    List<String> autoSerializedClasses = new ArrayList<String>();
    autoSerializedClasses.add(PortfolioPdx.class.getCanonicalName());
    ReflectionBasedAutoSerializer autoSerializer =
        new ReflectionBasedAutoSerializer(autoSerializedClasses.toArray(new String[0]));
    CacheFactory factory = new CacheFactory();
    factory.setPdxSerializer(autoSerializer);
    factory.setPdxReadSerialized(false);
    factory.set(MCAST_PORT, "0");
    cache = factory.create();
    cache.setCopyOnRead(true);
    Region objects = cache.createRegionFactory(RegionShortcut.REPLICATE).create("SimpleObjects");
    Region objectCopies =
        cache.createRegionFactory(RegionShortcut.REPLICATE).create("SimpleObjects_Duplicates");
    // Store the identical instance in both regions under the same key.
    for (int key = 0; key < 10; key++) {
        PortfolioPdx value = new PortfolioPdx(key);
        objects.put(key, value);
        objectCopies.put(key, value);
    }
    QueryService queryService = cache.getQueryService();
    SelectResults allObjects =
        (SelectResults) queryService.newQuery("select * from /SimpleObjects").execute();
    assertEquals(10, allObjects.size());
    // Every copied-on-read result must still equal its duplicate-region counterpart.
    Query membershipQuery =
        queryService.newQuery("select * from /SimpleObjects_Duplicates s where s in ($1)");
    SelectResults matched = (SelectResults) membershipQuery.execute(new Object[] { allObjects });
    assertEquals(10, matched.size());
}
Use of org.apache.geode.cache.query.data.PortfolioPdx in the Apache Geode project — class SelectStarQueryDUnitTest, method functionWithStructTypeInInnerQueryShouldNotThrowExceptionWhenRunOnMultipleNodes:
// Distributed regression test: a query whose inner (element) subquery projects a
// struct (ID, status, getType) is executed from a client against three partitioned
// cache servers, and must return results rather than throw. Each server is seeded
// with the same 10 PortfolioPdx objects; the client then adds 90 more.
@Test
public void functionWithStructTypeInInnerQueryShouldNotThrowExceptionWhenRunOnMultipleNodes() throws Exception {
// VMs 0-2 host partitioned cache servers; VM 3 acts as the client.
final Host host = Host.getHost(0);
final VM server1 = host.getVM(0);
final VM server2 = host.getVM(1);
final VM server3 = host.getVM(2);
final VM client = host.getVM(3);
PortfolioPdx[] portfolios = new PortfolioPdx[10];
for (int i = 0; i < portfolios.length; i++) {
portfolios[i] = new PortfolioPdx(i);
}
// create servers and regions
final int port1 = startPartitionedCacheServer(server1, portfolios);
final int port2 = startPartitionedCacheServer(server2, portfolios);
final int port3 = startPartitionedCacheServer(server3, portfolios);
// create client
// The client pool is configured with all three server endpoints so the query
// can be serviced by (and fanned out across) any of the partitioned nodes.
client.invoke(new SerializableCallable("Create client") {
@Override
public Object call() throws Exception {
ClientCacheFactory cf = new ClientCacheFactory();
cf.addPoolServer(getServerHostName(server1.getHost()), port1);
cf.addPoolServer(getServerHostName(server2.getHost()), port2);
cf.addPoolServer(getServerHostName(server3.getHost()), port3);
ClientCache cache = getClientCache(cf);
cache.createClientRegionFactory(ClientRegionShortcut.PROXY).create(regName);
return null;
}
});
// put serialized PortfolioPdx objects
// Keys 10-99 from the client, on top of keys 0-9 seeded on the servers.
client.invoke(new SerializableCallable("Put objects") {
@Override
public Object call() throws Exception {
Region r1 = getRootRegion(regName);
for (int i = 10; i < 100; i++) {
r1.put("key-" + i, new PortfolioPdx(i));
}
return null;
}
});
// query remotely from client
client.invoke(new SerializableCallable("Query") {
@Override
public Object call() throws Exception {
getLogWriter().info("Querying remotely from client");
QueryService remoteQS = null;
try {
remoteQS = ((ClientCache) getCache()).getQueryService();
// The inner "element(select distinct ...)" subquery returns a struct per
// outer row; running it across multiple nodes must not throw. PortfolioPdx
// alternates status, so half of the 100 entries are expected to be 'inactive'.
SelectResults sr = (SelectResults) remoteQS.newQuery("select distinct oP.ID, oP.status, oP.getType from /" + regName + " oP where element(select distinct p.ID, p.status, p.getType from /" + regName + " p where p.ID = oP.ID).status = 'inactive'").execute();
assertEquals(50, sr.size());
} catch (Exception e) {
fail("Exception getting query service ", e);
}
return null;
}
});
// Tear down client first, then the servers.
closeCache(client);
closeCache(server1);
closeCache(server2);
closeCache(server3);
}
Use of org.apache.geode.cache.query.data.PortfolioPdx in the Apache Geode project — class PdxOrderByJUnitTest, method testPartitionRangeIndex:
// Verifies that creating "secIdIndex" on a partitioned region produces a
// PartitionedIndex whose per-bucket indexes are RangeIndex instances, and that
// query results (and, for ORDER BY queries, result order) are unchanged by the
// index. Fix: the bucket-index type-check failure message previously reported
// the PartitionedIndex's class instead of the offending bucket index's class.
@Test
public void testPartitionRangeIndex() throws Exception {
    final int numberOfEntries = 10;
    Region pr = this.configurePR();
    // create a local query service
    QueryService localQueryService = null;
    try {
        localQueryService = CacheUtils.getQueryService();
    } catch (Exception e) {
        fail(e.toString());
    }
    for (int i = 0; i < numberOfEntries; i++) {
        pr.put("key-" + i, new PortfolioPdx(i));
    }
    localQueryService = CacheUtils.getCache().getQueryService();
    // Column 0: results without the index; column 1: results with the index.
    SelectResults[][] rs = new SelectResults[queryString.length][2];
    for (int i = 0; i < queryString.length; i++) {
        try {
            Query query = localQueryService.newQuery(queryString[i]);
            rs[i][0] = (SelectResults) query.execute();
            checkForPdxString(rs[i][0].asList(), queryString[i]);
        } catch (Exception e) {
            fail("Failed executing " + queryString[i]);
        }
    }
    Index index = null;
    try {
        index = localQueryService.createIndex("secIdIndex", "pos.secId", regName + " p, p.positions.values pos");
        if (index instanceof PartitionedIndex) {
            for (Object o : ((PartitionedIndex) index).getBucketIndexes()) {
                if (!(o instanceof RangeIndex)) {
                    // Report the actual bucket index type, not the enclosing
                    // PartitionedIndex's type.
                    fail("Range Index should have been created instead of " + o.getClass());
                }
            }
        } else {
            fail("Partitioned index expected");
        }
    } catch (Exception ex) {
        fail("Failed to create index." + ex.getMessage());
    }
    // Re-run every query now that the index exists.
    for (int i = 0; i < queryString.length; i++) {
        try {
            Query query = localQueryService.newQuery(queryString[i]);
            rs[i][1] = (SelectResults) query.execute();
            checkForPdxString(rs[i][1].asList(), queryString[i]);
        } catch (Exception e) {
            fail("Failed executing " + queryString[i]);
        }
    }
    for (int i = 0; i < queryString.length; i++) {
        try {
            if (i < 7) {
                // Compare local and remote query results.
                if (!compareResultsOfWithAndWithoutIndex(rs[i])) {
                    fail("Local and Remote Query Results are not matching for query :" + queryString[i]);
                }
            } else {
                // compare the order of results returned
                compareResultsOrder(rs[i], true);
            }
        } catch (Exception e) {
            fail("Failed executing " + queryString[i]);
        }
    }
}
Use of org.apache.geode.cache.query.data.PortfolioPdx in the Apache Geode project — class PdxGroupByTestImpl, method testAggregateFuncMax:
// Verifies MAX(p.ID) grouped by status: the query's per-group maxima must match
// maxima computed directly from the region's values, and the result element
// type must be a struct of (String status, Number Maxx).
// Fix: removed the unused local `cs` ((DefaultQuery) query).getSelect() —
// dead code whose value was never read.
@Override
@Test
public void testAggregateFuncMax() throws Exception {
    Region region = this.createRegion("portfolio", PortfolioPdx.class);
    // PortfolioPdx(i) alternates status between "active" and "inactive".
    for (int i = 1; i < 200; ++i) {
        PortfolioPdx pf = new PortfolioPdx(i);
        pf.shortID = (short) ((short) i / 5);
        region.put("key-" + i, pf);
    }
    String queryStr = "select p.status as status, Max(p.ID) as Maxx from /portfolio p where p.ID > 0 group by status ";
    QueryService qs = CacheUtils.getQueryService();
    Query query = qs.newQuery(queryStr);
    SelectResults sr = (SelectResults) query.execute();
    assertTrue(sr.getCollectionType().getElementType().isStructType());
    // Exactly two groups: "active" and "inactive".
    assertEquals(2, sr.size());
    Iterator iter = sr.iterator();
    Region rgn = CacheUtils.getRegion("portfolio");
    // Compute the expected per-status maxima directly from the region.
    int activeMaxID = 0;
    int inactiveMaxID = 0;
    for (Object o : rgn.values()) {
        PortfolioPdx pf = (PortfolioPdx) o;
        if (pf.status.equals("active")) {
            if (pf.getID() > activeMaxID) {
                activeMaxID = pf.getID();
            }
        } else if (pf.status.equals("inactive")) {
            if (pf.getID() > inactiveMaxID) {
                inactiveMaxID = pf.getID();
            }
        } else {
            fail("unexpected value of status");
        }
    }
    // Each returned struct must carry the expected field types and max value.
    while (iter.hasNext()) {
        Struct struct = (Struct) iter.next();
        StructType structType = struct.getStructType();
        ObjectType[] fieldTypes = structType.getFieldTypes();
        assertEquals("String", fieldTypes[0].getSimpleClassName());
        assertEquals("Number", fieldTypes[1].getSimpleClassName());
        if (struct.get("status").equals("active")) {
            assertEquals(activeMaxID, ((Integer) struct.get("Maxx")).intValue());
        } else if (struct.get("status").equals("inactive")) {
            assertEquals(inactiveMaxID, ((Integer) struct.get("Maxx")).intValue());
        } else {
            fail("unexpected value of status");
        }
    }
    // The collection-level element type must agree with the per-row struct types.
    ObjectType elementType = sr.getCollectionType().getElementType();
    assertTrue(elementType.isStructType());
    StructType structType = (StructType) elementType;
    ObjectType[] fieldTypes = structType.getFieldTypes();
    assertEquals("String", fieldTypes[0].getSimpleClassName());
    assertEquals("Number", fieldTypes[1].getSimpleClassName());
}
Use of org.apache.geode.cache.query.data.PortfolioPdx in the Apache Geode project — class PdxGroupByTestImpl, method testSumWithMultiColumnGroupBy:
// Verifies SUM(p.ID) with a two-column GROUP BY (status, shortID): per-group
// sums from the query must equal sums computed directly from the region, and
// the result element type must be a struct of (String, short, Number).
// Fix: removed the unused local `cs` ((DefaultQuery) query).getSelect() —
// dead code whose value was never read.
@Override
@Test
public void testSumWithMultiColumnGroupBy() throws Exception {
    Region region = this.createRegion("portfolio", PortfolioPdx.class);
    for (int i = 1; i < 200; ++i) {
        PortfolioPdx pf = new PortfolioPdx(i);
        pf.shortID = (short) ((short) i / 5);
        region.put("key-" + i, pf);
    }
    // Expected sums keyed by "status_shortID", computed straight from the region.
    Map<String, Integer> expectedData = new HashMap<String, Integer>();
    for (Object o : region.values()) {
        PortfolioPdx pf = (PortfolioPdx) o;
        String key = pf.status + "_" + pf.shortID;
        if (expectedData.containsKey(key)) {
            int sum = expectedData.get(key).intValue() + pf.getID();
            expectedData.put(key, sum);
        } else {
            expectedData.put(key, pf.getID());
        }
    }
    String queryStr = "select p.status as status, p.shortID as shortID, sum(p.ID) as summ from /portfolio p" + " where p.ID > 0 group by status, shortID ";
    QueryService qs = CacheUtils.getQueryService();
    Query query = qs.newQuery(queryStr);
    SelectResults sr = (SelectResults) query.execute();
    assertTrue(sr.getCollectionType().getElementType().isStructType());
    // One result row per distinct (status, shortID) group.
    assertEquals(expectedData.size(), sr.size());
    Iterator iter = sr.iterator();
    while (iter.hasNext()) {
        Struct struct = (Struct) iter.next();
        StructType structType = struct.getStructType();
        ObjectType[] fieldTypes = structType.getFieldTypes();
        assertEquals("String", fieldTypes[0].getSimpleClassName());
        assertEquals("short", fieldTypes[1].getSimpleClassName());
        assertEquals("Number", fieldTypes[2].getSimpleClassName());
        String key = struct.get("status") + "_" + struct.get("shortID");
        int sum = ((Integer) struct.get("summ")).intValue();
        assertTrue(expectedData.containsKey(key));
        assertEquals(expectedData.get(key).intValue(), sum);
    }
    // The collection-level element type must agree with the per-row struct types.
    ObjectType elementType = sr.getCollectionType().getElementType();
    assertTrue(elementType.isStructType());
    StructType structType = (StructType) elementType;
    ObjectType[] fieldTypes = structType.getFieldTypes();
    assertEquals("String", fieldTypes[0].getSimpleClassName());
    assertEquals("short", fieldTypes[1].getSimpleClassName());
    assertEquals("Number", fieldTypes[2].getSimpleClassName());
}
Aggregations