
Example 31 with PortfolioPdx

Use of org.apache.geode.cache.query.data.PortfolioPdx in project geode by apache.

From the class PdxQueryDUnitTest, method testClientServerQueryWithRangeIndex:

/**
   * Tests client-server query on PdxInstance.
   */
@Test
public void testClientServerQueryWithRangeIndex() throws CacheException {
    final Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    VM vm2 = host.getVM(2);
    VM vm3 = host.getVM(3);
    final String[] qs = new String[] {
        "SELECT * FROM " + regName + " p WHERE p.ID > 0",
        "SELECT p FROM " + regName + " p WHERE p.ID > 0",
        "SELECT * FROM " + regName + " p WHERE p.ID = 1",
        "SELECT * FROM " + regName + " p WHERE p.ID < 10",
        "SELECT * FROM " + regName + " p WHERE p.ID != 10",
        "SELECT * FROM " + regName + " p, p.positions.values pos WHERE p.ID > 0",
        "SELECT * FROM " + regName + " p, p.positions.values pos WHERE p.ID = 10",
        "SELECT p, pos FROM " + regName + " p, p.positions.values pos WHERE p.ID > 0",
        "SELECT p, pos FROM " + regName + " p, p.positions.values pos WHERE p.ID = 10",
        "SELECT pos FROM " + regName + " p, p.positions.values pos WHERE p.ID > 0",
        "SELECT p, pos FROM " + regName + " p, p.positions.values pos WHERE pos.secId != 'XXX'",
        "SELECT pos FROM " + regName + " p, p.positions.values pos WHERE pos.secId != 'XXX'",
        "SELECT pos FROM " + regName + " p, p.positions.values pos WHERE pos.secId = 'SUN'",
        "SELECT p, pos FROM " + regName + " p, p.positions.values pos WHERE pos.secId = 'SUN'",
        "SELECT p, pos FROM " + regName + " p, p.positions.values pos WHERE pos.secId = 'DELL'",
        "SELECT * FROM " + regName + " p, p.positions.values pos WHERE pos.secId = 'SUN'",
        "SELECT * FROM " + regName + " p, p.positions.values pos WHERE pos.secId = 'DELL'",
        "SELECT p, p.position1 FROM " + regName + " p where p.position1.secId != 'XXX'",
        "SELECT p, p.position1 FROM " + regName + " p where p.position1.secId = 'SUN'",
        "SELECT p.position1 FROM " + regName + " p WHERE p.ID > 0",
        "SELECT * FROM " + regName + " p WHERE p.status = 'active'",
        "SELECT p FROM " + regName + " p WHERE p.status != 'active'"
    };
    // Start server1
    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {

        public void run2() throws CacheException {
            // Async index
            configAndStartBridgeServer(false, false, true, null);
            Region region = getRootRegion().getSubregion(regionName);
            // Create Range index.
            QueryService qs = getCache().getQueryService();
            try {
                qs.createIndex("idIndex", "p.ID", regName + " p");
                qs.createIndex("statusIndex", "p.status", regName + " p");
                qs.createIndex("secIdIndex", "pos.secId", regName + " p, p.positions.values pos");
                qs.createIndex("pSecIdIdIndex", "p.position1.secId", regName + " p");
            } catch (Exception ex) {
                fail("Failed to create index." + ex.getMessage());
            }
        }
    });
    // Start server2
    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {

        public void run2() throws CacheException {
            // Async index
            configAndStartBridgeServer(false, false, true, null);
            Region region = getRootRegion().getSubregion(regionName);
        }
    });
    // Client pool.
    final int port0 = vm0.invoke(() -> PdxQueryDUnitTest.getCacheServerPort());
    final int port1 = vm1.invoke(() -> PdxQueryDUnitTest.getCacheServerPort());
    final String host0 = NetworkUtils.getServerHostName(vm0.getHost());
    // Create client pool.
    final String poolName = "testClientServerQueryPool";
    createPool(vm2, poolName, new String[] { host0 }, new int[] { port0 }, true);
    createPool(vm3, poolName, new String[] { host0 }, new int[] { port1 }, true);
    // Create client region
    vm3.invoke(new CacheSerializableRunnable("Create region") {

        public void run2() throws CacheException {
            AttributesFactory factory = new AttributesFactory();
            factory.setScope(Scope.LOCAL);
            ClientServerTestCase.configureConnectionPool(factory, host0, port1, -1, true, -1, -1, null);
            Region region = createRegion(regionName, rootRegionName, factory.create());
            int j = 0;
            for (int i = 0; i < 100; i++) {
                region.put("key-" + i, new PortfolioPdx(j, j++));
                // To add duplicate:
                if (i % 24 == 0) {
                    // reset
                    j = 0;
                }
            }
        }
    });
    // Execute query and make sure there is no PdxInstance in the results.
    vm0.invoke(new CacheSerializableRunnable("Execute queries") {

        public void run2() throws CacheException {
            // Execute query locally.
            QueryService queryService = getCache().getQueryService();
            for (int i = 0; i < qs.length; i++) {
                try {
                    Query query = queryService.newQuery(qs[i]);
                    SelectResults results = (SelectResults) query.execute();
                    for (Object o : results.asList()) {
                        if (o instanceof Struct) {
                            Object[] values = ((Struct) o).getFieldValues();
                            for (int c = 0; c < values.length; c++) {
                                if (values[c] instanceof PdxInstance) {
                                    fail("Found unexpected PdxInstance in the query results. At struct field [" + c + "] query :" + qs[i] + " Object is: " + values[c]);
                                }
                            }
                        } else {
                            if (o instanceof PdxInstance) {
                                fail("Found unexpected PdxInstance in the query results. " + qs[i]);
                            }
                        }
                    }
                } catch (Exception e) {
                    Assert.fail("Failed executing " + qs[i], e);
                }
            }
        }
    });
    // Re-execute query to fetch PdxInstance in the results.
    vm0.invoke(new CacheSerializableRunnable("Execute queries with read-serialized") {

        public void run2() throws CacheException {
            // Execute query locally.
            GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
            cache.setReadSerialized(true);
            try {
                QueryService queryService = getCache().getQueryService();
                for (int i = 0; i < qs.length; i++) {
                    try {
                        Query query = queryService.newQuery(qs[i]);
                        SelectResults results = (SelectResults) query.execute();
                        for (Object o : results.asList()) {
                            if (o instanceof Struct) {
                                Object[] values = ((Struct) o).getFieldValues();
                                for (int c = 0; c < values.length; c++) {
                                    if (!(values[c] instanceof PdxInstance)) {
                                        fail("Didn't found expected PdxInstance in the query results. At struct field [" + c + "] query :" + qs[i] + " Object is: " + values[c]);
                                    }
                                }
                            } else {
                                if (!(o instanceof PdxInstance)) {
                                    fail("Didn't found expected PdxInstance in the query results. " + qs[i] + " Object is: " + o);
                                }
                            }
                        }
                    } catch (Exception e) {
                        Assert.fail("Failed executing " + qs[i], e);
                    }
                }
            } finally {
                cache.setReadSerialized(false);
            }
        }
    });
    this.closeClient(vm2);
    this.closeClient(vm3);
    this.closeClient(vm1);
    this.closeClient(vm0);
}
Also used : Query(org.apache.geode.cache.query.Query) CacheException(org.apache.geode.cache.CacheException) Host(org.apache.geode.test.dunit.Host) PortfolioPdx(org.apache.geode.cache.query.data.PortfolioPdx) CacheException(org.apache.geode.cache.CacheException) Struct(org.apache.geode.cache.query.Struct) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) SelectResults(org.apache.geode.cache.query.SelectResults) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) PdxInstance(org.apache.geode.pdx.PdxInstance) QueryService(org.apache.geode.cache.query.QueryService) VM(org.apache.geode.test.dunit.VM) Region(org.apache.geode.cache.Region) GemFireCacheImpl(org.apache.geode.internal.cache.GemFireCacheImpl) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest) Test(org.junit.Test)
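
Outside the DUnit harness, the APIs this test exercises (range-index creation via QueryService.createIndex and scanning SelectResults for PdxInstance values) can be shown with a single embedded cache. The snippet below is a minimal sketch, not the test's own setup: the region name, entry count, and the use of CacheFactory.setPdxReadSerialized (the public counterpart of the internal setReadSerialized call above) are assumptions.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.query.QueryService;
import org.apache.geode.cache.query.SelectResults;
import org.apache.geode.cache.query.Struct;
import org.apache.geode.cache.query.data.PortfolioPdx;
import org.apache.geode.pdx.PdxInstance;

public class RangeIndexPdxSketch {
    public static void main(String[] args) throws Exception {
        // setPdxReadSerialized(true) is the public knob matching the internal
        // cache.setReadSerialized(true) used in the second half of the test. Results
        // come back as PdxInstance only for values stored in serialized PDX form
        // (in the test, the data arrives serialized from the client VM).
        Cache cache = new CacheFactory()
            .set("mcast-port", "0")
            .setPdxReadSerialized(true)
            .create();
        Region<String, PortfolioPdx> region =
            cache.<String, PortfolioPdx>createRegionFactory(RegionShortcut.REPLICATE).create("portfolios");
        for (int i = 0; i < 100; i++) {
            region.put("key-" + i, new PortfolioPdx(i));
        }

        QueryService queryService = cache.getQueryService();
        // Range indexes with the same shape as idIndex and secIdIndex in the test.
        queryService.createIndex("idIndex", "p.ID", "/portfolios p");
        queryService.createIndex("secIdIndex", "pos.secId", "/portfolios p, p.positions.values pos");

        SelectResults<?> results = (SelectResults<?>) queryService
            .newQuery("SELECT p, pos FROM /portfolios p, p.positions.values pos WHERE p.ID > 0")
            .execute();
        for (Object o : results) {
            if (o instanceof Struct) {
                // Two projections mean each row is a Struct; report each field's runtime type.
                for (Object value : ((Struct) o).getFieldValues()) {
                    System.out.println(value instanceof PdxInstance ? "PdxInstance" : value.getClass().getSimpleName());
                }
            }
        }
        cache.close();
    }
}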

Example 32 with PortfolioPdx

Use of org.apache.geode.cache.query.data.PortfolioPdx in project geode by apache.

From the class PdxLocalQueryDUnitTest, method testLocalPdxQueries:

@Test
public void testLocalPdxQueries() throws Exception {
    final Host host = Host.getHost(0);
    final VM server1 = host.getVM(1);
    final VM client = host.getVM(2);
    final int numberOfEntries = 10;
    final String name = "/" + regionName;
    final String name2 = "/" + regionName2;
    final String[] queries = {
        "select * from " + name + " where position1 = $1", // 2
        "select * from " + name + " where aDay = $1", // 3
        "select distinct * from " + name + " p where p.status = 'inactive'", // numberOfEntries
        "select distinct p.status from " + name + " p where p.status = 'inactive'", // 1
        "select p from " + name + " p where p.status = 'inactive'", // numberOfEntries
        "select * from " + name + " p, p.positions.values v where v.secId = 'IBM'", // 4
        "select v from " + name + " p, p.positions.values v where v.secId = 'IBM'", // 4
        "select p.status from " + name + " p where p.status = 'inactive'", // numberOfEntries
        "select distinct * from " + name + " p where p.status = 'inactive' order by p.ID", // numberOfEntries
        "select * from " + name + " p where p.status = 'inactive' or p.ID > 0", // 19
        "select * from " + name + " p where p.status = 'inactive' and p.ID >= 0", // numberOfEntries
        "select * from " + name + " p where p.status in set ('inactive', 'active')", // numberOfEntries*2
        "select * from " + name + " p where p.ID > 0 and p.ID < 10", // 9
        "select v from " + name + " p, p.positions.values v where p.status = 'inactive'", // numberOfEntries*2
        "select v.secId from " + name + " p, p.positions.values v where p.status = 'inactive'", // numberOfEntries*2
        "select distinct p from " + name + " p, p.positions.values v where p.status = 'inactive' and v.pid >= 0", // numberOfEntries
        "select distinct p from " + name + " p, p.positions.values v where p.status = 'inactive' or v.pid > 0", // numberOfEntries*2
        "select distinct * from " + name + " p, p.positions.values v where p.status = 'inactive'", // numberOfEntries*2
        "select * from " + name + ".values v where v.status = 'inactive'", // numberOfEntries
        "select v from " + name + " v where v in (select p from " + name + " p where p.ID > 0)", // 19
        "select v from " + name + " v where v.status in (select distinct p.status from " + name + " p where p.status = 'inactive')", // numberOfEntries
        "select * from " + name + " r1, " + name2 + " r2 where r1.status = r2.status", // 200
        "select * from " + name + " r1, " + name2 + " r2 where r1.status = r2.status and r1.status = 'active'", // 100
        "select r2.status from " + name + " r1, " + name2 + " r2 where r1.status = r2.status and r1.status = 'active'", // 100
        "select distinct r2.status from " + name + " r1, " + name2 + " r2 where r1.status = r2.status and r1.status = 'active'", // 1
        "select * from " + name + " v where v.status = ELEMENT (select distinct p.status from " + name + " p where p.status = 'inactive')" // numberOfEntries
    };
    final int[] results = { 2, 3, numberOfEntries, 1, numberOfEntries, 4, 4, numberOfEntries, numberOfEntries, 19, numberOfEntries, numberOfEntries * 2, 9, numberOfEntries * 2, numberOfEntries * 2, numberOfEntries, numberOfEntries * 2, numberOfEntries * 2, numberOfEntries, 19, numberOfEntries, 200, 100, 100, 1, numberOfEntries };
    // Start server1
    final int port1 = (Integer) server1.invoke(new SerializableCallable("Create Server1") {

        @Override
        public Object call() throws Exception {
            Region r1 = getCache().createRegionFactory(RegionShortcut.REPLICATE).create(regionName);
            Region r2 = getCache().createRegionFactory(RegionShortcut.REPLICATE).create(regionName2);
            for (int i = 0; i < numberOfEntries; i++) {
                PortfolioPdx p = new PortfolioPdx(i);
                r1.put("key-" + i, p);
                r2.put("key-" + i, p);
            }
            CacheServer server = getCache().addCacheServer();
            int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
            server.setPort(port);
            server.start();
            return port;
        }
    });
    // client loads pdx objects on server
    client.invoke(new SerializableCallable("Create client") {

        @Override
        public Object call() throws Exception {
            ClientCacheFactory cf = new ClientCacheFactory();
            cf.addPoolServer(NetworkUtils.getServerHostName(server1.getHost()), port1);
            ClientCache cache = getClientCache(cf);
            Region region = cache.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY).create(regionName);
            Region region2 = cache.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY).create(regionName2);
            for (int i = numberOfEntries; i < numberOfEntries * 2; i++) {
                PortfolioPdx p = new PortfolioPdx(i);
                region.put("key-" + i, p);
                region2.put("key-" + i, p);
            }
            return null;
        }
    });
    // query locally on server1 to verify pdx objects are not deserialized
    server1.invoke(new SerializableCallable("query locally on server1") {

        @Override
        public Object call() throws Exception {
            GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
            QueryService qs = null;
            SelectResults sr = null;
            // Execute query locally
            try {
                qs = getCache().getQueryService();
            } catch (Exception e) {
                Assert.fail("Failed to get QueryService.", e);
            }
            PositionPdx pos = new PositionPdx("IBM", 100);
            PortfolioPdx.Day pDay = new PortfolioPdx(1).aDay;
            for (int i = 0; i < queries.length; i++) {
                try {
                    if (i == 0) {
                        sr = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pos });
                    } else if (i == 1) {
                        sr = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pDay });
                    } else {
                        sr = (SelectResults) qs.newQuery(queries[i]).execute();
                    }
                    assertTrue("Size of resultset should be greater than 0 for query: " + queries[i], sr.size() > 0);
                    assertEquals("Expected and actual results do not match for query: " + queries[i], results[i], sr.size());
                } catch (Exception e) {
                    Assert.fail("Failed executing query " + queries[i], e);
                }
            }
            int extra = 0;
            if (cache.getLogger().fineEnabled()) {
                extra = 20;
            }
            assertEquals(numberOfEntries * 6 + 1 + extra, PortfolioPdx.numInstance);
            // set readSerialized and query
            ((GemFireCacheImpl) getCache()).setReadSerialized(true);
            PdxInstanceFactory out = PdxInstanceFactoryImpl.newCreator("org.apache.geode.cache.query.data.PositionPdx", false);
            out.writeLong("avg20DaysVol", 0);
            out.writeString("bondRating", "");
            out.writeDouble("convRatio", 0);
            out.writeString("country", "");
            out.writeDouble("delta", 0);
            out.writeLong("industry", 0);
            out.writeLong("issuer", 0);
            out.writeDouble("mktValue", pos.getMktValue());
            out.writeDouble("qty", 0);
            out.writeString("secId", pos.secId);
            out.writeString("secIdIndexed", pos.secIdIndexed);
            out.writeString("secLinks", "");
            out.writeDouble("sharesOutstanding", pos.getSharesOutstanding());
            out.writeString("underlyer", "");
            out.writeLong("volatility", 0);
            out.writeInt("pid", pos.getPid());
            out.writeInt("portfolioId", 0);
            // Identity Field.
            out.markIdentityField("secId");
            PdxInstance pi = out.create();
            PdxInstanceEnum pdxEnum = new PdxInstanceEnum(pDay);
            for (int i = 0; i < queries.length; i++) {
                try {
                    if (i == 0) {
                        sr = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pi });
                    } else if (i == 1) {
                        sr = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pdxEnum });
                    } else {
                        sr = (SelectResults) qs.newQuery(queries[i]).execute();
                    }
                    assertTrue("Size of resultset should be greater than 0 for query: " + queries[i], sr.size() > 0);
                    // in case of PortfolioPdx
                    if (queries[i].indexOf("distinct") == -1) {
                        if (i == 0 || i == 1) {
                            assertEquals("Expected and actual results do not match for query: " + queries[i], 1, sr.size());
                        } else {
                            assertEquals("Expected and actual results do not match for query: " + queries[i], results[i], sr.size());
                        }
                    }
                } catch (Exception e) {
                    Assert.fail("Failed executing query " + queries[i], e);
                }
            }
            // reset readSerialized and query
            ((GemFireCacheImpl) getCache()).setReadSerialized(false);
            return null;
        }
    });
    // query from client
    client.invoke(new SerializableCallable("Create client") {

        @Override
        public Object call() throws Exception {
            ClientCacheFactory cf = new ClientCacheFactory();
            cf.addPoolServer(NetworkUtils.getServerHostName(server1.getHost()), port1);
            ClientCache cache = getClientCache(cf);
            QueryService qs = null;
            SelectResults sr = null;
            // Execute query remotely
            try {
                qs = cache.getQueryService();
            } catch (Exception e) {
                Assert.fail("Failed to get QueryService.", e);
            }
            PositionPdx pos = new PositionPdx("IBM", 100);
            PortfolioPdx.Day pDay = new PortfolioPdx(1).aDay;
            for (int i = 0; i < queries.length; i++) {
                try {
                    if (i == 0) {
                        sr = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pos });
                    } else if (i == 1) {
                        sr = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pDay });
                    } else {
                        sr = (SelectResults) qs.newQuery(queries[i]).execute();
                    }
                    assertTrue("Size of resultset should be greater than 0 for query: " + queries[i], sr.size() > 0);
                    assertEquals("Expected and actual results do not match for query: " + queries[i], results[i], sr.size());
                    for (Object result : sr) {
                        if (result instanceof Struct) {
                            Object[] r = ((Struct) result).getFieldValues();
                            for (int j = 0; j < r.length; j++) {
                                if (r[j] instanceof PdxInstance || r[j] instanceof PdxString) {
                                    fail("Result object should be a domain object and not an instance of " + r[j].getClass() + " for query: " + queries[i]);
                                }
                            }
                        } else if (result instanceof PdxInstance || result instanceof PdxString) {
                            fail("Result object should be a domain object and not an instance of " + result.getClass() + " for query: " + queries[i]);
                        }
                    }
                } catch (Exception e) {
                    Assert.fail("Failed executing query " + queries[i], e);
                }
            }
            return null;
        }
    });
    // query locally on server1
    server1.invoke(new SerializableCallable("query locally on server1") {

        @Override
        public Object call() throws Exception {
            GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
            QueryService qs = null;
            SelectResults[][] sr = new SelectResults[queries.length][2];
            // Execute query locally
            try {
                qs = getCache().getQueryService();
            } catch (Exception e) {
                Assert.fail("Failed to get QueryService.", e);
            }
            int cnt = PositionPdx.cnt;
            PositionPdx pos = new PositionPdx("IBM", 100);
            PortfolioPdx.Day pDay = new PortfolioPdx(1).aDay;
            for (int i = 0; i < queries.length; i++) {
                try {
                    if (i == 0) {
                        sr[i][0] = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pos });
                    } else if (i == 1) {
                        sr[i][0] = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pDay });
                    } else {
                        sr[i][0] = (SelectResults) qs.newQuery(queries[i]).execute();
                    }
                    assertTrue("Size of resultset should be greater than 0 for query: " + queries[i], sr[i][0].size() > 0);
                    assertEquals("Expected and actual results do not match for query: " + queries[i], results[i], sr[i][0].size());
                    for (Object result : sr[i][0]) {
                        if (result instanceof Struct) {
                            Object[] r = ((Struct) result).getFieldValues();
                            for (int j = 0; j < r.length; j++) {
                                if (r[j] instanceof PdxInstance || r[j] instanceof PdxString) {
                                    fail("Result object should be a domain object and not an instance of " + r[j].getClass() + " for query: " + queries[i]);
                                }
                            }
                        } else if (result instanceof PdxInstance || result instanceof PdxString) {
                            fail("Result object should be a domain object and not an instance of " + result.getClass() + " for query: " + queries[i]);
                        }
                    }
                } catch (Exception e) {
                    Assert.fail("Failed executing query " + queries[i], e);
                }
            }
            // create index
            qs.createIndex("statusIndex", "status", name);
            qs.createIndex("IDIndex", "ID", name);
            qs.createIndex("pIdIndex", "pos.getPid()", name + " p, p.positions.values pos");
            qs.createIndex("secIdIndex", "pos.secId", name + " p, p.positions.values pos");
            for (int i = 0; i < queries.length; i++) {
                try {
                    if (i == 0) {
                        sr[i][1] = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pos });
                    } else if (i == 1) {
                        sr[i][1] = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pDay });
                    } else {
                        sr[i][1] = (SelectResults) qs.newQuery(queries[i]).execute();
                    }
                    assertTrue("Size of resultset should be greater than 0 for query: " + queries[i], sr[i][1].size() > 0);
                    assertEquals("Expected and actual results do not match for query: " + queries[i], results[i], sr[i][1].size());
                    for (Object result : sr[i][1]) {
                        if (result instanceof Struct) {
                            Object[] r = ((Struct) result).getFieldValues();
                            for (int j = 0; j < r.length; j++) {
                                if (r[j] instanceof PdxInstance || r[j] instanceof PdxString) {
                                    fail("Result object should be a domain object and not an instance of " + r[j].getClass() + " for query: " + queries[i]);
                                }
                            }
                        } else if (result instanceof PdxInstance || result instanceof PdxString) {
                            fail("Result object should be a domain object and not an instance of " + result.getClass() + " for query: " + queries[i]);
                        }
                    }
                } catch (Exception e) {
                    Assert.fail("Failed executing query " + queries[i], e);
                }
            }
            StructSetOrResultsSet ssOrrs = new StructSetOrResultsSet();
            ssOrrs.CompareQueryResultsWithoutAndWithIndexes(sr, queries.length, queries);
            return null;
        }
    });
    this.closeClient(client);
    this.closeClient(server1);
}
Also used : PdxInstanceFactory(org.apache.geode.pdx.PdxInstanceFactory) PositionPdx(org.apache.geode.cache.query.data.PositionPdx) PortfolioPdx(org.apache.geode.cache.query.data.PortfolioPdx) PdxString(org.apache.geode.pdx.internal.PdxString) ClientCacheFactory(org.apache.geode.cache.client.ClientCacheFactory) Struct(org.apache.geode.cache.query.Struct) PdxInstanceEnum(org.apache.geode.pdx.internal.PdxInstanceEnum) SelectResults(org.apache.geode.cache.query.SelectResults) CacheServer(org.apache.geode.cache.server.CacheServer) GemFireCacheImpl(org.apache.geode.internal.cache.GemFireCacheImpl) StructSetOrResultsSet(org.apache.geode.cache.query.functional.StructSetOrResultsSet) Host(org.apache.geode.test.dunit.Host) ClientCache(org.apache.geode.cache.client.ClientCache) PdxString(org.apache.geode.pdx.internal.PdxString) CacheException(org.apache.geode.cache.CacheException) PdxInstance(org.apache.geode.pdx.PdxInstance) QueryService(org.apache.geode.cache.query.QueryService) VM(org.apache.geode.test.dunit.VM) SerializableCallable(org.apache.geode.test.dunit.SerializableCallable) Region(org.apache.geode.cache.Region) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)
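
The middle block of that test hand-builds a PdxInstance through the internal PdxInstanceFactoryImpl.newCreator; the public route is createPdxInstanceFactory on the cache. The snippet below is a minimal sketch of that construction (only a subset of the PositionPdx fields is written here, an assumption for illustration rather than the test's full field list):

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.pdx.PdxInstance;
import org.apache.geode.pdx.PdxInstanceFactory;

public class PdxInstanceFactorySketch {
    public static void main(String[] args) throws Exception {
        Cache cache = new CacheFactory().set("mcast-port", "0").create();

        // Public counterpart of PdxInstanceFactoryImpl.newCreator(...): the instance is
        // typed as PositionPdx without ever instantiating that class.
        PdxInstanceFactory factory =
            cache.createPdxInstanceFactory("org.apache.geode.cache.query.data.PositionPdx");
        factory.writeString("secId", "IBM");
        factory.writeInt("pid", 100);
        factory.writeDouble("mktValue", 0);
        factory.markIdentityField("secId"); // equality and hashing consider secId only
        PdxInstance position = factory.create();

        // The test binds such an instance as $1 in "select * from <region> where position1 = $1".
        System.out.println(position.getClassName() + " secId=" + position.getField("secId"));
        cache.close();
    }
}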

Example 33 with PortfolioPdx

Use of org.apache.geode.cache.query.data.PortfolioPdx in project geode by apache.

From the class PdxGroupByTestImpl, method testConvertibleGroupByQuery_refer_column:

@Override
@Test
public void testConvertibleGroupByQuery_refer_column() throws Exception {
    Region region = this.createRegion("portfolio", PortfolioPdx.class);
    for (int i = 1; i < 200; ++i) {
        PortfolioPdx pf = new PortfolioPdx(i);
        pf.shortID = (short) ((short) i / 5);
        region.put("key-" + i, pf);
    }
    String queryStr = "select  p.shortID  from /portfolio p where p.ID >= 0 group by p.shortID ";
    QueryService qs = CacheUtils.getQueryService();
    Query query = qs.newQuery(queryStr);
    SelectResults<Short> results = (SelectResults<Short>) query.execute();
    Iterator<Short> iter = results.asList().iterator();
    int counter = 0;
    while (iter.hasNext()) {
        Short shortID = iter.next();
        assertEquals(counter++, shortID.intValue());
    }
    assertEquals(39, counter - 1);
}
Also used : SelectResults(org.apache.geode.cache.query.SelectResults) DefaultQuery(org.apache.geode.cache.query.internal.DefaultQuery) Query(org.apache.geode.cache.query.Query) QueryService(org.apache.geode.cache.query.QueryService) Region(org.apache.geode.cache.Region) PortfolioPdx(org.apache.geode.cache.query.data.PortfolioPdx) Test(org.junit.Test)
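
For context on the final assertion: shortID is i / 5 for i in 1..199, so the group-by projects the 40 distinct values 0 through 39 in order and the loop's counter ends at 40 (hence counter - 1 == 39). The same query shape can be run standalone; the sketch below swaps the test's CacheUtils/createRegion helpers for a plain embedded cache, which is an assumption.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.query.QueryService;
import org.apache.geode.cache.query.SelectResults;
import org.apache.geode.cache.query.data.PortfolioPdx;

public class GroupByShortIdSketch {
    public static void main(String[] args) throws Exception {
        Cache cache = new CacheFactory().set("mcast-port", "0").create();
        Region<String, PortfolioPdx> region =
            cache.<String, PortfolioPdx>createRegionFactory(RegionShortcut.REPLICATE).create("portfolio");
        for (int i = 1; i < 200; ++i) {
            PortfolioPdx pf = new PortfolioPdx(i);
            pf.shortID = (short) (i / 5); // 40 distinct values: 0..39
            region.put("key-" + i, pf);
        }
        QueryService qs = cache.getQueryService();
        SelectResults<Short> results = (SelectResults<Short>) qs
            .newQuery("select p.shortID from /portfolio p where p.ID >= 0 group by p.shortID")
            .execute();
        System.out.println(results.size()); // expected: 40 groups
        cache.close();
    }
}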

Example 34 with PortfolioPdx

Use of org.apache.geode.cache.query.data.PortfolioPdx in project geode by apache.

From the class PdxGroupByTestImpl, method testAggregateFuncCountDistinctStar_1:

@Override
@Test
public void testAggregateFuncCountDistinctStar_1() throws Exception {
    Region region = this.createRegion("portfolio", PortfolioPdx.class);
    for (int i = 1; i < 200; ++i) {
        PortfolioPdx pf = new PortfolioPdx(i);
        pf.shortID = (short) ((short) i % 5);
        region.put("key-" + i, pf);
    }
    String queryStr = "select  p.status as status, count(distinct p.shortID) as countt from /portfolio p where p.ID > 0 group by status ";
    QueryService qs = CacheUtils.getQueryService();
    Query query = qs.newQuery(queryStr);
    CompiledSelect cs = ((DefaultQuery) query).getSelect();
    SelectResults sr = (SelectResults) query.execute();
    assertTrue(sr.getCollectionType().getElementType().isStructType());
    assertEquals(2, sr.size());
    Iterator iter = sr.iterator();
    Region rgn = CacheUtils.getRegion("portfolio");
    Set<Object> distinctShortIDActive = new HashSet<Object>();
    Set<Object> distinctShortIDInactive = new HashSet<Object>();
    int inactiveCount = 0;
    for (Object o : rgn.values()) {
        PortfolioPdx pf = (PortfolioPdx) o;
        if (pf.status.equals("active")) {
            distinctShortIDActive.add(pf.shortID);
        } else if (pf.status.equals("inactive")) {
            distinctShortIDInactive.add(pf.shortID);
        } else {
            fail("unexpected status");
        }
    }
    while (iter.hasNext()) {
        Struct struct = (Struct) iter.next();
        StructType structType = struct.getStructType();
        ObjectType[] fieldTypes = structType.getFieldTypes();
        assertEquals("String", fieldTypes[0].getSimpleClassName());
        assertEquals("Integer", fieldTypes[1].getSimpleClassName());
        if (struct.get("status").equals("active")) {
            assertEquals(distinctShortIDActive.size(), ((Integer) struct.get("countt")).intValue());
        } else if (struct.get("status").equals("inactive")) {
            assertEquals(distinctShortIDInactive.size(), ((Integer) struct.get("countt")).intValue());
        } else {
            fail("unexpected value of status");
        }
    }
    ObjectType elementType = sr.getCollectionType().getElementType();
    assertTrue(elementType.isStructType());
    StructType structType = (StructType) elementType;
    ObjectType[] fieldTypes = structType.getFieldTypes();
    assertEquals("String", fieldTypes[0].getSimpleClassName());
    assertEquals("Integer", fieldTypes[1].getSimpleClassName());
}
Also used : DefaultQuery(org.apache.geode.cache.query.internal.DefaultQuery) DefaultQuery(org.apache.geode.cache.query.internal.DefaultQuery) Query(org.apache.geode.cache.query.Query) StructType(org.apache.geode.cache.query.types.StructType) PortfolioPdx(org.apache.geode.cache.query.data.PortfolioPdx) Struct(org.apache.geode.cache.query.Struct) ObjectType(org.apache.geode.cache.query.types.ObjectType) SelectResults(org.apache.geode.cache.query.SelectResults) QueryService(org.apache.geode.cache.query.QueryService) CompiledSelect(org.apache.geode.cache.query.internal.CompiledSelect) Iterator(java.util.Iterator) Region(org.apache.geode.cache.Region) HashSet(java.util.HashSet) Test(org.junit.Test)
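
When the projection mixes a grouped column with an aggregate, each row comes back as a Struct keyed by the aliases ("status", "countt"). The snippet below is a minimal sketch of reading those rows with an embedded cache; the region setup is repeated so it stands alone, and that setup (like the region name) is an assumption rather than the test's.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.query.SelectResults;
import org.apache.geode.cache.query.Struct;
import org.apache.geode.cache.query.data.PortfolioPdx;

public class GroupByAggregateStructSketch {
    public static void main(String[] args) throws Exception {
        Cache cache = new CacheFactory().set("mcast-port", "0").create();
        Region<String, PortfolioPdx> region =
            cache.<String, PortfolioPdx>createRegionFactory(RegionShortcut.REPLICATE).create("portfolio");
        for (int i = 1; i < 200; ++i) {
            PortfolioPdx pf = new PortfolioPdx(i);
            pf.shortID = (short) (i % 5);
            region.put("key-" + i, pf);
        }
        SelectResults<?> sr = (SelectResults<?>) cache.getQueryService()
            .newQuery("select p.status as status, count(distinct p.shortID) as countt "
                + "from /portfolio p where p.ID > 0 group by status")
            .execute();
        for (Object row : sr) {
            Struct struct = (Struct) row; // one row per distinct status
            System.out.println(struct.get("status") + " -> " + struct.get("countt"));
        }
        cache.close();
    }
}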

Example 35 with PortfolioPdx

Use of org.apache.geode.cache.query.data.PortfolioPdx in project geode by apache.

From the class PdxGroupByTestImpl, method testDistinctCountWithoutGroupBy:

@Test
public void testDistinctCountWithoutGroupBy() throws Exception {
    Region region = this.createRegion("portfolio", PortfolioPdx.class);
    for (int i = 1; i < 200; ++i) {
        PortfolioPdx pf = new PortfolioPdx(i);
        pf.shortID = (short) ((short) i / 5);
        region.put("key-" + i, pf);
    }
    QueryService qs = CacheUtils.getQueryService();
    String[] queries = {
        "select  count(distinct pos.secId) from /portfolio p, p.positions.values pos where  pos.secId > 'APPL' " // 10
    };
    Object[][] r = new Object[queries.length][2];
    // Execute Queries without Indexes
    for (int i = 0; i < queries.length; i++) {
        Query q = null;
        try {
            q = CacheUtils.getQueryService().newQuery(queries[i]);
            CacheUtils.getLogger().info("Executing query: " + queries[i]);
            SelectResults sr = (SelectResults) q.execute();
            r[i][0] = sr;
            assertTrue(sr.size() > 0);
        } catch (Exception e) {
            e.printStackTrace();
            fail(q.getQueryString());
        }
    }
    // Create Indexes
    qs.createIndex("secIdIndex", "pos.secId", "/portfolio p, p.positions.values pos");
    // Execute Queries with Indexes
    for (int i = 0; i < queries.length; i++) {
        Query q = null;
        try {
            q = CacheUtils.getQueryService().newQuery(queries[i]);
            CacheUtils.getLogger().info("Executing query: " + queries[i]);
            r[i][1] = q.execute();
            SelectResults rcw = (SelectResults) r[i][1];
            int indexLimit = queries[i].indexOf("limit");
            int limit = -1;
            boolean limitQuery = indexLimit != -1;
            if (limitQuery) {
                limit = Integer.parseInt(queries[i].substring(indexLimit + 5).trim());
            }
            assertTrue("Result size is " + rcw.size() + " and limit is " + limit, !limitQuery || rcw.size() <= limit);
            String colType = rcw.getCollectionType().getSimpleClassName();
        } catch (Exception e) {
            e.printStackTrace();
            fail(q.getQueryString());
        }
    }
    StructSetOrResultsSet ssOrrs = new StructSetOrResultsSet();
    ssOrrs.CompareQueryResultsWithoutAndWithIndexes(r, queries.length, true, queries);
}
Also used : DefaultQuery(org.apache.geode.cache.query.internal.DefaultQuery) Query(org.apache.geode.cache.query.Query) PortfolioPdx(org.apache.geode.cache.query.data.PortfolioPdx) SelectResults(org.apache.geode.cache.query.SelectResults) QueryService(org.apache.geode.cache.query.QueryService) Region(org.apache.geode.cache.Region) Test(org.junit.Test)
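
The pattern in this test is: run each query without an index, create the index, run again, and compare via the StructSetOrResultsSet test utility. The snippet below is a simplified stand-in that just prints the aggregate before and after creating the same secIdIndex, with the embedded-cache setup again being an assumption rather than the test's own harness.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.query.QueryService;
import org.apache.geode.cache.query.SelectResults;
import org.apache.geode.cache.query.data.PortfolioPdx;

public class CountDistinctWithIndexSketch {
    public static void main(String[] args) throws Exception {
        Cache cache = new CacheFactory().set("mcast-port", "0").create();
        Region<String, PortfolioPdx> region =
            cache.<String, PortfolioPdx>createRegionFactory(RegionShortcut.REPLICATE).create("portfolio");
        for (int i = 1; i < 200; ++i) {
            region.put("key-" + i, new PortfolioPdx(i));
        }
        QueryService qs = cache.getQueryService();
        String query = "select count(distinct pos.secId) from /portfolio p, p.positions.values pos "
            + "where pos.secId > 'APPL'";

        // Same query, first without and then with the index the test creates.
        Object withoutIndex = ((SelectResults<?>) qs.newQuery(query).execute()).asList().get(0);
        qs.createIndex("secIdIndex", "pos.secId", "/portfolio p, p.positions.values pos");
        Object withIndex = ((SelectResults<?>) qs.newQuery(query).execute()).asList().get(0);

        // The test asserts the unindexed and indexed result sets match via StructSetOrResultsSet.
        System.out.println(withoutIndex + " / " + withIndex);
        cache.close();
    }
}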

Aggregations

PortfolioPdx (org.apache.geode.cache.query.data.PortfolioPdx): 51 usages
Region (org.apache.geode.cache.Region): 50 usages
Test (org.junit.Test): 50 usages
QueryService (org.apache.geode.cache.query.QueryService): 47 usages
SelectResults (org.apache.geode.cache.query.SelectResults): 46 usages
Query (org.apache.geode.cache.query.Query): 34 usages
DistributedTest (org.apache.geode.test.junit.categories.DistributedTest): 28 usages
VM (org.apache.geode.test.dunit.VM): 27 usages
DefaultQuery (org.apache.geode.cache.query.internal.DefaultQuery): 26 usages
Host (org.apache.geode.test.dunit.Host): 26 usages
CacheException (org.apache.geode.cache.CacheException): 19 usages
Struct (org.apache.geode.cache.query.Struct): 17 usages
SerializableCallable (org.apache.geode.test.dunit.SerializableCallable): 14 usages
AttributesFactory (org.apache.geode.cache.AttributesFactory): 13 usages
ClientCache (org.apache.geode.cache.client.ClientCache): 13 usages
ClientCacheFactory (org.apache.geode.cache.client.ClientCacheFactory): 13 usages
CompiledSelect (org.apache.geode.cache.query.internal.CompiledSelect): 13 usages
CacheSerializableRunnable (org.apache.geode.cache30.CacheSerializableRunnable): 13 usages
Iterator (java.util.Iterator): 12 usages
PartitionAttributesFactory (org.apache.geode.cache.PartitionAttributesFactory): 12 usages