Example 26 with PortfolioPdx

Use of org.apache.geode.cache.query.data.PortfolioPdx in project geode by apache.

The class PdxStringQueryDUnitTest, method testNullPdxString:

@Test
public void testNullPdxString() throws CacheException {
    final Host host = Host.getHost(0);
    VM server0 = host.getVM(0);
    VM server1 = host.getVM(1);
    VM server2 = host.getVM(2);
    VM client = host.getVM(3);
    final int numberOfEntries = 10;
    final boolean isPr = true;
    // Start server1 and create index
    server0.invoke(new CacheSerializableRunnable("Create Server1") {

        public void run2() throws CacheException {
            configAndStartBridgeServer(isPr, false, false);
            // create a local query service
            QueryService localQueryService = null;
            try {
                localQueryService = getCache().getQueryService();
            } catch (Exception e) {
                Assert.fail("Failed to get QueryService.", e);
            }
            // Verify the type of index created
            Index index = null;
            try {
                index = localQueryService.createIndex("statusIndex", "status", regName);
                if (index instanceof PartitionedIndex) {
                    for (Object o : ((PartitionedIndex) index).getBucketIndexes()) {
                        if (!(o instanceof CompactRangeIndex)) {
                            fail("CompactRangeIndex Index should have been created instead of " + index.getClass());
                        }
                    }
                } else {
                    fail("Partitioned index expected");
                }
            } catch (Exception ex) {
                fail("Failed to create index." + ex.getMessage());
            }
        }
    });
    // Start server2
    server1.invoke(new CacheSerializableRunnable("Create Server2") {

        public void run2() throws CacheException {
            configAndStartBridgeServer(isPr, false, false);
            Region region = getRootRegion().getSubregion(regionName);
        }
    });
    // Start server3
    server2.invoke(new CacheSerializableRunnable("Create Server3") {

        public void run2() throws CacheException {
            configAndStartBridgeServer(isPr, false, false);
            Region region = getRootRegion().getSubregion(regionName);
        }
    });
    // Client pool.
    final int port0 = server0.invoke(() -> PdxStringQueryDUnitTest.getCacheServerPort());
    final int port1 = server1.invoke(() -> PdxStringQueryDUnitTest.getCacheServerPort());
    final int port2 = server2.invoke(() -> PdxStringQueryDUnitTest.getCacheServerPort());
    final String host0 = NetworkUtils.getServerHostName(server0.getHost());
    // Create client pool.
    final String poolName = "testClientServerQueryPool";
    createPool(client, poolName, new String[] { host0 }, new int[] { port0, port1, port2 }, true);
    // Create client region and put PortfolioPdx objects (PdxInstances)
    client.invoke(new CacheSerializableRunnable("Create client") {

        public void run2() throws CacheException {
            AttributesFactory factory = new AttributesFactory();
            factory.setScope(Scope.LOCAL);
            ClientServerTestCase.configureConnectionPool(factory, host0, port1, -1, true, -1, -1, null);
            Region region = createRegion(regionName, rootRegionName, factory.create());
            LogWriterUtils.getLogWriter().info("Put PortfolioPdx");
            // Put some PortfolioPdx objects with null Status and secIds
            for (int i = 0; i < numberOfEntries * 2; i++) {
                PortfolioPdx portfolioPdx = new PortfolioPdx(i);
                // this will create NULL PdxStrings
                portfolioPdx.status = null;
                portfolioPdx.positions = new HashMap();
                portfolioPdx.positions.put(null, new PositionPdx(null, PositionPdx.cnt * 1000));
                region.put("key-" + i, portfolioPdx);
            }
            // Put some PortfolioPdx objects with non-null status to reproduce bug #45351
            for (int i = 0; i < numberOfEntries; i++) {
                PortfolioPdx portfolioPdx = new PortfolioPdx(i);
                region.put("key-" + i, portfolioPdx);
            }
        }
    });
    // Verify if all the index keys are PdxStrings
    server0.invoke(new CacheSerializableRunnable("Create Server") {

        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(regionName);
            QueryService localQueryService = getCache().getQueryService();
            Index index = localQueryService.getIndex(region, "statusIndex");
            if (index instanceof PartitionedIndex) {
                for (Object o : ((PartitionedIndex) index).getBucketIndexes()) {
                    CloseableIterator<IndexStoreEntry> iter = ((CompactRangeIndex) o).getIndexStorage().iterator(null);
                    while (iter.hasNext()) {
                        Object key = iter.next().getDeserializedKey();
                        if (!(key instanceof PdxString) && !(key == IndexManager.NULL)) {
                            fail("All keys of the CompactRangeIndex in the Partitioned index should be PdxStrings and not " + key.getClass());
                        }
                    }
                }
            } else {
                fail("Partitioned index expected");
            }
        }
    });
    // Execute queries from client to server and locally on client
    client.invoke(new CacheSerializableRunnable("Execute queries") {

        public void run2() throws CacheException {
            QueryService remoteQueryService = null;
            QueryService localQueryService = null;
            try {
                remoteQueryService = (PoolManager.find(poolName)).getQueryService();
                localQueryService = getCache().getQueryService();
            } catch (Exception e) {
                Assert.fail("Failed to get QueryService.", e);
            }
            // Querying the fields with null values
            String[] qs = {
                "SELECT pos.secId FROM " + regName + "  p, p.positions.values pos where p.status = null",
                "SELECT p.pkid FROM " + regName + "  p, p.positions.values pos where pos.secId = null" };
            for (int i = 0; i < 2; i++) {
                try {
                    Query query = remoteQueryService.newQuery(qs[i]);
                    SelectResults res = (SelectResults) query.execute();
                    LogWriterUtils.getLogWriter().info("PR NULL Pdxstring test size of resultset: " + res.size() + " for query: " + qs[i]);
                    if (i == 0) {
                        for (Object o : res) {
                            if (o != null) {
                                fail("Query : " + qs[i] + " should have returned null and not " + o);
                            }
                        }
                    } else {
                        checkForPdxString(res.asList(), qs[i]);
                    }
                } catch (Exception e) {
                    Assert.fail("Failed executing " + qs[i], e);
                }
            }
        }
    });
    closeClient(server2);
    closeClient(client);
    closeClient(server1);
    closeClient(server0);
}
Also used: CloseableIterator (org.apache.geode.internal.cache.persistence.query.CloseableIterator), PositionPdx (org.apache.geode.cache.query.data.PositionPdx), DefaultQuery (org.apache.geode.cache.query.internal.DefaultQuery), Query (org.apache.geode.cache.query.Query), CacheException (org.apache.geode.cache.CacheException), HashMap (java.util.HashMap), Host (org.apache.geode.test.dunit.Host), RangeIndex (org.apache.geode.cache.query.internal.index.RangeIndex), Index (org.apache.geode.cache.query.Index), CompactRangeIndex (org.apache.geode.cache.query.internal.index.CompactRangeIndex), PartitionedIndex (org.apache.geode.cache.query.internal.index.PartitionedIndex), PortfolioPdx (org.apache.geode.cache.query.data.PortfolioPdx), PdxString (org.apache.geode.pdx.internal.PdxString), IgnoredException (org.apache.geode.test.dunit.IgnoredException), IOException (java.io.IOException), AttributesFactory (org.apache.geode.cache.AttributesFactory), PartitionAttributesFactory (org.apache.geode.cache.PartitionAttributesFactory), SelectResults (org.apache.geode.cache.query.SelectResults), CacheSerializableRunnable (org.apache.geode.cache30.CacheSerializableRunnable), QueryService (org.apache.geode.cache.query.QueryService), VM (org.apache.geode.test.dunit.VM), Region (org.apache.geode.cache.Region), DistributedTest (org.apache.geode.test.junit.categories.DistributedTest), Test (org.junit.Test)
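
For quick reference outside the DUnit harness, the essence of this example is the three-argument QueryService.createIndex call plus an OQL comparison against null. The following standalone sketch is hypothetical: the single-member cache, the region name exampleRegion, and the entry count are illustrative and not taken from the test fixture.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.query.Index;
import org.apache.geode.cache.query.QueryService;
import org.apache.geode.cache.query.SelectResults;
import org.apache.geode.cache.query.data.PortfolioPdx;

public class NullStatusIndexSketch {
    public static void main(String[] args) throws Exception {
        // Standalone single-member cache (no locator, no multicast).
        Cache cache = new CacheFactory().set("mcast-port", "0").create();
        Region<String, PortfolioPdx> region =
            cache.<String, PortfolioPdx>createRegionFactory(RegionShortcut.PARTITION)
                .create("exampleRegion"); // illustrative region name

        // Single-field index on a String attribute; on a partitioned region this becomes a
        // PartitionedIndex whose per-bucket indexes are CompactRangeIndexes.
        QueryService qs = cache.getQueryService();
        Index statusIndex = qs.createIndex("statusIndex", "status", "/exampleRegion");
        System.out.println("created index: " + statusIndex.getName());

        // Entries with a null status exercise the null-key path of the index.
        for (int i = 0; i < 10; i++) {
            PortfolioPdx p = new PortfolioPdx(i);
            p.status = null;
            region.put("key-" + i, p);
        }

        SelectResults<?> results = (SelectResults<?>) qs
            .newQuery("SELECT p.pkid FROM /exampleRegion p WHERE p.status = null")
            .execute();
        System.out.println("null-status rows: " + results.size());
        cache.close();
    }
}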

Example 27 with PortfolioPdx

Use of org.apache.geode.cache.query.data.PortfolioPdx in project geode by apache.

The class PdxStringQueryDUnitTest, method testPartitionRegionRangeIndex:

@Test
public void testPartitionRegionRangeIndex() throws CacheException {
    final Host host = Host.getHost(0);
    VM server0 = host.getVM(0);
    VM server1 = host.getVM(1);
    VM server2 = host.getVM(2);
    VM client = host.getVM(3);
    final int numberOfEntries = 10;
    final boolean isPr = true;
    // Start server1 and create index
    server0.invoke(new CacheSerializableRunnable("Create Server1") {

        public void run2() throws CacheException {
            configAndStartBridgeServer(isPr, false, false);
            // create a local query service
            QueryService localQueryService = null;
            try {
                localQueryService = getCache().getQueryService();
            } catch (Exception e) {
                Assert.fail("Failed to get QueryService.", e);
            }
            // Verify the type of index created
            Index index = null;
            try {
                index = localQueryService.createIndex("secIdIndex", "pos.secId", regName + " p, p.positions.values pos");
                if (index instanceof PartitionedIndex) {
                    for (Object o : ((PartitionedIndex) index).getBucketIndexes()) {
                        if (!(o instanceof RangeIndex)) {
                            fail("Range Index should have been created instead of " + index.getClass());
                        }
                    }
                } else {
                    fail("Partitioned index expected");
                }
            } catch (Exception ex) {
                fail("Failed to create index." + ex.getMessage());
            }
        }
    });
    // Start server2
    server1.invoke(new CacheSerializableRunnable("Create Server2") {

        public void run2() throws CacheException {
            configAndStartBridgeServer(isPr, false, false);
            Region region = getRootRegion().getSubregion(regionName);
        }
    });
    // Start server3
    server2.invoke(new CacheSerializableRunnable("Create Server3") {

        public void run2() throws CacheException {
            configAndStartBridgeServer(isPr, false, false);
            Region region = getRootRegion().getSubregion(regionName);
        }
    });
    // Client pool.
    final int port0 = server0.invoke(() -> PdxStringQueryDUnitTest.getCacheServerPort());
    final int port1 = server1.invoke(() -> PdxStringQueryDUnitTest.getCacheServerPort());
    final int port2 = server2.invoke(() -> PdxStringQueryDUnitTest.getCacheServerPort());
    final String host0 = NetworkUtils.getServerHostName(server0.getHost());
    // Create client pool.
    final String poolName = "testClientServerQueryPool";
    createPool(client, poolName, new String[] { host0 }, new int[] { port0, port1, port2 }, true);
    // Create client region and put PortfolioPdx objects (PdxInstances)
    client.invoke(new CacheSerializableRunnable("Create client") {

        public void run2() throws CacheException {
            AttributesFactory factory = new AttributesFactory();
            factory.setScope(Scope.LOCAL);
            ClientServerTestCase.configureConnectionPool(factory, host0, port1, -1, true, -1, -1, null);
            Region region = createRegion(regionName, rootRegionName, factory.create());
            LogWriterUtils.getLogWriter().info("Put PortfolioPdx");
            for (int i = 0; i < numberOfEntries; i++) {
                region.put("key-" + i, new PortfolioPdx(i));
            }
        }
    });
    // Verify if all the index keys are PdxStrings
    server0.invoke(new CacheSerializableRunnable("Create Server") {

        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(regionName);
            QueryService localQueryService = getCache().getQueryService();
            Index index = localQueryService.getIndex(region, "secIdIndex");
            if (index instanceof PartitionedIndex) {
                for (Object o : ((PartitionedIndex) index).getBucketIndexes()) {
                    for (Object key : ((RangeIndex) o).getValueToEntriesMap().keySet()) {
                        if (!(key instanceof PdxString)) {
                            fail("All keys of the RangeIndex in the Partitioned index should be PdxStrings and not " + key.getClass());
                        }
                    }
                }
            } else {
                fail("Partitioned index expected");
            }
        }
    });
    // Execute queries from client to server and locally on client
    SerializableRunnable executeQueries = new CacheSerializableRunnable("Execute queries") {

        public void run2() throws CacheException {
            QueryService remoteQueryService = null;
            QueryService localQueryService = null;
            SelectResults[][] rs = new SelectResults[1][2];
            try {
                remoteQueryService = (PoolManager.find(poolName)).getQueryService();
                localQueryService = getCache().getQueryService();
            } catch (Exception e) {
                Assert.fail("Failed to get QueryService.", e);
            }
            for (int i = 0; i < queryString.length; i++) {
                try {
                    LogWriterUtils.getLogWriter().info("### Executing Query on remote server:" + queryString[i]);
                    Query query = remoteQueryService.newQuery(queryString[i]);
                    rs[0][0] = (SelectResults) query.execute();
                    LogWriterUtils.getLogWriter().info("RR remote indexType: Range size of resultset: " + rs[0][0].size() + " for query: " + queryString[i]);
                    checkForPdxString(rs[0][0].asList(), queryString[i]);
                    LogWriterUtils.getLogWriter().info("### Executing Query locally on client:" + queryString[i]);
                    query = localQueryService.newQuery(queryString[i]);
                    rs[0][1] = (SelectResults) query.execute();
                    LogWriterUtils.getLogWriter().info("isPR: " + isPr + "  client local indexType: Range size of resultset: " + rs[0][1].size() + " for query: " + queryString[i]);
                    checkForPdxString(rs[0][1].asList(), queryString[i]);
                    if (i < orderByQueryIndex) {
                        // Compare local and remote query results.
                        if (!compareResultsOfWithAndWithoutIndex(rs)) {
                            LogWriterUtils.getLogWriter().info("result0=" + rs[0][0].asList());
                            LogWriterUtils.getLogWriter().info("result1=" + rs[0][1].asList());
                            fail("Local and Remote Query Results are not matching for query :" + queryString[i]);
                        }
                    } else {
                        // compare the order of results returned
                        compareResultsOrder(rs, isPr);
                    }
                } catch (Exception e) {
                    Assert.fail("Failed executing " + queryString[i], e);
                }
            }
        }
    };
    client.invoke(executeQueries);
    // Put non-PDX objects on the server and execute queries locally
    server0.invoke(new CacheSerializableRunnable("Create Bridge Server") {

        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(regionName);
            LogWriterUtils.getLogWriter().info("Put Objects locally on server");
            for (int i = numberOfEntries; i < numberOfEntries * 2; i++) {
                region.put("key-" + i, new Portfolio(i));
            }
            QueryService localQueryService = getCache().getQueryService();
            // Query server1 locally to check if PdxString is not being returned
            for (int i = 0; i < queryString.length; i++) {
                try {
                    LogWriterUtils.getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
                    SelectResults rs = (SelectResults) localQueryService.newQuery(queryString[i]).execute();
                    LogWriterUtils.getLogWriter().info("RR server local indexType:Range  size of resultset: " + rs.size() + " for query: " + queryString[i]);
                    // The results should not be PdxString
                    checkForPdxString(rs.asList(), queryString[i]);
                } catch (Exception e) {
                    Assert.fail("Failed executing " + queryString[i], e);
                }
            }
        }
    });
    // test for readSerialized flag
    server0.invoke(new CacheSerializableRunnable("Create Bridge Server") {

        public void run2() throws CacheException {
            GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
            cache.setReadSerialized(true);
            QueryService localQueryService = getCache().getQueryService();
            // Query server1 locally to check if PdxString is not being returned
            for (int i = 0; i < queryString.length; i++) {
                try {
                    LogWriterUtils.getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
                    SelectResults rs = (SelectResults) localQueryService.newQuery(queryString[i]).execute();
                    LogWriterUtils.getLogWriter().info("isPR: " + isPr + " server local readSerializedTrue: indexType: Range size of resultset: " + rs.size() + " for query: " + queryString[i]);
                    // The results should not be PdxString
                    checkForPdxString(rs.asList(), queryString[i]);
                } catch (Exception e) {
                    Assert.fail("Failed executing " + queryString[i], e);
                }
            }
        }
    });
    // test for readSerialized flag on client
    client.invoke(new CacheSerializableRunnable("Create client") {

        public void run2() throws CacheException {
            GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
            cache.setReadSerialized(true);
            QueryService remoteQueryService = (PoolManager.find(poolName)).getQueryService();
            // Query server1 remotely to check if PdxString is not being returned
            for (int i = 0; i < queryString.length; i++) {
                try {
                    LogWriterUtils.getLogWriter().info("### Executing Query remotely from client:" + queryString[i]);
                    SelectResults rs = (SelectResults) remoteQueryService.newQuery(queryString[i]).execute();
                    LogWriterUtils.getLogWriter().info("RR server remote readSerializedTrue: indexType: Range size of resultset: " + rs.size() + " for query: " + queryString[i]);
                    // The results should not be PdxString
                    checkForPdxString(rs.asList(), queryString[i]);
                } catch (Exception e) {
                    Assert.fail("Failed executing " + queryString[i], e);
                }
            }
        }
    });
    closeClient(server2);
    closeClient(client);
    closeClient(server1);
    closeClient(server0);
}
Also used: DefaultQuery (org.apache.geode.cache.query.internal.DefaultQuery), Query (org.apache.geode.cache.query.Query), CacheException (org.apache.geode.cache.CacheException), Portfolio (org.apache.geode.cache.query.data.Portfolio), SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable), CacheSerializableRunnable (org.apache.geode.cache30.CacheSerializableRunnable), Host (org.apache.geode.test.dunit.Host), RangeIndex (org.apache.geode.cache.query.internal.index.RangeIndex), Index (org.apache.geode.cache.query.Index), CompactRangeIndex (org.apache.geode.cache.query.internal.index.CompactRangeIndex), PartitionedIndex (org.apache.geode.cache.query.internal.index.PartitionedIndex), PortfolioPdx (org.apache.geode.cache.query.data.PortfolioPdx), PdxString (org.apache.geode.pdx.internal.PdxString), IgnoredException (org.apache.geode.test.dunit.IgnoredException), IOException (java.io.IOException), AttributesFactory (org.apache.geode.cache.AttributesFactory), PartitionAttributesFactory (org.apache.geode.cache.PartitionAttributesFactory), SelectResults (org.apache.geode.cache.query.SelectResults), QueryService (org.apache.geode.cache.query.QueryService), VM (org.apache.geode.test.dunit.VM), Region (org.apache.geode.cache.Region), GemFireCacheImpl (org.apache.geode.internal.cache.GemFireCacheImpl), DistributedTest (org.apache.geode.test.junit.categories.DistributedTest), Test (org.junit.Test)
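
The distinctive part of this example is the index shape: on a partitioned region, a from clause that iterates p.positions.values yields a PartitionedIndex whose bucket indexes are RangeIndexes. Below is a minimal, hypothetical single-member sketch of that inspection (region name and entry count are illustrative; PartitionedIndex, RangeIndex, and PdxString are Geode internals, used here exactly as the test imports them).

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.query.Index;
import org.apache.geode.cache.query.QueryService;
import org.apache.geode.cache.query.data.PortfolioPdx;
import org.apache.geode.cache.query.internal.index.PartitionedIndex;
import org.apache.geode.cache.query.internal.index.RangeIndex;
import org.apache.geode.pdx.internal.PdxString;

public class RangeIndexKeySketch {
    public static void main(String[] args) throws Exception {
        Cache cache = new CacheFactory().set("mcast-port", "0").create();
        Region<String, PortfolioPdx> region =
            cache.<String, PortfolioPdx>createRegionFactory(RegionShortcut.PARTITION)
                .create("exampleRegion"); // illustrative region name
        for (int i = 0; i < 10; i++) {
            region.put("key-" + i, new PortfolioPdx(i));
        }

        // Multi-iterator from clause: the index ranges over every position's secId.
        QueryService qs = cache.getQueryService();
        Index index =
            qs.createIndex("secIdIndex", "pos.secId", "/exampleRegion p, p.positions.values pos");

        // On a partitioned region the top-level index is a PartitionedIndex whose per-bucket
        // indexes are RangeIndexes. In the DUnit test the entries arrive from a client in
        // serialized PDX form, so the keys are PdxStrings; with a local peer put of domain
        // objects, plain String keys are expected instead.
        for (Object bucketIndex : ((PartitionedIndex) index).getBucketIndexes()) {
            for (Object key : ((RangeIndex) bucketIndex).getValueToEntriesMap().keySet()) {
                System.out.println(key + " -> "
                    + (key instanceof PdxString ? "PdxString" : key.getClass().getSimpleName()));
            }
        }
        cache.close();
    }
}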

Example 28 with PortfolioPdx

Use of org.apache.geode.cache.query.data.PortfolioPdx in project geode by apache.

The class PdxQueryDUnitTest, method testNestedAndCollectionPdxWithPR:

/**
   * Tests client-server queries with nested Pdx objects and collections of Pdx.
   */
@Test
public void testNestedAndCollectionPdxWithPR() throws CacheException {
    final Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    VM vm2 = host.getVM(2);
    VM vm3 = host.getVM(3);
    final int numberOfEntries = 50;
    final String[] queries = new String[] {
        "SELECT * FROM " + this.regName + " pf where pf.position1.secId > '2'",
        "SELECT * FROM " + this.regName + " p where p.position3[1].portfolioId = 2",
        "SELECT * FROM " + this.regName + " p, p.positions.values AS pos WHERE pos.secId != '1'",
        "SELECT key, positions FROM " + this.regName + ".entrySet, value.positions.values "
            + "positions WHERE positions.mktValue >= 25.00",
        "SELECT * FROM " + this.regName + " portfolio1, " + this.regName2 + " portfolio2 WHERE "
            + "portfolio1.status = portfolio2.status",
        "SELECT portfolio1.ID, portfolio2.status FROM " + this.regName + " portfolio1, "
            + this.regName + " portfolio2  WHERE portfolio1.status = portfolio2.status",
        "SELECT * FROM " + this.regName + " portfolio1, portfolio1.positions.values positions1, "
            + this.regName + " portfolio2,  portfolio2.positions.values positions2 WHERE "
            + "positions1.secId = positions2.secId ",
        "SELECT * FROM " + this.regName + " portfolio, portfolio.positions.values positions WHERE "
            + "portfolio.Pk IN SET ('1', '2') AND positions.secId = '1'",
        "SELECT DISTINCT * FROM " + this.regName + "  pf1, pf1.collectionHolderMap.values coll1,"
            + " pf1.positions.values posit1, " + this.regName2 + "  pf2, pf2.collectionHolderMap.values "
            + " coll2, pf2.positions.values posit2 WHERE posit1.secId='IBM' AND posit2.secId='IBM'" };
    // Start server1
    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {

        public void run2() throws CacheException {
            configAndStartBridgeServer(false, true);
            Region region1 = getRootRegion().getSubregion(regionName);
            Region region2 = getRootRegion().getSubregion(regionName2);
        }
    });
    // Start server2
    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {

        public void run2() throws CacheException {
            configAndStartBridgeServer(false, true);
            Region region = getRootRegion().getSubregion(regionName);
            Region region2 = getRootRegion().getSubregion(regionName2);
        }
    });
    // Start server3
    vm2.invoke(new CacheSerializableRunnable("Create Bridge Server") {

        public void run2() throws CacheException {
            configAndStartBridgeServer(false, true);
            Region region = getRootRegion().getSubregion(regionName);
            Region region2 = getRootRegion().getSubregion(regionName2);
        }
    });
    // Client pool.
    final int port0 = vm0.invoke(() -> PdxQueryDUnitTest.getCacheServerPort());
    final int port1 = vm1.invoke(() -> PdxQueryDUnitTest.getCacheServerPort());
    final String host0 = NetworkUtils.getServerHostName(vm0.getHost());
    // Create client pool.
    final String poolName = "testClientServerQueryPool";
    createPool(vm2, poolName, new String[] { host0 }, new int[] { port0 }, true);
    createPool(vm3, poolName, new String[] { host0 }, new int[] { port1 }, true);
    // Create client region
    vm3.invoke(new CacheSerializableRunnable("Create region") {

        public void run2() throws CacheException {
            AttributesFactory factory = new AttributesFactory();
            factory.setScope(Scope.LOCAL);
            ClientServerTestCase.configureConnectionPool(factory, host0, port1, -1, true, -1, -1, null);
            Region region1 = createRegion(regionName, rootRegionName, factory.create());
            Region region2 = createRegion(regionName2, rootRegionName, factory.create());
            for (int i = 0; i < numberOfEntries; i++) {
                region1.put("key-" + i, new PortfolioPdx(i, i));
                region2.put("key-" + i, new PortfolioPdx(i, i));
            }
        }
    });
    // Execute client queries
    SerializableRunnable executeQueries = new CacheSerializableRunnable("Execute queries") {

        public void run2() throws CacheException {
            QueryService remoteQueryService = null;
            QueryService localQueryService = null;
            SelectResults[][] rs = new SelectResults[1][2];
            try {
                remoteQueryService = (PoolManager.find(poolName)).getQueryService();
                localQueryService = getCache().getQueryService();
            } catch (Exception e) {
                Assert.fail("Failed to get QueryService.", e);
            }
            for (int i = 0; i < queries.length; i++) {
                try {
                    logger.info("### Executing Query on server:" + queries[i]);
                    Query query = remoteQueryService.newQuery(queries[i]);
                    rs[0][0] = (SelectResults) query.execute();
                    logger.info("### Executing Query locally:" + queries[i]);
                    query = localQueryService.newQuery(queries[i]);
                    rs[0][1] = (SelectResults) query.execute();
                    // Compare local and remote query results.
                    if (!CacheUtils.compareResultsOfWithAndWithoutIndex(rs)) {
                        fail("Local and Remote Query Results are not matching for query :" + queries[i]);
                    }
                } catch (Exception e) {
                    Assert.fail("Failed executing " + queries[i], e);
                }
            }
        }
    };
    vm3.invoke(executeQueries);
    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {

        public void run2() throws CacheException {
            assertEquals(0, PortfolioPdx.numInstance);
            assertEquals(0, PositionPdx.numInstance);
        }
    });
    // Create index
    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {

        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(regionName);
            QueryService qs = getCache().getQueryService();
            try {
                qs.createIndex("pkIndex", IndexType.FUNCTIONAL, "portfolio.Pk", regName + " portfolio");
                qs.createIndex("idIndex", IndexType.FUNCTIONAL, "pos.secId", regName + " p, p.positions.values AS pos");
                qs.createIndex("tickerIndex", IndexType.FUNCTIONAL, "pf.position1.secId", regName + " pf");
                qs.createIndex("secIdIndexPf1", IndexType.FUNCTIONAL, "pos11.secId", regName + " pf1, pf1.collectionHolderMap.values coll1, pf1.positions.values pos11");
                qs.createIndex("secIdIndexPf2", IndexType.FUNCTIONAL, "pos22.secId", regName2 + " pf2, pf2.collectionHolderMap.values coll2, pf2.positions.values pos22");
            } catch (Exception ex) {
                fail("Unable to create index. " + ex.getMessage());
            }
        }
    });
    vm3.invoke(executeQueries);
    // Check PortfolioPdx and PositionPdx instance counts on vm0; they should still be 0.
    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {

        public void run2() throws CacheException {
            assertEquals(0, PortfolioPdx.numInstance);
            assertEquals(0, PositionPdx.numInstance);
        }
    });
    // The index is created on the portfolio.Pk field, which does not exist in the
    // PortfolioPdx object, but there is a getPk() method, so for #44436 the objects
    // are deserialized to get the value in vm1.
    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {

        public void run2() throws CacheException {
            assertEquals(numberOfEntries, PortfolioPdx.numInstance);
            // Deserializing the 50 PortfolioPdx objects creates (50*3)+50+50+50+25 = 325
            // PositionPdx objects.
            assertEquals(325, PositionPdx.numInstance);
        }
    });
    vm2.invoke(new CacheSerializableRunnable("Create Bridge Server") {

        public void run2() throws CacheException {
            assertEquals(0, PortfolioPdx.numInstance);
            assertEquals(0, PositionPdx.numInstance);
        }
    });
    this.closeClient(vm2);
    this.closeClient(vm3);
    this.closeClient(vm1);
    this.closeClient(vm0);
}
Also used: Query (org.apache.geode.cache.query.Query), CacheException (org.apache.geode.cache.CacheException), SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable), CacheSerializableRunnable (org.apache.geode.cache30.CacheSerializableRunnable), Host (org.apache.geode.test.dunit.Host), PortfolioPdx (org.apache.geode.cache.query.data.PortfolioPdx), AttributesFactory (org.apache.geode.cache.AttributesFactory), PartitionAttributesFactory (org.apache.geode.cache.PartitionAttributesFactory), SelectResults (org.apache.geode.cache.query.SelectResults), QueryService (org.apache.geode.cache.query.QueryService), VM (org.apache.geode.test.dunit.VM), Region (org.apache.geode.cache.Region), DistributedTest (org.apache.geode.test.junit.categories.DistributedTest), Test (org.junit.Test)
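
The vm1 index definitions above use the QueryService.createIndex overload that takes an IndexType together with multi-iterator from clauses. A condensed, hypothetical single-member version is sketched below; the region name, region shortcut, and entry count are illustrative rather than taken from the test.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.query.IndexType;
import org.apache.geode.cache.query.QueryService;
import org.apache.geode.cache.query.SelectResults;
import org.apache.geode.cache.query.data.PortfolioPdx;

public class NestedPdxIndexSketch {
    public static void main(String[] args) throws Exception {
        Cache cache = new CacheFactory().set("mcast-port", "0").create();
        Region<String, PortfolioPdx> region =
            cache.<String, PortfolioPdx>createRegionFactory(RegionShortcut.REPLICATE)
                .create("exampleRegion"); // illustrative name and shortcut
        for (int i = 0; i < 50; i++) {
            region.put("key-" + i, new PortfolioPdx(i, i));
        }

        // Same index shapes as the test: a nested-field index and a collection-iterator index.
        QueryService qs = cache.getQueryService();
        qs.createIndex("tickerIndex", IndexType.FUNCTIONAL, "pf.position1.secId",
            "/exampleRegion pf");
        qs.createIndex("idIndex", IndexType.FUNCTIONAL, "pos.secId",
            "/exampleRegion p, p.positions.values AS pos");

        SelectResults<?> rs = (SelectResults<?>) qs
            .newQuery("SELECT * FROM /exampleRegion p, p.positions.values AS pos "
                + "WHERE pos.secId != '1'")
            .execute();
        System.out.println("rows: " + rs.size());
        cache.close();
    }
}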

Example 29 with PortfolioPdx

Use of org.apache.geode.cache.query.data.PortfolioPdx in project geode by apache.

The class PdxStringQueryDUnitTest, method testPRQueryForDuplicates:

/**
   * Verifies that duplicate results are not accumulated when PdxString is used in a PR query.
   * 
   * @throws CacheException
   */
@Test
public void testPRQueryForDuplicates() throws CacheException {
    final String regionName = "exampleRegion";
    final Host host = Host.getHost(0);
    final VM vm0 = host.getVM(0);
    final VM vm1 = host.getVM(1);
    final VM vm2 = host.getVM(2);
    final String name = "/" + regionName;
    final String[] qs = { "select distinct pkid from " + name, "select distinct pkid, status from " + name };
    // Start server1
    final int port1 = (Integer) vm0.invoke(new SerializableCallable("Create Server1") {

        @Override
        public Object call() throws Exception {
            Region r1 = getCache().createRegionFactory(RegionShortcut.PARTITION).create(regionName);
            CacheServer server = getCache().addCacheServer();
            int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
            server.setPort(port);
            server.start();
            return port;
        }
    });
    // Start server2
    final int port2 = (Integer) vm1.invoke(new SerializableCallable("Create Server1") {

        @Override
        public Object call() throws Exception {
            Region r1 = getCache().createRegionFactory(RegionShortcut.PARTITION).create(regionName);
            CacheServer server = getCache().addCacheServer();
            int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
            server.setPort(port);
            server.start();
            return port;
        }
    });
    // create client load data and execute queries
    vm2.invoke(new SerializableCallable("Create client") {

        @Override
        public Object call() throws Exception {
            ClientCacheFactory cf = new ClientCacheFactory();
            cf.addPoolServer(NetworkUtils.getServerHostName(vm0.getHost()), port1);
            cf.addPoolServer(NetworkUtils.getServerHostName(vm1.getHost()), port2);
            ClientCache cache = getClientCache(cf);
            Region region = cache.createClientRegionFactory(ClientRegionShortcut.PROXY).create(regionName);
            // Put Portfolios with 2 different pkids
            for (int set = 1; set <= 2; set++) {
                for (int current = 1; current <= 5; current++) {
                    region.put("key-" + set + "_" + current, new PortfolioPdx(set, current));
                }
            }
            for (int i = 0; i < qs.length; i++) {
                SelectResults sr = (SelectResults) cache.getQueryService().newQuery(qs[i]).execute();
                assertEquals("Did not get expected result from query: " + qs[i] + " ", 2, sr.size());
            }
            return null;
        }
    });
    // execute query on server by setting DefaultQuery.setPdxReadSerialized
    // to simulate remote query
    vm0.invoke(new SerializableCallable("Create server") {

        @Override
        public Object call() throws Exception {
            DefaultQuery.setPdxReadSerialized(true);
            try {
                for (int i = 0; i < qs.length; i++) {
                    SelectResults sr = (SelectResults) getCache().getQueryService().newQuery(qs[i]).execute();
                    assertEquals("Did not get expected result from query: " + qs[i] + " ", 2, sr.size());
                }
            } finally {
                DefaultQuery.setPdxReadSerialized(false);
            }
            return null;
        }
    });
    disconnectAllFromDS();
}
Also used: Host (org.apache.geode.test.dunit.Host), PortfolioPdx (org.apache.geode.cache.query.data.PortfolioPdx), PdxString (org.apache.geode.pdx.internal.PdxString), ClientCache (org.apache.geode.cache.client.ClientCache), IgnoredException (org.apache.geode.test.dunit.IgnoredException), IOException (java.io.IOException), CacheException (org.apache.geode.cache.CacheException), ClientCacheFactory (org.apache.geode.cache.client.ClientCacheFactory), SelectResults (org.apache.geode.cache.query.SelectResults), VM (org.apache.geode.test.dunit.VM), SerializableCallable (org.apache.geode.test.dunit.SerializableCallable), Region (org.apache.geode.cache.Region), CacheServer (org.apache.geode.cache.server.CacheServer), DistributedTest (org.apache.geode.test.junit.categories.DistributedTest), Test (org.junit.Test)
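
Stripped of the DUnit scaffolding, the client side of this example reduces to a pool-backed ClientCache, a PROXY region, and a SELECT DISTINCT query. The sketch below is hypothetical: the localhost:40404 endpoint and the exampleRegion name stand in for the server and region that the test creates programmatically.

import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;
import org.apache.geode.cache.query.SelectResults;
import org.apache.geode.cache.query.data.PortfolioPdx;

public class DistinctPdxQuerySketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical endpoint; the DUnit test starts its servers and discovers ports at runtime.
        ClientCache cache = new ClientCacheFactory()
            .addPoolServer("localhost", 40404)
            .create();
        Region<String, PortfolioPdx> region = cache
            .<String, PortfolioPdx>createClientRegionFactory(ClientRegionShortcut.PROXY)
            .create("exampleRegion");

        // Two distinct pkid values spread over ten entries, mirroring the test data.
        for (int set = 1; set <= 2; set++) {
            for (int current = 1; current <= 5; current++) {
                region.put("key-" + set + "_" + current, new PortfolioPdx(set, current));
            }
        }

        SelectResults<?> sr = (SelectResults<?>) cache.getQueryService()
            .newQuery("select distinct pkid from /exampleRegion")
            .execute();
        System.out.println("distinct pkids: " + sr.size()); // expected 2
        cache.close();
    }
}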

Example 30 with PortfolioPdx

Use of org.apache.geode.cache.query.data.PortfolioPdx in project geode by apache.

The class PdxStringQueryDUnitTest, method testReplicatedRegionRangeIndex:

@Test
public void testReplicatedRegionRangeIndex() throws CacheException {
    final Host host = Host.getHost(0);
    VM server0 = host.getVM(0);
    VM server1 = host.getVM(1);
    VM server2 = host.getVM(2);
    VM client = host.getVM(3);
    final int numberOfEntries = 10;
    // Start server1 and create index
    server0.invoke(new CacheSerializableRunnable("Create Server1") {

        public void run2() throws CacheException {
            configAndStartBridgeServer(false, false, false);
            // create a local query service
            QueryService localQueryService = null;
            try {
                localQueryService = getCache().getQueryService();
            } catch (Exception e) {
                Assert.fail("Failed to get QueryService.", e);
            }
            // Verify the type of index created
            Index index = null;
            try {
                index = localQueryService.createIndex("secIdIndex", "pos.secId", regName + " p, p.positions.values pos");
                if (!(index instanceof RangeIndex)) {
                    fail("Range Index should have been created instead of " + index.getClass());
                }
            } catch (Exception ex) {
                fail("Failed to create index." + ex.getMessage());
            }
        }
    });
    // Start server2
    server1.invoke(new CacheSerializableRunnable("Create Server2") {

        public void run2() throws CacheException {
            configAndStartBridgeServer(false, false, false);
            Region region = getRootRegion().getSubregion(regionName);
        }
    });
    // Start server3
    server2.invoke(new CacheSerializableRunnable("Create Server3") {

        public void run2() throws CacheException {
            configAndStartBridgeServer(false, false, false);
            Region region = getRootRegion().getSubregion(regionName);
        }
    });
    // Client pool.
    final int port0 = server0.invoke(() -> PdxStringQueryDUnitTest.getCacheServerPort());
    final int port1 = server1.invoke(() -> PdxStringQueryDUnitTest.getCacheServerPort());
    final int port2 = server2.invoke(() -> PdxStringQueryDUnitTest.getCacheServerPort());
    final String host0 = NetworkUtils.getServerHostName(server0.getHost());
    // Create client pool.
    final String poolName = "testClientServerQueryPool";
    createPool(client, poolName, new String[] { host0 }, new int[] { port0, port1, port2 }, true);
    // Create client region and put PortfolioPdx objects (PdxInstances)
    client.invoke(new CacheSerializableRunnable("Create client") {

        public void run2() throws CacheException {
            AttributesFactory factory = new AttributesFactory();
            factory.setScope(Scope.LOCAL);
            ClientServerTestCase.configureConnectionPool(factory, host0, port1, -1, true, -1, -1, null);
            Region region = createRegion(regionName, rootRegionName, factory.create());
            LogWriterUtils.getLogWriter().info("Put PortfolioPdx");
            for (int i = 0; i < numberOfEntries; i++) {
                region.put("key-" + i, new PortfolioPdx(i));
            }
        }
    });
    // Verify if all the index keys are PdxStrings
    server0.invoke(new CacheSerializableRunnable("Create Server") {

        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(regionName);
            QueryService localQueryService = getCache().getQueryService();
            Index index = localQueryService.getIndex(region, "secIdIndex");
            for (Object key : ((RangeIndex) index).getValueToEntriesMap().keySet()) {
                if (!(key instanceof PdxString)) {
                    fail("All keys of the RangeIndex should be PdxStrings and not " + key.getClass());
                }
            }
        }
    });
    // Execute queries from client to server and locally on client
    SerializableRunnable executeQueries = new CacheSerializableRunnable("Execute queries") {

        public void run2() throws CacheException {
            QueryService remoteQueryService = null;
            QueryService localQueryService = null;
            SelectResults[][] rs = new SelectResults[1][2];
            try {
                remoteQueryService = (PoolManager.find(poolName)).getQueryService();
                localQueryService = getCache().getQueryService();
            } catch (Exception e) {
                Assert.fail("Failed to get QueryService.", e);
            }
            for (int i = 0; i < queryString.length; i++) {
                try {
                    LogWriterUtils.getLogWriter().info("### Executing Query on remote server:" + queryString[i]);
                    Query query = remoteQueryService.newQuery(queryString[i]);
                    rs[0][0] = (SelectResults) query.execute();
                    LogWriterUtils.getLogWriter().info("RR remote indexType: Range size of resultset: " + rs[0][0].size() + " for query: " + queryString[i]);
                    checkForPdxString(rs[0][0].asList(), queryString[i]);
                    LogWriterUtils.getLogWriter().info("### Executing Query locally on client:" + queryString[i]);
                    query = localQueryService.newQuery(queryString[i]);
                    rs[0][1] = (SelectResults) query.execute();
                    LogWriterUtils.getLogWriter().info("RR  client local indexType: Range size of resultset: " + rs[0][1].size() + " for query: " + queryString[i]);
                    checkForPdxString(rs[0][1].asList(), queryString[i]);
                    if (i < orderByQueryIndex) {
                        // Compare local and remote query results.
                        if (!compareResultsOfWithAndWithoutIndex(rs)) {
                            fail("Local and Remote Query Results are not matching for query :" + queryString[i]);
                        }
                    } else {
                        // compare the order of results returned
                        compareResultsOrder(rs, false);
                    }
                } catch (Exception e) {
                    Assert.fail("Failed executing " + queryString[i], e);
                }
            }
        }
    };
    client.invoke(executeQueries);
    // Put non-PDX objects on the server and execute queries locally
    server0.invoke(new CacheSerializableRunnable("Create Bridge Server") {

        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(regionName);
            LogWriterUtils.getLogWriter().info("Put Objects locally on server");
            for (int i = numberOfEntries; i < numberOfEntries * 2; i++) {
                region.put("key-" + i, new Portfolio(i));
            }
            QueryService localQueryService = getCache().getQueryService();
            // Query server1 locally to check if PdxString is not being returned
            for (int i = 0; i < queryString.length; i++) {
                try {
                    LogWriterUtils.getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
                    SelectResults rs = (SelectResults) localQueryService.newQuery(queryString[i]).execute();
                    LogWriterUtils.getLogWriter().info("RR server local indexType:Range  size of resultset: " + rs.size() + " for query: " + queryString[i]);
                    // The results should not be PdxString
                    checkForPdxString(rs.asList(), queryString[i]);
                } catch (Exception e) {
                    Assert.fail("Failed executing " + queryString[i], e);
                }
            }
        }
    });
    // test for readSerialized flag
    server0.invoke(new CacheSerializableRunnable("Create Bridge Server") {

        public void run2() throws CacheException {
            GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
            cache.setReadSerialized(true);
            QueryService localQueryService = getCache().getQueryService();
            // Query server1 locally to check if PdxString is not being returned
            for (int i = 0; i < queryString.length; i++) {
                try {
                    LogWriterUtils.getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
                    SelectResults rs = (SelectResults) localQueryService.newQuery(queryString[i]).execute();
                    LogWriterUtils.getLogWriter().info("RR  server local readSerializedTrue: indexType: Range size of resultset: " + rs.size() + " for query: " + queryString[i]);
                    // The results should not be PdxString
                    checkForPdxString(rs.asList(), queryString[i]);
                } catch (Exception e) {
                    Assert.fail("Failed executing " + queryString[i], e);
                }
            }
        }
    });
    // test for readSerialized flag on client
    client.invoke(new CacheSerializableRunnable("Create client") {

        public void run2() throws CacheException {
            GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
            cache.setReadSerialized(true);
            QueryService remoteQueryService = (PoolManager.find(poolName)).getQueryService();
            // Query server1 remotely to check if PdxString is not being returned
            for (int i = 0; i < queryString.length; i++) {
                try {
                    LogWriterUtils.getLogWriter().info("### Executing Query remotely from client:" + queryString[i]);
                    SelectResults rs = (SelectResults) remoteQueryService.newQuery(queryString[i]).execute();
                    LogWriterUtils.getLogWriter().info("RR server remote readSerializedTrue: indexType: Range size of resultset: " + rs.size() + " for query: " + queryString[i]);
                    // The results should not be PdxString
                    checkForPdxString(rs.asList(), queryString[i]);
                } catch (Exception e) {
                    Assert.fail("Failed executing " + queryString[i], e);
                }
            }
        }
    });
    closeClient(server2);
    closeClient(client);
    closeClient(server1);
    closeClient(server0);
}
Also used: DefaultQuery (org.apache.geode.cache.query.internal.DefaultQuery), Query (org.apache.geode.cache.query.Query), CacheException (org.apache.geode.cache.CacheException), Portfolio (org.apache.geode.cache.query.data.Portfolio), SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable), CacheSerializableRunnable (org.apache.geode.cache30.CacheSerializableRunnable), Host (org.apache.geode.test.dunit.Host), RangeIndex (org.apache.geode.cache.query.internal.index.RangeIndex), Index (org.apache.geode.cache.query.Index), CompactRangeIndex (org.apache.geode.cache.query.internal.index.CompactRangeIndex), PartitionedIndex (org.apache.geode.cache.query.internal.index.PartitionedIndex), PortfolioPdx (org.apache.geode.cache.query.data.PortfolioPdx), PdxString (org.apache.geode.pdx.internal.PdxString), IgnoredException (org.apache.geode.test.dunit.IgnoredException), IOException (java.io.IOException), AttributesFactory (org.apache.geode.cache.AttributesFactory), PartitionAttributesFactory (org.apache.geode.cache.PartitionAttributesFactory), SelectResults (org.apache.geode.cache.query.SelectResults), QueryService (org.apache.geode.cache.query.QueryService), VM (org.apache.geode.test.dunit.VM), Region (org.apache.geode.cache.Region), GemFireCacheImpl (org.apache.geode.internal.cache.GemFireCacheImpl), DistributedTest (org.apache.geode.test.junit.categories.DistributedTest), Test (org.junit.Test)
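
The readSerialized checks above flip the internal GemFireCacheImpl.setReadSerialized flag at runtime; the supported way to get the same behavior in application code is to enable PDX read-serialized when the cache is created. A hypothetical client-side sketch follows (illustrative endpoint and region name, assuming a matching server-side region exists).

import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;
import org.apache.geode.cache.query.SelectResults;
import org.apache.geode.cache.query.data.PortfolioPdx;
import org.apache.geode.pdx.PdxInstance;

public class ReadSerializedQuerySketch {
    public static void main(String[] args) throws Exception {
        // Public knob corresponding to the internal setReadSerialized(true) used in the test:
        // keep values in serialized PDX form so reads and query results return PdxInstances.
        ClientCache cache = new ClientCacheFactory()
            .addPoolServer("localhost", 40404) // hypothetical endpoint
            .setPdxReadSerialized(true)
            .create();
        Region<String, PortfolioPdx> region = cache
            .<String, PortfolioPdx>createClientRegionFactory(ClientRegionShortcut.PROXY)
            .create("exampleRegion"); // assumes a matching server-side region
        for (int i = 0; i < 10; i++) {
            region.put("key-" + i, new PortfolioPdx(i));
        }

        SelectResults<?> rs = (SelectResults<?>) cache.getQueryService()
            .newQuery("SELECT * FROM /exampleRegion")
            .execute();
        for (Object row : rs) {
            // Whole-object results come back as PdxInstances; projected String fields are
            // materialized as java.lang.String rather than the internal PdxString.
            System.out.println(row instanceof PdxInstance ? "PdxInstance" : row.getClass().getName());
        }
        cache.close();
    }
}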

Aggregations

PortfolioPdx (org.apache.geode.cache.query.data.PortfolioPdx) - 51 uses
Region (org.apache.geode.cache.Region) - 50 uses
Test (org.junit.Test) - 50 uses
QueryService (org.apache.geode.cache.query.QueryService) - 47 uses
SelectResults (org.apache.geode.cache.query.SelectResults) - 46 uses
Query (org.apache.geode.cache.query.Query) - 34 uses
DistributedTest (org.apache.geode.test.junit.categories.DistributedTest) - 28 uses
VM (org.apache.geode.test.dunit.VM) - 27 uses
DefaultQuery (org.apache.geode.cache.query.internal.DefaultQuery) - 26 uses
Host (org.apache.geode.test.dunit.Host) - 26 uses
CacheException (org.apache.geode.cache.CacheException) - 19 uses
Struct (org.apache.geode.cache.query.Struct) - 17 uses
SerializableCallable (org.apache.geode.test.dunit.SerializableCallable) - 14 uses
AttributesFactory (org.apache.geode.cache.AttributesFactory) - 13 uses
ClientCache (org.apache.geode.cache.client.ClientCache) - 13 uses
ClientCacheFactory (org.apache.geode.cache.client.ClientCacheFactory) - 13 uses
CompiledSelect (org.apache.geode.cache.query.internal.CompiledSelect) - 13 uses
CacheSerializableRunnable (org.apache.geode.cache30.CacheSerializableRunnable) - 13 uses
Iterator (java.util.Iterator) - 12 uses
PartitionAttributesFactory (org.apache.geode.cache.PartitionAttributesFactory) - 12 uses