Example 16 with GemFireCacheImpl

use of org.apache.geode.internal.cache.GemFireCacheImpl in project geode by apache.

the class PdxQueryDUnitTest method testClientForFieldInOtherVersion.

/**
   * Two replicated servers and two clients. client2 puts version1 and version2 objects on server1.
   * client1 has registered interest with server2, so it receives the pdx objects for both versions.
   * Tests a local query on client1 and verifies that client1 fetched the pdx types from the server.
   * 
   * @throws CacheException
   */
@Test
public void testClientForFieldInOtherVersion() throws CacheException {
    final Host host = Host.getHost(0);
    final VM vm0 = host.getVM(0);
    final VM vm1 = host.getVM(1);
    final VM vm2 = host.getVM(2);
    final VM vm3 = host.getVM(3);
    final int numberOfEntries = 10;
    final String name = "/" + regionName;
    final String[] qs = { "select pdxStatus from " + name + " where pdxStatus = 'active'", "select status from " + name + " where id > 8 and id < 14" };
    // Start server1
    final int port1 = (Integer) vm0.invoke(new SerializableCallable("Create Server1") {

        @Override
        public Object call() throws Exception {
            Region r1 = getCache().createRegionFactory(RegionShortcut.REPLICATE).create(regionName);
            CacheServer server = getCache().addCacheServer();
            int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
            server.setPort(port);
            server.start();
            return port;
        }
    });
    // Start server2
    final int port2 = (Integer) vm1.invoke(new SerializableCallable("Create Server2") {

        @Override
        public Object call() throws Exception {
            Region r1 = getCache().createRegionFactory(RegionShortcut.REPLICATE).create(regionName);
            CacheServer server = getCache().addCacheServer();
            int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
            server.setPort(port);
            server.start();
            return port;
        }
    });
    // client 1 registers interest for server2
    vm2.invoke(new SerializableCallable("Create client") {

        @Override
        public Object call() throws Exception {
            ClientCacheFactory cf = new ClientCacheFactory();
            cf.setPoolSubscriptionEnabled(true);
            cf.addPoolServer(NetworkUtils.getServerHostName(vm1.getHost()), port2);
            ClientCache cache = getClientCache(cf);
            Region region = cache.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY).create(regionName);
            region.registerInterest("ALL_KEYS");
            return null;
        }
    });
    // client2 loads both version objects on server1
    vm3.invoke(new SerializableCallable("Create client") {

        @Override
        public Object call() throws Exception {
            ClientCacheFactory cf = new ClientCacheFactory();
            cf.addPoolServer(NetworkUtils.getServerHostName(vm0.getHost()), port1);
            ClientCache cache = getClientCache(cf);
            Region region = cache.createClientRegionFactory(ClientRegionShortcut.PROXY).create(regionName);
            // Load version 1 objects
            for (int i = 0; i < numberOfEntries; i++) {
                PdxInstanceFactory pdxFactory = cache.createPdxInstanceFactory("PdxPortfolio");
                pdxFactory.writeString("pdxStatus", (i % 2 == 0 ? "active" : "inactive"));
                pdxFactory.writeInt("id", i);
                PdxInstance pdxInstance = pdxFactory.create();
                region.put("key-" + i, pdxInstance);
            }
            // Load version 2 objects
            for (int i = numberOfEntries; i < numberOfEntries * 2; i++) {
                PdxInstanceFactory pdxFactory = cache.createPdxInstanceFactory("PdxPortfolio");
                pdxFactory.writeString("status", i % 2 == 0 ? "active" : "inactive");
                pdxFactory.writeInt("id", i);
                PdxInstance pdxInstance = pdxFactory.create();
                region.put("key-" + i, pdxInstance);
            }
            return null;
        }
    });
    // query locally on client 1 which has registered interest
    vm2.invoke(new SerializableCallable("Create client") {

        @Override
        public Object call() throws Exception {
            GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
            cache.setReadSerialized(true);
            QueryService localQueryService = null;
            // Execute query remotely
            try {
                localQueryService = ((ClientCache) getCache()).getLocalQueryService();
            } catch (Exception e) {
                Assert.fail("Failed to get QueryService.", e);
            }
            for (int i = 0; i < qs.length; i++) {
                try {
                    SelectResults sr = (SelectResults) localQueryService.newQuery(qs[i]).execute();
                    assertEquals(5, sr.size());
                    if (i == 1) {
                        for (Object o : sr) {
                            if (o == null) {
                                // acceptable: the "status" field does not exist in version 1 objects
                            } else if (o instanceof String) {
                                // acceptable: version 2 objects return the field as a String
                            } else {
                                fail("Result should be either null or String and not " + o.getClass());
                            }
                        }
                    }
                } catch (Exception e) {
                    Assert.fail("Failed executing " + qs[i], e);
                }
            }
            // check if the types registered on server are fetched by the client
            TypeRegistration registration = GemFireCacheImpl.getForPdx("PDX registry is unavailable because the Cache has been closed.").getPdxRegistry().getTypeRegistration();
            Assert.assertTrue(registration instanceof ClientTypeRegistration);
            Map<Integer, PdxType> m = ((ClientTypeRegistration) registration).types();
            assertEquals(2, m.size());
            for (PdxType type : m.values()) {
                assertEquals("PdxPortfolio", type.getClassName());
            }
            return null;
        }
    });
    Invoke.invokeInEveryVM("Disconnecting from the Distributed system", () -> disconnectFromDS());
}
Also used : PdxInstanceFactory(org.apache.geode.pdx.PdxInstanceFactory) PdxType(org.apache.geode.pdx.internal.PdxType) PeerTypeRegistration(org.apache.geode.pdx.internal.PeerTypeRegistration) ClientTypeRegistration(org.apache.geode.pdx.internal.ClientTypeRegistration) TypeRegistration(org.apache.geode.pdx.internal.TypeRegistration) Host(org.apache.geode.test.dunit.Host) ClientCache(org.apache.geode.cache.client.ClientCache) CacheException(org.apache.geode.cache.CacheException) ClientCacheFactory(org.apache.geode.cache.client.ClientCacheFactory) SelectResults(org.apache.geode.cache.query.SelectResults) PdxInstance(org.apache.geode.pdx.PdxInstance) QueryService(org.apache.geode.cache.query.QueryService) VM(org.apache.geode.test.dunit.VM) SerializableCallable(org.apache.geode.test.dunit.SerializableCallable) Region(org.apache.geode.cache.Region) CacheServer(org.apache.geode.cache.server.CacheServer) GemFireCacheImpl(org.apache.geode.internal.cache.GemFireCacheImpl) ClientTypeRegistration(org.apache.geode.pdx.internal.ClientTypeRegistration) Map(java.util.Map) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest) Test(org.junit.Test)
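The pattern this test exercises, one PDX type name registered with two different field sets, can be reproduced with the public client API alone. Below is a minimal sketch, not taken from the test: the localhost:40404 endpoint, the "portfolios" region name, the keys, and the main() wrapper are assumptions; only the "PdxPortfolio" type name and the pdxStatus/status/id fields mirror the example above.

import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;
import org.apache.geode.pdx.PdxInstance;
import org.apache.geode.pdx.PdxInstanceFactory;

public class TwoPdxVersionsSketch {
    public static void main(String[] args) {
        // Hypothetical server endpoint; the DUnit test picks a free port at runtime.
        ClientCache cache = new ClientCacheFactory()
            .addPoolServer("localhost", 40404)
            .create();
        Region<String, PdxInstance> region = cache
            .<String, PdxInstance>createClientRegionFactory(ClientRegionShortcut.PROXY)
            .create("portfolios");

        // Version 1 of the type: carries a "pdxStatus" field.
        PdxInstanceFactory v1 = cache.createPdxInstanceFactory("PdxPortfolio");
        v1.writeString("pdxStatus", "active");
        v1.writeInt("id", 1);
        region.put("key-1", v1.create());

        // Version 2 of the same type name: "status" replaces "pdxStatus".
        // The cluster's PDX type registry keeps both versions under one class name,
        // which is what the test later reads back through ClientTypeRegistration.
        PdxInstanceFactory v2 = cache.createPdxInstanceFactory("PdxPortfolio");
        v2.writeString("status", "inactive");
        v2.writeInt("id", 11);
        region.put("key-11", v2.create());

        cache.close();
    }
}

Because both puts go through the same region, both type versions end up registered in the servers' PDX registry, which is the precondition the local query and the ClientTypeRegistration assertions in the test depend on.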

Example 17 with GemFireCacheImpl

use of org.apache.geode.internal.cache.GemFireCacheImpl in project geode by apache.

the class PdxQueryDUnitTest method testPdxInstanceFieldInOtherVersion.

/**
   * Tests querying a field that is not present in one version of the Pdx object but is present in
   * another version of the pdx instance.
   * 
   * @throws CacheException
   */
@Test
public void testPdxInstanceFieldInOtherVersion() throws CacheException {
    final Host host = Host.getHost(0);
    final VM vm0 = host.getVM(0);
    final VM vm1 = host.getVM(1);
    final VM vm2 = host.getVM(2);
    final VM vm3 = host.getVM(3);
    final int numberOfEntries = 10;
    final String name = "/" + regionName;
    final String[] qs = { "select pdxStatus from " + name + " where pdxStatus = 'active'", "select pdxStatus from " + name + " where id > 8 and id < 14" };
    // Start server1
    final int port1 = (Integer) vm0.invoke(new SerializableCallable("Create Server1") {

        @Override
        public Object call() throws Exception {
            Region r1 = getCache().createRegionFactory(RegionShortcut.REPLICATE).create(regionName);
            CacheServer server = getCache().addCacheServer();
            int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
            server.setPort(port);
            server.start();
            return port;
        }
    });
    // Start server2
    final int port2 = (Integer) vm1.invoke(new SerializableCallable("Create Server2") {

        @Override
        public Object call() throws Exception {
            Region r1 = getCache().createRegionFactory(RegionShortcut.REPLICATE).create(regionName);
            CacheServer server = getCache().addCacheServer();
            int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
            server.setPort(port);
            server.start();
            return port;
        }
    });
    // client1 loads version 1 objects on server1
    vm2.invoke(new SerializableCallable("Create client") {

        @Override
        public Object call() throws Exception {
            ClientCacheFactory cf = new ClientCacheFactory();
            cf.addPoolServer(NetworkUtils.getServerHostName(vm0.getHost()), port1);
            ClientCache cache = getClientCache(cf);
            Region region = cache.createClientRegionFactory(ClientRegionShortcut.PROXY).create(regionName);
            // Load version 1 objects
            for (int i = 0; i < numberOfEntries; i++) {
                PdxInstanceFactory pdxInstanceFactory = PdxInstanceFactoryImpl.newCreator("PdxVersionedNewPortfolio", false);
                pdxInstanceFactory.writeInt("id", i);
                pdxInstanceFactory.writeString("pdxStatus", (i % 2 == 0 ? "active" : "inactive"));
                PdxInstance pdxInstance = pdxInstanceFactory.create();
                region.put("key-" + i, pdxInstance);
            }
            return null;
        }
    });
    // client 2 loads version 2 objects on server2
    vm3.invoke(new SerializableCallable("Create client") {

        @Override
        public Object call() throws Exception {
            ClientCacheFactory cf = new ClientCacheFactory();
            cf.addPoolServer(NetworkUtils.getServerHostName(vm1.getHost()), port2);
            ClientCache cache = getClientCache(cf);
            Region region = cache.createClientRegionFactory(ClientRegionShortcut.PROXY).create(regionName);
            // Load version 2 objects
            for (int i = numberOfEntries; i < numberOfEntries * 2; i++) {
                PdxInstanceFactory pdxInstanceFactory = PdxInstanceFactoryImpl.newCreator("PdxVersionedNewPortfolio", false);
                pdxInstanceFactory.writeInt("id", i);
                pdxInstanceFactory.writeString("status", (i % 2 == 0 ? "active" : "inactive"));
                PdxInstance pdxInstance = pdxInstanceFactory.create();
                region.put("key-" + i, pdxInstance);
            }
            return null;
        }
    });
    // query remotely from client 1 with version 1 in classpath
    vm2.invoke(new SerializableCallable("Create client") {

        @Override
        public Object call() throws Exception {
            QueryService remoteQueryService = null;
            // Execute query remotely
            try {
                remoteQueryService = getCache().getQueryService();
            } catch (Exception e) {
                Assert.fail("Failed to get QueryService.", e);
            }
            for (int i = 0; i < qs.length; i++) {
                try {
                    SelectResults sr = (SelectResults) remoteQueryService.newQuery(qs[i]).execute();
                    assertEquals(5, sr.size());
                    if (i == 1) {
                        for (Object o : sr) {
                            if (o == null) {
                            } else if (o instanceof String) {
                            } else {
                                fail("Result should be either null or String and not " + o.getClass());
                            }
                        }
                    }
                } catch (Exception e) {
                    Assert.fail("Failed executing " + qs[i], e);
                }
            }
            return null;
        }
    });
    // query remotely from client 2 with version 2 in classpath
    vm3.invoke(new SerializableCallable("Create client") {

        @Override
        public Object call() throws Exception {
            QueryService remoteQueryService = null;
            // Execute query remotely
            try {
                remoteQueryService = getCache().getQueryService();
            } catch (Exception e) {
                Assert.fail("Failed to get QueryService.", e);
            }
            for (int i = 0; i < qs.length; i++) {
                try {
                    SelectResults sr = (SelectResults) remoteQueryService.newQuery(qs[i]).execute();
                    assertEquals(5, sr.size());
                    if (i == 1) {
                        for (Object o : sr) {
                            if (o == null) {
                            } else if (o instanceof String) {
                            } else {
                                fail("Result should be either null or String and not " + o.getClass());
                            }
                        }
                    }
                } catch (Exception e) {
                    Assert.fail("Failed executing " + qs[i], e);
                }
            }
            return null;
        }
    });
    // query locally on server
    vm0.invoke(new SerializableCallable("Create client") {

        @Override
        public Object call() throws Exception {
            GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
            cache.setReadSerialized(true);
            QueryService queryService = null;
            try {
                queryService = getCache().getQueryService();
            } catch (Exception e) {
                Assert.fail("Failed to get QueryService.", e);
            }
            for (int i = 0; i < qs.length; i++) {
                try {
                    SelectResults sr = (SelectResults) queryService.newQuery(qs[i]).execute();
                    assertEquals(5, sr.size());
                    if (i == 1) {
                        for (Object o : sr) {
                            if (o == null) {
                            } else if (o instanceof String) {
                            } else {
                                fail("Result should be either null or String and not " + o.getClass());
                            }
                        }
                    }
                } catch (Exception e) {
                    Assert.fail("Failed executing " + qs[i], e);
                }
            }
            return null;
        }
    });
    // create index
    vm0.invoke(new SerializableCallable("Query") {

        @Override
        public Object call() throws Exception {
            QueryService qs = null;
            try {
                qs = getCache().getQueryService();
                qs.createIndex("status", "status", name);
            } catch (Exception e) {
                Assert.fail("Exception getting query service ", e);
            }
            return null;
        }
    });
    // query from client 1 with version 1 in classpath
    vm2.invoke(new SerializableCallable("Create client") {

        @Override
        public Object call() throws Exception {
            QueryService remoteQueryService = null;
            // Execute query remotely
            try {
                remoteQueryService = getCache().getQueryService();
            } catch (Exception e) {
                Assert.fail("Failed to get QueryService.", e);
            }
            for (int i = 0; i < qs.length; i++) {
                try {
                    SelectResults sr = (SelectResults) remoteQueryService.newQuery(qs[i]).execute();
                    assertEquals(5, sr.size());
                } catch (Exception e) {
                    Assert.fail("Failed executing " + qs[i], e);
                }
            }
            return null;
        }
    });
    Invoke.invokeInEveryVM(DistributedTestCase.class, "disconnectFromDS");
}
Also used : PdxInstanceFactory(org.apache.geode.pdx.PdxInstanceFactory) Host(org.apache.geode.test.dunit.Host) ClientCache(org.apache.geode.cache.client.ClientCache) CacheException(org.apache.geode.cache.CacheException) ClientCacheFactory(org.apache.geode.cache.client.ClientCacheFactory) SelectResults(org.apache.geode.cache.query.SelectResults) PdxInstance(org.apache.geode.pdx.PdxInstance) QueryService(org.apache.geode.cache.query.QueryService) VM(org.apache.geode.test.dunit.VM) SerializableCallable(org.apache.geode.test.dunit.SerializableCallable) Region(org.apache.geode.cache.Region) CacheServer(org.apache.geode.cache.server.CacheServer) GemFireCacheImpl(org.apache.geode.internal.cache.GemFireCacheImpl) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest) Test(org.junit.Test)
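The test flips the internal read-serialized flag by casting to GemFireCacheImpl; the supported way to get the same behavior in application code is setPdxReadSerialized(true) on CacheFactory (or ClientCacheFactory). The following is a hedged sketch, not the test itself: the standalone cache setup and the empty "/portfolios" region are assumptions, while the query string follows the qs[] array above and simply returns zero rows here.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.query.QueryService;
import org.apache.geode.cache.query.SelectResults;

public class ReadSerializedQuerySketch {
    public static void main(String[] args) throws Exception {
        // Public equivalent of the internal GemFireCacheImpl.setReadSerialized(true)
        // call made in the test: query results stay as PdxInstances.
        Cache cache = new CacheFactory()
            .setPdxReadSerialized(true)
            .create();
        cache.createRegionFactory(RegionShortcut.REPLICATE).create("portfolios");

        QueryService qs = cache.getQueryService();
        // Same shape as the second query in the qs[] array above; the projected
        // field may be absent in some PDX versions, so nulls are possible results.
        SelectResults<?> results = (SelectResults<?>) qs
            .newQuery("select pdxStatus from /portfolios where id > 8 and id < 14")
            .execute();
        System.out.println("matched " + results.size() + " entries");
        cache.close();
    }
}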

Example 18 with GemFireCacheImpl

use of org.apache.geode.internal.cache.GemFireCacheImpl in project geode by apache.

the class PdxLocalQueryDUnitTest method testLocalPdxQueriesOnPR.

@Test
public void testLocalPdxQueriesOnPR() throws Exception {
    final Host host = Host.getHost(0);
    final VM server1 = host.getVM(0);
    final VM server2 = host.getVM(1);
    final VM client = host.getVM(2);
    final int numberOfEntries = 10;
    final String name = "/" + regionName;
    final String[] queries = {
        "select * from " + name + " where position1 = $1",
        "select * from " + name + " where aDay = $1",
        "select distinct * from " + name + " p where p.status = 'inactive'", // numberOfEntries
        "select distinct p.status from " + name + " p where p.status = 'inactive'", // 1
        "select p from " + name + " p where p.status = 'inactive'", // numberOfEntries
        "select * from " + name + " p, p.positions.values v where v.secId = 'IBM'", // 4
        "select v from " + name + " p, p.positions.values v where v.secId = 'IBM'", // 4
        "select p.status from " + name + " p where p.status = 'inactive'", // numberOfEntries
        "select distinct * from " + name + " p where p.status = 'inactive' order by p.ID", // numberOfEntries
        "select * from " + name + " p where p.status = 'inactive' or p.ID > 0", // 19
        "select * from " + name + " p where p.status = 'inactive' and p.ID >= 0", // numberOfEntries
        "select * from " + name + " p where p.status in set ('inactive', 'active')", // numberOfEntries*2
        "select * from " + name + " p where p.ID > 0 and p.ID < 10", // 9
        "select v from " + name + " p, p.positions.values v where p.status = 'inactive'", // numberOfEntries*2
        "select v.secId from " + name + " p, p.positions.values v where p.status = 'inactive'", // numberOfEntries*2
        "select distinct p from " + name + " p, p.positions.values v where p.status = 'inactive' and v.pid >= 0", // numberOfEntries
        "select distinct p from " + name + " p, p.positions.values v where p.status = 'inactive' or v.pid > 0", // numberOfEntries*2
        "select distinct * from " + name + " p, p.positions.values v where p.status = 'inactive'", // numberOfEntries*2
        "select * from " + name + ".values v where v.status = 'inactive'", // numberOfEntries
        "select v from " + name + " v where v in (select p from " + name + " p where p.ID > 0)", // 19
        "select v from " + name + " v where v.status in (select distinct p.status from " + name + " p where p.status = 'inactive')", // numberOfEntries
        "select * from " + name + " v where v.status = ELEMENT (select distinct p.status from " + name + " p where p.status = 'inactive')" // numberOfEntries
    };
    final int[] results = { 2, 3, numberOfEntries, 1, numberOfEntries, 4, 4, numberOfEntries, numberOfEntries, 19, numberOfEntries, numberOfEntries * 2, 9, numberOfEntries * 2, numberOfEntries * 2, numberOfEntries, numberOfEntries * 2, numberOfEntries * 2, numberOfEntries, 19, numberOfEntries, numberOfEntries };
    // Start server1
    final int port1 = (Integer) server1.invoke(new SerializableCallable("Create Server1") {

        @Override
        public Object call() throws Exception {
            Region r1 = getCache().createRegionFactory(RegionShortcut.PARTITION).create(regionName);
            for (int i = 0; i < numberOfEntries; i++) {
                r1.put("key-" + i, new PortfolioPdx(i));
            }
            CacheServer server = getCache().addCacheServer();
            int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
            server.setPort(port);
            server.start();
            return port;
        }
    });
    // Start server2
    final int port2 = (Integer) server2.invoke(new SerializableCallable("Create Server2") {

        @Override
        public Object call() throws Exception {
            Region r1 = getCache().createRegionFactory(RegionShortcut.PARTITION).create(regionName);
            CacheServer server = getCache().addCacheServer();
            int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
            server.setPort(port);
            server.start();
            return port;
        }
    });
    // client loads pdx objects on server
    client.invoke(new SerializableCallable("Create client") {

        @Override
        public Object call() throws Exception {
            ClientCacheFactory cf = new ClientCacheFactory();
            cf.addPoolServer(NetworkUtils.getServerHostName(server1.getHost()), port1);
            ClientCache cache = getClientCache(cf);
            Region region = cache.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY).create(regionName);
            for (int i = numberOfEntries; i < numberOfEntries * 2; i++) {
                region.put("key-" + i, new PortfolioPdx(i));
            }
            QueryService qs = null;
            SelectResults sr = null;
            // Execute query remotely
            try {
                qs = cache.getQueryService();
            } catch (Exception e) {
                Assert.fail("Failed to get QueryService.", e);
            }
            PositionPdx pos = new PositionPdx("IBM", 100);
            PortfolioPdx.Day pDay = new PortfolioPdx(1).aDay;
            for (int i = 0; i < queries.length; i++) {
                try {
                    if (i == 0) {
                        sr = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pos });
                    } else if (i == 1) {
                        sr = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pDay });
                    } else {
                        sr = (SelectResults) qs.newQuery(queries[i]).execute();
                    }
                    assertTrue("Size of resultset should be greater than 0 for query: " + queries[i], sr.size() > 0);
                    assertEquals("Expected and actual results do not match for query: " + queries[i], results[i], sr.size());
                    for (Object result : sr) {
                        if (result instanceof Struct) {
                            Object[] r = ((Struct) result).getFieldValues();
                            for (int j = 0; j < r.length; j++) {
                                if (r[j] instanceof PdxInstance || r[j] instanceof PdxString) {
                                    fail("Result object should be a domain object and not an instance of " + r[j].getClass() + " for query: " + queries[i]);
                                }
                            }
                        } else if (result instanceof PdxInstance || result instanceof PdxString) {
                            fail("Result object should be a domain object and not an instance of " + result.getClass() + " for query: " + queries[i]);
                        }
                    }
                } catch (Exception e) {
                    Assert.fail("Failed executing query " + queries[i], e);
                }
            }
            return null;
        }
    });
    // query locally on server1
    server1.invoke(new SerializableCallable("query locally on server1") {

        @Override
        public Object call() throws Exception {
            GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
            QueryService qs = null;
            SelectResults sr = null;
            // Execute query locally
            try {
                qs = getCache().getQueryService();
            } catch (Exception e) {
                Assert.fail("Failed to get QueryService.", e);
            }
            PositionPdx pos = new PositionPdx("IBM", 100);
            PortfolioPdx.Day pDay = new PortfolioPdx(1).aDay;
            for (int i = 0; i < queries.length; i++) {
                try {
                    if (i == 0) {
                        sr = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pos });
                    } else if (i == 1) {
                        sr = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pDay });
                    } else {
                        sr = (SelectResults) qs.newQuery(queries[i]).execute();
                    }
                    assertTrue("Size of resultset should be greater than 0 for query: " + queries[i], sr.size() > 0);
                    assertEquals("Expected and actual results do not match for query: " + queries[i], results[i], sr.size());
                    for (Object result : sr) {
                        if (result instanceof Struct) {
                            Object[] r = ((Struct) result).getFieldValues();
                            for (int j = 0; j < r.length; j++) {
                                if (r[j] instanceof PdxInstance || r[j] instanceof PdxString) {
                                    fail("Result object should be a domain object and not an instance of " + r[j].getClass() + " for query: " + queries[i]);
                                }
                            }
                        } else if (result instanceof PdxInstance || result instanceof PdxString) {
                            fail("Result object should be a domain object and not an instance of " + result.getClass() + " for query: " + queries[i]);
                        }
                    }
                } catch (Exception e) {
                    Assert.fail("Failed executing query " + queries[i], e);
                }
            }
            return null;
        }
    });
    // query locally on server2
    server2.invoke(new SerializableCallable("query locally on server2") {

        @Override
        public Object call() throws Exception {
            GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
            QueryService qs = null;
            SelectResults[][] sr = new SelectResults[queries.length][2];
            // Execute query locally
            try {
                qs = getCache().getQueryService();
            } catch (Exception e) {
                Assert.fail("Failed to get QueryService.", e);
            }
            PositionPdx pos = new PositionPdx("IBM", 100);
            PortfolioPdx.Day pDay = new PortfolioPdx(1).aDay;
            for (int i = 0; i < queries.length; i++) {
                try {
                    if (i == 0) {
                        sr[i][0] = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pos });
                    } else if (i == 1) {
                        sr[i][0] = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pDay });
                    } else {
                        sr[i][0] = (SelectResults) qs.newQuery(queries[i]).execute();
                    }
                    assertTrue("Size of resultset should be greater than 0 for query: " + queries[i], sr[i][0].size() > 0);
                    assertEquals("Expected and actual results do not match for query: " + queries[i], results[i], sr[i][0].size());
                    for (Object result : sr[i][0]) {
                        if (result instanceof Struct) {
                            Object[] r = ((Struct) result).getFieldValues();
                            for (int j = 0; j < r.length; j++) {
                                if (r[j] instanceof PdxInstance || r[j] instanceof PdxString) {
                                    fail("Result object should be a domain object and not an instance of " + r[j].getClass() + " for query: " + queries[i]);
                                }
                            }
                        } else if (result instanceof PdxInstance || result instanceof PdxString) {
                            fail("Result object should be a domain object and not an instance of " + result.getClass() + " for query: " + queries[i]);
                        }
                    }
                } catch (Exception e) {
                    Assert.fail("Failed executing query " + queries[i], e);
                }
            }
            // create index
            qs.createIndex("statusIndex", "p.status", name + " p");
            qs.createIndex("IDIndex", "ID", name);
            qs.createIndex("pIdIndex", "pos.getPid()", name + " p, p.positions.values pos");
            qs.createIndex("secIdIndex", "pos.secId", name + " p, p.positions.values pos");
            for (int i = 0; i < queries.length; i++) {
                try {
                    if (i == 0) {
                        sr[i][1] = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pos });
                    } else if (i == 1) {
                        sr[i][1] = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pDay });
                    } else {
                        sr[i][1] = (SelectResults) qs.newQuery(queries[i]).execute();
                    }
                    assertTrue("Size of resultset should be greater than 0 for query: " + queries[i], sr[i][1].size() > 0);
                    assertEquals("Expected and actual results do not match for query: " + queries[i], results[i], sr[i][1].size());
                    for (Object result : sr[i][1]) {
                        if (result instanceof Struct) {
                            Object[] r = ((Struct) result).getFieldValues();
                            for (int j = 0; j < r.length; j++) {
                                if (r[j] instanceof PdxInstance || r[j] instanceof PdxString) {
                                    fail("Result object should be a domain object and not an instance of " + r[j].getClass() + " for query: " + queries[i]);
                                }
                            }
                        } else if (result instanceof PdxInstance || result instanceof PdxString) {
                            fail("Result object should be a domain object and not an instance of " + result.getClass() + " for query: " + queries[i]);
                        }
                    }
                } catch (Exception e) {
                    Assert.fail("Failed executing query " + queries[i], e);
                }
            }
            StructSetOrResultsSet ssOrrs = new StructSetOrResultsSet();
            ssOrrs.CompareQueryResultsWithoutAndWithIndexes(sr, queries.length, queries);
            return null;
        }
    });
    this.closeClient(client);
    this.closeClient(server1);
    this.closeClient(server2);
}
Also used : PositionPdx(org.apache.geode.cache.query.data.PositionPdx) PortfolioPdx(org.apache.geode.cache.query.data.PortfolioPdx) PdxString(org.apache.geode.pdx.internal.PdxString) ClientCacheFactory(org.apache.geode.cache.client.ClientCacheFactory) Struct(org.apache.geode.cache.query.Struct) SelectResults(org.apache.geode.cache.query.SelectResults) CacheServer(org.apache.geode.cache.server.CacheServer) GemFireCacheImpl(org.apache.geode.internal.cache.GemFireCacheImpl) StructSetOrResultsSet(org.apache.geode.cache.query.functional.StructSetOrResultsSet) Host(org.apache.geode.test.dunit.Host) ClientCache(org.apache.geode.cache.client.ClientCache) PdxString(org.apache.geode.pdx.internal.PdxString) CacheException(org.apache.geode.cache.CacheException) PdxInstance(org.apache.geode.pdx.PdxInstance) QueryService(org.apache.geode.cache.query.QueryService) VM(org.apache.geode.test.dunit.VM) SerializableCallable(org.apache.geode.test.dunit.SerializableCallable) Region(org.apache.geode.cache.Region) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)
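Stripped of the DUnit choreography, the server-side half of this test amounts to: create a PARTITION region, load PortfolioPdx entries, build the indexes, and run the same OQL locally. The sketch below is an approximation under assumptions: PortfolioPdx is a domain class from Geode's test sources, and the standalone CacheFactory setup, region name, and entry count are placeholders; the index names and expressions follow the test.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.query.QueryService;
import org.apache.geode.cache.query.SelectResults;
import org.apache.geode.cache.query.data.PortfolioPdx;

public class PartitionedRegionIndexSketch {
    public static void main(String[] args) throws Exception {
        Cache cache = new CacheFactory().create();
        Region<String, PortfolioPdx> region = cache
            .<String, PortfolioPdx>createRegionFactory(RegionShortcut.PARTITION)
            .create("portfolios");
        for (int i = 0; i < 10; i++) {
            region.put("key-" + i, new PortfolioPdx(i));
        }

        QueryService qs = cache.getQueryService();
        // Same index expressions the DUnit test creates on server2.
        qs.createIndex("statusIndex", "p.status", "/portfolios p");
        qs.createIndex("IDIndex", "ID", "/portfolios");

        // With read-serialized left at its default (false), results come back as
        // deserialized domain objects rather than PdxInstance/PdxString values.
        SelectResults<?> sr = (SelectResults<?>) qs
            .newQuery("select * from /portfolios p where p.status = 'inactive'")
            .execute();
        System.out.println("inactive portfolios: " + sr.size());
        cache.close();
    }
}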

Example 19 with GemFireCacheImpl

use of org.apache.geode.internal.cache.GemFireCacheImpl in project geode by apache.

the class PdxStringQueryDUnitTest method testReplicatedRegionNoIndex.

@Test
public void testReplicatedRegionNoIndex() throws CacheException {
    final Host host = Host.getHost(0);
    VM server0 = host.getVM(0);
    VM server1 = host.getVM(1);
    VM server2 = host.getVM(2);
    VM client = host.getVM(3);
    final int numberOfEntries = 10;
    // Start server1 and create index
    server0.invoke(new CacheSerializableRunnable("Create Server1") {

        public void run2() throws CacheException {
            configAndStartBridgeServer(false, false, false);
            // create a local query service
            QueryService localQueryService = null;
            try {
                localQueryService = getCache().getQueryService();
            } catch (Exception e) {
                Assert.fail("Failed to get QueryService.", e);
            }
            Index index = null;
            // create an index on pos.secIdIndexed and verify that a RangeIndex is created
            try {
                index = localQueryService.createIndex("secIdIndex2", "pos.secIdIndexed", regName + " p, p.positions.values pos");
                if (!(index instanceof RangeIndex)) {
                    fail("Range Index should have been created instead of " + index.getClass());
                }
            } catch (Exception ex) {
                fail("Failed to create index." + ex.getMessage());
            }
        }
    });
    // Start server2
    server1.invoke(new CacheSerializableRunnable("Create Server2") {

        public void run2() throws CacheException {
            configAndStartBridgeServer(false, false, false);
            Region region = getRootRegion().getSubregion(regionName);
        }
    });
    // Start server3
    server2.invoke(new CacheSerializableRunnable("Create Server3") {

        public void run2() throws CacheException {
            configAndStartBridgeServer(false, false, false);
            Region region = getRootRegion().getSubregion(regionName);
        }
    });
    // Client pool.
    final int port0 = server0.invoke(() -> PdxStringQueryDUnitTest.getCacheServerPort());
    final int port1 = server1.invoke(() -> PdxStringQueryDUnitTest.getCacheServerPort());
    final int port2 = server2.invoke(() -> PdxStringQueryDUnitTest.getCacheServerPort());
    final String host0 = NetworkUtils.getServerHostName(server0.getHost());
    // Create client pool.
    final String poolName = "testClientServerQueryPool";
    createPool(client, poolName, new String[] { host0 }, new int[] { port0, port1, port2 }, true);
    // Create client region and put PortfolioPdx objects (PdxInstances)
    client.invoke(new CacheSerializableRunnable("Create client") {

        public void run2() throws CacheException {
            AttributesFactory factory = new AttributesFactory();
            factory.setScope(Scope.LOCAL);
            ClientServerTestCase.configureConnectionPool(factory, host0, port1, -1, true, -1, -1, null);
            Region region = createRegion(regionName, rootRegionName, factory.create());
            LogWriterUtils.getLogWriter().info("Put PortfolioPdx");
            for (int i = 0; i < numberOfEntries; i++) {
                region.put("key-" + i, new PortfolioPdx(i));
            }
        }
    });
    // Execute queries from client to server and locally on client
    SerializableRunnable executeQueries = new CacheSerializableRunnable("Execute queries") {

        public void run2() throws CacheException {
            QueryService remoteQueryService = null;
            QueryService localQueryService = null;
            SelectResults[][] rs = new SelectResults[1][2];
            SelectResults[] resWithoutIndexRemote = new SelectResults[queryString.length];
            SelectResults[] resWithIndexRemote = new SelectResults[queryString2.length];
            SelectResults[] resWithoutIndexLocal = new SelectResults[queryString.length];
            SelectResults[] resWithIndexLocal = new SelectResults[queryString2.length];
            try {
                remoteQueryService = (PoolManager.find(poolName)).getQueryService();
                localQueryService = getCache().getQueryService();
            } catch (Exception e) {
                Assert.fail("Failed to get QueryService.", e);
            }
            for (int i = 0; i < queryString.length; i++) {
                try {
                    LogWriterUtils.getLogWriter().info("### Executing Query on remote server:" + queryString[i]);
                    Query query = remoteQueryService.newQuery(queryString[i]);
                    rs[0][0] = (SelectResults) query.execute();
                    resWithoutIndexRemote[i] = rs[0][0];
                    LogWriterUtils.getLogWriter().info("RR remote indexType: no index  size of resultset: " + rs[0][0].size() + " for query: " + queryString[i]);
                    checkForPdxString(rs[0][0].asList(), queryString[i]);
                    LogWriterUtils.getLogWriter().info("### Executing Query locally on client:" + queryString[i]);
                    query = localQueryService.newQuery(queryString[i]);
                    rs[0][1] = (SelectResults) query.execute();
                    resWithoutIndexLocal[i] = rs[0][1];
                    LogWriterUtils.getLogWriter().info("RR  client local indexType:no index size of resultset: " + rs[0][1].size() + " for query: " + queryString[i]);
                    checkForPdxString(rs[0][1].asList(), queryString[i]);
                } catch (Exception e) {
                    Assert.fail("Failed executing " + queryString[i], e);
                }
                try {
                    // to compare remote query results with and without index
                    LogWriterUtils.getLogWriter().info("### Executing Query on remote server for region2:" + queryString2[i]);
                    Query query = remoteQueryService.newQuery(queryString2[i]);
                    resWithIndexRemote[i] = (SelectResults) query.execute();
                    LogWriterUtils.getLogWriter().info("RR  remote region2 size of resultset: " + resWithIndexRemote[i].size() + " for query: " + queryString2[i]);
                    checkForPdxString(resWithIndexRemote[i].asList(), queryString2[i]);
                    // to compare local query results with and without index
                    LogWriterUtils.getLogWriter().info("### Executing Query on local for region2:" + queryString2[i]);
                    query = localQueryService.newQuery(queryString2[i]);
                    resWithIndexLocal[i] = (SelectResults) query.execute();
                    LogWriterUtils.getLogWriter().info("RR  local region2 size of resultset: " + resWithIndexLocal[i].size() + " for query: " + queryString2[i]);
                    checkForPdxString(resWithIndexLocal[i].asList(), queryString2[i]);
                } catch (Exception e) {
                    Assert.fail("Failed executing " + queryString2[i], e);
                }
                if (i < orderByQueryIndex) {
                    // Compare local and remote query results.
                    if (!compareResultsOfWithAndWithoutIndex(rs)) {
                        fail("Local and Remote Query Results are not matching for query :" + queryString[i]);
                    }
                } else {
                    // compare the order of results returned
                    compareResultsOrder(rs, false);
                }
            }
            // compare remote query results with and without index
            for (int i = 0; i < queryString.length; i++) {
                rs[0][0] = resWithoutIndexRemote[i];
                rs[0][1] = resWithIndexRemote[i];
                if (i < orderByQueryIndex) {
                    if (!compareResultsOfWithAndWithoutIndex(rs)) {
                        fail("Results with and without index are not matching for query :" + queryString2[i]);
                    }
                } else {
                    // compare the order of results returned
                    compareResultsOrder(rs, false);
                }
            }
            // compare local query results with and without index
            for (int i = 0; i < queryString.length; i++) {
                rs[0][0] = resWithoutIndexLocal[i];
                rs[0][1] = resWithIndexLocal[i];
                if (i < orderByQueryIndex) {
                    if (!compareResultsOfWithAndWithoutIndex(rs)) {
                        fail("Results with and without index are not matching for query :" + queryString2[i]);
                    }
                } else {
                    // compare the order of results returned
                    compareResultsOrder(rs, false);
                }
            }
        }
    };
    client.invoke(executeQueries);
    // Put Non Pdx objects on server execute queries locally
    server0.invoke(new CacheSerializableRunnable("Create Bridge Server") {

        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(regionName);
            LogWriterUtils.getLogWriter().info("Put Objects locally on server");
            for (int i = numberOfEntries; i < numberOfEntries * 2; i++) {
                region.put("key-" + i, new Portfolio(i));
            }
            QueryService localQueryService = getCache().getQueryService();
            // Query server1 locally to check if PdxString is not being returned
            for (int i = 0; i < queryString.length; i++) {
                try {
                    LogWriterUtils.getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
                    SelectResults rs = (SelectResults) localQueryService.newQuery(queryString[i]).execute();
                    LogWriterUtils.getLogWriter().info("RR server local indexType: no  size of resultset: " + rs.size() + " for query: " + queryString[i]);
                    // The results should not be PdxString
                    checkForPdxString(rs.asList(), queryString[i]);
                } catch (Exception e) {
                    Assert.fail("Failed executing " + queryString[i], e);
                }
                try {
                    SelectResults rs = (SelectResults) localQueryService.newQuery(queryString2[i]).execute();
                    LogWriterUtils.getLogWriter().info("RR server local indexType: no size of resultset: " + rs.size() + " for query: " + queryString2[i]);
                    // The results should not be PdxString
                    checkForPdxString(rs.asList(), queryString2[i]);
                } catch (Exception e) {
                    Assert.fail("Failed executing " + queryString2[i], e);
                }
            }
        }
    });
    // test for readSerialized flag
    server0.invoke(new CacheSerializableRunnable("Create Bridge Server") {

        public void run2() throws CacheException {
            GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
            cache.setReadSerialized(true);
            QueryService localQueryService = getCache().getQueryService();
            // Query server1 locally to check if PdxString is not being returned
            for (int i = 0; i < queryString.length; i++) {
                try {
                    LogWriterUtils.getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
                    SelectResults rs = (SelectResults) localQueryService.newQuery(queryString[i]).execute();
                    LogWriterUtils.getLogWriter().info("isPR: false server local readSerializedTrue: indexType: false size of resultset: " + rs.size() + " for query: " + queryString[i]);
                    // The results should not be PdxString
                    checkForPdxString(rs.asList(), queryString[i]);
                } catch (Exception e) {
                    Assert.fail("Failed executing " + queryString[i], e);
                }
            }
        }
    });
    // test for readSerialized flag on client
    client.invoke(new CacheSerializableRunnable("Create client") {

        public void run2() throws CacheException {
            GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
            cache.setReadSerialized(true);
            QueryService remoteQueryService = (PoolManager.find(poolName)).getQueryService();
            // Query server1 remotely to check if PdxString is not being returned
            for (int i = 0; i < queryString.length; i++) {
                try {
                    LogWriterUtils.getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
                    SelectResults rs = (SelectResults) remoteQueryService.newQuery(queryString[i]).execute();
                    LogWriterUtils.getLogWriter().info("RR server remote readSerializedTrue: indexType: false size of resultset: " + rs.size() + " for query: " + queryString[i]);
                    // The results should not be PdxString
                    checkForPdxString(rs.asList(), queryString[i]);
                } catch (Exception e) {
                    Assert.fail("Failed executing " + queryString[i], e);
                }
            }
        }
    });
    closeClient(server2);
    closeClient(client);
    closeClient(server1);
    closeClient(server0);
}
Also used : DefaultQuery(org.apache.geode.cache.query.internal.DefaultQuery) Query(org.apache.geode.cache.query.Query) CacheException(org.apache.geode.cache.CacheException) Portfolio(org.apache.geode.cache.query.data.Portfolio) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) Host(org.apache.geode.test.dunit.Host) RangeIndex(org.apache.geode.cache.query.internal.index.RangeIndex) Index(org.apache.geode.cache.query.Index) CompactRangeIndex(org.apache.geode.cache.query.internal.index.CompactRangeIndex) PartitionedIndex(org.apache.geode.cache.query.internal.index.PartitionedIndex) PortfolioPdx(org.apache.geode.cache.query.data.PortfolioPdx) PdxString(org.apache.geode.pdx.internal.PdxString) IgnoredException(org.apache.geode.test.dunit.IgnoredException) IOException(java.io.IOException) CacheException(org.apache.geode.cache.CacheException) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) SelectResults(org.apache.geode.cache.query.SelectResults) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) QueryService(org.apache.geode.cache.query.QueryService) VM(org.apache.geode.test.dunit.VM) Region(org.apache.geode.cache.Region) GemFireCacheImpl(org.apache.geode.internal.cache.GemFireCacheImpl) RangeIndex(org.apache.geode.cache.query.internal.index.RangeIndex) CompactRangeIndex(org.apache.geode.cache.query.internal.index.CompactRangeIndex) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest) Test(org.junit.Test)
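Two different QueryService objects recur throughout this test: one obtained from the client pool, which ships OQL to the servers, and one local to the cache, which queries only the client's own cached data. A minimal sketch of that split follows; the localhost:40404 endpoint, the "portfolios" region, and the query string are assumptions, while the pool name and the subscription setup mirror the test.

import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;
import org.apache.geode.cache.client.Pool;
import org.apache.geode.cache.client.PoolManager;
import org.apache.geode.cache.query.QueryService;
import org.apache.geode.cache.query.SelectResults;

public class RemoteVersusLocalQuerySketch {
    public static void main(String[] args) throws Exception {
        ClientCache cache = new ClientCacheFactory().create();

        // Hypothetical endpoint; the test wires three dynamically chosen ports into the pool.
        Pool pool = PoolManager.createFactory()
            .addServer("localhost", 40404)
            .setSubscriptionEnabled(true)
            .create("testClientServerQueryPool");

        // A caching proxy region so the client keeps a local copy of the data it registers for.
        Region<String, Object> region = cache
            .<String, Object>createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY)
            .setPoolName("testClientServerQueryPool")
            .create("portfolios");
        region.registerInterest("ALL_KEYS");

        // Remote query service: OQL is shipped to the servers behind the pool.
        QueryService remote = pool.getQueryService();
        SelectResults<?> remoteResults = (SelectResults<?>) remote
            .newQuery("select distinct * from /portfolios p where p.status = 'inactive'")
            .execute();

        // Local query service: OQL runs only against the client's locally cached entries.
        QueryService local = cache.getLocalQueryService();
        SelectResults<?> localResults = (SelectResults<?>) local
            .newQuery("select distinct * from /portfolios p where p.status = 'inactive'")
            .execute();

        System.out.println("remote=" + remoteResults.size() + ", local=" + localResults.size());
        cache.close();
    }
}

Comparing the two result sets, as the test's executeQueries runnable does, is what verifies that the locally cached PDX data answers the query the same way the servers do.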

Example 20 with GemFireCacheImpl

use of org.apache.geode.internal.cache.GemFireCacheImpl in project geode by apache.

the class PdxStringQueryDUnitTest method testPartitionRegionCompactRangeIndex.

@Test
public void testPartitionRegionCompactRangeIndex() throws CacheException {
    final Host host = Host.getHost(0);
    VM server0 = host.getVM(0);
    VM server1 = host.getVM(1);
    VM server2 = host.getVM(2);
    VM client = host.getVM(3);
    final int numberOfEntries = 10;
    final boolean isPr = true;
    // Start server1 and create index
    server0.invoke(new CacheSerializableRunnable("Create Server1") {

        public void run2() throws CacheException {
            configAndStartBridgeServer(isPr, false, false);
            // create a local query service
            QueryService localQueryService = null;
            try {
                localQueryService = getCache().getQueryService();
            } catch (Exception e) {
                Assert.fail("Failed to get QueryService.", e);
            }
            // Verify the type of index created
            Index index = null;
            try {
                index = localQueryService.createIndex("statusIndex", "status", regName);
                if (index instanceof PartitionedIndex) {
                    for (Object o : ((PartitionedIndex) index).getBucketIndexes()) {
                        if (!(o instanceof CompactRangeIndex)) {
                            fail("CompactRangeIndex Index should have been created instead of " + index.getClass());
                        }
                    }
                } else {
                    fail("Partitioned index expected");
                }
            } catch (Exception ex) {
                fail("Failed to create index." + ex.getMessage());
            }
        }
    });
    // Start server2
    server1.invoke(new CacheSerializableRunnable("Create Server2") {

        public void run2() throws CacheException {
            configAndStartBridgeServer(isPr, false, false);
            Region region = getRootRegion().getSubregion(regionName);
        }
    });
    // Start server3
    server2.invoke(new CacheSerializableRunnable("Create Server3") {

        public void run2() throws CacheException {
            configAndStartBridgeServer(isPr, false, false);
            Region region = getRootRegion().getSubregion(regionName);
        }
    });
    // Client pool.
    final int port0 = server0.invoke(() -> PdxStringQueryDUnitTest.getCacheServerPort());
    final int port1 = server1.invoke(() -> PdxStringQueryDUnitTest.getCacheServerPort());
    final int port2 = server2.invoke(() -> PdxStringQueryDUnitTest.getCacheServerPort());
    final String host0 = NetworkUtils.getServerHostName(server0.getHost());
    // Create client pool.
    final String poolName = "testClientServerQueryPool";
    createPool(client, poolName, new String[] { host0 }, new int[] { port0, port1, port2 }, true);
    // Create client region and put PortfolioPdx objects (PdxInstances)
    client.invoke(new CacheSerializableRunnable("Create client") {

        public void run2() throws CacheException {
            AttributesFactory factory = new AttributesFactory();
            factory.setScope(Scope.LOCAL);
            ClientServerTestCase.configureConnectionPool(factory, host0, port1, -1, true, -1, -1, null);
            Region region = createRegion(regionName, rootRegionName, factory.create());
            LogWriterUtils.getLogWriter().info("Put PortfolioPdx");
            for (int i = 0; i < numberOfEntries; i++) {
                region.put("key-" + i, new PortfolioPdx(i));
            }
        }
    });
    // Verify if all the index keys are PdxStrings
    server0.invoke(new CacheSerializableRunnable("Create Server") {

        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(regionName);
            QueryService localQueryService = getCache().getQueryService();
            Index index = localQueryService.getIndex(region, "statusIndex");
            if (index instanceof PartitionedIndex) {
                for (Object o : ((PartitionedIndex) index).getBucketIndexes()) {
                    CloseableIterator<IndexStoreEntry> iter = ((CompactRangeIndex) o).getIndexStorage().iterator(null);
                    while (iter.hasNext()) {
                        Object key = iter.next().getDeserializedKey();
                        if (!(key instanceof PdxString)) {
                            fail("All keys of the CompactRangeIndex in the Partitioned index should be PdxStrings and not " + key.getClass());
                        }
                    }
                }
            } else {
                fail("Partitioned index expected");
            }
        }
    });
    // Execute queries from client to server and locally on client
    SerializableRunnable executeQueries = new CacheSerializableRunnable("Execute queries") {

        public void run2() throws CacheException {
            QueryService remoteQueryService = null;
            QueryService localQueryService = null;
            SelectResults[][] rs = new SelectResults[1][2];
            try {
                remoteQueryService = (PoolManager.find(poolName)).getQueryService();
                localQueryService = getCache().getQueryService();
            } catch (Exception e) {
                Assert.fail("Failed to get QueryService.", e);
            }
            for (int i = 0; i < queryString.length; i++) {
                try {
                    LogWriterUtils.getLogWriter().info("### Executing Query on remote server:" + queryString[i]);
                    Query query = remoteQueryService.newQuery(queryString[i]);
                    rs[0][0] = (SelectResults) query.execute();
                    LogWriterUtils.getLogWriter().info("RR remote indexType:CompactRange size of resultset: " + rs[0][0].size() + " for query: " + queryString[i]);
                    checkForPdxString(rs[0][0].asList(), queryString[i]);
                    LogWriterUtils.getLogWriter().info("### Executing Query locally on client:" + queryString[i]);
                    query = localQueryService.newQuery(queryString[i]);
                    rs[0][1] = (SelectResults) query.execute();
                    LogWriterUtils.getLogWriter().info("isPR: " + isPr + "  client local indexType:CompactRange size of resultset: " + rs[0][1].size() + " for query: " + queryString[i]);
                    checkForPdxString(rs[0][1].asList(), queryString[i]);
                    if (i < orderByQueryIndex) {
                        // Compare local and remote query results.
                        if (!compareResultsOfWithAndWithoutIndex(rs)) {
                            fail("Local and Remote Query Results are not matching for query :" + queryString[i]);
                        }
                    } else {
                        // compare the order of results returned
                        compareResultsOrder(rs, isPr);
                    }
                } catch (Exception e) {
                    Assert.fail("Failed executing " + queryString[i], e);
                }
            }
        }
    };
    client.invoke(executeQueries);
    // Put Non Pdx objects on server execute queries locally
    server0.invoke(new CacheSerializableRunnable("Create Bridge Server") {

        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(regionName);
            LogWriterUtils.getLogWriter().info("Put Objects locally on server");
            for (int i = numberOfEntries; i < numberOfEntries * 2; i++) {
                region.put("key-" + i, new Portfolio(i));
            }
            QueryService localQueryService = getCache().getQueryService();
            // Query server1 locally to check if PdxString is not being returned
            for (int i = 0; i < queryString.length; i++) {
                try {
                    LogWriterUtils.getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
                    SelectResults rs = (SelectResults) localQueryService.newQuery(queryString[i]).execute();
                    LogWriterUtils.getLogWriter().info("RR server local indexType:Range  size of resultset: " + rs.size() + " for query: " + queryString[i]);
                    // The results should not be PdxString
                    checkForPdxString(rs.asList(), queryString[i]);
                } catch (Exception e) {
                    Assert.fail("Failed executing " + queryString[i], e);
                }
            }
        }
    });
    // test for readSerialized flag
    server0.invoke(new CacheSerializableRunnable("Create Bridge Server") {

        public void run2() throws CacheException {
            GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
            cache.setReadSerialized(true);
            QueryService localQueryService = getCache().getQueryService();
            // Query server1 locally to check if PdxString is not being returned
            for (int i = 0; i < queryString.length; i++) {
                try {
                    LogWriterUtils.getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
                    SelectResults rs = (SelectResults) localQueryService.newQuery(queryString[i]).execute();
                    LogWriterUtils.getLogWriter().info("isPR: " + isPr + " server local readSerializedTrue: indexType:CompactRange size of resultset: " + rs.size() + " for query: " + queryString[i]);
                    // The results should not be PdxString
                    checkForPdxString(rs.asList(), queryString[i]);
                } catch (Exception e) {
                    Assert.fail("Failed executing " + queryString[i], e);
                }
            }
        }
    });
    // test for readSerialized flag on client
    client.invoke(new CacheSerializableRunnable("Create client") {

        public void run2() throws CacheException {
            GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
            cache.setReadSerialized(true);
            QueryService remoteQueryService = (PoolManager.find(poolName)).getQueryService();
            // Query server1 remotely to check if PdxString is not being returned
            for (int i = 0; i < queryString.length; i++) {
                try {
                    LogWriterUtils.getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
                    SelectResults rs = (SelectResults) remoteQueryService.newQuery(queryString[i]).execute();
                    LogWriterUtils.getLogWriter().info("RR server remote readSerializedTrue: indexType: indexType:CompactRange size of resultset: " + rs.size() + " for query: " + queryString[i]);
                    // The results should not be PdxString
                    checkForPdxString(rs.asList(), queryString[i]);
                } catch (Exception e) {
                    Assert.fail("Failed executing " + queryString[i], e);
                }
            }
        }
    });
    closeClient(server2);
    closeClient(client);
    closeClient(server1);
    closeClient(server0);
}
Also used : CloseableIterator(org.apache.geode.internal.cache.persistence.query.CloseableIterator) DefaultQuery(org.apache.geode.cache.query.internal.DefaultQuery) Query(org.apache.geode.cache.query.Query) CacheException(org.apache.geode.cache.CacheException) Portfolio(org.apache.geode.cache.query.data.Portfolio) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) Host(org.apache.geode.test.dunit.Host) RangeIndex(org.apache.geode.cache.query.internal.index.RangeIndex) Index(org.apache.geode.cache.query.Index) CompactRangeIndex(org.apache.geode.cache.query.internal.index.CompactRangeIndex) PartitionedIndex(org.apache.geode.cache.query.internal.index.PartitionedIndex) PortfolioPdx(org.apache.geode.cache.query.data.PortfolioPdx) PdxString(org.apache.geode.pdx.internal.PdxString) PdxString(org.apache.geode.pdx.internal.PdxString) IgnoredException(org.apache.geode.test.dunit.IgnoredException) IOException(java.io.IOException) CacheException(org.apache.geode.cache.CacheException) PartitionedIndex(org.apache.geode.cache.query.internal.index.PartitionedIndex) CompactRangeIndex(org.apache.geode.cache.query.internal.index.CompactRangeIndex) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) SelectResults(org.apache.geode.cache.query.SelectResults) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) QueryService(org.apache.geode.cache.query.QueryService) VM(org.apache.geode.test.dunit.VM) Region(org.apache.geode.cache.Region) GemFireCacheImpl(org.apache.geode.internal.cache.GemFireCacheImpl) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest) Test(org.junit.Test)
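The index checks in this test reach into internal classes (PartitionedIndex, CompactRangeIndex, PdxString) because the question being asked, whether index keys are stored as PdxString, has no public accessor. What can be inspected through the public API is sketched below; the standalone CacheFactory setup and the region name are assumptions, while the index name and expression follow the test.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.query.Index;
import org.apache.geode.cache.query.QueryService;

public class IndexInspectionSketch {
    public static void main(String[] args) throws Exception {
        Cache cache = new CacheFactory()
            .setPdxReadSerialized(true) // keep values as PdxInstances on the server
            .create();
        Region<String, Object> region = cache
            .<String, Object>createRegionFactory(RegionShortcut.PARTITION)
            .create("portfolios");

        QueryService qs = cache.getQueryService();
        Index statusIndex = qs.createIndex("statusIndex", "status", "/portfolios");

        // The DUnit test casts to internal types to walk bucket indexes and key
        // storage; the public surface only exposes the index metadata below.
        System.out.println(statusIndex.getName()
            + " type=" + statusIndex.getType()
            + " expr=" + statusIndex.getIndexedExpression()
            + " from=" + statusIndex.getFromClause());

        cache.close();
    }
}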

Aggregations

GemFireCacheImpl (org.apache.geode.internal.cache.GemFireCacheImpl): 213
Test (org.junit.Test): 127
Region (org.apache.geode.cache.Region): 86
DistributedTest (org.apache.geode.test.junit.categories.DistributedTest): 55
LocalRegion (org.apache.geode.internal.cache.LocalRegion): 54
PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion): 51
VM (org.apache.geode.test.dunit.VM): 49
DistributedRegion (org.apache.geode.internal.cache.DistributedRegion): 47
Host (org.apache.geode.test.dunit.Host): 42
ClientCacheCreation (org.apache.geode.internal.cache.xmlcache.ClientCacheCreation): 40
RegionAttributes (org.apache.geode.cache.RegionAttributes): 39
CacheCreation (org.apache.geode.internal.cache.xmlcache.CacheCreation): 35
SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable): 33
CacheException (org.apache.geode.cache.CacheException): 32
RegionCreation (org.apache.geode.internal.cache.xmlcache.RegionCreation): 32
SerializableCallable (org.apache.geode.test.dunit.SerializableCallable): 31
Properties (java.util.Properties): 24
AttributesFactory (org.apache.geode.cache.AttributesFactory): 24
Cache (org.apache.geode.cache.Cache): 23
IntegrationTest (org.apache.geode.test.junit.categories.IntegrationTest): 23