Example 81 with ClientCache

use of org.apache.geode.cache.client.ClientCache in project geode by apache.

the class PdxQueryDUnitTest method testDefaultValuesInPdxFieldTypes.

/**
   * Test query execution when default values of {@link FieldType} are used. This happens when one
   * version of a Pdx object does not have a field that another version has.
   *
   * @throws Exception
   */
@Test
public void testDefaultValuesInPdxFieldTypes() throws Exception {
    final Host host = Host.getHost(0);
    final VM vm0 = host.getVM(0);
    final VM vm1 = host.getVM(1);
    final int numberOfEntries = 10;
    final String name = "/" + regionName;
    final String query = "select stringField, booleanField, charField, shortField, intField, longField, floatField, doubleField from " + name;
    // Start server1
    final int port1 = (Integer) vm0.invoke(new SerializableCallable("Create Server1") {

        @Override
        public Object call() throws Exception {
            Region r1 = getCache().createRegionFactory(RegionShortcut.REPLICATE).create(regionName);
            CacheServer server = getCache().addCacheServer();
            int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
            server.setPort(port);
            server.start();
            return port;
        }
    });
    // client loads version1 and version2 objects on server
    vm1.invoke(new SerializableCallable("Create client") {

        @Override
        public Object call() throws Exception {
            ClientCacheFactory cf = new ClientCacheFactory();
            cf.addPoolServer(NetworkUtils.getServerHostName(vm0.getHost()), port1);
            ClientCache cache = getClientCache(cf);
            Region region = cache.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY).create(regionName);
            // Load version 1 objects
            for (int i = 0; i < numberOfEntries; i++) {
                PdxInstanceFactory pdxInstanceFactory = PdxInstanceFactoryImpl.newCreator("PdxVersionedFieldType", false);
                pdxInstanceFactory.writeString("stringField", "" + i);
                pdxInstanceFactory.writeBoolean("booleanField", (i % 2 == 0 ? true : false));
                pdxInstanceFactory.writeChar("charField", ((char) i));
                pdxInstanceFactory.writeShort("shortField", new Integer(i).shortValue());
                PdxInstance pdxInstance = pdxInstanceFactory.create();
                logger.info("Putting object: " + pdxInstance);
                region.put("key-" + i, pdxInstance);
            }
            // Load version 2 objects
            for (int i = numberOfEntries; i < numberOfEntries * 2; i++) {
                PdxInstanceFactory pdxInstanceFactory = PdxInstanceFactoryImpl.newCreator("PdxVersionedFieldType", false);
                pdxInstanceFactory.writeInt("intField", i);
                pdxInstanceFactory.writeLong("longField", new Integer(i + 1).longValue());
                pdxInstanceFactory.writeFloat("floatField", new Integer(i + 2).floatValue());
                pdxInstanceFactory.writeDouble("doubleField", new Integer(i + 3).doubleValue());
                PdxInstance pdxInstance = pdxInstanceFactory.create();
                logger.info("Putting object: " + pdxInstance);
                region.put("key-" + i, pdxInstance);
            }
            return null;
        }
    });
    // query locally on server, create index, verify results with and without index
    vm0.invoke(new SerializableCallable("Create index") {

        @Override
        public Object call() throws Exception {
            GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
            cache.setReadSerialized(true);
            QueryService qs = null;
            SelectResults[][] sr = new SelectResults[1][2];
            // Execute query locally
            try {
                qs = getCache().getQueryService();
            } catch (Exception e) {
                Assert.fail("Failed to get QueryService.", e);
            }
            try {
                sr[0][0] = (SelectResults) qs.newQuery(query).execute();
                assertEquals(20, sr[0][0].size());
            } catch (Exception e) {
                Assert.fail("Failed executing " + qs, e);
            }
            // create index
            try {
                qs.createIndex("stringIndex", "stringField", name);
                qs.createIndex("booleanIndex", "booleanField", name);
                qs.createIndex("shortIndex", "shortField", name);
                qs.createIndex("charIndex", "charField", name);
                qs.createIndex("intIndex", "intField", name);
                qs.createIndex("longIndex", "longField", name);
                qs.createIndex("floatIndex", "floatField", name);
                qs.createIndex("doubleIndex", "doubleField", name);
            } catch (Exception e) {
                Assert.fail("Exception creating index ", e);
            }
            // query after index creation
            try {
                sr[0][1] = (SelectResults) qs.newQuery(query).execute();
                assertEquals(20, sr[0][1].size());
            } catch (Exception e) {
                Assert.fail("Failed executing " + qs, e);
            }
            CacheUtils.compareResultsOfWithAndWithoutIndex(sr);
            return null;
        }
    });
    // Update index
    vm1.invoke(new SerializableCallable("update index") {

        @Override
        public Object call() throws Exception {
            Region region = getCache().getRegion(regionName);
            // Load version 1 objects
            for (int i = numberOfEntries; i < numberOfEntries * 2; i++) {
                PdxInstanceFactory pdxInstanceFactory = PdxInstanceFactoryImpl.newCreator("PdxVersionedFieldType", false);
                pdxInstanceFactory.writeString("stringField", "" + i);
                pdxInstanceFactory.writeBoolean("booleanField", (i % 2 == 0 ? true : false));
                pdxInstanceFactory.writeChar("charField", ((char) i));
                pdxInstanceFactory.writeShort("shortField", new Integer(i).shortValue());
                PdxInstance pdxInstance = pdxInstanceFactory.create();
                logger.info("Putting object: " + pdxInstance);
                region.put("key-" + i, pdxInstance);
            }
            // Load version 2 objects
            for (int i = 0; i < numberOfEntries; i++) {
                PdxInstanceFactory pdxInstanceFactory = PdxInstanceFactoryImpl.newCreator("PdxVersionedFieldType", false);
                pdxInstanceFactory.writeInt("intField", i);
                pdxInstanceFactory.writeLong("longField", new Integer(i + 1).longValue());
                pdxInstanceFactory.writeFloat("floatField", new Integer(i + 2).floatValue());
                pdxInstanceFactory.writeDouble("doubleField", new Integer(i + 3).doubleValue());
                PdxInstance pdxInstance = pdxInstanceFactory.create();
                logger.info("Putting object: " + pdxInstance);
                region.put("key-" + i, pdxInstance);
            }
            return null;
        }
    });
    // query remotely from client
    vm1.invoke(new SerializableCallable("query") {

        @Override
        public Object call() throws Exception {
            QueryService remoteQueryService = null;
            QueryService localQueryService = null;
            SelectResults[][] sr = new SelectResults[1][2];
            // Get both the remote and the local query service
            try {
                remoteQueryService = getCache().getQueryService();
                localQueryService = ((ClientCache) getCache()).getLocalQueryService();
            } catch (Exception e) {
                Assert.fail("Failed to get QueryService.", e);
            }
            try {
                sr[0][0] = (SelectResults) remoteQueryService.newQuery(query).execute();
                assertEquals(20, sr[0][0].size());
                sr[0][1] = (SelectResults) localQueryService.newQuery(query).execute();
                assertEquals(20, sr[0][1].size());
            } catch (Exception e) {
                fail("Failed executing query " + e);
            }
            CacheUtils.compareResultsOfWithAndWithoutIndex(sr);
            return null;
        }
    });
    Invoke.invokeInEveryVM(DistributedTestCase.class, "disconnectFromDS");
}
Also used : PdxInstanceFactory(org.apache.geode.pdx.PdxInstanceFactory) Host(org.apache.geode.test.dunit.Host) ClientCache(org.apache.geode.cache.client.ClientCache) CacheException(org.apache.geode.cache.CacheException) ClientCacheFactory(org.apache.geode.cache.client.ClientCacheFactory) SelectResults(org.apache.geode.cache.query.SelectResults) PdxInstance(org.apache.geode.pdx.PdxInstance) QueryService(org.apache.geode.cache.query.QueryService) VM(org.apache.geode.test.dunit.VM) SerializableCallable(org.apache.geode.test.dunit.SerializableCallable) Region(org.apache.geode.cache.Region) CacheServer(org.apache.geode.cache.server.CacheServer) GemFireCacheImpl(org.apache.geode.internal.cache.GemFireCacheImpl) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest) Test(org.junit.Test)
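
The test above relies on the PDX type registry merging two versions of the same type name, so a query can still reference fields that only one version wrote. Below is a minimal sketch of that idea against the public client API, not code from the test: the endpoint (localhost:40404), region name (exampleRegion), and type name (example.VersionedType) are placeholders, and it assumes a cache server hosting that region is already running.

import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;
import org.apache.geode.cache.query.SelectResults;
import org.apache.geode.pdx.PdxInstance;

public class PdxVersionedFieldSketch {
    public static void main(String[] args) throws Exception {
        ClientCache cache = new ClientCacheFactory()
            .addPoolServer("localhost", 40404)       // placeholder server endpoint
            .create();
        Region<String, PdxInstance> region = cache
            .<String, PdxInstance>createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY)
            .create("exampleRegion");                // placeholder region name

        // "Version 1" of the type writes only stringField.
        PdxInstance v1 = cache.createPdxInstanceFactory("example.VersionedType")
            .writeString("stringField", "v1-entry")
            .create();
        // "Version 2" of the same type name writes only intField.
        PdxInstance v2 = cache.createPdxInstanceFactory("example.VersionedType")
            .writeInt("intField", 42)
            .create();
        region.put("k1", v1);
        region.put("k2", v2);

        // A projection over both fields still executes; the entry whose version lacks
        // a field contributes that field's default value rather than causing an error.
        SelectResults<?> results = (SelectResults<?>) cache.getQueryService()
            .newQuery("select stringField, intField from /exampleRegion")
            .execute();
        System.out.println(results.size());         // expected: 2 rows

        cache.close();
    }
}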

Example 82 with ClientCache

use of org.apache.geode.cache.client.ClientCache in project geode by apache.

the class PdxStringQueryDUnitTest method testPRQueryForDuplicates.

/**
   * Test to verify that duplicate results are not accumulated when PdxString is used in a PR
   * query.
   * 
   * @throws CacheException
   */
@Test
public void testPRQueryForDuplicates() throws CacheException {
    final String regionName = "exampleRegion";
    final Host host = Host.getHost(0);
    final VM vm0 = host.getVM(0);
    final VM vm1 = host.getVM(1);
    final VM vm2 = host.getVM(2);
    final String name = "/" + regionName;
    final String[] qs = { "select distinct pkid from " + name, "select distinct pkid, status from " + name };
    // Start server1
    final int port1 = (Integer) vm0.invoke(new SerializableCallable("Create Server1") {

        @Override
        public Object call() throws Exception {
            Region r1 = getCache().createRegionFactory(RegionShortcut.PARTITION).create(regionName);
            CacheServer server = getCache().addCacheServer();
            int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
            server.setPort(port);
            server.start();
            return port;
        }
    });
    // Start server2
    final int port2 = (Integer) vm1.invoke(new SerializableCallable("Create Server2") {

        @Override
        public Object call() throws Exception {
            Region r1 = getCache().createRegionFactory(RegionShortcut.PARTITION).create(regionName);
            CacheServer server = getCache().addCacheServer();
            int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
            server.setPort(port);
            server.start();
            return port;
        }
    });
    // create client, load data, and execute queries
    vm2.invoke(new SerializableCallable("Create client") {

        @Override
        public Object call() throws Exception {
            ClientCacheFactory cf = new ClientCacheFactory();
            cf.addPoolServer(NetworkUtils.getServerHostName(vm0.getHost()), port1);
            cf.addPoolServer(NetworkUtils.getServerHostName(vm1.getHost()), port2);
            ClientCache cache = getClientCache(cf);
            Region region = cache.createClientRegionFactory(ClientRegionShortcut.PROXY).create(regionName);
            // Put Portfolios with 2 different pkids
            for (int set = 1; set <= 2; set++) {
                for (int current = 1; current <= 5; current++) {
                    region.put("key-" + set + "_" + current, new PortfolioPdx(set, current));
                }
            }
            for (int i = 0; i < qs.length; i++) {
                SelectResults sr = (SelectResults) cache.getQueryService().newQuery(qs[i]).execute();
                assertEquals("Did not get expected result from query: " + qs[i] + " ", 2, sr.size());
            }
            return null;
        }
    });
    // execute query on server by setting DefaultQuery.setPdxReadSerialized
    // to simulate remote query
    vm0.invoke(new SerializableCallable("Execute queries on server") {

        @Override
        public Object call() throws Exception {
            DefaultQuery.setPdxReadSerialized(true);
            try {
                for (int i = 0; i < qs.length; i++) {
                    SelectResults sr = (SelectResults) getCache().getQueryService().newQuery(qs[i]).execute();
                    assertEquals("Did not get expected result from query: " + qs[i] + " ", 2, sr.size());
                }
            } finally {
                DefaultQuery.setPdxReadSerialized(false);
            }
            return null;
        }
    });
    disconnectAllFromDS();
}
Also used : Host(org.apache.geode.test.dunit.Host) PortfolioPdx(org.apache.geode.cache.query.data.PortfolioPdx) PdxString(org.apache.geode.pdx.internal.PdxString) ClientCache(org.apache.geode.cache.client.ClientCache) IgnoredException(org.apache.geode.test.dunit.IgnoredException) IOException(java.io.IOException) CacheException(org.apache.geode.cache.CacheException) ClientCacheFactory(org.apache.geode.cache.client.ClientCacheFactory) SelectResults(org.apache.geode.cache.query.SelectResults) VM(org.apache.geode.test.dunit.VM) SerializableCallable(org.apache.geode.test.dunit.SerializableCallable) Region(org.apache.geode.cache.Region) CacheServer(org.apache.geode.cache.server.CacheServer) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest) Test(org.junit.Test)
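
In the server-side block above, the test flips the internal DefaultQuery.setPdxReadSerialized switch only to simulate how a remote query sees values in serialized (PdxString/PdxInstance) form. Application code would normally enable this cache-wide through the factory's pdx-read-serialized setting instead. A hedged sketch, with the region name and query as placeholders:

import java.util.Properties;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.query.SelectResults;

public class PdxReadSerializedSketch {
    public static void main(String[] args) throws Exception {
        // Standalone peer cache with pdx-read-serialized enabled up front, so query
        // execution works on serialized PDX values instead of deserialized domain objects.
        Cache cache = new CacheFactory(new Properties())
            .setPdxReadSerialized(true)
            .create();
        cache.createRegionFactory(RegionShortcut.PARTITION).create("exampleRegion");

        // ... after entries are loaded, queries run over the serialized form.
        SelectResults<?> sr = (SelectResults<?>) cache.getQueryService()
            .newQuery("select distinct pkid from /exampleRegion")
            .execute();
        System.out.println(sr.size());

        cache.close();
    }
}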

Example 83 with ClientCache

use of org.apache.geode.cache.client.ClientCache in project geode by apache.

the class QueryUsingFunctionContextDUnitTest method createCacheClientWithoutReg.

private void createCacheClientWithoutReg(String host, Integer port1, Integer port2, Integer port3) {
    this.disconnectFromDS();
    ClientCache cache = new ClientCacheFactory().addPoolServer(host, port1).addPoolServer(host, port2).addPoolServer(host, port3).create();
}
Also used : ClientCache(org.apache.geode.cache.client.ClientCache) ClientCacheFactory(org.apache.geode.cache.client.ClientCacheFactory)
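
createCacheClientWithoutReg connects a client to several servers without creating any client-side regions. Such a client can still ship OQL to the servers through its pool's query service; a minimal sketch of that usage follows (the ports and region name are placeholders, and it assumes the servers host /exampleRegion):

import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.query.SelectResults;

public class RegionlessClientQuerySketch {
    public static void main(String[] args) throws Exception {
        // Mirrors the helper above: pool several servers, create no client regions.
        ClientCache cache = new ClientCacheFactory()
            .addPoolServer("localhost", 40404)
            .addPoolServer("localhost", 40405)
            .addPoolServer("localhost", 40406)
            .create();

        // With no local region, the query is executed on the servers via the default pool.
        SelectResults<?> sr = (SelectResults<?>) cache.getQueryService()
            .newQuery("select * from /exampleRegion p where p.ID > 0")
            .execute();
        System.out.println(sr.size());

        cache.close();
    }
}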

Example 84 with ClientCache

use of org.apache.geode.cache.client.ClientCache in project geode by apache.

the class PdxLocalQueryDUnitTest method testLocalPdxQueries.

@Test
public void testLocalPdxQueries() throws Exception {
    final Host host = Host.getHost(0);
    final VM server1 = host.getVM(1);
    final VM client = host.getVM(2);
    final int numberOfEntries = 10;
    final String name = "/" + regionName;
    final String name2 = "/" + regionName2;
    final String[] queries = {
        "select * from " + name + " where position1 = $1",
        "select * from " + name + " where aDay = $1",
        "select distinct * from " + name + " p where p.status = 'inactive'", // numberOfEntries
        "select distinct p.status from " + name + " p where p.status = 'inactive'", // 1
        "select p from " + name + " p where p.status = 'inactive'", // numberOfEntries
        "select * from " + name + " p, p.positions.values v where v.secId = 'IBM'", // 4
        "select v from " + name + " p, p.positions.values v where v.secId = 'IBM'", // 4
        "select p.status from " + name + " p where p.status = 'inactive'", // numberOfEntries
        "select distinct * from " + name + " p where p.status = 'inactive' order by p.ID", // numberOfEntries
        "select * from " + name + " p where p.status = 'inactive' or p.ID > 0", // 19
        "select * from " + name + " p where p.status = 'inactive' and p.ID >= 0", // numberOfEntries
        "select * from " + name + " p where p.status in set ('inactive', 'active')", // numberOfEntries*2
        "select * from " + name + " p where p.ID > 0 and p.ID < 10", // 9
        "select v from " + name + " p, p.positions.values v where p.status = 'inactive'", // numberOfEntries*2
        "select v.secId from " + name + " p, p.positions.values v where p.status = 'inactive'", // numberOfEntries*2
        "select distinct p from " + name + " p, p.positions.values v where p.status = 'inactive' and v.pid >= 0", // numberOfEntries
        "select distinct p from " + name + " p, p.positions.values v where p.status = 'inactive' or v.pid > 0", // numberOfEntries*2
        "select distinct * from " + name + " p, p.positions.values v where p.status = 'inactive'", // numberOfEntries*2
        "select * from " + name + ".values v where v.status = 'inactive'", // numberOfEntries
        "select v from " + name + " v where v in (select p from " + name + " p where p.ID > 0)", // 19
        "select v from " + name + " v where v.status in (select distinct p.status from " + name + " p where p.status = 'inactive')", // numberOfEntries
        "select * from " + name + " r1, " + name2 + " r2 where r1.status = r2.status", // 200
        "select * from " + name + " r1, " + name2 + " r2 where r1.status = r2.status and r1.status = 'active'", // 100
        "select r2.status from " + name + " r1, " + name2 + " r2 where r1.status = r2.status and r1.status = 'active'", // 100
        "select distinct r2.status from " + name + " r1, " + name2 + " r2 where r1.status = r2.status and r1.status = 'active'", // 1
        "select * from " + name + " v where v.status = ELEMENT (select distinct p.status from " + name + " p where p.status = 'inactive')" // numberOfEntries
    };
    final int[] results = { 2, 3, numberOfEntries, 1, numberOfEntries, 4, 4, numberOfEntries, numberOfEntries, 19, numberOfEntries, numberOfEntries * 2, 9, numberOfEntries * 2, numberOfEntries * 2, numberOfEntries, numberOfEntries * 2, numberOfEntries * 2, numberOfEntries, 19, numberOfEntries, 200, 100, 100, 1, numberOfEntries };
    // Start server1
    final int port1 = (Integer) server1.invoke(new SerializableCallable("Create Server1") {

        @Override
        public Object call() throws Exception {
            Region r1 = getCache().createRegionFactory(RegionShortcut.REPLICATE).create(regionName);
            Region r2 = getCache().createRegionFactory(RegionShortcut.REPLICATE).create(regionName2);
            for (int i = 0; i < numberOfEntries; i++) {
                PortfolioPdx p = new PortfolioPdx(i);
                r1.put("key-" + i, p);
                r2.put("key-" + i, p);
            }
            CacheServer server = getCache().addCacheServer();
            int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
            server.setPort(port);
            server.start();
            return port;
        }
    });
    // client loads pdx objects on server
    client.invoke(new SerializableCallable("Create client") {

        @Override
        public Object call() throws Exception {
            ClientCacheFactory cf = new ClientCacheFactory();
            cf.addPoolServer(NetworkUtils.getServerHostName(server1.getHost()), port1);
            ClientCache cache = getClientCache(cf);
            Region region = cache.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY).create(regionName);
            Region region2 = cache.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY).create(regionName2);
            for (int i = numberOfEntries; i < numberOfEntries * 2; i++) {
                PortfolioPdx p = new PortfolioPdx(i);
                region.put("key-" + i, p);
                region2.put("key-" + i, p);
            }
            return null;
        }
    });
    // query locally on server1 to verify pdx objects are not deserialized
    server1.invoke(new SerializableCallable("query locally on server1") {

        @Override
        public Object call() throws Exception {
            GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
            QueryService qs = null;
            SelectResults sr = null;
            // Execute query locally
            try {
                qs = getCache().getQueryService();
            } catch (Exception e) {
                Assert.fail("Failed to get QueryService.", e);
            }
            PositionPdx pos = new PositionPdx("IBM", 100);
            PortfolioPdx.Day pDay = new PortfolioPdx(1).aDay;
            for (int i = 0; i < queries.length; i++) {
                try {
                    if (i == 0) {
                        sr = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pos });
                    } else if (i == 1) {
                        sr = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pDay });
                    } else {
                        sr = (SelectResults) qs.newQuery(queries[i]).execute();
                    }
                    assertTrue("Size of resultset should be greater than 0 for query: " + queries[i], sr.size() > 0);
                    assertEquals("Expected and actual results do not match for query: " + queries[i], results[i], sr.size());
                } catch (Exception e) {
                    Assert.fail("Failed executing query " + queries[i], e);
                }
            }
            int extra = 0;
            if (cache.getLogger().fineEnabled()) {
                extra = 20;
            }
            assertEquals(numberOfEntries * 6 + 1 + extra, PortfolioPdx.numInstance);
            // set readSerialized and query
            ((GemFireCacheImpl) getCache()).setReadSerialized(true);
            PdxInstanceFactory out = PdxInstanceFactoryImpl.newCreator("org.apache.geode.cache.query.data.PositionPdx", false);
            out.writeLong("avg20DaysVol", 0);
            out.writeString("bondRating", "");
            out.writeDouble("convRatio", 0);
            out.writeString("country", "");
            out.writeDouble("delta", 0);
            out.writeLong("industry", 0);
            out.writeLong("issuer", 0);
            out.writeDouble("mktValue", pos.getMktValue());
            out.writeDouble("qty", 0);
            out.writeString("secId", pos.secId);
            out.writeString("secIdIndexed", pos.secIdIndexed);
            out.writeString("secLinks", "");
            out.writeDouble("sharesOutstanding", pos.getSharesOutstanding());
            out.writeString("underlyer", "");
            out.writeLong("volatility", 0);
            out.writeInt("pid", pos.getPid());
            out.writeInt("portfolioId", 0);
            // Identity Field.
            out.markIdentityField("secId");
            PdxInstance pi = out.create();
            PdxInstanceEnum pdxEnum = new PdxInstanceEnum(pDay);
            for (int i = 0; i < queries.length; i++) {
                try {
                    if (i == 0) {
                        sr = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pi });
                    } else if (i == 1) {
                        sr = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pdxEnum });
                    } else {
                        sr = (SelectResults) qs.newQuery(queries[i]).execute();
                    }
                    assertTrue("Size of resultset should be greater than 0 for query: " + queries[i], sr.size() > 0);
                    // in case of PortfolioPdx
                    if (queries[i].indexOf("distinct") == -1) {
                        if (i == 0 || i == 1) {
                            assertEquals("Expected and actual results do not match for query: " + queries[i], 1, sr.size());
                        } else {
                            assertEquals("Expected and actual results do not match for query: " + queries[i], results[i], sr.size());
                        }
                    }
                } catch (Exception e) {
                    Assert.fail("Failed executing query " + queries[i], e);
                }
            }
            // reset readSerialized and query
            ((GemFireCacheImpl) getCache()).setReadSerialized(false);
            return null;
        }
    });
    // query from client
    client.invoke(new SerializableCallable("Create client") {

        @Override
        public Object call() throws Exception {
            ClientCacheFactory cf = new ClientCacheFactory();
            cf.addPoolServer(NetworkUtils.getServerHostName(server1.getHost()), port1);
            ClientCache cache = getClientCache(cf);
            QueryService qs = null;
            SelectResults sr = null;
            // Execute query remotely
            try {
                qs = cache.getQueryService();
            } catch (Exception e) {
                Assert.fail("Failed to get QueryService.", e);
            }
            PositionPdx pos = new PositionPdx("IBM", 100);
            PortfolioPdx.Day pDay = new PortfolioPdx(1).aDay;
            for (int i = 0; i < queries.length; i++) {
                try {
                    if (i == 0) {
                        sr = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pos });
                    } else if (i == 1) {
                        sr = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pDay });
                    } else {
                        sr = (SelectResults) qs.newQuery(queries[i]).execute();
                    }
                    assertTrue("Size of resultset should be greater than 0 for query: " + queries[i], sr.size() > 0);
                    assertEquals("Expected and actual results do not match for query: " + queries[i], results[i], sr.size());
                    for (Object result : sr) {
                        if (result instanceof Struct) {
                            Object[] r = ((Struct) result).getFieldValues();
                            for (int j = 0; j < r.length; j++) {
                                if (r[j] instanceof PdxInstance || r[j] instanceof PdxString) {
                                    fail("Result object should be a domain object and not an instance of " + r[j].getClass() + " for query: " + queries[i]);
                                }
                            }
                        } else if (result instanceof PdxInstance || result instanceof PdxString) {
                            fail("Result object should be a domain object and not an instance of " + result.getClass() + " for query: " + queries[i]);
                        }
                    }
                } catch (Exception e) {
                    Assert.fail("Failed executing query " + queries[i], e);
                }
            }
            return null;
        }
    });
    // query locally on server1
    server1.invoke(new SerializableCallable("query locally on server1") {

        @Override
        public Object call() throws Exception {
            GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
            QueryService qs = null;
            SelectResults[][] sr = new SelectResults[queries.length][2];
            // Execute query locally
            try {
                qs = getCache().getQueryService();
            } catch (Exception e) {
                Assert.fail("Failed to get QueryService.", e);
            }
            int cnt = PositionPdx.cnt;
            PositionPdx pos = new PositionPdx("IBM", 100);
            PortfolioPdx.Day pDay = new PortfolioPdx(1).aDay;
            for (int i = 0; i < queries.length; i++) {
                try {
                    if (i == 0) {
                        sr[i][0] = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pos });
                    } else if (i == 1) {
                        sr[i][0] = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pDay });
                    } else {
                        sr[i][0] = (SelectResults) qs.newQuery(queries[i]).execute();
                    }
                    assertTrue("Size of resultset should be greater than 0 for query: " + queries[i], sr[i][0].size() > 0);
                    assertEquals("Expected and actual results do not match for query: " + queries[i], results[i], sr[i][0].size());
                    for (Object result : sr[i][0]) {
                        if (result instanceof Struct) {
                            Object[] r = ((Struct) result).getFieldValues();
                            for (int j = 0; j < r.length; j++) {
                                if (r[j] instanceof PdxInstance || r[j] instanceof PdxString) {
                                    fail("Result object should be a domain object and not an instance of " + r[j].getClass() + " for query: " + queries[i]);
                                }
                            }
                        } else if (result instanceof PdxInstance || result instanceof PdxString) {
                            fail("Result object should be a domain object and not an instance of " + result.getClass() + " for query: " + queries[i]);
                        }
                    }
                } catch (Exception e) {
                    Assert.fail("Failed executing query " + queries[i], e);
                }
            }
            // create index
            qs.createIndex("statusIndex", "status", name);
            qs.createIndex("IDIndex", "ID", name);
            qs.createIndex("pIdIndex", "pos.getPid()", name + " p, p.positions.values pos");
            qs.createIndex("secIdIndex", "pos.secId", name + " p, p.positions.values pos");
            for (int i = 0; i < queries.length; i++) {
                try {
                    if (i == 0) {
                        sr[i][1] = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pos });
                    } else if (i == 1) {
                        sr[i][1] = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pDay });
                    } else {
                        sr[i][1] = (SelectResults) qs.newQuery(queries[i]).execute();
                    }
                    assertTrue("Size of resultset should be greater than 0 for query: " + queries[i], sr[i][1].size() > 0);
                    assertEquals("Expected and actual results do not match for query: " + queries[i], results[i], sr[i][1].size());
                    for (Object result : sr[i][1]) {
                        if (result instanceof Struct) {
                            Object[] r = ((Struct) result).getFieldValues();
                            for (int j = 0; j < r.length; j++) {
                                if (r[j] instanceof PdxInstance || r[j] instanceof PdxString) {
                                    fail("Result object should be a domain object and not an instance of " + r[j].getClass() + " for query: " + queries[i]);
                                }
                            }
                        } else if (result instanceof PdxInstance || result instanceof PdxString) {
                            fail("Result object should be a domain object and not an instance of " + result.getClass() + " for query: " + queries[i]);
                        }
                    }
                } catch (Exception e) {
                    Assert.fail("Failed executing query " + queries[i], e);
                }
            }
            StructSetOrResultsSet ssOrrs = new StructSetOrResultsSet();
            ssOrrs.CompareQueryResultsWithoutAndWithIndexes(sr, queries.length, queries);
            return null;
        }
    });
    this.closeClient(client);
    this.closeClient(server1);
}
Also used : PdxInstanceFactory(org.apache.geode.pdx.PdxInstanceFactory) PositionPdx(org.apache.geode.cache.query.data.PositionPdx) PortfolioPdx(org.apache.geode.cache.query.data.PortfolioPdx) PdxString(org.apache.geode.pdx.internal.PdxString) ClientCacheFactory(org.apache.geode.cache.client.ClientCacheFactory) Struct(org.apache.geode.cache.query.Struct) PdxInstanceEnum(org.apache.geode.pdx.internal.PdxInstanceEnum) SelectResults(org.apache.geode.cache.query.SelectResults) CacheServer(org.apache.geode.cache.server.CacheServer) GemFireCacheImpl(org.apache.geode.internal.cache.GemFireCacheImpl) StructSetOrResultsSet(org.apache.geode.cache.query.functional.StructSetOrResultsSet) Host(org.apache.geode.test.dunit.Host) ClientCache(org.apache.geode.cache.client.ClientCache) PdxString(org.apache.geode.pdx.internal.PdxString) CacheException(org.apache.geode.cache.CacheException) PdxInstance(org.apache.geode.pdx.PdxInstance) QueryService(org.apache.geode.cache.query.QueryService) VM(org.apache.geode.test.dunit.VM) SerializableCallable(org.apache.geode.test.dunit.SerializableCallable) Region(org.apache.geode.cache.Region) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)
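
In the readSerialized section above, the test hand-builds a PdxInstance standing in for a PositionPdx and passes it as a query bind parameter. A stripped-down, hedged sketch of that pattern is below; the endpoint, the type name example.Position, the query, and the handful of fields written are placeholders rather than the test's full field set.

import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.query.Query;
import org.apache.geode.cache.query.SelectResults;
import org.apache.geode.pdx.PdxInstance;

public class PdxBindParameterSketch {
    public static void main(String[] args) throws Exception {
        ClientCache cache = new ClientCacheFactory()
            .addPoolServer("localhost", 40404)   // placeholder endpoint
            .setPdxReadSerialized(true)
            .create();

        // Fabricate a PdxInstance to stand in for a domain object. markIdentityField
        // restricts PdxInstance equals()/hashCode() to the marked field(s), mirroring
        // how the test above matches its fabricated Position on secId.
        PdxInstance position = cache.createPdxInstanceFactory("example.Position")
            .writeString("secId", "IBM")
            .writeDouble("mktValue", 0)
            .markIdentityField("secId")
            .create();

        Query query = cache.getQueryService()
            .newQuery("select * from /exampleRegion p where p.position1 = $1");
        SelectResults<?> sr = (SelectResults<?>) query.execute(new Object[] { position });
        System.out.println(sr.size());

        cache.close();
    }
}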

Example 85 with ClientCache

use of org.apache.geode.cache.client.ClientCache in project geode by apache.

the class LauncherLifecycleCommandsDUnitTest method test014GemFireServerJvmProcessTerminatesOnOutOfMemoryError.

@Test
public void test014GemFireServerJvmProcessTerminatesOnOutOfMemoryError() throws Exception {
    int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(2);
    final int serverPort = ports[0];
    final int locatorPort = ports[1];
    String pathname = getClass().getSimpleName().concat("_").concat(getTestMethodName());
    File workingDirectory = temporaryFolder.newFolder(pathname);
    assertTrue(workingDirectory.isDirectory() || workingDirectory.mkdir());
    CommandStringBuilder command = new CommandStringBuilder(CliStrings.START_SERVER);
    command.addOption(CliStrings.START_SERVER__NAME, pathname + TIMESTAMP.format(Calendar.getInstance().getTime()));
    command.addOption(CliStrings.START_SERVER__SERVER_PORT, String.valueOf(serverPort));
    command.addOption(CliStrings.START_SERVER__USE_CLUSTER_CONFIGURATION, Boolean.FALSE.toString());
    command.addOption(CliStrings.START_SERVER__MAXHEAP, "10M");
    command.addOption(CliStrings.START_SERVER__LOG_LEVEL, "config");
    command.addOption(CliStrings.START_SERVER__DIR, workingDirectory.getCanonicalPath());
    command.addOption(CliStrings.START_SERVER__CACHE_XML_FILE, IOUtils.tryGetCanonicalPathElseGetAbsolutePath(writeAndGetCacheXmlFile(workingDirectory)));
    command.addOption(CliStrings.START_SERVER__INCLUDE_SYSTEM_CLASSPATH);
    command.addOption(CliStrings.START_SERVER__J, "-D" + DistributionConfig.GEMFIRE_PREFIX + "" + START_LOCATOR + "=localhost[" + locatorPort + "]");
    CommandResult result = executeCommand(command.toString());
    System.out.println("result=" + result);
    assertNotNull(result);
    assertEquals(Result.Status.OK, result.getStatus());
    ServerLauncher serverLauncher = new ServerLauncher.Builder().setCommand(ServerLauncher.Command.STATUS).setWorkingDirectory(IOUtils.tryGetCanonicalPathElseGetAbsolutePath(workingDirectory)).build();
    assertNotNull(serverLauncher);
    ServerState serverState = serverLauncher.status();
    assertNotNull(serverState);
    assertEquals(Status.ONLINE, serverState.getStatus());
    // Verify our GemFire Server JVM process is running!
    assertTrue(serverState.isVmWithProcessIdRunning());
    ClientCache clientCache = setupClientCache(pathname + String.valueOf(serverPort), serverPort);
    assertNotNull(clientCache);
    try {
        Region<Long, String> exampleRegion = clientCache.getRegion("/Example");
        // run the GemFire Server "out-of-town" with an OutOfMemoryError!
        for (long index = 0; index < Long.MAX_VALUE; index++) {
            exampleRegion.put(index, String.valueOf(index));
        }
    } catch (Exception ignore) {
        System.err.printf("%1$s: %2$s%n", ignore.getClass().getName(), ignore.getMessage());
    } finally {
        clientCache.close();
        final int serverPid = serverState.getPid();
        WaitCriterion waitCriteria = new WaitCriterion() {

            private LauncherLifecycleCommands launcherLifecycleCommands = new LauncherLifecycleCommands();

            @Override
            public boolean done() {
                return !ProcessUtils.isProcessAlive(serverPid);
            }

            @Override
            public String description() {
                return "Wait for the GemFire Server JVM process that ran out-of-memory to exit.";
            }
        };
        waitForCriterion(waitCriteria, TimeUnit.SECONDS.toMillis(30), TimeUnit.SECONDS.toMillis(10), true);
        // Verify our GemFire Server JVM process was terminated!
        assertFalse(serverState.isVmWithProcessIdRunning());
        serverState = serverLauncher.status();
        assertNotNull(serverState);
        assertEquals(Status.NOT_RESPONDING, serverState.getStatus());
    }
}
Also used : ServerState(org.apache.geode.distributed.ServerLauncher.ServerState) ClientCache(org.apache.geode.cache.client.ClientCache) IOException(java.io.IOException) CommandResult(org.apache.geode.management.internal.cli.result.CommandResult) ServerLauncher(org.apache.geode.distributed.ServerLauncher) WaitCriterion(org.apache.geode.test.dunit.WaitCriterion) CommandStringBuilder(org.apache.geode.management.internal.cli.util.CommandStringBuilder) File(java.io.File) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest) Test(org.junit.Test)
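
The finally block above uses the dunit WaitCriterion helper to wait until the out-of-memory server process exits. Outside the test framework, the same ServerLauncher status check can be polled directly; a minimal sketch, with the working directory path as a placeholder:

import java.util.concurrent.TimeUnit;

import org.apache.geode.distributed.AbstractLauncher.Status;
import org.apache.geode.distributed.ServerLauncher;
import org.apache.geode.distributed.ServerLauncher.ServerState;

public class ServerStatusPollSketch {
    public static void main(String[] args) throws Exception {
        // Attach to an already-started server by its working directory and poll its
        // status, roughly what the test does with a WaitCriterion.
        ServerLauncher launcher = new ServerLauncher.Builder()
            .setCommand(ServerLauncher.Command.STATUS)
            .setWorkingDirectory("/path/to/server/dir")   // placeholder directory
            .build();

        long deadline = System.nanoTime() + TimeUnit.SECONDS.toNanos(30);
        ServerState state = launcher.status();
        while (state.getStatus() == Status.ONLINE && System.nanoTime() < deadline) {
            TimeUnit.SECONDS.sleep(1);
            state = launcher.status();
        }
        System.out.println("final status: " + state.getStatus());
    }
}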

Aggregations

ClientCache (org.apache.geode.cache.client.ClientCache)112 Test (org.junit.Test)74 DistributedTest (org.apache.geode.test.junit.categories.DistributedTest)73 ClientCacheFactory (org.apache.geode.cache.client.ClientCacheFactory)65 Region (org.apache.geode.cache.Region)64 VM (org.apache.geode.test.dunit.VM)43 SerializableCallable (org.apache.geode.test.dunit.SerializableCallable)42 Host (org.apache.geode.test.dunit.Host)40 SecurityTest (org.apache.geode.test.junit.categories.SecurityTest)31 QueryService (org.apache.geode.cache.query.QueryService)26 SelectResults (org.apache.geode.cache.query.SelectResults)25 SecurityTestUtil.createClientCache (org.apache.geode.security.SecurityTestUtil.createClientCache)25 SecurityTestUtil.createProxyRegion (org.apache.geode.security.SecurityTestUtil.createProxyRegion)23 CacheServer (org.apache.geode.cache.server.CacheServer)22 Cache (org.apache.geode.cache.Cache)20 CacheException (org.apache.geode.cache.CacheException)15 PortfolioPdx (org.apache.geode.cache.query.data.PortfolioPdx)13 IOException (java.io.IOException)12 Properties (java.util.Properties)12 IgnoredException (org.apache.geode.test.dunit.IgnoredException)12