Search in sources :

Example 66 with Struct

use of org.apache.geode.cache.query.Struct in project geode by apache.

In the class CopyOnReadIndexDUnitTest, the method helpTestTransactionsOnReplicatedRegion:

/**
 * Verifies copy-on-read semantics for queries executed against a replicated region, both inside
 * and outside a transaction, by tracking the number of deserialized Portfolio instances on each
 * VM. vm0 holds locally-put (already deserialized) values, vm1 never gets an index (values stay
 * serialized until queried), and vm2 gets an index only when {@code hasIndex} is true.
 *
 * @param queryString        OQL query to execute on each VM
 * @param numPortfolios      number of Portfolio entries to put into the region
 * @param numExpectedResults result count the query is expected to return
 * @param hasIndex           whether to create a hash index on vm0 and vm2 (never on vm1)
 * @throws Exception if region creation, puts, or query execution fail
 */
public void helpTestTransactionsOnReplicatedRegion(final String queryString, final int numPortfolios, final int numExpectedResults, final boolean hasIndex) throws Exception {
    resetInstanceCount(vm0);
    resetInstanceCount(vm1);
    resetInstanceCount(vm2);
    createReplicatedRegion(vm0, "portfolios");
    createReplicatedRegion(vm1, "portfolios");
    createReplicatedRegion(vm2, "portfolios");
    // counts
    if (hasIndex) {
        vm0.invoke(new SerializableCallable() {

            public Object call() throws Exception {
                QueryTestUtils utils = new QueryTestUtils();
                utils.createHashIndex("idIndex", "p.ID", "/portfolios p");
                return null;
            }
        });
        // let's not create index on vm1 to check different scenarios
        vm2.invoke(new SerializableCallable() {

            public Object call() throws Exception {
                QueryTestUtils utils = new QueryTestUtils();
                utils.createHashIndex("idIndex", "p.ID", "/portfolios p");
                return null;
            }
        });
    }
    vm0.invoke(new SerializableCallable() {

        public Object call() throws Exception {
            Region region = getCache().getRegion("/portfolios");
            for (int i = 0; i < numPortfolios; i++) {
                Portfolio p = new Portfolio(i);
                p.status = "testStatus";
                p.positions = new HashMap();
                p.positions.put("" + i, new Position("" + i, 20));
                region.put("key " + i, p);
            }
            // We should have the same number of portfolio objects that we created for the put
            Wait.waitForCriterion(verifyPortfolioCount(numPortfolios), 5000, 200, true);
            return null;
        }
    });
    vm1.invoke(new SerializableCallable() {

        public Object call() throws Exception {
            // At this point, we should only have serialized values in this vm
            Region region = getCache().getRegion("/portfolios");
            Wait.waitForCriterion(verifyPortfolioCount(0), 0, 200, true);
            return null;
        }
    });
    vm2.invoke(new SerializableCallable() {

        public Object call() throws Exception {
            // There is an index for vm2, so we should have deserialized values at this point,
            Region region = getCache().getRegion("/portfolios");
            if (hasIndex) {
                Wait.waitForCriterion(verifyPortfolioCount(numPortfolios), 0, 200, true);
            } else {
                Wait.waitForCriterion(verifyPortfolioCount(0), 0, 200, true);
            }
            return null;
        }
    });
    // start transaction
    // execute query
    // modify results
    // check instance count
    vm0.invoke(new SerializableCallable() {

        public Object call() throws Exception {
            Region region = getCache().getRegion("/portfolios");
            CacheTransactionManager txManager = region.getCache().getCacheTransactionManager();
            try {
                txManager.begin();
                QueryService qs = getCache().getQueryService();
                Query query = qs.newQuery(queryString);
                SelectResults results = (SelectResults) query.execute();
                assertEquals(numExpectedResults, results.size());
                // Mutate the result copies; copy-on-read must isolate the region values from this.
                for (Object o : results) {
                    if (o instanceof Portfolio) {
                        Portfolio p = (Portfolio) o;
                        p.status = "discardStatus";
                    } else {
                        Struct struct = (Struct) o;
                        Portfolio p = (Portfolio) struct.getFieldValues()[0];
                        p.status = "discardStatus";
                    }
                }
                txManager.commit();
            } catch (CommitConflictException conflict) {
                Assert.fail("commit conflict exception", conflict);
            }
            // We have created puts from our previous callable
            // Now we have copied the results from the query
            Wait.waitForCriterion(verifyPortfolioCount(numExpectedResults + numPortfolios), 0, 200, true);
            return null;
        }
    });
    // Check objects in cache on vm1
    vm1.invoke(new SerializableCallable() {

        public Object call() throws Exception {
            Region region = getCache().getRegion("/portfolios");
            QueryService qs = getCache().getQueryService();
            Query query = qs.newQuery(queryString);
            SelectResults results = (SelectResults) query.execute();
            assertEquals(numExpectedResults, results.size());
            for (Object o : results) {
                if (o instanceof Portfolio) {
                    Portfolio p = (Portfolio) o;
                    assertEquals("status should not have been changed", "testStatus", p.status);
                    p.status = "discardStatus";
                } else {
                    Struct struct = (Struct) o;
                    Portfolio p = (Portfolio) struct.getFieldValues()[0];
                    assertEquals("status should not have been changed", "testStatus", p.status);
                    p.status = "discardStatus";
                }
            }
            // first it must deserialize the portfolios in the replicated region
            // then we do a copy on read of these deserialized objects for the final result set
            Wait.waitForCriterion(verifyPortfolioCount(numExpectedResults + numPortfolios), 0, 200, true);
            results = (SelectResults) query.execute();
            assertEquals(numExpectedResults, results.size());
            for (Object o : results) {
                if (o instanceof Portfolio) {
                    Portfolio p = (Portfolio) o;
                    assertEquals("status should not have been changed", "testStatus", p.status);
                } else {
                    Struct struct = (Struct) o;
                    Portfolio p = (Portfolio) struct.getFieldValues()[0];
                    assertEquals("status should not have been changed", "testStatus", p.status);
                }
            }
            // we never created index on vm1
            // so in this case, we always have to deserialize the value from the region
            Wait.waitForCriterion(verifyPortfolioCount(numPortfolios * 2 + numExpectedResults * 2), 0, 200, true);
            return null;
        }
    });
    // Check objects in cache on vm2
    vm2.invoke(new SerializableCallable() {

        public Object call() throws Exception {
            Region region = getCache().getRegion("/portfolios");
            QueryService qs = getCache().getQueryService();
            Query query = qs.newQuery(queryString);
            SelectResults results = (SelectResults) query.execute();
            assertEquals(numExpectedResults, results.size());
            for (Object o : results) {
                if (o instanceof Portfolio) {
                    Portfolio p = (Portfolio) o;
                    assertEquals("status should not have been changed", "testStatus", p.status);
                    p.status = "discardStatus";
                } else {
                    Struct struct = (Struct) o;
                    Portfolio p = (Portfolio) struct.getFieldValues()[0];
                    assertEquals("status should not have been changed", "testStatus", p.status);
                    p.status = "discardStatus";
                }
            }
            // with or without index, the values had to have been deserialized at one point
            Wait.waitForCriterion(verifyPortfolioCount(numPortfolios + numExpectedResults), 0, 200, true);
            results = (SelectResults) query.execute();
            assertEquals(numExpectedResults, results.size());
            for (Object o : results) {
                if (o instanceof Portfolio) {
                    Portfolio p = (Portfolio) o;
                    assertEquals("status should not have been changed", "testStatus", p.status);
                } else {
                    Struct struct = (Struct) o;
                    Portfolio p = (Portfolio) struct.getFieldValues()[0];
                    assertEquals("status should not have been changed", "testStatus", p.status);
                }
            }
            if (hasIndex) {
                // we have an index, so the values are already deserialized
                // total is now our original deserialization amount : numPortfolios
                // two query results copied.
                Wait.waitForCriterion(verifyPortfolioCount(numPortfolios + numExpectedResults * 2), 0, 200, true);
            } else {
                // no index was created on vm2 in this scenario
                // so each query execution must deserialize the values from the region again
                Wait.waitForCriterion(verifyPortfolioCount(numPortfolios * 2 + numExpectedResults * 2), 0, 200, true);
            }
            return null;
        }
    });
    // Check objects in cache on vm0
    vm0.invoke(new SerializableCallable() {

        public Object call() throws Exception {
            Region region = getCache().getRegion("/portfolios");
            QueryService qs = getCache().getQueryService();
            Query query = qs.newQuery(queryString);
            SelectResults results = (SelectResults) query.execute();
            assertEquals(numExpectedResults, results.size());
            for (Object o : results) {
                if (o instanceof Portfolio) {
                    Portfolio p = (Portfolio) o;
                    assertEquals("status should not have been changed", "testStatus", p.status);
                } else {
                    Struct struct = (Struct) o;
                    Portfolio p = (Portfolio) struct.getFieldValues()[0];
                    assertEquals("status should not have been changed", "testStatus", p.status);
                }
            }
            // with or without index, the values we put in the region were already deserialized values
            Wait.waitForCriterion(verifyPortfolioCount(numExpectedResults * 2 + numPortfolios), 0, 200, true);
            return null;
        }
    });
    // BUGFIX: region was created as "portfolios" (see createReplicatedRegion calls above);
    // the previous name "portfolio" would not match the region actually created.
    destroyRegion("portfolios", vm0);
}
Also used : Query(org.apache.geode.cache.query.Query) HashMap(java.util.HashMap) Position(org.apache.geode.cache.query.data.Position) Portfolio(org.apache.geode.cache.query.data.Portfolio) CacheException(org.apache.geode.cache.CacheException) CommitConflictException(org.apache.geode.cache.CommitConflictException) CacheTransactionManager(org.apache.geode.cache.CacheTransactionManager) Struct(org.apache.geode.cache.query.Struct) CommitConflictException(org.apache.geode.cache.CommitConflictException) SelectResults(org.apache.geode.cache.query.SelectResults) QueryService(org.apache.geode.cache.query.QueryService) QueryTestUtils(org.apache.geode.cache.query.QueryTestUtils) SerializableCallable(org.apache.geode.test.dunit.SerializableCallable) Region(org.apache.geode.cache.Region) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion)

Example 67 with Struct

use of org.apache.geode.cache.query.Struct in project geode by apache.

In the class PdxQueryDUnitTest, the method testClientServerQueryWithRangeIndex:

/**
   * Tests client-server query on PdxInstance.
   *
   * <p>Two servers are started (only server1 creates range indexes), a client populates the
   * region with {@code PortfolioPdx} values, and the queries are then executed locally on
   * server1 twice: once expecting fully-deserialized domain objects in the results, and once
   * with read-serialized enabled expecting {@code PdxInstance} values instead.
   */
@Test
public void testClientServerQueryWithRangeIndex() throws CacheException {
    final Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    VM vm2 = host.getVM(2);
    VM vm3 = host.getVM(3);
    final String[] qs = new String[] { "SELECT * FROM " + regName + " p WHERE p.ID > 0", "SELECT p FROM " + regName + " p WHERE p.ID > 0", "SELECT * FROM " + regName + " p WHERE p.ID = 1", "SELECT * FROM " + regName + " p WHERE p.ID < 10", "SELECT * FROM " + regName + " p WHERE p.ID != 10", "SELECT * FROM " + regName + " p, p.positions.values pos WHERE p.ID > 0", "SELECT * FROM " + regName + " p, p.positions.values pos WHERE p.ID = 10", "SELECT p, pos FROM " + regName + " p, p.positions.values pos WHERE p.ID > 0", "SELECT p, pos FROM " + regName + " p, p.positions.values pos WHERE p.ID = 10", "SELECT pos FROM " + regName + " p, p.positions.values pos WHERE p.ID > 0", "SELECT p, pos FROM " + regName + " p, p.positions.values pos WHERE pos.secId != 'XXX'", "SELECT pos FROM " + regName + " p, p.positions.values pos WHERE pos.secId != 'XXX'", "SELECT pos FROM " + regName + " p, p.positions.values pos WHERE pos.secId = 'SUN'", "SELECT p, pos FROM " + regName + " p, p.positions.values pos WHERE pos.secId = 'SUN'", "SELECT p, pos FROM " + regName + " p, p.positions.values pos WHERE pos.secId = 'DELL'", "SELECT * FROM " + regName + " p, p.positions.values pos WHERE pos.secId = 'SUN'", "SELECT * FROM " + regName + " p, p.positions.values pos WHERE pos.secId = 'DELL'", "SELECT p, p.position1 FROM " + regName + " p where p.position1.secId != 'XXX'", "SELECT p, p.position1 FROM " + regName + " p where p.position1.secId = 'SUN'", "SELECT p.position1 FROM " + regName + " p WHERE p.ID > 0", "SELECT * FROM " + regName + " p WHERE p.status = 'active'", "SELECT p FROM " + regName + " p WHERE p.status != 'active'" };
    // Start server1
    vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {

        public void run2() throws CacheException {
            // Async index
            configAndStartBridgeServer(false, false, true, null);
            // Create Range index.
            QueryService qs = getCache().getQueryService();
            try {
                qs.createIndex("idIndex", "p.ID", regName + " p");
                qs.createIndex("statusIndex", "p.status", regName + " p");
                qs.createIndex("secIdIndex", "pos.secId", regName + " p, p.positions.values pos");
                qs.createIndex("pSecIdIdIndex", "p.position1.secId", regName + " p");
            } catch (Exception ex) {
                // Preserve the cause so the index-creation failure keeps its stack trace,
                // consistent with the Assert.fail(msg, cause) usage elsewhere in this test.
                Assert.fail("Failed to create index.", ex);
            }
        }
    });
    // Start server2 (no indexes here, to cover the index-less server path)
    vm1.invoke(new CacheSerializableRunnable("Create Bridge Server") {

        public void run2() throws CacheException {
            // Async index
            configAndStartBridgeServer(false, false, true, null);
        }
    });
    // Client pool.
    final int port0 = vm0.invoke(() -> PdxQueryDUnitTest.getCacheServerPort());
    final int port1 = vm1.invoke(() -> PdxQueryDUnitTest.getCacheServerPort());
    final String host0 = NetworkUtils.getServerHostName(vm0.getHost());
    // Create client pool.
    final String poolName = "testClientServerQueryPool";
    createPool(vm2, poolName, new String[] { host0 }, new int[] { port0 }, true);
    createPool(vm3, poolName, new String[] { host0 }, new int[] { port1 }, true);
    // Create client region
    vm3.invoke(new CacheSerializableRunnable("Create region") {

        public void run2() throws CacheException {
            AttributesFactory factory = new AttributesFactory();
            factory.setScope(Scope.LOCAL);
            ClientServerTestCase.configureConnectionPool(factory, host0, port1, -1, true, -1, -1, null);
            Region region = createRegion(regionName, rootRegionName, factory.create());
            int j = 0;
            for (int i = 0; i < 100; i++) {
                region.put("key-" + i, new PortfolioPdx(j, j++));
                // To add duplicate:
                if (i % 24 == 0) {
                    // reset
                    j = 0;
                }
            }
        }
    });
    // Execute query and make sure there is no PdxInstance in the results.
    vm0.invoke(new CacheSerializableRunnable("Execute queries expecting domain objects") {

        public void run2() throws CacheException {
            // Execute query locally.
            QueryService queryService = getCache().getQueryService();
            for (int i = 0; i < qs.length; i++) {
                try {
                    Query query = queryService.newQuery(qs[i]);
                    SelectResults results = (SelectResults) query.execute();
                    for (Object o : results.asList()) {
                        if (o instanceof Struct) {
                            Object[] values = ((Struct) o).getFieldValues();
                            for (int c = 0; c < values.length; c++) {
                                if (values[c] instanceof PdxInstance) {
                                    fail("Found unexpected PdxInstance in the query results. At struct field [" + c + "] query :" + qs[i] + " Object is: " + values[c]);
                                }
                            }
                        } else {
                            if (o instanceof PdxInstance) {
                                fail("Found unexpected PdxInstance in the query results. " + qs[i]);
                            }
                        }
                    }
                } catch (Exception e) {
                    Assert.fail("Failed executing " + qs[i], e);
                }
            }
        }
    });
    // Re-execute query to fetch PdxInstance in the results.
    vm0.invoke(new CacheSerializableRunnable("Execute queries expecting PdxInstances") {

        public void run2() throws CacheException {
            // Execute query locally.
            GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
            cache.setReadSerialized(true);
            try {
                QueryService queryService = getCache().getQueryService();
                for (int i = 0; i < qs.length; i++) {
                    try {
                        Query query = queryService.newQuery(qs[i]);
                        SelectResults results = (SelectResults) query.execute();
                        for (Object o : results.asList()) {
                            if (o instanceof Struct) {
                                Object[] values = ((Struct) o).getFieldValues();
                                for (int c = 0; c < values.length; c++) {
                                    if (!(values[c] instanceof PdxInstance)) {
                                        fail("Didn't find expected PdxInstance in the query results. At struct field [" + c + "] query :" + qs[i] + " Object is: " + values[c]);
                                    }
                                }
                            } else {
                                if (!(o instanceof PdxInstance)) {
                                    fail("Didn't find expected PdxInstance in the query results. " + qs[i] + " Object is: " + o);
                                }
                            }
                        }
                    } catch (Exception e) {
                        Assert.fail("Failed executing " + qs[i], e);
                    }
                }
            } finally {
                // Always restore the default so later tests see deserialized reads.
                cache.setReadSerialized(false);
            }
        }
    });
    this.closeClient(vm2);
    this.closeClient(vm3);
    this.closeClient(vm1);
    this.closeClient(vm0);
}
Also used : Query(org.apache.geode.cache.query.Query) CacheException(org.apache.geode.cache.CacheException) Host(org.apache.geode.test.dunit.Host) PortfolioPdx(org.apache.geode.cache.query.data.PortfolioPdx) CacheException(org.apache.geode.cache.CacheException) Struct(org.apache.geode.cache.query.Struct) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) SelectResults(org.apache.geode.cache.query.SelectResults) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) PdxInstance(org.apache.geode.pdx.PdxInstance) QueryService(org.apache.geode.cache.query.QueryService) VM(org.apache.geode.test.dunit.VM) Region(org.apache.geode.cache.Region) GemFireCacheImpl(org.apache.geode.internal.cache.GemFireCacheImpl) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest) Test(org.junit.Test)

Example 68 with Struct

use of org.apache.geode.cache.query.Struct in project geode by apache.

In the class PdxLocalQueryDUnitTest, the method testLocalPdxQueries:

@Test
public void testLocalPdxQueries() throws Exception {
    final Host host = Host.getHost(0);
    final VM server1 = host.getVM(1);
    final VM client = host.getVM(2);
    final int numberOfEntries = 10;
    final String name = "/" + regionName;
    final String name2 = "/" + regionName2;
    final String[] queries = { "select * from " + name + " where position1 = $1", "select * from " + name + " where aDay = $1", // numberOfEntries
    "select distinct * from " + name + " p where p.status = 'inactive'", // 1
    "select distinct p.status from " + name + " p where p.status = 'inactive'", // numberOfEntries
    "select p from " + name + " p where p.status = 'inactive'", // 4
    "select * from " + name + " p, p.positions.values v where v.secId = 'IBM'", // 4
    "select v from " + name + " p, p.positions.values v where v.secId = 'IBM'", // numberOfEntries
    "select p.status from " + name + " p where p.status = 'inactive'", // numberOfEntries
    "select distinct * from " + name + " p where p.status = 'inactive' order by p.ID", // 19
    "select * from " + name + " p where p.status = 'inactive' or p.ID > 0", // numberOfEntries
    "select * from " + name + " p where p.status = 'inactive' and p.ID >= 0", // numberOfEntries*2
    "select * from " + name + " p where p.status in set ('inactive', 'active')", // 9
    "select * from " + name + " p where p.ID > 0 and p.ID < 10", // numberOfEntries*2
    "select v from " + name + " p, p.positions.values v where p.status = 'inactive'", // numberOfEntries*2
    "select v.secId from " + name + " p, p.positions.values v where p.status = 'inactive'", "select distinct p from " + name + // numberOfEntries
    " p, p.positions.values v where p.status = 'inactive' and v.pid >= 0", "select distinct p from " + name + // numberOfEntries*2
    " p, p.positions.values v where p.status = 'inactive' or v.pid > 0", // numberOfEntries*2
    "select distinct * from " + name + " p, p.positions.values v where p.status = 'inactive'", // numberOfEntries
    "select * from " + name + ".values v where v.status = 'inactive'", // 19
    "select v from " + name + " v where v in (select p from " + name + " p where p.ID > 0)", "select v from " + name + " v where v.status in (select distinct p.status from " + name + // numberOfEntries
    " p where p.status = 'inactive')", // 200
    "select * from " + name + " r1, " + name2 + " r2 where r1.status = r2.status", "select * from " + name + " r1, " + name2 + // 100
    " r2 where r1.status = r2.status and r1.status = 'active'", "select r2.status from " + name + " r1, " + name2 + // 100
    " r2 where r1.status = r2.status and r1.status = 'active'", "select distinct r2.status from " + name + " r1, " + name2 + // 1
    " r2 where r1.status = r2.status and r1.status = 'active'", "select * from " + name + " v where v.status = ELEMENT (select distinct p.status from " + name + // numberOfEntries
    " p where p.status = 'inactive')" };
    final int[] results = { 2, 3, numberOfEntries, 1, numberOfEntries, 4, 4, numberOfEntries, numberOfEntries, 19, numberOfEntries, numberOfEntries * 2, 9, numberOfEntries * 2, numberOfEntries * 2, numberOfEntries, numberOfEntries * 2, numberOfEntries * 2, numberOfEntries, 19, numberOfEntries, 200, 100, 100, 1, numberOfEntries };
    // Start server1
    final int port1 = (Integer) server1.invoke(new SerializableCallable("Create Server1") {

        @Override
        public Object call() throws Exception {
            Region r1 = getCache().createRegionFactory(RegionShortcut.REPLICATE).create(regionName);
            Region r2 = getCache().createRegionFactory(RegionShortcut.REPLICATE).create(regionName2);
            for (int i = 0; i < numberOfEntries; i++) {
                PortfolioPdx p = new PortfolioPdx(i);
                r1.put("key-" + i, p);
                r2.put("key-" + i, p);
            }
            CacheServer server = getCache().addCacheServer();
            int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
            server.setPort(port);
            server.start();
            return port;
        }
    });
    // client loads pdx objects on server
    client.invoke(new SerializableCallable("Create client") {

        @Override
        public Object call() throws Exception {
            ClientCacheFactory cf = new ClientCacheFactory();
            cf.addPoolServer(NetworkUtils.getServerHostName(server1.getHost()), port1);
            ClientCache cache = getClientCache(cf);
            Region region = cache.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY).create(regionName);
            Region region2 = cache.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY).create(regionName2);
            for (int i = numberOfEntries; i < numberOfEntries * 2; i++) {
                PortfolioPdx p = new PortfolioPdx(i);
                region.put("key-" + i, p);
                region2.put("key-" + i, p);
            }
            return null;
        }
    });
    // query locally on server1 to verify pdx objects are not deserialized
    server1.invoke(new SerializableCallable("query locally on server1") {

        @Override
        public Object call() throws Exception {
            GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
            QueryService qs = null;
            SelectResults sr = null;
            // Execute query locally
            try {
                qs = getCache().getQueryService();
            } catch (Exception e) {
                Assert.fail("Failed to get QueryService.", e);
            }
            PositionPdx pos = new PositionPdx("IBM", 100);
            PortfolioPdx.Day pDay = new PortfolioPdx(1).aDay;
            for (int i = 0; i < queries.length; i++) {
                try {
                    if (i == 0) {
                        sr = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pos });
                    } else if (i == 1) {
                        sr = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pDay });
                    } else {
                        sr = (SelectResults) qs.newQuery(queries[i]).execute();
                    }
                    assertTrue("Size of resultset should be greater than 0 for query: " + queries[i], sr.size() > 0);
                    assertEquals("Expected and actual results do not match for query: " + queries[i], results[i], sr.size());
                } catch (Exception e) {
                    Assert.fail("Failed executing query " + queries[i], e);
                }
            }
            int extra = 0;
            if (cache.getLogger().fineEnabled()) {
                extra = 20;
            }
            assertEquals(numberOfEntries * 6 + 1 + extra, PortfolioPdx.numInstance);
            // set readserealized and query
            ((GemFireCacheImpl) getCache()).setReadSerialized(true);
            PdxInstanceFactory out = PdxInstanceFactoryImpl.newCreator("org.apache.geode.cache.query.data.PositionPdx", false);
            out.writeLong("avg20DaysVol", 0);
            out.writeString("bondRating", "");
            out.writeDouble("convRatio", 0);
            out.writeString("country", "");
            out.writeDouble("delta", 0);
            out.writeLong("industry", 0);
            out.writeLong("issuer", 0);
            out.writeDouble("mktValue", pos.getMktValue());
            out.writeDouble("qty", 0);
            out.writeString("secId", pos.secId);
            out.writeString("secIdIndexed", pos.secIdIndexed);
            out.writeString("secLinks", "");
            out.writeDouble("sharesOutstanding", pos.getSharesOutstanding());
            out.writeString("underlyer", "");
            out.writeLong("volatility", 0);
            out.writeInt("pid", pos.getPid());
            out.writeInt("portfolioId", 0);
            // Identity Field.
            out.markIdentityField("secId");
            PdxInstance pi = out.create();
            PdxInstanceEnum pdxEnum = new PdxInstanceEnum(pDay);
            for (int i = 0; i < queries.length; i++) {
                try {
                    if (i == 0) {
                        sr = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pi });
                    } else if (i == 1) {
                        sr = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pdxEnum });
                    } else {
                        sr = (SelectResults) qs.newQuery(queries[i]).execute();
                    }
                    assertTrue("Size of resultset should be greater than 0 for query: " + queries[i], sr.size() > 0);
                    // in case of PortfolioPdx
                    if (queries[i].indexOf("distinct") == -1) {
                        if (i == 0 || i == 1) {
                            assertEquals("Expected and actual results do not match for query: " + queries[i], 1, sr.size());
                        } else {
                            assertEquals("Expected and actual results do not match for query: " + queries[i], results[i], sr.size());
                        }
                    }
                } catch (Exception e) {
                    Assert.fail("Failed executing query " + queries[i], e);
                }
            }
            // reset readserealized and query
            ((GemFireCacheImpl) getCache()).setReadSerialized(false);
            return null;
        }
    });
    // query from client
    client.invoke(new SerializableCallable("Create client") {

        @Override
        public Object call() throws Exception {
            ClientCacheFactory cf = new ClientCacheFactory();
            cf.addPoolServer(NetworkUtils.getServerHostName(server1.getHost()), port1);
            ClientCache cache = getClientCache(cf);
            QueryService qs = null;
            SelectResults sr = null;
            // Execute query remotely
            try {
                qs = cache.getQueryService();
            } catch (Exception e) {
                Assert.fail("Failed to get QueryService.", e);
            }
            PositionPdx pos = new PositionPdx("IBM", 100);
            PortfolioPdx.Day pDay = new PortfolioPdx(1).aDay;
            for (int i = 0; i < queries.length; i++) {
                try {
                    if (i == 0) {
                        sr = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pos });
                    } else if (i == 1) {
                        sr = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pDay });
                    } else {
                        sr = (SelectResults) qs.newQuery(queries[i]).execute();
                    }
                    assertTrue("Size of resultset should be greater than 0 for query: " + queries[i], sr.size() > 0);
                    assertEquals("Expected and actual results do not match for query: " + queries[i], results[i], sr.size());
                    for (Object result : sr) {
                        if (result instanceof Struct) {
                            Object[] r = ((Struct) result).getFieldValues();
                            for (int j = 0; j < r.length; j++) {
                                if (r[j] instanceof PdxInstance || r[j] instanceof PdxString) {
                                    fail("Result object should be a domain object and not an instance of " + r[j].getClass() + " for query: " + queries[i]);
                                }
                            }
                        } else if (result instanceof PdxInstance || result instanceof PdxString) {
                            fail("Result object should be a domain object and not an instance of " + result.getClass() + " for query: " + queries[i]);
                        }
                    }
                } catch (Exception e) {
                    Assert.fail("Failed executing query " + queries[i], e);
                }
            }
            return null;
        }
    });
    // query locally on server1
    server1.invoke(new SerializableCallable("query locally on server1") {

        @Override
        public Object call() throws Exception {
            // Runs each query twice on this server JVM — first without indexes, then
            // after creating indexes — verifying that result sizes match expectations,
            // that results are deserialized domain objects (never PdxInstance/PdxString),
            // and finally that the with-index and without-index result sets are equal.
            GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
            QueryService qs = null;
            // sr[i][0] = results without index, sr[i][1] = results with index
            SelectResults[][] sr = new SelectResults[queries.length][2];
            // Execute query locally
            try {
                qs = getCache().getQueryService();
            } catch (Exception e) {
                Assert.fail("Failed to get QueryService.", e);
            }
            // NOTE(review): 'cache' and 'cnt' appear unused below — presumably kept to
            // force class loading / counter snapshots; confirm before removing.
            int cnt = PositionPdx.cnt;
            // Bind values for the first two (parameterized) queries.
            PositionPdx pos = new PositionPdx("IBM", 100);
            PortfolioPdx.Day pDay = new PortfolioPdx(1).aDay;
            // Pass 1: execute all queries WITHOUT indexes.
            for (int i = 0; i < queries.length; i++) {
                try {
                    // Queries 0 and 1 take bind parameters (a PositionPdx and a Day enum);
                    // the rest are executed without parameters.
                    if (i == 0) {
                        sr[i][0] = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pos });
                    } else if (i == 1) {
                        sr[i][0] = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pDay });
                    } else {
                        sr[i][0] = (SelectResults) qs.newQuery(queries[i]).execute();
                    }
                    assertTrue("Size of resultset should be greater than 0 for query: " + queries[i], sr[i][0].size() > 0);
                    assertEquals("Expected and actual results do not match for query: " + queries[i], results[i], sr[i][0].size());
                    // Every result (and every field of a Struct result) must already be
                    // deserialized into a domain object — no raw PDX wrappers may leak out.
                    for (Object result : sr[i][0]) {
                        if (result instanceof Struct) {
                            Object[] r = ((Struct) result).getFieldValues();
                            for (int j = 0; j < r.length; j++) {
                                if (r[j] instanceof PdxInstance || r[j] instanceof PdxString) {
                                    fail("Result object should be a domain object and not an instance of " + r[j].getClass() + " for query: " + queries[i]);
                                }
                            }
                        } else if (result instanceof PdxInstance || result instanceof PdxString) {
                            fail("Result object should be a domain object and not an instance of " + result.getClass() + " for query: " + queries[i]);
                        }
                    }
                } catch (Exception e) {
                    Assert.fail("Failed executing query " + queries[i], e);
                }
            }
            // create index
            qs.createIndex("statusIndex", "status", name);
            qs.createIndex("IDIndex", "ID", name);
            qs.createIndex("pIdIndex", "pos.getPid()", name + " p, p.positions.values pos");
            qs.createIndex("secIdIndex", "pos.secId", name + " p, p.positions.values pos");
            // Pass 2: re-execute the same queries WITH indexes and re-run the same checks.
            for (int i = 0; i < queries.length; i++) {
                try {
                    if (i == 0) {
                        sr[i][1] = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pos });
                    } else if (i == 1) {
                        sr[i][1] = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pDay });
                    } else {
                        sr[i][1] = (SelectResults) qs.newQuery(queries[i]).execute();
                    }
                    assertTrue("Size of resultset should be greater than 0 for query: " + queries[i], sr[i][1].size() > 0);
                    assertEquals("Expected and actual results do not match for query: " + queries[i], results[i], sr[i][1].size());
                    for (Object result : sr[i][1]) {
                        if (result instanceof Struct) {
                            Object[] r = ((Struct) result).getFieldValues();
                            for (int j = 0; j < r.length; j++) {
                                if (r[j] instanceof PdxInstance || r[j] instanceof PdxString) {
                                    fail("Result object should be a domain object and not an instance of " + r[j].getClass() + " for query: " + queries[i]);
                                }
                            }
                        } else if (result instanceof PdxInstance || result instanceof PdxString) {
                            fail("Result object should be a domain object and not an instance of " + result.getClass() + " for query: " + queries[i]);
                        }
                    }
                } catch (Exception e) {
                    Assert.fail("Failed executing query " + queries[i], e);
                }
            }
            // Indexed and non-indexed executions must produce identical result sets.
            StructSetOrResultsSet ssOrrs = new StructSetOrResultsSet();
            ssOrrs.CompareQueryResultsWithoutAndWithIndexes(sr, queries.length, queries);
            return null;
        }
    });
    this.closeClient(client);
    this.closeClient(server1);
}
Also used : PdxInstanceFactory(org.apache.geode.pdx.PdxInstanceFactory) PositionPdx(org.apache.geode.cache.query.data.PositionPdx) PortfolioPdx(org.apache.geode.cache.query.data.PortfolioPdx) PdxString(org.apache.geode.pdx.internal.PdxString) ClientCacheFactory(org.apache.geode.cache.client.ClientCacheFactory) Struct(org.apache.geode.cache.query.Struct) PdxInstanceEnum(org.apache.geode.pdx.internal.PdxInstanceEnum) SelectResults(org.apache.geode.cache.query.SelectResults) CacheServer(org.apache.geode.cache.server.CacheServer) GemFireCacheImpl(org.apache.geode.internal.cache.GemFireCacheImpl) StructSetOrResultsSet(org.apache.geode.cache.query.functional.StructSetOrResultsSet) Host(org.apache.geode.test.dunit.Host) ClientCache(org.apache.geode.cache.client.ClientCache) PdxString(org.apache.geode.pdx.internal.PdxString) CacheException(org.apache.geode.cache.CacheException) PdxInstance(org.apache.geode.pdx.PdxInstance) QueryService(org.apache.geode.cache.query.QueryService) VM(org.apache.geode.test.dunit.VM) SerializableCallable(org.apache.geode.test.dunit.SerializableCallable) Region(org.apache.geode.cache.Region) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)

Example 69 with Struct

use of org.apache.geode.cache.query.Struct in project geode by apache.

In the class QueryUtils, the method setIndexFieldValuesInRespectiveIterators:

/**
 * Sets the value(s) extracted from an index result as the "current" value of the
 * corresponding runtime iterators so that dependent query expressions can be evaluated
 * against them.
 *
 * <p>If the mapping has length 1 the index produced a plain result set and the single
 * value is installed directly (cut-down must not be required in that case). Otherwise
 * {@code value} is a {@link Struct} whose fields are distributed to the mapped iterators;
 * a {@code null} slot in {@code indexFieldToItrsMapping} means that field is to be
 * ignored (bypassed). When cut-down is needed, the subset of fields that participate is
 * folded into a key and checked against {@code icdeh.checkSet} so the same tuple is not
 * expanded twice.
 *
 * @param value the index result: a single value, or a {@link Struct} of field values
 * @param indexFieldToItrsMapping maps each struct field position to its runtime
 *        iterator; {@code null} entries are skipped
 * @param icdeh tracks whether cut-down is needed and which tuples were already seen
 * @return {@code false} if cut-down is needed and this combination of check fields was
 *         already seen (the tuple is a duplicate expansion and should be skipped);
 *         {@code true} otherwise
 */
private static boolean setIndexFieldValuesInRespectiveIterators(Object value, RuntimeIterator[] indexFieldToItrsMapping, IndexCutDownExpansionHelper icdeh) {
    boolean select = true;
    int len = indexFieldToItrsMapping.length;
    if (len == 1) {
        // this means we have a ResultSet
        Support.Assert(!icdeh.cutDownNeeded, "If the index fields to iter mapping is of size 1 then cut down cannot occur");
        indexFieldToItrsMapping[0].setCurrent(value);
    } else {
        Struct struct = (Struct) value;
        Object[] fieldValues = struct.getFieldValues();
        int size = fieldValues.length;
        Object[] checkFields = null;
        if (icdeh.cutDownNeeded)
            checkFields = new Object[icdeh.checkSize];
        int j = 0;
        RuntimeIterator rItr = null;
        for (int i = 0; i < size; i++) {
            rItr = indexFieldToItrsMapping[i];
            if (rItr != null) {
                // Install this field as the iterator's current value; collect it for
                // the duplicate check when cut-down is in effect.
                rItr.setCurrent(fieldValues[i]);
                if (icdeh.cutDownNeeded) {
                    checkFields[j++] = fieldValues[i];
                }
            }
        }
        if (icdeh.cutDownNeeded) {
            // Build the duplicate-detection key: the bare field for a single check
            // field, or a StructImpl wrapping all of them otherwise.
            Object temp = null;
            if (icdeh.checkSize == 1) {
                temp = checkFields[0];
            } else {
                temp = new StructImpl((StructTypeImpl) icdeh.checkType, checkFields);
            }
            if (icdeh.checkSet.contains(temp)) {
                // Already expanded this tuple — signal the caller to skip it.
                select = false;
            } else {
                icdeh.checkSet.add(temp);
            }
        }
    }
    return select;
}
Also used : StructTypeImpl(org.apache.geode.cache.query.internal.types.StructTypeImpl) Struct(org.apache.geode.cache.query.Struct)

Example 70 with Struct

use of org.apache.geode.cache.query.Struct in project geode by apache.

In the class NWayMergeResults, the method toData:

/**
 * Serializes this result collection as: element type, the distinct flag, then the
 * element count followed by the elements. Because the count is only known after
 * iterating, elements are written into a heap buffer with a reserved long slot for
 * the count, which is back-patched before the buffer is flushed to {@code out}.
 *
 * @param out the destination stream
 * @throws IOException if writing to the stream fails
 */
// TODO: optimize for struct elements by writing the fields directly instead of the struct
@Override
public void toData(DataOutput out) throws IOException {
    boolean isStruct = this.collectionType.getElementType().isStructType();
    DataSerializer.writeObject(this.collectionType.getElementType(), out);
    DataSerializer.writePrimitiveBoolean(this.isDistinct, out);
    HeapDataOutputStream hdos = new HeapDataOutputStream(1024, null);
    // Reserve space for the element count; it is patched in after iteration.
    LongUpdater lu = hdos.reserveLong();
    Iterator<E> iter = this.iterator();
    int numElements = 0;
    while (iter.hasNext()) {
        E data = iter.next();
        if (isStruct) {
            Object[] fields = ((Struct) data).getFieldValues();
            // FIX: write the struct fields into the buffered stream (hdos), not
            // directly to 'out'. Writing to 'out' here emitted struct payloads
            // BEFORE the element count (which is flushed with hdos below),
            // corrupting the wire format for struct results.
            DataSerializer.writeObjectArray(fields, hdos);
        } else {
            DataSerializer.writeObject(data, hdos);
        }
        ++numElements;
    }
    lu.update(numElements);
    // Flush count + elements to the real output in the correct order.
    hdos.sendTo(out);
}
Also used : LongUpdater(org.apache.geode.internal.HeapDataOutputStream.LongUpdater) HeapDataOutputStream(org.apache.geode.internal.HeapDataOutputStream) Struct(org.apache.geode.cache.query.Struct)

Aggregations

Struct (org.apache.geode.cache.query.Struct)110 SelectResults (org.apache.geode.cache.query.SelectResults)80 QueryService (org.apache.geode.cache.query.QueryService)70 Region (org.apache.geode.cache.Region)67 Test (org.junit.Test)66 Query (org.apache.geode.cache.query.Query)57 Iterator (java.util.Iterator)54 Portfolio (org.apache.geode.cache.query.data.Portfolio)46 DefaultQuery (org.apache.geode.cache.query.internal.DefaultQuery)30 ObjectType (org.apache.geode.cache.query.types.ObjectType)30 CompiledSelect (org.apache.geode.cache.query.internal.CompiledSelect)27 StructType (org.apache.geode.cache.query.types.StructType)26 HashSet (java.util.HashSet)19 List (java.util.List)17 PortfolioPdx (org.apache.geode.cache.query.data.PortfolioPdx)17 CacheException (org.apache.geode.cache.CacheException)16 IntegrationTest (org.apache.geode.test.junit.categories.IntegrationTest)16 ArrayList (java.util.ArrayList)15 Host (org.apache.geode.test.dunit.Host)14 VM (org.apache.geode.test.dunit.VM)14