
Example 26 with CancelException

Use of org.apache.geode.CancelException in the Apache Geode project.

The class ConnectionPoolDUnitTest, method basicTestBridgeServerFailover:

private void basicTestBridgeServerFailover(final int cnxCount) throws CacheException {
    final String name = this.getName();
    final Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    VM vm2 = host.getVM(2);
    // Create two bridge servers
    SerializableRunnable createCacheServer = new CacheSerializableRunnable("Create Cache Server") {

        public void run2() throws CacheException {
            AttributesFactory factory = getBridgeServerRegionAttributes(null, null);
            createRegion(name, factory.create());
            // pause(1000);
            try {
                startBridgeServer(0);
            } catch (Exception ex) {
                org.apache.geode.test.dunit.Assert.fail("While starting CacheServer", ex);
            }
        }
    };
    vm0.invoke(createCacheServer);
    vm1.invoke(createCacheServer);
    final int port0 = vm0.invoke(() -> ConnectionPoolDUnitTest.getCacheServerPort());
    final String host0 = NetworkUtils.getServerHostName(vm0.getHost());
    final int port1 = vm1.invoke(() -> ConnectionPoolDUnitTest.getCacheServerPort());
    // final String host1 = getServerHostName(vm1.getHost());
    // Create one bridge client in this VM
    SerializableRunnable create = new CacheSerializableRunnable("Create region") {

        public void run2() throws CacheException {
            getLonerSystem();
            getCache();
            AttributesFactory factory = new AttributesFactory();
            factory.setScope(Scope.LOCAL);
            factory.setConcurrencyChecksEnabled(false);
            ClientServerTestCase.configureConnectionPool(factory, host0, port0, port1, true, -1, cnxCount, null, 100);
            Region region = createRegion(name, factory.create());
            // force connections to form
            region.put("keyInit", new Integer(0));
            region.put("keyInit2", new Integer(0));
        }
    };
    vm2.invoke(create);
    // Launch an async thread that puts objects into the cache. This thread executes until
    // the test ends, which is why NoAvailableServersException, RegionDestroyedException,
    // and CancelException are caught and ignored. If any other exception occurs, the test
    // fails; see the commented-out putAI.exceptionOccurred() assertion below.
    AsyncInvocation putAI = vm2.invokeAsync(new CacheSerializableRunnable("Put objects") {

        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            try {
                for (int i = 0; i < 100000; i++) {
                    region.put("keyAI", new Integer(i));
                    try {
                        Thread.sleep(100);
                    } catch (InterruptedException ie) {
                        fail("interrupted");
                    }
                }
            } catch (NoAvailableServersException ignore) {
            /* ignore */
            } catch (RegionDestroyedException e) {
            // will be thrown when the test ends
            /* ignore */
            } catch (CancelException e) {
            // will be thrown when the test ends
            /* ignore */
            }
        }
    });
    SerializableRunnable verify1Server = new CacheSerializableRunnable("verify1Server") {

        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            PoolImpl pool = getPool(region);
            verifyServerCount(pool, 1);
        }
    };
    SerializableRunnable verify2Servers = new CacheSerializableRunnable("verify2Servers") {

        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            PoolImpl pool = getPool(region);
            verifyServerCount(pool, 2);
        }
    };
    vm2.invoke(verify2Servers);
    SerializableRunnable stopCacheServer = new SerializableRunnable("Stop CacheServer") {

        public void run() {
            stopBridgeServer(getCache());
        }
    };
    final String expected = "java.io.IOException";
    final String addExpected = "<ExpectedException action=add>" + expected + "</ExpectedException>";
    final String removeExpected = "<ExpectedException action=remove>" + expected + "</ExpectedException>";
    vm2.invoke(new SerializableRunnable() {

        public void run() {
            LogWriter bgexecLogger = new LocalLogWriter(InternalLogWriter.ALL_LEVEL, System.out);
            bgexecLogger.info(addExpected);
        }
    });
    try {
        // the finally block below makes sure removeExpected is logged
        // Bounce the non-current server (I know that VM1 contains the non-current server
        // because ...)
        vm1.invoke(stopCacheServer);
        vm2.invoke(verify1Server);
        final int restartPort = port1;
        vm1.invoke(new SerializableRunnable("Restart CacheServer") {

            public void run() {
                try {
                    Region region = getRootRegion().getSubregion(name);
                    assertNotNull(region);
                    startBridgeServer(restartPort);
                } catch (Exception e) {
                    getSystem().getLogWriter().fine(new Exception(e));
                    org.apache.geode.test.dunit.Assert.fail("Failed to start CacheServer", e);
                }
            }
        });
        // Pause long enough for the monitor to realize the server has been bounced
        // and reconnect to it.
        vm2.invoke(verify2Servers);
    } finally {
        vm2.invoke(new SerializableRunnable() {

            public void run() {
                LogWriter bgexecLogger = new LocalLogWriter(InternalLogWriter.ALL_LEVEL, System.out);
                bgexecLogger.info(removeExpected);
            }
        });
    }
    // Stop the other cache server
    vm0.invoke(stopCacheServer);
    // Run awhile
    vm2.invoke(verify1Server);
    // FIXME: this thread does not terminate
    org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("FIXME: this thread does not terminate");
    // // Verify that no exception has occurred in the putter thread
    // join(putAI, 5 * 60 * 1000, getLogWriter());
    // //assertTrue("Exception occurred while invoking " + putAI, !putAI.exceptionOccurred());
    // if (putAI.exceptionOccurred()) {
    // fail("While putting entries: ", putAI.getException());
    // }
    // Close Pool
    vm2.invoke(new CacheSerializableRunnable("Close Pool") {

        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            region.localDestroyRegion();
        }
    });
    // Stop the last cache server
    vm1.invoke(stopCacheServer);
}
Also used: NoAvailableServersException (org.apache.geode.cache.client.NoAvailableServersException), SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable), CacheSerializableRunnable (org.apache.geode.cache30.CacheSerializableRunnable), Host (org.apache.geode.test.dunit.Host), AsyncInvocation (org.apache.geode.test.dunit.AsyncInvocation), PoolImpl (org.apache.geode.cache.client.internal.PoolImpl), LocalLogWriter (org.apache.geode.internal.logging.LocalLogWriter), CancelException (org.apache.geode.CancelException), IOException (java.io.IOException), Endpoint (org.apache.geode.cache.client.internal.Endpoint), InternalLogWriter (org.apache.geode.internal.logging.InternalLogWriter), LogWriter (org.apache.geode.LogWriter), VM (org.apache.geode.test.dunit.VM), LocalRegion (org.apache.geode.internal.cache.LocalRegion)
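
A minimal standalone sketch of the failover-tolerant put loop used above (not part of the Geode sources; the class and method names are illustrative). It keeps writing to a client region while servers are bounced and swallows only the exceptions that signal failover or shutdown:

import org.apache.geode.CancelException;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionDestroyedException;
import org.apache.geode.cache.client.NoAvailableServersException;

final class FailoverPutLoop {

    // Writes until the test tears the region or cache down; only failover- and
    // shutdown-related exceptions are ignored, anything else propagates and fails the test.
    static void putUntilStopped(Region<String, Integer> region) {
        try {
            for (int i = 0; i < 100_000; i++) {
                region.put("keyAI", i);
                Thread.sleep(100);
            }
        } catch (InterruptedException ie) {
            Thread.currentThread().interrupt(); // preserve the interrupt and stop
        } catch (NoAvailableServersException | RegionDestroyedException | CancelException expected) {
            // thrown while the servers are restarted or when the cache closes at test end
        }
    }
}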

Example 27 with CancelException

Use of org.apache.geode.CancelException in the Apache Geode project.

The class PRQueryDUnitHelper, method getCacheSerializableRunnableForPROrderByQueryWithLimit:

public CacheSerializableRunnable getCacheSerializableRunnableForPROrderByQueryWithLimit(final String regionName, final String localRegion) {
    SerializableRunnable PrRegion = new CacheSerializableRunnable("PRQuery") {

        public void run2() throws CacheException {
            Cache cache = getCache();
            // Querying the localRegion and the PR region
            String[] queries = new String[] { "status as st from /REGION_NAME order by status", "p.status from /REGION_NAME p order by p.status", "p.position1.secId, p.ID from /REGION_NAME p order by p.position1.secId, p.ID desc", "key from /REGION_NAME.keys key order by key.status, key.ID", "key.ID from /REGION_NAME.keys key order by key.ID", "key.ID, key.status from /REGION_NAME.keys key order by key.status, key.ID asc", "key.ID, key.status from /REGION_NAME.keys key order by key.status desc, key.ID", "p.status, p.ID from /REGION_NAME p order by p.status asc, p.ID", "p.ID from /REGION_NAME p, p.positions.values order by p.ID", "* from /REGION_NAME p, p.positions.values val order by p.ID, val.secId", "p.iD, p.status from /REGION_NAME p order by p.iD", "iD, status from /REGION_NAME order by iD", "* from /REGION_NAME p order by p.getID()", "* from /REGION_NAME p order by p.getP1().secId, p.ID desc, p.ID", " p.position1.secId , p.ID as st from /REGION_NAME p order by p.position1.secId, p.ID", "e.key.ID, e.value.status from /REGION_NAME.entrySet e order by e.key.ID, e.value.status desc", "e.key from /REGION_NAME.entrySet e order by e.key.ID, e.key.pkid desc", "p, pos from /REGION_NAME p, p.positions.values pos order by p.ID, pos.secId desc", "p, pos from /REGION_NAME p, p.positions.values pos order by pos.secId, p.ID", "status , ID as ied from /REGION_NAME where ID > 0 order by status, ID desc", "p.status as st, p.ID as id from /REGION_NAME p where ID > 0 and status = 'inactive' order by p.status, p.ID desc", "p.position1.secId as st, p.ID as ied from /REGION_NAME p where p.ID > 0 and p.position1.secId != 'IBM' order by p.position1.secId, p.ID", " key.status as st, key.ID from /REGION_NAME.keys key where key.ID > 5 order by key.status, key.ID desc", " key.ID, key.status as st from /REGION_NAME.keys key where key.status = 'inactive' order by key.status desc, key.ID" };
            Object[][] r = new Object[queries.length][2];
            Region local = cache.getRegion(localRegion);
            Region region = cache.getRegion(regionName);
            assertNotNull(region);
            final String[] expectedExceptions = new String[] { RegionDestroyedException.class.getName(), ReplyException.class.getName(), CacheClosedException.class.getName(), ForceReattemptException.class.getName(), QueryInvocationTargetException.class.getName() };
            for (final String expectedException : expectedExceptions) {
                getCache().getLogger().info("<ExpectedException action=add>" + expectedException + "</ExpectedException>");
            }
            String distinct = "<TRACE>SELECT DISTINCT ";
            QueryService qs = getCache().getQueryService();
            Object[] params;
            try {
                for (int l = 1; l <= 3; l++) {
                    String[] rq = new String[queries.length];
                    for (int j = 0; j < queries.length; j++) {
                        String qStr = null;
                        synchronized (region) {
                            // Execute on local region.
                            qStr = (distinct + queries[j].replace("REGION_NAME", localRegion));
                            qStr += (" LIMIT " + (l * l));
                            rq[j] = qStr;
                            SelectResults sr = (SelectResults) qs.newQuery(qStr).execute();
                            r[j][0] = sr;
                            if (sr.asList().size() > l * l) {
                                fail("The resultset size exceeds limit size. Limit size=" + l * l + ", result size =" + sr.asList().size());
                            }
                            // Execute on remote region.
                            qStr = (distinct + queries[j].replace("REGION_NAME", regionName));
                            qStr += (" LIMIT " + (l * l));
                            rq[j] = qStr;
                            SelectResults srr = (SelectResults) qs.newQuery(qStr).execute();
                            r[j][1] = srr;
                            if (srr.size() > l * l) {
                                fail("The resultset size exceeds limit size. Limit size=" + l * l + ", result size =" + srr.asList().size());
                            }
                        // assertIndexDetailsEquals("The resultset size is not same as limit size.", l*l,
                        // srr.asList().size());
                        // getCache().getLogger().info("Finished executing PR query: " + qStr);
                        }
                    }
                    StructSetOrResultsSet ssORrs = new StructSetOrResultsSet();
                    ssORrs.CompareQueryResultsWithoutAndWithIndexes(r, queries.length, true, rq);
                }
                org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Queries Executed successfully on Local region & PR Region");
            } catch (QueryInvocationTargetException e) {
                // throw an unchecked exception so the controller can examine the cause and see whether or not it's okay
                throw new TestException("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception", e);
            } catch (QueryException e) {
                org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().error("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught QueryException while querying" + e, e);
                throw new TestException("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception", e);
            } catch (RegionDestroyedException rde) {
                org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a RegionDestroyedException while querying as expected ", rde);
            } catch (CancelException cce) {
                org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a CancelException while querying as expected ", cce);
            } finally {
                for (final String expectedException : expectedExceptions) {
                    getCache().getLogger().info("<ExpectedException action=remove>" + expectedException + "</ExpectedException>");
                }
            }
        }
    };
    return (CacheSerializableRunnable) PrRegion;
}
Also used: StructSetOrResultsSet (org.apache.geode.cache.query.functional.StructSetOrResultsSet), TestException (util.TestException), RegionDestroyedException (org.apache.geode.cache.RegionDestroyedException), SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable), CacheSerializableRunnable (org.apache.geode.cache30.CacheSerializableRunnable), QueryInvocationTargetException (org.apache.geode.cache.query.QueryInvocationTargetException), QueryException (org.apache.geode.cache.query.QueryException), SelectResults (org.apache.geode.cache.query.SelectResults), QueryService (org.apache.geode.cache.query.QueryService), LocalRegion (org.apache.geode.internal.cache.LocalRegion), PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion), Region (org.apache.geode.cache.Region), CancelException (org.apache.geode.CancelException), Cache (org.apache.geode.cache.Cache)
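
Example 27 brackets the query execution with <ExpectedException action=add> and action=remove markers so the dunit log scanner tolerates the listed exceptions while the queries run. A minimal sketch of that bracketing pattern, assuming only an open Cache and an arbitrary block of work (the class and method names here are illustrative, not Geode API):

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheClosedException;
import org.apache.geode.cache.RegionDestroyedException;

final class ExpectedExceptionBracket {

    // Logs the add markers, runs the work, and always logs the matching remove markers,
    // mirroring the try/finally structure used in the helper above.
    static void runWithExpectedExceptions(Cache cache, Runnable work) {
        String[] expected = { RegionDestroyedException.class.getName(), CacheClosedException.class.getName() };
        for (String name : expected) {
            cache.getLogger().info("<ExpectedException action=add>" + name + "</ExpectedException>");
        }
        try {
            work.run();
        } finally {
            for (String name : expected) {
                cache.getLogger().info("<ExpectedException action=remove>" + name + "</ExpectedException>");
            }
        }
    }
}

The remove markers sit in a finally block so they are logged even when the bracketed work throws.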

Example 28 with CancelException

Use of org.apache.geode.CancelException in the Apache Geode project.

The class PRQueryDUnitHelper, method getCacheSerializableRunnableForPROrderByQueryAndCompareResults:

public CacheSerializableRunnable getCacheSerializableRunnableForPROrderByQueryAndCompareResults(final String regionName, final String localRegion) {
    SerializableRunnable PrRegion = new CacheSerializableRunnable("PRQuery") {

        public void run2() throws CacheException {
            Cache cache = getCache();
            // Querying the localRegion and the PR region
            String[] queries = new String[] { "p.status from /REGION_NAME p order by p.status", "* from /REGION_NAME order by status, ID desc", "status, ID from /REGION_NAME order by status", "p.status, p.ID from /REGION_NAME p order by p.status", "p.position1.secId, p.ID from /REGION_NAME p order by p.position1.secId", "key from /REGION_NAME.keys key order by key.status", "key.ID from /REGION_NAME.keys key order by key.ID", "key.ID, key.status from /REGION_NAME.keys key order by key.status", "key.ID, key.status from /REGION_NAME.keys key order by key.status, key.ID", "key.ID, key.status from /REGION_NAME.keys key order by key.status desc, key.ID", "key.ID, key.status from /REGION_NAME.keys key order by key.status, key.ID desc", "p.status, p.ID from /REGION_NAME p order by p.status asc, p.ID", "* from /REGION_NAME p order by p.status, p.ID", "p.ID from /REGION_NAME p, p.positions.values order by p.ID", "* from /REGION_NAME p, p.positions.values order by p.ID", "p.ID, p.status from /REGION_NAME p, p.positions.values order by p.status", "pos.secId from /REGION_NAME p, p.positions.values pos order by pos.secId", "p.ID, pos.secId from /REGION_NAME p, p.positions.values pos order by pos.secId", "* from /REGION_NAME p order by p.iD", "p.iD from /REGION_NAME p order by p.iD", "p.iD, p.status from /REGION_NAME p order by p.iD", "iD, status from /REGION_NAME order by iD", "* from /REGION_NAME p order by p.getID()", "p.getID() from /REGION_NAME p order by p.getID()", "* from /REGION_NAME p order by p.names[1]", "* from /REGION_NAME p order by p.getP1().secId", "* from /REGION_NAME p order by p.getP1().getSecId()", "* from /REGION_NAME p order by p.position1.secId", "p.ID, p.position1.secId from /REGION_NAME p order by p.position1.secId", "p.position1.secId, p.ID from /REGION_NAME p order by p.position1.secId", "e.key.ID from /REGION_NAME.entries e order by e.key.ID", "e.key.ID, e.value.status from /REGION_NAME.entries e order by e.key.ID", "e.key.ID, e.value.status from /REGION_NAME.entrySet e order by e.key.ID, e.value.status desc", "e.key, e.value from /REGION_NAME.entrySet e order by e.key.ID, e.value.status desc", "e.key from /REGION_NAME.entrySet e order by e.key.ID, e.key.pkid desc", "p, pos from /REGION_NAME p, p.positions.values pos order by p.ID", "p, pos from /REGION_NAME p, p.positions.values pos order by pos.secId", "p, pos from /REGION_NAME p, p.positions.values pos order by p.ID, pos.secId" };
            Object[][] r = new Object[queries.length][2];
            Region local = cache.getRegion(localRegion);
            Region region = cache.getRegion(regionName);
            assertNotNull(region);
            final String[] expectedExceptions = new String[] { RegionDestroyedException.class.getName(), ReplyException.class.getName(), CacheClosedException.class.getName(), ForceReattemptException.class.getName(), QueryInvocationTargetException.class.getName() };
            for (final String expectedException : expectedExceptions) {
                getCache().getLogger().info("<ExpectedException action=add>" + expectedException + "</ExpectedException>");
            }
            String distinct = "SELECT DISTINCT ";
            QueryService qs = getCache().getQueryService();
            Object[] params;
            try {
                for (int j = 0; j < queries.length; j++) {
                    String qStr = null;
                    synchronized (region) {
                        // Execute on local region.
                        qStr = (distinct + queries[j].replace("REGION_NAME", localRegion));
                        r[j][0] = qs.newQuery(qStr).execute();
                        // Execute on remote region.
                        qStr = (distinct + queries[j].replace("REGION_NAME", regionName));
                        r[j][1] = qs.newQuery(qStr).execute();
                    }
                }
                org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Queries Executed successfully on Local region & PR Region");
                StructSetOrResultsSet ssORrs = new StructSetOrResultsSet();
                ssORrs.CompareQueryResultsWithoutAndWithIndexes(r, queries.length, queries);
            } catch (QueryInvocationTargetException e) {
                // throw an unchecked exception so the controller can examine the cause and see whether or not it's okay
                throw new TestException("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception", e);
            } catch (QueryException e) {
                org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().error("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught QueryException while querying" + e, e);
                throw new TestException("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception", e);
            } catch (RegionDestroyedException rde) {
                org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a RegionDestroyedException while querying as expected ", rde);
            } catch (CancelException cce) {
                org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a CancelException while querying as expected ", cce);
            } finally {
                for (final String expectedException : expectedExceptions) {
                    getCache().getLogger().info("<ExpectedException action=remove>" + expectedException + "</ExpectedException>");
                }
            }
        }
    };
    return (CacheSerializableRunnable) PrRegion;
}
Also used: StructSetOrResultsSet (org.apache.geode.cache.query.functional.StructSetOrResultsSet), TestException (util.TestException), RegionDestroyedException (org.apache.geode.cache.RegionDestroyedException), SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable), CacheSerializableRunnable (org.apache.geode.cache30.CacheSerializableRunnable), QueryInvocationTargetException (org.apache.geode.cache.query.QueryInvocationTargetException), QueryException (org.apache.geode.cache.query.QueryException), QueryService (org.apache.geode.cache.query.QueryService), LocalRegion (org.apache.geode.internal.cache.LocalRegion), PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion), Region (org.apache.geode.cache.Region), CancelException (org.apache.geode.CancelException), Cache (org.apache.geode.cache.Cache)
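
Examples 27 and 28 share the same catch ladder: any QueryException (including QueryInvocationTargetException) is rethrown as a test failure, while RegionDestroyedException and CancelException are logged and treated as expected shutdown noise. A condensed sketch of that classification (illustrative names; RuntimeException stands in for util.TestException from the original helpers):

import org.apache.geode.CancelException;
import org.apache.geode.cache.RegionDestroyedException;
import org.apache.geode.cache.query.QueryException;
import org.apache.geode.cache.query.QueryService;

final class QueryCatchLadder {

    // Runs one OQL query and classifies the outcome the way the helpers above do.
    static Object runQuery(QueryService qs, String oql) {
        try {
            return qs.newQuery(oql).execute();
        } catch (QueryException e) {
            // unexpected: surface it as a test failure
            throw new RuntimeException("Caught unexpected query exception", e);
        } catch (RegionDestroyedException | CancelException expected) {
            // expected when the region or the cache goes away while the query is in flight
            return null;
        }
    }
}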

Example 29 with CancelException

Use of org.apache.geode.CancelException in the Apache Geode project.

The class PRQueryDUnitHelper, method getCacheSerializableRunnableForPRAndRRQueryWithCompactAndRangeIndexAndCompareResults:

public SerializableRunnableIF getCacheSerializableRunnableForPRAndRRQueryWithCompactAndRangeIndexAndCompareResults(final String name, final String coloName, final String localName, final String coloLocalName) {
    SerializableRunnable PrRegion = new CacheSerializableRunnable("PRQuery") {

        @Override
        public void run2() throws CacheException {
            Cache cache = getCache();
            // Querying the PR region
            String[] queries = new String[] { "r1.ID = pos2.id", "r1.ID = pos2.id AND r1.ID > 5", "r1.ID = pos2.id AND r1.status = 'active'", "r1.ID = pos2.id ORDER BY r1.ID", "r1.ID = pos2.id ORDER BY pos2.id", "r1.ID = pos2.id ORDER BY r2.status", "r1.ID = pos2.id AND r1.status != r2.status", "r1.ID = pos2.id AND r1.status = r2.status", "r1.ID = pos2.id AND r1.positions.size = r2.positions.size", "r1.ID = pos2.id AND r1.positions.size > r2.positions.size", "r1.ID = pos2.id AND r1.positions.size < r2.positions.size", "r1.ID = pos2.id AND r1.positions.size = r2.positions.size AND r2.positions.size > 0", "r1.ID = pos2.id AND (r1.positions.size > r2.positions.size OR r2.positions.size > 0)", "r1.ID = pos2.id AND (r1.positions.size < r2.positions.size OR r1.positions.size > 0)" };
            Object[][] r = new Object[queries.length][2];
            Region region = null;
            region = cache.getRegion(name);
            assertNotNull(region);
            region = cache.getRegion(coloName);
            assertNotNull(region);
            region = cache.getRegion(localName);
            assertNotNull(region);
            region = cache.getRegion(coloLocalName);
            assertNotNull(region);
            final String[] expectedExceptions = new String[] { RegionDestroyedException.class.getName(), ReplyException.class.getName(), CacheClosedException.class.getName(), ForceReattemptException.class.getName(), QueryInvocationTargetException.class.getName() };
            for (final String expectedException : expectedExceptions) {
                getCache().getLogger().info("<ExpectedException action=add>" + expectedException + "</ExpectedException>");
            }
            QueryService qs = getCache().getQueryService();
            try {
                for (int j = 0; j < queries.length; j++) {
                    getCache().getLogger().info("About to execute local query: " + queries[j]);
                    Function func = new TestQueryFunction("testfunction");
                    Object funcResult = FunctionService.onRegion((getCache().getRegion(name) instanceof PartitionedRegion) ? getCache().getRegion(name) : getCache().getRegion(coloName)).setArguments("<trace> Select " + (queries[j].contains("ORDER BY") ? "DISTINCT" : "") + " * from /" + name + " r1, /" + coloName + " r2, r2.positions.values pos2 where " + queries[j]).execute(func).getResult();
                    r[j][0] = ((ArrayList) funcResult).get(0);
                    getCache().getLogger().info("About to execute local query: " + queries[j]);
                    SelectResults r2 = (SelectResults) qs.newQuery("Select " + (queries[j].contains("ORDER BY") ? "DISTINCT" : "") + " * from /" + localName + " r1, /" + coloLocalName + " r2, r2.positions.values pos2 where " + queries[j]).execute();
                    r[j][1] = r2.asList();
                }
                org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Queries Executed successfully on Local region & PR Region");
                StructSetOrResultsSet ssORrs = new StructSetOrResultsSet();
                ssORrs.CompareQueryResultsAsListWithoutAndWithIndexes(r, queries.length, false, false, queries);
            } catch (QueryInvocationTargetException e) {
                // throw an unchecked exception so the controller can examine the cause and see whether or not it's okay
                throw new TestException("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception", e);
            } catch (QueryException e) {
                org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().error("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught QueryException while querying" + e, e);
                throw new TestException("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception", e);
            } catch (RegionDestroyedException rde) {
                org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a RegionDestroyedException while querying as expected ", rde);
            } catch (CancelException cce) {
                org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a CancelException while querying as expected ", cce);
            } finally {
                for (final String expectedException : expectedExceptions) {
                    getCache().getLogger().info("<ExpectedException action=remove>" + expectedException + "</ExpectedException>");
                }
            }
        }
    };
    return (CacheSerializableRunnable) PrRegion;
}
Also used: StructSetOrResultsSet (org.apache.geode.cache.query.functional.StructSetOrResultsSet), TestException (util.TestException), RegionDestroyedException (org.apache.geode.cache.RegionDestroyedException), SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable), CacheSerializableRunnable (org.apache.geode.cache30.CacheSerializableRunnable), QueryInvocationTargetException (org.apache.geode.cache.query.QueryInvocationTargetException), Function (org.apache.geode.cache.execute.Function), QueryException (org.apache.geode.cache.query.QueryException), SelectResults (org.apache.geode.cache.query.SelectResults), QueryService (org.apache.geode.cache.query.QueryService), PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion), LocalRegion (org.apache.geode.internal.cache.LocalRegion), Region (org.apache.geode.cache.Region), CancelException (org.apache.geode.CancelException), Cache (org.apache.geode.cache.Cache)
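
Examples 29 and 30 run each query twice: once shipped to the data nodes through FunctionService.onRegion with the OQL passed as the function argument, and once locally through the QueryService, so the two result sets can be compared. A minimal sketch of the function-execution side, assuming queryFunction behaves like the test-only TestQueryFunction (it executes the OQL it receives and returns the results as a single-element list); class and method names are illustrative:

import java.util.List;

import org.apache.geode.cache.Region;
import org.apache.geode.cache.execute.Execution;
import org.apache.geode.cache.execute.Function;
import org.apache.geode.cache.execute.FunctionService;

final class OnRegionQuery {

    // Ships the OQL string to the members hosting the region and returns the first
    // element of the collected results, mirroring the helper above.
    @SuppressWarnings({ "rawtypes", "unchecked" })
    static Object queryViaFunction(Region<?, ?> region, Function queryFunction, String oql) {
        Execution execution = FunctionService.onRegion(region).setArguments(oql);
        List<Object> results = (List<Object>) execution.execute(queryFunction).getResult();
        return results.get(0);
    }
}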

Example 30 with CancelException

Use of org.apache.geode.CancelException in the Apache Geode project.

The class PRQueryDUnitHelper, method getCacheSerializableRunnableForPRColocatedDataSetQueryAndCompareResults:

public SerializableRunnableIF getCacheSerializableRunnableForPRColocatedDataSetQueryAndCompareResults(final String name, final String coloName, final String localName, final String coloLocalName) {
    SerializableRunnable PrRegion = new CacheSerializableRunnable("PRQuery") {

        @Override
        public void run2() throws CacheException {
            Cache cache = getCache();
            // Querying the PR region
            String[] queries = new String[] { "r1.ID = r2.id", "r1.ID = r2.id AND r1.ID > 5", "r1.ID = r2.id AND r1.status = 'active'", // "r1.ID = r2.id LIMIT 10",
            "r1.ID = r2.id ORDER BY r1.ID", "r1.ID = r2.id ORDER BY r2.id", "r1.ID = r2.id ORDER BY r2.status", "r1.ID = r2.id AND r1.status != r2.status", "r1.ID = r2.id AND r1.status = r2.status", "r1.ID = r2.id AND r1.positions.size = r2.positions.size", "r1.ID = r2.id AND r1.positions.size > r2.positions.size", "r1.ID = r2.id AND r1.positions.size < r2.positions.size", "r1.ID = r2.id AND r1.positions.size = r2.positions.size AND r2.positions.size > 0", "r1.ID = r2.id AND (r1.positions.size > r2.positions.size OR r2.positions.size > 0)", "r1.ID = r2.id AND (r1.positions.size < r2.positions.size OR r1.positions.size > 0)" };
            Object[][] r = new Object[queries.length][2];
            Region region = null;
            region = cache.getRegion(name);
            assertNotNull(region);
            region = cache.getRegion(coloName);
            assertNotNull(region);
            region = cache.getRegion(localName);
            assertNotNull(region);
            region = cache.getRegion(coloLocalName);
            assertNotNull(region);
            final String[] expectedExceptions = new String[] { RegionDestroyedException.class.getName(), ReplyException.class.getName(), CacheClosedException.class.getName(), ForceReattemptException.class.getName(), QueryInvocationTargetException.class.getName() };
            for (final String expectedException : expectedExceptions) {
                getCache().getLogger().info("<ExpectedException action=add>" + expectedException + "</ExpectedException>");
            }
            QueryService qs = getCache().getQueryService();
            Object[] params;
            try {
                for (int j = 0; j < queries.length; j++) {
                    getCache().getLogger().info("About to execute local query: " + queries[j]);
                    Function func = new TestQueryFunction("testfunction");
                    Object funcResult = FunctionService.onRegion((getCache().getRegion(name) instanceof PartitionedRegion) ? getCache().getRegion(name) : getCache().getRegion(coloName)).setArguments("<trace> Select " + (queries[j].contains("ORDER BY") ? "DISTINCT" : "") + " * from /" + name + " r1, /" + coloName + " r2 where " + queries[j]).execute(func).getResult();
                    r[j][0] = ((ArrayList) funcResult).get(0);
                    getCache().getLogger().info("About to execute local query: " + queries[j]);
                    SelectResults r2 = (SelectResults) qs.newQuery("Select " + (queries[j].contains("ORDER BY") ? "DISTINCT" : "") + " * from /" + localName + " r1, /" + coloLocalName + " r2 where " + queries[j]).execute();
                    r[j][1] = r2.asList();
                }
                org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Queries Executed successfully on Local region & PR Region");
                // compareTwoQueryResults(r, queries.length);
                StructSetOrResultsSet ssORrs = new StructSetOrResultsSet();
                ssORrs.CompareQueryResultsAsListWithoutAndWithIndexes(r, queries.length, false, false, queries);
            } catch (QueryInvocationTargetException e) {
                // throw an unchecked exception so the controller can examine the cause and see whether or not it's okay
                throw new TestException("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception", e);
            } catch (QueryException e) {
                org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().error("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught QueryException while querying" + e, e);
                throw new TestException("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception", e);
            } catch (RegionDestroyedException rde) {
                org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a RegionDestroyedException while querying as expected ", rde);
            } catch (CancelException cce) {
                org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a CancelException while querying as expected ", cce);
            } finally {
                for (final String expectedException : expectedExceptions) {
                    getCache().getLogger().info("<ExpectedException action=remove>" + expectedException + "</ExpectedException>");
                }
            }
        }
    };
    return (CacheSerializableRunnable) PrRegion;
}
Also used: StructSetOrResultsSet (org.apache.geode.cache.query.functional.StructSetOrResultsSet), TestException (util.TestException), RegionDestroyedException (org.apache.geode.cache.RegionDestroyedException), SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable), CacheSerializableRunnable (org.apache.geode.cache30.CacheSerializableRunnable), QueryInvocationTargetException (org.apache.geode.cache.query.QueryInvocationTargetException), Function (org.apache.geode.cache.execute.Function), QueryException (org.apache.geode.cache.query.QueryException), SelectResults (org.apache.geode.cache.query.SelectResults), QueryService (org.apache.geode.cache.query.QueryService), PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion), LocalRegion (org.apache.geode.internal.cache.LocalRegion), Region (org.apache.geode.cache.Region), CancelException (org.apache.geode.CancelException), Cache (org.apache.geode.cache.Cache)

Aggregations

CancelException (org.apache.geode.CancelException): 135
RegionDestroyedException (org.apache.geode.cache.RegionDestroyedException): 46
IOException (java.io.IOException): 40
ReplyException (org.apache.geode.distributed.internal.ReplyException): 30
InternalDistributedMember (org.apache.geode.distributed.internal.membership.InternalDistributedMember): 25
CacheClosedException (org.apache.geode.cache.CacheClosedException): 23
Region (org.apache.geode.cache.Region): 22
PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion): 21
LocalRegion (org.apache.geode.internal.cache.LocalRegion): 18
Set (java.util.Set): 16
Cache (org.apache.geode.cache.Cache): 16
CacheException (org.apache.geode.cache.CacheException): 16
HashSet (java.util.HashSet): 15
Iterator (java.util.Iterator): 15
QueryException (org.apache.geode.cache.query.QueryException): 15
ArrayList (java.util.ArrayList): 13
EntryNotFoundException (org.apache.geode.cache.EntryNotFoundException): 13
QueryInvocationTargetException (org.apache.geode.cache.query.QueryInvocationTargetException): 13
DistributedSystemDisconnectedException (org.apache.geode.distributed.DistributedSystemDisconnectedException): 13
SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable): 13