Example 36 with PortfolioData

use of org.apache.geode.cache.query.data.PortfolioData in project geode by apache.

the class ConcurrentIndexOperationsOnOverflowRegionDUnitTest method testAsyncIndexInitDuringEntryDestroyAndQueryOnPR.

/**
   * Verifies that a concurrent entry destroy and query on an overflow partitioned region with
   * indexes do not deadlock while the index-maintenance test hook is installed.
   */
@Test
public void testAsyncIndexInitDuringEntryDestroyAndQueryOnPR() {
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    hooked = false;
    name = "PartionedPortfoliosPR";
    // Create Overflow Persistent Partition Region
    vm0.invoke(new CacheSerializableRunnable("Create local region with synchronous index maintenance") {

        @Override
        public void run2() throws CacheException {
            Cache cache = getCache();
            Region partitionRegion = null;
            IndexManager.testHook = null;
            try {
                DiskStore ds = cache.findDiskStore("disk");
                if (ds == null) {
                    ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("disk");
                }
                AttributesFactory attr = new AttributesFactory();
                attr.setValueConstraint(PortfolioData.class);
                attr.setIndexMaintenanceSynchronous(true);
                EvictionAttributesImpl evicAttr = new EvictionAttributesImpl().setAction(EvictionAction.OVERFLOW_TO_DISK);
                evicAttr.setAlgorithm(EvictionAlgorithm.LRU_ENTRY).setMaximum(1);
                attr.setEvictionAttributes(evicAttr);
                attr.setDataPolicy(DataPolicy.PARTITION);
                attr.setPartitionAttributes(new PartitionAttributesFactory().setTotalNumBuckets(1).create());
                attr.setDiskStoreName("disk");
                RegionFactory regionFactory = cache.createRegionFactory(attr.create());
                partitionRegion = regionFactory.create(name);
            } catch (IllegalStateException ex) {
                LogWriterUtils.getLogWriter().warning("Creation caught IllegalStateException", ex);
            }
            assertNotNull("Region " + name + " not in cache", cache.getRegion(name));
            assertNotNull("Region ref null", partitionRegion);
            assertTrue("Region ref claims to be destroyed", !partitionRegion.isDestroyed());
            // Create Indexes
            try {
                Index index = cache.getQueryService().createIndex("statusIndex", "p.ID", "/" + name + " p");
                assertNotNull(index);
            } catch (Exception e1) {
                e1.printStackTrace();
                fail("Index creation failed");
            }
        }
    });
    // Start changing values in the region; without the fix this turns into a deadlock.
    AsyncInvocation asyncInv1 = vm0.invokeAsync(new CacheSerializableRunnable("Change value in region") {

        @Override
        public void run2() throws CacheException {
            Cache cache = getCache();
            // Do a put in region.
            Region r = getCache().getRegion(name);
            for (int i = 0; i < 100; i++) {
                r.put(i, new PortfolioData(i));
            }
            assertNull(IndexManager.testHook);
            IndexManager.testHook = new IndexManagerTestHook();
            // Destroy one of the values.
            getCache().getLogger().fine("Destroying the value");
            r.destroy(1);
            IndexManager.testHook = null;
        }
    });
    AsyncInvocation asyncInv2 = vm0.invokeAsync(new CacheSerializableRunnable("Run query on region") {

        @Override
        public void run2() throws CacheException {
            Cache cache = getCache();
            Query statusQuery = getCache().getQueryService().newQuery("select * from /" + name + " p where p.ID > -1");
            while (!hooked) {
                Wait.pause(100);
            }
            try {
                getCache().getLogger().fine("Querying the region");
                SelectResults results = (SelectResults) statusQuery.execute();
                assertEquals(100, results.size());
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    });
    // If the joins take more than 30 seconds, it's a deadlock.
    ThreadUtils.join(asyncInv2, 30 * 1000);
    ThreadUtils.join(asyncInv1, 30 * 1000);
}
Also used : Query(org.apache.geode.cache.query.Query) CacheException(org.apache.geode.cache.CacheException) Host(org.apache.geode.test.dunit.Host) Index(org.apache.geode.cache.query.Index) AsyncInvocation(org.apache.geode.test.dunit.AsyncInvocation) CacheException(org.apache.geode.cache.CacheException) DiskStore(org.apache.geode.cache.DiskStore) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) SelectResults(org.apache.geode.cache.query.SelectResults) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) RegionFactory(org.apache.geode.cache.RegionFactory) EvictionAttributesImpl(org.apache.geode.internal.cache.EvictionAttributesImpl) VM(org.apache.geode.test.dunit.VM) Region(org.apache.geode.cache.Region) PortfolioData(org.apache.geode.cache.query.data.PortfolioData) Cache(org.apache.geode.cache.Cache) FlakyTest(org.apache.geode.test.junit.categories.FlakyTest) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)
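Stripped of the DUnit scaffolding, the index-plus-query flow exercised above reduces to the plain QueryService API. The following is a minimal sketch, assuming a standalone cache and the test-only PortfolioData class; the region name, index name, and class name are illustrative, not taken from the test.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.query.Index;
import org.apache.geode.cache.query.QueryService;
import org.apache.geode.cache.query.SelectResults;
import org.apache.geode.cache.query.data.PortfolioData;

public class PortfolioIndexQuerySketch {

    public static void main(String[] args) throws Exception {
        // Hypothetical standalone cache; the DUnit test obtains one via getCache() instead.
        Cache cache = new CacheFactory().set("mcast-port", "0").create();
        Region<Integer, PortfolioData> region =
            cache.<Integer, PortfolioData>createRegionFactory(RegionShortcut.PARTITION)
                .create("PortfoliosPR");

        // Same index definition style as the test: indexed expression "p.ID" over "/<region> p".
        QueryService qs = cache.getQueryService();
        Index idIndex = qs.createIndex("idIndex", "p.ID", "/PortfoliosPR p");

        for (int i = 0; i < 100; i++) {
            region.put(i, new PortfolioData(i));
        }

        // The same OQL the test runs concurrently with the entry destroy.
        SelectResults<?> results =
            (SelectResults<?>) qs.newQuery("select * from /PortfoliosPR p where p.ID > -1").execute();
        System.out.println("index=" + idIndex.getName() + ", results=" + results.size());
        cache.close();
    }
}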

Example 37 with PortfolioData

use of org.apache.geode.cache.query.data.PortfolioData in project geode by apache.

the class PRQueryRemoteNodeExceptionDUnitTest method testCacheCloseExceptionFromLocalAndRemote.

@Test
public void testCacheCloseExceptionFromLocalAndRemote() throws Exception {
    LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Querying with PR Local/Remote Exception test Started");
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    setCacheInVMs(vm0, vm1);
    List vmList = new LinkedList();
    vmList.add(vm1);
    vmList.add(vm0);
    LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Creating PR's across all VM0 , VM1");
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateLimitedBuckets(name, redundancy, numOfBuckets));
    vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateLimitedBuckets(name, redundancy, numOfBuckets));
    LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Created PR on VM0 , VM1");
    // creating a local region on one of the JVM's
    LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Creating Local Region on VM0");
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForLocalRegionCreation(localName, PortfolioData.class));
    LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Created Local Region on VM0");
    // Generating portfolio object array to be populated across the PR's & Local
    // Regions
    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
    // Putting the data into the accessor node
    LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Inserting Portfolio data through the accessor node");
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio, cnt, cntDest));
    LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Inserted Portfolio data through the accessor node");
    // Putting the same data in the local region created
    LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Inserting Portfolio data on local node  VM0 for result Set Comparison");
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName, portfolio, cnt, cntDest));
    LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Inserted Portfolio data on local node  VM0 for result Set Comparison");
    // Install the test hooks on the local and remote nodes. The remote hook destroys the region
    // mid-query while the local hook closes the cache, so the query is expected to fail with a
    // CacheClosedException or QueryInvocationTargetException.
    vm1.invoke(new CacheSerializableRunnable(name) {

        @Override
        public void run2() throws CacheException {
            class MyQueryObserver extends IndexTrackingQueryObserver {

                private int noOfAccess = 0;

                @Override
                public void afterIterationEvaluation(Object result) {
                    LogWriterUtils.getLogWriter().info("Calling after IterationEvaluation :" + noOfAccess);
                    if (noOfAccess > 2) {
                        PRQHelp.getCache().getRegion(name).destroyRegion();
                    }
                    ++noOfAccess;
                }
            }
            QueryObserverHolder.setInstance(new MyQueryObserver());
        }
    });
    vm0.invoke(new CacheSerializableRunnable(name) {

        @Override
        public void run2() throws CacheException {
            boolean gotException = false;
            Cache cache = PRQHelp.getCache();
            class MyQueryObserver extends QueryObserverAdapter {

                private int noOfAccess = 0;

                @Override
                public void afterIterationEvaluation(Object result) {
                    // Object region = ((DefaultQuery)query).getRegionsInQuery(null).iterator().next();
                    LogWriterUtils.getLogWriter().info("Calling after IterationEvaluation :" + noOfAccess);
                    if (noOfAccess > 2) {
                        PRQHelp.getCache().close();
                    }
                    ++noOfAccess;
                }
            }
            QueryObserverHolder.setInstance(new MyQueryObserver());
            final DefaultQuery query = (DefaultQuery) cache.getQueryService().newQuery("Select * from /" + name + " p where p.ID > 0");
            try {
                query.execute();
            } catch (Exception ex) {
                gotException = true;
                if (ex instanceof CacheClosedException || ex instanceof QueryInvocationTargetException) {
                    LogWriterUtils.getLogWriter().info(ex.getMessage());
                    LogWriterUtils.getLogWriter().info("PRQueryRemoteNodeExceptionDUnitTest: Test received Exception from local node successfully.");
                } else {
                    Assert.fail("PRQueryRemoteNodeExceptionDUnitTest: Test did not receive Exception as expected from local node rather received", ex);
                }
            }
            if (!gotException) {
                fail("PRQueryRemoteNodeExceptionDUnitTest#testPRWithLocalAndRemoteException: Test did not receive Exception as expected from local as well as remote node");
            }
        }
    });
    LogWriterUtils.getLogWriter().info("PRQueryRemoteNodeExceptionDUnitTest#testPRWithLocalAndRemoteException: Querying with PR Local/Remote Exception Test ENDED");
}
Also used : DefaultQuery(org.apache.geode.cache.query.internal.DefaultQuery) CacheException(org.apache.geode.cache.CacheException) QueryInvocationTargetException(org.apache.geode.cache.query.QueryInvocationTargetException) Host(org.apache.geode.test.dunit.Host) CacheClosedException(org.apache.geode.cache.CacheClosedException) LinkedList(java.util.LinkedList) CacheClosedException(org.apache.geode.cache.CacheClosedException) CacheException(org.apache.geode.cache.CacheException) QueryInvocationTargetException(org.apache.geode.cache.query.QueryInvocationTargetException) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) VM(org.apache.geode.test.dunit.VM) LinkedList(java.util.LinkedList) List(java.util.List) PortfolioData(org.apache.geode.cache.query.data.PortfolioData) Cache(org.apache.geode.cache.Cache) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)
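The key trick in this example is injecting failures through QueryObserverHolder. A minimal sketch of installing such a hook and restoring a no-op observer afterwards, using only the internal QueryObserverAdapter/QueryObserverHolder classes already shown above; the class and method names and the evaluation threshold are illustrative.

import org.apache.geode.cache.query.internal.QueryObserverAdapter;
import org.apache.geode.cache.query.internal.QueryObserverHolder;

public class FailingQueryObserverSketch {

    /** Observer that triggers a failure action after a few per-row evaluations, as the test does. */
    static class FailAfterFewEvaluations extends QueryObserverAdapter {
        private final Runnable failureAction;
        private int evaluations;

        FailAfterFewEvaluations(Runnable failureAction) {
            this.failureAction = failureAction;
        }

        @Override
        public void afterIterationEvaluation(Object result) {
            if (++evaluations > 2) {
                failureAction.run(); // e.g. cache.close() or region.destroyRegion()
            }
        }
    }

    static void runWithObserver(Runnable failureAction, Runnable queryWork) {
        QueryObserverHolder.setInstance(new FailAfterFewEvaluations(failureAction));
        try {
            queryWork.run();
        } finally {
            // Restore a no-op observer so later queries in the same VM are unaffected.
            QueryObserverHolder.setInstance(new QueryObserverAdapter());
        }
    }
}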

Example 38 with PortfolioData

use of org.apache.geode.cache.query.data.PortfolioData in project geode by apache.

the class PRQueryDUnitTest method testPRAccessorCreationAndQuerying.

/**
   * This test:<br>
   * 1. Creates PR regions with scope = DACK, with one VM as the accessor node and the others as
   * datastores <br>
   * 2. Creates a local region on one of the VMs <br>
   * 3. Puts the same data into both the PR region and the local region <br>
   * 4. Queries the data in both the local region and the PR <br>
   * 5. Verifies the size, type, and contents of both result sets obtained
   *
   * @throws Exception
   */
@Test
public void testPRAccessorCreationAndQuerying() throws Exception {
    LogWriterUtils.getLogWriter().info("PRQueryDUnitTest#testPRAccessorCreationAndQuerying : Querying PR Test with DACK Started*****");
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    VM vm2 = host.getVM(2);
    VM vm3 = host.getVM(3);
    setCacheInVMs(vm0, vm1, vm2, vm3);
    // Creating PRs on the participating VMs
    // Creating Accessor node on the VM
    LogWriterUtils.getLogWriter().info("PRQueryDUnitTest#testPRAccessorCreationAndQuerying : Creating the Accessor node in the PR");
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name, 0, PortfolioData.class));
    LogWriterUtils.getLogWriter().info("PRQueryDUnitTest#testPRAccessorCreationAndQuerying : Successfully created the Accessor node in the PR");
    // Creating the Datastores Nodes in the VM's
    LogWriterUtils.getLogWriter().info("PRQueryDUnitTest#testPRAccessorCreationAndQuerying : Creating the Datastore node in the PR");
    vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name, redundancy, PortfolioData.class));
    vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name, redundancy, PortfolioData.class));
    vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name, redundancy, PortfolioData.class));
    LogWriterUtils.getLogWriter().info("PRQueryDUnitTest#testPRAccessorCreationAndQuerying : Successfully Created the Datastore node in the PR");
    LogWriterUtils.getLogWriter().info("PRQueryDUnitTest#testPRAccessorCreationAndQuerying : Successfully Created PR's across all VM's");
    // creating a local region on one of the JVM's
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForLocalRegionCreation(localName, PortfolioData.class));
    LogWriterUtils.getLogWriter().info("PRQueryDUnitTest#testPRAccessorCreationAndQuerying : Successfully Created Local Region on VM0");
    // Generating portfolio object array to be populated across the PR's & Local
    // Regions
    final PortfolioData[] portfolio = createPortfolioData(cnt, totalDataSize);
    // Putting the data into the PR's created
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio, i, stepSize));
    vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio, stepSize, (2 * stepSize)));
    vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio, (2 * stepSize), (3 * stepSize)));
    vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio, (3 * (stepSize)), totalDataSize));
    LogWriterUtils.getLogWriter().info("PRQueryDUnitTest#testPRAccessorCreationAndQuerying : Inserted Portfolio data across PR's");
    // Putting the same data in the local region created
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName, portfolio, i, totalDataSize));
    // querying the VM for data
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRQueryAndCompareResults(name, localName));
    LogWriterUtils.getLogWriter().info("PRQueryDUnitTest#testPRAccessorCreationAndQuerying : Querying PR's Test ENDED*****");
}
Also used : VM(org.apache.geode.test.dunit.VM) Host(org.apache.geode.test.dunit.Host) PortfolioData(org.apache.geode.cache.query.data.PortfolioData) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)
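The PRQHelp runnables above hide the difference between the accessor and the datastore members: in the public API it comes down to localMaxMemory, since an accessor participates in the partitioned region but hosts no buckets. A minimal sketch of the two configurations, assuming a cache is already available; the method names and parameter values are illustrative.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.PartitionAttributesFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.query.data.PortfolioData;

public class AccessorDatastoreSketch {

    /** Accessor member: participates in the PR but stores no buckets (localMaxMemory = 0). */
    static Region<Integer, PortfolioData> createAccessor(Cache cache, String name, int redundancy) {
        return cache.<Integer, PortfolioData>createRegionFactory(RegionShortcut.PARTITION)
            .setPartitionAttributes(new PartitionAttributesFactory<Integer, PortfolioData>()
                .setLocalMaxMemory(0)
                .setRedundantCopies(redundancy)
                .create())
            .create(name);
    }

    /** Datastore member: hosts buckets and keeps redundant copies of the data. */
    static Region<Integer, PortfolioData> createDatastore(Cache cache, String name, int redundancy) {
        return cache.<Integer, PortfolioData>createRegionFactory(RegionShortcut.PARTITION)
            .setPartitionAttributes(new PartitionAttributesFactory<Integer, PortfolioData>()
                .setRedundantCopies(redundancy)
                .create())
            .create(name);
    }
}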

Example 39 with PortfolioData

use of org.apache.geode.cache.query.data.PortfolioData in project geode by apache.

the class PRInvalidQueryDUnitTest method testPRDAckCreationAndQueryingWithInvalidQuery.

/**
   * This test:<br>
   * 1. Creates PR regions with scope = DACK <br>
   * 2. Creates a local region on one of the VMs <br>
   * 3. Puts data into the PR region and the local region <br>
   * 4. Queries the PR with invalid query syntax <br>
   * 5. Verifies that a QueryInvalidException is thrown
   *
   * @throws Exception
   */
@Test
public void testPRDAckCreationAndQueryingWithInvalidQuery() throws Exception {
    LogWriterUtils.getLogWriter().info("PRInvalidQueryDUnitTest#testPRDAckCreationAndQueryingWithInvalidQuery: Querying PR Test with Expected InvalidQueryException*****");
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    VM vm2 = host.getVM(2);
    VM vm3 = host.getVM(3);
    setCacheInVMs(vm0, vm1, vm2, vm3);
    // Creating PRs on the participating VMs
    // Creating Accessor node on the VM
    LogWriterUtils.getLogWriter().info("PRInvalidQueryDUnitTest#testPRDAckCreationAndQueryingWithInvalidQuery: Creating the Accessor node in the PR");
    vm0.invoke(prq.getCacheSerializableRunnableForPRAccessorCreate(name, redundancy, PortfolioData.class));
    LogWriterUtils.getLogWriter().info("PRInvalidQueryDUnitTest#testPRDAckCreationAndQueryingWithInvalidQuery: Successfully created the Accessor node in the PR");
    // Creating the Datastores Nodes in the VM's
    LogWriterUtils.getLogWriter().info("PRInvalidQueryDUnitTest#testPRDAckCreationAndQueryingWithInvalidQuery: Creating the Datastore node in the PR");
    vm1.invoke(prq.getCacheSerializableRunnableForPRCreate(name, redundancy, PortfolioData.class));
    vm2.invoke(prq.getCacheSerializableRunnableForPRCreate(name, redundancy, PortfolioData.class));
    vm3.invoke(prq.getCacheSerializableRunnableForPRCreate(name, redundancy, PortfolioData.class));
    LogWriterUtils.getLogWriter().info("PRInvalidQueryDUnitTest#testPRDAckCreationAndQueryingWithInvalidQuery: Successfully Created the Datastore node in the PR");
    LogWriterUtils.getLogWriter().info("PRInvalidQueryDUnitTest#testPRDAckCreationAndQueryingWithInvalidQuery: Successfully Created PR's across all VM's");
    // Generating portfolio object array to be populated across the PR's & Local
    // Regions
    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
    // Putting the data into the PR's created
    vm0.invoke(prq.getCacheSerializableRunnableForPRPuts(name, portfolio, i, i + step));
    vm1.invoke(prq.getCacheSerializableRunnableForPRPuts(name, portfolio, i + step, i + (2 * step)));
    vm2.invoke(prq.getCacheSerializableRunnableForPRPuts(name, portfolio, i + (2 * step), i + (3 * step)));
    vm3.invoke(prq.getCacheSerializableRunnableForPRPuts(name, portfolio, i + (3 * step), cntDest));
    LogWriterUtils.getLogWriter().info("PRInvalidQueryDUnitTest#testPRDAckCreationAndQueryingWithInvalidQuery: Successfully Inserted Portfolio data across PR's");
    final String invalidQuery = "Invalid Query";
    // querying the VM for data
    vm0.invoke(prq.getCacheSerializableRunnableForPRInvalidQuery(name));
    LogWriterUtils.getLogWriter().info("PRInvalidQueryDUnitTest#testPRDAckCreationAndQueryingWithInvalidQuery: *****Querying PR's Test with Expected Invalid Query Exception *****");
}
Also used : VM(org.apache.geode.test.dunit.VM) Host(org.apache.geode.test.dunit.Host) Utils.createPortfolioData(org.apache.geode.cache.query.Utils.createPortfolioData) PortfolioData(org.apache.geode.cache.query.data.PortfolioData) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)
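The getCacheSerializableRunnableForPRInvalidQuery helper hides the actual assertion. In plain QueryService terms it expects a QueryInvalidException when malformed OQL is parsed or executed; a minimal sketch under that assumption, with an illustrative method name and a deliberately unparseable query string.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.query.QueryInvalidException;
import org.apache.geode.cache.query.QueryService;

public class InvalidQuerySketch {

    /** Returns true if the malformed query is rejected with a QueryInvalidException. */
    static boolean rejectsInvalidQuery(Cache cache, String regionName) {
        QueryService qs = cache.getQueryService();
        try {
            // Not valid OQL, so newQuery/execute should fail fast with QueryInvalidException.
            qs.newQuery("Invalid Query against /" + regionName).execute();
            return false; // no exception: the expectation was not met
        } catch (QueryInvalidException expected) {
            return true;
        } catch (Exception other) {
            return false; // a different failure mode is also not what the test expects
        }
    }
}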

Example 40 with PortfolioData

use of org.apache.geode.cache.query.data.PortfolioData in project geode by apache.

the class PRQueryCacheClosedJUnitTest method testQueryOnSingleDataStoreWithCacheClose.

/**
   * Tests the execution of a query on a PartitionedRegion created on a single data store. <br>
   * 1. Creates a PR with redundancy=0 on a single VM. <br>
   * 2. Puts some test objects into the cache.<br>
   * 3. Creates a thread that fires queries on the data and verifies the results.<br>
   * 4. Creates another thread that calls cache#close().<br>
   * 5. Recreates the cache after a delay of 1500ms.
   *
   * @throws Exception
   */
@Test
public void testQueryOnSingleDataStoreWithCacheClose() throws Exception {
    logger.info("PRQueryRegionDestroyedJUnitTest#testQueryOnSingleDataStoreWithCacheClose: Test Started  ");
    logger.info("PRQueryRegionDestroyedJUnitTest#testQueryOnSingleDataStoreWithCacheClose: creating PR Region ");
    final Region region = PartitionedRegionTestHelper.createPartitionedRegion(regionName, localMaxMemory, redundancy);
    final Region localRegion = PartitionedRegionTestHelper.createLocalRegion(localRegionName);
    final StringBuffer errorBuf = new StringBuffer("");
    PortfolioData[] portfolios = new PortfolioData[dataSize];
    try {
        for (int j = 0; j < dataSize; j++) {
            portfolios[j] = new PortfolioData(j);
        }
        logger.info("PRQueryCacheClosedJUnitTest#testQueryOnSingleDataStoreWithCacheClose: populating PortfolioData into the PR Datastore  ");
        populateData(region, portfolios);
        logger.info("PRQueryCacheClosedJUnitTest#testQueryOnSingleDataStoreWithCacheClose: populating PortfolioData into the PR Datastore  ");
        populateData(localRegion, portfolios);
        final String[] queryString = { "ID = 0 OR ID = 1", "ID > 4 AND ID < 9", "ID = 5", "ID < 5 ", "ID <= 5" };
        logger.info("PRQueryCacheClosedJUnitTest#testQueryOnSingleDataStoreWithCacheClose: Creating a Thread which will fire queries on the datastore");
        Thread t1 = new Thread(new Runnable() {

            public void run() {
                final String expectedCacheClosedException = CacheClosedException.class.getName();
                logger.info("<ExpectedException action=add>" + expectedCacheClosedException + "</ExpectedException>");
                for (int i = 0; i < queryString.length; i++) {
                    try {
                        SelectResults resSetPR = region.query(queryString[i]);
                        SelectResults resSetLocal = localRegion.query(queryString[i]);
                        String failureString = PartitionedRegionTestHelper.compareResultSets(resSetPR, resSetLocal);
                        Thread.sleep(delayQuery);
                        if (failureString != null) {
                            errorBuf.append(failureString);
                            throw (new Exception(failureString));
                        }
                    } catch (InterruptedException ie) {
                        fail("interrupted");
                    } catch (CancelException cce) {
                        logger.info("PRQueryCacheClosedJUnitTest#testQueryOnSingleDataStoreWithCacheClose: CancelException as Expected " + cce);
                    }// it's also possible to get a RegionNotFoundException
                     catch (RegionNotFoundException rnfe) {
                        logger.info("PRQueryCacheClosedJUnitTest#testQueryOnSingleDataStoreWithCacheClose: RegionNotFoundException as Expected " + rnfe);
                    } catch (Exception qe) {
                        logger.info("PRQueryCacheClosedJUnitTest#testQueryOnSingleDataStoreWithCacheClose: Unexpected Exception " + qe);
                        encounteredException = true;
                        StringWriter sw = new StringWriter();
                        qe.printStackTrace(new PrintWriter(sw, true));
                        errorBuf.append(sw);
                    }
                }
                logger.info("<ExpectedException action=remove>" + expectedCacheClosedException + "</ExpectedException>");
            }
        });
        logger.info("PRQueryCacheClosedJUnitTest#testQueryOnSingleDataStoreWithCacheClose: Creating a Thread which will call cache.close() on the datastore ");
        Thread t2 = new Thread(new Runnable() {

            public void run() {
                PartitionedRegionTestHelper.closeCache();
                try {
                    Thread.sleep(delayCC);
                } catch (InterruptedException ie) {
                    fail("interrupted");
                }
                PartitionedRegionTestHelper.createCache();
            }
        });
        logger.info("PRQueryCacheClosedJUnitTest#testQueryOnSingleDataStoreWithCacheClose: Initiating the  Threads");
        t1.start();
        t2.start();
        logger.info("PRQueryCacheClosedJUnitTest#testQueryOnSingleDataStoreWithCacheClose: Waiting for the Threads to join ");
        ThreadUtils.join(t1, 30 * 1000);
        ThreadUtils.join(t2, 30 * 1000);
        logger.info("PRQueryCacheClosedJUnitTest#testQueryOnSingleDataStoreWithCacheClose: checking for any Unexpected Exception's occurred");
        assertFalse("PRQueryCacheClosedJUnitTest#testQueryOnSingleDataStoreWithCacheClose: Exception occurred in Query-thread: " + errorBuf, encounteredException);
    } catch (Exception e) {
        e.printStackTrace();
        fail("PRQueryCacheClosedJUnitTest#testQueryOnSingleDataStoreWithCacheClose: Test failed because of exception " + e);
    }
    logger.info("PRQueryCacheClosedJUnitTest#testQueryOnSingleDataStoreWithCacheClose: Test Ended");
}
Also used : RegionNotFoundException(org.apache.geode.cache.query.RegionNotFoundException) CacheClosedException(org.apache.geode.cache.CacheClosedException) RegionNotFoundException(org.apache.geode.cache.query.RegionNotFoundException) CacheClosedException(org.apache.geode.cache.CacheClosedException) CancelException(org.apache.geode.CancelException) SelectResults(org.apache.geode.cache.query.SelectResults) StringWriter(java.io.StringWriter) Region(org.apache.geode.cache.Region) PortfolioData(org.apache.geode.cache.query.data.PortfolioData) CancelException(org.apache.geode.CancelException) PrintWriter(java.io.PrintWriter) Test(org.junit.Test) IntegrationTest(org.apache.geode.test.junit.categories.IntegrationTest)
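The query thread above ultimately does a per-predicate comparison between the partitioned region and the local region. A minimal sketch of that check, assuming both regions hold identical data; the method name is illustrative and the containment check is a simplified stand-in for PartitionedRegionTestHelper.compareResultSets, relying on PortfolioData equality.

import org.apache.geode.cache.Region;
import org.apache.geode.cache.query.SelectResults;

public class ResultSetComparisonSketch {

    /** Runs the same predicate on both regions and checks the results match, ignoring order. */
    static boolean sameResults(Region<?, ?> prRegion, Region<?, ?> localRegion, String predicate)
            throws Exception {
        SelectResults<?> prResults = prRegion.query(predicate);
        SelectResults<?> localResults = localRegion.query(predicate);
        return prResults.size() == localResults.size()
            && prResults.asList().containsAll(localResults.asList())
            && localResults.asList().containsAll(prResults.asList());
    }
}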

Aggregations

PortfolioData (org.apache.geode.cache.query.data.PortfolioData) 57
Test (org.junit.Test) 57
Host (org.apache.geode.test.dunit.Host) 52
VM (org.apache.geode.test.dunit.VM) 52
DistributedTest (org.apache.geode.test.junit.categories.DistributedTest) 52
Cache (org.apache.geode.cache.Cache) 18
CacheSerializableRunnable (org.apache.geode.cache30.CacheSerializableRunnable) 18
Region (org.apache.geode.cache.Region) 17
AsyncInvocation (org.apache.geode.test.dunit.AsyncInvocation) 17
CacheException (org.apache.geode.cache.CacheException) 16
SelectResults (org.apache.geode.cache.query.SelectResults) 14
Index (org.apache.geode.cache.query.Index) 13
Query (org.apache.geode.cache.query.Query) 12
FlakyTest (org.apache.geode.test.junit.categories.FlakyTest) 12
ArrayList (java.util.ArrayList) 11
AttributesFactory (org.apache.geode.cache.AttributesFactory) 11
RegionFactory (org.apache.geode.cache.RegionFactory) 11
LinkedList (java.util.LinkedList) 9
List (java.util.List) 9
PartitionAttributesFactory (org.apache.geode.cache.PartitionAttributesFactory) 9