Example 1 with PortfolioData

Use of org.apache.geode.cache.query.data.PortfolioData in project geode by apache.

From the class ConcurrentIndexInitOnOverflowRegionDUnitTest, method testAsyncIndexInitDuringEntryDestroyAndQueryOnRR.

/**
  * Verifies that creating and initializing an index does not deadlock with a concurrent
  * entry destroy held at the IndexManager test hook on a replicated overflow region.
  */
@Test
public void testAsyncIndexInitDuringEntryDestroyAndQueryOnRR() {
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    name = "PartionedPortfoliosPR";
    // Create a replicated region that overflows to disk via the "disk" disk store
    vm0.invoke(new CacheSerializableRunnable("Create local region with synchronous index maintenance") {

        @Override
        public void run2() throws CacheException {
            Cache cache = getCache();
            Region partitionRegion = null;
            IndexManager.testHook = null;
            try {
                DiskStore ds = cache.findDiskStore("disk");
                if (ds == null) {
                    ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("disk");
                }
                AttributesFactory attr = new AttributesFactory();
                attr.setValueConstraint(PortfolioData.class);
                attr.setIndexMaintenanceSynchronous(true);
                EvictionAttributesImpl evicAttr = new EvictionAttributesImpl().setAction(EvictionAction.OVERFLOW_TO_DISK);
                evicAttr.setAlgorithm(EvictionAlgorithm.LRU_ENTRY).setMaximum(1);
                attr.setEvictionAttributes(evicAttr);
                attr.setDataPolicy(DataPolicy.REPLICATE);
                // attr.setPartitionAttributes(new
                // PartitionAttributesFactory().setTotalNumBuckets(1).create());
                attr.setDiskStoreName("disk");
                RegionFactory regionFactory = cache.createRegionFactory(attr.create());
                partitionRegion = regionFactory.create(name);
            } catch (IllegalStateException ex) {
                LogWriterUtils.getLogWriter().warning("Creation caught IllegalStateException", ex);
            }
            assertNotNull("Region " + name + " not in cache", cache.getRegion(name));
            assertNotNull("Region ref null", partitionRegion);
            assertTrue("Region ref claims to be destroyed", !partitionRegion.isDestroyed());
            // Create Indexes
            try {
                Index index = cache.getQueryService().createIndex("statusIndex", "p.status", "/" + name + " p");
                assertNotNull(index);
            } catch (Exception e1) {
                e1.printStackTrace();
                fail("Index creation failed");
            }
        }
    });
    // Start changing values in the region; without the fix, this would deadlock
    AsyncInvocation asyncInv1 = vm0.invokeAsync(new CacheSerializableRunnable("Change value in region") {

        @Override
        public void run2() throws CacheException {
            Cache cache = getCache();
            // Do a put in region.
            Region r = getCache().getRegion(name);
            for (int i = 0; i < 100; i++) {
                r.put(i, new PortfolioData(i));
            }
            assertNull(IndexManager.testHook);
            IndexManager.testHook = new IndexManagerTestHook();
            // Destroy one of the values.
            getCache().getLogger().fine("Destroying the value");
            r.destroy(1);
            IndexManager.testHook = null;
        }
    });
    AsyncInvocation asyncInv2 = vm0.invokeAsync(new CacheSerializableRunnable("Run query on region") {

        @Override
        public void run2() throws CacheException {
            Cache cache = getCache();
            while (!hooked) {
                Wait.pause(100);
            }
            // Create and hence initialize Index
            try {
                Index index = cache.getQueryService().createIndex("idIndex", "p.ID", "/" + name + " p");
                assertNotNull(index);
            } catch (Exception e1) {
                e1.printStackTrace();
                fail("Index creation failed");
            }
        }
    });
    // If the join takes more than 30 seconds, it's a deadlock.
    ThreadUtils.join(asyncInv2, 30 * 1000);
    ThreadUtils.join(asyncInv1, 30 * 1000);
}
Also used : CacheException(org.apache.geode.cache.CacheException) Host(org.apache.geode.test.dunit.Host) Index(org.apache.geode.cache.query.Index) AsyncInvocation(org.apache.geode.test.dunit.AsyncInvocation) IOException(java.io.IOException) CacheException(org.apache.geode.cache.CacheException) IgnoredException(org.apache.geode.test.dunit.IgnoredException) DiskStore(org.apache.geode.cache.DiskStore) AttributesFactory(org.apache.geode.cache.AttributesFactory) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) RegionFactory(org.apache.geode.cache.RegionFactory) EvictionAttributesImpl(org.apache.geode.internal.cache.EvictionAttributesImpl) VM(org.apache.geode.test.dunit.VM) Region(org.apache.geode.cache.Region) PortfolioData(org.apache.geode.cache.query.data.PortfolioData) Cache(org.apache.geode.cache.Cache) ClientCache(org.apache.geode.cache.client.ClientCache) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)
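
For readers who want to reproduce the region and index setup above outside a DUnit test, here is a minimal sketch using only public Geode APIs (CacheFactory, DiskStoreFactory, RegionFactory, EvictionAttributes, QueryService) instead of the internal EvictionAttributesImpl. The class name, the standalone main method, and the "diskDir" directory are illustrative assumptions, and PortfolioData comes from the Geode test sources, so this is not a drop-in replacement for the test.

import java.io.File;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.EvictionAction;
import org.apache.geode.cache.EvictionAttributes;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.query.Index;
import org.apache.geode.cache.query.data.PortfolioData;

public class OverflowRegionWithIndexSketch {

    public static void main(String[] args) throws Exception {
        Cache cache = new CacheFactory().set("mcast-port", "0").create();
        // Named disk store backing the overflow; "diskDir" is an assumed local directory.
        cache.createDiskStoreFactory()
            .setDiskDirs(new File[] { new File("diskDir") })
            .create("disk");
        // Replicated region that overflows to disk after one in-memory entry,
        // mirroring the eviction configuration used by the test above.
        Region<Integer, PortfolioData> region = cache
            .<Integer, PortfolioData>createRegionFactory(RegionShortcut.REPLICATE)
            .setDiskStoreName("disk")
            .setEvictionAttributes(
                EvictionAttributes.createLRUEntryAttributes(1, EvictionAction.OVERFLOW_TO_DISK))
            .create("PartionedPortfoliosPR");
        // Index on the status field, as in the test.
        Index statusIndex = cache.getQueryService()
            .createIndex("statusIndex", "p.status", "/" + region.getName() + " p");
        System.out.println("Created index " + statusIndex.getName());
        cache.close();
    }
}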

Example 2 with PortfolioData

Use of org.apache.geode.cache.query.data.PortfolioData in project geode by apache.

From the class ConcurrentIndexOperationsOnOverflowRegionDUnitTest, method testAsyncIndexInitDuringEntryDestroyAndQueryOnRR.

// GEODE-1828
@Category(FlakyTest.class)
@Test
public void testAsyncIndexInitDuringEntryDestroyAndQueryOnRR() {
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    hooked = false;
    name = "PartionedPortfoliosPR";
    // Create a replicated region that overflows to disk via the "disk" disk store
    vm0.invoke(new CacheSerializableRunnable("Create local region with synchronous index maintenance") {

        @Override
        public void run2() throws CacheException {
            Cache cache = getCache();
            Region partitionRegion = null;
            IndexManager.testHook = null;
            try {
                DiskStore ds = cache.findDiskStore("disk");
                if (ds == null) {
                    ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("disk");
                }
                AttributesFactory attr = new AttributesFactory();
                attr.setValueConstraint(PortfolioData.class);
                attr.setIndexMaintenanceSynchronous(true);
                EvictionAttributesImpl evicAttr = new EvictionAttributesImpl().setAction(EvictionAction.OVERFLOW_TO_DISK);
                evicAttr.setAlgorithm(EvictionAlgorithm.LRU_ENTRY).setMaximum(1);
                attr.setEvictionAttributes(evicAttr);
                attr.setDataPolicy(DataPolicy.REPLICATE);
                // attr.setPartitionAttributes(new
                // PartitionAttributesFactory().setTotalNumBuckets(1).create());
                attr.setDiskStoreName("disk");
                RegionFactory regionFactory = cache.createRegionFactory(attr.create());
                partitionRegion = regionFactory.create(name);
            } catch (IllegalStateException ex) {
                LogWriterUtils.getLogWriter().warning("Creation caught IllegalStateException", ex);
            }
            assertNotNull("Region " + name + " not in cache", cache.getRegion(name));
            assertNotNull("Region ref null", partitionRegion);
            assertTrue("Region ref claims to be destroyed", !partitionRegion.isDestroyed());
            // Create Indexes
            try {
                Index index = cache.getQueryService().createIndex("statusIndex", "p.ID", "/" + name + " p");
                assertNotNull(index);
            } catch (Exception e1) {
                e1.printStackTrace();
                fail("Index creation failed");
            }
        }
    });
    // Start changing values in the region; without the fix, this would deadlock
    AsyncInvocation asyncInv1 = vm0.invokeAsync(new CacheSerializableRunnable("Change value in region") {

        @Override
        public void run2() throws CacheException {
            Cache cache = getCache();
            // Do a put in region.
            Region r = getCache().getRegion(name);
            for (int i = 0; i < 100; i++) {
                r.put(i, new PortfolioData(i));
            }
            assertNull(IndexManager.testHook);
            IndexManager.testHook = new IndexManagerTestHook();
            // Destroy one of the values.
            getCache().getLogger().fine("Destroying the value");
            r.destroy(1);
            IndexManager.testHook = null;
        }
    });
    AsyncInvocation asyncInv2 = vm0.invokeAsync(new CacheSerializableRunnable("Run query on region") {

        @Override
        public void run2() throws CacheException {
            Cache cache = getCache();
            Query statusQuery = getCache().getQueryService().newQuery("select * from /" + name + " p where p.ID > -1");
            while (!hooked) {
                Wait.pause(100);
            }
            try {
                getCache().getLogger().fine("Querying the region");
                SelectResults results = (SelectResults) statusQuery.execute();
                assertEquals(100, results.size());
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    });
    // If the join takes more than 30 seconds, it's a deadlock.
    ThreadUtils.join(asyncInv2, 30 * 1000);
    ThreadUtils.join(asyncInv1, 30 * 1000);
}
Also used : Query(org.apache.geode.cache.query.Query) CacheException(org.apache.geode.cache.CacheException) Host(org.apache.geode.test.dunit.Host) Index(org.apache.geode.cache.query.Index) AsyncInvocation(org.apache.geode.test.dunit.AsyncInvocation) CacheException(org.apache.geode.cache.CacheException) DiskStore(org.apache.geode.cache.DiskStore) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) SelectResults(org.apache.geode.cache.query.SelectResults) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) RegionFactory(org.apache.geode.cache.RegionFactory) EvictionAttributesImpl(org.apache.geode.internal.cache.EvictionAttributesImpl) VM(org.apache.geode.test.dunit.VM) Region(org.apache.geode.cache.Region) PortfolioData(org.apache.geode.cache.query.data.PortfolioData) Cache(org.apache.geode.cache.Cache) Category(org.junit.experimental.categories.Category) FlakyTest(org.apache.geode.test.junit.categories.FlakyTest) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)
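
The query side of this example can also be exercised directly with the public QueryService API. The helper below is a minimal sketch (the class and method names are assumptions, not part of the test) that populates an existing region with PortfolioData, runs the same OQL the test issues, and returns the match count.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.query.Query;
import org.apache.geode.cache.query.QueryService;
import org.apache.geode.cache.query.SelectResults;
import org.apache.geode.cache.query.data.PortfolioData;

public class PortfolioQuerySketch {

    // Puts 100 PortfolioData entries and counts how many have ID > -1 (expected: all 100).
    static int countPositiveIds(Cache cache, String regionName) throws Exception {
        Region<Integer, PortfolioData> region = cache.getRegion(regionName);
        for (int i = 0; i < 100; i++) {
            region.put(i, new PortfolioData(i));
        }
        QueryService queryService = cache.getQueryService();
        Query query = queryService.newQuery("select * from /" + regionName + " p where p.ID > -1");
        SelectResults<?> results = (SelectResults<?>) query.execute();
        return results.size();
    }
}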

Example 3 with PortfolioData

Use of org.apache.geode.cache.query.data.PortfolioData in project geode by apache.

From the class ConcurrentIndexOperationsOnOverflowRegionDUnitTest, method testAsyncIndexInitDuringEntryDestroyAndQueryOnOnNonOverflowPR.

/**
  * Verifies that a query does not deadlock with a concurrent entry destroy held at the
  * IndexManager test hook on a non-overflow partitioned region.
  */
@Test
public void testAsyncIndexInitDuringEntryDestroyAndQueryOnOnNonOverflowPR() {
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    hooked = false;
    name = "PartionedPortfoliosPR";
    // Create a non-overflow partitioned region with a single bucket
    vm0.invoke(new CacheSerializableRunnable("Create local region with synchronous index maintenance") {

        @Override
        public void run2() throws CacheException {
            Cache cache = getCache();
            Region partitionRegion = null;
            IndexManager.testHook = null;
            try {
                AttributesFactory attr = new AttributesFactory();
                attr.setValueConstraint(PortfolioData.class);
                attr.setIndexMaintenanceSynchronous(true);
                attr.setDataPolicy(DataPolicy.PARTITION);
                attr.setPartitionAttributes(new PartitionAttributesFactory().setTotalNumBuckets(1).create());
                RegionFactory regionFactory = cache.createRegionFactory(attr.create());
                partitionRegion = regionFactory.create(name);
            } catch (IllegalStateException ex) {
                LogWriterUtils.getLogWriter().warning("Creation caught IllegalStateException", ex);
            }
            assertNotNull("Region " + name + " not in cache", cache.getRegion(name));
            assertNotNull("Region ref null", partitionRegion);
            assertTrue("Region ref claims to be destroyed", !partitionRegion.isDestroyed());
            // Create Indexes
            try {
                Index index = cache.getQueryService().createIndex("statusIndex", "p.ID", "/" + name + " p");
                assertNotNull(index);
            } catch (Exception e1) {
                e1.printStackTrace();
                fail("Index creation failed");
            }
        }
    });
    // Start changing values in the region; without the fix, this would deadlock
    AsyncInvocation asyncInv1 = vm0.invokeAsync(new CacheSerializableRunnable("Change value in region") {

        @Override
        public void run2() throws CacheException {
            // Do a put in region.
            Region r = getCache().getRegion(name);
            for (int i = 0; i < 100; i++) {
                r.put(i, new PortfolioData(i));
            }
            assertNull(IndexManager.testHook);
            IndexManager.testHook = new IndexManagerNoWaitTestHook();
            // Destroy one of the values.
            getCache().getLogger().fine("Destroying the value");
            r.destroy(1);
            IndexManager.testHook = null;
        }
    });
    AsyncInvocation asyncInv2 = vm0.invokeAsync(new CacheSerializableRunnable("Run query on region") {

        @Override
        public void run2() throws CacheException {
            Query statusQuery = getCache().getQueryService().newQuery("select * from /" + name + " p where p.ID > -1");
            while (!hooked) {
                Wait.pause(10);
            }
            try {
                getCache().getLogger().fine("Querying the region");
                SelectResults results = (SelectResults) statusQuery.execute();
                assertEquals(100, results.size());
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    });
    // If the join takes more than 30 seconds, it's a deadlock.
    ThreadUtils.join(asyncInv2, 30 * 1000);
    ThreadUtils.join(asyncInv1, 30 * 1000);
}
Also used : Query(org.apache.geode.cache.query.Query) CacheException(org.apache.geode.cache.CacheException) Host(org.apache.geode.test.dunit.Host) Index(org.apache.geode.cache.query.Index) AsyncInvocation(org.apache.geode.test.dunit.AsyncInvocation) CacheException(org.apache.geode.cache.CacheException) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) SelectResults(org.apache.geode.cache.query.SelectResults) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) RegionFactory(org.apache.geode.cache.RegionFactory) VM(org.apache.geode.test.dunit.VM) Region(org.apache.geode.cache.Region) PortfolioData(org.apache.geode.cache.query.data.PortfolioData) Cache(org.apache.geode.cache.Cache) FlakyTest(org.apache.geode.test.junit.categories.FlakyTest) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)
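
The non-overflow variant builds a partitioned region with exactly one bucket. A minimal public-API equivalent of that region setup (class and method names are assumptions) could look like this:

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.PartitionAttributesFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.query.data.PortfolioData;

public class SingleBucketPartitionedRegionSketch {

    // Creates a partitioned region with a single bucket, as the test above configures.
    static Region<Integer, PortfolioData> createRegion(Cache cache, String name) {
        return cache
            .<Integer, PortfolioData>createRegionFactory(RegionShortcut.PARTITION)
            .setValueConstraint(PortfolioData.class)
            .setPartitionAttributes(new PartitionAttributesFactory<Integer, PortfolioData>()
                .setTotalNumBuckets(1).create())
            .create(name);
    }
}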

Example 4 with PortfolioData

Use of org.apache.geode.cache.query.data.PortfolioData in project geode by apache.

From the class PRQueryRegionCloseDUnitTest, method testPRWithRegionCloseInOneDatastoreWithoutDelay.

/**
   * This test <br>
   * 1. Creates PR regions with scope = DACK across one accessor node & 2 datastores <br>
   * 2. Creates a Local region on one of the VMs <br>
   * 3. Puts the same data both in the PR region & the Local region <br>
   * 4. Queries the data both in the local & the PR region <br>
   * 5. Also calls Region.close() at random on one of the datastore VMs, with a delay <br>
   * 6. Then recreates the PR on the same VM <br>
   * 7. Verifies the size, type, and contents of both the resultSets obtained <br>
   */
// GEODE-1720
@Category(FlakyTest.class)
@Test
public void testPRWithRegionCloseInOneDatastoreWithoutDelay() throws Exception {
    LogWriterUtils.getLogWriter().info("PRQueryRegionCloseDUnitTest#testPRWithRegionCloseInOneDatastoreWithoutDelay: Querying PR Test with region Close PR operation*****");
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    VM vm2 = host.getVM(2);
    setCacheInVMs(vm0, vm1, vm2);
    List vmList = new LinkedList();
    vmList.add(vm1);
    vmList.add(vm2);
    LogWriterUtils.getLogWriter().info("PRQueryRegionCloseDUnitTest#testPRWithRegionCloseInOneDatastoreWithoutDelay: Creating Accessor node on VM0");
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name, redundancy, PortfolioData.class));
    LogWriterUtils.getLogWriter().info("PRQueryRegionCloseDUnitTest#testPRWithRegionCloseInOneDatastoreWithoutDelay: Successfully Created Accessor node on VM0");
    LogWriterUtils.getLogWriter().info("PRQueryRegionCloseDUnitTest#testPRWithRegionCloseInOneDatastoreWithoutDelay: Creating PR's across all VM1 , VM2");
    vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name, redundancy, PortfolioData.class));
    vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name, redundancy, PortfolioData.class));
    LogWriterUtils.getLogWriter().info("PRQueryRegionCloseDUnitTest#testPRWithRegionCloseInOneDatastoreWithoutDelay: Successfully Created PR on VM1 , VM2");
    // creating a local region on one of the JVM's
    LogWriterUtils.getLogWriter().info("PRQueryRegionCloseDUnitTest#testPRWithRegionCloseInOneDatastoreWithoutDelay: Creating Local Region on VM0");
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForLocalRegionCreation(localName, PortfolioData.class));
    LogWriterUtils.getLogWriter().info("PRQueryRegionCloseDUnitTest#testPRWithRegionCloseInOneDatastoreWithoutDelay: Successfully Created Local Region on VM0");
    // Generating portfolio object array to be populated across the PR's & Local
    // Regions
    final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
    // Putting the data into the accessor node
    LogWriterUtils.getLogWriter().info("PRQueryRegionCloseDUnitTest#testPRWithRegionCloseInOneDatastoreWithoutDelay: Inserting Portfolio data through the accessor node");
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio, cnt, cntDest));
    LogWriterUtils.getLogWriter().info("PRQueryRegionCloseDUnitTest#testPRWithRegionCloseInOneDatastoreWithoutDelay: Successfully Inserted Portfolio data through the accessor node");
    // Putting the same data in the local region created
    LogWriterUtils.getLogWriter().info("PRQueryRegionCloseDUnitTest#testPRWithRegionCloseInOneDatastoreWithoutDelay: Inserting Portfolio data on local node  VM0 for result Set Comparison");
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName, portfolio, cnt, cntDest));
    LogWriterUtils.getLogWriter().info("PRQueryRegionCloseDUnitTest#testPRWithRegionCloseInOneDatastoreWithoutDelay: Successfully Inserted Portfolio data on local node  VM0 for result Set Comparison");
    Random random = new Random();
    AsyncInvocation async0;
    // querying the VM for data
    LogWriterUtils.getLogWriter().info("PRQueryRegionCloseDUnitTest#testPRWithRegionCloseInOneDatastoreWithoutDelay: Querying on VM0 both on PR Region & local ,also  Comparing the Results sets from both");
    async0 = vm0.invokeAsync(PRQHelp.getCacheSerializableRunnableForPRQueryAndCompareResults(name, localName));
    LogWriterUtils.getLogWriter().info("PRQueryRegionCloseDUnitTest#testPRWithRegionCloseInOneDatastoreWithoutDelay: Calling for Region.close() on either of the Datastores VM1 , VM2 at random and then recreating the cache, with a predefined Delay ");
    for (int j = 0; j < queryTestCycle; j++) {
        int k = (random.nextInt(vmList.size()));
        if (0 != k) {
            ((VM) (vmList.get(k))).invoke(PRQHelp.getCacheSerializableRunnableForRegionClose(name, redundancy, PortfolioData.class));
            Wait.pause(threadSleepTime);
        }
    }
    ThreadUtils.join(async0, 30 * 1000);
    if (async0.exceptionOccurred()) {
        // for now, certain exceptions when a region is closed are acceptable
        // including ForceReattemptException (e.g. resulting from RegionDestroyed)
        boolean isForceReattempt = false;
        Throwable t = async0.getException();
        do {
            if (t instanceof ForceReattemptException) {
                isForceReattempt = true;
                break;
            }
            t = t.getCause();
        } while (t != null);
        if (!isForceReattempt) {
            Assert.fail("Unexpected exception during query", async0.getException());
        }
    }
    LogWriterUtils.getLogWriter().info("PRQueryRegionCloseDUnitTest#testPRWithRegionCloseInOneDatastoreWithoutDelay: Querying with PR Operations ENDED*****");
}
Also used : ForceReattemptException(org.apache.geode.internal.cache.ForceReattemptException) Random(java.util.Random) VM(org.apache.geode.test.dunit.VM) Host(org.apache.geode.test.dunit.Host) List(java.util.List) LinkedList(java.util.LinkedList) Utils.createPortfolioData(org.apache.geode.cache.query.Utils.createPortfolioData) PortfolioData(org.apache.geode.cache.query.data.PortfolioData) AsyncInvocation(org.apache.geode.test.dunit.AsyncInvocation) LinkedList(java.util.LinkedList) Category(org.junit.experimental.categories.Category) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest) FlakyTest(org.apache.geode.test.junit.categories.FlakyTest)
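
The acceptance of ForceReattemptException above comes down to walking the exception's cause chain. A small standalone helper (hypothetical, not part of the test) makes that check reusable; it could be called as hasCause(async0.getException(), ForceReattemptException.class).

public final class CauseChains {

    // Returns true if the throwable or any of its causes is an instance of the given type.
    public static boolean hasCause(Throwable thrown, Class<? extends Throwable> type) {
        for (Throwable current = thrown; current != null; current = current.getCause()) {
            if (type.isInstance(current)) {
                return true;
            }
        }
        return false;
    }
}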

Example 5 with PortfolioData

Use of org.apache.geode.cache.query.data.PortfolioData in project geode by apache.

From the class PRQueryRegionClosedJUnitTest, method testQueryingWithRegionClose.

/**
   * Tests the execution of a query on a PartitionedRegion created on a single data store. <br>
   * 1. Creates a PR with redundancy=0 on a single VM. <br>
   * 2. Puts some test objects in the cache.<br>
   * 3. Creates a Thread that fires queries on the data and verifies the results.<br>
   * 4. Creates another Thread that calls Region#close() on the PR region.<br>
   *
   * @throws Exception
   */
@Test
public void testQueryingWithRegionClose() throws Exception {
    logger.info("PRQueryRegionClosedJUnitTest#testQueryingWithRegionClose: Test Started  ");
    logger.info("PRQueryRegionClosedJUnitTest#testQueryingWithRegionClose: creating PR Region ");
    final Region region = PartitionedRegionTestHelper.createPartitionedRegion(regionName, localMaxMemory, redundancy);
    final Region localRegion = PartitionedRegionTestHelper.createLocalRegion(localRegionName);
    final StringBuffer errorBuf = new StringBuffer("");
    PortfolioData[] portfolios = new PortfolioData[100];
    try {
        for (int j = 0; j < 100; j++) {
            portfolios[j] = new PortfolioData(j);
        }
        logger.info("PRQueryRegionClosedJUnitTest#testQueryingWithRegionClose: populating PortfolioData into the PR Datastore  ");
        populateData(region, portfolios);
        logger.info("PRQueryRegionClosedJUnitTest#testQueryingWithRegionClose: populating PortfolioData into the PR Datastore  ");
        populateData(localRegion, portfolios);
        final String[] queryString = { "ID = 0 OR ID = 1", "ID > 4 AND ID < 9", "ID = 5", "ID < 5 ", "ID <= 5" };
        logger.info("PRQueryRegionClosedJUnitTest#testQueryingWithRegionClose: Creating a Thread which will fire queries on the datastore");
        Thread t1 = new Thread(new Runnable() {

            public void run() {
                final String expectedRegionDestroyedException = RegionDestroyedException.class.getName();
                logger.info("<ExpectedException action=add>" + expectedRegionDestroyedException + "</ExpectedException>");
                for (int i = 0; i < queryString.length; i++) {
                    try {
                        SelectResults resSetPR = region.query(queryString[i]);
                        SelectResults resSetLocal = localRegion.query(queryString[i]);
                        String failureString = PartitionedRegionTestHelper.compareResultSets(resSetPR, resSetLocal);
                        Thread.sleep(delayQuery);
                        if (failureString != null) {
                            errorBuf.append(failureString);
                            throw (new Exception(failureString));
                        }
                    } catch (InterruptedException ie) {
                        fail("interrupted");
                    } catch (RegionDestroyedException rde) {
                        logger.info("PRQueryRegionClosedJUnitTest#testQueryingWithRegionClose: RegionDestroyedException as Expected " + rde);
                    } catch (RegionNotFoundException rnfe) {
                        logger.info("PRQueryRegionClosedJUnitTest#testQueryingWithRegionClose: RegionNotFoundException as Expected " + rnfe);
                    } catch (QueryInvocationTargetException qite) {
                        logger.info("PRQueryRegionClosedJUnitTest#testQueryingWithRegionClose: QueryInvocationTargetException as Expected " + qite);
                    } catch (Exception qe) {
                        logger.info("PRQueryRegionClosedJUnitTest#testQueryingWithRegionClose: Unexpected Exception " + qe);
                        encounteredException = true;
                        StringWriter sw = new StringWriter();
                        qe.printStackTrace(new PrintWriter(sw));
                        errorBuf.append(sw);
                    }
                }
                logger.info("<ExpectedException action=remove>" + expectedRegionDestroyedException + "</ExpectedException>");
            }
        });
        logger.info("PRQueryRegionClosedJUnitTest#testQueryingWithRegionClose: Creating a Thread which will call Region.destroyRegion() on the datastore ");
        Thread t2 = new Thread(new Runnable() {

            public void run() {
                try {
                    Thread.sleep(2500);
                } catch (InterruptedException ie) {
                    fail("interrupted");
                }
                region.close();
                logger.info("PROperationWithQueryDUnitTest#getCacheSerializableRunnableForRegionClose: Region Closed on VM ");
            }
        });
        logger.info("PRQueryRegionClosedJUnitTest#testQueryingWithRegionClose: Initiating the  Threads");
        t1.start();
        t2.start();
        logger.info("PRQueryRegionClosedJUnitTest#testQueryingWithRegionClose: Waiting for the Threads to join ");
        t1.join(30000);
        assertFalse(t1.isAlive());
        t2.join(30000);
        assertFalse(t2.isAlive());
        logger.info("PRQueryRegionClosedJUnitTest#testQueryingWithRegionClose: checking for any Unexpected Exception's occurred");
        assertFalse("PRQueryRegionClosedJUnitTest#testQueryingWithRegionClose: Exception occurred in Query-thread", encounteredException);
    } catch (Exception e) {
        e.printStackTrace();
        fail("PRQueryRegionClosedJUnitTest#testQueryingWithRegionClose: Test failed because of exception " + e);
    }
    logger.info("PRQueryRegionClosedJUnitTest#testQueryingWithRegionClose: Test Ended");
}
Also used : RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) RegionNotFoundException(org.apache.geode.cache.query.RegionNotFoundException) QueryInvocationTargetException(org.apache.geode.cache.query.QueryInvocationTargetException) RegionNotFoundException(org.apache.geode.cache.query.RegionNotFoundException) RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) QueryInvocationTargetException(org.apache.geode.cache.query.QueryInvocationTargetException) SelectResults(org.apache.geode.cache.query.SelectResults) StringWriter(java.io.StringWriter) Region(org.apache.geode.cache.Region) PortfolioData(org.apache.geode.cache.query.data.PortfolioData) PrintWriter(java.io.PrintWriter) Test(org.junit.Test) IntegrationTest(org.apache.geode.test.junit.categories.IntegrationTest)
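
When a query races with Region.close(), the test treats RegionDestroyedException, RegionNotFoundException, and QueryInvocationTargetException as expected outcomes. The helper below is a hedged sketch of that classification (class and method names are assumptions); it returns null for the expected closed-region failures and propagates anything else.

import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionDestroyedException;
import org.apache.geode.cache.query.QueryInvocationTargetException;
import org.apache.geode.cache.query.RegionNotFoundException;
import org.apache.geode.cache.query.SelectResults;

public class QueryDuringCloseSketch {

    // Runs a region query; returns null when the region was closed underneath the query.
    static SelectResults<?> queryOrNull(Region<?, ?> region, String predicate) throws Exception {
        try {
            return region.query(predicate);
        } catch (Exception e) {
            // The test above treats these as expected when the region is closed mid-query.
            if (e instanceof RegionDestroyedException || e instanceof RegionNotFoundException
                || e instanceof QueryInvocationTargetException) {
                return null;
            }
            throw e;
        }
    }
}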

Aggregations

PortfolioData (org.apache.geode.cache.query.data.PortfolioData)57 Test (org.junit.Test)57 Host (org.apache.geode.test.dunit.Host)52 VM (org.apache.geode.test.dunit.VM)52 DistributedTest (org.apache.geode.test.junit.categories.DistributedTest)52 Cache (org.apache.geode.cache.Cache)18 CacheSerializableRunnable (org.apache.geode.cache30.CacheSerializableRunnable)18 Region (org.apache.geode.cache.Region)17 AsyncInvocation (org.apache.geode.test.dunit.AsyncInvocation)17 CacheException (org.apache.geode.cache.CacheException)16 SelectResults (org.apache.geode.cache.query.SelectResults)14 Index (org.apache.geode.cache.query.Index)13 Query (org.apache.geode.cache.query.Query)12 FlakyTest (org.apache.geode.test.junit.categories.FlakyTest)12 ArrayList (java.util.ArrayList)11 AttributesFactory (org.apache.geode.cache.AttributesFactory)11 RegionFactory (org.apache.geode.cache.RegionFactory)11 LinkedList (java.util.LinkedList)9 List (java.util.List)9 PartitionAttributesFactory (org.apache.geode.cache.PartitionAttributesFactory)9