Search in sources :

Example 86 with IgnoredException

use of org.apache.geode.test.dunit.IgnoredException in project geode by apache.

This example is taken from the class PersistPRKRFDUnitTest, method testCloseDiskStoreWhenPut.

/**
   * do a put/modify/destroy while closing disk store
   * 
   * to turn on debug, add following parameter in local.conf: hydra.VmPrms-extraVMArgs +=
   * "-Ddisk.KRF_DEBUG=true";
   */
@Test
public void testCloseDiskStoreWhenPut() {
    final String title = "testCloseDiskStoreWhenPut:";
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    createPR(vm0, 0);
    createData(vm0, 0, 10, "a");
    vm0.invoke(new CacheSerializableRunnable(title + "server add writer") {

        public void run2() throws CacheException {
            Region region = getRootRegion(PR_REGION_NAME);
            // let the region to hold on the put until diskstore is closed
            if (!DiskStoreImpl.KRF_DEBUG) {
                region.getAttributesMutator().setCacheWriter(new MyWriter());
            }
        }
    });
    // create test
    AsyncInvocation async1 = vm0.invokeAsync(new CacheSerializableRunnable(title + "async create") {

        public void run2() throws CacheException {
            Region region = getRootRegion(PR_REGION_NAME);
            IgnoredException expect = IgnoredException.addIgnoredException("CacheClosedException");
            try {
                region.put(10, "b");
                fail("Expect CacheClosedException here");
            } catch (CacheClosedException cce) {
                System.out.println(title + cce.getMessage());
                if (DiskStoreImpl.KRF_DEBUG) {
                    assert cce.getMessage().contains("The disk store is closed.");
                } else {
                    assert cce.getMessage().contains("The disk store is closed");
                }
            } finally {
                expect.remove();
            }
        }
    });
    vm0.invoke(new CacheSerializableRunnable(title + "close disk store") {

        public void run2() throws CacheException {
            GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
            Wait.pause(500);
            gfc.closeDiskStores();
            synchronized (lockObject) {
                lockObject.notify();
            }
        }
    });
    ThreadUtils.join(async1, MAX_WAIT);
    closeCache(vm0);
    // update
    createPR(vm0, 0);
    vm0.invoke(new CacheSerializableRunnable(title + "server add writer") {

        public void run2() throws CacheException {
            Region region = getRootRegion(PR_REGION_NAME);
            // let the region to hold on the put until diskstore is closed
            if (!DiskStoreImpl.KRF_DEBUG) {
                region.getAttributesMutator().setCacheWriter(new MyWriter());
            }
        }
    });
    async1 = vm0.invokeAsync(new CacheSerializableRunnable(title + "async update") {

        public void run2() throws CacheException {
            Region region = getRootRegion(PR_REGION_NAME);
            IgnoredException expect = IgnoredException.addIgnoredException("CacheClosedException");
            try {
                region.put(1, "b");
                fail("Expect CacheClosedException here");
            } catch (CacheClosedException cce) {
                System.out.println(title + cce.getMessage());
                if (DiskStoreImpl.KRF_DEBUG) {
                    assert cce.getMessage().contains("The disk store is closed.");
                } else {
                    assert cce.getMessage().contains("The disk store is closed");
                }
            } finally {
                expect.remove();
            }
        }
    });
    vm0.invoke(new CacheSerializableRunnable(title + "close disk store") {

        public void run2() throws CacheException {
            GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
            Wait.pause(500);
            gfc.closeDiskStores();
            synchronized (lockObject) {
                lockObject.notify();
            }
        }
    });
    ThreadUtils.join(async1, MAX_WAIT);
    closeCache(vm0);
    // destroy
    createPR(vm0, 0);
    vm0.invoke(new CacheSerializableRunnable(title + "server add writer") {

        public void run2() throws CacheException {
            Region region = getRootRegion(PR_REGION_NAME);
            // let the region to hold on the put until diskstore is closed
            if (!DiskStoreImpl.KRF_DEBUG) {
                region.getAttributesMutator().setCacheWriter(new MyWriter());
            }
        }
    });
    async1 = vm0.invokeAsync(new CacheSerializableRunnable(title + "async destroy") {

        public void run2() throws CacheException {
            Region region = getRootRegion(PR_REGION_NAME);
            IgnoredException expect = IgnoredException.addIgnoredException("CacheClosedException");
            try {
                region.destroy(2, "b");
                fail("Expect CacheClosedException here");
            } catch (CacheClosedException cce) {
                System.out.println(title + cce.getMessage());
                if (DiskStoreImpl.KRF_DEBUG) {
                    assert cce.getMessage().contains("The disk store is closed.");
                } else {
                    assert cce.getMessage().contains("The disk store is closed");
                }
            } finally {
                expect.remove();
            }
        }
    });
    vm0.invoke(new CacheSerializableRunnable(title + "close disk store") {

        public void run2() throws CacheException {
            GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
            Wait.pause(500);
            gfc.closeDiskStores();
            synchronized (lockObject) {
                lockObject.notify();
            }
        }
    });
    ThreadUtils.join(async1, MAX_WAIT);
    checkData(vm0, 0, 10, "a");
    checkData(vm0, 10, 11, null);
    closeCache(vm0);
}
Also used : CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) CacheException(org.apache.geode.cache.CacheException) VM(org.apache.geode.test.dunit.VM) Region(org.apache.geode.cache.Region) IgnoredException(org.apache.geode.test.dunit.IgnoredException) GemFireCacheImpl(org.apache.geode.internal.cache.GemFireCacheImpl) Host(org.apache.geode.test.dunit.Host) CacheClosedException(org.apache.geode.cache.CacheClosedException) AsyncInvocation(org.apache.geode.test.dunit.AsyncInvocation) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)

Example 87 with IgnoredException

use of org.apache.geode.test.dunit.IgnoredException in project geode by apache.

This example is taken from the class PersistentPartitionedRegionDUnitTest, method testCleanupAfterConflict.

@Test
public void testCleanupAfterConflict() throws Exception {
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    createPR(vm0, 0);
    // create some buckets
    createData(vm0, 0, 2, "a");
    closePR(vm0);
    createPR(vm1, 0);
    // create an overlapping bucket
    createData(vm1, 1, 2, "a");
    IgnoredException[] expectVm0 = { IgnoredException.addIgnoredException("ConflictingPersistentDataException", vm0), IgnoredException.addIgnoredException("CacheClosedException", vm0) };
    try {
        // This results in ConflictingPersistentDataException. As part of
        // GEODE-2918, the cache is closed, when ConflictingPersistentDataException
        // is encountered.
        createPR(vm0, 0);
        fail("should have seen a conflicting data exception");
    } catch (Exception ex) {
        boolean expectedException = false;
        if (ex.getCause() instanceof CacheClosedException) {
            CacheClosedException cce = (CacheClosedException) ex.getCause();
            if (cce.getCause() instanceof ConflictingPersistentDataException) {
                expectedException = true;
            }
        }
        if (!expectedException) {
            throw ex;
        }
    } finally {
        for (IgnoredException ie : expectVm0) {
            ie.remove();
        }
    }
    IgnoredException expectVm1 = IgnoredException.addIgnoredException("PartitionOfflineException", vm1);
    try {
        createData(vm1, 0, 1, "a");
    } catch (Exception e) {
        // restart.
        if (!(e.getCause() instanceof PartitionOfflineException)) {
            throw e;
        }
    } finally {
        expectVm1.remove();
    }
    closePR(vm1);
    // This should succeed, vm0 should not have persisted any view
    // information from vm1
    createPR(vm0, 0);
    checkData(vm0, 0, 2, "a");
    checkData(vm0, 2, 3, null);
}
Also used : PartitionOfflineException(org.apache.geode.cache.persistence.PartitionOfflineException) VM(org.apache.geode.test.dunit.VM) IgnoredException(org.apache.geode.test.dunit.IgnoredException) ConflictingPersistentDataException(org.apache.geode.cache.persistence.ConflictingPersistentDataException) Host(org.apache.geode.test.dunit.Host) CacheClosedException(org.apache.geode.cache.CacheClosedException) RevokeFailedException(org.apache.geode.cache.persistence.RevokeFailedException) RevokedPersistentDataException(org.apache.geode.cache.persistence.RevokedPersistentDataException) ConflictingPersistentDataException(org.apache.geode.cache.persistence.ConflictingPersistentDataException) DiskAccessException(org.apache.geode.cache.DiskAccessException) ConcurrentModificationException(java.util.ConcurrentModificationException) IgnoredException(org.apache.geode.test.dunit.IgnoredException) PartitionedRegionStorageException(org.apache.geode.cache.PartitionedRegionStorageException) RMIException(org.apache.geode.test.dunit.RMIException) CacheClosedException(org.apache.geode.cache.CacheClosedException) PartitionOfflineException(org.apache.geode.cache.persistence.PartitionOfflineException) ServerOperationException(org.apache.geode.cache.client.ServerOperationException) IOException(java.io.IOException) ReplyException(org.apache.geode.distributed.internal.ReplyException) QueryException(org.apache.geode.cache.query.QueryException) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest) FlakyTest(org.apache.geode.test.junit.categories.FlakyTest) Test(org.junit.Test)

Example 88 with IgnoredException

use of org.apache.geode.test.dunit.IgnoredException in project geode by apache.

This example is taken from the class PersistentPartitionedRegionDUnitTest, method testRevokeBeforeStartup.

// GEODE-974: async actions, time sensitive, 65 second timeouts
@Category(FlakyTest.class)
@Test
public void testRevokeBeforeStartup() throws Throwable {
    IgnoredException.addIgnoredException("RevokeFailedException");
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    VM vm2 = host.getVM(2);
    int numBuckets = 50;
    createPR(vm0, 1);
    createPR(vm1, 1);
    createData(vm0, 0, numBuckets, "a");
    Set<Integer> vm0Buckets = getBucketList(vm0);
    Set<Integer> vm1Buckets = getBucketList(vm1);
    assertEquals(vm0Buckets, vm1Buckets);
    // This should fail with a revocation failed message
    try {
        revokeAllMembers(vm2);
        fail("The revoke should have failed, because members are running");
    } catch (RMIException e) {
        if (!(e.getCause() instanceof ReplyException && e.getCause().getCause() instanceof RevokeFailedException)) {
            throw e;
        }
    }
    closeCache(vm0);
    createData(vm1, 0, numBuckets, "b");
    File vm1Directory = getDiskDirectory(vm1);
    closeCache(vm1);
    vm0.invoke(new SerializableRunnable("get cache") {

        public void run() {
            getCache();
        }
    });
    revokeMember(vm2, vm1Directory);
    AsyncInvocation a1 = createPRAsync(vm0, 1);
    a1.getResult(MAX_WAIT);
    assertEquals(vm0Buckets, getBucketList(vm0));
    checkData(vm0, 0, numBuckets, "a");
    createData(vm0, numBuckets, 113, "b");
    checkData(vm0, numBuckets, 113, "b");
    IgnoredException ex = IgnoredException.addIgnoredException(RevokedPersistentDataException.class.getName(), vm1);
    try {
        createPR(vm1, 1);
        fail("Should have recieved a SplitDistributedSystemException");
    } catch (RMIException e) {
        // We revoked this member.
        if (!(e.getCause() instanceof RevokedPersistentDataException)) {
            throw e;
        }
    }
    ex.remove();
}
Also used : RevokedPersistentDataException(org.apache.geode.cache.persistence.RevokedPersistentDataException) RMIException(org.apache.geode.test.dunit.RMIException) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) Host(org.apache.geode.test.dunit.Host) AsyncInvocation(org.apache.geode.test.dunit.AsyncInvocation) ReplyException(org.apache.geode.distributed.internal.ReplyException) VM(org.apache.geode.test.dunit.VM) IgnoredException(org.apache.geode.test.dunit.IgnoredException) File(java.io.File) RevokeFailedException(org.apache.geode.cache.persistence.RevokeFailedException) Category(org.junit.experimental.categories.Category) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest) FlakyTest(org.apache.geode.test.junit.categories.FlakyTest) Test(org.junit.Test)

Example 89 with IgnoredException

use of org.apache.geode.test.dunit.IgnoredException in project geode by apache.

This example is taken from the class PersistentPartitionedRegionDUnitTest, method checkReadWriteOperationsWithOfflineMember.

private void checkReadWriteOperationsWithOfflineMember(VM vm0, final int aVM0Bucket, final int aVM1Bucket) {
    // This should work, because this bucket is still available.
    checkData(vm0, aVM0Bucket, aVM0Bucket + 1, "a");
    try {
        checkData(vm0, aVM1Bucket, aVM1Bucket + 1, null);
        fail("Should not have been able to read from missing buckets!");
    } catch (RMIException e) {
        // We expect a PartitionOfflineException
        if (!(e.getCause() instanceof PartitionOfflineException)) {
            throw e;
        }
    }
    IgnoredException expect = IgnoredException.addIgnoredException("PartitionOfflineException", vm0);
    // Try a function execution
    vm0.invoke(new SerializableRunnable("Test ways to read") {

        public void run() {
            Cache cache = getCache();
            Region region = cache.getRegion(PR_REGION_NAME);
            try {
                FunctionService.onRegion(region).execute(new TestFunction());
                fail("Should not have been able to read from missing buckets!");
            } catch (PartitionOfflineException e) {
            // expected
            }
            // This should work, because this bucket is still available.
            FunctionService.onRegion(region).withFilter(Collections.singleton(aVM0Bucket)).execute(new TestFunction());
            // This should fail, because this bucket is offline
            try {
                FunctionService.onRegion(region).withFilter(Collections.singleton(aVM1Bucket)).execute(new TestFunction());
                fail("Should not have been able to read from missing buckets!");
            } catch (PartitionOfflineException e) {
            // expected
            }
            // This should fail, because a bucket is offline
            try {
                HashSet filter = new HashSet();
                filter.add(aVM0Bucket);
                filter.add(aVM1Bucket);
                FunctionService.onRegion(region).withFilter(filter).execute(new TestFunction());
                fail("Should not have been able to read from missing buckets!");
            } catch (PartitionOfflineException e) {
            // expected
            }
            // This should fail, because a bucket is offline
            try {
                FunctionService.onRegion(region).execute(new TestFunction());
                fail("Should not have been able to read from missing buckets!");
            } catch (PartitionOfflineException e) {
            // expected
            }
            try {
                cache.getQueryService().newQuery("select * from /" + PR_REGION_NAME).execute();
                fail("Should not have been able to read from missing buckets!");
            } catch (PartitionOfflineException e) {
            // expected
            } catch (QueryException e) {
                throw new RuntimeException(e);
            }
            try {
                Set keys = region.keySet();
                // iterate over all of the keys
                for (Object key : keys) {
                }
                fail("Should not have been able to iterate over keyset");
            } catch (PartitionOfflineException e) {
            // expected
            }
            try {
                // iterate over all of the keys
                for (Object key : region.values()) {
                }
                fail("Should not have been able to iterate over set");
            } catch (PartitionOfflineException e) {
            // expected
            }
            try {
                // iterate over all of the keys
                for (Object key : region.entrySet()) {
                }
                fail("Should not have been able to iterate over set");
            } catch (PartitionOfflineException e) {
            // expected
            }
            try {
                region.get(aVM1Bucket);
                fail("Should not have been able to get an offline key");
            } catch (PartitionOfflineException e) {
            // expected
            }
            try {
                region.containsKey(aVM1Bucket);
                fail("Should not have been able to get an offline key");
            } catch (PartitionOfflineException e) {
            // expected
            }
            try {
                region.getEntry(aVM1Bucket);
                fail("Should not have been able to get an offline key");
            } catch (PartitionOfflineException e) {
            // expected
            }
            try {
                region.invalidate(aVM1Bucket);
                fail("Should not have been able to get an offline key");
            } catch (PartitionOfflineException e) {
            // expected
            }
            try {
                region.destroy(aVM1Bucket);
                fail("Should not have been able to get an offline key");
            } catch (PartitionOfflineException e) {
            // expected
            }
        }
    });
    try {
        createData(vm0, aVM1Bucket, aVM1Bucket + 1, "b");
        fail("Should not have been able to write to missing buckets!");
    } catch (RMIException e) {
        // We expect to see a partition offline exception here.
        if (!(e.getCause() instanceof PartitionOfflineException)) {
            throw e;
        }
    }
    expect.remove();
}
Also used : RMIException(org.apache.geode.test.dunit.RMIException) QueryException(org.apache.geode.cache.query.QueryException) Set(java.util.Set) HashSet(java.util.HashSet) PartitionOfflineException(org.apache.geode.cache.persistence.PartitionOfflineException) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) IgnoredException(org.apache.geode.test.dunit.IgnoredException) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) Region(org.apache.geode.cache.Region) Cache(org.apache.geode.cache.Cache) HashSet(java.util.HashSet)

Example 90 with IgnoredException

use of org.apache.geode.test.dunit.IgnoredException in project geode by apache.

This example is taken from the class PersistentPartitionedRegionDUnitTest, method testBadSerializationInAsyncThread.

/**
   * Test for bug #49972 - handle a serialization error in the async writer thread.
   */
@Ignore("Bug 50376")
@Test
public void testBadSerializationInAsyncThread() throws Throwable {
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    VM vm2 = host.getVM(2);
    final int numBuckets = 50;
    vm0.invoke(new SerializableRunnable() {

        @Override
        public void run() {
            FAIL_IN_THIS_VM = true;
        }
    });
    IgnoredException expected1 = IgnoredException.addIgnoredException("Fatal error from asynch");
    IgnoredException expected2 = IgnoredException.addIgnoredException("ToDataException");
    try {
        int redundancy = 1;
        createPR(vm0, redundancy, -1, 113, false);
        createPR(vm2, redundancy, -1, 113, false);
        // Trigger bucket creation
        createData(vm0, 0, numBuckets, "a");
        createPR(vm1, redundancy, -1, 113, false);
        // write objects which will fail serialization in async writer thread.
        vm0.invoke(new SerializableRunnable() {

            public void run() {
                Cache cache = getCache();
                Region region = cache.getRegion(PR_REGION_NAME);
                try {
                    for (int i = 0; i < numBuckets; i++) {
                        region.put(i, new BadSerializer());
                        // this will trigger a deserialiation (could have also done this put with a function I
                        // guess.
                        region.get(i);
                    }
                } catch (DiskAccessException ex) {
                    if (ex.getMessage().contains("the flusher thread had been terminated")) {
                    // expected
                    } else {
                        throw ex;
                    }
                }
            }
        });
        // Wait for the thread to get hosed.
        Thread.sleep(2000);
        createData(vm1, 0, numBuckets, "b");
        // Try to do puts from vm1, which doesn't have any buckets
        createData(vm1, numBuckets, numBuckets * 2, "b");
        createData(vm1, numBuckets, numBuckets * 2, "c");
        // make sure everything has settle out (these VM's I suppose may be terminated)
        checkData(vm2, 0, numBuckets, "b");
        checkData(vm2, numBuckets, numBuckets * 2, "c");
    } finally {
        expected1.remove();
        expected2.remove();
    }
}
Also used : VM(org.apache.geode.test.dunit.VM) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) DiskAccessException(org.apache.geode.cache.DiskAccessException) IgnoredException(org.apache.geode.test.dunit.IgnoredException) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) Region(org.apache.geode.cache.Region) Host(org.apache.geode.test.dunit.Host) Cache(org.apache.geode.cache.Cache) Ignore(org.junit.Ignore) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest) FlakyTest(org.apache.geode.test.junit.categories.FlakyTest) Test(org.junit.Test)

Aggregations

IgnoredException (org.apache.geode.test.dunit.IgnoredException)142 Test (org.junit.Test)89 DistributedTest (org.apache.geode.test.junit.categories.DistributedTest)71 Region (org.apache.geode.cache.Region)46 FlakyTest (org.apache.geode.test.junit.categories.FlakyTest)46 VM (org.apache.geode.test.dunit.VM)43 Host (org.apache.geode.test.dunit.Host)38 PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion)34 AttributesFactory (org.apache.geode.cache.AttributesFactory)30 ForceReattemptException (org.apache.geode.internal.cache.ForceReattemptException)28 PartitionAttributesFactory (org.apache.geode.cache.PartitionAttributesFactory)23 IOException (java.io.IOException)21 CacheClosedException (org.apache.geode.cache.CacheClosedException)21 LocalRegion (org.apache.geode.internal.cache.LocalRegion)20 PartitionOfflineException (org.apache.geode.cache.persistence.PartitionOfflineException)16 RMIException (org.apache.geode.test.dunit.RMIException)15 GatewaySender (org.apache.geode.cache.wan.GatewaySender)14 BucketRegion (org.apache.geode.internal.cache.BucketRegion)14 SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable)14 CacheXmlException (org.apache.geode.cache.CacheXmlException)12