Example 1 with PartitionedRegionStorageException

use of org.apache.geode.cache.PartitionedRegionStorageException in project geode by apache.

the class PersistentColocatedPartitionedRegionDUnitTest method testRecoverySystemWithConcurrentPutter.

/**
   * Test what happens when we restart persistent members while there is an accessor concurrently
   * performing puts. This is for bug 43899
   */
@Test
public void testRecoverySystemWithConcurrentPutter() throws Throwable {
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    VM vm2 = host.getVM(2);
    VM vm3 = host.getVM(3);
    // Define all of the runnables used in this test
    // runnable to create accessors
    SerializableRunnable createAccessor = new SerializableRunnable("createAccessor") {

        public void run() {
            Cache cache = getCache();
            AttributesFactory af = new AttributesFactory();
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(1);
            paf.setLocalMaxMemory(0);
            af.setPartitionAttributes(paf.create());
            af.setDataPolicy(DataPolicy.PARTITION);
            cache.createRegion(PR_REGION_NAME, af.create());
            paf.setColocatedWith(PR_REGION_NAME);
            af.setPartitionAttributes(paf.create());
            cache.createRegion("region2", af.create());
        }
    };
    // runnable to create PRs
    SerializableRunnable createPRs = new SerializableRunnable("createPRs") {

        public void run() {
            Cache cache = getCache();
            DiskStore ds = cache.findDiskStore("disk");
            if (ds == null) {
                ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("disk");
            }
            AttributesFactory af = new AttributesFactory();
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(1);
            af.setPartitionAttributes(paf.create());
            af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
            af.setDiskStoreName("disk");
            cache.createRegion(PR_REGION_NAME, af.create());
            paf.setColocatedWith(PR_REGION_NAME);
            af.setPartitionAttributes(paf.create());
            cache.createRegion("region2", af.create());
        }
    };
    // runnable to close the cache.
    SerializableRunnable closeCache = new SerializableRunnable("closeCache") {

        public void run() {
            closeCache();
        }
    };
    // Runnable to do a bunch of puts and handle the exceptions
    // caused by a member being offline.
    SerializableRunnable doABunchOfPuts = new SerializableRunnable("doABunchOfPuts") {

        public void run() {
            Cache cache = getCache();
            Region region = cache.getRegion(PR_REGION_NAME);
            try {
                for (int i = 0; ; i++) {
                    try {
                        region.get(i % NUM_BUCKETS);
                    } catch (PartitionOfflineException expected) {
                    // do nothing.
                    } catch (PartitionedRegionStorageException expected) {
                    // do nothing.
                    }
                    Thread.yield();
                }
            } catch (CacheClosedException expected) {
            // ok, we're done.
            }
        }
    };
    // Runnable to clean up disk dirs on the members
    SerializableRunnable cleanDiskDirs = new SerializableRunnable("Clean disk dirs") {

        public void run() {
            try {
                cleanDiskDirs();
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }
    };
    // Create the PR in two members
    vm1.invoke(createPRs);
    vm2.invoke(createPRs);
    // create the accessor.
    vm0.invoke(createAccessor);
    // Create some buckets.
    createData(vm0, 0, NUM_BUCKETS, "a");
    createData(vm0, 0, NUM_BUCKETS, "a", "region2");
    // Backup the system. We use this to get a snapshot of vm1 and vm2
    // when they are both online. Recovering from this backup simulates
    // a simultaneous kill and recovery.
    backup(vm3);
    // close vm1 and vm2.
    vm1.invoke(closeCache);
    vm2.invoke(closeCache);
    // restore the backup
    vm1.invoke(cleanDiskDirs);
    vm2.invoke(cleanDiskDirs);
    restoreBackup(2);
    // in vm0, start doing a bunch of concurrent puts.
    AsyncInvocation async0 = vm0.invokeAsync(doABunchOfPuts);
    // This recovery should not hang (that's what we're testing for
    // here).
    AsyncInvocation async1 = vm1.invokeAsync(createPRs);
    AsyncInvocation async2 = vm2.invokeAsync(createPRs);
    async1.getResult(MAX_WAIT);
    async2.getResult(MAX_WAIT);
    // close the cache in vm0 to stop the async puts.
    vm0.invoke(closeCache);
    // make sure we didn't get an exception
    async0.getResult(MAX_WAIT);
}
Also used : SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) Host(org.apache.geode.test.dunit.Host) CacheClosedException(org.apache.geode.cache.CacheClosedException) IOException(java.io.IOException) AsyncInvocation(org.apache.geode.test.dunit.AsyncInvocation) DiskStore(org.apache.geode.cache.DiskStore) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) PartitionOfflineException(org.apache.geode.cache.persistence.PartitionOfflineException) PartitionedRegionStorageException(org.apache.geode.cache.PartitionedRegionStorageException) VM(org.apache.geode.test.dunit.VM) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) Region(org.apache.geode.cache.Region) Cache(org.apache.geode.cache.Cache) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest) FlakyTest(org.apache.geode.test.junit.categories.FlakyTest) Test(org.junit.Test)
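
The essential pattern in doABunchOfPuts is to keep operating on the region while persistent members restart and to treat bucket-unavailability exceptions as expected. Below is a minimal, self-contained sketch of that pattern; the TolerantReader class name and the null return for an unavailable bucket are illustrative choices, not part of the Geode test.

import org.apache.geode.cache.PartitionedRegionStorageException;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.persistence.PartitionOfflineException;

public final class TolerantReader {

    private TolerantReader() {}

    /**
     * Reads a key from a partitioned region, treating "bucket not currently
     * hostable" conditions as expected while persistent members recover.
     */
    public static Object getIgnoringRecovery(Region<Integer, ?> region, int key) {
        try {
            return region.get(key);
        } catch (PartitionOfflineException | PartitionedRegionStorageException expected) {
            // A persistent member hosting the bucket is still offline, or no
            // member can host the bucket yet; the caller simply retries later.
            return null;
        }
    }
}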

Example 2 with PartitionedRegionStorageException

use of org.apache.geode.cache.PartitionedRegionStorageException in project geode by apache.

the class PRHARedundancyProvider method insufficientStores.

/**
   * Indicate that we are unable to allocate sufficient stores and that the timeout period has passed.
   * 
   * @param allStores stores we know about
   * @param alreadyUsed ones already committed
   * @param onlyLog true if only a warning log message should be generated.
   */
private void insufficientStores(Set allStores, Collection alreadyUsed, boolean onlyLog) {
    final String regionStat = regionStatus(this.prRegion, allStores, alreadyUsed, onlyLog);
    final char newLine;
    if (onlyLog) {
        newLine = ' ';
    } else {
        newLine = '\n';
    }
    final StringId notEnoughValidNodes;
    if (alreadyUsed.isEmpty()) {
        notEnoughValidNodes = LocalizedStrings.PRHARRedundancyProvider_UNABLE_TO_FIND_ANY_MEMBERS_TO_HOST_A_BUCKET_IN_THE_PARTITIONED_REGION_0;
    } else {
        notEnoughValidNodes = LocalizedStrings.PRHARRedundancyProvider_CONFIGURED_REDUNDANCY_LEVEL_COULD_NOT_BE_SATISFIED_0;
    }
    final Object[] notEnoughValidNodesArgs = new Object[] { PRHARedundancyProvider.INSUFFICIENT_STORES_MSG, newLine + regionStat + newLine };
    if (onlyLog) {
        logger.warn(LocalizedMessage.create(notEnoughValidNodes, notEnoughValidNodesArgs));
    } else {
        throw new PartitionedRegionStorageException(notEnoughValidNodes.toLocalizedString(notEnoughValidNodesArgs));
    }
}
Also used : StringId(org.apache.geode.i18n.StringId) PartitionedRegionStorageException(org.apache.geode.cache.PartitionedRegionStorageException)
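
When onlyLog is false, insufficientStores surfaces the failure to the caller by throwing PartitionedRegionStorageException. The standalone sketch below shows how application code can catch that exception around a put; the cache properties, region name, key, and value are illustrative assumptions, not taken from the Geode sources.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.PartitionedRegionStorageException;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionFactory;
import org.apache.geode.cache.RegionShortcut;

public class PartitionedPutExample {

    public static void main(String[] args) {
        // Standalone, locator-less member; properties and names are illustrative.
        Cache cache = new CacheFactory().set("mcast-port", "0").set("locators", "").create();
        RegionFactory<String, String> factory = cache.createRegionFactory(RegionShortcut.PARTITION);
        Region<String, String> region = factory.create("example");
        try {
            region.put("key", "value");
        } catch (PartitionedRegionStorageException e) {
            // Thrown when no member can host the bucket, for example when every
            // data store is offline or has exhausted its local max memory.
            cache.getLogger().warning("Bucket allocation failed: " + e.getMessage());
        } finally {
            cache.close();
        }
    }
}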

Example 3 with PartitionedRegionStorageException

use of org.apache.geode.cache.PartitionedRegionStorageException in project geode by apache.

the class PartitionedRegionStatsJUnitTest method validateStats.

/**
   * This method verifies that PR statistics are working properly for a PartitionedRegion.
   * putsCompleted, getsCompleted, createsCompleted, destroysCompleted, containsKeyCompleted,
   * containsValueForKeyCompleted, invalidatesCompleted, and totalBucketSize are validated in this
   * method; validation of avgRedundantCopies, maxRedundantCopies, and minRedundantCopies is
   * temporarily commented out.
   */
private void validateStats(PartitionedRegion pr) throws Exception {
    Statistics stats = pr.getPrStats().getStats();
    int bucketCount = stats.get("bucketCount").intValue();
    int putsCompleted = stats.get("putsCompleted").intValue();
    int totalBucketSize = stats.get("dataStoreEntryCount").intValue();
    assertEquals(0, bucketCount);
    assertEquals(0, putsCompleted);
    assertEquals(0, totalBucketSize);
    int totalGets = 0;
    final int bucketMax = pr.getTotalNumberOfBuckets();
    for (int i = 0; i < bucketMax + 1; i++) {
        Long val = new Long(i);
        try {
            pr.put(val, val);
        } catch (PartitionedRegionStorageException ex) {
            this.logger.warning(ex);
        }
    }
    for (int i = 0; i < bucketMax + 1; i++) {
        Long val = new Long(i);
        try {
            pr.get(val);
            totalGets++;
        } catch (PartitionedRegionStorageException ex) {
            this.logger.warning(ex);
        }
    }
    bucketCount = stats.get("bucketCount").intValue();
    putsCompleted = stats.get("putsCompleted").intValue();
    totalBucketSize = stats.get("dataStoreEntryCount").intValue();
    assertEquals(bucketMax, bucketCount);
    assertEquals(bucketMax + 1, putsCompleted);
    assertEquals(bucketMax + 1, totalBucketSize);
    pr.destroy(new Long(bucketMax));
    putsCompleted = stats.get("putsCompleted").intValue();
    totalBucketSize = stats.get("dataStoreEntryCount").intValue();
    assertEquals(bucketMax, bucketCount);
    assertEquals(bucketMax + 1, putsCompleted);
    assertEquals(bucketMax, totalBucketSize);
    for (int i = 200; i < 210; i++) {
        Long key = new Long(i);
        String val = "" + i;
        try {
            pr.create(key, val);
        } catch (PartitionedRegionStorageException ex) {
            this.logger.warning(ex);
        }
    }
    for (int i = 200; i < 210; i++) {
        Long key = new Long(i);
        try {
            pr.get(key);
            totalGets++;
        } catch (PartitionedRegionStorageException ex) {
            this.logger.warning(ex);
        }
    }
    for (int i = 200; i < 210; i++) {
        Long key = new Long(i);
        try {
            pr.containsKey(key);
        } catch (PartitionedRegionStorageException ex) {
            this.logger.warning(ex);
        }
    }
    for (int i = 200; i < 210; i++) {
        Long key = new Long(i);
        try {
            pr.containsValueForKey(key);
        } catch (PartitionedRegionStorageException ex) {
            this.logger.warning(ex);
        }
    }
    for (int i = 200; i < 210; i++) {
        Long key = new Long(i);
        try {
            pr.invalidate(key);
        } catch (PartitionedRegionStorageException ex) {
            this.logger.warning(ex);
        }
    }
    int getsCompleted = stats.get("getsCompleted").intValue();
    int createsCompleted = stats.get("createsCompleted").intValue();
    int containsKeyCompleted = stats.get("containsKeyCompleted").intValue();
    int containsValueForKeyCompleted = stats.get("containsValueForKeyCompleted").intValue();
    int invalidatesCompleted = stats.get("invalidatesCompleted").intValue();
    int destroysCompleted = stats.get("destroysCompleted").intValue();
    assertEquals(totalGets, getsCompleted);
    assertEquals(10, createsCompleted);
    assertEquals(10, containsKeyCompleted);
    assertEquals(10, containsValueForKeyCompleted);
    assertEquals(10, invalidatesCompleted);
    assertEquals(1, destroysCompleted);
// Redundant copies related statistics
/*
     * int maxRedundantCopies = stats.get("maxRedundantCopies").intValue(); int minRedundantCopies =
     * stats.get("minRedundantCopies").intValue(); int avgRedundantCopies =
     * stats.get("avgRedundantCopies").intValue();
     * 
     * assertIndexDetailsEquals(minRedundantCopies, 2); assertIndexDetailsEquals(maxRedundantCopies,
     * 2); assertIndexDetailsEquals(avgRedundantCopies, 2);
     */
}
Also used : PartitionedRegionStorageException(org.apache.geode.cache.PartitionedRegionStorageException) Statistics(org.apache.geode.Statistics)
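
The test reads its counters through the internal PartitionedRegionStats object. As a public-API alternative, bucket and redundancy figures for a partitioned region can be inspected with PartitionRegionHelper; the sketch below is an assumed illustration, not part of the JUnit test, and the printed labels are arbitrary.

import org.apache.geode.cache.Region;
import org.apache.geode.cache.partition.PartitionRegionHelper;
import org.apache.geode.cache.partition.PartitionRegionInfo;

public final class PartitionInfoSketch {

    private PartitionInfoSketch() {}

    /** Prints bucket and redundancy figures using only the public API. */
    public static void describe(Region<?, ?> partitionedRegion) {
        PartitionRegionInfo info = PartitionRegionHelper.getPartitionRegionInfo(partitionedRegion);
        System.out.println("created buckets: " + info.getCreatedBucketCount());
        System.out.println("configured buckets: " + info.getConfiguredBucketCount());
        System.out.println("low-redundancy buckets: " + info.getLowRedundancyBucketCount());
    }
}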

Example 4 with PartitionedRegionStorageException

use of org.apache.geode.cache.PartitionedRegionStorageException in project geode by apache.

the class PersistentPartitionedRegionDUnitTest method testRegisterInterestNoDataStores.

@Test
public void testRegisterInterestNoDataStores() {
    // Closing the client may log a warning on the server
    IgnoredException.addIgnoredException("Connection reset");
    IgnoredException.addIgnoredException("SocketTimeoutException");
    IgnoredException.addIgnoredException("ServerConnectivityException");
    IgnoredException.addIgnoredException("Socket Closed");
    IgnoredException.addIgnoredException("Unexpected IOException");
    final Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    final Integer serverPort = (Integer) vm0.invoke(new SerializableCallable("create per") {

        public Object call() {
            Cache cache = getCache();
            AttributesFactory af = new AttributesFactory();
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(0);
            paf.setLocalMaxMemory(0);
            af.setPartitionAttributes(paf.create());
            af.setDataPolicy(DataPolicy.PARTITION);
            cache.createRegion(PR_REGION_NAME, af.create());
            CacheServer server = cache.addCacheServer();
            server.setPort(AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET));
            server.setNotifyBySubscription(true);
            try {
                server.start();
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
            return server.getPort();
        }
    });
    vm1.invoke(new SerializableRunnable("create client") {

        public void run() {
            Properties props = new Properties();
            props.setProperty(MCAST_PORT, "0");
            props.setProperty(LOCATORS, "");
            getSystem(props);
            try {
                Cache cache = getCache();
                PoolFactory pf = PoolManager.createFactory();
                pf.addServer(NetworkUtils.getServerHostName(host), serverPort);
                pf.setSubscriptionEnabled(true);
                pf.create("pool");
                AttributesFactory af = new AttributesFactory();
                af.setDataPolicy(DataPolicy.NORMAL);
                af.setScope(Scope.LOCAL);
                af.setPoolName("pool");
                Region region = cache.createRegion(PR_REGION_NAME, af.create());
                try {
                    region.registerInterestRegex(".*");
                } catch (ServerOperationException e) {
                    if (!(e.getCause() instanceof PartitionedRegionStorageException)) {
                        throw e;
                    }
                }
            } finally {
                disconnectFromDS();
            }
        }
    });
}
Also used : SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) Host(org.apache.geode.test.dunit.Host) IOException(java.io.IOException) ConfigurationProperties(org.apache.geode.distributed.ConfigurationProperties) Properties(java.util.Properties) PoolFactory(org.apache.geode.cache.client.PoolFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) PartitionedRegionStorageException(org.apache.geode.cache.PartitionedRegionStorageException) VM(org.apache.geode.test.dunit.VM) SerializableCallable(org.apache.geode.test.dunit.SerializableCallable) CacheServer(org.apache.geode.cache.server.CacheServer) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) Region(org.apache.geode.cache.Region) ServerOperationException(org.apache.geode.cache.client.ServerOperationException) Cache(org.apache.geode.cache.Cache) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest) FlakyTest(org.apache.geode.test.junit.categories.FlakyTest) Test(org.junit.Test)
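
The client in this test treats a ServerOperationException as benign only when its cause is a PartitionedRegionStorageException, which indicates that the server-side partitioned region has no data stores yet. A minimal sketch of that unwrapping as a standalone helper follows; the class and method names are illustrative assumptions.

import org.apache.geode.cache.PartitionedRegionStorageException;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ServerOperationException;

public final class InterestHelper {

    private InterestHelper() {}

    /** Registers interest in all keys, tolerating "no data stores" on the server. */
    public static void registerInterestTolerantly(Region<?, ?> clientRegion) {
        try {
            clientRegion.registerInterestRegex(".*");
        } catch (ServerOperationException e) {
            if (!(e.getCause() instanceof PartitionedRegionStorageException)) {
                // Anything other than "no data stores can host buckets" is a real failure.
                throw e;
            }
            // Otherwise ignore: interest can be registered again once a data
            // store joins the distributed system.
        }
    }
}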

Example 5 with PartitionedRegionStorageException

use of org.apache.geode.cache.PartitionedRegionStorageException in project geode by apache.

the class PartitionedRegionLocalMaxMemoryDUnitTest method putObjectInPartitionRegion.

/**
   * This function is used to put objects with different hashcodes depending upon the value of
   * objectFlg.
   * 
   * @param objectFlg if true, put objects until the local memory limit is reached; if false,
   *        expect a PartitionedRegionStorageException because no memory is available
   * @return the runnable that performs the puts
   */
private CacheSerializableRunnable putObjectInPartitionRegion(final boolean objectFlg) {
    CacheSerializableRunnable putObject = new CacheSerializableRunnable("putObject") {

        public void run2() {
            Cache cache = getCache();
            PartitionedRegion pr = (PartitionedRegion) cache.getRegion(Region.SEPARATOR + "testLocalMaxMemoryInPartitionedRegion0");
            assertNotNull("Name of region : " + pr.getName(), pr);
            int i = 0;
            if (objectFlg) {
                long size = 0;
                while ((size = pr.getDataStore().currentAllocatedMemory()) < PartitionedRegionHelper.BYTES_PER_MB) {
                    cache.getLogger().info("size: " + size);
                    Object obj = new TestObject1("testObject1" + i, 10);
                    pr.put(obj, obj);
                    i++;
                }
                assertEquals(1, pr.getDataStore().localBucket2RegionMap.size());
                LogWriterUtils.getLogWriter().info("putObjectInPartitionRegion() - Put operation done successfully");
            } else {
                final String expectedExceptions = PartitionedRegionStorageException.class.getName();
                getCache().getLogger().info("<ExpectedException action=add>" + expectedExceptions + "</ExpectedException>");
                try {
                    TestObject1 kv = new TestObject1("testObject1" + i, 21);
                    pr.put(kv, kv);
                    fail("Bucket gets created even if no memory is available");
                } catch (PartitionedRegionStorageException e) {
                    LogWriterUtils.getLogWriter().info("putObjectInPartitionRegion()- got correct PartitionedRegionStorageException while creating bucket when no memory is available");
                }
                getCache().getLogger().info("<ExpectedException action=remove>" + expectedExceptions + "</ExpectedException>");
            }
        }
    };
    return putObject;
}
Also used : CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) PartitionedRegionStorageException(org.apache.geode.cache.PartitionedRegionStorageException) Cache(org.apache.geode.cache.Cache)
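
The false branch above expects bucket creation to fail once localMaxMemory is exhausted. The compact standalone sketch below reproduces the same effect under stated assumptions: the 1 MB limit, the region name, the entry size, and the single-member setup are all illustrative rather than taken from the DUnit test.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.PartitionAttributesFactory;
import org.apache.geode.cache.PartitionedRegionStorageException;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionFactory;
import org.apache.geode.cache.RegionShortcut;

public class LocalMaxMemoryExample {

    public static void main(String[] args) {
        Cache cache = new CacheFactory().set("mcast-port", "0").set("locators", "").create();
        PartitionAttributesFactory<Integer, byte[]> paf = new PartitionAttributesFactory<>();
        // Allow only 1 MB of partitioned-region data on this single data store.
        paf.setLocalMaxMemory(1);
        RegionFactory<Integer, byte[]> factory = cache.createRegionFactory(RegionShortcut.PARTITION);
        Region<Integer, byte[]> region = factory.setPartitionAttributes(paf.create()).create("limited");
        try {
            for (int i = 0; i < 1000; i++) {
                // Keys spread across buckets; once the 1 MB limit is exceeded, the
                // next new bucket cannot be hosted anywhere and the put fails.
                region.put(i, new byte[64 * 1024]);
            }
        } catch (PartitionedRegionStorageException e) {
            cache.getLogger().info("Expected: " + e.getMessage());
        } finally {
            cache.close();
        }
    }
}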

Aggregations

PartitionedRegionStorageException (org.apache.geode.cache.PartitionedRegionStorageException): 9
Cache (org.apache.geode.cache.Cache): 4
IOException (java.io.IOException): 3
CacheClosedException (org.apache.geode.cache.CacheClosedException): 3
Region (org.apache.geode.cache.Region): 3
PartitionOfflineException (org.apache.geode.cache.persistence.PartitionOfflineException): 3
DistributedTest (org.apache.geode.test.junit.categories.DistributedTest): 3
Test (org.junit.Test): 3
CancelException (org.apache.geode.CancelException): 2
AttributesFactory (org.apache.geode.cache.AttributesFactory): 2
CacheLoaderException (org.apache.geode.cache.CacheLoaderException): 2
PartitionAttributesFactory (org.apache.geode.cache.PartitionAttributesFactory): 2
RegionDestroyedException (org.apache.geode.cache.RegionDestroyedException): 2
TimeoutException (org.apache.geode.cache.TimeoutException): 2
InternalDistributedMember (org.apache.geode.distributed.internal.membership.InternalDistributedMember): 2
NoSuchElementException (java.util.NoSuchElementException): 1
Properties (java.util.Properties): 1
ExecutionException (java.util.concurrent.ExecutionException): 1
RejectedExecutionException (java.util.concurrent.RejectedExecutionException): 1
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 1