Search in sources:

Example 71 with GemFireCacheImpl

use of org.apache.geode.internal.cache.GemFireCacheImpl in project geode by apache.

From the class MultiVMRegionTestCase, method testRemoteCacheWriter.

/**
   * Tests that a remote {@link CacheWriter} is invoked and that <code>CacheWriter</code> arguments
   * and {@link CacheWriterException}s are propagated appropriately.
   */
@Test
public void testRemoteCacheWriter() throws Exception {
    assertTrue(getRegionAttributes().getScope().isDistributed());
    final String name = this.getUniqueName();
    final Object key = "KEY";
    final Object oldValue = "OLD_VALUE";
    final Object newValue = "NEW_VALUE";
    final Object arg = "ARG";
    final Object exception = "EXCEPTION";
    final Object key2 = "KEY2";
    final Object value2 = "VALUE2";
    SerializableRunnable create = new CacheSerializableRunnable("Create Region") {

        @Override
        public void run2() throws CacheException {
            Region region = createRegion(name);
            // Put key2 in the region before any callbacks are
            // registered, so it can be destroyed later
            region.put(key2, value2);
            assertEquals(1, region.size());
            if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
                GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
                MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
                LocalRegion reRegion;
                reRegion = (LocalRegion) region;
                RegionEntry re = reRegion.getRegionEntry(key2);
                StoredObject so = (StoredObject) re._getValue();
                assertEquals(1, so.getRefCount());
                assertEquals(1, ma.getStats().getObjects());
            }
        }
    };
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    vm0.invoke(create);
    vm1.invoke(create);
    //////// Create
    vm1.invoke(new CacheSerializableRunnable("Set Writer") {

        @Override
        public void run2() throws CacheException {
            final Region region = getRootRegion().getSubregion(name);
            writer = new TestCacheWriter() {

                @Override
                public void beforeCreate2(EntryEvent event) throws CacheWriterException {
                    if (exception.equals(event.getCallbackArgument())) {
                        String s = "Test Exception";
                        throw new CacheWriterException(s);
                    }
                    assertEquals(region, event.getRegion());
                    assertTrue(event.getOperation().isCreate());
                    assertTrue(event.getOperation().isDistributed());
                    assertFalse(event.getOperation().isExpiration());
                    assertTrue(event.isOriginRemote());
                    assertEquals(key, event.getKey());
                    assertEquals(null, event.getOldValue());
                    assertEquals(oldValue, event.getNewValue());
                    assertFalse(event.getOperation().isLoad());
                    assertFalse(event.getOperation().isLocalLoad());
                    assertFalse(event.getOperation().isNetLoad());
                    assertFalse(event.getOperation().isNetSearch());
                }
            };
            region.getAttributesMutator().setCacheWriter(writer);
            flushIfNecessary(region);
        }
    });
    vm0.invoke(new CacheSerializableRunnable("Create with Exception") {

        @Override
        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            try {
                region.put(key, oldValue, exception);
                fail("Should have thrown a CacheWriterException");
            } catch (CacheWriterException ex) {
                assertNull(region.getEntry(key));
                assertEquals(1, region.size());
                if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
                    GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
                    MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
                    assertEquals(1, ma.getStats().getObjects());
                }
            }
        }
    });
    vm1.invoke(new SerializableRunnable("Verify callback") {

        @Override
        public void run() {
            assertTrue(writer.wasInvoked());
        }
    });
    vm0.invoke(new CacheSerializableRunnable("Create with Argument") {

        @Override
        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            region.put(key, oldValue, arg);
            assertEquals(2, region.size());
            if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
                GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
                MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
                assertEquals(2, ma.getStats().getObjects());
                LocalRegion reRegion;
                reRegion = (LocalRegion) region;
                StoredObject so = (StoredObject) reRegion.getRegionEntry(key)._getValue();
                assertEquals(1, so.getRefCount());
            }
        }
    });
    vm1.invoke(new SerializableRunnable("Verify callback") {

        @Override
        public void run() {
            assertTrue(writer.wasInvoked());
        }
    });
    //////// Update
    vm1.invoke(new CacheSerializableRunnable("Set Writer") {

        @Override
        public void run2() throws CacheException {
            final Region region = getRootRegion().getSubregion(name);
            writer = new TestCacheWriter() {

                @Override
                public void beforeUpdate2(EntryEvent event) throws CacheWriterException {
                    Object argument = event.getCallbackArgument();
                    if (exception.equals(argument)) {
                        String s = "Test Exception";
                        throw new CacheWriterException(s);
                    }
                    assertEquals(arg, argument);
                    assertEquals(region, event.getRegion());
                    assertTrue(event.getOperation().isUpdate());
                    assertTrue(event.getOperation().isDistributed());
                    assertFalse(event.getOperation().isExpiration());
                    assertTrue(event.isOriginRemote());
                    assertEquals(key, event.getKey());
                    assertEquals(oldValue, event.getOldValue());
                    assertEquals(newValue, event.getNewValue());
                    assertFalse(event.getOperation().isLoad());
                    assertFalse(event.getOperation().isLocalLoad());
                    assertFalse(event.getOperation().isNetLoad());
                    assertFalse(event.getOperation().isNetSearch());
                }
            };
            region.getAttributesMutator().setCacheWriter(writer);
        }
    });
    vm0.invoke(new CacheSerializableRunnable("Update with Exception") {

        @Override
        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            try {
                region.put(key, newValue, exception);
                fail("Should have thrown a CacheWriterException");
            } catch (CacheWriterException ex) {
                Region.Entry entry = region.getEntry(key);
                assertEquals(oldValue, entry.getValue());
                assertEquals(2, region.size());
                if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
                    GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
                    MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
                    assertEquals(2, ma.getStats().getObjects());
                    LocalRegion reRegion;
                    reRegion = (LocalRegion) region;
                    StoredObject so = (StoredObject) reRegion.getRegionEntry(key)._getValue();
                    assertEquals(1, so.getRefCount());
                }
            }
        }
    });
    vm1.invoke(new SerializableRunnable("Verify callback") {

        @Override
        public void run() {
            assertTrue(writer.wasInvoked());
        }
    });
    vm0.invoke(new CacheSerializableRunnable("Update with Argument") {

        @Override
        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            region.put(key, newValue, arg);
            assertEquals(2, region.size());
            if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
                GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
                MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
                assertEquals(2, ma.getStats().getObjects());
            }
        }
    });
    vm1.invoke(new SerializableRunnable("Verify callback") {

        @Override
        public void run() {
            assertTrue(writer.wasInvoked());
        }
    });
    //////// Destroy
    vm1.invoke(new CacheSerializableRunnable("Set Writer") {

        @Override
        public void run2() throws CacheException {
            final Region region = getRootRegion().getSubregion(name);
            writer = new TestCacheWriter() {

                @Override
                public void beforeDestroy2(EntryEvent event) throws CacheWriterException {
                    Object argument = event.getCallbackArgument();
                    if (exception.equals(argument)) {
                        String s = "Test Exception";
                        throw new CacheWriterException(s);
                    }
                    assertEquals(arg, argument);
                    assertEquals(region, event.getRegion());
                    assertTrue(event.getOperation().isDestroy());
                    assertTrue(event.getOperation().isDistributed());
                    assertFalse(event.getOperation().isExpiration());
                    assertTrue(event.isOriginRemote());
                    assertEquals(key, event.getKey());
                    assertEquals(newValue, event.getOldValue());
                    assertNull(event.getNewValue());
                    assertFalse(event.getOperation().isLoad());
                    assertFalse(event.getOperation().isLocalLoad());
                    assertFalse(event.getOperation().isNetLoad());
                    assertFalse(event.getOperation().isNetSearch());
                }
            };
            region.getAttributesMutator().setCacheWriter(writer);
        }
    });
    vm0.invoke(new CacheSerializableRunnable("Destroy with Exception") {

        @Override
        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            try {
                region.destroy(key, exception);
                fail("Should have thrown a CacheWriterException");
            } catch (CacheWriterException ex) {
                assertNotNull(region.getEntry(key));
                assertEquals(2, region.size());
                if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
                    GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
                    MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
                    assertEquals(2, ma.getStats().getObjects());
                }
            }
        }
    });
    vm1.invoke(new SerializableRunnable("Verify callback") {

        @Override
        public void run() {
            assertTrue(writer.wasInvoked());
        }
    });
    vm0.invoke(new CacheSerializableRunnable("Destroy with Argument") {

        @Override
        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            region.destroy(key, arg);
            assertEquals(1, region.size());
            if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
                GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
                MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
                assertEquals(1, ma.getStats().getObjects());
            }
        }
    });
    vm1.invoke(new SerializableRunnable("Verify callback") {

        @Override
        public void run() {
            assertTrue(writer.wasInvoked());
        }
    });
    //////// Region Destroy
    vm1.invoke(new CacheSerializableRunnable("Set Writer") {

        @Override
        public void run2() throws CacheException {
            final Region region = getRootRegion().getSubregion(name);
            writer = new TestCacheWriter() {

                @Override
                public void beforeRegionDestroy2(RegionEvent event) throws CacheWriterException {
                    Object argument = event.getCallbackArgument();
                    if (exception.equals(argument)) {
                        String s = "Test Exception";
                        throw new CacheWriterException(s);
                    }
                    assertEquals(arg, argument);
                    assertEquals(region, event.getRegion());
                    assertTrue(event.getOperation().isRegionDestroy());
                    assertTrue(event.getOperation().isDistributed());
                    assertFalse(event.getOperation().isExpiration());
                    assertTrue(event.isOriginRemote());
                }
            };
            region.getAttributesMutator().setCacheWriter(writer);
        }
    });
    vm0.invoke(new CacheSerializableRunnable("Destroy with Exception") {

        @Override
        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            try {
                region.destroyRegion(exception);
                fail("Should have thrown a CacheWriterException");
            } catch (CacheWriterException ex) {
                if (region.isDestroyed()) {
                    fail("should not have an exception if region is destroyed", ex);
                }
                assertEquals(1, region.size());
                if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
                    GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
                    MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
                    assertEquals(1, ma.getStats().getObjects());
                }
            }
        }
    });
    vm1.invoke(new SerializableRunnable("Verify callback") {

        @Override
        public void run() {
            assertTrue(writer.wasInvoked());
        }
    });
    vm0.invoke(new CacheSerializableRunnable("Destroy with Argument") {

        @Override
        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            assertEquals(1, region.size());
            if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
                GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
                MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
                assertEquals(1, ma.getStats().getObjects());
            }
            region.destroyRegion(arg);
            if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
                GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
                final MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
                WaitCriterion waitForStatChange = new WaitCriterion() {

                    @Override
                    public boolean done() {
                        return ma.getStats().getObjects() == 0;
                    }

                    @Override
                    public String description() {
                        return "never saw off-heap object count go to zero. Last value was " + ma.getStats().getObjects();
                    }
                };
                Wait.waitForCriterion(waitForStatChange, 3000, 10, true);
            }
        }
    });
    vm1.invoke(new SerializableRunnable("Verify callback") {

        @Override
        public void run() {
            assertTrue(writer.wasInvoked());
        }
    });
}
Also used : CacheException(org.apache.geode.cache.CacheException) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) MemoryAllocatorImpl(org.apache.geode.internal.offheap.MemoryAllocatorImpl) Host(org.apache.geode.test.dunit.Host) LocalRegion(org.apache.geode.internal.cache.LocalRegion) RegionEvent(org.apache.geode.cache.RegionEvent) WaitCriterion(org.apache.geode.test.dunit.WaitCriterion) StoredObject(org.apache.geode.internal.offheap.StoredObject) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) VM(org.apache.geode.test.dunit.VM) EntryEvent(org.apache.geode.cache.EntryEvent) LocalRegion(org.apache.geode.internal.cache.LocalRegion) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) Region(org.apache.geode.cache.Region) GemFireCacheImpl(org.apache.geode.internal.cache.GemFireCacheImpl) RegionEntry(org.apache.geode.internal.cache.RegionEntry) StoredObject(org.apache.geode.internal.offheap.StoredObject) CacheWriterException(org.apache.geode.cache.CacheWriterException) Test(org.junit.Test) FlakyTest(org.apache.geode.test.junit.categories.FlakyTest)

Example 72 with GemFireCacheImpl

use of org.apache.geode.internal.cache.GemFireCacheImpl in project geode by apache.

From the class PdxDeleteFieldJUnitTest, method testPdxFieldDelete.

@Test
public void testPdxFieldDelete() throws Exception {
    Properties props = new Properties();
    props.setProperty(MCAST_PORT, "0");
    props.setProperty(LOCATORS, "");
    try {
        Cache cache = (new CacheFactory(props)).create();
        try {
            PdxValue pdxValue = new PdxValue(1, 2L);
            byte[] pdxValueBytes = BlobHelper.serializeToBlob(pdxValue);
            {
                PdxValue deserializedPdxValue = (PdxValue) BlobHelper.deserializeBlob(pdxValueBytes);
                assertEquals(1, deserializedPdxValue.value);
                assertEquals(2L, deserializedPdxValue.fieldToDelete);
            }
            PdxType pt;
            // force PdxInstance on deserialization
            DefaultQuery.setPdxReadSerialized(true);
            try {
                PdxInstanceImpl pi = (PdxInstanceImpl) BlobHelper.deserializeBlob(pdxValueBytes);
                pt = pi.getPdxType();
                assertEquals(1, pi.getField("value"));
                assertEquals(2L, pi.getField("fieldToDelete"));
            } finally {
                DefaultQuery.setPdxReadSerialized(false);
            }
            assertEquals(PdxValue.class.getName(), pt.getClassName());
            PdxField field = pt.getPdxField("fieldToDelete");
            pt.setHasDeletedField(true);
            field.setDeleted(true);
            assertEquals(null, pt.getPdxField("fieldToDelete"));
            assertEquals(2, pt.getFieldCount());
            {
                PdxValue deserializedPdxValue = (PdxValue) BlobHelper.deserializeBlob(pdxValueBytes);
                assertEquals(1, deserializedPdxValue.value);
                // fieldToDelete should now be 0 (the default) instead of 2.
                assertEquals(0L, deserializedPdxValue.fieldToDelete);
            }
            // force PdxInstance on deserialization
            DefaultQuery.setPdxReadSerialized(true);
            try {
                PdxInstance pi = (PdxInstance) BlobHelper.deserializeBlob(pdxValueBytes);
                assertEquals(1, pi.getField("value"));
                assertEquals(false, pi.hasField("fieldToDelete"));
                assertEquals(null, pi.getField("fieldToDelete"));
                assertSame(pt, ((PdxInstanceImpl) pi).getPdxType());
                PdxValue deserializedPdxValue = (PdxValue) pi.getObject();
                assertEquals(1, deserializedPdxValue.value);
                assertEquals(0L, deserializedPdxValue.fieldToDelete);
            } finally {
                DefaultQuery.setPdxReadSerialized(false);
            }
            TypeRegistry tr = ((GemFireCacheImpl) cache).getPdxRegistry();
            // Clear the local registry so we will regenerate a type for the same class
            tr.testClearLocalTypeRegistry();
            {
                PdxInstanceFactory piFactory = cache.createPdxInstanceFactory(PdxValue.class.getName());
                piFactory.writeInt("value", 1);
                PdxInstance pi = piFactory.create();
                assertEquals(1, pi.getField("value"));
                assertEquals(null, pi.getField("fieldToDelete"));
                PdxType pt2 = ((PdxInstanceImpl) pi).getPdxType();
                assertEquals(null, pt2.getPdxField("fieldToDelete"));
                assertEquals(1, pt2.getFieldCount());
            }
        } finally {
            if (!cache.isClosed()) {
                cache.close();
            }
        }
    } finally {
    }
}
Also used : PdxInstanceFactory(org.apache.geode.pdx.PdxInstanceFactory) PdxType(org.apache.geode.pdx.internal.PdxType) PdxInstanceImpl(org.apache.geode.pdx.internal.PdxInstanceImpl) PdxInstance(org.apache.geode.pdx.PdxInstance) PdxField(org.apache.geode.pdx.internal.PdxField) GemFireCacheImpl(org.apache.geode.internal.cache.GemFireCacheImpl) Properties(java.util.Properties) CacheFactory(org.apache.geode.cache.CacheFactory) TypeRegistry(org.apache.geode.pdx.internal.TypeRegistry) Cache(org.apache.geode.cache.Cache) Test(org.junit.Test) IntegrationTest(org.apache.geode.test.junit.categories.IntegrationTest)

Example 73 with GemFireCacheImpl

use of org.apache.geode.internal.cache.GemFireCacheImpl in project geode by apache.

From the class RegionOperationsEqualityShouldUseArrayEqualsIntegrationTest, method testPartition.

@Test
public void testPartition() {
    GemFireCacheImpl gfc = createCache();
    try {
        Region r = gfc.createRegionFactory(RegionShortcut.PARTITION).create("ArrayEqualsJUnitTestPartitionRegion");
        doOps(r);
    } finally {
        closeCache(gfc);
    }
}
Also used : GemFireCacheImpl(org.apache.geode.internal.cache.GemFireCacheImpl) Region(org.apache.geode.cache.Region) Test(org.junit.Test) IntegrationTest(org.apache.geode.test.junit.categories.IntegrationTest)

Example 74 with GemFireCacheImpl

use of org.apache.geode.internal.cache.GemFireCacheImpl in project geode by apache.

From the class RegionOperationsEqualityShouldUseArrayEqualsIntegrationTest, method testLocal.

@Test
public void testLocal() {
    GemFireCacheImpl gfc = createCache();
    try {
        Region r = gfc.createRegionFactory(RegionShortcut.LOCAL).create("ArrayEqualsJUnitTestLocalRegion");
        doOps(r);
    } finally {
        closeCache(gfc);
    }
}
Also used : GemFireCacheImpl(org.apache.geode.internal.cache.GemFireCacheImpl) Region(org.apache.geode.cache.Region) Test(org.junit.Test) IntegrationTest(org.apache.geode.test.junit.categories.IntegrationTest)

Example 75 with GemFireCacheImpl

use of org.apache.geode.internal.cache.GemFireCacheImpl in project geode by apache.

From the class RebalanceOperationDUnitTest, method testRecoverRedundancyBalancingIfCreateBucketFails.

@Test
public void testRecoverRedundancyBalancingIfCreateBucketFails() {
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    VM vm2 = host.getVM(2);
    final DistributedMember member1 = createPrRegion(vm0, "region1", 100, null);
    vm0.invoke(new SerializableRunnable("createSomeBuckets") {

        public void run() {
            Cache cache = getCache();
            Region region = cache.getRegion("region1");
            for (int i = 0; i < 1; i++) {
                region.put(Integer.valueOf(i), "A");
            }
        }
    });
    SerializableRunnable checkRedundancy = new SerializableRunnable("checkRedundancy") {

        public void run() {
            Cache cache = getCache();
            Region region = cache.getRegion("region1");
            PartitionRegionInfo details = PartitionRegionHelper.getPartitionRegionInfo(region);
            assertEquals(1, details.getCreatedBucketCount());
            assertEquals(0, details.getActualRedundantCopies());
            assertEquals(1, details.getLowRedundancyBucketCount());
        }
    };
    vm0.invoke(checkRedundancy);
    // Now create the region in 2 more VMs
    // Let localMaxMemory(VM1) > localMaxMemory(VM2)
    // so that redundant bucket will always be attempted on VM1
    final DistributedMember member2 = createPrRegion(vm1, "region1", 100, null);
    final DistributedMember member3 = createPrRegion(vm2, "region1", 90, null);
    vm0.invoke(checkRedundancy);
    // Inject mock PRHARedundancyProvider to simulate createBucketFailures
    vm0.invoke(new SerializableRunnable("injectCreateBucketFailureAndRebalance") {

        @Override
        public void run() {
            GemFireCacheImpl cache = spy(getGemfireCache());
            // set the spied cache instance
            GemFireCacheImpl origCache = GemFireCacheImpl.setInstanceForTests(cache);
            PartitionedRegion origRegion = (PartitionedRegion) cache.getRegion("region1");
            PartitionedRegion spyRegion = spy(origRegion);
            PRHARedundancyProvider redundancyProvider = spy(new PRHARedundancyProvider(spyRegion));
            // return the spied region when ever getPartitionedRegions() is invoked
            Set<PartitionedRegion> parRegions = cache.getPartitionedRegions();
            parRegions.remove(origRegion);
            parRegions.add(spyRegion);
            doReturn(parRegions).when(cache).getPartitionedRegions();
            doReturn(redundancyProvider).when(spyRegion).getRedundancyProvider();
            // simulate create bucket fails on member2 and test if it creates on member3
            doReturn(false).when(redundancyProvider).createBackupBucketOnMember(anyInt(), eq((InternalDistributedMember) member2), anyBoolean(), anyBoolean(), any(), anyBoolean());
            // Now simulate a rebalance
            // Create operationImpl and not factory as we need spied cache to be passed to operationImpl
            RegionFilter filter = new FilterByPath(null, null);
            RebalanceOperationImpl operation = new RebalanceOperationImpl(cache, false, filter);
            operation.start();
            RebalanceResults results = null;
            try {
                results = operation.getResults(MAX_WAIT, TimeUnit.SECONDS);
            } catch (InterruptedException e) {
                Assert.fail("Interrupted waiting on rebalance", e);
            } catch (TimeoutException e) {
                Assert.fail("Timeout waiting on rebalance", e);
            }
            assertEquals(1, results.getTotalBucketCreatesCompleted());
            assertEquals(0, results.getTotalPrimaryTransfersCompleted());
            assertEquals(0, results.getTotalBucketTransferBytes());
            assertEquals(0, results.getTotalBucketTransfersCompleted());
            Set<PartitionRebalanceInfo> detailSet = results.getPartitionRebalanceDetails();
            assertEquals(1, detailSet.size());
            PartitionRebalanceInfo details = detailSet.iterator().next();
            assertEquals(1, details.getBucketCreatesCompleted());
            assertEquals(0, details.getPrimaryTransfersCompleted());
            assertEquals(0, details.getBucketTransferBytes());
            assertEquals(0, details.getBucketTransfersCompleted());
            Set<PartitionMemberInfo> afterDetails = details.getPartitionMemberDetailsAfter();
            assertEquals(3, afterDetails.size());
            for (PartitionMemberInfo memberDetails : afterDetails) {
                if (memberDetails.getDistributedMember().equals(member1)) {
                    assertEquals(1, memberDetails.getBucketCount());
                    assertEquals(1, memberDetails.getPrimaryCount());
                } else if (memberDetails.getDistributedMember().equals(member2)) {
                    assertEquals(0, memberDetails.getBucketCount());
                    assertEquals(0, memberDetails.getPrimaryCount());
                } else if (memberDetails.getDistributedMember().equals(member3)) {
                    assertEquals(1, memberDetails.getBucketCount());
                    assertEquals(0, memberDetails.getPrimaryCount());
                }
            }
            ResourceManagerStats stats = cache.getInternalResourceManager().getStats();
            assertEquals(0, stats.getRebalancesInProgress());
            assertEquals(1, stats.getRebalancesCompleted());
            assertEquals(0, stats.getRebalanceBucketCreatesInProgress());
            assertEquals(results.getTotalBucketCreatesCompleted(), stats.getRebalanceBucketCreatesCompleted());
            assertEquals(1, stats.getRebalanceBucketCreatesFailed());
            // set the original cache
            GemFireCacheImpl.setInstanceForTests(origCache);
        }
    });
    SerializableRunnable checkRedundancyFixed = new SerializableRunnable("checkLowRedundancy") {

        public void run() {
            Cache cache = getCache();
            Region region = cache.getRegion("region1");
            PartitionRegionInfo details = PartitionRegionHelper.getPartitionRegionInfo(region);
            assertEquals(1, details.getCreatedBucketCount());
            assertEquals(1, details.getActualRedundantCopies());
            assertEquals(0, details.getLowRedundancyBucketCount());
        }
    };
    vm0.invoke(checkRedundancyFixed);
    vm1.invoke(checkRedundancyFixed);
    vm2.invoke(checkRedundancyFixed);
}
Also used : Set(java.util.Set) TreeSet(java.util.TreeSet) HashSet(java.util.HashSet) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) PartitionRebalanceInfo(org.apache.geode.cache.partition.PartitionRebalanceInfo) Host(org.apache.geode.test.dunit.Host) PartitionMemberInfo(org.apache.geode.cache.partition.PartitionMemberInfo) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) VM(org.apache.geode.test.dunit.VM) PRHARedundancyProvider(org.apache.geode.internal.cache.PRHARedundancyProvider) InternalDistributedMember(org.apache.geode.distributed.internal.membership.InternalDistributedMember) DistributedMember(org.apache.geode.distributed.DistributedMember) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) BucketRegion(org.apache.geode.internal.cache.BucketRegion) Region(org.apache.geode.cache.Region) GemFireCacheImpl(org.apache.geode.internal.cache.GemFireCacheImpl) RebalanceResults(org.apache.geode.cache.control.RebalanceResults) PartitionRegionInfo(org.apache.geode.cache.partition.PartitionRegionInfo) Cache(org.apache.geode.cache.Cache) TimeoutException(java.util.concurrent.TimeoutException) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest) Test(org.junit.Test)

Aggregations

GemFireCacheImpl (org.apache.geode.internal.cache.GemFireCacheImpl)213 Test (org.junit.Test)127 Region (org.apache.geode.cache.Region)86 DistributedTest (org.apache.geode.test.junit.categories.DistributedTest)55 LocalRegion (org.apache.geode.internal.cache.LocalRegion)54 PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion)51 VM (org.apache.geode.test.dunit.VM)49 DistributedRegion (org.apache.geode.internal.cache.DistributedRegion)47 Host (org.apache.geode.test.dunit.Host)42 ClientCacheCreation (org.apache.geode.internal.cache.xmlcache.ClientCacheCreation)40 RegionAttributes (org.apache.geode.cache.RegionAttributes)39 CacheCreation (org.apache.geode.internal.cache.xmlcache.CacheCreation)35 SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable)33 CacheException (org.apache.geode.cache.CacheException)32 RegionCreation (org.apache.geode.internal.cache.xmlcache.RegionCreation)32 SerializableCallable (org.apache.geode.test.dunit.SerializableCallable)31 Properties (java.util.Properties)24 AttributesFactory (org.apache.geode.cache.AttributesFactory)24 Cache (org.apache.geode.cache.Cache)23 IntegrationTest (org.apache.geode.test.junit.categories.IntegrationTest)23