
Example 16 with PartitionMemberInfo

use of org.apache.geode.cache.partition.PartitionMemberInfo in project geode by apache.

the class PartitionedRegionLoadModelJUnitTest method testRedundancySatisfactionDoNotEnforceLocalMaxMemory.

/**
   * This test makes sure we ignore the size limit if requested
   */
@Test
public void testRedundancySatisfactionDoNotEnforceLocalMaxMemory() throws Exception {
    PartitionedRegionLoadModel model = new PartitionedRegionLoadModel(bucketOperator, 2, 3, getAddressComparor(false), Collections.<InternalDistributedMember>emptySet(), null);
    InternalDistributedMember member1 = new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 1);
    InternalDistributedMember member2 = new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 2);
    // A member with 1 bucket with low redundancy, but it is too big to copy anywhere
    PartitionMemberInfoImpl details1 = buildDetails(member1, 50, 50, new long[] { 30, 0, 0 }, new long[] { 1, 0, 0 });
    // A member with 2 buckets with low redundancy that can be copied
    PartitionMemberInfoImpl details2 = buildDetails(member2, 40, 40, new long[] { 0, 10, 10 }, new long[] { 0, 1, 1 });
    model.addRegion("a", Arrays.asList(details1, details2), new FakeOfflineDetails(), false);
    // we expect 3 bucket creates to restore redundancy
    assertEquals(3, doMoves(new CompositeDirector(true, true, false, false), model));
    List<Create> expectedCreates = new ArrayList<Create>();
    expectedCreates.add(new Create(member2, 0));
    expectedCreates.add(new Create(member1, 1));
    expectedCreates.add(new Create(member1, 2));
    assertEquals(expectedCreates, bucketOperator.creates);
    Set<PartitionMemberInfo> afterDetails = model.getPartitionedMemberDetails("a");
    assertEquals(2, afterDetails.size());
    for (PartitionMemberInfo member : afterDetails) {
        if (member.getDistributedMember().equals(member1)) {
            assertEquals(details1.getConfiguredMaxMemory(), member.getConfiguredMaxMemory());
        } else {
            assertEquals(details2.getConfiguredMaxMemory(), member.getConfiguredMaxMemory());
        }
    }
}
Also used : PartitionedRegionLoadModel(org.apache.geode.internal.cache.partitioned.rebalance.PartitionedRegionLoadModel) InternalDistributedMember(org.apache.geode.distributed.internal.membership.InternalDistributedMember) PartitionMemberInfo(org.apache.geode.cache.partition.PartitionMemberInfo) ArrayList(java.util.ArrayList) CompositeDirector(org.apache.geode.internal.cache.partitioned.rebalance.CompositeDirector) UnitTest(org.apache.geode.test.junit.categories.UnitTest) Test(org.junit.Test)
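
The test above exercises an offline load model, but the same per-member details can be read from a live partitioned region through the public PartitionRegionHelper API. The sketch below is illustrative only and is not part of the test; the cache, region name, and helper name are assumptions.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.partition.PartitionMemberInfo;
import org.apache.geode.cache.partition.PartitionRegionHelper;
import org.apache.geode.cache.partition.PartitionRegionInfo;

// Illustrative helper (an assumption, not test code): print the per-member
// details of an existing partitioned region.
static void dumpMemberDetails(Cache cache, String regionName) {
    Region<?, ?> region = cache.getRegion(regionName);
    PartitionRegionInfo info = PartitionRegionHelper.getPartitionRegionInfo(region);
    for (PartitionMemberInfo member : info.getPartitionMemberInfo()) {
        System.out.println(member.getDistributedMember()
            + " buckets=" + member.getBucketCount()
            + " primaries=" + member.getPrimaryCount()
            + " sizeBytes=" + member.getSize()
            + " configuredMaxMemory=" + member.getConfiguredMaxMemory());
    }
}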

Example 17 with PartitionMemberInfo

use of org.apache.geode.cache.partition.PartitionMemberInfo in project geode by apache.

the class AutoBalancerJUnitTest method testFacadeTotalBytes2Regions.

@Test
@Ignore("GEODE-2789: need to rewrite this test")
public void testFacadeTotalBytes2Regions() {
    final PartitionedRegion mockR1 = mockContext.mock(PartitionedRegion.class, "r1");
    final PartitionedRegion mockR2 = mockContext.mock(PartitionedRegion.class, "r2");
    final HashSet<PartitionedRegion> regions = new HashSet<>();
    regions.add(mockR1);
    regions.add(mockR2);
    final InternalPRInfo mockR1PRInfo = mockContext.mock(InternalPRInfo.class, "prInforR1");
    final PartitionMemberInfo mockR1M1Info = mockContext.mock(PartitionMemberInfo.class, "r1M1");
    final PartitionMemberInfo mockR1M2Info = mockContext.mock(PartitionMemberInfo.class, "r1M2");
    final HashSet<PartitionMemberInfo> r1Members = new HashSet<>();
    r1Members.add(mockR1M1Info);
    r1Members.add(mockR1M2Info);
    final InternalPRInfo mockR2PRInfo = mockContext.mock(InternalPRInfo.class, "prInforR2");
    final PartitionMemberInfo mockR2M1Info = mockContext.mock(PartitionMemberInfo.class, "r2M1");
    final HashSet<PartitionMemberInfo> r2Members = new HashSet<>();
    r2Members.add(mockR2M1Info);
    final Map<PartitionedRegion, InternalPRInfo> details = new HashMap<>();
    details.put(mockR1, mockR1PRInfo);
    details.put(mockR2, mockR2PRInfo);
    mockContext.checking(new Expectations() {

        {
            allowing(mockR1).getFullPath();
            allowing(mockR2).getFullPath();
            oneOf(mockR1PRInfo).getPartitionMemberInfo();
            will(returnValue(r1Members));
            atLeast(1).of(mockR1M1Info).getSize();
            will(returnValue(123L));
            atLeast(1).of(mockR1M2Info).getSize();
            will(returnValue(74L));
            oneOf(mockR2PRInfo).getPartitionMemberInfo();
            will(returnValue(r2Members));
            atLeast(1).of(mockR2M1Info).getSize();
            will(returnValue(3475L));
        }
    });
    GeodeCacheFacade facade = new GeodeCacheFacade() {

        @Override
        public Map<PartitionedRegion, InternalPRInfo> getRegionMemberDetails() {
            return details;
        }
    };
    assertEquals(123 + 74 + 3475, facade.getTotalDataSize(details));
}
Also used : InternalPRInfo(org.apache.geode.internal.cache.partitioned.InternalPRInfo) Expectations(org.jmock.Expectations) PartitionMemberInfo(org.apache.geode.cache.partition.PartitionMemberInfo) HashMap(java.util.HashMap) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) GeodeCacheFacade(org.apache.geode.cache.util.AutoBalancer.GeodeCacheFacade) HashSet(java.util.HashSet) Ignore(org.junit.Ignore) Test(org.junit.Test) IntegrationTest(org.apache.geode.test.junit.categories.IntegrationTest)
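
The assertion 123 + 74 + 3475 only holds if the facade sums PartitionMemberInfo.getSize() across every member of every region. The following is a minimal sketch of that summation, written purely to illustrate the expected behaviour; it is not the actual GeodeCacheFacade implementation, and the method name is an assumption.

import java.util.Map;
import org.apache.geode.cache.partition.PartitionMemberInfo;
import org.apache.geode.internal.cache.PartitionedRegion;
import org.apache.geode.internal.cache.partitioned.InternalPRInfo;

// Sketch only: total data size as the sum of the per-member sizes reported
// in each region's InternalPRInfo.
static long totalDataSize(Map<PartitionedRegion, InternalPRInfo> details) {
    long total = 0;
    for (InternalPRInfo prInfo : details.values()) {
        for (PartitionMemberInfo memberInfo : prInfo.getPartitionMemberInfo()) {
            total += memberInfo.getSize();
        }
    }
    return total;
}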

Example 18 with PartitionMemberInfo

use of org.apache.geode.cache.partition.PartitionMemberInfo in project geode by apache.

the class RebalanceOperationDUnitTest method recoverRedundancyWithOfflinePersistence.

public void recoverRedundancyWithOfflinePersistence(final boolean simulate, final boolean useAccessor) throws Throwable {
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    final VM vm1 = host.getVM(1);
    VM vm2 = host.getVM(2);
    VM vm3 = host.getVM(3);
    SerializableRunnable createPrRegion = new SerializableRunnable("createRegion") {

        public void run() {
            Cache cache = getCache();
            DiskStoreFactory dsf = cache.createDiskStoreFactory();
            DiskStore ds1 = dsf.setDiskDirs(getDiskDirs()).create(getUniqueName());
            AttributesFactory attr = new AttributesFactory();
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(1);
            paf.setRecoveryDelay(-1);
            paf.setStartupRecoveryDelay(-1);
            PartitionAttributes prAttr = paf.create();
            attr.setPartitionAttributes(prAttr);
            attr.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
            attr.setDiskSynchronous(true);
            attr.setDiskStoreName(getUniqueName());
            cache.createRegion("region1", attr.create());
        }
    };
    // Create the region in only 2 VMs
    vm0.invoke(createPrRegion);
    vm1.invoke(createPrRegion);
    VM rebalanceVM;
    SerializableRunnable createAccessor = new SerializableRunnable("createAccessor") {

        public void run() {
            Cache cache = getCache();
            DiskStoreFactory dsf = cache.createDiskStoreFactory();
            DiskStore ds1 = dsf.setDiskDirs(getDiskDirs()).create("ds1");
            AttributesFactory attr = new AttributesFactory();
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(1);
            paf.setRecoveryDelay(-1);
            paf.setStartupRecoveryDelay(-1);
            paf.setLocalMaxMemory(0);
            PartitionAttributes prAttr = paf.create();
            attr.setPartitionAttributes(prAttr);
            cache.createRegion("region1", attr.create());
        }
    };
    if (useAccessor) {
        // Create an accessor and rebalance from that VM
        vm3.invoke(createAccessor);
        rebalanceVM = vm3;
    } else {
        rebalanceVM = vm0;
    }
    // Create some buckets
    vm0.invoke(new SerializableRunnable("createSomeBuckets") {

        public void run() {
            Cache cache = getCache();
            Region region = cache.getRegion("region1");
            region.put(Integer.valueOf(1), "A");
            region.put(Integer.valueOf(2), "A");
            region.put(Integer.valueOf(3), "A");
            region.put(Integer.valueOf(4), "A");
            region.put(Integer.valueOf(5), "A");
            region.put(Integer.valueOf(6), "A");
        }
    });
    SerializableRunnable closeCache = new SerializableRunnable("close cache") {

        public void run() {
            Cache cache = getCache();
            cache.getRegion("region1").close();
        }
    };
    // Close the cache in vm1
    final Set<Integer> vm1Buckets = getBucketList("region1", vm1);
    vm1.invoke(closeCache);
    SerializableRunnable checkLowRedundancyBeforeRebalance = new SerializableRunnable("checkLowRedundancyBeforeRebalance") {

        public void run() {
            Cache cache = getCache();
            Region region = cache.getRegion("region1");
            PartitionRegionInfo details = PartitionRegionHelper.getPartitionRegionInfo(region);
            assertEquals(6, details.getCreatedBucketCount());
            assertEquals(0, details.getActualRedundantCopies());
            assertEquals(6, details.getLowRedundancyBucketCount());
        }
    };
    SerializableRunnable checkLowRedundancyAfterRebalance = new SerializableRunnable("checkLowRedundancyAfterRebalance") {

        public void run() {
            Cache cache = getCache();
            Region region = cache.getRegion("region1");
            PartitionRegionInfo details = PartitionRegionHelper.getPartitionRegionInfo(region);
            assertEquals(6, details.getCreatedBucketCount());
            assertEquals(1, details.getActualRedundantCopies());
            assertEquals(0, details.getLowRedundancyBucketCount());
        }
    };
    // make sure we can tell that the buckets have low redundancy
    vm0.invoke(checkLowRedundancyBeforeRebalance);
    // Now create the cache in another member
    vm2.invoke(createPrRegion);
    // Make sure we still have low redundancy
    vm0.invoke(checkLowRedundancyBeforeRebalance);
    /*
     * Simulates a rebalance if simulation flag is set. Otherwise, performs a rebalance.
     * 
     * A rebalance will replace offline buckets, so this should restore redundancy
     */
    rebalanceVM.invoke(new SerializableRunnable("simulateRebalance") {

        public void run() {
            Cache cache = getCache();
            ResourceManager manager = cache.getResourceManager();
            RebalanceResults results = doRebalance(simulate, manager);
            assertEquals(6, results.getTotalBucketCreatesCompleted());
            assertEquals(3, results.getTotalPrimaryTransfersCompleted());
            assertEquals(0, results.getTotalBucketTransfersCompleted());
            Set<PartitionRebalanceInfo> detailSet = results.getPartitionRebalanceDetails();
            assertEquals(1, detailSet.size());
            PartitionRebalanceInfo details = detailSet.iterator().next();
            assertEquals(6, details.getBucketCreatesCompleted());
            assertEquals(3, details.getPrimaryTransfersCompleted());
            assertEquals(0, details.getBucketTransfersCompleted());
            Set<PartitionMemberInfo> afterDetails = details.getPartitionMemberDetailsAfter();
            assertEquals(2, afterDetails.size());
            for (PartitionMemberInfo memberDetails : afterDetails) {
                assertEquals(6, memberDetails.getBucketCount());
                assertEquals(3, memberDetails.getPrimaryCount());
            }
            if (!simulate) {
                verifyStats(manager, results);
            }
        }
    });
    Set<Integer> vm0Buckets = getBucketList("region1", vm0);
    Set<Integer> vm2Buckets = getBucketList("region1", vm2);
    // Make sure redundancy is repaired if not simulated
    if (!simulate) {
        vm0.invoke(checkLowRedundancyAfterRebalance);
    } else {
        // Otherwise, we should still have broken redundancy at this point
        vm0.invoke(checkLowRedundancyBeforeRebalance);
    }
    vm2.invoke(closeCache);
    vm0.invoke(closeCache);
    if (useAccessor) {
        vm3.invoke(closeCache);
    }
    // We need to restart both VMs at the same time, because
    // they will wait for each other before allowing operations.
    AsyncInvocation async0 = vm0.invokeAsync(createPrRegion);
    AsyncInvocation async2 = vm2.invokeAsync(createPrRegion);
    async0.getResult(30000);
    async2.getResult(30000);
    if (useAccessor) {
        vm3.invoke(createAccessor);
    }
    // pause for async bucket recovery threads to finish their work. Otherwise
    // the rebalance op may think that the other member doesn't have buckets, then
    // ask it to create them and get a negative reply because it actually does
    // have the buckets, causing the test to fail
    Wait.pause(10000);
    // Rebalance again (unless simulating); redundancy was already restored, so nothing should move.
    if (!simulate) {
        rebalanceVM.invoke(new SerializableRunnable("rebalance") {

            public void run() {
                Cache cache = getCache();
                ResourceManager manager = cache.getResourceManager();
                RebalanceResults results = doRebalance(simulate, manager);
                assertEquals(0, results.getTotalBucketCreatesCompleted());
                assertEquals(0, results.getTotalBucketTransfersCompleted());
                Set<PartitionRebalanceInfo> detailSet = results.getPartitionRebalanceDetails();
                assertEquals(1, detailSet.size());
                PartitionRebalanceInfo details = detailSet.iterator().next();
                assertEquals(0, details.getBucketCreatesCompleted());
                assertEquals(0, details.getBucketTransfersCompleted());
                Set<PartitionMemberInfo> afterDetails = details.getPartitionMemberDetailsAfter();
                assertEquals(2, afterDetails.size());
                for (PartitionMemberInfo memberDetails : afterDetails) {
                    assertEquals(6, memberDetails.getBucketCount());
                    assertEquals(3, memberDetails.getPrimaryCount());
                }
            }
        });
        // Redundancy should be repaired.
        vm0.invoke(checkLowRedundancyAfterRebalance);
    }
    vm1.invoke(createPrRegion);
    // Look at vm0 buckets.
    assertEquals(vm0Buckets, getBucketList("region1", vm0));
    /*
     * Look at vm1 buckets.
     */
    if (!simulate) {
        /*
       * vm1 should have no buckets because offline buckets were recovered when vm0 and vm2 were
       * rebalanced above.
       */
        assertEquals(0, getBucketList("region1", vm1).size());
    } else {
        /*
       * No rebalancing above because the simulation flag is on. Therefore, vm1 will have recovered
       * its buckets. We need to wait for the buckets because they might still be in the middle of
       * creation in the background
       */
        waitForBucketList("region1", vm1, vm1Buckets);
    }
    // look at vm2 buckets
    assertEquals(vm2Buckets, getBucketList("region1", vm2));
}
Also used : Set(java.util.Set) TreeSet(java.util.TreeSet) HashSet(java.util.HashSet) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) PartitionAttributes(org.apache.geode.cache.PartitionAttributes) PartitionRebalanceInfo(org.apache.geode.cache.partition.PartitionRebalanceInfo) Host(org.apache.geode.test.dunit.Host) ResourceManager(org.apache.geode.cache.control.ResourceManager) AsyncInvocation(org.apache.geode.test.dunit.AsyncInvocation) DiskStoreFactory(org.apache.geode.cache.DiskStoreFactory) DiskStore(org.apache.geode.cache.DiskStore) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) PartitionMemberInfo(org.apache.geode.cache.partition.PartitionMemberInfo) VM(org.apache.geode.test.dunit.VM) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) BucketRegion(org.apache.geode.internal.cache.BucketRegion) Region(org.apache.geode.cache.Region) RebalanceResults(org.apache.geode.cache.control.RebalanceResults) PartitionRegionInfo(org.apache.geode.cache.partition.PartitionRegionInfo) Cache(org.apache.geode.cache.Cache)
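
The doRebalance(simulate, manager) helper is defined elsewhere in this test class. Assuming it simply delegates to the ResourceManager's RebalanceFactory and blocks for the results, it would look roughly like the sketch below; only the ResourceManager and RebalanceFactory calls are the public Geode API, the helper itself is an assumption.

import org.apache.geode.cache.control.RebalanceOperation;
import org.apache.geode.cache.control.RebalanceResults;
import org.apache.geode.cache.control.ResourceManager;

// Sketch of a doRebalance helper: either simulate the rebalance or run it,
// then wait for and return the results.
static RebalanceResults doRebalance(boolean simulate, ResourceManager manager)
        throws InterruptedException {
    RebalanceOperation op = simulate
        ? manager.createRebalanceFactory().simulate()
        : manager.createRebalanceFactory().start();
    return op.getResults();
}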

Example 19 with PartitionMemberInfo

use of org.apache.geode.cache.partition.PartitionMemberInfo in project geode by apache.

the class RebalanceOperationDUnitTest method moveBucketsWithRedundancy.

/**
   * Test to make sure we balance buckets between three hosts with redundancy
   */
public void moveBucketsWithRedundancy(final boolean simulate) {
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    VM vm2 = host.getVM(2);
    SerializableRunnable createPrRegion = new SerializableRunnable("createRegion") {

        public void run() {
            Cache cache = getCache();
            AttributesFactory attr = new AttributesFactory();
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(1);
            paf.setRecoveryDelay(-1);
            paf.setStartupRecoveryDelay(-1);
            PartitionAttributes prAttr = paf.create();
            attr.setPartitionAttributes(prAttr);
            cache.createRegion("region1", attr.create());
        }
    };
    // Create the region in two VMs
    vm0.invoke(createPrRegion);
    vm1.invoke(createPrRegion);
    // Create some buckets
    vm0.invoke(new SerializableRunnable("createSomeBuckets") {

        public void run() {
            Cache cache = getCache();
            Region region = cache.getRegion("region1");
            for (int i = 0; i < 12; i++) {
                region.put(Integer.valueOf(i), "A");
            }
        }
    });
    // Create the region in one more VM.
    vm2.invoke(createPrRegion);
    // Now simulate a rebalance
    final Long totalSize = (Long) vm0.invoke(new SerializableCallable("simulateRebalance") {

        public Object call() {
            Cache cache = getCache();
            ResourceManager manager = cache.getResourceManager();
            RebalanceResults results = doRebalance(simulate, manager);
            assertEquals(0, results.getTotalBucketCreatesCompleted());
            // We don't know how many primaries will move, it depends on
            // if the move bucket code moves the primary or a redundant bucket
            // assertIndexDetailsEquals(0, results.getTotalPrimaryTransfersCompleted());
            assertEquals(8, results.getTotalBucketTransfersCompleted());
            assertTrue(0 < results.getTotalBucketTransferBytes());
            Set<PartitionRebalanceInfo> detailSet = results.getPartitionRebalanceDetails();
            assertEquals(1, detailSet.size());
            PartitionRebalanceInfo details = detailSet.iterator().next();
            assertEquals(0, details.getBucketCreatesCompleted());
            assertTrue(0 < details.getBucketTransferBytes());
            assertEquals(8, details.getBucketTransfersCompleted());
            long totalSize = 0;
            Set<PartitionMemberInfo> beforeDetails = details.getPartitionMemberDetailsBefore();
            for (PartitionMemberInfo memberDetails : beforeDetails) {
                totalSize += memberDetails.getSize();
            }
            long afterSize = 0;
            Set<PartitionMemberInfo> afterDetails = details.getPartitionMemberDetailsAfter();
            assertEquals(3, afterDetails.size());
            for (PartitionMemberInfo memberDetails : afterDetails) {
                assertEquals(8, memberDetails.getBucketCount());
                assertEquals(4, memberDetails.getPrimaryCount());
                afterSize += memberDetails.getSize();
            }
            assertEquals(totalSize, afterSize);
            if (!simulate) {
                verifyStats(manager, results);
            }
            return Long.valueOf(totalSize);
        }
    });
    if (!simulate) {
        SerializableRunnable checkBalance = new SerializableRunnable("checkBalance") {

            public void run() {
                Cache cache = getCache();
                Region region = cache.getRegion("region1");
                PartitionRegionInfo details = PartitionRegionHelper.getPartitionRegionInfo(region);
                assertEquals(12, details.getCreatedBucketCount());
                assertEquals(1, details.getActualRedundantCopies());
                assertEquals(0, details.getLowRedundancyBucketCount());
                LogWriterUtils.getLogWriter().info("details=" + details.getPartitionMemberInfo());
                long afterSize = 0;
                for (PartitionMemberInfo memberDetails : details.getPartitionMemberInfo()) {
                    assertEquals(8, memberDetails.getBucketCount());
                    assertEquals(4, memberDetails.getPrimaryCount());
                    afterSize += memberDetails.getSize();
                }
                assertEquals(totalSize.longValue(), afterSize);
            }
        };
        vm0.invoke(checkBalance);
        vm1.invoke(checkBalance);
        vm2.invoke(checkBalance);
    }
}
Also used : SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) PartitionAttributes(org.apache.geode.cache.PartitionAttributes) PartitionRebalanceInfo(org.apache.geode.cache.partition.PartitionRebalanceInfo) Host(org.apache.geode.test.dunit.Host) ResourceManager(org.apache.geode.cache.control.ResourceManager) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) PartitionMemberInfo(org.apache.geode.cache.partition.PartitionMemberInfo) VM(org.apache.geode.test.dunit.VM) SerializableCallable(org.apache.geode.test.dunit.SerializableCallable) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) BucketRegion(org.apache.geode.internal.cache.BucketRegion) Region(org.apache.geode.cache.Region) RebalanceResults(org.apache.geode.cache.control.RebalanceResults) PartitionRegionInfo(org.apache.geode.cache.partition.PartitionRegionInfo) Cache(org.apache.geode.cache.Cache)
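
The before/after size comparison in this test can be factored into a small helper. The sketch below is only an illustration built on the PartitionRebalanceInfo accessors used above; the helper names are assumptions, not part of the test class.

import java.util.Set;
import org.apache.geode.cache.control.RebalanceResults;
import org.apache.geode.cache.partition.PartitionMemberInfo;
import org.apache.geode.cache.partition.PartitionRebalanceInfo;

// Sketch: verify that a rebalance only redistributed data, i.e. the summed
// member sizes reported before and after the operation match per region.
static void assertSizeConserved(RebalanceResults results) {
    for (PartitionRebalanceInfo details : results.getPartitionRebalanceDetails()) {
        long before = sumSizes(details.getPartitionMemberDetailsBefore());
        long after = sumSizes(details.getPartitionMemberDetailsAfter());
        if (before != after) {
            throw new AssertionError("size changed for " + details.getRegionPath()
                + ": before=" + before + " after=" + after);
        }
    }
}

static long sumSizes(Set<PartitionMemberInfo> members) {
    long total = 0;
    for (PartitionMemberInfo member : members) {
        total += member.getSize();
    }
    return total;
}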

Example 20 with PartitionMemberInfo

use of org.apache.geode.cache.partition.PartitionMemberInfo in project geode by apache.

the class RebalanceOperationDUnitTest method filterRegions.

/**
   * Check that the rebalance include/exclude region filters are honored: only regions that are in
   * the included set and not in the excluded set are rebalanced between the two members.
   * 
   * @param simulate whether to simulate the rebalance instead of performing it
   */
public void filterRegions(final boolean simulate) {
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    final int NUM_REGIONS = 4;
    final Set<String> INCLUDED = new HashSet<String>();
    INCLUDED.add("region0");
    INCLUDED.add("region1");
    final Set<String> EXCLUDED = new HashSet<String>();
    EXCLUDED.add("region0");
    EXCLUDED.add("region3");
    final HashSet<String> EXPECTED_REBALANCED = new HashSet<String>();
    EXPECTED_REBALANCED.add("/region0");
    EXPECTED_REBALANCED.add("/region1");
    SerializableRunnable createPrRegion = new SerializableRunnable("createRegion") {

        public void run() {
            Cache cache = getCache();
            AttributesFactory attr = new AttributesFactory();
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(0);
            paf.setRecoveryDelay(-1);
            paf.setStartupRecoveryDelay(-1);
            PartitionAttributes prAttr = paf.create();
            attr.setPartitionAttributes(prAttr);
            for (int i = 0; i < NUM_REGIONS; i++) {
                cache.createRegion("region" + i, attr.create());
            }
        }
    };
    // Create the region in only 1 VM
    vm0.invoke(createPrRegion);
    // Create some buckets
    vm0.invoke(new SerializableRunnable("createSomeBuckets") {

        public void run() {
            Cache cache = getCache();
            for (int i = 0; i < NUM_REGIONS; i++) {
                Region region = cache.getRegion("region" + i);
                for (int j = 0; j < 6; j++) {
                    region.put(Integer.valueOf(j), "A");
                }
            }
        }
    });
    // Create the region in the other VM (should have no effect)
    vm1.invoke(createPrRegion);
    // Now simulate a rebalance
    vm0.invoke(new SerializableRunnable("simulateRebalance") {

        public void run() {
            Cache cache = getCache();
            ResourceManager manager = cache.getResourceManager();
            RebalanceResults results = doRebalance(simulate, manager, INCLUDED, EXCLUDED);
            Set<PartitionRebalanceInfo> detailSet = results.getPartitionRebalanceDetails();
            // assertIndexDetailsEquals(3, detailSet.size());
            Set<String> names = new HashSet<String>();
            for (PartitionRebalanceInfo details : detailSet) {
                assertEquals(0, details.getBucketCreatesCompleted());
                assertEquals(0, details.getPrimaryTransfersCompleted());
                assertTrue(0 < details.getBucketTransferBytes());
                assertEquals(3, details.getBucketTransfersCompleted());
                names.add(details.getRegionPath());
                Set<PartitionMemberInfo> afterDetails = details.getPartitionMemberDetailsAfter();
                assertEquals(2, afterDetails.size());
                for (PartitionMemberInfo memberDetails : afterDetails) {
                    assertEquals(3, memberDetails.getBucketCount());
                    assertEquals(3, memberDetails.getPrimaryCount());
                }
            }
            assertEquals(EXPECTED_REBALANCED, names);
            assertEquals(0, results.getTotalBucketCreatesCompleted());
            assertEquals(0, results.getTotalPrimaryTransfersCompleted());
            assertEquals(6, results.getTotalBucketTransfersCompleted());
            assertTrue(0 < results.getTotalBucketTransferBytes());
            if (!simulate) {
                verifyStats(manager, results);
            }
        }
    });
    if (!simulate) {
        SerializableRunnable checkRedundancyFixed = new SerializableRunnable("checkRedundancyFixed") {

            public void run() {
                Cache cache = getCache();
                for (String name : EXPECTED_REBALANCED) {
                    Region region = cache.getRegion(name);
                    PartitionRegionInfo details = PartitionRegionHelper.getPartitionRegionInfo(region);
                    assertEquals(6, details.getCreatedBucketCount());
                    assertEquals(0, details.getActualRedundantCopies());
                    assertEquals(0, details.getLowRedundancyBucketCount());
                    assertEquals(2, details.getPartitionMemberInfo().size());
                    for (PartitionMemberInfo memberDetails : details.getPartitionMemberInfo()) {
                        assertEquals(3, memberDetails.getBucketCount());
                        assertEquals(3, memberDetails.getPrimaryCount());
                    }
                }
                Region region = cache.getRegion("region2");
                PartitionRegionInfo details = PartitionRegionHelper.getPartitionRegionInfo(region);
                assertEquals(6, details.getCreatedBucketCount());
                assertEquals(0, details.getActualRedundantCopies());
                assertEquals(0, details.getLowRedundancyBucketCount());
                assertEquals(2, details.getPartitionMemberInfo().size());
                for (PartitionMemberInfo memberDetails : details.getPartitionMemberInfo()) {
                    int bucketCount = memberDetails.getBucketCount();
                    int primaryCount = memberDetails.getPrimaryCount();
                    assertTrue("Wrong number of buckets on non rebalanced region buckets=" + bucketCount + " primarys=" + primaryCount, bucketCount == 6 && primaryCount == 6 || bucketCount == 0 && primaryCount == 0);
                }
            }
        };
        vm0.invoke(checkRedundancyFixed);
        vm1.invoke(checkRedundancyFixed);
    }
}
Also used : Set(java.util.Set) TreeSet(java.util.TreeSet) HashSet(java.util.HashSet) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) PartitionAttributes(org.apache.geode.cache.PartitionAttributes) PartitionRebalanceInfo(org.apache.geode.cache.partition.PartitionRebalanceInfo) Host(org.apache.geode.test.dunit.Host) ResourceManager(org.apache.geode.cache.control.ResourceManager) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) PartitionMemberInfo(org.apache.geode.cache.partition.PartitionMemberInfo) VM(org.apache.geode.test.dunit.VM) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) BucketRegion(org.apache.geode.internal.cache.BucketRegion) Region(org.apache.geode.cache.Region) RebalanceResults(org.apache.geode.cache.control.RebalanceResults) PartitionRegionInfo(org.apache.geode.cache.partition.PartitionRegionInfo) HashSet(java.util.HashSet) Cache(org.apache.geode.cache.Cache)
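
The INCLUDED and EXCLUDED sets are passed through a doRebalance overload defined elsewhere in the test class. Assuming that overload delegates to the public RebalanceFactory API, the filtering looks roughly like the sketch below; note that the filters take plain region names, while getRegionPath() later reports them with a leading "/".

import java.util.Set;
import org.apache.geode.cache.control.RebalanceFactory;
import org.apache.geode.cache.control.RebalanceResults;
import org.apache.geode.cache.control.ResourceManager;

// Sketch of a filtered rebalance: only regions in "included" and not in
// "excluded" are touched by the operation.
static RebalanceResults doRebalance(boolean simulate, ResourceManager manager,
        Set<String> included, Set<String> excluded) throws InterruptedException {
    RebalanceFactory factory = manager.createRebalanceFactory()
        .includeRegions(included)
        .excludeRegions(excluded);
    return (simulate ? factory.simulate() : factory.start()).getResults();
}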

Aggregations

PartitionMemberInfo (org.apache.geode.cache.partition.PartitionMemberInfo): 29
PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion): 22
HashSet (java.util.HashSet): 21
SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable): 20
Cache (org.apache.geode.cache.Cache): 18
Region (org.apache.geode.cache.Region): 18
RebalanceResults (org.apache.geode.cache.control.RebalanceResults): 18
PartitionRebalanceInfo (org.apache.geode.cache.partition.PartitionRebalanceInfo): 18
PartitionRegionInfo (org.apache.geode.cache.partition.PartitionRegionInfo): 18
BucketRegion (org.apache.geode.internal.cache.BucketRegion): 18
Host (org.apache.geode.test.dunit.Host): 18
VM (org.apache.geode.test.dunit.VM): 18
Set (java.util.Set): 17
InternalDistributedMember (org.apache.geode.distributed.internal.membership.InternalDistributedMember): 17
ResourceManager (org.apache.geode.cache.control.ResourceManager): 16
TreeSet (java.util.TreeSet): 15
Test (org.junit.Test): 15
AttributesFactory (org.apache.geode.cache.AttributesFactory): 12
PartitionAttributes (org.apache.geode.cache.PartitionAttributes): 12
PartitionAttributesFactory (org.apache.geode.cache.PartitionAttributesFactory): 12