
Example 21 with RebalanceResults

use of org.apache.geode.cache.control.RebalanceResults in project geode by apache.

the class AutoBalancerJUnitTest method getFacadeForResourceManagerOps.

private GeodeCacheFacade getFacadeForResourceManagerOps(final boolean simulate) throws Exception {
    final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class);
    final InternalResourceManager mockRM = mockContext.mock(InternalResourceManager.class);
    final RebalanceFactory mockRebalanceFactory = mockContext.mock(RebalanceFactory.class);
    final RebalanceOperation mockRebalanceOperation = mockContext.mock(RebalanceOperation.class);
    final RebalanceResults mockRebalanceResults = mockContext.mock(RebalanceResults.class);
    mockContext.checking(new Expectations() {

        {
            oneOf(mockCache).isClosed();
            will(returnValue(false));
            oneOf(mockCache).getResourceManager();
            will(returnValue(mockRM));
            oneOf(mockRM).createRebalanceFactory();
            will(returnValue(mockRebalanceFactory));
            if (simulate) {
                oneOf(mockRebalanceFactory).simulate();
            } else {
                oneOf(mockRebalanceFactory).start();
            }
            will(returnValue(mockRebalanceOperation));
            oneOf(mockRebalanceOperation).getResults();
            will(returnValue(mockRebalanceResults));
            if (simulate) {
                atLeast(1).of(mockRebalanceResults).getTotalBucketTransferBytes();
                will(returnValue(12345L));
            }
            allowing(mockRebalanceResults);
        }
    });
    GeodeCacheFacade facade = new GeodeCacheFacade(mockCache);
    return facade;
}
Also used : Expectations(org.jmock.Expectations) RebalanceFactory(org.apache.geode.cache.control.RebalanceFactory) RebalanceOperation(org.apache.geode.cache.control.RebalanceOperation) GeodeCacheFacade(org.apache.geode.cache.util.AutoBalancer.GeodeCacheFacade) GemFireCacheImpl(org.apache.geode.internal.cache.GemFireCacheImpl) RebalanceResults(org.apache.geode.cache.control.RebalanceResults) InternalResourceManager(org.apache.geode.internal.cache.control.InternalResourceManager)
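
For orientation, the collaboration the mocks above stand in for maps to the following public-API flow. This is a minimal sketch rather than part of the test; the Cache is assumed to be created and populated elsewhere, and the class name is illustrative.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.control.RebalanceFactory;
import org.apache.geode.cache.control.RebalanceOperation;
import org.apache.geode.cache.control.RebalanceResults;

public class RebalanceFlowSketch {

    // ResourceManager -> RebalanceFactory -> simulate()/start() -> RebalanceResults,
    // the same chain the Expectations block above stubs out.
    static RebalanceResults rebalance(Cache cache, boolean simulate) throws InterruptedException {
        RebalanceFactory factory = cache.getResourceManager().createRebalanceFactory();
        // simulate() only computes what a rebalance would do; start() actually moves data.
        RebalanceOperation op = simulate ? factory.simulate() : factory.start();
        // getResults() blocks until the (simulated) rebalance operation completes.
        return op.getResults();
    }
}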

Example 22 with RebalanceResults

use of org.apache.geode.cache.control.RebalanceResults in project geode by apache.

the class BackupDUnitTest method backupWhileBucketIsMoved.

/**
   * Test for bug 42420. Invoke a backup when a bucket is in the middle of being moved.
   * 
   * @param observer a message observer that triggers the backup at the correct time.
   */
public void backupWhileBucketIsMoved(final DistributionMessageObserver observer) throws Throwable {
    Host host = Host.getHost(0);
    final VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    final VM vm2 = host.getVM(2);
    vm0.invoke(new SerializableRunnable("Add listener to invoke backup") {

        public void run() {
            disconnectFromDS();
            // This listener will wait for a response to the
            // destroy region message and then trigger a backup.
            // The backup will run before this member has finished destroying
            // a bucket, but after the peer has removed the bucket.
            DistributionMessageObserver.setInstance(observer);
        }
    });
    try {
        LogWriterUtils.getLogWriter().info("Creating region in VM0");
        createPersistentRegion(vm0);
        // create two buckets on vm0
        createData(vm0, 0, 2, "A", "region1");
        // create the pr on vm1, which won't have any buckets
        LogWriterUtils.getLogWriter().info("Creating region in VM1");
        createPersistentRegion(vm1);
        // Perform a rebalance. This will trigger the backup in the middle
        // of the bucket move.
        vm0.invoke(new SerializableRunnable("Do rebalance") {

            public void run() {
                Cache cache = getCache();
                RebalanceOperation op = cache.getResourceManager().createRebalanceFactory().start();
                RebalanceResults results;
                try {
                    results = op.getResults();
                    assertEquals(1, results.getTotalBucketTransfersCompleted());
                } catch (Exception e) {
                    Assert.fail("interupted", e);
                }
            }
        });
        validateBackupComplete();
        createData(vm0, 0, 5, "C", "region1");
        closeCache(vm0);
        closeCache(vm1);
        // Destroy the current data
        Invoke.invokeInEveryVM(new SerializableRunnable("Clean disk dirs") {

            public void run() {
                try {
                    cleanDiskDirs();
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
            }
        });
        restoreBackup(2);
        LogWriterUtils.getLogWriter().info("Creating region in VM0");
        AsyncInvocation async0 = createPersistentRegionAsync(vm0);
        LogWriterUtils.getLogWriter().info("Creating region in VM1");
        AsyncInvocation async1 = createPersistentRegionAsync(vm1);
        async0.getResult(MAX_WAIT);
        async1.getResult(MAX_WAIT);
        checkData(vm0, 0, 2, "A", "region1");
    } finally {
        // cleanup the distribution message observer
        vm0.invoke(new SerializableRunnable() {

            public void run() {
                DistributionMessageObserver.setInstance(null);
                disconnectFromDS();
            }
        });
    }
}
Also used : RebalanceOperation(org.apache.geode.cache.control.RebalanceOperation) VM(org.apache.geode.test.dunit.VM) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) Host(org.apache.geode.test.dunit.Host) IOException(java.io.IOException) AsyncInvocation(org.apache.geode.test.dunit.AsyncInvocation) RebalanceResults(org.apache.geode.cache.control.RebalanceResults) FileNotFoundException(java.io.FileNotFoundException) IgnoredException(org.apache.geode.test.dunit.IgnoredException) IOException(java.io.IOException) Cache(org.apache.geode.cache.Cache)
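
Outside the dunit harness, the rebalance step in the runnable above reduces to the sketch below. It is a hedged, standalone example; the cache and its partitioned regions are assumed to exist already, the class name is illustrative, and only getters that appear in these examples are used.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.control.RebalanceOperation;
import org.apache.geode.cache.control.RebalanceResults;

public class RebalanceTotalsSketch {

    // Starts a rebalance and prints the top-level counters the tests assert on.
    static void rebalanceAndReport(Cache cache) throws InterruptedException {
        RebalanceOperation op = cache.getResourceManager().createRebalanceFactory().start();
        // getResults() blocks until the rebalance finishes.
        RebalanceResults results = op.getResults();
        System.out.println("bucket creates:    " + results.getTotalBucketCreatesCompleted());
        System.out.println("bucket transfers:  " + results.getTotalBucketTransfersCompleted());
        System.out.println("primary transfers: " + results.getTotalPrimaryTransfersCompleted());
        System.out.println("bytes transferred: " + results.getTotalBucketTransferBytes());
    }
}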

Example 23 with RebalanceResults

use of org.apache.geode.cache.control.RebalanceResults in project geode by apache.

the class PersistentColocatedPartitionedRegionDUnitTest method testRebalanceDuringRecovery.

/**
   * Test that a rebalance while regions are in the middle of recovery doesn't cause issues.
   * 
   * This is slightly different than {@link #testRebalanceWithOfflineChildRegion()} because in this
   * case all of the regions have been created, but they are in the middle of actually recovering
   * buckets from disk.
   */
@Test
public void testRebalanceDuringRecovery() throws Throwable {
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    VM vm2 = host.getVM(2);
    SerializableRunnable createPRs = new SerializableRunnable() {

        public void run() {
            Cache cache = getCache();
            DiskStore ds = cache.findDiskStore("disk");
            if (ds == null) {
                ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("disk");
            }
            AttributesFactory af = new AttributesFactory();
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(1);
            paf.setRecoveryDelay(-1);
            af.setPartitionAttributes(paf.create());
            af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
            af.setDiskStoreName("disk");
            cache.createRegion(PR_REGION_NAME, af.create());
            paf.setRedundantCopies(1);
            paf.setRecoveryDelay(-1);
            paf.setColocatedWith(PR_REGION_NAME);
            af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
            af.setDiskStoreName("disk");
            af.setPartitionAttributes(paf.create());
            cache.createRegion("region2", af.create());
        }
    };
    // Create the PRs on two members
    vm0.invoke(createPRs);
    vm1.invoke(createPRs);
    // Create some buckets.
    createData(vm0, 0, NUM_BUCKETS, "a");
    createData(vm0, 0, NUM_BUCKETS, "a", "region2");
    // Close the members
    closeCache(vm1);
    closeCache(vm0);
    SerializableRunnable addHook = new SerializableRunnable() {

        @Override
        public void run() {
            PartitionedRegionObserverHolder.setInstance(new PRObserver());
        }
    };
    SerializableRunnable waitForHook = new SerializableRunnable() {

        @Override
        public void run() {
            PRObserver observer = (PRObserver) PartitionedRegionObserverHolder.getInstance();
            try {
                observer.waitForCreate();
            } catch (InterruptedException e) {
                Assert.fail("interrupted", e);
            }
        }
    };
    SerializableRunnable removeHook = new SerializableRunnable() {

        @Override
        public void run() {
            PRObserver observer = (PRObserver) PartitionedRegionObserverHolder.getInstance();
            observer.release();
            PartitionedRegionObserverHolder.setInstance(new PartitionedRegionObserverAdapter());
        }
    };
    vm1.invoke(addHook);
    AsyncInvocation async0;
    AsyncInvocation async1;
    AsyncInvocation async2;
    RebalanceResults rebalanceResults;
    try {
        async0 = vm0.invokeAsync(createPRs);
        async1 = vm1.invokeAsync(createPRs);
        vm1.invoke(waitForHook);
        // Now create the parent region on vm-2. vm-2 did not
        // previously host the child region.
        vm2.invoke(createPRs);
        // Try to forcibly move some buckets to vm2 (this should not succeed).
        moveBucket(0, vm1, vm2);
        moveBucket(1, vm1, vm2);
    } finally {
        vm1.invoke(removeHook);
    }
    async0.getResult(MAX_WAIT);
    async1.getResult(MAX_WAIT);
    // Validate the data
    checkData(vm0, 0, NUM_BUCKETS, "a");
    checkData(vm0, 0, NUM_BUCKETS, "a", "region2");
    // Make sure we can actually use the buckets in the child region.
    createData(vm0, 0, NUM_BUCKETS, "c", "region2");
    // Make sure the system is recoverable
    // by restarting it
    closeCache(vm2);
    closeCache(vm1);
    closeCache(vm0);
    async0 = vm0.invokeAsync(createPRs);
    async1 = vm1.invokeAsync(createPRs);
    async2 = vm2.invokeAsync(createPRs);
    async0.getResult();
    async1.getResult();
    async2.getResult();
}
Also used : DiskStore(org.apache.geode.cache.DiskStore) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) VM(org.apache.geode.test.dunit.VM) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) Host(org.apache.geode.test.dunit.Host) AsyncInvocation(org.apache.geode.test.dunit.AsyncInvocation) RebalanceResults(org.apache.geode.cache.control.RebalanceResults) Cache(org.apache.geode.cache.Cache) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest) FlakyTest(org.apache.geode.test.junit.categories.FlakyTest) Test(org.junit.Test)
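
The createPRs runnable above is the interesting setup here: a persistent parent partitioned region plus a child region colocated with it. A minimal standalone sketch of that configuration follows; the region name "parent" stands in for PR_REGION_NAME, the disk directories are supplied by the caller, and the class name is illustrative.

import java.io.File;
import org.apache.geode.cache.AttributesFactory;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.DataPolicy;
import org.apache.geode.cache.DiskStore;
import org.apache.geode.cache.PartitionAttributesFactory;

public class ColocatedPersistentRegionsSketch {

    // Creates a persistent parent partitioned region and a child region colocated with it,
    // mirroring the createPRs runnable in the test above.
    static void createRegions(Cache cache, File[] diskDirs) {
        DiskStore ds = cache.findDiskStore("disk");
        if (ds == null) {
            ds = cache.createDiskStoreFactory().setDiskDirs(diskDirs).create("disk");
        }
        // Parent region ("parent" is a placeholder for PR_REGION_NAME).
        AttributesFactory af = new AttributesFactory();
        PartitionAttributesFactory paf = new PartitionAttributesFactory();
        paf.setRedundantCopies(1);
        paf.setRecoveryDelay(-1); // no automatic redundancy recovery
        af.setPartitionAttributes(paf.create());
        af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
        af.setDiskStoreName("disk");
        cache.createRegion("parent", af.create());
        // Child region, colocated so matching buckets live on the same members as the parent.
        paf.setColocatedWith("parent");
        af.setPartitionAttributes(paf.create());
        cache.createRegion("region2", af.create());
    }
}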

Example 24 with RebalanceResults

use of org.apache.geode.cache.control.RebalanceResults in project geode by apache.

the class RebalanceOperationDUnitTest method recoverRedundancyWithOfflinePersistence.

public void recoverRedundancyWithOfflinePersistence(final boolean simulate, final boolean useAccessor) throws Throwable {
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    final VM vm1 = host.getVM(1);
    VM vm2 = host.getVM(2);
    VM vm3 = host.getVM(3);
    SerializableRunnable createPrRegion = new SerializableRunnable("createRegion") {

        public void run() {
            Cache cache = getCache();
            DiskStoreFactory dsf = cache.createDiskStoreFactory();
            DiskStore ds1 = dsf.setDiskDirs(getDiskDirs()).create(getUniqueName());
            AttributesFactory attr = new AttributesFactory();
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(1);
            paf.setRecoveryDelay(-1);
            paf.setStartupRecoveryDelay(-1);
            PartitionAttributes prAttr = paf.create();
            attr.setPartitionAttributes(prAttr);
            attr.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
            attr.setDiskSynchronous(true);
            attr.setDiskStoreName(getUniqueName());
            cache.createRegion("region1", attr.create());
        }
    };
    // Create the region in only 2 VMs
    vm0.invoke(createPrRegion);
    vm1.invoke(createPrRegion);
    VM rebalanceVM;
    SerializableRunnable createAccessor = new SerializableRunnable("createAccessor") {

        public void run() {
            Cache cache = getCache();
            DiskStoreFactory dsf = cache.createDiskStoreFactory();
            DiskStore ds1 = dsf.setDiskDirs(getDiskDirs()).create("ds1");
            AttributesFactory attr = new AttributesFactory();
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(1);
            paf.setRecoveryDelay(-1);
            paf.setStartupRecoveryDelay(-1);
            paf.setLocalMaxMemory(0);
            PartitionAttributes prAttr = paf.create();
            attr.setPartitionAttributes(prAttr);
            cache.createRegion("region1", attr.create());
        }
    };
    if (useAccessor) {
        // Create an accessor and rebalance from that VM
        vm3.invoke(createAccessor);
        rebalanceVM = vm3;
    } else {
        rebalanceVM = vm0;
    }
    // Create some buckets
    vm0.invoke(new SerializableRunnable("createSomeBuckets") {

        public void run() {
            Cache cache = getCache();
            Region region = cache.getRegion("region1");
            region.put(Integer.valueOf(1), "A");
            region.put(Integer.valueOf(2), "A");
            region.put(Integer.valueOf(3), "A");
            region.put(Integer.valueOf(4), "A");
            region.put(Integer.valueOf(5), "A");
            region.put(Integer.valueOf(6), "A");
        }
    });
    SerializableRunnable closeCache = new SerializableRunnable("close cache") {

        public void run() {
            Cache cache = getCache();
            cache.getRegion("region1").close();
        }
    };
    // Close the cache in vm1
    final Set<Integer> vm1Buckets = getBucketList("region1", vm1);
    vm1.invoke(closeCache);
    SerializableRunnable checkLowRedundancyBeforeRebalance = new SerializableRunnable("checkLowRedundancyBeforeRebalance") {

        public void run() {
            Cache cache = getCache();
            Region region = cache.getRegion("region1");
            PartitionRegionInfo details = PartitionRegionHelper.getPartitionRegionInfo(region);
            assertEquals(6, details.getCreatedBucketCount());
            assertEquals(0, details.getActualRedundantCopies());
            assertEquals(6, details.getLowRedundancyBucketCount());
        }
    };
    SerializableRunnable checkLowRedundancyAfterRebalance = new SerializableRunnable("checkLowRedundancyAfterRebalance") {

        public void run() {
            Cache cache = getCache();
            Region region = cache.getRegion("region1");
            PartitionRegionInfo details = PartitionRegionHelper.getPartitionRegionInfo(region);
            assertEquals(6, details.getCreatedBucketCount());
            assertEquals(1, details.getActualRedundantCopies());
            assertEquals(0, details.getLowRedundancyBucketCount());
        }
    };
    // make sure we can tell that the buckets have low redundancy
    vm0.invoke(checkLowRedundancyBeforeRebalance);
    // Now create the cache in another member
    vm2.invoke(createPrRegion);
    // Make sure we still have low redundancy
    vm0.invoke(checkLowRedundancyBeforeRebalance);
    /*
     * Simulates a rebalance if the simulation flag is set; otherwise, performs a real rebalance.
     * 
     * A rebalance will replace offline buckets, so this should restore redundancy.
     */
    rebalanceVM.invoke(new SerializableRunnable("simulateRebalance") {

        public void run() {
            Cache cache = getCache();
            ResourceManager manager = cache.getResourceManager();
            RebalanceResults results = doRebalance(simulate, manager);
            assertEquals(6, results.getTotalBucketCreatesCompleted());
            assertEquals(3, results.getTotalPrimaryTransfersCompleted());
            assertEquals(0, results.getTotalBucketTransfersCompleted());
            Set<PartitionRebalanceInfo> detailSet = results.getPartitionRebalanceDetails();
            assertEquals(1, detailSet.size());
            PartitionRebalanceInfo details = detailSet.iterator().next();
            assertEquals(6, details.getBucketCreatesCompleted());
            assertEquals(3, details.getPrimaryTransfersCompleted());
            assertEquals(0, details.getBucketTransfersCompleted());
            Set<PartitionMemberInfo> afterDetails = details.getPartitionMemberDetailsAfter();
            assertEquals(2, afterDetails.size());
            for (PartitionMemberInfo memberDetails : afterDetails) {
                assertEquals(6, memberDetails.getBucketCount());
                assertEquals(3, memberDetails.getPrimaryCount());
            }
            if (!simulate) {
                verifyStats(manager, results);
            }
        }
    });
    Set<Integer> vm0Buckets = getBucketList("region1", vm0);
    Set<Integer> vm2Buckets = getBucketList("region1", vm2);
    // Make sure redundancy is repaired if not simulated
    if (!simulate) {
        vm0.invoke(checkLowRedundancyAfterRebalance);
    } else {
        // Otherwise, we should still have broken redundancy at this point
        vm0.invoke(checkLowRedundancyBeforeRebalance);
    }
    vm2.invoke(closeCache);
    vm0.invoke(closeCache);
    if (useAccessor) {
        vm3.invoke(closeCache);
    }
    // We need to restart both VMs at the same time, because
    // they will wait for each other before allowing operations.
    AsyncInvocation async0 = vm0.invokeAsync(createPrRegion);
    AsyncInvocation async2 = vm2.invokeAsync(createPrRegion);
    async0.getResult(30000);
    async2.getResult(30000);
    if (useAccessor) {
        vm3.invoke(createAccessor);
    }
    // Pause for async bucket recovery threads to finish their work. Otherwise
    // the rebalance op may think that the other member doesn't have buckets, then
    // ask it to create them and get a negative reply because it actually does
    // have the buckets, causing the test to fail (or it might not; the timing is racy).
    Wait.pause(10000);
    if (!simulate) {
        rebalanceVM.invoke(new SerializableRunnable("rebalance") {

            public void run() {
                Cache cache = getCache();
                ResourceManager manager = cache.getResourceManager();
                RebalanceResults results = doRebalance(simulate, manager);
                assertEquals(0, results.getTotalBucketCreatesCompleted());
                assertEquals(0, results.getTotalBucketTransfersCompleted());
                Set<PartitionRebalanceInfo> detailSet = results.getPartitionRebalanceDetails();
                assertEquals(1, detailSet.size());
                PartitionRebalanceInfo details = detailSet.iterator().next();
                assertEquals(0, details.getBucketCreatesCompleted());
                assertEquals(0, details.getBucketTransfersCompleted());
                Set<PartitionMemberInfo> afterDetails = details.getPartitionMemberDetailsAfter();
                assertEquals(2, afterDetails.size());
                for (PartitionMemberInfo memberDetails : afterDetails) {
                    assertEquals(6, memberDetails.getBucketCount());
                    assertEquals(3, memberDetails.getPrimaryCount());
                }
            }
        });
        // Redundancy should be repaired.
        vm0.invoke(checkLowRedundancyAfterRebalance);
    }
    vm1.invoke(createPrRegion);
    // Look at vm0 buckets.
    assertEquals(vm0Buckets, getBucketList("region1", vm0));
    /*
     * Look at vm1 buckets.
     */
    if (!simulate) {
        /*
       * vm1 should have no buckets because offline buckets were recovered when vm0 and vm2 were
       * rebalanced above.
       */
        assertEquals(0, getBucketList("region1", vm1).size());
    } else {
        /*
       * No rebalancing above because the simulation flag is on. Therefore, vm1 will have recovered
       * its buckets. We need to wait for the buckets because they might still be in the middle of
       * creation in the background
       */
        waitForBucketList("region1", vm1, vm1Buckets);
    }
    // look at vm2 buckets
    assertEquals(vm2Buckets, getBucketList("region1", vm2));
}
Also used : Set(java.util.Set) TreeSet(java.util.TreeSet) HashSet(java.util.HashSet) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) PartitionAttributes(org.apache.geode.cache.PartitionAttributes) PartitionRebalanceInfo(org.apache.geode.cache.partition.PartitionRebalanceInfo) Host(org.apache.geode.test.dunit.Host) ResourceManager(org.apache.geode.cache.control.ResourceManager) AsyncInvocation(org.apache.geode.test.dunit.AsyncInvocation) DiskStoreFactory(org.apache.geode.cache.DiskStoreFactory) DiskStore(org.apache.geode.cache.DiskStore) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) PartitionMemberInfo(org.apache.geode.cache.partition.PartitionMemberInfo) VM(org.apache.geode.test.dunit.VM) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) BucketRegion(org.apache.geode.internal.cache.BucketRegion) Region(org.apache.geode.cache.Region) RebalanceResults(org.apache.geode.cache.control.RebalanceResults) PartitionRegionInfo(org.apache.geode.cache.partition.PartitionRegionInfo) Cache(org.apache.geode.cache.Cache)
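
The checkLowRedundancy runnables above both rely on the same metadata call. A hedged sketch of that redundancy check as a small helper, assuming the cache and region already exist (the class and method names are illustrative):

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.partition.PartitionRegionHelper;
import org.apache.geode.cache.partition.PartitionRegionInfo;

public class RedundancyCheckSketch {

    // Returns true if no bucket in the region is currently missing a redundant copy.
    static boolean redundancySatisfied(Cache cache, String regionName) {
        Region<?, ?> region = cache.getRegion(regionName);
        PartitionRegionInfo details = PartitionRegionHelper.getPartitionRegionInfo(region);
        // getLowRedundancyBucketCount() counts buckets with fewer copies than configured.
        return details.getLowRedundancyBucketCount() == 0;
    }
}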

Example 25 with RebalanceResults

use of org.apache.geode.cache.control.RebalanceResults in project geode by apache.

the class RebalanceOperationDUnitTest method moveBucketsWithRedundancy.

/**
   * Test to make sure we balance buckets between three hosts with redundancy
   */
public void moveBucketsWithRedundancy(final boolean simulate) {
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    VM vm2 = host.getVM(2);
    SerializableRunnable createPrRegion = new SerializableRunnable("createRegion") {

        public void run() {
            Cache cache = getCache();
            AttributesFactory attr = new AttributesFactory();
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(1);
            paf.setRecoveryDelay(-1);
            paf.setStartupRecoveryDelay(-1);
            PartitionAttributes prAttr = paf.create();
            attr.setPartitionAttributes(prAttr);
            cache.createRegion("region1", attr.create());
        }
    };
    // Create the region in two VMs
    vm0.invoke(createPrRegion);
    vm1.invoke(createPrRegion);
    // Create some buckets
    vm0.invoke(new SerializableRunnable("createSomeBuckets") {

        public void run() {
            Cache cache = getCache();
            Region region = cache.getRegion("region1");
            for (int i = 0; i < 12; i++) {
                region.put(Integer.valueOf(i), "A");
            }
        }
    });
    // Create the region in one more VM.
    vm2.invoke(createPrRegion);
    // Now simulate a rebalance
    final Long totalSize = (Long) vm0.invoke(new SerializableCallable("simulateRebalance") {

        public Object call() {
            Cache cache = getCache();
            ResourceManager manager = cache.getResourceManager();
            RebalanceResults results = doRebalance(simulate, manager);
            assertEquals(0, results.getTotalBucketCreatesCompleted());
            // We don't know how many primaries will move; it depends on
            // whether the move bucket code moves the primary or a redundant bucket.
            // assertEquals(0, results.getTotalPrimaryTransfersCompleted());
            assertEquals(8, results.getTotalBucketTransfersCompleted());
            assertTrue(0 < results.getTotalBucketTransferBytes());
            Set<PartitionRebalanceInfo> detailSet = results.getPartitionRebalanceDetails();
            assertEquals(1, detailSet.size());
            PartitionRebalanceInfo details = detailSet.iterator().next();
            assertEquals(0, details.getBucketCreatesCompleted());
            assertTrue(0 < details.getBucketTransferBytes());
            assertEquals(8, details.getBucketTransfersCompleted());
            long totalSize = 0;
            Set<PartitionMemberInfo> beforeDetails = details.getPartitionMemberDetailsBefore();
            for (PartitionMemberInfo memberDetails : beforeDetails) {
                totalSize += memberDetails.getSize();
            }
            long afterSize = 0;
            Set<PartitionMemberInfo> afterDetails = details.getPartitionMemberDetailsAfter();
            assertEquals(3, afterDetails.size());
            for (PartitionMemberInfo memberDetails : afterDetails) {
                assertEquals(8, memberDetails.getBucketCount());
                assertEquals(4, memberDetails.getPrimaryCount());
                afterSize += memberDetails.getSize();
            }
            assertEquals(totalSize, afterSize);
            if (!simulate) {
                verifyStats(manager, results);
            }
            return Long.valueOf(totalSize);
        }
    });
    if (!simulate) {
        SerializableRunnable checkBalance = new SerializableRunnable("checkBalance") {

            public void run() {
                Cache cache = getCache();
                Region region = cache.getRegion("region1");
                PartitionRegionInfo details = PartitionRegionHelper.getPartitionRegionInfo(region);
                assertEquals(12, details.getCreatedBucketCount());
                assertEquals(1, details.getActualRedundantCopies());
                assertEquals(0, details.getLowRedundancyBucketCount());
                LogWriterUtils.getLogWriter().info("details=" + details.getPartitionMemberInfo());
                long afterSize = 0;
                for (PartitionMemberInfo memberDetails : details.getPartitionMemberInfo()) {
                    assertEquals(8, memberDetails.getBucketCount());
                    assertEquals(4, memberDetails.getPrimaryCount());
                    afterSize += memberDetails.getSize();
                }
                assertEquals(totalSize.longValue(), afterSize);
            }
        };
        vm0.invoke(checkBalance);
        vm1.invoke(checkBalance);
        vm2.invoke(checkBalance);
    }
}
Also used : SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) PartitionAttributes(org.apache.geode.cache.PartitionAttributes) PartitionRebalanceInfo(org.apache.geode.cache.partition.PartitionRebalanceInfo) Host(org.apache.geode.test.dunit.Host) ResourceManager(org.apache.geode.cache.control.ResourceManager) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) PartitionMemberInfo(org.apache.geode.cache.partition.PartitionMemberInfo) VM(org.apache.geode.test.dunit.VM) SerializableCallable(org.apache.geode.test.dunit.SerializableCallable) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) BucketRegion(org.apache.geode.internal.cache.BucketRegion) Region(org.apache.geode.cache.Region) RebalanceResults(org.apache.geode.cache.control.RebalanceResults) PartitionRegionInfo(org.apache.geode.cache.partition.PartitionRegionInfo) Cache(org.apache.geode.cache.Cache)
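
The assertions above drill from the RebalanceResults into per-region and per-member detail. A hedged sketch of that traversal, using only the getters exercised in these examples (the class and method names are illustrative):

import java.util.Set;
import org.apache.geode.cache.control.RebalanceResults;
import org.apache.geode.cache.partition.PartitionMemberInfo;
import org.apache.geode.cache.partition.PartitionRebalanceInfo;

public class RebalanceDetailSketch {

    // Prints the per-region transfer counts and the post-rebalance view of each member,
    // the same structures the assertions above inspect.
    static void printDetails(RebalanceResults results) {
        Set<PartitionRebalanceInfo> detailSet = results.getPartitionRebalanceDetails();
        for (PartitionRebalanceInfo details : detailSet) {
            System.out.println("bucket transfers: " + details.getBucketTransfersCompleted()
                + ", bytes: " + details.getBucketTransferBytes());
            for (PartitionMemberInfo member : details.getPartitionMemberDetailsAfter()) {
                System.out.println("  buckets=" + member.getBucketCount()
                    + " primaries=" + member.getPrimaryCount()
                    + " size=" + member.getSize());
            }
        }
    }
}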

Aggregations

RebalanceResults (org.apache.geode.cache.control.RebalanceResults) 38
Cache (org.apache.geode.cache.Cache) 26
Host (org.apache.geode.test.dunit.Host) 26
VM (org.apache.geode.test.dunit.VM) 26
SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable) 25
Region (org.apache.geode.cache.Region) 22
ResourceManager (org.apache.geode.cache.control.ResourceManager) 22
PartitionRebalanceInfo (org.apache.geode.cache.partition.PartitionRebalanceInfo) 21
PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion) 21
BucketRegion (org.apache.geode.internal.cache.BucketRegion) 20
PartitionRegionInfo (org.apache.geode.cache.partition.PartitionRegionInfo) 19
Set (java.util.Set) 18
AttributesFactory (org.apache.geode.cache.AttributesFactory) 18
PartitionAttributesFactory (org.apache.geode.cache.PartitionAttributesFactory) 18
PartitionMemberInfo (org.apache.geode.cache.partition.PartitionMemberInfo) 18
HashSet (java.util.HashSet) 16
TreeSet (java.util.TreeSet) 16
PartitionAttributes (org.apache.geode.cache.PartitionAttributes) 16
RebalanceOperation (org.apache.geode.cache.control.RebalanceOperation) 11
DistributedTest (org.apache.geode.test.junit.categories.DistributedTest) 10