Example 41 with PartitionAttributes

use of org.apache.geode.cache.PartitionAttributes in project geode by apache.

the class RebalanceOperationDUnitTest method moveBucketsWithUnrecoveredValuesRedundancy.

/**
   * Test that a rebalance can move buckets whose values have not been recovered from disk (the
   * values remain on disk after a restart with value recovery disabled), and that the bucket
   * sizes reported after recovery reflect the on-disk values.
   */
public void moveBucketsWithUnrecoveredValuesRedundancy(final boolean simulate) {
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    SerializableRunnable createPrRegion = new SerializableRunnable("createRegion") {

        public void run() {
            System.setProperty(DiskStoreImpl.RECOVER_VALUE_PROPERTY_NAME, "false");
            try {
                Cache cache = getCache();
                if (cache.findDiskStore("store") == null) {
                    cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).setMaxOplogSize(1).create("store");
                }
                AttributesFactory attr = new AttributesFactory();
                PartitionAttributesFactory paf = new PartitionAttributesFactory();
                attr.setDiskStoreName("store");
                attr.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
                paf.setRedundantCopies(0);
                paf.setRecoveryDelay(-1);
                paf.setStartupRecoveryDelay(-1);
                PartitionAttributes prAttr = paf.create();
                attr.setPartitionAttributes(prAttr);
                attr.setCacheLoader(new Bug40228Loader());
                cache.createRegion("region1", attr.create());
            } finally {
                System.setProperty(DiskStoreImpl.RECOVER_VALUE_PROPERTY_NAME, "true");
            }
        }
    };
    // Create the region in only 1 VM
    vm0.invoke(createPrRegion);
    // Create some buckets
    vm0.invoke(new SerializableRunnable("createSomeBuckets") {

        public void run() {
            Cache cache = getCache();
            Region region = cache.getRegion("region1");
            region.put(Integer.valueOf(1), "A");
            region.put(Integer.valueOf(2), "A");
            region.put(Integer.valueOf(3), "A");
            region.put(Integer.valueOf(4), "A");
            region.put(Integer.valueOf(5), "A");
            region.put(Integer.valueOf(6), "A");
        }
    });
    final long[] bucketSizes = (long[]) vm0.invoke(new SerializableCallable("get sizes and close cache") {

        public Object call() {
            PartitionedRegion region = (PartitionedRegion) getCache().getRegion("region1");
            PartitionedRegionDataStore dataStore = region.getDataStore();
            long[] bucketSizes = new long[7];
            for (int i = 1; i <= 6; i++) {
                BucketRegion bucket = dataStore.getLocalBucketById(i);
                bucketSizes[i] = bucket.getTotalBytes();
                assertEquals(0, bucket.getNumOverflowBytesOnDisk());
                assertEquals(0, bucket.getNumOverflowOnDisk());
                assertEquals(1, bucket.getNumEntriesInVM());
            }
            getCache().close();
            return bucketSizes;
        }
    });
    // now recover the region
    vm0.invoke(createPrRegion);
    vm0.invoke(new SerializableRunnable("check sizes") {

        public void run() {
            PartitionedRegion region = (PartitionedRegion) getCache().getRegion("region1");
            PartitionedRegionDataStore dataStore = region.getDataStore();
            for (int i = 1; i <= 6; i++) {
                BucketRegion bucket = dataStore.getLocalBucketById(i);
                assertEquals(1, bucket.getNumOverflowOnDisk());
                assertEquals(0, bucket.getNumEntriesInVM());
            // the size recorded on disk is not the same as the in-memory size, apparently
                assertTrue("Bucket size was " + bucket.getNumOverflowBytesOnDisk(), 1 < bucket.getNumOverflowBytesOnDisk());
                assertEquals(bucket.getNumOverflowBytesOnDisk(), bucket.getTotalBytes());
            }
        }
    });
    // Create the region in the other VM (should have no effect)
    vm1.invoke(createPrRegion);
    // Now simulate a rebalance
    vm0.invoke(new SerializableRunnable("simulateRebalance") {

        public void run() {
            Cache cache = getCache();
            ResourceManager manager = cache.getResourceManager();
            RebalanceResults results = doRebalance(simulate, manager);
            assertEquals(0, results.getTotalBucketCreatesCompleted());
            assertEquals(0, results.getTotalPrimaryTransfersCompleted());
            assertEquals(3, results.getTotalBucketTransfersCompleted());
            assertTrue("Transfered Bytes = " + results.getTotalBucketTransferBytes(), 0 < results.getTotalBucketTransferBytes());
            Set<PartitionRebalanceInfo> detailSet = results.getPartitionRebalanceDetails();
            assertEquals(1, detailSet.size());
            PartitionRebalanceInfo details = detailSet.iterator().next();
            assertEquals(0, details.getBucketCreatesCompleted());
            assertEquals(0, details.getPrimaryTransfersCompleted());
            assertTrue(0 < details.getBucketTransferBytes());
            assertEquals(3, details.getBucketTransfersCompleted());
            Set<PartitionMemberInfo> afterDetails = details.getPartitionMemberDetailsAfter();
            assertEquals(2, afterDetails.size());
            for (PartitionMemberInfo memberDetails : afterDetails) {
                assertEquals(3, memberDetails.getBucketCount());
                assertEquals(3, memberDetails.getPrimaryCount());
            }
            if (!simulate) {
                verifyStats(manager, results);
            }
        }
    });
    if (!simulate) {
        SerializableRunnable checkRedundancyFixed = new SerializableRunnable("checkRedundancyFixed") {

            public void run() {
                Cache cache = getCache();
                Region region = cache.getRegion("region1");
                PartitionRegionInfo details = PartitionRegionHelper.getPartitionRegionInfo(region);
                assertEquals(6, details.getCreatedBucketCount());
                assertEquals(0, details.getActualRedundantCopies());
                assertEquals(0, details.getLowRedundancyBucketCount());
                assertEquals(2, details.getPartitionMemberInfo().size());
                for (PartitionMemberInfo memberDetails : details.getPartitionMemberInfo()) {
                    assertEquals(3, memberDetails.getBucketCount());
                    assertEquals(3, memberDetails.getPrimaryCount());
                }
                // check to make sure that moving buckets didn't close the cache loader
                Bug40228Loader loader = (Bug40228Loader) cache.getRegion("region1").getAttributes().getCacheLoader();
                assertFalse(loader.isClosed());
            }
        };
        vm0.invoke(checkRedundancyFixed);
        vm1.invoke(checkRedundancyFixed);
        SerializableRunnable checkBug40228Fixed = new SerializableRunnable("checkBug40228Fixed") {

            public void run() {
                Cache cache = getCache();
                Bug40228Loader loader = (Bug40228Loader) cache.getRegion("region1").getAttributes().getCacheLoader();
                assertFalse(loader.isClosed());
                // check to make sure that closing the PR closes the cache loader
                cache.getRegion("region1").close();
                assertTrue(loader.isClosed());
            }
        };
        vm0.invoke(checkBug40228Fixed);
        vm1.invoke(checkBug40228Fixed);
    }
}
Also used : Set(java.util.Set) TreeSet(java.util.TreeSet) HashSet(java.util.HashSet) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) PartitionAttributes(org.apache.geode.cache.PartitionAttributes) PartitionRebalanceInfo(org.apache.geode.cache.partition.PartitionRebalanceInfo) Host(org.apache.geode.test.dunit.Host) PartitionedRegionDataStore(org.apache.geode.internal.cache.PartitionedRegionDataStore) ResourceManager(org.apache.geode.cache.control.ResourceManager) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) BucketRegion(org.apache.geode.internal.cache.BucketRegion) PartitionMemberInfo(org.apache.geode.cache.partition.PartitionMemberInfo) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) VM(org.apache.geode.test.dunit.VM) SerializableCallable(org.apache.geode.test.dunit.SerializableCallable) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) BucketRegion(org.apache.geode.internal.cache.BucketRegion) Region(org.apache.geode.cache.Region) RebalanceResults(org.apache.geode.cache.control.RebalanceResults) PartitionRegionInfo(org.apache.geode.cache.partition.PartitionRegionInfo) Cache(org.apache.geode.cache.Cache)
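
Each of the examples on this page calls a doRebalance(simulate, manager) helper that is defined elsewhere in RebalanceOperationDUnitTest and is not shown in these snippets. A minimal sketch of what such a helper could look like, using only the public ResourceManager rebalancing API, is below; the 60-second timeout and the exception wrapping are assumptions, not taken from the original test.

// Requires java.util.concurrent.TimeUnit, java.util.concurrent.TimeoutException and
// org.apache.geode.cache.control.{RebalanceOperation, RebalanceResults, ResourceManager}.
private RebalanceResults doRebalance(boolean simulate, ResourceManager manager) {
    // simulate() computes the planned moves without executing them; start() performs them.
    RebalanceOperation op = simulate
        ? manager.createRebalanceFactory().simulate()
        : manager.createRebalanceFactory().start();
    try {
        // Assumed timeout; the real helper may wait differently.
        return op.getResults(60, TimeUnit.SECONDS);
    } catch (InterruptedException | TimeoutException e) {
        throw new RuntimeException(e);
    }
}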

Example 42 with PartitionAttributes

use of org.apache.geode.cache.PartitionAttributes in project geode by apache.

the class RebalanceOperationDUnitTest method testMoveBucketsNestedPR.

/**
   * Test that we balance the buckets of a nested partitioned region (a subregion of a replicated
   * parent region) across three members, with redundancy.
   */
@Test
public void testMoveBucketsNestedPR() {
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    VM vm2 = host.getVM(2);
    SerializableRunnable createPrRegion = new SerializableRunnable("createRegion") {

        public void run() {
            Region parent;
            Cache cache = getCache();
            {
                AttributesFactory attr = new AttributesFactory();
                attr.setDataPolicy(DataPolicy.REPLICATE);
                parent = cache.createRegion("parent", attr.create());
            }
            AttributesFactory attr = new AttributesFactory();
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(1);
            paf.setRecoveryDelay(-1);
            paf.setStartupRecoveryDelay(-1);
            PartitionAttributes prAttr = paf.create();
            attr.setPartitionAttributes(prAttr);
            parent.createSubregion("region1", attr.create());
        }
    };
    // Create the region in two VMs
    vm0.invoke(createPrRegion);
    vm1.invoke(createPrRegion);
    // Create some buckets
    vm0.invoke(new SerializableRunnable("createSomeBuckets") {

        public void run() {
            Cache cache = getCache();
            Region region = cache.getRegion("parent/region1");
            for (int i = 0; i < 12; i++) {
                region.put(Integer.valueOf(i), "A");
            }
        }
    });
    // Create the region in one more VM.
    vm2.invoke(createPrRegion);
    // Now simulate a rebalance
    final Long totalSize = (Long) vm0.invoke(new SerializableCallable("simulateRebalance") {

        public Object call() {
            Cache cache = getCache();
            ResourceManager manager = cache.getResourceManager();
            RebalanceResults results = doRebalance(false, manager);
            assertEquals(0, results.getTotalBucketCreatesCompleted());
            // We don't know how many primaries will move; it depends on whether
            // the move-bucket code moves the primary or a redundant copy of the bucket.
            // assertIndexDetailsEquals(0, results.getTotalPrimaryTransfersCompleted());
            assertEquals(8, results.getTotalBucketTransfersCompleted());
            assertTrue(0 < results.getTotalBucketTransferBytes());
            Set<PartitionRebalanceInfo> detailSet = results.getPartitionRebalanceDetails();
            assertEquals(1, detailSet.size());
            PartitionRebalanceInfo details = detailSet.iterator().next();
            assertEquals(0, details.getBucketCreatesCompleted());
            assertTrue(0 < details.getBucketTransferBytes());
            assertEquals(8, details.getBucketTransfersCompleted());
            long totalSize = 0;
            Set<PartitionMemberInfo> beforeDetails = details.getPartitionMemberDetailsBefore();
            for (PartitionMemberInfo memberDetails : beforeDetails) {
                totalSize += memberDetails.getSize();
            }
            long afterSize = 0;
            Set<PartitionMemberInfo> afterDetails = details.getPartitionMemberDetailsAfter();
            assertEquals(3, afterDetails.size());
            for (PartitionMemberInfo memberDetails : afterDetails) {
                assertEquals(8, memberDetails.getBucketCount());
                assertEquals(4, memberDetails.getPrimaryCount());
                afterSize += memberDetails.getSize();
            }
            assertEquals(totalSize, afterSize);
            verifyStats(manager, results);
            return Long.valueOf(totalSize);
        }
    });
    SerializableRunnable checkBalance = new SerializableRunnable("checkBalance") {

        public void run() {
            Cache cache = getCache();
            Region region = cache.getRegion("parent/region1");
            PartitionRegionInfo details = PartitionRegionHelper.getPartitionRegionInfo(region);
            assertEquals(12, details.getCreatedBucketCount());
            assertEquals(1, details.getActualRedundantCopies());
            assertEquals(0, details.getLowRedundancyBucketCount());
            LogWriterUtils.getLogWriter().info("details=" + details.getPartitionMemberInfo());
            long afterSize = 0;
            for (PartitionMemberInfo memberDetails : details.getPartitionMemberInfo()) {
                assertEquals(8, memberDetails.getBucketCount());
                assertEquals(4, memberDetails.getPrimaryCount());
                afterSize += memberDetails.getSize();
            }
            assertEquals(totalSize.longValue(), afterSize);
        }
    };
    vm0.invoke(checkBalance);
    vm1.invoke(checkBalance);
    vm2.invoke(checkBalance);
}
Also used : SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) PartitionAttributes(org.apache.geode.cache.PartitionAttributes) PartitionRebalanceInfo(org.apache.geode.cache.partition.PartitionRebalanceInfo) Host(org.apache.geode.test.dunit.Host) ResourceManager(org.apache.geode.cache.control.ResourceManager) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) PartitionMemberInfo(org.apache.geode.cache.partition.PartitionMemberInfo) VM(org.apache.geode.test.dunit.VM) SerializableCallable(org.apache.geode.test.dunit.SerializableCallable) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) BucketRegion(org.apache.geode.internal.cache.BucketRegion) Region(org.apache.geode.cache.Region) RebalanceResults(org.apache.geode.cache.control.RebalanceResults) PartitionRegionInfo(org.apache.geode.cache.partition.PartitionRegionInfo) Cache(org.apache.geode.cache.Cache) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest) Test(org.junit.Test)

Example 43 with PartitionAttributes

use of org.apache.geode.cache.PartitionAttributes in project geode by apache.

the class RebalanceOperationDUnitTest method enforceZone.

/**
   * Test that we correctly use the redundancy-zone property to determine where to place redundant
   * copies of buckets.
   * 
   * @param simulate whether to only simulate the rebalance rather than actually perform it
   */
public void enforceZone(final boolean simulate) {
    try {
        Host host = Host.getHost(0);
        VM vm0 = host.getVM(0);
        VM vm1 = host.getVM(1);
        VM vm2 = host.getVM(2);
        setRedundancyZone(vm0, "A");
        setRedundancyZone(vm1, "A");
        final DistributedMember zoneBMember = setRedundancyZone(vm2, "B");
        SerializableRunnable createPrRegion = new SerializableRunnable("createRegion") {

            public void run() {
                Cache cache = getCache();
                AttributesFactory attr = new AttributesFactory();
                PartitionAttributesFactory paf = new PartitionAttributesFactory();
                paf.setRedundantCopies(1);
                paf.setRecoveryDelay(-1);
                paf.setStartupRecoveryDelay(-1);
                PartitionAttributes prAttr = paf.create();
                attr.setPartitionAttributes(prAttr);
                cache.createRegion("region1", attr.create());
            }
        };
        // Create the region in only 1 VM
        vm0.invoke(createPrRegion);
        // Create some buckets
        vm0.invoke(new SerializableRunnable("createSomeBuckets") {

            public void run() {
                Cache cache = getCache();
                Region region = cache.getRegion("region1");
                region.put(Integer.valueOf(1), "A");
                region.put(Integer.valueOf(2), "A");
                region.put(Integer.valueOf(3), "A");
                region.put(Integer.valueOf(4), "A");
                region.put(Integer.valueOf(5), "A");
                region.put(Integer.valueOf(6), "A");
            }
        });
        SerializableRunnable checkLowRedundancy = new SerializableRunnable("checkLowRedundancy") {

            public void run() {
                Cache cache = getCache();
                Region region = cache.getRegion("region1");
                PartitionRegionInfo details = PartitionRegionHelper.getPartitionRegionInfo(region);
                assertEquals(6, details.getCreatedBucketCount());
                assertEquals(0, details.getActualRedundantCopies());
                assertEquals(6, details.getLowRedundancyBucketCount());
            }
        };
        // make sure we can tell that the buckets have low redundancy
        vm0.invoke(checkLowRedundancy);
        // Create the region in the other VMs (should have no effect)
        vm1.invoke(createPrRegion);
        vm2.invoke(createPrRegion);
        // Make sure we still have low redundancy
        vm0.invoke(checkLowRedundancy);
        // Now simulate a rebalance
        vm0.invoke(new SerializableRunnable("simulateRebalance") {

            public void run() {
                Cache cache = getCache();
                ResourceManager manager = cache.getResourceManager();
                RebalanceResults results = doRebalance(simulate, manager);
                // We expect to satisfy redundancy with the zone B member
                assertEquals(6, results.getTotalBucketCreatesCompleted());
                // 2 primaries will go to vm2, leaving vm0 and vm1 with 2 primaries each
                assertEquals(2, results.getTotalPrimaryTransfersCompleted());
                // We actually *will* transfer 3 buckets to the other member in zone A,
                // because that improves the balance.
                assertEquals(3, results.getTotalBucketTransfersCompleted());
                Set<PartitionRebalanceInfo> detailSet = results.getPartitionRebalanceDetails();
                assertEquals(1, detailSet.size());
                PartitionRebalanceInfo details = detailSet.iterator().next();
                assertEquals(6, details.getBucketCreatesCompleted());
                assertEquals(2, details.getPrimaryTransfersCompleted());
                assertEquals(3, details.getBucketTransfersCompleted());
                Set<PartitionMemberInfo> afterDetails = details.getPartitionMemberDetailsAfter();
                for (PartitionMemberInfo info : afterDetails) {
                    if (info.getDistributedMember().equals(zoneBMember)) {
                        assertEquals(6, info.getBucketCount());
                    } else {
                        assertEquals(3, info.getBucketCount());
                    }
                    assertEquals(2, info.getPrimaryCount());
                }
                // assertIndexDetailsEquals(0, details.getBucketTransferBytes());
                if (!simulate) {
                    verifyStats(manager, results);
                }
            }
        });
        if (!simulate) {
            checkBucketCount(vm0, "region1", 3);
            checkBucketCount(vm1, "region1", 3);
            checkBucketCount(vm2, "region1", 6);
        }
    } finally {
        disconnectFromDS();
        Invoke.invokeInEveryVM(new SerializableRunnable() {

            public void run() {
                // clear the redundancy zone setting
                disconnectFromDS();
            }
        });
    }
}
Also used : Set(java.util.Set) TreeSet(java.util.TreeSet) HashSet(java.util.HashSet) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) PartitionAttributes(org.apache.geode.cache.PartitionAttributes) PartitionRebalanceInfo(org.apache.geode.cache.partition.PartitionRebalanceInfo) Host(org.apache.geode.test.dunit.Host) ResourceManager(org.apache.geode.cache.control.ResourceManager) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) PartitionMemberInfo(org.apache.geode.cache.partition.PartitionMemberInfo) VM(org.apache.geode.test.dunit.VM) InternalDistributedMember(org.apache.geode.distributed.internal.membership.InternalDistributedMember) DistributedMember(org.apache.geode.distributed.DistributedMember) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) BucketRegion(org.apache.geode.internal.cache.BucketRegion) Region(org.apache.geode.cache.Region) RebalanceResults(org.apache.geode.cache.control.RebalanceResults) PartitionRegionInfo(org.apache.geode.cache.partition.PartitionRegionInfo) Cache(org.apache.geode.cache.Cache)
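
The setRedundancyZone(vm, zone) helper used above is also not shown. Configuring a redundancy zone comes down to setting the redundancy-zone distributed system property before the member connects; the sketch below is a plausible reconstruction under that assumption (getSystem(props) is assumed to come from the DUnit test base class, and the helper returns the member id the way enforceZone expects).

// Hypothetical reconstruction, not the original helper. Requires java.util.Properties,
// org.apache.geode.distributed.ConfigurationProperties and org.apache.geode.distributed.DistributedMember.
private DistributedMember setRedundancyZone(VM vm, final String zone) {
    return (DistributedMember) vm.invoke(new SerializableCallable("setRedundancyZone") {
        public Object call() {
            Properties props = new Properties();
            // ConfigurationProperties.REDUNDANCY_ZONE is the "redundancy-zone" property name.
            props.setProperty(ConfigurationProperties.REDUNDANCY_ZONE, zone);
            // Connect (or reconnect) this member with the given properties.
            return getSystem(props).getDistributedMember();
        }
    });
}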

Example 44 with PartitionAttributes

use of org.apache.geode.cache.PartitionAttributes in project geode by apache.

the class RebalanceOperationDUnitTest method testMembershipChange.

/**
   * Test that the rebalancing operation picks up on a concurrent membership change
   */
@Test
public void testMembershipChange() {
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    final VM vm2 = host.getVM(2);
    final SerializableRunnable createPrRegion = new SerializableRunnable("createRegion") {

        public void run() {
            Cache cache = getCache();
            AttributesFactory attr = new AttributesFactory();
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(0);
            paf.setRecoveryDelay(-1);
            paf.setStartupRecoveryDelay(-1);
            PartitionAttributes prAttr = paf.create();
            attr.setPartitionAttributes(prAttr);
            cache.createRegion("region1", attr.create());
        }
    };
    // Create the region in only 1 VM
    vm0.invoke(createPrRegion);
    // Create some buckets
    vm0.invoke(new SerializableRunnable("createSomeBuckets") {

        public void run() {
            Cache cache = getCache();
            Region region = cache.getRegion("region1");
            region.put(Integer.valueOf(1), "A");
            region.put(Integer.valueOf(2), "A");
            region.put(Integer.valueOf(3), "A");
            region.put(Integer.valueOf(4), "A");
            region.put(Integer.valueOf(5), "A");
            region.put(Integer.valueOf(6), "A");
        }
    });
    // Create the region in the other VM (should have no effect)
    vm1.invoke(createPrRegion);
    // Now do a rebalance, but start another member in the middle
    vm0.invoke(new SerializableCallable("D rebalance") {

        public Object call() throws Exception {
            GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
            InternalResourceManager manager = cache.getInternalResourceManager();
            final CountDownLatch rebalancingStarted = new CountDownLatch(1);
            final CountDownLatch memberAdded = new CountDownLatch(1);
            InternalResourceManager.setResourceObserver(new ResourceObserverAdapter() {

                boolean firstBucket = true;

                @Override
                public void movingBucket(Region region, int bucketId, DistributedMember source, DistributedMember target) {
                    if (firstBucket) {
                        firstBucket = false;
                        vm2.invoke(createPrRegion);
                    }
                }
            });
            RebalanceResults results = doRebalance(false, manager);
            assertEquals(0, results.getTotalBucketCreatesCompleted());
            assertEquals(0, results.getTotalPrimaryTransfersCompleted());
            assertEquals(4, results.getTotalBucketTransfersCompleted());
            assertTrue(0 < results.getTotalBucketTransferBytes());
            Set<PartitionRebalanceInfo> detailSet = results.getPartitionRebalanceDetails();
            assertEquals(1, detailSet.size());
            PartitionRebalanceInfo details = detailSet.iterator().next();
            assertEquals(0, details.getBucketCreatesCompleted());
            assertEquals(0, details.getPrimaryTransfersCompleted());
            assertTrue(0 < details.getBucketTransferBytes());
            assertEquals(4, details.getBucketTransfersCompleted());
            Set<PartitionMemberInfo> beforeDetails = details.getPartitionMemberDetailsBefore();
            // there should have only been 2 members when the rebalancing started.
            assertEquals(2, beforeDetails.size());
            // now that the rebalance has completed, there should be 3 members.
            Set<PartitionMemberInfo> afterDetails = details.getPartitionMemberDetailsAfter();
            assertEquals(3, afterDetails.size());
            for (PartitionMemberInfo memberDetails : afterDetails) {
                assertEquals(2, memberDetails.getBucketCount());
                assertEquals(2, memberDetails.getPrimaryCount());
            }
            verifyStats(manager, results);
            InternalResourceManager mgr = (InternalResourceManager) manager;
            ResourceManagerStats stats = mgr.getStats();
            assertEquals(1, stats.getRebalanceMembershipChanges());
            return null;
        }
    });
}
Also used : Set(java.util.Set) TreeSet(java.util.TreeSet) HashSet(java.util.HashSet) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) PartitionAttributes(org.apache.geode.cache.PartitionAttributes) PartitionRebalanceInfo(org.apache.geode.cache.partition.PartitionRebalanceInfo) Host(org.apache.geode.test.dunit.Host) CountDownLatch(java.util.concurrent.CountDownLatch) ResourceObserverAdapter(org.apache.geode.internal.cache.control.InternalResourceManager.ResourceObserverAdapter) TimeoutException(java.util.concurrent.TimeoutException) CancellationException(java.util.concurrent.CancellationException) CacheWriterException(org.apache.geode.cache.CacheWriterException) CacheLoaderException(org.apache.geode.cache.CacheLoaderException) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) PartitionMemberInfo(org.apache.geode.cache.partition.PartitionMemberInfo) VM(org.apache.geode.test.dunit.VM) SerializableCallable(org.apache.geode.test.dunit.SerializableCallable) InternalDistributedMember(org.apache.geode.distributed.internal.membership.InternalDistributedMember) DistributedMember(org.apache.geode.distributed.DistributedMember) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) BucketRegion(org.apache.geode.internal.cache.BucketRegion) Region(org.apache.geode.cache.Region) GemFireCacheImpl(org.apache.geode.internal.cache.GemFireCacheImpl) RebalanceResults(org.apache.geode.cache.control.RebalanceResults) Cache(org.apache.geode.cache.Cache) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest) Test(org.junit.Test)

Example 45 with PartitionAttributes

use of org.apache.geode.cache.PartitionAttributes in project geode by apache.

the class RebalanceOperationDUnitTest method runTestWaitForOperation.

/**
   * Test to ensure that we wait for in progress write operations before moving a primary.
   * 
   * @throws Exception
   */
public void runTestWaitForOperation(final Operation op) throws Exception {
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    SerializableRunnable createPrRegion = new SerializableRunnable("createRegion") {

        public void run() {
            Cache cache = getCache();
            AttributesFactory attr = new AttributesFactory();
            attr.setCacheLoader(new CacheLoader() {

                public Object load(LoaderHelper helper) throws CacheLoaderException {
                    return "anobject";
                }

                public void close() {
                }
            });
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(1);
            paf.setRecoveryDelay(-1);
            paf.setStartupRecoveryDelay(-1);
            paf.setLocalMaxMemory(100);
            PartitionAttributes prAttr = paf.create();
            attr.setPartitionAttributes(prAttr);
            cache.createRegion("region1", attr.create());
        }
    };
    // Create a region in this VM with a cache loader;
    // a blocking cache listener is added below
    Cache cache = getCache();
    AttributesFactory attr = new AttributesFactory();
    attr.setCacheLoader(new CacheLoader() {

        public Object load(LoaderHelper helper) throws CacheLoaderException {
            return "anobject";
        }

        public void close() {
        }
    });
    PartitionAttributesFactory paf = new PartitionAttributesFactory();
    paf.setRedundantCopies(1);
    paf.setRecoveryDelay(-1);
    paf.setStartupRecoveryDelay(-1);
    paf.setLocalMaxMemory(100);
    PartitionAttributes prAttr = paf.create();
    attr.setPartitionAttributes(prAttr);
    final Region region = cache.createRegion("region1", attr.create());
    // create some buckets
    region.put(Integer.valueOf(1), "A");
    region.put(Integer.valueOf(2), "A");
    BlockingCacheListener cacheWriter = new BlockingCacheListener(2);
    region.getAttributesMutator().addCacheListener(cacheWriter);
    // Start two threads doing operations, one on each bucket.
    // The threads will block in the blocking cache listener. The rebalance operation
    // will try to move one of these buckets, but it shouldn't be able to because of
    // the in-progress operation.
    Thread thread1 = new Thread() {

        public void run() {
            op.execute(region, Integer.valueOf(1));
        }
    };
    thread1.start();
    Thread thread2 = new Thread() {

        public void run() {
            op.execute(region, Integer.valueOf(2));
        }
    };
    thread2.start();
    cacheWriter.waitForOperationsStarted();
    SerializableRunnable checkLowRedundancy = new SerializableRunnable("checkLowRedundancy") {

        public void run() {
            Cache cache = getCache();
            Region region = cache.getRegion("region1");
            PartitionRegionInfo details = PartitionRegionHelper.getPartitionRegionInfo(region);
            assertEquals(2, details.getCreatedBucketCount());
            assertEquals(0, details.getActualRedundantCopies());
            assertEquals(2, details.getLowRedundancyBucketCount());
        }
    };
    // make sure we can tell that the buckets have low redundancy
    checkLowRedundancy.run();
    // Create the region in the other VM (should have no effect)
    vm1.invoke(createPrRegion);
    // Make sure we still have low redundancy
    checkLowRedundancy.run();
    ResourceManager manager = cache.getResourceManager();
    RebalanceOperation rebalance = manager.createRebalanceFactory().start();
    try {
        rebalance.getResults(5, TimeUnit.SECONDS);
        fail("Operation should not have completed");
    } catch (TimeoutException expected) {
        // expected: the rebalance cannot complete while the operations are still in progress
    }
    cacheWriter.release();
    LogWriterUtils.getLogWriter().info("starting wait for rebalance.  Will wait for " + MAX_WAIT + " seconds");
    RebalanceResults results = rebalance.getResults(MAX_WAIT, TimeUnit.SECONDS);
    assertEquals(2, results.getTotalBucketCreatesCompleted());
    assertEquals(1, results.getTotalPrimaryTransfersCompleted());
    assertEquals(0, results.getTotalBucketTransferBytes());
    assertEquals(0, results.getTotalBucketTransfersCompleted());
    Set<PartitionRebalanceInfo> detailSet = results.getPartitionRebalanceDetails();
    assertEquals(1, detailSet.size());
    PartitionRebalanceInfo details = detailSet.iterator().next();
    assertEquals(2, details.getBucketCreatesCompleted());
    assertEquals(1, details.getPrimaryTransfersCompleted());
    assertEquals(0, details.getBucketTransferBytes());
    assertEquals(0, details.getBucketTransfersCompleted());
    Set<PartitionMemberInfo> afterDetails = details.getPartitionMemberDetailsAfter();
    assertEquals(2, afterDetails.size());
    for (PartitionMemberInfo memberDetails : afterDetails) {
        assertEquals(2, memberDetails.getBucketCount());
        assertEquals(1, memberDetails.getPrimaryCount());
        SerializableRunnable checkRedundancyFixed = new SerializableRunnable("checkRedundancyFixed") {

            public void run() {
                Cache cache = getCache();
                Region region = cache.getRegion("region1");
                PartitionRegionInfo details = PartitionRegionHelper.getPartitionRegionInfo(region);
                assertEquals(2, details.getCreatedBucketCount());
                assertEquals(1, details.getActualRedundantCopies());
                assertEquals(0, details.getLowRedundancyBucketCount());
            }
        };
        checkRedundancyFixed.run();
        vm1.invoke(checkRedundancyFixed);
    }
}
Also used : RebalanceOperation(org.apache.geode.cache.control.RebalanceOperation) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) PartitionAttributes(org.apache.geode.cache.PartitionAttributes) PartitionRebalanceInfo(org.apache.geode.cache.partition.PartitionRebalanceInfo) Host(org.apache.geode.test.dunit.Host) ResourceManager(org.apache.geode.cache.control.ResourceManager) LoaderHelper(org.apache.geode.cache.LoaderHelper) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) PartitionMemberInfo(org.apache.geode.cache.partition.PartitionMemberInfo) CacheLoaderException(org.apache.geode.cache.CacheLoaderException) VM(org.apache.geode.test.dunit.VM) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) BucketRegion(org.apache.geode.internal.cache.BucketRegion) Region(org.apache.geode.cache.Region) CacheLoader(org.apache.geode.cache.CacheLoader) RebalanceResults(org.apache.geode.cache.control.RebalanceResults) PartitionRegionInfo(org.apache.geode.cache.partition.PartitionRegionInfo) Cache(org.apache.geode.cache.Cache) TimeoutException(java.util.concurrent.TimeoutException)
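
BlockingCacheListener is a test helper class that is not included in the snippet. A plausible minimal sketch, assuming it simply parks incoming writes on a latch until the test releases them (the constructor argument, waitForOperationsStarted(), and release() mirror how it is used above; the exact callbacks it blocks in are an assumption):

// Hypothetical reconstruction. Requires java.util.concurrent.CountDownLatch,
// org.apache.geode.cache.EntryEvent and org.apache.geode.cache.util.CacheListenerAdapter.
public static class BlockingCacheListener extends CacheListenerAdapter<Object, Object> {
    private final CountDownLatch started;
    private final CountDownLatch released = new CountDownLatch(1);

    public BlockingCacheListener(int expectedOperations) {
        this.started = new CountDownLatch(expectedOperations);
    }

    private void block() {
        started.countDown();
        try {
            // Hold the write in progress until the test calls release().
            released.await();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }

    @Override
    public void afterCreate(EntryEvent<Object, Object> event) {
        block();
    }

    @Override
    public void afterUpdate(EntryEvent<Object, Object> event) {
        block();
    }

    public void waitForOperationsStarted() throws InterruptedException {
        // Wait until the expected number of operations have reached the listener.
        started.await();
    }

    public void release() {
        released.countDown();
    }
}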

Aggregations

PartitionAttributes (org.apache.geode.cache.PartitionAttributes): 129
PartitionAttributesFactory (org.apache.geode.cache.PartitionAttributesFactory): 117
AttributesFactory (org.apache.geode.cache.AttributesFactory): 107
Region (org.apache.geode.cache.Region): 82
Test (org.junit.Test): 67
Cache (org.apache.geode.cache.Cache): 66
PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion): 61
SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable): 49
Host (org.apache.geode.test.dunit.Host): 48
VM (org.apache.geode.test.dunit.VM): 48
DistributedTest (org.apache.geode.test.junit.categories.DistributedTest): 47
CacheSerializableRunnable (org.apache.geode.cache30.CacheSerializableRunnable): 38
RegionAttributes (org.apache.geode.cache.RegionAttributes): 28
CacheException (org.apache.geode.cache.CacheException): 26
LocalRegion (org.apache.geode.internal.cache.LocalRegion): 26
SerializableCallable (org.apache.geode.test.dunit.SerializableCallable): 21
BucketRegion (org.apache.geode.internal.cache.BucketRegion): 19
FixedPartitionAttributes (org.apache.geode.cache.FixedPartitionAttributes): 18
RebalanceResults (org.apache.geode.cache.control.RebalanceResults): 16
HashSet (java.util.HashSet): 15
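
As the aggregation counts above suggest, nearly every example follows the same pattern: configure a PartitionAttributesFactory, create an immutable PartitionAttributes, and hand it to an AttributesFactory before creating the region. A minimal standalone sketch of that shared pattern, assuming an already-created Cache (the region name and the specific settings are illustrative only):

import org.apache.geode.cache.AttributesFactory;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.PartitionAttributes;
import org.apache.geode.cache.PartitionAttributesFactory;
import org.apache.geode.cache.Region;

public final class PartitionAttributesExample {
    public static Region<Object, Object> createPartitionedRegion(Cache cache) {
        PartitionAttributesFactory<Object, Object> paf = new PartitionAttributesFactory<>();
        paf.setRedundantCopies(1);       // keep one backup copy of each bucket
        paf.setRecoveryDelay(-1);        // do not automatically restore redundancy after a member leaves
        paf.setStartupRecoveryDelay(-1); // do not automatically restore redundancy when a member starts
        PartitionAttributes<Object, Object> prAttr = paf.create();

        AttributesFactory<Object, Object> attr = new AttributesFactory<>();
        attr.setPartitionAttributes(prAttr);
        return cache.createRegion("exampleRegion", attr.create());
    }
}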