
Example 16 with CompositeDirector

use of org.apache.geode.internal.cache.partitioned.rebalance.CompositeDirector in project geode by apache.

the class RebalanceOperationImpl method scheduleRebalance.

private void scheduleRebalance() {
    ResourceManagerStats stats = cache.getInternalResourceManager().getStats();
    long start = stats.startRebalance();
    try {
        for (PartitionedRegion region : cache.getPartitionedRegions()) {
            if (cancelled.get()) {
                break;
            }
            try {
                // Colocated regions will be rebalanced as part of rebalancing their leader
                if (region.getColocatedWith() == null && filter.include(region)) {
                    if (region.isFixedPartitionedRegion()) {
                        if (Boolean.getBoolean(DistributionConfig.GEMFIRE_PREFIX + "DISABLE_MOVE_PRIMARIES_ON_STARTUP")) {
                            PartitionedRegionRebalanceOp prOp = new PartitionedRegionRebalanceOp(region, simulation, new CompositeDirector(false, false, false, true), true, true, cancelled, stats);
                            this.futureList.add(submitRebalanceTask(prOp, start));
                        } else {
                            continue;
                        }
                    } else {
                        PartitionedRegionRebalanceOp prOp = new PartitionedRegionRebalanceOp(region, simulation, new CompositeDirector(true, true, true, true), true, true, cancelled, stats);
                        this.futureList.add(submitRebalanceTask(prOp, start));
                    }
                }
            } catch (RegionDestroyedException ignore) {
            // ignore, go on to the next region
            }
        }
    } finally {
        if (pendingTasks == 0) {
            // if we didn't submit any tasks, end the rebalance now.
            stats.endRebalance(start);
        }
    }
}
Also used : PartitionedRegionRebalanceOp(org.apache.geode.internal.cache.partitioned.PartitionedRegionRebalanceOp) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) CompositeDirector(org.apache.geode.internal.cache.partitioned.rebalance.CompositeDirector)
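The four boolean arguments to the CompositeDirector constructor select which rebalancing phases the director performs. Judging from how the flags are combined across these examples, they appear to correspond, in order, to removing over-redundancy, satisfying redundancy, moving buckets, and moving primaries; verify that order against the class itself before relying on it. A minimal sketch of how the combinations seen here map to common scenarios (the helper class and method names are illustrative, not part of Geode):

import org.apache.geode.internal.cache.partitioned.rebalance.CompositeDirector;
import org.apache.geode.internal.cache.partitioned.rebalance.RebalanceDirector;

// Illustrative helper only. The flag order (removeOverRedundancy, satisfyRedundancy,
// moveBuckets, movePrimaries) is inferred from the calls in the surrounding examples.
final class DirectorChoices {

    // Full rebalance: all four phases enabled, as used for ordinary partitioned regions.
    static RebalanceDirector fullRebalance() {
        return new CompositeDirector(true, true, true, true);
    }

    // Only move primaries, as used for fixed partitioned regions above.
    static RebalanceDirector movePrimariesOnly() {
        return new CompositeDirector(false, false, false, true);
    }

    // Redundancy recovery: restore redundancy (and optionally primaries) without moving buckets.
    static RebalanceDirector recoverRedundancy(boolean movePrimaries) {
        return new CompositeDirector(true, true, false, movePrimaries);
    }
}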

Example 17 with CompositeDirector

use of org.apache.geode.internal.cache.partitioned.rebalance.CompositeDirector in project geode by apache.

the class PRHARedundancyProvider method scheduleRedundancyRecovery.

/**
   * Schedule a task to perform redundancy recovery for a new node or for a departed node.
   */
public void scheduleRedundancyRecovery(Object failedMemId) {
    final boolean isStartup = failedMemId == null;
    final InternalCache cache = this.prRegion.getCache();
    final int redundantCopies = PRHARedundancyProvider.this.prRegion.getRedundantCopies();
    final long delay;
    final boolean movePrimaries;
    if (isStartup) {
        delay = this.prRegion.getPartitionAttributes().getStartupRecoveryDelay();
        movePrimaries = !Boolean.getBoolean(DistributionConfig.GEMFIRE_PREFIX + "DISABLE_MOVE_PRIMARIES_ON_STARTUP");
    } else {
        delay = this.prRegion.getPartitionAttributes().getRecoveryDelay();
        movePrimaries = false;
    }
    final boolean requiresRedundancyRecovery = delay >= 0;
    if (!requiresRedundancyRecovery) {
        return;
    }
    if (!PRHARedundancyProvider.this.prRegion.isDataStore()) {
        return;
    }
    Runnable task = new RecoveryRunnable(this) {

        @Override
        public void run2() {
            try {
                final boolean isFixedPartitionedRegion = PRHARedundancyProvider.this.prRegion.isFixedPartitionedRegion();
                // Fix for 43582 - always replace offline data for fixed partitioned
                // regions - this guarantees we create the buckets we are supposed to
                // create on this node.
                boolean replaceOfflineData = isFixedPartitionedRegion || !isStartup;
                RebalanceDirector director;
                if (isFixedPartitionedRegion) {
                    director = new FPRDirector(true, movePrimaries);
                } else {
                    director = new CompositeDirector(true, true, false, movePrimaries);
                }
                final PartitionedRegionRebalanceOp rebalance = new PartitionedRegionRebalanceOp(PRHARedundancyProvider.this.prRegion, false, director, replaceOfflineData, false);
                long start = PRHARedundancyProvider.this.prRegion.getPrStats().startRecovery();
                if (isFixedPartitionedRegion) {
                    rebalance.executeFPA();
                } else {
                    rebalance.execute();
                }
                PRHARedundancyProvider.this.prRegion.getPrStats().endRecovery(start);
                PRHARedundancyProvider.this.recoveryFuture = null;
            } catch (CancelException e) {
                logger.debug("Cache closed while recovery in progress");
            } catch (RegionDestroyedException e) {
                logger.debug("Region destroyed while recovery in progress");
            } catch (Exception e) {
                logger.error(LocalizedMessage.create(LocalizedStrings.PRHARedundancyProvider_UNEXPECTED_EXCEPTION_DURING_BUCKET_RECOVERY), e);
            }
        }
    };
    synchronized (this.shutdownLock) {
        // possible fix for bug 41094
        if (!this.shutdown) {
            try {
                if (logger.isDebugEnabled()) {
                    if (isStartup) {
                        logger.debug(this.prRegion + " scheduling redundancy recovery in {} ms", delay);
                    } else {
                        logger.debug("prRegion scheduling redundancy recovery after departure/crash/error in {} in {} ms", failedMemId, delay);
                    }
                }
                recoveryFuture = this.recoveryExecutor.schedule(task, delay, TimeUnit.MILLISECONDS);
            } catch (RejectedExecutionException e) {
            // ok, the executor is shutting down.
            }
        }
    }
}
Also used : RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) CompositeDirector(org.apache.geode.internal.cache.partitioned.rebalance.CompositeDirector) PartitionedRegionStorageException(org.apache.geode.cache.PartitionedRegionStorageException) RejectedExecutionException(java.util.concurrent.RejectedExecutionException) CacheClosedException(org.apache.geode.cache.CacheClosedException) CancelException(org.apache.geode.CancelException) PartitionOfflineException(org.apache.geode.cache.persistence.PartitionOfflineException) RebalanceDirector(org.apache.geode.internal.cache.partitioned.rebalance.RebalanceDirector) FPRDirector(org.apache.geode.internal.cache.partitioned.rebalance.FPRDirector)
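Whether this task is scheduled at all depends on the region's recovery delays: a negative delay disables automatic recovery, 0 triggers it immediately, and a positive value defers it by that many milliseconds. A minimal sketch of configuring those delays through the public API, assuming a simple partitioned region (the region name and delay values are illustrative):

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.PartitionAttributesFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class RecoveryDelayConfig {
    public static void main(String[] args) {
        Cache cache = new CacheFactory().create();
        PartitionAttributesFactory<String, String> paf = new PartitionAttributesFactory<>();
        paf.setRedundantCopies(1);
        // Recover redundancy 10 seconds after a member departs; -1 would disable it.
        paf.setRecoveryDelay(10_000);
        // Recover immediately when a new member starts hosting data.
        paf.setStartupRecoveryDelay(0);
        Region<String, String> region =
            cache.<String, String>createRegionFactory(RegionShortcut.PARTITION)
                .setPartitionAttributes(paf.create())
                .create("exampleRegion"); // region name is illustrative
        cache.close();
    }
}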

Example 18 with CompositeDirector

use of org.apache.geode.internal.cache.partitioned.rebalance.CompositeDirector in project geode by apache.

the class PartitionedRegionLoadModelJUnitTest method testColocationIgnoreEnforceLocalMaxMemory.

/**
   * Test that each region individually honors its enforce-local-max-memory flag.
   */
@Test
public void testColocationIgnoreEnforceLocalMaxMemory() throws Exception {
    PartitionedRegionLoadModel model = new PartitionedRegionLoadModel(bucketOperator, 1, 4, getAddressComparor(false), Collections.<InternalDistributedMember>emptySet(), null);
    InternalDistributedMember member1 = new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 1);
    InternalDistributedMember member2 = new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 2);
    InternalDistributedMember member3 = new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 3);
    // Create some buckets with low redundancy on member 1
    PartitionMemberInfoImpl details1 = buildDetails(member1, 500, 500, new long[] { 1, 1, 1, 1 }, new long[] { 1, 1, 1, 1 });
    PartitionMemberInfoImpl details2 = buildDetails(member2, 500, 500, new long[] { 0, 0, 0, 0 }, new long[] { 0, 0, 0, 0 });
    model.addRegion("a", Arrays.asList(details1, details2), new FakeOfflineDetails(), true);
    // Member 2 has a local max memory (LMM) of 2, so it should accept only 2 buckets
    PartitionMemberInfoImpl bDetails1 = buildDetails(member1, 2, 2, new long[] { 1, 1, 1, 1 }, new long[] { 1, 1, 1, 1 });
    PartitionMemberInfoImpl bDetails2 = buildDetails(member2, 2, 2, new long[] { 0, 0, 0, 0 }, new long[] { 0, 0, 0, 0 });
    model.addRegion("b", Arrays.asList(bDetails1, bDetails2), new FakeOfflineDetails(), false);
    assertEquals(6, doMoves(new CompositeDirector(true, true, false, true), model));
    // Everything should be created on member2
    Set<Create> expectedCreates = new HashSet<Create>();
    expectedCreates.add(new Create(member2, 0));
    expectedCreates.add(new Create(member2, 1));
    expectedCreates.add(new Create(member2, 2));
    expectedCreates.add(new Create(member2, 3));
    assertEquals(expectedCreates, new HashSet(bucketOperator.creates));
    Set<Move> expectedMoves = new HashSet<Move>();
    expectedMoves.add(new Move(member1, member2));
    expectedMoves.add(new Move(member1, member2));
    assertEquals(expectedMoves, new HashSet(bucketOperator.primaryMoves));
}
Also used : PartitionedRegionLoadModel(org.apache.geode.internal.cache.partitioned.rebalance.PartitionedRegionLoadModel) InternalDistributedMember(org.apache.geode.distributed.internal.membership.InternalDistributedMember) CompositeDirector(org.apache.geode.internal.cache.partitioned.rebalance.CompositeDirector) HashSet(java.util.HashSet) UnitTest(org.apache.geode.test.junit.categories.UnitTest) Test(org.junit.Test)
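The doMoves helper these tests call is not shown on this page. A rough sketch of what such a helper can look like, assuming the RebalanceDirector interface exposes initialize(model) and nextStep() as suggested by its use elsewhere in this package (the real test helper also records operations through bucketOperator):

// Sketch only: drive a director against a load model and count the steps it takes.
private int doMoves(RebalanceDirector director, PartitionedRegionLoadModel model) {
    int moveCount = 0;
    director.initialize(model);      // hand the director the current load model
    while (director.nextStep()) {    // each step performs one create or move
        moveCount++;
    }
    return moveCount;
}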

Example 19 with CompositeDirector

use of org.apache.geode.internal.cache.partitioned.rebalance.CompositeDirector in project geode by apache.

the class PartitionedRegionLoadModelJUnitTest method testMoveBucketsWithWeights.

/**
   * Test to make sure that we honor the weight of a node while moving buckets.
   */
@Test
public void testMoveBucketsWithWeights() throws Exception {
    PartitionedRegionLoadModel model = new PartitionedRegionLoadModel(bucketOperator, 0, 6, getAddressComparor(false), Collections.<InternalDistributedMember>emptySet(), null);
    InternalDistributedMember member1 = new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 1);
    InternalDistributedMember member2 = new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 2);
    // Create some imbalanced nodes
    PartitionMemberInfoImpl details1 = buildDetails(member1, 250, 250, new long[] { 1, 1, 1, 1, 1, 1 }, new long[] { 1, 1, 1, 1, 1, 1 });
    PartitionMemberInfoImpl details2 = buildDetails(member2, 500, 500, new long[] { 0, 0, 0, 0, 0, 0 }, new long[] { 0, 0, 0, 0, 0, 0 });
    model.addRegion("a", Arrays.asList(details1, details2), new FakeOfflineDetails(), true);
    assertEquals(4, doMoves(new CompositeDirector(false, false, true, true), model));
    assertEquals(Collections.emptyList(), bucketOperator.creates);
    assertEquals(Collections.emptyList(), bucketOperator.primaryMoves);
    // Four of the buckets should move to member2, because
    // member2 has twice the weight of member1.
    List<Move> expectedMoves = new ArrayList<Move>();
    expectedMoves.add(new Move(member1, member2));
    expectedMoves.add(new Move(member1, member2));
    expectedMoves.add(new Move(member1, member2));
    expectedMoves.add(new Move(member1, member2));
    assertEquals(expectedMoves, bucketOperator.bucketMoves);
}
Also used : PartitionedRegionLoadModel(org.apache.geode.internal.cache.partitioned.rebalance.PartitionedRegionLoadModel) InternalDistributedMember(org.apache.geode.distributed.internal.membership.InternalDistributedMember) ArrayList(java.util.ArrayList) CompositeDirector(org.apache.geode.internal.cache.partitioned.rebalance.CompositeDirector) UnitTest(org.apache.geode.test.junit.categories.UnitTest) Test(org.junit.Test)
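The expected count of four moves follows from weight-proportional targets: the total load weight is 250 + 500 = 750, so member2's share of the 6 buckets is 6 * 500 / 750 = 4 and member1 keeps 2; since all 6 buckets start on member1, four bucket moves are required. A small illustrative calculation (not Geode code):

// Illustrative arithmetic only: weight-proportional bucket targets.
static int expectedBucketMoves() {
    long weight1 = 250, weight2 = 500;   // the loads passed to buildDetails above
    int totalBuckets = 6;
    long totalWeight = weight1 + weight2;                         // 750
    long targetOnMember2 = totalBuckets * weight2 / totalWeight;  // 6 * 500 / 750 = 4
    // All 6 buckets start on member1, so 4 of them must move to member2.
    return (int) targetOnMember2;
}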

Example 20 with CompositeDirector

use of org.apache.geode.internal.cache.partitioned.rebalance.CompositeDirector in project geode by apache.

the class PartitionedRegionLoadModelJUnitTest method testBug39953.

/**
   * Test a case where we seem to get into an infinite loop while balancing primaries.
   */
@Test
public void testBug39953() throws Exception {
    PartitionedRegionLoadModel model = new PartitionedRegionLoadModel(bucketOperator, 2, 113, getAddressComparor(false), Collections.<InternalDistributedMember>emptySet(), null);
    InternalDistributedMember member1 = new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 1);
    InternalDistributedMember member2 = new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 2);
    InternalDistributedMember member3 = new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 3);
    InternalDistributedMember member4 = new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 4);
    // Create some imbalanced primaries
    PartitionMemberInfoImpl details1 = buildDetails(member1, 216, 216, new long[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, new long[] { 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0 });
    PartitionMemberInfoImpl details2 = buildDetails(member2, 216, 216, new long[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, new long[] { 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 });
    PartitionMemberInfoImpl details3 = buildDetails(member3, 216, 216, new long[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, new long[] { 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1 });
    PartitionMemberInfoImpl details4 = buildDetails(member4, 216, 216, new long[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, new long[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0 });
    model.addRegion("a", Arrays.asList(details1, details2, details3, details4), new FakeOfflineDetails(), true);
    assertEquals(0, doMoves(new CompositeDirector(false, false, false, true), model));
    assertEquals(Collections.emptyList(), bucketOperator.creates);
    assertEquals(Collections.emptyList(), bucketOperator.primaryMoves);
}
Also used : PartitionedRegionLoadModel(org.apache.geode.internal.cache.partitioned.rebalance.PartitionedRegionLoadModel) InternalDistributedMember(org.apache.geode.distributed.internal.membership.InternalDistributedMember) CompositeDirector(org.apache.geode.internal.cache.partitioned.rebalance.CompositeDirector) UnitTest(org.apache.geode.test.junit.categories.UnitTest) Test(org.junit.Test)

Aggregations

CompositeDirector (org.apache.geode.internal.cache.partitioned.rebalance.CompositeDirector): 35 usages
InternalDistributedMember (org.apache.geode.distributed.internal.membership.InternalDistributedMember): 33 usages
PartitionedRegionLoadModel (org.apache.geode.internal.cache.partitioned.rebalance.PartitionedRegionLoadModel): 33 usages
UnitTest (org.apache.geode.test.junit.categories.UnitTest): 32 usages
Test (org.junit.Test): 32 usages
ArrayList (java.util.ArrayList): 19 usages
HashSet (java.util.HashSet): 9 usages
PartitionMemberInfo (org.apache.geode.cache.partition.PartitionMemberInfo): 7 usages
Map (java.util.Map): 2 usages
RegionDestroyedException (org.apache.geode.cache.RegionDestroyedException): 2 usages
Completion (org.apache.geode.internal.cache.partitioned.rebalance.BucketOperator.Completion): 2 usages
PersistentMemberID (org.apache.geode.internal.cache.persistence.PersistentMemberID): 2 usages
Ignore (org.junit.Ignore): 2 usages
IOException (java.io.IOException): 1 usage
UnknownHostException (java.net.UnknownHostException): 1 usage
Random (java.util.Random): 1 usage
RejectedExecutionException (java.util.concurrent.RejectedExecutionException): 1 usage
CancelException (org.apache.geode.CancelException): 1 usage
CacheClosedException (org.apache.geode.cache.CacheClosedException): 1 usage
PartitionedRegionStorageException (org.apache.geode.cache.PartitionedRegionStorageException): 1 usage