Usage of org.apache.geode.internal.cache.partitioned.rebalance.PartitionedRegionLoadModel in the Apache Geode project.
From class PartitionedRegionLoadModelJUnitTest, method testMoveBucketsWithSizeLimits.
/**
 * Test to make sure that moving buckets honors size restrictions for VMs.
 */
@Test
public void testMoveBucketsWithSizeLimits() throws Exception {
// Model with redundancy 0 and 6 buckets; no critical members, no offline members.
PartitionedRegionLoadModel model = new PartitionedRegionLoadModel(bucketOperator, 0, 6, getAddressComparor(false), Collections.<InternalDistributedMember>emptySet(), null);
InternalDistributedMember member1 = new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 1);
InternalDistributedMember member2 = new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 2);
InternalDistributedMember member3 = new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 3);
// Create some imbalanced nodes: member1 holds three size-30 buckets,
// member2 holds three size-10 buckets, member3 holds nothing.
PartitionMemberInfoImpl details1 = buildDetails(member1, 50, 50, new long[] { 30, 30, 30, 0, 0, 0 }, new long[] { 1, 1, 1, 0, 0, 0 });
PartitionMemberInfoImpl details2 = buildDetails(member2, 50, 50, new long[] { 0, 0, 0, 10, 10, 10 }, new long[] { 0, 0, 0, 1, 1, 1 });
// this member has a lower size that can't fit buckets of size 30
PartitionMemberInfoImpl details3 = buildDetails(member3, 50, 20, new long[] { 0, 0, 0, 0, 0, 0 }, new long[] { 0, 0, 0, 0, 0, 0 });
model.addRegion("a", Arrays.asList(details1, details2, details3), new FakeOfflineDetails(), true);
// Three bucket moves and no bucket creates are expected.
assertEquals(3, doMoves(new CompositeDirector(false, false, true, true), model));
assertEquals(Collections.emptyList(), bucketOperator.creates);
// member3's 20-byte limit can't accept a size-30 bucket, so one large bucket
// moves member1 -> member2 and the small buckets move member2 -> member3.
// (Set semantics collapse the duplicate member2 -> member3 move.)
Set<Move> expectedMoves = new HashSet<Move>();
expectedMoves.add(new Move(member1, member2));
expectedMoves.add(new Move(member2, member3));
expectedMoves.add(new Move(member2, member3));
assertEquals(expectedMoves, new HashSet<Move>(bucketOperator.bucketMoves));
// After rebalancing, each member should host 2 buckets and 2 primaries.
Set<PartitionMemberInfo> detailSet = model.getPartitionedMemberDetails("a");
for (PartitionMemberInfo member : detailSet) {
assertEquals(2, member.getPrimaryCount());
assertEquals(2, member.getBucketCount());
}
}
Usage of org.apache.geode.internal.cache.partitioned.rebalance.PartitionedRegionLoadModel in the Apache Geode project.
From class PartitionedRegionLoadModelJUnitTest, method testMoveBucketsWithFailures.
/**
 * Test that moving buckets will work if there are failures while moving buckets member2 refuses
 * the buckets, so the buckets should move to member3
 */
@Test
public void testMoveBucketsWithFailures() throws Exception {
InternalDistributedMember member1 = new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 1);
final InternalDistributedMember member2 = new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 2);
InternalDistributedMember member3 = new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 3);
// Operator that simulates a failure: any move targeting member2 is rejected.
MyBucketOperator op = new MyBucketOperator() {
@Override
public boolean moveBucket(InternalDistributedMember source, InternalDistributedMember target, int id, Map<String, Long> colocatedRegionBytes) {
if (target.equals(member2)) {
return false;
}
return super.moveBucket(source, target, id, colocatedRegionBytes);
}
};
// Model with redundancy 0 and 4 buckets; no critical members, no offline members.
PartitionedRegionLoadModel model = new PartitionedRegionLoadModel(op, 0, 4, getAddressComparor(false), Collections.<InternalDistributedMember>emptySet(), null);
// Create some imbalanced nodes: member1 holds all four buckets.
PartitionMemberInfoImpl details1 = buildDetails(member1, 500, 500, new long[] { 1, 1, 1, 1 }, new long[] { 1, 1, 1, 1 });
PartitionMemberInfoImpl details2 = buildDetails(member2, 500, 500, new long[] { 0, 0, 0, 0 }, new long[] { 0, 0, 0, 0 });
PartitionMemberInfoImpl details3 = buildDetails(member3, 500, 500, new long[] { 0, 0, 0, 0 }, new long[] { 0, 0, 0, 0 });
model.addRegion("a", Arrays.asList(details1, details2, details3), new FakeOfflineDetails(), true);
// 8 attempts total: moves toward member2 fail and are retried toward member3.
assertEquals(8, doMoves(new CompositeDirector(false, false, true, true), model));
assertEquals(Collections.emptyList(), op.creates);
assertEquals(Collections.emptyList(), op.primaryMoves);
// member2 refused its moves, so the two buckets end up on member3 instead.
List<Move> expectedMoves = new ArrayList<Move>();
expectedMoves.add(new Move(member1, member3));
expectedMoves.add(new Move(member1, member3));
assertEquals(expectedMoves, op.bucketMoves);
}
Usage of org.apache.geode.internal.cache.partitioned.rebalance.PartitionedRegionLoadModel in the Apache Geode project.
From class PartitionedRegionLoadModelJUnitTest, method testMovePrimaries.
/**
 * Very basic primary-balancing test. Two nodes each hold a full copy of four
 * buckets, but every primary starts on member1; after rebalancing, half of the
 * primaries (two) are expected to migrate to member2.
 */
@Test
public void testMovePrimaries() throws Exception {
  // Redundancy 2, 4 buckets, deterministic address comparison, no critical or offline members.
  PartitionedRegionLoadModel loadModel = new PartitionedRegionLoadModel(bucketOperator, 2, 4,
      getAddressComparor(false), Collections.<InternalDistributedMember>emptySet(), null);
  InternalDistributedMember member1 =
      new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 1);
  InternalDistributedMember member2 =
      new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 2);
  // member1 starts with all four primaries; member2 hosts copies but no primaries.
  PartitionMemberInfoImpl primaryHeavy =
      buildDetails(member1, 500, 500, new long[] { 1, 1, 1, 1 }, new long[] { 1, 1, 1, 1 });
  PartitionMemberInfoImpl primaryLight =
      buildDetails(member2, 500, 500, new long[] { 1, 1, 1, 1 }, new long[] { 0, 0, 0, 0 });
  loadModel.addRegion("a", Arrays.asList(primaryHeavy, primaryLight), new FakeOfflineDetails(), true);
  // Director configured to move primaries only; exactly two moves are expected.
  assertEquals(2, doMoves(new CompositeDirector(false, false, false, true), loadModel));
  // No buckets should have been created in the process.
  assertEquals(Collections.emptyList(), bucketOperator.creates);
  // Both migrated primaries travel from member1 to member2.
  List<Move> expectedPrimaryMoves =
      Arrays.asList(new Move(member1, member2), new Move(member1, member2));
  assertEquals(expectedPrimaryMoves, bucketOperator.primaryMoves);
}
Usage of org.apache.geode.internal.cache.partitioned.rebalance.PartitionedRegionLoadModel in the Apache Geode project.
From class PartitionedRegionLoadModelJUnitTest, method testMoveBucketsWithRedundancy.
/**
 * Test to move buckets with redundancy. Makes sure that buckets and primaries are balanced:
 * three loaded members each shed one bucket to an empty fourth member.
 */
@Test
public void testMoveBucketsWithRedundancy() throws Exception {
  // Redundancy 2, 4 buckets, deterministic address comparison, no critical or offline members.
  PartitionedRegionLoadModel loadModel = new PartitionedRegionLoadModel(bucketOperator, 2, 4,
      getAddressComparor(false), Collections.<InternalDistributedMember>emptySet(), null);
  InternalDistributedMember member1 =
      new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 1);
  InternalDistributedMember member2 =
      new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 2);
  InternalDistributedMember member3 =
      new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 3);
  InternalDistributedMember member4 =
      new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 4);
  // Imbalanced layout: members 1-3 each host all four buckets (one primary each),
  // member4 is empty.
  PartitionMemberInfoImpl loaded1 =
      buildDetails(member1, 500, 500, new long[] { 1, 1, 1, 1 }, new long[] { 1, 1, 0, 0 });
  PartitionMemberInfoImpl loaded2 =
      buildDetails(member2, 500, 500, new long[] { 1, 1, 1, 1 }, new long[] { 0, 0, 1, 0 });
  PartitionMemberInfoImpl loaded3 =
      buildDetails(member3, 500, 500, new long[] { 1, 1, 1, 1 }, new long[] { 0, 0, 0, 1 });
  PartitionMemberInfoImpl empty4 =
      buildDetails(member4, 500, 500, new long[] { 0, 0, 0, 0 }, new long[] { 0, 0, 0, 0 });
  loadModel.addRegion("a", Arrays.asList(loaded1, loaded2, loaded3, empty4),
      new FakeOfflineDetails(), true);
  doMoves(new CompositeDirector(false, false, true, true), loadModel);
  // Rebalancing should never create buckets here, only move them.
  assertEquals(Collections.emptyList(), bucketOperator.creates);
  // One bucket should move from each loaded member to member4.
  Set<Move> expectedBucketMoves = new HashSet<Move>(Arrays.asList(
      new Move(member1, member4),
      new Move(member2, member4),
      new Move(member3, member4)));
  assertEquals(expectedBucketMoves, new HashSet<Move>(bucketOperator.bucketMoves));
  // We don't know how many primaries will move, because the move-buckets algorithm
  // could relocate either the primary or a redundant copy. But after it's done,
  // every member should end with exactly one primary and three buckets.
  Set<PartitionMemberInfo> finalDetails = loadModel.getPartitionedMemberDetails("a");
  for (PartitionMemberInfo info : finalDetails) {
    assertEquals(1, info.getPrimaryCount());
    assertEquals(3, info.getBucketCount());
  }
}
Usage of org.apache.geode.internal.cache.partitioned.rebalance.PartitionedRegionLoadModel in the Apache Geode project.
From class PartitionedRegionLoadModelJUnitTest, method testColocationIgnoreEnforceLocalMaxMemory.
/**
 * Test that each region individually honors its enforce-local-max-memory flag:
 * region "a" enforces the limit while colocated region "b" ignores it.
 */
@Test
public void testColocationIgnoreEnforceLocalMaxMemory() throws Exception {
  // Redundancy 1, 4 buckets, deterministic address comparison, no critical or offline members.
  PartitionedRegionLoadModel model = new PartitionedRegionLoadModel(bucketOperator, 1, 4,
      getAddressComparor(false), Collections.<InternalDistributedMember>emptySet(), null);
  InternalDistributedMember member1 =
      new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 1);
  InternalDistributedMember member2 =
      new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 2);
  InternalDistributedMember member3 =
      new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 3);
  // Create some buckets with low redundancy on member 1
  PartitionMemberInfoImpl details1 =
      buildDetails(member1, 500, 500, new long[] { 1, 1, 1, 1 }, new long[] { 1, 1, 1, 1 });
  PartitionMemberInfoImpl details2 =
      buildDetails(member2, 500, 500, new long[] { 0, 0, 0, 0 }, new long[] { 0, 0, 0, 0 });
  // Region "a" enforces local max memory (last argument true).
  model.addRegion("a", Arrays.asList(details1, details2), new FakeOfflineDetails(), true);
  // Member 2 has a local max memory of 2, so with enforcement it could only accept
  // 2 buckets -- but region "b" is added with enforcement disabled (last argument false).
  PartitionMemberInfoImpl bDetails1 =
      buildDetails(member1, 2, 2, new long[] { 1, 1, 1, 1 }, new long[] { 1, 1, 1, 1 });
  PartitionMemberInfoImpl bDetails2 =
      buildDetails(member2, 2, 2, new long[] { 0, 0, 0, 0 }, new long[] { 0, 0, 0, 0 });
  model.addRegion("b", Arrays.asList(bDetails1, bDetails2), new FakeOfflineDetails(), false);
  // 4 redundancy-recovery creates + 2 primary moves = 6 operations.
  assertEquals(6, doMoves(new CompositeDirector(true, true, false, true), model));
  // Everything should be created on member2
  Set<Create> expectedCreates = new HashSet<Create>();
  expectedCreates.add(new Create(member2, 0));
  expectedCreates.add(new Create(member2, 1));
  expectedCreates.add(new Create(member2, 2));
  expectedCreates.add(new Create(member2, 3));
  // Use parameterized HashSet instances (the previous raw types triggered
  // unchecked-conversion warnings without adding any flexibility).
  assertEquals(expectedCreates, new HashSet<Create>(bucketOperator.creates));
  Set<Move> expectedMoves = new HashSet<Move>();
  expectedMoves.add(new Move(member1, member2));
  expectedMoves.add(new Move(member1, member2));
  assertEquals(expectedMoves, new HashSet<Move>(bucketOperator.primaryMoves));
}
Aggregations