Usage of org.apache.geode.cache.partition.PartitionMemberInfo in the Apache Geode project:
class PartitionedRegionLoadModelJUnitTest, method testRedundancySatisfactionWithAsyncFailures.
/**
 * Verify that redundancy satisfaction tolerates asynchronous failures and still
 * completes the job correctly: bucket creations that fail asynchronously on one
 * member are retried on a different member.
 */
@Test
public void testRedundancySatisfactionWithAsyncFailures() throws Exception {
InternalDistributedMember m1 = new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 1);
InternalDistributedMember m2 = new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 2);
InternalDistributedMember m3 = new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 3);
// m2 is the "bad" member: creates targeting it will fail asynchronously
BucketOperatorWithFailures failingOperator = new BucketOperatorWithFailures();
failingOperator.addBadMember(m2);
bucketOperator = failingOperator;
// redundancy 1, 6 buckets
PartitionedRegionLoadModel model = new PartitionedRegionLoadModel(bucketOperator, 1, 6, getAddressComparor(false), Collections.<InternalDistributedMember>emptySet(), null);
// m1 hosts all six buckets and is primary for each; m2 and m3 are empty
PartitionMemberInfoImpl info1 = buildDetails(m1, 500, 500, new long[] { 1, 1, 1, 1, 1, 1 }, new long[] { 1, 1, 1, 1, 1, 1 });
PartitionMemberInfoImpl info2 = buildDetails(m2, 500, 500, new long[] { 0, 0, 0, 0, 0, 0 }, new long[] { 0, 0, 0, 0, 0, 0 });
PartitionMemberInfoImpl info3 = buildDetails(m3, 500, 500, new long[] { 0, 0, 0, 0, 0, 0 }, new long[] { 0, 0, 0, 0, 0, 0 });
model.addRegion("a", Arrays.asList(info1, info2, info3), new FakeOfflineDetails(), true);
Set<PartitionMemberInfo> memberDetails = model.getPartitionedMemberDetails("a");
assertEquals(3, memberDetails.size());
// TODO - make some assertions about what's in the details
// we expect 6 moves (3 of these will fail)
assertEquals(6, doMoves(new CompositeDirector(true, true, false, false), model));
assertEquals(3, bucketOperator.creates.size());
// complete the async operations: successes first, then failures
for (Completion success : failingOperator.pendingSuccesses) {
success.onSuccess();
}
for (Completion failure : failingOperator.pendingFailures) {
failure.onFailure();
}
// Now the moves that failed get reattempted to a new location (because the last location
// failed)
assertEquals(3, doMoves(new CompositeDirector(true, true, false, false), model));
// all redundant copies end up on m3, in this exact creation order
List<Create> expectedCreates = new ArrayList<Create>();
for (int bucketId : new int[] { 1, 3, 5, 0, 2, 4 }) {
expectedCreates.add(new Create(m3, bucketId));
}
assertEquals(expectedCreates, bucketOperator.creates);
}
Usage of org.apache.geode.cache.partition.PartitionMemberInfo in the Apache Geode project:
class PartitionedRegionLoadModelJUnitTest, method testMoveLargeBucketsWithRedundancy.
/**
 * Move buckets when individual buckets are larger than Integer.MAX_VALUE (to make
 * sure there are no buffer-overflow issues) and verify that buckets and primaries
 * end up balanced.
 */
@Test
public void testMoveLargeBucketsWithRedundancy() throws Exception {
// redundancy 2, 4 buckets
PartitionedRegionLoadModel model = new PartitionedRegionLoadModel(bucketOperator, 2, 4, getAddressComparor(false), Collections.<InternalDistributedMember>emptySet(), null);
InternalDistributedMember m1 = new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 1);
InternalDistributedMember m2 = new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 2);
InternalDistributedMember m3 = new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 3);
InternalDistributedMember m4 = new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 4);
// Imbalanced layout: m1-m3 each hold every bucket, m4 holds none.
// Each bucket is deliberately bigger than Integer.MAX_VALUE.
long hugeBucket = 5L * Integer.MAX_VALUE + 10L;
PartitionMemberInfoImpl info1 = buildDetails(m1, 500, Long.MAX_VALUE, new long[] { hugeBucket, hugeBucket, hugeBucket, hugeBucket }, new long[] { 1, 1, 0, 0 });
PartitionMemberInfoImpl info2 = buildDetails(m2, 500, Long.MAX_VALUE, new long[] { hugeBucket, hugeBucket, hugeBucket, hugeBucket }, new long[] { 0, 0, 1, 0 });
PartitionMemberInfoImpl info3 = buildDetails(m3, 500, Long.MAX_VALUE, new long[] { hugeBucket, hugeBucket, hugeBucket, hugeBucket }, new long[] { 0, 0, 0, 1 });
PartitionMemberInfoImpl info4 = buildDetails(m4, 500, Long.MAX_VALUE, new long[] { 0, 0, 0, 0 }, new long[] { 0, 0, 0, 0 });
model.addRegion("a", Arrays.asList(info1, info2, info3, info4), new FakeOfflineDetails(), true);
doMoves(new CompositeDirector(false, false, true, true), model);
// rebalancing should only move buckets, never create new copies
assertEquals(Collections.emptyList(), bucketOperator.creates);
// One bucket should move from each of m1-m3 to m4
Set<Move> expectedMoves = new HashSet<Move>(Arrays.asList(new Move(m1, m4), new Move(m2, m4), new Move(m3, m4)));
assertEquals(expectedMoves, new HashSet<Move>(bucketOperator.bucketMoves));
// We don't know how many primaries will move, because
// the move buckets algorithm could move the primary or
// it could move a redundant copy. But after we're done, we should
// only have one primary per member.
for (PartitionMemberInfo info : model.getPartitionedMemberDetails("a")) {
assertEquals(1, info.getPrimaryCount());
assertEquals(3, info.getBucketCount());
}
}
Usage of org.apache.geode.cache.partition.PartitionMemberInfo in the Apache Geode project:
class PartitionedRegionLoadModelJUnitTest, method testMoveBucketsWithSizeLimits.
/**
 * Test to make sure that moving buckets honors size restrictions for VMs.
 *
 * <p>member3 advertises only 20 bytes of local max memory, so the size-30 buckets
 * hosted by member1 cannot be moved there; only member2's size-10 buckets fit.
 */
@Test
public void testMoveBucketsWithSizeLimits() throws Exception {
// redundancy 0, 6 buckets
PartitionedRegionLoadModel model = new PartitionedRegionLoadModel(bucketOperator, 0, 6, getAddressComparor(false), Collections.<InternalDistributedMember>emptySet(), null);
InternalDistributedMember member1 = new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 1);
InternalDistributedMember member2 = new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 2);
InternalDistributedMember member3 = new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 3);
// Create some imbalanced nodes: member1 hosts three size-30 buckets,
// member2 hosts three size-10 buckets, member3 is empty
PartitionMemberInfoImpl details1 = buildDetails(member1, 50, 50, new long[] { 30, 30, 30, 0, 0, 0 }, new long[] { 1, 1, 1, 0, 0, 0 });
PartitionMemberInfoImpl details2 = buildDetails(member2, 50, 50, new long[] { 0, 0, 0, 10, 10, 10 }, new long[] { 0, 0, 0, 1, 1, 1 });
// this member has a lower size that can't fit buckets of size 30
PartitionMemberInfoImpl details3 = buildDetails(member3, 50, 20, new long[] { 0, 0, 0, 0, 0, 0 }, new long[] { 0, 0, 0, 0, 0, 0 });
model.addRegion("a", Arrays.asList(details1, details2, details3), new FakeOfflineDetails(), true);
assertEquals(3, doMoves(new CompositeDirector(false, false, true, true), model));
// rebalancing should only move buckets, never create new copies
assertEquals(Collections.emptyList(), bucketOperator.creates);
// Expected moves: member1 offloads one size-30 bucket to member2, and member2
// passes two size-10 buckets on to member3 (the only ones that fit there).
// NOTE(review): the two identical Move(member2, member3) entries collapse into
// a single element in the HashSet, so the set comparison below checks only the
// distinct source/target pairs, not the move count (that is pinned by the
// assertEquals(3, doMoves(...)) above).
Set<Move> expectedMoves = new HashSet<Move>();
expectedMoves.add(new Move(member1, member2));
expectedMoves.add(new Move(member2, member3));
expectedMoves.add(new Move(member2, member3));
assertEquals(expectedMoves, new HashSet<Move>(bucketOperator.bucketMoves));
// after rebalancing every member should host exactly two buckets, each primary
// for the buckets it hosts (redundancy is 0)
Set<PartitionMemberInfo> detailSet = model.getPartitionedMemberDetails("a");
for (PartitionMemberInfo member : detailSet) {
assertEquals(2, member.getPrimaryCount());
assertEquals(2, member.getBucketCount());
}
}
Usage of org.apache.geode.cache.partition.PartitionMemberInfo in the Apache Geode project:
class PartitionedRegionLoadModelJUnitTest, method testMoveBucketsWithRedundancy.
/**
 * Move buckets while redundancy is configured, and verify that buckets and
 * primaries end up balanced across all members.
 */
@Test
public void testMoveBucketsWithRedundancy() throws Exception {
// redundancy 2, 4 buckets
PartitionedRegionLoadModel model = new PartitionedRegionLoadModel(bucketOperator, 2, 4, getAddressComparor(false), Collections.<InternalDistributedMember>emptySet(), null);
InternalDistributedMember m1 = new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 1);
InternalDistributedMember m2 = new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 2);
InternalDistributedMember m3 = new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 3);
InternalDistributedMember m4 = new InternalDistributedMember(InetAddress.getByName("127.0.0.1"), 4);
// Imbalanced layout: m1-m3 each hold every bucket, m4 holds none
PartitionMemberInfoImpl info1 = buildDetails(m1, 500, 500, new long[] { 1, 1, 1, 1 }, new long[] { 1, 1, 0, 0 });
PartitionMemberInfoImpl info2 = buildDetails(m2, 500, 500, new long[] { 1, 1, 1, 1 }, new long[] { 0, 0, 1, 0 });
PartitionMemberInfoImpl info3 = buildDetails(m3, 500, 500, new long[] { 1, 1, 1, 1 }, new long[] { 0, 0, 0, 1 });
PartitionMemberInfoImpl info4 = buildDetails(m4, 500, 500, new long[] { 0, 0, 0, 0 }, new long[] { 0, 0, 0, 0 });
model.addRegion("a", Arrays.asList(info1, info2, info3, info4), new FakeOfflineDetails(), true);
doMoves(new CompositeDirector(false, false, true, true), model);
// rebalancing should only move buckets, never create new copies
assertEquals(Collections.emptyList(), bucketOperator.creates);
// One bucket should move from each of m1-m3 to m4
Set<Move> expectedMoves = new HashSet<Move>(Arrays.asList(new Move(m1, m4), new Move(m2, m4), new Move(m3, m4)));
assertEquals(expectedMoves, new HashSet<Move>(bucketOperator.bucketMoves));
// We don't know how many primaries will move, because
// the move buckets algorithm could move the primary or
// it could move a redundant copy. But after we're done, we should
// only have one primary per member.
for (PartitionMemberInfo info : model.getPartitionedMemberDetails("a")) {
assertEquals(1, info.getPrimaryCount());
assertEquals(3, info.getBucketCount());
}
}
Aggregations