Example usage of org.apache.cassandra.locator.AbstractReplicationStrategy in the project eiger by wlloyd: the testGetNeighborsTimesTwo method of the AntiEntropyServiceTestAbstract class.
@Test
public void testGetNeighborsTimesTwo() throws Throwable {
    TokenMetadata tokenMetadata = StorageService.instance.getTokenMetadata();
    // Generate rf*2 nodes so the ring is twice as large as one replica set,
    // then ensure that only the neighbors selected by the ARS are returned.
    addTokens(2 * Table.open(tablename).getReplicationStrategy().getReplicationFactor());
    AbstractReplicationStrategy strategy = Table.open(tablename).getReplicationStrategy();
    InetAddress localAddress = FBUtilities.getBroadcastAddress();
    // Expected neighbors: every endpoint replicating a range this node owns,
    // minus this node itself.
    Set<InetAddress> expected = new HashSet<InetAddress>();
    for (Range<Token> ownedRange : strategy.getAddressRanges().get(localAddress))
        expected.addAll(strategy.getRangeAddresses(tokenMetadata).get(ownedRange));
    expected.remove(localAddress);
    // Actual neighbors: what AntiEntropyService reports across all local ranges.
    Set<InetAddress> actual = new HashSet<InetAddress>();
    for (Range<Token> localRange : StorageService.instance.getLocalRanges(tablename))
        actual.addAll(AntiEntropyService.getNeighbors(tablename, localRange));
    assertEquals(expected, actual);
}
Example usage of org.apache.cassandra.locator.AbstractReplicationStrategy in the project eiger by wlloyd: the newTestWriteEndpointsDuringLeave method of the LeaveAndBootstrapTest class.
/**
 * Test whether write endpoints is correct when the node is leaving. Uses
 * StorageService.onChange and does not manipulate token metadata directly.
 */
@Test
public void newTestWriteEndpointsDuringLeave() throws Exception {
    StorageService ss = StorageService.instance;
    final int RING_SIZE = 6;
    final int LEAVING_NODE = 3;
    TokenMetadata tmd = ss.getTokenMetadata();
    tmd.clearUnsafe();
    IPartitioner partitioner = new RandomPartitioner();
    VersionedValue.VersionedValueFactory valueFactory = new VersionedValue.VersionedValueFactory(partitioner);
    IPartitioner oldPartitioner = ss.setPartitionerUnsafe(partitioner);
    // try/finally guarantees the original partitioner is restored even when an
    // assertion fails, so this test cannot pollute tests that run after it.
    try {
        ArrayList<Token> endpointTokens = new ArrayList<Token>();
        ArrayList<Token> keyTokens = new ArrayList<Token>();
        List<InetAddress> hosts = new ArrayList<InetAddress>();
        Util.createInitialRing(ss, partitioner, endpointTokens, keyTokens, hosts, RING_SIZE);
        // Snapshot, for every key token, the endpoints in ring order before any
        // node leaves. This mapping is independent of the table, so it is
        // computed once (previously it was recomputed identically per table).
        Map<Token, List<InetAddress>> expectedEndpoints = new HashMap<Token, List<InetAddress>>();
        for (Token token : keyTokens) {
            List<InetAddress> endpoints = new ArrayList<InetAddress>();
            Iterator<Token> tokenIter = TokenMetadata.ringIterator(tmd.sortedTokens(), token, false);
            while (tokenIter.hasNext())
                endpoints.add(tmd.getEndpoint(tokenIter.next()));
            expectedEndpoints.put(token, endpoints);
        }
        // Third node leaves
        ss.onChange(hosts.get(LEAVING_NODE), ApplicationState.STATUS, valueFactory.leaving(endpointTokens.get(LEAVING_NODE)));
        assertTrue(tmd.isLeaving(hosts.get(LEAVING_NODE)));
        for (String table : Schema.instance.getNonSystemTables()) {
            AbstractReplicationStrategy strategy = getStrategy(table, tmd);
            for (Token token : keyTokens) {
                int replicationFactor = strategy.getReplicationFactor();
                HashSet<InetAddress> actual = new HashSet<InetAddress>(tmd.getWriteEndpoints(token, table, strategy.calculateNaturalEndpoints(token, tmd)));
                // The first rf endpoints in ring order are the natural replicas.
                HashSet<InetAddress> expected = new HashSet<InetAddress>();
                for (int i = 0; i < replicationFactor; i++)
                    expected.add(expectedEndpoints.get(token).get(i));
                // If the leaving node is a natural replica, writes must also go
                // to the next node in the ring for when the leaver is gone.
                if (expected.contains(hosts.get(LEAVING_NODE)))
                    expected.add(expectedEndpoints.get(token).get(replicationFactor));
                assertEquals("mismatched endpoint sets", expected, actual);
            }
        }
    } finally {
        ss.setPartitionerUnsafe(oldPartitioner);
    }
}
Aggregations