Use of org.apache.cassandra.locator.IEndpointSnitch in project cassandra by apache.
In class TokenRange, method create:
public static TokenRange create(Token.TokenFactory tokenFactory, Range<Token> range, List<InetAddress> endpoints) {
    List<EndpointDetails> details = new ArrayList<>(endpoints.size());
    IEndpointSnitch snitch = DatabaseDescriptor.getEndpointSnitch();
    for (InetAddress ep : endpoints)
        details.add(new EndpointDetails(ep, StorageService.instance.getRpcaddress(ep), snitch.getDatacenter(ep), snitch.getRack(ep)));
    return new TokenRange(tokenFactory, range, details);
}
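getDatacenter and getRack, used in the loop above, are the core topology lookups of the IEndpointSnitch contract. As a minimal sketch of the same pattern, assuming nothing beyond those two calls (the helper class and its name below are ours, not part of Cassandra), the configured snitch can be used to group endpoints by datacenter:

import java.net.InetAddress;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.locator.IEndpointSnitch;

public final class EndpointsByDatacenter {

    // Group endpoints by the datacenter the configured snitch reports for them.
    public static Map<String, List<InetAddress>> group(Collection<InetAddress> endpoints) {
        IEndpointSnitch snitch = DatabaseDescriptor.getEndpointSnitch();
        Map<String, List<InetAddress>> byDc = new HashMap<>();
        for (InetAddress ep : endpoints)
            byDc.computeIfAbsent(snitch.getDatacenter(ep), dc -> new ArrayList<>()).add(ep);
        return byDc;
    }
}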
Use of org.apache.cassandra.locator.IEndpointSnitch in project cassandra by apache.
In class CreateTest, method testHyphenDatacenters:
@Test
// tests CASSANDRA-4278
public void testHyphenDatacenters() throws Throwable {
    IEndpointSnitch snitch = DatabaseDescriptor.getEndpointSnitch();
    // Register an endpoint snitch which returns fixed values for the test.
    DatabaseDescriptor.setEndpointSnitch(new AbstractEndpointSnitch() {

        @Override
        public String getRack(InetAddress endpoint) {
            return RACK1;
        }

        @Override
        public String getDatacenter(InetAddress endpoint) {
            return "us-east-1";
        }

        @Override
        public int compareEndpoints(InetAddress target, InetAddress a1, InetAddress a2) {
            return 0;
        }
    });
    // A hyphenated datacenter name must be accepted in the replication options.
    execute("CREATE KEYSPACE Foo WITH replication = { 'class' : 'NetworkTopologyStrategy', 'us-east-1' : 1 };");
    // Restore the previous endpoint snitch.
    DatabaseDescriptor.setEndpointSnitch(snitch);
    // Clean up.
    execute("DROP KEYSPACE IF EXISTS Foo");
}
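The anonymous snitch above can be lifted into a reusable test fixture. A sketch (a hypothetical utility, not part of the Cassandra test suite) that reports a fixed datacenter and rack for every endpoint:

import java.net.InetAddress;

import org.apache.cassandra.locator.AbstractEndpointSnitch;

public class FixedTopologySnitch extends AbstractEndpointSnitch {

    private final String datacenter;
    private final String rack;

    public FixedTopologySnitch(String datacenter, String rack) {
        this.datacenter = datacenter;
        this.rack = rack;
    }

    @Override
    public String getDatacenter(InetAddress endpoint) {
        return datacenter;
    }

    @Override
    public String getRack(InetAddress endpoint) {
        return rack;
    }

    // AbstractEndpointSnitch derives its proximity sorting from this comparator;
    // returning 0 treats every endpoint as equally close, as in the test above.
    @Override
    public int compareEndpoints(InetAddress target, InetAddress a1, InetAddress a2) {
        return 0;
    }
}

The test would then register it with DatabaseDescriptor.setEndpointSnitch(new FixedTopologySnitch("us-east-1", RACK1)).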
Use of org.apache.cassandra.locator.IEndpointSnitch in project eiger by wlloyd.
In class StorageService, method move:
/**
 * Move this node to a new token.
 *
 * @param newToken the token to move to; must be non-null and must not already be owned by another node
 *
 * @throws IOException on any I/O operation error
 */
private void move(Token newToken) throws IOException {
    if (newToken == null)
        throw new IOException("Can't move to the undefined (null) token.");
    if (tokenMetadata_.sortedTokens().contains(newToken))
        throw new IOException("target token " + newToken + " is already owned by another node.");
    // address of the current node
    InetAddress localAddress = FBUtilities.getBroadcastAddress();
    List<String> tablesToProcess = Schema.instance.getNonSystemTables();
    // checking if data is moving to this node
    for (String table : tablesToProcess) {
        if (tokenMetadata_.getPendingRanges(table, localAddress).size() > 0)
            throw new UnsupportedOperationException("data is currently moving to this node; unable to leave the ring");
    }
    // setting 'moving' application state
    Gossiper.instance.addLocalApplicationState(ApplicationState.STATUS, valueFactory.moving(newToken));
    logger_.info(String.format("Moving %s from %s to %s.", localAddress, getLocalToken(), newToken));
    IEndpointSnitch snitch = DatabaseDescriptor.getEndpointSnitch();
    Map<String, Multimap<InetAddress, Range<Token>>> rangesToFetch = new HashMap<String, Multimap<InetAddress, Range<Token>>>();
    Map<String, Multimap<Range<Token>, InetAddress>> rangesToStreamByTable = new HashMap<String, Multimap<Range<Token>, InetAddress>>();
    TokenMetadata tokenMetaClone = tokenMetadata_.cloneAfterAllSettled();
    // for each non-system table, calculate the new ranges which the current node
    // will handle after the move to the new token
    for (String table : tablesToProcess) {
        // replication strategy of the current keyspace (aka table)
        AbstractReplicationStrategy strategy = Table.open(table).getReplicationStrategy();
        // collection of the ranges currently served by this node for this keyspace
        Collection<Range<Token>> currentRanges = getRangesForEndpoint(table, localAddress);
        // collection of ranges which this node will serve after the move to the new token
        Collection<Range<Token>> updatedRanges = strategy.getPendingAddressRanges(tokenMetadata_, newToken, localAddress);
        // ring ranges and the endpoints associated with them;
        // used to determine which nodes to contact for range data
        Multimap<Range<Token>, InetAddress> rangeAddresses = strategy.getRangeAddresses(tokenMetadata_);
        // calculated parts of the ranges to request/stream from/to nodes in the ring
        Pair<Set<Range<Token>>, Set<Range<Token>>> rangesPerTable = calculateStreamAndFetchRanges(currentRanges, updatedRanges);
        /**
         * In this loop we go through all ranges "to fetch" and determine
         * the nodes in the ring responsible for the data we are interested in.
         */
        Multimap<Range<Token>, InetAddress> rangesToFetchWithPreferredEndpoints = ArrayListMultimap.create();
        for (Range<Token> toFetch : rangesPerTable.right) {
            for (Range<Token> range : rangeAddresses.keySet()) {
                if (range.contains(toFetch)) {
                    List<InetAddress> endpoints = snitch.getSortedListByProximity(localAddress, rangeAddresses.get(range));
                    // storing range and preferred endpoint set
                    rangesToFetchWithPreferredEndpoints.putAll(toFetch, endpoints);
                }
            }
        }
        // calculating endpoints to stream current ranges to, if needed;
        // in some situations the node will keep serving current ranges as part of its new ranges
        Multimap<Range<Token>, InetAddress> rangeWithEndpoints = HashMultimap.create();
        for (Range<Token> toStream : rangesPerTable.left) {
            Set<InetAddress> currentEndpoints = ImmutableSet.copyOf(strategy.calculateNaturalEndpoints(toStream.right, tokenMetadata_));
            Set<InetAddress> newEndpoints = ImmutableSet.copyOf(strategy.calculateNaturalEndpoints(toStream.right, tokenMetaClone));
            rangeWithEndpoints.putAll(toStream, Sets.difference(newEndpoints, currentEndpoints));
        }
        // associating table with range-to-endpoints map
        rangesToStreamByTable.put(table, rangeWithEndpoints);
        Multimap<InetAddress, Range<Token>> workMap = BootStrapper.getWorkMap(rangesToFetchWithPreferredEndpoints);
        rangesToFetch.put(table, workMap);
        if (logger_.isDebugEnabled())
            logger_.debug("Table {}: work map {}.", table, workMap);
    }
    if (!rangesToStreamByTable.isEmpty() || !rangesToFetch.isEmpty()) {
        logger_.info("Sleeping {} ms before starting to stream/fetch ranges.", RING_DELAY);
        try {
            Thread.sleep(RING_DELAY);
        } catch (InterruptedException e) {
            throw new RuntimeException("Sleep interrupted " + e.getMessage());
        }
        setMode(Mode.MOVING, "fetching new ranges and streaming old ranges", true);
        if (logger_.isDebugEnabled())
            logger_.debug("[Move->STREAMING] Work Map: " + rangesToStreamByTable);
        CountDownLatch streamLatch = streamRanges(rangesToStreamByTable);
        if (logger_.isDebugEnabled())
            logger_.debug("[Move->FETCHING] Work Map: " + rangesToFetch);
        CountDownLatch fetchLatch = requestRanges(rangesToFetch);
        try {
            streamLatch.await();
            fetchLatch.await();
        } catch (InterruptedException e) {
            throw new RuntimeException("Interrupted latch while waiting for stream/fetch ranges to finish: " + e.getMessage());
        }
    }
    // setting the new token now that everything is settled
    setToken(newToken);
    if (logger_.isDebugEnabled())
        logger_.debug("Successfully moved to new token {}", getLocalToken());
}
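The heart of the method is the split produced by calculateStreamAndFetchRanges: ranges the node serves now but not after the move are streamed out, and ranges it will serve only after the move are fetched. A conceptual sketch of that split (a simplification: the real method works at sub-range granularity, intersecting and subtracting ranges rather than comparing them as opaque set elements):

import java.util.Set;

import com.google.common.collect.Sets;

final class StreamFetchSplit {

    // Ranges served now but not after the move: stream them to their new replicas.
    static <R> Set<R> toStream(Set<R> current, Set<R> updated) {
        return Sets.difference(current, updated);
    }

    // Ranges served only after the move: fetch them from their current replicas.
    static <R> Set<R> toFetch(Set<R> current, Set<R> updated) {
        return Sets.difference(updated, current);
    }
}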
Use of org.apache.cassandra.locator.IEndpointSnitch in project eiger by wlloyd.
In class StorageService, method getNewSourceRanges:
/**
* Finds living endpoints responsible for the given ranges
*
* @param table the table ranges belong to
* @param ranges the ranges to find sources for
* @return multimap of addresses to ranges the address is responsible for
*/
private Multimap<InetAddress, Range<Token>> getNewSourceRanges(String table, Set<Range<Token>> ranges) {
    InetAddress myAddress = FBUtilities.getBroadcastAddress();
    Multimap<Range<Token>, InetAddress> rangeAddresses = Table.open(table).getReplicationStrategy().getRangeAddresses(tokenMetadata_);
    Multimap<InetAddress, Range<Token>> sourceRanges = HashMultimap.create();
    IFailureDetector failureDetector = FailureDetector.instance;
    // find alive sources for our new ranges
    for (Range<Token> range : ranges) {
        // replicas currently responsible for this range
        Collection<InetAddress> possibleRanges = rangeAddresses.get(range);
        IEndpointSnitch snitch = DatabaseDescriptor.getEndpointSnitch();
        List<InetAddress> sources = snitch.getSortedListByProximity(myAddress, possibleRanges);
        assert (!sources.contains(myAddress));
        // take the closest live replica as the source for this range
        for (InetAddress source : sources) {
            if (failureDetector.isAlive(source)) {
                sourceRanges.put(source, range);
                break;
            }
        }
    }
    return sourceRanges;
}
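The per-range selection above, take the proximity-sorted replicas and keep the first one the failure detector considers alive, can be read in isolation. A sketch (a hypothetical helper, using only calls that appear in the method above):

import java.net.InetAddress;
import java.util.Collection;
import java.util.List;

import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.gms.FailureDetector;
import org.apache.cassandra.locator.IEndpointSnitch;

final class LiveSourcePicker {

    // Returns the closest live replica for a range, or null if none is alive.
    static InetAddress closestLiveEndpoint(InetAddress self, Collection<InetAddress> replicas) {
        IEndpointSnitch snitch = DatabaseDescriptor.getEndpointSnitch();
        List<InetAddress> sorted = snitch.getSortedListByProximity(self, replicas);
        for (InetAddress candidate : sorted) {
            if (FailureDetector.instance.isAlive(candidate))
                return candidate;
        }
        return null;
    }
}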
Use of org.apache.cassandra.locator.IEndpointSnitch in project eiger by wlloyd.
In class StorageService, method updateSnitch:
@Override
public void updateSnitch(String epSnitchClassName, Boolean dynamic, Integer dynamicUpdateInterval, Integer dynamicResetInterval, Double dynamicBadnessThreshold) throws ConfigurationException {
    IEndpointSnitch oldSnitch = DatabaseDescriptor.getEndpointSnitch();
    // the new snitch registers its mbean during construction
    IEndpointSnitch newSnitch = FBUtilities.construct(epSnitchClassName, "snitch");
    if (dynamic) {
        DatabaseDescriptor.setDynamicUpdateInterval(dynamicUpdateInterval);
        DatabaseDescriptor.setDynamicResetInterval(dynamicResetInterval);
        DatabaseDescriptor.setDynamicBadnessThreshold(dynamicBadnessThreshold);
        newSnitch = new DynamicEndpointSnitch(newSnitch);
    }
    // point snitch references to the new instance
    DatabaseDescriptor.setEndpointSnitch(newSnitch);
    for (String ks : Schema.instance.getTables()) {
        Table.open(ks).getReplicationStrategy().snitch = newSnitch;
    }
    if (oldSnitch instanceof DynamicEndpointSnitch)
        ((DynamicEndpointSnitch) oldSnitch).unregisterMBean();
}
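Since the method is an @Override, presumably implementing a management-interface operation, it can be invoked on a running node to hot-swap the snitch. A sketch of such a call, with illustrative values rather than recommendations, and assuming the eiger-era package for ConfigurationException:

import org.apache.cassandra.config.ConfigurationException;
import org.apache.cassandra.service.StorageService;

final class SnitchSwapExample {

    static void swapToDynamicSimpleSnitch() throws ConfigurationException {
        // Replace the live snitch with SimpleSnitch wrapped in a DynamicEndpointSnitch.
        StorageService.instance.updateSnitch(
            "org.apache.cassandra.locator.SimpleSnitch",
            true,    // dynamic: wrap in DynamicEndpointSnitch
            100,     // dynamicUpdateInterval in ms (illustrative)
            600000,  // dynamicResetInterval in ms (illustrative)
            0.0);    // dynamicBadnessThreshold (illustrative)
    }
}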