Usage of org.apache.cassandra.dht.Token in project cassandra by apache:
class TokenAllocation, method allocateTokens.
/**
 * Selects {@code numTokens} new tokens for {@code endpoint} using the allocation
 * strategy derived from {@code rs}, then adjusts any token that clashes with an
 * assignment in another datacenter. Before/after ownership statistics for the
 * node's datacenter are logged at WARN level.
 *
 * @param tokenMetadata current ring state; allocation bookkeeping happens on a clone
 * @param rs            replication strategy used to compute replicated ownership
 * @param endpoint      the joining node the tokens are allocated for
 * @param numTokens     number of tokens to allocate
 * @return the chosen tokens, adjusted to avoid cross-datacenter clashes
 * @throws ConfigurationException if an allocated token is already owned by a node
 *         inside the allocation ring (i.e. a concurrent allocation is suspected)
 */
public static Collection<Token> allocateTokens(final TokenMetadata tokenMetadata, final AbstractReplicationStrategy rs, final InetAddress endpoint, int numTokens) {
// Clone so the allocator's internal bookkeeping never mutates the live ring state.
TokenMetadata tokenMetadataCopy = tokenMetadata.cloneOnlyTokenMap();
StrategyAdapter strategy = getStrategy(tokenMetadataCopy, rs, endpoint);
// NOTE(review): the allocator is created from the original tokenMetadata while the
// strategy was derived from the fresh copy — presumably equivalent since the clone
// was just taken, but confirm this asymmetry is intentional.
Collection<Token> tokens = create(tokenMetadata, strategy).addUnit(endpoint, numTokens);
tokens = adjustForCrossDatacenterClashes(tokenMetadata, strategy, tokens);
// Diagnostics only: everything inside this guard mutates just the clone, so the
// live ring is unaffected whether or not WARN logging is enabled.
if (logger.isWarnEnabled()) {
logger.warn("Selected tokens {}", tokens);
SummaryStatistics os = replicatedOwnershipStats(tokenMetadataCopy, rs, endpoint);
// Apply the new tokens to the clone so "after" stats reflect the allocation.
tokenMetadataCopy.updateNormalTokens(tokens, endpoint);
SummaryStatistics ns = replicatedOwnershipStats(tokenMetadataCopy, rs, endpoint);
logger.warn("Replicated node load in datacentre before allocation {}", statToString(os));
logger.warn("Replicated node load in datacentre after allocation {}", statToString(ns));
// TODO: Is it worth doing the replicated ownership calculation always to be able to raise this alarm?
if (ns.getStandardDeviation() > os.getStandardDeviation())
logger.warn("Unexpected growth in standard deviation after allocation.");
}
return tokens;
}
Usage of org.apache.cassandra.dht.Token in project cassandra by apache:
class TokenAllocation, method adjustForCrossDatacenterClashes.
/**
 * Resolves collisions between freshly allocated tokens and tokens already owned on
 * the ring. A collision with a node outside the allocation ring is resolved by
 * nudging the token upward until it is free; a collision with a node inside the
 * allocation ring is an error (another node is likely allocating concurrently).
 *
 * @param tokenMetadata ring state consulted for current token ownership
 * @param strategy      decides whether an owning node is part of the allocation ring
 * @param tokens        the candidate tokens to check
 * @return the tokens with any cross-datacenter clashes nudged away
 * @throws ConfigurationException on a clash with a node inside the allocation ring
 */
private static Collection<Token> adjustForCrossDatacenterClashes(final TokenMetadata tokenMetadata, StrategyAdapter strategy, Collection<Token> tokens) {
List<Token> adjusted = Lists.newArrayListWithCapacity(tokens.size());
for (Token candidate : tokens) {
InetAddress owner;
// Keep nudging until the candidate token has no current owner.
while ((owner = tokenMetadata.getEndpoint(candidate)) != null) {
if (strategy.inAllocationRing(owner))
throw new ConfigurationException(String.format("Allocated token %s already assigned to node %s. Is another node also allocating tokens?", candidate, owner));
candidate = candidate.increaseSlightly();
}
adjusted.add(candidate);
}
return adjusted;
}
Usage of org.apache.cassandra.dht.Token in project cassandra by apache:
class NoReplicationTokenAllocator, method createTokenInfos.
/**
 * Construct the token ring as a circular list of TokenInfo and populate the
 * ownership of the UnitInfo's provided. Also rebuilds the {@code tokensInUnits}
 * and {@code sortedUnits} indexes from scratch.
 *
 * @param units map from unit to its UnitInfo; every unit owning a token in
 *        {@code sortedTokens} is expected to appear as a key
 * @return the head of the circular TokenInfo list, or {@code null} if no units
 */
private TokenInfo<Unit> createTokenInfos(Map<Unit, UnitInfo<Unit>> units) {
if (units.isEmpty())
return null;
// build the circular list (first pass: one TokenInfo per token, in token order)
// NOTE(review): assumes sortedTokens is non-empty whenever units is non-empty,
// otherwise `first` stays null and the do/while below would NPE — TODO confirm.
TokenInfo<Unit> prev = null;
TokenInfo<Unit> first = null;
for (Map.Entry<Token, Unit> en : sortedTokens.entrySet()) {
Token t = en.getKey();
UnitInfo<Unit> ni = units.get(en.getValue());
TokenInfo<Unit> ti = new TokenInfo<>(t, ni);
// insertAfter links ti after prev and returns the (possibly updated) list head.
first = ti.insertAfter(first, prev);
prev = ti;
}
// Second pass: walk the ring once, computing per-token ownership and adjusting
// each owning unit; the per-unit indexes are rebuilt as part of this walk.
TokenInfo<Unit> curr = first;
tokensInUnits.clear();
sortedUnits.clear();
do {
populateTokenInfoAndAdjustUnit(curr);
curr = curr.next;
} while (curr != first);
// Re-rank units by their freshly computed total ownership.
for (UnitInfo<Unit> unitInfo : units.values()) {
sortedUnits.add(new Weighted<UnitInfo>(unitInfo.ownership, unitInfo));
}
return first;
}
Usage of org.apache.cassandra.dht.Token in project cassandra by apache:
class ReplicationAwareTokenAllocator, method createTokenInfos.
/**
 * Builds the token ring as a circular list of TokenInfo nodes from
 * {@code sortedTokens} and populates the ownership of the supplied UnitInfo's.
 *
 * @param units        lookup from owning unit to its UnitInfo
 * @param newUnitGroup group of the unit being added; forwarded to the per-token
 *                     ownership calculation
 * @return the head of the circular TokenInfo list
 */
private TokenInfo<Unit> createTokenInfos(Map<Unit, UnitInfo<Unit>> units, GroupInfo newUnitGroup) {
// First pass: link one TokenInfo per token into a circular list, in token order.
TokenInfo<Unit> head = null;
TokenInfo<Unit> tail = null;
for (Map.Entry<Token, Unit> entry : sortedTokens.entrySet()) {
TokenInfo<Unit> node = new TokenInfo<>(entry.getKey(), units.get(entry.getValue()));
// insertAfter links the node after tail and returns the (possibly new) head.
head = node.insertAfter(head, tail);
tail = node;
}
// Second pass: walk the ring exactly once and accumulate ownership per unit.
TokenInfo<Unit> walker = head;
do {
populateTokenInfoAndAdjustUnit(walker, newUnitGroup);
walker = walker.next;
} while (walker != head);
return head;
}
Usage of org.apache.cassandra.dht.Token in project cassandra by apache:
class SSTableLoader, method stream.
/**
 * Streams the relevant sections of the loaded sstables to each endpoint in the
 * ring map, skipping endpoints listed in {@code toIgnore}.
 *
 * @param toIgnore  endpoints that must not receive any data
 * @param listeners extra stream-event listeners registered on the plan
 * @return the future tracking the streaming operation (empty result if there is
 *         nothing to stream)
 */
public StreamResultFuture stream(Set<InetAddress> toIgnore, StreamEventHandler... listeners) {
client.init(keyspace);
outputHandler.output("Established connection to initial hosts");
StreamPlan plan = new StreamPlan("Bulk Load", 0, connectionsPerHost, false, false, false, null).connectionFactory(client.getConnectionFactory());
Map<InetAddress, Collection<Range<Token>>> endpointToRanges = client.getEndpointToRangesMap();
openSSTables(endpointToRanges);
// Nothing opened: execute the (empty) plan so callers still get a result future.
if (sstables.isEmpty()) {
return plan.execute();
}
// NOTE(review): "%sto" looks odd but presumably names() yields trailing spaces —
// verify before "fixing" the format string.
outputHandler.output(String.format("Streaming relevant part of %sto %s", names(sstables), endpointToRanges.keySet()));
for (InetAddress remote : endpointToRanges.keySet()) {
if (toIgnore.contains(remote))
continue;
// References were acquired when the SSTableStreamingSections were constructed;
// copy them into a fresh list for this endpoint's transfer.
List<StreamSession.SSTableStreamingSections> endpointDetails = new LinkedList<>(streamingDetails.get(remote));
plan.transferFiles(remote, endpointDetails);
}
plan.listeners(this, listeners);
return plan.execute();
}
Aggregations