Usage of org.apache.cassandra.dht.Token in the Apache Cassandra project: class NoReplicationTokenAllocator, method addUnit.
/**
 * Adds {@code newUnit} to the ring and allocates {@code numTokens} tokens for it.
 * Falls back to purely random tokens when the ring is empty or holds fewer tokens
 * than requested; otherwise splits the heaviest existing vnodes so that per-unit
 * ownership moves towards the average.
 *
 * @param newUnit   the unit to add; asserted not to already own tokens
 * @param numTokens number of tokens (vnodes) to allocate for the new unit
 * @return the tokens selected for the new unit
 */
public Collection<Token> addUnit(Unit newUnit, int numTokens) {
    assert !tokensInUnits.containsKey(newUnit);
    Map<Object, GroupInfo> groups = Maps.newHashMap();
    UnitInfo<Unit> newUnitInfo = new UnitInfo<>(newUnit, 0, groups, strategy);
    Map<Unit, UnitInfo<Unit>> unitInfos = createUnitInfos(groups);
    // Empty ring: nothing to rebalance, just pick random tokens.
    if (unitInfos.isEmpty())
        return generateRandomTokens(newUnitInfo, numTokens, unitInfos);
    // Fewer existing tokens than requested: splitting cannot yield enough, go random.
    if (numTokens > sortedTokens.size())
        return generateRandomTokens(newUnitInfo, numTokens, unitInfos);
    TokenInfo<Unit> head = createTokenInfos(unitInfos);
    // Select the nodes we will work with, extract them from sortedUnits and calculate targetAverage
    double targetAverage = 0.0;
    double sum = 0.0;
    List<Weighted<UnitInfo>> unitsToChange = new ArrayList<>();
    // Pull units from the head of sortedUnits (presumably ordered heaviest-first — see the
    // peek/remove pattern below; TODO confirm the queue's comparator) while their weight
    // still exceeds the running average that counts the new unit as a member.
    for (int i = 0; i < numTokens; i++) {
        Weighted<UnitInfo> unit = sortedUnits.peek();
        if (unit == null)
            break;
        sum += unit.weight;
        // unit and newUnit must be counted
        double average = sum / (unitsToChange.size() + 2);
        if (unit.weight <= average)
            // No point to include later nodes, target can only decrease from here.
            break;
        sortedUnits.remove();
        unitsToChange.add(unit);
        targetAverage = average;
    }
    List<Token> newTokens = Lists.newArrayListWithCapacity(numTokens);
    int nr = 0;
    // calculate the tokens
    for (Weighted<UnitInfo> unit : unitsToChange) {
        // TODO: Any better ways to assign how many tokens to change in each node?
        // Round-robin distribution: the first (numTokens % size) units take one extra token.
        int tokensToChange = numTokens / unitsToChange.size() + (nr < numTokens % unitsToChange.size() ? 1 : 0);
        Queue<Weighted<TokenInfo>> unitTokens = tokensInUnits.get(unit.value.unit);
        List<Weighted<TokenInfo>> tokens = Lists.newArrayListWithCapacity(tokensToChange);
        double workWeight = 0;
        // Extract biggest vnodes and calculate how much weight we can work with.
        for (int i = 0; i < tokensToChange; i++) {
            Weighted<TokenInfo> wt = unitTokens.remove();
            tokens.add(wt);
            workWeight += wt.weight;
            // Ownership is deducted now; it is re-credited below via populateTokenInfoAndAdjustUnit.
            unit.value.ownership -= wt.weight;
        }
        // Amount of ownership this unit should surrender to reach the target average.
        double toTakeOver = unit.weight - targetAverage;
        // Split toTakeOver proportionally between the vnodes.
        for (Weighted<TokenInfo> wt : tokens) {
            double slice;
            Token token;
            if (toTakeOver < workWeight) {
                // Spread decrease.
                slice = toTakeOver / workWeight;
                // Clamp the share so each split stays within configured bounds.
                if (slice < MIN_TAKEOVER_RATIO)
                    slice = MIN_TAKEOVER_RATIO;
                if (slice > MAX_TAKEOVER_RATIO)
                    slice = MAX_TAKEOVER_RATIO;
            } else {
                // Not enough weight available: take the maximum allowed share of each vnode.
                slice = MAX_TAKEOVER_RATIO;
            }
            // Split the vnode's range between its ring predecessor and itself at fraction `slice`.
            token = partitioner.split(wt.value.prevInRing().token, wt.value.token, slice);
            //Token selected, now change all data
            sortedTokens.put(token, newUnit);
            TokenInfo<Unit> ti = new TokenInfo<>(token, newUnitInfo);
            ti.insertAfter(head, wt.value.prevInRing());
            // Recompute ownership for both the new token and the split vnode.
            populateTokenInfoAndAdjustUnit(ti);
            populateTokenInfoAndAdjustUnit(wt.value);
            newTokens.add(token);
        }
        // adjust the weight for current unit
        sortedUnits.add(new Weighted<>(unit.value.ownership, unit.value));
        ++nr;
    }
    sortedUnits.add(new Weighted<>(newUnitInfo.ownership, newUnitInfo));
    return newTokens;
}
Usage of org.apache.cassandra.dht.Token in the Apache Cassandra project: class NoReplicationTokenAllocator, method generateRandomTokens.
/**
 * Assigns {@code numTokens} freshly generated random tokens to {@code newUnit}.
 * Candidates that collide with a token already on the ring are discarded and redrawn,
 * so the result contains exactly {@code numTokens} distinct, previously unused tokens.
 * The new unit is then registered in {@code unitInfos} and the token chain rebuilt.
 *
 * @param newUnit   the unit receiving the tokens
 * @param numTokens how many tokens to generate
 * @param unitInfos per-unit bookkeeping map in which the new unit is registered
 * @return the set of tokens assigned to the new unit
 */
private Collection<Token> generateRandomTokens(UnitInfo<Unit> newUnit, int numTokens, Map<Unit, UnitInfo<Unit>> unitInfos) {
    Set<Token> chosen = new HashSet<>(numTokens);
    while (chosen.size() < numTokens) {
        Token candidate = partitioner.getRandomToken();
        // Collision with an existing ring token: draw again.
        if (sortedTokens.containsKey(candidate))
            continue;
        sortedTokens.put(candidate, newUnit.unit);
        chosen.add(candidate);
    }
    unitInfos.put(newUnit.unit, newUnit);
    createTokenInfos(unitInfos);
    return chosen;
}
Usage of org.apache.cassandra.dht.Token in the Apache Cassandra project: class ReplicationAwareTokenAllocator, method generateRandomTokens.
/**
 * Draws {@code numTokens} distinct random tokens for {@code newUnit}, retrying any
 * candidate that is already present on the ring. Each accepted token is recorded in
 * both the global token map and the unit-to-token index.
 *
 * @param newUnit   the unit the tokens are assigned to
 * @param numTokens number of tokens to draw
 * @return the set of tokens assigned to the new unit
 */
private Collection<Token> generateRandomTokens(Unit newUnit, int numTokens) {
    Set<Token> assigned = new HashSet<>(numTokens);
    while (assigned.size() < numTokens) {
        Token candidate = partitioner.getRandomToken();
        // Already taken — redraw.
        if (sortedTokens.containsKey(candidate))
            continue;
        sortedTokens.put(candidate, newUnit);
        unitToTokens.put(newUnit, candidate);
        assigned.add(candidate);
    }
    return assigned;
}
Usage of org.apache.cassandra.dht.Token in the Apache Cassandra project: class ReplicationAwareTokenAllocator, method populateTokenInfo.
/**
 * Calculates the {@code replicationStart} of a token, as well as {@code replicationThreshold} which is chosen in a way
 * that permits {@code findUpdatedReplicationStart} to quickly identify changes in ownership.
 *
 * @param token        the token whose replication span is being computed
 * @param newUnitGroup the group of the unit currently being added to the ring
 * @return the computed replication start
 */
private Token populateTokenInfo(BaseTokenInfo<Unit, ?> token, GroupInfo newUnitGroup) {
    GroupInfo tokenGroup = token.owningUnit.group;
    PopulateVisitor visitor = new PopulateVisitor();
    // Replication start = the end of a token from the RF'th different group seen before the token.
    Token replicationStart;
    // The end of a token from the RF-1'th different group seen before the token.
    Token replicationThreshold = token.token;
    GroupInfo currGroup;
    // Walk backwards around the ring, collecting distinct groups, until either all
    // required groups have been visited or we reach a token owned by our own group.
    for (TokenInfo<Unit> curr = token.prevInRing(); ; curr = curr.prev) {
        replicationStart = curr.token;
        currGroup = curr.owningUnit.group;
        if (!visitor.add(currGroup))
            // Group is already seen.
            continue;
        if (visitor.visitedAll())
            break;
        // Still missing groups: this position becomes the latest threshold candidate.
        replicationThreshold = replicationStart;
        // so this is where our replication range begins
        if (currGroup == tokenGroup)
            break;
    }
    if (newUnitGroup == tokenGroup)
        // new token is always a boundary (as long as it's closer than replicationStart)
        replicationThreshold = token.token;
    else if (newUnitGroup != currGroup && visitor.seen(newUnitGroup))
        // already has new group in replication span before last seen. cannot be affected
        replicationThreshold = replicationStart;
    // NOTE(review): clean() presumably releases/resets visitor bookkeeping for reuse — confirm.
    visitor.clean();
    token.replicationThreshold = replicationThreshold;
    token.replicationStart = replicationStart;
    return replicationStart;
}
Usage of org.apache.cassandra.dht.Token in the Apache Cassandra project: class ViewBuilder, method getCompactionInfo.
/**
 * Reports view-build progress as the number of local token ranges still to process
 * out of the total number of local ranges.
 *
 * NOTE(review): this approximation resets the "remaining" counter each time a range
 * containing the last processed token is passed (or when no token has been processed
 * yet), so only ranges after that point count as left to do — it assumes
 * getLocalRanges returns ranges in ring order; confirm against StorageService.
 *
 * @return a CompactionInfo describing VIEW_BUILD progress in units of "ranges"
 */
public CompactionInfo getCompactionInfo() {
    long remaining = 0;
    long total = 0;
    Token marker = prevToken;
    for (Range<Token> range : StorageService.instance.getLocalRanges(baseCfs.keyspace.getName())) {
        total++;
        remaining++;
        // Ranges up to and including the one holding the marker are already done.
        if (marker == null || range.contains(marker))
            remaining = 0;
    }
    return new CompactionInfo(baseCfs.metadata(), OperationType.VIEW_BUILD, remaining, total, "ranges", compactionId);
}
Aggregations