Use of org.apache.cassandra.dht.Splitter in project cassandra by apache.
From the class DiskBoundaryManager, method getDiskBoundaries.
/**
* Returns a list of disk boundaries, the result will differ depending on whether vnodes are enabled or not.
*
* What is returned are upper bounds for the disks, meaning everything from partitioner.minToken up to
 * getDiskBoundaries(..).get(0) should be on the first disk, everything from getDiskBoundaries(..).get(0) up to getDiskBoundaries(..).get(1) should be on the second disk
* etc.
*
 * The final entry in the returned list will always be the upper key bound of the partitioner's maximum token.
*/
private static List<PartitionPosition> getDiskBoundaries(RangesAtEndpoint replicas, IPartitioner partitioner, Directories.DataDirectory[] dataDirectories) {
assert partitioner.splitter().isPresent();
Splitter splitter = partitioner.splitter().get();
boolean dontSplitRanges = DatabaseDescriptor.getNumTokens() > 1;
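// with vnodes (num_tokens > 1) there are normally enough whole ranges to distribute across the disks, so avoid splitting them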
List<Splitter.WeightedRange> weightedRanges = new ArrayList<>(replicas.size());
// note that Range.sort unwraps any wraparound ranges, so we need to sort them here
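// full ranges are weighted 1.0 and transient ranges only 0.1, since transiently replicated data is expected to occupy far less space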
for (Range<Token> r : Range.sort(replicas.onlyFull().ranges()))
    weightedRanges.add(new Splitter.WeightedRange(1.0, r));
for (Range<Token> r : Range.sort(replicas.onlyTransient().ranges()))
    weightedRanges.add(new Splitter.WeightedRange(0.1, r));
weightedRanges.sort(Comparator.comparing(Splitter.WeightedRange::left));
List<Token> boundaries = splitter.splitOwnedRanges(dataDirectories.length, weightedRanges, dontSplitRanges);
// If we can't split by ranges, split evenly to ensure utilisation of all disks
if (dontSplitRanges && boundaries.size() < dataDirectories.length)
boundaries = splitter.splitOwnedRanges(dataDirectories.length, weightedRanges, false);
List<PartitionPosition> diskBoundaries = new ArrayList<>();
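// convert each boundary token to its upper key bound; the final splitter boundary is dropped and replaced below by the partitioner's maximum token, so the last disk covers the rest of the ring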
for (int i = 0; i < boundaries.size() - 1; i++)
    diskBoundaries.add(boundaries.get(i).maxKeyBound());
diskBoundaries.add(partitioner.getMaximumToken().maxKeyBound());
return diskBoundaries;
}
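As a rough illustration of how the returned boundaries are meant to be consumed, the helper below picks a data directory index for a key by finding the first upper bound that is not smaller than the key's position. This is a minimal sketch, not a method of DiskBoundaryManager; the name diskIndexFor and the linear scan are assumptions for illustration only.

// Hypothetical helper (not part of DiskBoundaryManager): map a partition to a disk by
// locating the first boundary that is >= the key's position. Because the last boundary
// is the partitioner's maximum token, every key matches some entry.
private static int diskIndexFor(List<PartitionPosition> diskBoundaries, DecoratedKey key) {
    for (int i = 0; i < diskBoundaries.size(); i++) {
        if (diskBoundaries.get(i).compareTo(key) >= 0)
            return i;
    }
    throw new AssertionError("unreachable: the last boundary is the maximum token's upper key bound");
}

Since the boundaries are sorted by token and end with the maximum-token sentinel, a binary search would work equally well; the linear scan just keeps the sketch short.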