Use of org.apache.cassandra.dht.Bounds in project cassandra by apache.
The class ReplicaPlanIterator, method getRestrictedRanges.
/**
 * Compute all ranges we're going to query, in sorted order. Nodes can be replica destinations for many ranges,
 * so we need to restrict each scan to the specific range we want, or else we'd get duplicate results.
 */
private static List<AbstractBounds<PartitionPosition>> getRestrictedRanges(final AbstractBounds<PartitionPosition> queryRange) {
    // special case for bounds containing exactly 1 (non-minimum) token
    if (queryRange instanceof Bounds && queryRange.left.equals(queryRange.right) && !queryRange.left.isMinimum()) {
        return Collections.singletonList(queryRange);
    }
    TokenMetadata tokenMetadata = StorageService.instance.getTokenMetadata();
    List<AbstractBounds<PartitionPosition>> ranges = new ArrayList<>();
    // divide the queryRange into pieces delimited by the ring and minimum tokens
    Iterator<Token> ringIter = TokenMetadata.ringIterator(tokenMetadata.sortedTokens(), queryRange.left.getToken(), true);
    AbstractBounds<PartitionPosition> remainder = queryRange;
    while (ringIter.hasNext()) {
        /*
         * remainder is a range/bounds of partition positions and we want to split it with a token. We want to split
         * using the key returned by token.maxKeyBound. For instance, if remainder is [DK(10, 'foo'), DK(20, 'bar')],
         * and we have 3 nodes with tokens 0, 15, 30, we want to split remainder to A=[DK(10, 'foo'), 15] and
         * B=(15, DK(20, 'bar')]. But since we can't mix tokens and keys at the same time in a range, we use
         * 15.maxKeyBound() to have A include all keys having 15 as token and B include none of those (since that is
         * what our node owns).
         */
        Token upperBoundToken = ringIter.next();
        PartitionPosition upperBound = upperBoundToken.maxKeyBound();
        if (!remainder.left.equals(upperBound) && !remainder.contains(upperBound))
            // no more splits
            break;
        Pair<AbstractBounds<PartitionPosition>, AbstractBounds<PartitionPosition>> splits = remainder.split(upperBound);
        if (splits == null)
            continue;
        ranges.add(splits.left);
        remainder = splits.right;
    }
    ranges.add(remainder);
    return ranges;
}
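The comment inside the loop is the heart of the method: each ring token's maxKeyBound() cuts the remaining query range into the piece owned up to that token and the piece that follows it. A minimal, hypothetical sketch of that splitting idea, with plain longs standing in for Token and PartitionPosition (the class and variable names below are invented for illustration, not Cassandra API):

import java.util.ArrayList;
import java.util.List;

public class RingSplitSketch {
    public static void main(String[] args) {
        long[] ringTokens = { 0, 15, 30 };
        long queryLeft = 10, queryRight = 20;

        List<long[]> pieces = new ArrayList<>();
        long left = queryLeft;
        for (long token : ringTokens) {
            // a ring token only splits the remainder if it falls strictly inside it
            if (token <= left || token >= queryRight)
                continue;
            pieces.add(new long[] { left, token }); // piece ending at (and including) the token
            left = token;                           // the next piece starts just past the token
        }
        pieces.add(new long[] { left, queryRight });

        for (long[] p : pieces)
            System.out.println(p[0] + " .. " + p[1]);
        // prints "10 .. 15" and "15 .. 20", mirroring pieces A and B in the comment above
    }
}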
Use of org.apache.cassandra.dht.Bounds in project cassandra by apache.
The class CounterCacheTest, method testCounterCacheInvalidate.
@Test
public void testCounterCacheInvalidate() {
    ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore(COUNTER1);
    cfs.truncateBlocking();
    CacheService.instance.invalidateCounterCache();
    Clustering<?> c1 = CBuilder.create(cfs.metadata().comparator).add(ByteBufferUtil.bytes(1)).build();
    Clustering<?> c2 = CBuilder.create(cfs.metadata().comparator).add(ByteBufferUtil.bytes(2)).build();
    ColumnMetadata cd = cfs.metadata().getColumn(ByteBufferUtil.bytes("c"));
    assertEquals(0, CacheService.instance.counterCache.size());
    assertNull(cfs.getCachedCounter(bytes(1), c1, cd, null));
    assertNull(cfs.getCachedCounter(bytes(1), c2, cd, null));
    assertNull(cfs.getCachedCounter(bytes(2), c1, cd, null));
    assertNull(cfs.getCachedCounter(bytes(2), c2, cd, null));
    assertNull(cfs.getCachedCounter(bytes(3), c1, cd, null));
    assertNull(cfs.getCachedCounter(bytes(3), c2, cd, null));
    cfs.putCachedCounter(bytes(1), c1, cd, null, ClockAndCount.create(1L, 1L));
    cfs.putCachedCounter(bytes(1), c2, cd, null, ClockAndCount.create(1L, 2L));
    cfs.putCachedCounter(bytes(2), c1, cd, null, ClockAndCount.create(2L, 1L));
    cfs.putCachedCounter(bytes(2), c2, cd, null, ClockAndCount.create(2L, 2L));
    cfs.putCachedCounter(bytes(3), c1, cd, null, ClockAndCount.create(3L, 1L));
    cfs.putCachedCounter(bytes(3), c2, cd, null, ClockAndCount.create(3L, 2L));
    assertEquals(ClockAndCount.create(1L, 1L), cfs.getCachedCounter(bytes(1), c1, cd, null));
    assertEquals(ClockAndCount.create(1L, 2L), cfs.getCachedCounter(bytes(1), c2, cd, null));
    assertEquals(ClockAndCount.create(2L, 1L), cfs.getCachedCounter(bytes(2), c1, cd, null));
    assertEquals(ClockAndCount.create(2L, 2L), cfs.getCachedCounter(bytes(2), c2, cd, null));
    assertEquals(ClockAndCount.create(3L, 1L), cfs.getCachedCounter(bytes(3), c1, cd, null));
    assertEquals(ClockAndCount.create(3L, 2L), cfs.getCachedCounter(bytes(3), c2, cd, null));
    cfs.invalidateCounterCache(Collections.singleton(new Bounds<Token>(cfs.decorateKey(bytes(1)).getToken(), cfs.decorateKey(bytes(2)).getToken())));
    assertEquals(2, CacheService.instance.counterCache.size());
    assertNull(cfs.getCachedCounter(bytes(1), c1, cd, null));
    assertNull(cfs.getCachedCounter(bytes(1), c2, cd, null));
    assertNull(cfs.getCachedCounter(bytes(2), c1, cd, null));
    assertNull(cfs.getCachedCounter(bytes(2), c2, cd, null));
    assertEquals(ClockAndCount.create(3L, 1L), cfs.getCachedCounter(bytes(3), c1, cd, null));
    assertEquals(ClockAndCount.create(3L, 2L), cfs.getCachedCounter(bytes(3), c2, cd, null));
}
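A side note on why the invalidation above drops the entries for keys 1 and 2 but keeps key 3: Bounds is closed on both ends, [left, right], unlike Range, which excludes its left endpoint. A minimal sketch with plain ints standing in for tokens (the class and method names are hypothetical, and it assumes key 3's token falls outside the bounds, as it does in this test):

public class BoundsInclusiveSketch {
    // Bounds-style containment: [left, right]
    static boolean boundsContains(int left, int right, int x) {
        return left <= x && x <= right;
    }

    // Range-style containment: (left, right]
    static boolean rangeContains(int left, int right, int x) {
        return left < x && x <= right;
    }

    public static void main(String[] args) {
        System.out.println(boundsContains(1, 2, 1)); // true  -> entries for key 1 invalidated
        System.out.println(boundsContains(1, 2, 2)); // true  -> entries for key 2 invalidated
        System.out.println(boundsContains(1, 2, 3)); // false -> entries for key 3 survive
        System.out.println(rangeContains(1, 2, 1));  // false -> a Range would have kept key 1
    }
}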
Use of org.apache.cassandra.dht.Bounds in project cassandra by apache.
The class CompactionManager, method findSSTablesToAnticompact.
@VisibleForTesting
static Set<SSTableReader> findSSTablesToAnticompact(Iterator<SSTableReader> sstableIterator, List<Range<Token>> normalizedRanges, UUID parentRepairSession) {
    Set<SSTableReader> fullyContainedSSTables = new HashSet<>();
    while (sstableIterator.hasNext()) {
        SSTableReader sstable = sstableIterator.next();
        Bounds<Token> sstableBounds = new Bounds<>(sstable.first.getToken(), sstable.last.getToken());
        for (Range<Token> r : normalizedRanges) {
            // ranges are normalized - no wrap around - if first and last are contained we know that all tokens are contained in the range
            if (r.contains(sstable.first.getToken()) && r.contains(sstable.last.getToken())) {
                logger.info("{} SSTable {} fully contained in range {}, mutating repairedAt instead of anticompacting", PreviewKind.NONE.logPrefix(parentRepairSession), sstable, r);
                fullyContainedSSTables.add(sstable);
                sstableIterator.remove();
                break;
            } else if (r.intersects(sstableBounds)) {
                logger.info("{} SSTable {} ({}) will be anticompacted on range {}", PreviewKind.NONE.logPrefix(parentRepairSession), sstable, sstableBounds, r);
            }
        }
    }
    return fullyContainedSSTables;
}
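A hedged sketch of the classification performed above, using plain long intervals in place of sstable token bounds and a normalized, non-wrapping repair range (all names below are invented for illustration): an sstable whose first and last tokens both fall inside the range is fully contained and only needs its repairedAt mutated, one that merely overlaps the range is anticompacted, and anything else is skipped.

public class AnticompactionClassifySketch {
    enum Action { MUTATE_REPAIRED_AT, ANTICOMPACT, SKIP }

    // the range is half-open on the left, (rangeLeft, rangeRight], like a normalized Range<Token>;
    // the sstable bounds [sstFirst, sstLast] are closed on both ends, like Bounds<Token>
    static Action classify(long sstFirst, long sstLast, long rangeLeft, long rangeRight) {
        boolean firstIn = sstFirst > rangeLeft && sstFirst <= rangeRight;
        boolean lastIn = sstLast > rangeLeft && sstLast <= rangeRight;
        if (firstIn && lastIn)
            return Action.MUTATE_REPAIRED_AT;                 // fully contained in the range
        boolean intersects = sstFirst <= rangeRight && sstLast > rangeLeft;
        return intersects ? Action.ANTICOMPACT : Action.SKIP; // partial overlap vs. no overlap
    }

    public static void main(String[] args) {
        System.out.println(classify(10, 20, 0, 50)); // MUTATE_REPAIRED_AT
        System.out.println(classify(10, 60, 0, 50)); // ANTICOMPACT
        System.out.println(classify(60, 70, 0, 50)); // SKIP
    }
}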
Use of org.apache.cassandra.dht.Bounds in project cassandra by apache.
The class CompactionManager, method validateSSTableBoundsForAnticompaction.
static void validateSSTableBoundsForAnticompaction(UUID sessionID, Collection<SSTableReader> sstables, RangesAtEndpoint ranges) {
    List<Range<Token>> normalizedRanges = Range.normalize(ranges.ranges());
    for (SSTableReader sstable : sstables) {
        Bounds<Token> bounds = new Bounds<>(sstable.first.getToken(), sstable.last.getToken());
        if (!Iterables.any(normalizedRanges, r -> (r.contains(bounds.left) && r.contains(bounds.right)) || r.intersects(bounds))) {
            // this should never happen - in PendingAntiCompaction#getSSTables we select all sstables that intersect the repaired ranges, that can't have changed here
            String message = String.format("%s SSTable %s (%s) does not intersect repaired ranges %s, this sstable should not have been included.", PreviewKind.NONE.logPrefix(sessionID), sstable, bounds, normalizedRanges);
            logger.error(message);
            throw new IllegalStateException(message);
        }
    }
}
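The check above assumes the ranges have already been normalized. A rough, hypothetical sketch of what normalization means here, sorted and non-wrapping with overlapping ranges merged, using int pairs rather than the real Range.normalize:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class NormalizeSketch {
    // merges overlapping [start, end] pairs into a sorted, disjoint list
    static List<int[]> normalize(List<int[]> ranges) {
        List<int[]> sorted = new ArrayList<>(ranges);
        sorted.sort(Comparator.comparingInt(r -> r[0]));
        List<int[]> out = new ArrayList<>();
        for (int[] r : sorted) {
            int[] last = out.isEmpty() ? null : out.get(out.size() - 1);
            if (last != null && r[0] <= last[1])
                last[1] = Math.max(last[1], r[1]);   // overlap: extend the previous range
            else
                out.add(new int[] { r[0], r[1] });   // gap: start a new range
        }
        return out;
    }

    public static void main(String[] args) {
        List<int[]> merged = normalize(List.of(new int[] { 30, 40 }, new int[] { 0, 10 }, new int[] { 5, 20 }));
        merged.forEach(r -> System.out.println(r[0] + ".." + r[1])); // prints 0..20 and 30..40
    }
}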
Use of org.apache.cassandra.dht.Bounds in project cassandra by apache.
The class LeveledManifest, method getCandidatesFor.
/**
 * @return highest-priority sstables to compact for the given level.
 * If no compactions are possible (because of concurrent compactions or because some sstables are excluded
 * for prior failure), will return an empty list. Never returns null.
 */
private Collection<SSTableReader> getCandidatesFor(int level) {
    assert !generations.get(level).isEmpty();
    logger.trace("Choosing candidates for L{}", level);
    final Set<SSTableReader> compacting = cfs.getTracker().getCompacting();
    if (level == 0) {
        Set<SSTableReader> compactingL0 = getCompactingL0();
        PartitionPosition lastCompactingKey = null;
        PartitionPosition firstCompactingKey = null;
        for (SSTableReader candidate : compactingL0) {
            if (firstCompactingKey == null || candidate.first.compareTo(firstCompactingKey) < 0)
                firstCompactingKey = candidate.first;
            if (lastCompactingKey == null || candidate.last.compareTo(lastCompactingKey) > 0)
                lastCompactingKey = candidate.last;
        }
        // L0 is the dumping ground for new sstables which thus may overlap each other.
        //
        // We treat L0 compactions specially:
        // 1a. add sstables to the candidate set until we have at least maxSSTableSizeInMB
        // 1b. prefer choosing older sstables as candidates, to newer ones
        // 1c. any L0 sstables that overlap a candidate, will also become candidates
        // 2. At most max_threshold sstables from L0 will be compacted at once
        // 3. If total candidate size is less than maxSSTableSizeInMB, we won't bother compacting with L1,
        //    and the result of the compaction will stay in L0 instead of being promoted (see promote())
        //
        // Note that we ignore suspect-ness of L1 sstables here, since if an L1 sstable is suspect we're
        // basically screwed, since we expect all or most L0 sstables to overlap with each L1 sstable.
        // So if an L1 sstable is suspect we can't do much besides try anyway and hope for the best.
        Set<SSTableReader> candidates = new HashSet<>();
        Map<SSTableReader, Bounds<Token>> remaining = genBounds(Iterables.filter(generations.get(0), Predicates.not(SSTableReader::isMarkedSuspect)));
        for (SSTableReader sstable : ageSortedSSTables(remaining.keySet())) {
            if (candidates.contains(sstable))
                continue;
            Sets.SetView<SSTableReader> overlappedL0 = Sets.union(Collections.singleton(sstable), overlappingWithBounds(sstable, remaining));
            if (!Sets.intersection(overlappedL0, compactingL0).isEmpty())
                continue;
            for (SSTableReader newCandidate : overlappedL0) {
                if (firstCompactingKey == null || lastCompactingKey == null || overlapping(firstCompactingKey.getToken(), lastCompactingKey.getToken(), Collections.singleton(newCandidate)).size() == 0)
                    candidates.add(newCandidate);
                remaining.remove(newCandidate);
            }
            if (candidates.size() > cfs.getMaximumCompactionThreshold()) {
                // limit to only the cfs.getMaximumCompactionThreshold() oldest candidates
                candidates = new HashSet<>(ageSortedSSTables(candidates).subList(0, cfs.getMaximumCompactionThreshold()));
                break;
            }
        }
        // leave everything in L0 if we didn't end up with a full sstable's worth of data
        if (SSTableReader.getTotalBytes(candidates) > maxSSTableSizeInBytes) {
            // add sstables from L1 that overlap candidates
            // if the overlapping ones are already busy in a compaction, leave it out.
            // TODO try to find a set of L0 sstables that only overlaps with non-busy L1 sstables
            Set<SSTableReader> l1overlapping = overlapping(candidates, generations.get(1));
            if (Sets.intersection(l1overlapping, compacting).size() > 0)
                return Collections.emptyList();
            if (!overlapping(candidates, compactingL0).isEmpty())
                return Collections.emptyList();
            candidates = Sets.union(candidates, l1overlapping);
        }
        if (candidates.size() < 2)
            return Collections.emptyList();
        else
            return candidates;
    }
    // look for a non-suspect keyspace to compact with, starting with where we left off last time,
    // and wrapping back to the beginning of the generation if necessary
    Map<SSTableReader, Bounds<Token>> sstablesNextLevel = genBounds(generations.get(level + 1));
    Iterator<SSTableReader> levelIterator = generations.wrappingIterator(level, lastCompactedSSTables[level]);
    while (levelIterator.hasNext()) {
        SSTableReader sstable = levelIterator.next();
        Set<SSTableReader> candidates = Sets.union(Collections.singleton(sstable), overlappingWithBounds(sstable, sstablesNextLevel));
        if (Iterables.any(candidates, SSTableReader::isMarkedSuspect))
            continue;
        if (Sets.intersection(candidates, compacting).isEmpty())
            return candidates;
    }
    // all the sstables were suspect or overlapped with something suspect
    return Collections.emptyList();
}
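For levels above L0, the candidate set built in the final loop is simply the chosen sstable plus every next-level sstable whose token bounds overlap it. A hedged sketch of that overlap expansion, with plain long intervals standing in for Bounds<Token> (the class and names below are invented for illustration, not the LeveledManifest API):

import java.util.ArrayList;
import java.util.List;

public class OverlapCandidatesSketch {
    record Sstable(String name, long first, long last) {}

    // closed [first, last] intervals overlap unless one ends before the other starts
    static boolean overlaps(Sstable a, Sstable b) {
        return a.first() <= b.last() && b.first() <= a.last();
    }

    static List<Sstable> candidatesFor(Sstable chosen, List<Sstable> nextLevel) {
        List<Sstable> candidates = new ArrayList<>();
        candidates.add(chosen);
        for (Sstable s : nextLevel)
            if (overlaps(chosen, s))
                candidates.add(s);
        return candidates;
    }

    public static void main(String[] args) {
        Sstable l1 = new Sstable("l1-a", 10, 30);
        List<Sstable> l2 = List.of(new Sstable("l2-a", 0, 15),
                                   new Sstable("l2-b", 25, 40),
                                   new Sstable("l2-c", 50, 60));
        // prints l1-a plus the two overlapping L2 sstables; l2-c is left untouched
        candidatesFor(l1, l2).forEach(s -> System.out.println(s.name()));
    }
}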