Example 1 with Bounds

Use of org.apache.cassandra.dht.Bounds in project cassandra by apache.

The class ReplicaPlanIterator, method getRestrictedRanges.

/**
 * Compute all ranges we're going to query, in sorted order. Nodes can be replica destinations for many ranges,
 * so we need to restrict each scan to the specific range we want, or else we'd get duplicate results.
 */
private static List<AbstractBounds<PartitionPosition>> getRestrictedRanges(final AbstractBounds<PartitionPosition> queryRange) {
    // special case for bounds containing exactly 1 (non-minimum) token
    if (queryRange instanceof Bounds && queryRange.left.equals(queryRange.right) && !queryRange.left.isMinimum()) {
        return Collections.singletonList(queryRange);
    }
    TokenMetadata tokenMetadata = StorageService.instance.getTokenMetadata();
    List<AbstractBounds<PartitionPosition>> ranges = new ArrayList<>();
    // divide the queryRange into pieces delimited by the ring and minimum tokens
    Iterator<Token> ringIter = TokenMetadata.ringIterator(tokenMetadata.sortedTokens(), queryRange.left.getToken(), true);
    AbstractBounds<PartitionPosition> remainder = queryRange;
    while (ringIter.hasNext()) {
        /*
         * remainder is a range/bounds of partition positions and we want to split it with a token. We want to split
         * using the key returned by token.maxKeyBound. For instance, if remainder is [DK(10, 'foo'), DK(20, 'bar')],
         * and we have 3 nodes with tokens 0, 15, 30, we want to split remainder to A=[DK(10, 'foo'), 15] and
         * B=(15, DK(20, 'bar')]. But since we can't mix tokens and keys at the same time in a range, we use
         * 15.maxKeyBound() to have A include all keys having 15 as token and B include none of those (since that is
         * what our node owns).
         */
        Token upperBoundToken = ringIter.next();
        PartitionPosition upperBound = upperBoundToken.maxKeyBound();
        if (!remainder.left.equals(upperBound) && !remainder.contains(upperBound))
            // no more splits
            break;
        Pair<AbstractBounds<PartitionPosition>, AbstractBounds<PartitionPosition>> splits = remainder.split(upperBound);
        if (splits == null)
            continue;
        ranges.add(splits.left);
        remainder = splits.right;
    }
    ranges.add(remainder);
    return ranges;
}
Also used : AbstractBounds(org.apache.cassandra.dht.AbstractBounds) PartitionPosition(org.apache.cassandra.db.PartitionPosition) Bounds(org.apache.cassandra.dht.Bounds) ArrayList(java.util.ArrayList) Token(org.apache.cassandra.dht.Token) TokenMetadata(org.apache.cassandra.locator.TokenMetadata)
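
The block comment in the loop is easiest to see with concrete values. Below is a minimal, hypothetical sketch (tokens 10, 15, and 20 are made up, and it assumes an initialized Murmur3Partitioner environment) of how a single token's maxKeyBound() splits a bounds of partition positions:

import org.apache.cassandra.db.PartitionPosition;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.dht.Bounds;
import org.apache.cassandra.dht.Murmur3Partitioner.LongToken;
import org.apache.cassandra.utils.Pair;

static void splitAtTokenSketch() {
    // remainder covers everything from token 10's smallest key to token 20's largest key
    PartitionPosition left = new LongToken(10).minKeyBound();
    PartitionPosition right = new LongToken(20).maxKeyBound();
    AbstractBounds<PartitionPosition> remainder = new Bounds<>(left, right);
    // 15.maxKeyBound() sorts after every key whose token is 15, so the left half
    // keeps all keys with token 15 and the right half keeps none of them
    Pair<AbstractBounds<PartitionPosition>, AbstractBounds<PartitionPosition>> splits =
        remainder.split(new LongToken(15).maxKeyBound());
    // splits.left  covers [10.minKeyBound(), 15.maxKeyBound()]
    // splits.right covers (15.maxKeyBound(), 20.maxKeyBound()]
}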

Example 2 with Bounds

Use of org.apache.cassandra.dht.Bounds in project cassandra by apache.

The class CounterCacheTest, method testCounterCacheInvalidate.

@Test
public void testCounterCacheInvalidate() {
    ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore(COUNTER1);
    cfs.truncateBlocking();
    CacheService.instance.invalidateCounterCache();
    Clustering<?> c1 = CBuilder.create(cfs.metadata().comparator).add(ByteBufferUtil.bytes(1)).build();
    Clustering<?> c2 = CBuilder.create(cfs.metadata().comparator).add(ByteBufferUtil.bytes(2)).build();
    ColumnMetadata cd = cfs.metadata().getColumn(ByteBufferUtil.bytes("c"));
    assertEquals(0, CacheService.instance.counterCache.size());
    assertNull(cfs.getCachedCounter(bytes(1), c1, cd, null));
    assertNull(cfs.getCachedCounter(bytes(1), c2, cd, null));
    assertNull(cfs.getCachedCounter(bytes(2), c1, cd, null));
    assertNull(cfs.getCachedCounter(bytes(2), c2, cd, null));
    assertNull(cfs.getCachedCounter(bytes(3), c1, cd, null));
    assertNull(cfs.getCachedCounter(bytes(3), c2, cd, null));
    cfs.putCachedCounter(bytes(1), c1, cd, null, ClockAndCount.create(1L, 1L));
    cfs.putCachedCounter(bytes(1), c2, cd, null, ClockAndCount.create(1L, 2L));
    cfs.putCachedCounter(bytes(2), c1, cd, null, ClockAndCount.create(2L, 1L));
    cfs.putCachedCounter(bytes(2), c2, cd, null, ClockAndCount.create(2L, 2L));
    cfs.putCachedCounter(bytes(3), c1, cd, null, ClockAndCount.create(3L, 1L));
    cfs.putCachedCounter(bytes(3), c2, cd, null, ClockAndCount.create(3L, 2L));
    assertEquals(ClockAndCount.create(1L, 1L), cfs.getCachedCounter(bytes(1), c1, cd, null));
    assertEquals(ClockAndCount.create(1L, 2L), cfs.getCachedCounter(bytes(1), c2, cd, null));
    assertEquals(ClockAndCount.create(2L, 1L), cfs.getCachedCounter(bytes(2), c1, cd, null));
    assertEquals(ClockAndCount.create(2L, 2L), cfs.getCachedCounter(bytes(2), c2, cd, null));
    assertEquals(ClockAndCount.create(3L, 1L), cfs.getCachedCounter(bytes(3), c1, cd, null));
    assertEquals(ClockAndCount.create(3L, 2L), cfs.getCachedCounter(bytes(3), c2, cd, null));
    cfs.invalidateCounterCache(Collections.singleton(new Bounds<Token>(cfs.decorateKey(bytes(1)).getToken(), cfs.decorateKey(bytes(2)).getToken())));
    assertEquals(2, CacheService.instance.counterCache.size());
    assertNull(cfs.getCachedCounter(bytes(1), c1, cd, null));
    assertNull(cfs.getCachedCounter(bytes(1), c2, cd, null));
    assertNull(cfs.getCachedCounter(bytes(2), c1, cd, null));
    assertNull(cfs.getCachedCounter(bytes(2), c2, cd, null));
    assertEquals(ClockAndCount.create(3L, 1L), cfs.getCachedCounter(bytes(3), c1, cd, null));
    assertEquals(ClockAndCount.create(3L, 2L), cfs.getCachedCounter(bytes(3), c2, cd, null));
}
Also used : ColumnMetadata(org.apache.cassandra.schema.ColumnMetadata) Bounds(org.apache.cassandra.dht.Bounds) Test(org.junit.Test)
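
Why exactly keys 1 and 2 drop out while key 3 survives: Bounds is closed on both endpoints, so the invalidated token range includes the tokens of both boundary keys. A minimal, hypothetical sketch of that property (it assumes, as the test does, that the token of bytes(1) sorts no later than the token of bytes(2)):

// Bounds<Token> is inclusive on both ends, unlike Range<Token>, which is
// open on the left: both boundary tokens fall inside the invalidated span.
Token t1 = cfs.decorateKey(ByteBufferUtil.bytes(1)).getToken();
Token t2 = cfs.decorateKey(ByteBufferUtil.bytes(2)).getToken();
Bounds<Token> invalidated = new Bounds<>(t1, t2);
assert invalidated.contains(t1); // left endpoint included
assert invalidated.contains(t2); // right endpoint included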

Example 3 with Bounds

Use of org.apache.cassandra.dht.Bounds in project cassandra by apache.

The class CompactionManager, method findSSTablesToAnticompact.

@VisibleForTesting
static Set<SSTableReader> findSSTablesToAnticompact(Iterator<SSTableReader> sstableIterator, List<Range<Token>> normalizedRanges, UUID parentRepairSession) {
    Set<SSTableReader> fullyContainedSSTables = new HashSet<>();
    while (sstableIterator.hasNext()) {
        SSTableReader sstable = sstableIterator.next();
        Bounds<Token> sstableBounds = new Bounds<>(sstable.first.getToken(), sstable.last.getToken());
        for (Range<Token> r : normalizedRanges) {
            // ranges are normalized (no wrap-around), so if the first and last tokens are contained, we know all tokens in between are contained in the range
            if (r.contains(sstable.first.getToken()) && r.contains(sstable.last.getToken())) {
                logger.info("{} SSTable {} fully contained in range {}, mutating repairedAt instead of anticompacting", PreviewKind.NONE.logPrefix(parentRepairSession), sstable, r);
                fullyContainedSSTables.add(sstable);
                sstableIterator.remove();
                break;
            } else if (r.intersects(sstableBounds)) {
                logger.info("{} SSTable {} ({}) will be anticompacted on range {}", PreviewKind.NONE.logPrefix(parentRepairSession), sstable, sstableBounds, r);
            }
        }
    }
    return fullyContainedSSTables;
}
Also used : SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) AbstractBounds(org.apache.cassandra.dht.AbstractBounds) Bounds(org.apache.cassandra.dht.Bounds) Token(org.apache.cassandra.dht.Token) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
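
The containment-versus-intersection distinction is the core of this method. A minimal sketch with made-up LongToken values (not from the original source) showing both branches:

import org.apache.cassandra.dht.Bounds;
import org.apache.cassandra.dht.Murmur3Partitioner.LongToken;
import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;

static void containmentSketch() {
    // Range is (left, right]: a normalized, non-wrapping repaired range
    Range<Token> repaired = new Range<>(new LongToken(0), new LongToken(100));
    // fully contained: first and last token both inside, so the sstable's
    // repairedAt can be mutated without anticompacting it
    Bounds<Token> contained = new Bounds<>(new LongToken(10), new LongToken(50));
    assert repaired.contains(contained.left) && repaired.contains(contained.right);
    // merely intersecting: the sstable straddles the range boundary and
    // must be anticompacted to separate repaired from unrepaired data
    Bounds<Token> straddling = new Bounds<>(new LongToken(50), new LongToken(150));
    assert !repaired.contains(straddling.right);
    assert repaired.intersects(straddling);
}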

Example 4 with Bounds

Use of org.apache.cassandra.dht.Bounds in project cassandra by apache.

The class CompactionManager, method validateSSTableBoundsForAnticompaction.

static void validateSSTableBoundsForAnticompaction(UUID sessionID, Collection<SSTableReader> sstables, RangesAtEndpoint ranges) {
    List<Range<Token>> normalizedRanges = Range.normalize(ranges.ranges());
    for (SSTableReader sstable : sstables) {
        Bounds<Token> bounds = new Bounds<>(sstable.first.getToken(), sstable.last.getToken());
        if (!Iterables.any(normalizedRanges, r -> (r.contains(bounds.left) && r.contains(bounds.right)) || r.intersects(bounds))) {
            // this should never happen: in PendingAntiCompaction#getSSTables we select all sstables that intersect the repaired ranges, and that can't have changed here
            String message = String.format("%s SSTable %s (%s) does not intersect repaired ranges %s, this sstable should not have been included.", PreviewKind.NONE.logPrefix(sessionID), sstable, bounds, normalizedRanges);
            logger.error(message);
            throw new IllegalStateException(message);
        }
    }
}
Also used : SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) AbstractBounds(org.apache.cassandra.dht.AbstractBounds) Bounds(org.apache.cassandra.dht.Bounds) Token(org.apache.cassandra.dht.Token) Range(org.apache.cassandra.dht.Range) RangesAtEndpoint(org.apache.cassandra.locator.RangesAtEndpoint)
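
This check relies on Range.normalize, which sorts the input ranges, merges overlapping or adjacent ones, and unwraps any wrap-around range into non-wrapping pieces, so the simple contains/intersects tests above are valid. A minimal sketch with made-up token values:

import java.util.Collections;
import java.util.List;
import org.apache.cassandra.dht.Murmur3Partitioner.LongToken;
import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;

static void normalizeSketch() {
    // (50, 10] wraps around the ring's minimum token; reasoning about
    // "first and last contained" directly on it would be wrong
    Range<Token> wrapping = new Range<>(new LongToken(50), new LongToken(10));
    List<Range<Token>> normalized = Range.normalize(Collections.singleton(wrapping));
    // the wrapping range is unwrapped into two non-wrapping pieces,
    // one ending at and one starting from the minimum token
    assert normalized.size() == 2;
}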

Example 5 with Bounds

Use of org.apache.cassandra.dht.Bounds in project cassandra by apache.

The class LeveledManifest, method getCandidatesFor.

/**
 * @return highest-priority sstables to compact for the given level.
 * If no compactions are possible (because of concurrent compactions or because some sstables are excluded
 * for prior failure), will return an empty list.  Never returns null.
 */
private Collection<SSTableReader> getCandidatesFor(int level) {
    assert !generations.get(level).isEmpty();
    logger.trace("Choosing candidates for L{}", level);
    final Set<SSTableReader> compacting = cfs.getTracker().getCompacting();
    if (level == 0) {
        Set<SSTableReader> compactingL0 = getCompactingL0();
        PartitionPosition lastCompactingKey = null;
        PartitionPosition firstCompactingKey = null;
        for (SSTableReader candidate : compactingL0) {
            if (firstCompactingKey == null || candidate.first.compareTo(firstCompactingKey) < 0)
                firstCompactingKey = candidate.first;
            if (lastCompactingKey == null || candidate.last.compareTo(lastCompactingKey) > 0)
                lastCompactingKey = candidate.last;
        }
        // L0 is the dumping ground for new sstables which thus may overlap each other.
        // 
        // We treat L0 compactions specially:
        // 1a. add sstables to the candidate set until we have at least maxSSTableSizeInMB
        // 1b. prefer older sstables as candidates over newer ones
        // 1c. any L0 sstables that overlap a candidate will also become candidates
        // 2. At most max_threshold sstables from L0 will be compacted at once
        // 3. If total candidate size is less than maxSSTableSizeInMB, we won't bother compacting with L1,
        // and the result of the compaction will stay in L0 instead of being promoted (see promote())
        // 
        // Note that we ignore suspect-ness of L1 sstables here, since if an L1 sstable is suspect we're
        // basically screwed, since we expect all or most L0 sstables to overlap with each L1 sstable.
        // So if an L1 sstable is suspect we can't do much besides try anyway and hope for the best.
        Set<SSTableReader> candidates = new HashSet<>();
        Map<SSTableReader, Bounds<Token>> remaining = genBounds(Iterables.filter(generations.get(0), Predicates.not(SSTableReader::isMarkedSuspect)));
        for (SSTableReader sstable : ageSortedSSTables(remaining.keySet())) {
            if (candidates.contains(sstable))
                continue;
            Sets.SetView<SSTableReader> overlappedL0 = Sets.union(Collections.singleton(sstable), overlappingWithBounds(sstable, remaining));
            if (!Sets.intersection(overlappedL0, compactingL0).isEmpty())
                continue;
            for (SSTableReader newCandidate : overlappedL0) {
                if (firstCompactingKey == null || lastCompactingKey == null || overlapping(firstCompactingKey.getToken(), lastCompactingKey.getToken(), Collections.singleton(newCandidate)).size() == 0)
                    candidates.add(newCandidate);
                remaining.remove(newCandidate);
            }
            if (candidates.size() > cfs.getMaximumCompactionThreshold()) {
                // limit to only the cfs.getMaximumCompactionThreshold() oldest candidates
                candidates = new HashSet<>(ageSortedSSTables(candidates).subList(0, cfs.getMaximumCompactionThreshold()));
                break;
            }
        }
        // leave everything in L0 if we didn't end up with a full sstable's worth of data
        if (SSTableReader.getTotalBytes(candidates) > maxSSTableSizeInBytes) {
            // add sstables from L1 that overlap candidates
            // if the overlapping ones are already busy in a compaction, leave it out.
            // TODO try to find a set of L0 sstables that only overlaps with non-busy L1 sstables
            Set<SSTableReader> l1overlapping = overlapping(candidates, generations.get(1));
            if (Sets.intersection(l1overlapping, compacting).size() > 0)
                return Collections.emptyList();
            if (!overlapping(candidates, compactingL0).isEmpty())
                return Collections.emptyList();
            candidates = Sets.union(candidates, l1overlapping);
        }
        if (candidates.size() < 2)
            return Collections.emptyList();
        else
            return candidates;
    }
    // look for a non-suspect sstable to compact with, starting with where we left off last time,
    // and wrapping back to the beginning of the generation if necessary
    Map<SSTableReader, Bounds<Token>> sstablesNextLevel = genBounds(generations.get(level + 1));
    Iterator<SSTableReader> levelIterator = generations.wrappingIterator(level, lastCompactedSSTables[level]);
    while (levelIterator.hasNext()) {
        SSTableReader sstable = levelIterator.next();
        Set<SSTableReader> candidates = Sets.union(Collections.singleton(sstable), overlappingWithBounds(sstable, sstablesNextLevel));
        if (Iterables.any(candidates, SSTableReader::isMarkedSuspect))
            continue;
        if (Sets.intersection(candidates, compacting).isEmpty())
            return candidates;
    }
    // all the sstables were suspect or overlapped with something suspect
    return Collections.emptyList();
}
Also used : SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) PartitionPosition(org.apache.cassandra.db.PartitionPosition) Sets(com.google.common.collect.Sets) Bounds(org.apache.cassandra.dht.Bounds)
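
genBounds is not shown on this page, but judging from the Bounds construction in the CompactionManager examples above, it presumably maps each sstable to the inclusive token bounds spanned by its first and last partition keys. A hypothetical reconstruction:

import java.util.HashMap;
import java.util.Map;
import org.apache.cassandra.dht.Bounds;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.io.sstable.format.SSTableReader;

// hypothetical sketch of genBounds: one inclusive token bounds per sstable,
// which overlappingWithBounds can then compare against a candidate's bounds
static Map<SSTableReader, Bounds<Token>> genBoundsSketch(Iterable<SSTableReader> sstables) {
    Map<SSTableReader, Bounds<Token>> boundsPerSSTable = new HashMap<>();
    for (SSTableReader sstable : sstables)
        boundsPerSSTable.put(sstable, new Bounds<>(sstable.first.getToken(), sstable.last.getToken()));
    return boundsPerSSTable;
}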

Aggregations

Bounds (org.apache.cassandra.dht.Bounds) 9
AbstractBounds (org.apache.cassandra.dht.AbstractBounds) 5
Token (org.apache.cassandra.dht.Token) 5
SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader) 5
IOException (java.io.IOException) 4
VisibleForTesting (com.google.common.annotations.VisibleForTesting) 3
PartitionPosition (org.apache.cassandra.db.PartitionPosition) 3
UnfilteredRowIterator (org.apache.cassandra.db.rows.UnfilteredRowIterator) 3
ISSTableScanner (org.apache.cassandra.io.sstable.ISSTableScanner) 3
TableMetadata (org.apache.cassandra.schema.TableMetadata) 3
Preconditions (com.google.common.base.Preconditions) 2
ArrayList (java.util.ArrayList) 2
HashSet (java.util.HashSet) 2
List (java.util.List) 2
Collectors (java.util.stream.Collectors) 2
DatabaseDescriptor (org.apache.cassandra.config.DatabaseDescriptor) 2
DecoratedKey (org.apache.cassandra.db.DecoratedKey) 2
SSTableSet (org.apache.cassandra.db.lifecycle.SSTableSet) 2
View (org.apache.cassandra.db.lifecycle.View) 2
Range (org.apache.cassandra.dht.Range) 2