
Example 1 with Partition

use of org.apache.cassandra.db.partitions.Partition in project cassandra by apache.

the class CompactionController method getPurgeEvaluator.

/**
     * @param key
     * @return a predicate for whether tombstones marked for deletion at the given time for the given partition are
     * purgeable; we calculate this by checking whether the deletion time is less than the min timestamp of all SSTables
     * containing this partition and not participating in the compaction. This means there isn't any data in those
     * sstables that might still need to be suppressed by a tombstone at this timestamp.
     */
public Predicate<Long> getPurgeEvaluator(DecoratedKey key) {
    if (NEVER_PURGE_TOMBSTONES || !compactingRepaired())
        return time -> false;
    overlapIterator.update(key);
    Set<SSTableReader> filteredSSTables = overlapIterator.overlaps();
    Iterable<Memtable> memtables = cfs.getTracker().getView().getAllMemtables();
    long minTimestampSeen = Long.MAX_VALUE;
    boolean hasTimestamp = false;
    for (SSTableReader sstable : filteredSSTables) {
        // if we don't have a bloom filter (bf_fp_chance = 1.0 or the filter file is missing), we check the index file instead.
        if (sstable.getBloomFilter() instanceof AlwaysPresentFilter && sstable.getPosition(key, SSTableReader.Operator.EQ, false) != null || sstable.getBloomFilter().isPresent(key)) {
            minTimestampSeen = Math.min(minTimestampSeen, sstable.getMinTimestamp());
            hasTimestamp = true;
        }
    }
    for (Memtable memtable : memtables) {
        Partition partition = memtable.getPartition(key);
        if (partition != null) {
            minTimestampSeen = Math.min(minTimestampSeen, partition.stats().minTimestamp);
            hasTimestamp = true;
        }
    }
    if (!hasTimestamp)
        return time -> true;
    else {
        final long finalTimestamp = minTimestampSeen;
        return time -> time < finalTimestamp;
    }
}
Also used : java.util(java.util) Iterables(com.google.common.collect.Iterables) Logger(org.slf4j.Logger) OverlapIterator(org.apache.cassandra.utils.OverlapIterator) Predicate(java.util.function.Predicate) TombstoneOption(org.apache.cassandra.schema.CompactionParams.TombstoneOption) LoggerFactory(org.slf4j.LoggerFactory) org.apache.cassandra.db(org.apache.cassandra.db) RateLimiter(com.google.common.util.concurrent.RateLimiter) SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) Partition(org.apache.cassandra.db.partitions.Partition) FileDataInput(org.apache.cassandra.io.util.FileDataInput) UnfilteredRowIterator(org.apache.cassandra.db.rows.UnfilteredRowIterator) FileUtils(org.apache.cassandra.io.util.FileUtils) SSTableIntervalTree.buildIntervals(org.apache.cassandra.db.lifecycle.SSTableIntervalTree.buildIntervals) Memtable(org.apache.cassandra.db.Memtable) Predicates(com.google.common.base.Predicates) Refs(org.apache.cassandra.utils.concurrent.Refs) AlwaysPresentFilter(org.apache.cassandra.utils.AlwaysPresentFilter)
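The predicate above encodes a simple rule: a tombstone is purgeable only if its timestamp is older than everything held by the overlapping sstables and memtables that are not part of the compaction. Below is a minimal, self-contained sketch of that rule in plain Java; the purgeEvaluator helper and its list argument are hypothetical stand-ins for the overlap scan that getPurgeEvaluator performs, not Cassandra API.

import java.util.List;
import java.util.function.Predicate;

public class PurgeEvaluatorSketch {

    // Hypothetical stand-in for the overlap scan: the caller supplies the minimum
    // timestamps of all overlapping, non-compacting sources for the partition.
    static Predicate<Long> purgeEvaluator(List<Long> overlappingMinTimestamps) {
        if (overlappingMinTimestamps.isEmpty())
            // no other source holds data for this partition, so any tombstone can be purged
            return time -> true;
        long floor = overlappingMinTimestamps.stream().mapToLong(Long::longValue).min().getAsLong();
        // purgeable only if the tombstone is older than everything it might still need to suppress
        return time -> time < floor;
    }

    public static void main(String[] args) {
        Predicate<Long> canPurge = purgeEvaluator(List.of(1000L, 2000L));
        System.out.println(canPurge.test(500L));   // true: older than every overlapping source
        System.out.println(canPurge.test(1500L));  // false: a source with data since 1000 may still need this tombstone
    }
}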

Example 2 with Partition

use of org.apache.cassandra.db.partitions.Partition in project cassandra by apache.

the class PartitionImplementationTest method testIter.

private void testIter(Supplier<Collection<? extends Unfiltered>> contentSupplier, Row staticRow) {
    NavigableSet<Clusterable> sortedContent = new TreeSet<Clusterable>(metadata.comparator);
    sortedContent.addAll(contentSupplier.get());
    AbstractBTreePartition partition;
    try (UnfilteredRowIterator iter = new Util.UnfilteredSource(metadata, Util.dk("pk"), staticRow, sortedContent.stream().map(x -> (Unfiltered) x).iterator())) {
        partition = ImmutableBTreePartition.create(iter);
    }
    ColumnMetadata defCol = metadata.getColumn(new ColumnIdentifier("col", true));
    ColumnFilter cf = ColumnFilter.selectionBuilder().add(defCol).build();
    Function<? super Clusterable, ? extends Clusterable> colFilter = x -> x instanceof Row ? ((Row) x).filter(cf, metadata) : x;
    Slices slices = Slices.with(metadata.comparator, Slice.make(clustering(KEY_RANGE / 4), clustering(KEY_RANGE * 3 / 4)));
    Slices multiSlices = makeSlices();
    // lastRow
    assertRowsEqual((Row) get(sortedContent.descendingSet(), x -> x instanceof Row), partition.lastRow());
    // get(static)
    assertRowsEqual(staticRow, partition.getRow(Clustering.STATIC_CLUSTERING));
    // get
    for (int i = 0; i < KEY_RANGE; ++i) {
        Clustering cl = clustering(i);
        assertRowsEqual(getRow(sortedContent, cl), partition.getRow(cl));
    }
    // isEmpty
    assertEquals(sortedContent.isEmpty() && staticRow == null, partition.isEmpty());
    // hasRows
    assertEquals(sortedContent.stream().anyMatch(x -> x instanceof Row), partition.hasRows());
    // iterator
    assertIteratorsEqual(sortedContent.stream().filter(x -> x instanceof Row).iterator(), partition.iterator());
    // unfiltered iterator
    assertIteratorsEqual(sortedContent.iterator(), partition.unfilteredIterator());
    // unfiltered iterator
    assertIteratorsEqual(sortedContent.iterator(), partition.unfilteredIterator(ColumnFilter.all(metadata), Slices.ALL, false));
    // column-filtered
    assertIteratorsEqual(sortedContent.stream().map(colFilter).iterator(), partition.unfilteredIterator(cf, Slices.ALL, false));
    // sliced
    assertIteratorsEqual(slice(sortedContent, slices.get(0)), partition.unfilteredIterator(ColumnFilter.all(metadata), slices, false));
    assertIteratorsEqual(streamOf(slice(sortedContent, slices.get(0))).map(colFilter).iterator(), partition.unfilteredIterator(cf, slices, false));
    // randomly multi-sliced
    assertIteratorsEqual(slice(sortedContent, multiSlices), partition.unfilteredIterator(ColumnFilter.all(metadata), multiSlices, false));
    assertIteratorsEqual(streamOf(slice(sortedContent, multiSlices)).map(colFilter).iterator(), partition.unfilteredIterator(cf, multiSlices, false));
    // reversed
    assertIteratorsEqual(sortedContent.descendingIterator(), partition.unfilteredIterator(ColumnFilter.all(metadata), Slices.ALL, true));
    assertIteratorsEqual(sortedContent.descendingSet().stream().map(colFilter).iterator(), partition.unfilteredIterator(cf, Slices.ALL, true));
    assertIteratorsEqual(invert(slice(sortedContent, slices.get(0))), partition.unfilteredIterator(ColumnFilter.all(metadata), slices, true));
    assertIteratorsEqual(streamOf(invert(slice(sortedContent, slices.get(0)))).map(colFilter).iterator(), partition.unfilteredIterator(cf, slices, true));
    assertIteratorsEqual(invert(slice(sortedContent, multiSlices)), partition.unfilteredIterator(ColumnFilter.all(metadata), multiSlices, true));
    assertIteratorsEqual(streamOf(invert(slice(sortedContent, multiSlices))).map(colFilter).iterator(), partition.unfilteredIterator(cf, multiSlices, true));
    // search iterator
    testSearchIterator(sortedContent, partition, ColumnFilter.all(metadata), false);
    testSearchIterator(sortedContent, partition, cf, false);
    testSearchIterator(sortedContent, partition, ColumnFilter.all(metadata), true);
    testSearchIterator(sortedContent, partition, cf, true);
    // sliceable iter
    testSlicingOfIterators(sortedContent, partition, ColumnFilter.all(metadata), false);
    testSlicingOfIterators(sortedContent, partition, cf, false);
    testSlicingOfIterators(sortedContent, partition, ColumnFilter.all(metadata), true);
    testSlicingOfIterators(sortedContent, partition, cf, true);
}
Also used : AbstractBTreePartition(org.apache.cassandra.db.partitions.AbstractBTreePartition) java.util(java.util) Iterables(com.google.common.collect.Iterables) ColumnMetadata(org.apache.cassandra.schema.ColumnMetadata) BeforeClass(org.junit.BeforeClass) SearchIterator(org.apache.cassandra.utils.SearchIterator) org.apache.cassandra.db(org.apache.cassandra.db) Deletion(org.apache.cassandra.db.rows.Row.Deletion) Function(java.util.function.Function) Supplier(java.util.function.Supplier) Iterators(com.google.common.collect.Iterators) org.apache.cassandra.db.rows(org.apache.cassandra.db.rows) Partition(org.apache.cassandra.db.partitions.Partition) ConfigurationException(org.apache.cassandra.exceptions.ConfigurationException) ImmutableBTreePartition(org.apache.cassandra.db.partitions.ImmutableBTreePartition) StreamSupport(java.util.stream.StreamSupport) ColumnFilter(org.apache.cassandra.db.filter.ColumnFilter) Predicate(java.util.function.Predicate) Util(org.apache.cassandra.Util) ByteBufferUtil(org.apache.cassandra.utils.ByteBufferUtil) KeyspaceParams(org.apache.cassandra.schema.KeyspaceParams) Test(org.junit.Test) Collectors(java.util.stream.Collectors) SchemaLoader(org.apache.cassandra.SchemaLoader) Stream(java.util.stream.Stream) ColumnIdentifier(org.apache.cassandra.cql3.ColumnIdentifier) TableMetadata(org.apache.cassandra.schema.TableMetadata) Assert(org.junit.Assert) AsciiType(org.apache.cassandra.db.marshal.AsciiType)
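The test above is built around a reference model: a NavigableSet holds the expected contents, and every view of the partition (forward, reversed, sliced, column-filtered) is checked against the equivalent view of that set. A small, self-contained sketch of the same reference-model idea, with integers standing in for clusterings (all names here are illustrative only):

import java.util.ArrayList;
import java.util.List;
import java.util.NavigableSet;
import java.util.TreeSet;

public class ReferenceModelSketch {
    public static void main(String[] args) {
        // The sorted set is the "expected" content; the structure under test should agree
        // with it for full, reversed, and sliced iteration.
        NavigableSet<Integer> sortedContent = new TreeSet<>(List.of(1, 3, 5, 7, 9));

        // analogue of unfilteredIterator(): full forward iteration
        System.out.println(new ArrayList<>(sortedContent));                          // [1, 3, 5, 7, 9]
        // analogue of the reversed iterator
        System.out.println(new ArrayList<>(sortedContent.descendingSet()));          // [9, 7, 5, 3, 1]
        // analogue of a sliced iterator over clusterings in [3, 7]
        System.out.println(new ArrayList<>(sortedContent.subSet(3, true, 7, true))); // [3, 5, 7]
    }
}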

Example 3 with Partition

use of org.apache.cassandra.db.partitions.Partition in project cassandra by apache.

the class ScrubTest method assertOrdered.

private static void assertOrdered(ReadCommand cmd, int expectedSize) {
    int size = 0;
    DecoratedKey prev = null;
    for (Partition partition : Util.getAllUnfiltered(cmd)) {
        DecoratedKey current = partition.partitionKey();
        assertTrue("key " + current + " does not sort after previous key " + prev, prev == null || prev.compareTo(current) < 0);
        prev = current;
        ++size;
    }
    assertEquals(expectedSize, size);
}
Also used : Partition(org.apache.cassandra.db.partitions.Partition)
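assertOrdered relies only on DecoratedKey being Comparable: each partition key must sort strictly after the one before it. The same check, written as a self-contained sketch over plain strings (class and method names are illustrative):

import java.util.List;

public class OrderCheckSketch {

    // Same shape as assertOrdered: walk the keys and require strictly increasing order.
    static void assertStrictlyOrdered(List<String> keys) {
        String prev = null;
        for (String current : keys) {
            if (prev != null && prev.compareTo(current) >= 0)
                throw new AssertionError("key " + current + " does not sort after previous key " + prev);
            prev = current;
        }
    }

    public static void main(String[] args) {
        assertStrictlyOrdered(List.of("a", "b", "c"));  // passes
        assertStrictlyOrdered(List.of("b", "a"));       // throws AssertionError
    }
}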

Example 4 with Partition

use of org.apache.cassandra.db.partitions.Partition in project cassandra by apache.

the class UpdateParameters method getPrefetchedRow.

/**
     * Returns the prefetched row with the already performed modifications.
     * <p>If no modifications have yet been performed, this method will return the fetched row, or {@code null} if
     * the row does not exist. If some modifications (updates or deletions) have already been done, the row returned
     * will be the result of merging the fetched row with the pending mutations.</p>
     *
     * @param key the partition key
     * @param clustering the row clustering
     * @return the prefetched row with the already performed modifications
     */
public Row getPrefetchedRow(DecoratedKey key, Clustering clustering) {
    if (prefetchedRows == null)
        return null;
    Partition partition = prefetchedRows.get(key);
    Row prefetchedRow = partition == null ? null : partition.searchIterator(ColumnFilter.selection(partition.columns()), false).next(clustering);
    // We need to apply the pending mutations to return the row in its current state
    Row pendingMutations = builder.copy().build();
    if (pendingMutations.isEmpty())
        return prefetchedRow;
    if (prefetchedRow == null)
        return pendingMutations;
    return Rows.merge(prefetchedRow, pendingMutations, nowInSec).purge(DeletionPurger.PURGE_ALL, nowInSec);
}
Also used : Partition(org.apache.cassandra.db.partitions.Partition)
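The method above layers the pending mutations on top of the prefetched row, with Rows.merge resolving each cell by timestamp (the newer write wins). Below is a simplified, self-contained sketch of that merge order using maps of column name to (timestamp, value) pairs instead of real Row objects; every name in it is hypothetical and it deliberately ignores deletions and purging.

import java.util.HashMap;
import java.util.Map;

public class PrefetchMergeSketch {

    // (timestamp, value) pair for a single cell; the newer timestamp wins on merge.
    record Cell(long timestamp, String value) {}

    static Map<String, Cell> merge(Map<String, Cell> prefetched, Map<String, Cell> pending) {
        if (pending.isEmpty())
            return prefetched;          // nothing written yet: return the fetched row as-is
        if (prefetched == null)
            return pending;             // row did not exist: the pending mutations are the row
        Map<String, Cell> merged = new HashMap<>(prefetched);
        pending.forEach((column, cell) ->
            merged.merge(column, cell, (a, b) -> a.timestamp() >= b.timestamp() ? a : b));
        return merged;
    }

    public static void main(String[] args) {
        Map<String, Cell> fetched = Map.of("col", new Cell(1L, "old"));
        Map<String, Cell> pending = Map.of("col", new Cell(2L, "new"));
        System.out.println(merge(fetched, pending)); // {col=Cell[timestamp=2, value=new]}
    }
}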

Aggregations

Partition (org.apache.cassandra.db.partitions.Partition)4 Iterables (com.google.common.collect.Iterables)2 java.util (java.util)2 Predicate (java.util.function.Predicate)2 org.apache.cassandra.db (org.apache.cassandra.db)2 Predicates (com.google.common.base.Predicates)1 Iterators (com.google.common.collect.Iterators)1 RateLimiter (com.google.common.util.concurrent.RateLimiter)1 Function (java.util.function.Function)1 Supplier (java.util.function.Supplier)1 Collectors (java.util.stream.Collectors)1 Stream (java.util.stream.Stream)1 StreamSupport (java.util.stream.StreamSupport)1 SchemaLoader (org.apache.cassandra.SchemaLoader)1 Util (org.apache.cassandra.Util)1 ColumnIdentifier (org.apache.cassandra.cql3.ColumnIdentifier)1 Memtable (org.apache.cassandra.db.Memtable)1 ColumnFilter (org.apache.cassandra.db.filter.ColumnFilter)1 SSTableIntervalTree.buildIntervals (org.apache.cassandra.db.lifecycle.SSTableIntervalTree.buildIntervals)1 AsciiType (org.apache.cassandra.db.marshal.AsciiType)1