
Example 1 with ColumnFilter

Use of org.apache.cassandra.db.filter.ColumnFilter in project cassandra by apache.

The class ComplexColumnData, method filter.

public ComplexColumnData filter(ColumnFilter filter, DeletionTime activeDeletion, DroppedColumn dropped, LivenessInfo rowLiveness) {
    ColumnFilter.Tester cellTester = filter.newTester(column);
    boolean isQueriedColumn = filter.fetchedColumnIsQueried(column);
    if (cellTester == null && activeDeletion.isLive() && dropped == null && isQueriedColumn)
        return this;
    DeletionTime newDeletion = activeDeletion.supersedes(complexDeletion) ? DeletionTime.LIVE : complexDeletion;
    return transformAndFilter(newDeletion, (cell) -> {
        CellPath path = cell.path();
        boolean isForDropped = dropped != null && cell.timestamp() <= dropped.droppedTime;
        boolean isShadowed = activeDeletion.deletes(cell);
        boolean isFetchedCell = cellTester == null || cellTester.fetches(path);
        boolean isQueriedCell = isQueriedColumn && isFetchedCell && (cellTester == null || cellTester.fetchedCellIsQueried(path));
        boolean isSkippableCell = !isFetchedCell || (!isQueriedCell && cell.timestamp() < rowLiveness.timestamp());
        if (isForDropped || isShadowed || isSkippableCell)
            return null;
        // We should apply the same "optimization" as in Cell.deserialize to avoid discrepancies
        // between sstables and memtables data, i.e. resulting in a digest mismatch.
        return isQueriedCell ? cell : cell.withSkippedValue();
    });
}
Also used: DeletionTime(org.apache.cassandra.db.DeletionTime) ColumnFilter(org.apache.cassandra.db.filter.ColumnFilter)
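
The heart of this method is the distinction ColumnFilter draws between "fetched" columns (read internally, e.g. to preserve timestamps) and "queried" columns (actually requested by the user). Below is a minimal, self-contained sketch of that decision in plain Java, with a hypothetical Cell record standing in for Cassandra's cell type; it illustrates the skip logic above, not Cassandra's actual implementation.

public class CellFilterSketch {

    // Hypothetical stand-in for a Cassandra cell: just a timestamp and a value.
    record Cell(long timestamp, String value) {
        Cell withSkippedValue() { return new Cell(timestamp, null); }
    }

    // Mirrors the isFetchedCell / isQueriedCell / isSkippableCell logic above.
    static Cell filterCell(Cell cell, boolean isFetched, boolean isQueried, long rowLivenessTimestamp) {
        // Not fetched at all: drop the cell entirely.
        if (!isFetched)
            return null;
        // Fetched but not queried, and older than the row liveness: skippable.
        if (!isQueried && cell.timestamp() < rowLivenessTimestamp)
            return null;
        // Queried cells keep their value; fetched-only cells keep the timestamp but drop the value.
        return isQueried ? cell : cell.withSkippedValue();
    }

    public static void main(String[] args) {
        System.out.println(filterCell(new Cell(5, "v"), true, false, 10)); // null (skippable)
        System.out.println(filterCell(new Cell(5, "v"), true, true, 10));  // Cell[timestamp=5, value=v]
    }
}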

Example 2 with ColumnFilter

Use of org.apache.cassandra.db.filter.ColumnFilter in project cassandra by apache.

The class SinglePartitionSliceCommandTest, method staticColumnsAreReturned.

@Test
public void staticColumnsAreReturned() throws IOException {
    DecoratedKey key = metadata.partitioner.decorateKey(ByteBufferUtil.bytes("k1"));
    QueryProcessor.executeInternal("INSERT INTO ks.tbl (k, s) VALUES ('k1', 's')");
    Assert.assertFalse(QueryProcessor.executeInternal("SELECT s FROM ks.tbl WHERE k='k1'").isEmpty());
    ColumnFilter columnFilter = ColumnFilter.selection(RegularAndStaticColumns.of(s));
    ClusteringIndexSliceFilter sliceFilter = new ClusteringIndexSliceFilter(Slices.NONE, false);
    ReadCommand cmd = SinglePartitionReadCommand.create(metadata, FBUtilities.nowInSeconds(), columnFilter, RowFilter.NONE, DataLimits.NONE, key, sliceFilter);
    // check raw iterator for static cell
    try (ReadExecutionController executionController = cmd.executionController();
        UnfilteredPartitionIterator pi = cmd.executeLocally(executionController)) {
        checkForS(pi);
    }
    ReadResponse response;
    DataOutputBuffer out;
    DataInputPlus in;
    ReadResponse dst;
    // check (de)serialized iterator for memtable static cell
    try (ReadExecutionController executionController = cmd.executionController();
        UnfilteredPartitionIterator pi = cmd.executeLocally(executionController)) {
        response = ReadResponse.createDataResponse(pi, cmd, executionController.getRepairedDataInfo());
    }
    out = new DataOutputBuffer((int) ReadResponse.serializer.serializedSize(response, MessagingService.VERSION_30));
    ReadResponse.serializer.serialize(response, out, MessagingService.VERSION_30);
    in = new DataInputBuffer(out.buffer(), true);
    dst = ReadResponse.serializer.deserialize(in, MessagingService.VERSION_30);
    try (UnfilteredPartitionIterator pi = dst.makeIterator(cmd)) {
        checkForS(pi);
    }
    // check (de)serialized iterator for sstable static cell
    Schema.instance.getColumnFamilyStoreInstance(metadata.id).forceBlockingFlush();
    try (ReadExecutionController executionController = cmd.executionController();
        UnfilteredPartitionIterator pi = cmd.executeLocally(executionController)) {
        response = ReadResponse.createDataResponse(pi, cmd, executionController.getRepairedDataInfo());
    }
    out = new DataOutputBuffer((int) ReadResponse.serializer.serializedSize(response, MessagingService.VERSION_30));
    ReadResponse.serializer.serialize(response, out, MessagingService.VERSION_30);
    in = new DataInputBuffer(out.buffer(), true);
    dst = ReadResponse.serializer.deserialize(in, MessagingService.VERSION_30);
    try (UnfilteredPartitionIterator pi = dst.makeIterator(cmd)) {
        checkForS(pi);
    }
}
Also used: ClusteringIndexSliceFilter(org.apache.cassandra.db.filter.ClusteringIndexSliceFilter) DataInputBuffer(org.apache.cassandra.io.util.DataInputBuffer) DataOutputBuffer(org.apache.cassandra.io.util.DataOutputBuffer) UnfilteredPartitionIterator(org.apache.cassandra.db.partitions.UnfilteredPartitionIterator) DataInputPlus(org.apache.cassandra.io.util.DataInputPlus) ColumnFilter(org.apache.cassandra.db.filter.ColumnFilter) Test(org.junit.Test)
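
The serialization round trip in this test follows a fixed pattern: size the response for a given messaging version, serialize into a DataOutputBuffer, then deserialize from a DataInputBuffer and verify the result. The sketch below reproduces that shape with java.io streams standing in for Cassandra's buffer types; the payload is hypothetical and the messaging version is not modeled.

import java.io.*;

public class RoundTripSketch {
    public static void main(String[] args) throws IOException {
        String response = "static-row-payload"; // hypothetical stand-in for a ReadResponse
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bytes)) {
            out.writeUTF(response); // serialize, as with ReadResponse.serializer.serialize(...)
        }
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
            String roundTripped = in.readUTF();
            // The test's invariant: the deserialized response still carries the data.
            System.out.println(roundTripped.equals(response)); // true
        }
    }
}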

Example 3 with ColumnFilter

Use of org.apache.cassandra.db.filter.ColumnFilter in project cassandra by apache.

The class KeysSearcher, method queryDataFromIndex.

protected UnfilteredPartitionIterator queryDataFromIndex(final DecoratedKey indexKey, final RowIterator indexHits, final ReadCommand command, final ReadExecutionController executionController) {
    assert indexHits.staticRow() == Rows.EMPTY_STATIC_ROW;
    return new UnfilteredPartitionIterator() {

        private UnfilteredRowIterator next;

        public TableMetadata metadata() {
            return command.metadata();
        }

        public boolean hasNext() {
            return prepareNext();
        }

        public UnfilteredRowIterator next() {
            if (next == null)
                prepareNext();
            UnfilteredRowIterator toReturn = next;
            next = null;
            return toReturn;
        }

        private boolean prepareNext() {
            while (next == null && indexHits.hasNext()) {
                Row hit = indexHits.next();
                DecoratedKey key = index.baseCfs.decorateKey(hit.clustering().bufferAt(0));
                if (!command.selectsKey(key))
                    continue;
                ColumnFilter extendedFilter = getExtendedFilter(command.columnFilter());
                SinglePartitionReadCommand dataCmd = SinglePartitionReadCommand.create(index.baseCfs.metadata(), command.nowInSec(), extendedFilter, command.rowFilter(), DataLimits.NONE, key, command.clusteringIndexFilter(key), null);
                // filterIfStale closes its iterator if either it materializes it or if it returns null.
                // Otherwise, we close right away if empty; if it's assigned to next, it will be closed either
                // by the next caller of next(), or through closing this iterator if that comes first.
                @SuppressWarnings("resource")
                UnfilteredRowIterator dataIter = filterIfStale(dataCmd.queryMemtableAndDisk(index.baseCfs, executionController), hit, indexKey.getKey(), executionController.getWriteContext(), command.nowInSec());
                if (dataIter != null) {
                    if (dataIter.isEmpty())
                        dataIter.close();
                    else
                        next = dataIter;
                }
            }
            return next != null;
        }

        public void remove() {
            throw new UnsupportedOperationException();
        }

        public void close() {
            indexHits.close();
            if (next != null)
                next.close();
        }
    };
}
Also used: UnfilteredPartitionIterator(org.apache.cassandra.db.partitions.UnfilteredPartitionIterator) ColumnFilter(org.apache.cassandra.db.filter.ColumnFilter)
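
The anonymous UnfilteredPartitionIterator above uses a common look-ahead idiom: hasNext() eagerly prepares the next element, and next() hands it over and clears the slot. Here is a minimal generic sketch of that idiom in plain Java; the selects predicate is a hypothetical stand-in for command.selectsKey(...) plus the staleness check.

import java.util.Iterator;
import java.util.List;
import java.util.function.Predicate;

public class LookaheadIterator<T> implements Iterator<T> {

    private final Iterator<T> hits;
    private final Predicate<T> selects; // hypothetical stand-in for command.selectsKey(...)
    private T next;

    public LookaheadIterator(Iterator<T> hits, Predicate<T> selects) {
        this.hits = hits;
        this.selects = selects;
    }

    public boolean hasNext() { return prepareNext(); }

    public T next() {
        if (next == null)
            prepareNext();
        T toReturn = next;
        next = null; // clear the slot so the next call prepares a fresh element
        return toReturn;
    }

    private boolean prepareNext() {
        // Skip over hits that the query does not select, exactly like the loop above.
        while (next == null && hits.hasNext()) {
            T hit = hits.next();
            if (selects.test(hit))
                next = hit;
        }
        return next != null;
    }

    public static void main(String[] args) {
        Iterator<Integer> it = new LookaheadIterator<>(List.of(1, 2, 3, 4).iterator(), x -> x % 2 == 0);
        while (it.hasNext())
            System.out.println(it.next()); // prints 2, then 4
    }
}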

Example 4 with ColumnFilter

Use of org.apache.cassandra.db.filter.ColumnFilter in project cassandra by apache.

The class CassandraStreamReceiver, method sendThroughWritePath.

private void sendThroughWritePath(ColumnFamilyStore cfs, Collection<SSTableReader> readers) {
    boolean hasCdc = hasCDC(cfs);
    ColumnFilter filter = ColumnFilter.all(cfs.metadata());
    for (SSTableReader reader : readers) {
        Keyspace ks = Keyspace.open(reader.getKeyspaceName());
        // When doing mutation-based repair we split each partition into smaller batches
        // ({@link Stream MAX_ROWS_PER_BATCH}) to avoid OOMing and generating heap pressure
        try (ISSTableScanner scanner = reader.getScanner();
            CloseableIterator<UnfilteredRowIterator> throttledPartitions = ThrottledUnfilteredIterator.throttle(scanner, MAX_ROWS_PER_BATCH)) {
            while (throttledPartitions.hasNext()) {
                // MV *can* be applied unsafe if there's no CDC on the CFS as we flush
                // before transaction is done.
                // 
                // If the CFS has CDC, however, these updates need to be written to the CommitLog
                // so they get archived into the cdc_raw folder
                ks.apply(new Mutation(PartitionUpdate.fromIterator(throttledPartitions.next(), filter)), hasCdc, true, false);
            }
        }
    }
}
Also used: ISSTableScanner(org.apache.cassandra.io.sstable.ISSTableScanner) UnfilteredRowIterator(org.apache.cassandra.db.rows.UnfilteredRowIterator) SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) Keyspace(org.apache.cassandra.db.Keyspace) ColumnFilter(org.apache.cassandra.db.filter.ColumnFilter) Mutation(org.apache.cassandra.db.Mutation)
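
The key memory-safety device here is the throttling wrapper: rather than materializing a whole partition, the scanner is consumed in pages of at most MAX_ROWS_PER_BATCH rows. A minimal sketch of that batching idea in plain Java follows; the page size and list-of-lists shape are illustrative, not ThrottledUnfilteredIterator's actual types.

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

public class ThrottleSketch {

    // Consume a large iterator in fixed-size pages so no single batch holds too many rows.
    static <T> Iterator<List<T>> throttle(Iterator<T> source, int batchSize) {
        return new Iterator<>() {
            public boolean hasNext() { return source.hasNext(); }

            public List<T> next() {
                List<T> batch = new ArrayList<>(batchSize);
                while (source.hasNext() && batch.size() < batchSize)
                    batch.add(source.next());
                return batch;
            }
        };
    }

    public static void main(String[] args) {
        int maxRowsPerBatch = 3; // hypothetical page size
        Iterator<List<Integer>> pages = throttle(List.of(1, 2, 3, 4, 5, 6, 7).iterator(), maxRowsPerBatch);
        while (pages.hasNext())
            System.out.println(pages.next()); // [1, 2, 3], [4, 5, 6], [7]
    }
}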

Example 5 with ColumnFilter

Use of org.apache.cassandra.db.filter.ColumnFilter in project cassandra by apache.

The class PartitionImplementationTest, method testIter.

private void testIter(Supplier<Collection<? extends Unfiltered>> contentSupplier, Row staticRow) {
    NavigableSet<Clusterable> sortedContent = new TreeSet<Clusterable>(metadata.comparator);
    sortedContent.addAll(contentSupplier.get());
    AbstractBTreePartition partition;
    try (UnfilteredRowIterator iter = new Util.UnfilteredSource(metadata, Util.dk("pk"), staticRow, sortedContent.stream().map(x -> (Unfiltered) x).iterator())) {
        partition = ImmutableBTreePartition.create(iter);
    }
    ColumnMetadata defCol = metadata.getColumn(new ColumnIdentifier("col", true));
    ColumnFilter cf = ColumnFilter.selectionBuilder().add(defCol).build();
    Function<? super Clusterable, ? extends Clusterable> colFilter = x -> x instanceof Row ? ((Row) x).filter(cf, metadata) : x;
    Slices slices = Slices.with(metadata.comparator, Slice.make(clustering(KEY_RANGE / 4), clustering(KEY_RANGE * 3 / 4)));
    Slices multiSlices = makeSlices();
    // lastRow
    assertRowsEqual((Row) get(sortedContent.descendingSet(), x -> x instanceof Row), partition.lastRow());
    // get(static)
    assertRowsEqual(staticRow, partition.getRow(Clustering.STATIC_CLUSTERING));
    // get
    for (int i = 0; i < KEY_RANGE; ++i) {
        Clustering<?> cl = clustering(i);
        assertRowsEqual(getRow(sortedContent, cl), partition.getRow(cl));
    }
    // isEmpty
    assertEquals(sortedContent.isEmpty() && staticRow == null, partition.isEmpty());
    // hasRows
    assertEquals(sortedContent.stream().anyMatch(x -> x instanceof Row), partition.hasRows());
    // iterator
    assertIteratorsEqual(sortedContent.stream().filter(x -> x instanceof Row).iterator(), partition.iterator());
    // unfiltered iterator
    assertIteratorsEqual(sortedContent.iterator(), partition.unfilteredIterator());
    // unfiltered iterator with explicit column filter and slices
    assertIteratorsEqual(sortedContent.iterator(), partition.unfilteredIterator(ColumnFilter.all(metadata), Slices.ALL, false));
    // column-filtered
    assertIteratorsEqual(sortedContent.stream().map(colFilter).iterator(), partition.unfilteredIterator(cf, Slices.ALL, false));
    // sliced
    assertIteratorsEqual(slice(sortedContent, slices.get(0)), partition.unfilteredIterator(ColumnFilter.all(metadata), slices, false));
    assertIteratorsEqual(streamOf(slice(sortedContent, slices.get(0))).map(colFilter).iterator(), partition.unfilteredIterator(cf, slices, false));
    // randomly multi-sliced
    assertIteratorsEqual(slice(sortedContent, multiSlices), partition.unfilteredIterator(ColumnFilter.all(metadata), multiSlices, false));
    assertIteratorsEqual(streamOf(slice(sortedContent, multiSlices)).map(colFilter).iterator(), partition.unfilteredIterator(cf, multiSlices, false));
    // reversed
    assertIteratorsEqual(sortedContent.descendingIterator(), partition.unfilteredIterator(ColumnFilter.all(metadata), Slices.ALL, true));
    assertIteratorsEqual(sortedContent.descendingSet().stream().map(colFilter).iterator(), partition.unfilteredIterator(cf, Slices.ALL, true));
    assertIteratorsEqual(invert(slice(sortedContent, slices.get(0))), partition.unfilteredIterator(ColumnFilter.all(metadata), slices, true));
    assertIteratorsEqual(streamOf(invert(slice(sortedContent, slices.get(0)))).map(colFilter).iterator(), partition.unfilteredIterator(cf, slices, true));
    assertIteratorsEqual(invert(slice(sortedContent, multiSlices)), partition.unfilteredIterator(ColumnFilter.all(metadata), multiSlices, true));
    assertIteratorsEqual(streamOf(invert(slice(sortedContent, multiSlices))).map(colFilter).iterator(), partition.unfilteredIterator(cf, multiSlices, true));
    // clustering iterator
    testClusteringsIterator(sortedContent, partition, ColumnFilter.all(metadata), false);
    testClusteringsIterator(sortedContent, partition, cf, false);
    testClusteringsIterator(sortedContent, partition, ColumnFilter.all(metadata), true);
    testClusteringsIterator(sortedContent, partition, cf, true);
    // sliceable iter
    testSlicingOfIterators(sortedContent, partition, ColumnFilter.all(metadata), false);
    testSlicingOfIterators(sortedContent, partition, cf, false);
    testSlicingOfIterators(sortedContent, partition, ColumnFilter.all(metadata), true);
    testSlicingOfIterators(sortedContent, partition, cf, true);
}
Also used: AbstractBTreePartition(org.apache.cassandra.db.partitions.AbstractBTreePartition) java.util(java.util) Iterables(com.google.common.collect.Iterables) ColumnMetadata(org.apache.cassandra.schema.ColumnMetadata) BeforeClass(org.junit.BeforeClass) org.apache.cassandra.db(org.apache.cassandra.db) Deletion(org.apache.cassandra.db.rows.Row.Deletion) Function(java.util.function.Function) Supplier(java.util.function.Supplier) ByteBuffer(java.nio.ByteBuffer) Iterators(com.google.common.collect.Iterators) org.apache.cassandra.db.rows(org.apache.cassandra.db.rows) Partition(org.apache.cassandra.db.partitions.Partition) ConfigurationException(org.apache.cassandra.exceptions.ConfigurationException) ImmutableBTreePartition(org.apache.cassandra.db.partitions.ImmutableBTreePartition) StreamSupport(java.util.stream.StreamSupport) ColumnFilter(org.apache.cassandra.db.filter.ColumnFilter) Predicate(java.util.function.Predicate) Util(org.apache.cassandra.Util) ByteBufferUtil(org.apache.cassandra.utils.ByteBufferUtil) KeyspaceParams(org.apache.cassandra.schema.KeyspaceParams) Test(org.junit.Test) Collectors(java.util.stream.Collectors) SchemaLoader(org.apache.cassandra.SchemaLoader) Stream(java.util.stream.Stream) ColumnIdentifier(org.apache.cassandra.cql3.ColumnIdentifier) TableMetadata(org.apache.cassandra.schema.TableMetadata) Assert(org.junit.Assert) AsciiType(org.apache.cassandra.db.marshal.AsciiType)
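
This test exercises the two shapes of ColumnFilter seen throughout these examples: ColumnFilter.all(metadata), which fetches and queries every column, and a selectionBuilder() filter restricted to named columns. The sketch below models just that distinction as a plain-Java predicate over column names; the types are hypothetical stand-ins, not Cassandra's.

import java.util.Set;

public class ColumnFilterSketch {

    // Hypothetical stand-in for ColumnFilter: answers whether a column is queried.
    interface Filter { boolean isQueried(String column); }

    // Like ColumnFilter.all(metadata): every column is queried.
    static Filter all() { return column -> true; }

    // Like ColumnFilter.selectionBuilder().add(...).build(): only named columns are queried.
    static Filter selection(Set<String> selected) { return selected::contains; }

    public static void main(String[] args) {
        Filter cf = selection(Set.of("col"));
        System.out.println(cf.isQueried("col"));       // true
        System.out.println(cf.isQueried("other"));     // false
        System.out.println(all().isQueried("other"));  // true
    }
}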

Aggregations

ColumnFilter (org.apache.cassandra.db.filter.ColumnFilter): 10
Test (org.junit.Test): 6
ClusteringIndexSliceFilter (org.apache.cassandra.db.filter.ClusteringIndexSliceFilter): 5
ByteBuffer (java.nio.ByteBuffer): 4
DataOutputBuffer (org.apache.cassandra.io.util.DataOutputBuffer): 4
RowFilter (org.apache.cassandra.db.filter.RowFilter): 3
UnfilteredPartitionIterator (org.apache.cassandra.db.partitions.UnfilteredPartitionIterator): 2
UnfilteredRowIterator (org.apache.cassandra.db.rows.UnfilteredRowIterator): 2
DataInputBuffer (org.apache.cassandra.io.util.DataInputBuffer): 2
Iterables (com.google.common.collect.Iterables): 1
Iterators (com.google.common.collect.Iterators): 1
java.util (java.util): 1
Function (java.util.function.Function): 1
Predicate (java.util.function.Predicate): 1
Supplier (java.util.function.Supplier): 1
Collectors (java.util.stream.Collectors): 1
Stream (java.util.stream.Stream): 1
StreamSupport (java.util.stream.StreamSupport): 1
SchemaLoader (org.apache.cassandra.SchemaLoader): 1
Util (org.apache.cassandra.Util): 1