Search in sources:

Example 1 with ClusteringIndexNamesFilter

Use of org.apache.cassandra.db.filter.ClusteringIndexNamesFilter in project cassandra by apache.

From the class SinglePartitionSliceCommandTest, the method createClusteringFilter:

private AbstractClusteringIndexFilter createClusteringFilter(int uniqueCk1, int uniqueCk2, boolean isSlice) {
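    // Depending on isSlice, cover every (ck1, ck2) combination either as one single-clustering slice each or as explicit clustering names.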
    Slices.Builder slicesBuilder = new Slices.Builder(CFM_SLICES.comparator);
    BTreeSet.Builder<Clustering<?>> namesBuilder = BTreeSet.builder(CFM_SLICES.comparator);
    for (int ck1 = 0; ck1 < uniqueCk1; ck1++) {
        for (int ck2 = 0; ck2 < uniqueCk2; ck2++) {
            if (isSlice)
                slicesBuilder.add(Slice.make(Util.clustering(CFM_SLICES.comparator, ck1, ck2)));
            else
                namesBuilder.add(Util.clustering(CFM_SLICES.comparator, ck1, ck2));
        }
    }
    if (isSlice)
        return new ClusteringIndexSliceFilter(slicesBuilder.build(), false);
    return new ClusteringIndexNamesFilter(namesBuilder.build(), false);
}
Also used: ClusteringIndexSliceFilter (org.apache.cassandra.db.filter.ClusteringIndexSliceFilter), ClusteringIndexNamesFilter (org.apache.cassandra.db.filter.ClusteringIndexNamesFilter), BTreeSet (org.apache.cassandra.utils.btree.BTreeSet)
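
For context, a hypothetical call site (not part of this excerpt; the argument values are illustrative) would build both filter variants over the same clustering keys so that otherwise identical reads can be compared:

// Hypothetical usage: a names filter and an equivalent slice filter over the same 10x10 clusterings
AbstractClusteringIndexFilter namesFilter = createClusteringFilter(10, 10, false);
AbstractClusteringIndexFilter sliceFilter = createClusteringFilter(10, 10, true);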

Example 2 with ClusteringIndexNamesFilter

Use of org.apache.cassandra.db.filter.ClusteringIndexNamesFilter in project cassandra by apache.

From the class CompositesSearcher, the method queryDataFromIndex:

protected UnfilteredPartitionIterator queryDataFromIndex(final DecoratedKey indexKey, final RowIterator indexHits, final ReadCommand command, final ReadExecutionController executionController) {
    assert indexHits.staticRow() == Rows.EMPTY_STATIC_ROW;
    return new UnfilteredPartitionIterator() {

        private IndexEntry nextEntry;

        private UnfilteredRowIterator next;

        public TableMetadata metadata() {
            return command.metadata();
        }

        public boolean hasNext() {
            return prepareNext();
        }

        public UnfilteredRowIterator next() {
            if (next == null)
                prepareNext();
            UnfilteredRowIterator toReturn = next;
            next = null;
            return toReturn;
        }

        private boolean prepareNext() {
            while (true) {
                if (next != null)
                    return true;
                if (nextEntry == null) {
                    if (!indexHits.hasNext())
                        return false;
                    nextEntry = index.decodeEntry(indexKey, indexHits.next());
                }
                SinglePartitionReadCommand dataCmd;
                DecoratedKey partitionKey = index.baseCfs.decorateKey(nextEntry.indexedKey);
                List<IndexEntry> entries = new ArrayList<>();
                if (isStaticColumn()) {
                    // The index hit may not match the command's key constraint
                    if (!isMatchingEntry(partitionKey, nextEntry, command)) {
                        nextEntry = indexHits.hasNext() ? index.decodeEntry(indexKey, indexHits.next()) : null;
                        continue;
                    }
                    // If the index is on a static column, we just need to do a full read on the partition.
                    // Note that we want to re-use the command.columnFilter() in case of future change.
                    dataCmd = SinglePartitionReadCommand.create(index.baseCfs.metadata(), command.nowInSec(), command.columnFilter(), RowFilter.NONE, DataLimits.NONE, partitionKey, command.clusteringIndexFilter(partitionKey));
                    entries.add(nextEntry);
                    nextEntry = indexHits.hasNext() ? index.decodeEntry(indexKey, indexHits.next()) : null;
                } else {
                    // Gather all index hits belonging to the same partition and query the data for those hits.
                    // TODO: it's much more efficient to do 1 read for all hits to the same partition than doing
                    // 1 read per index hit. However, this basically means materializing all hits for a partition
                    // in memory so we should consider adding some paging mechanism. However, index hits should
                    // be relatively small so it's much better than the previous code that was materializing all
                    // *data* for a given partition.
                    BTreeSet.Builder<Clustering<?>> clusterings = BTreeSet.builder(index.baseCfs.getComparator());
                    while (nextEntry != null && partitionKey.getKey().equals(nextEntry.indexedKey)) {
                        // We've queried a slice of the index, but some hits may not match the clustering column constraints
                        if (isMatchingEntry(partitionKey, nextEntry, command)) {
                            clusterings.add(nextEntry.indexedEntryClustering);
                            entries.add(nextEntry);
                        }
                        nextEntry = indexHits.hasNext() ? index.decodeEntry(indexKey, indexHits.next()) : null;
                    }
                    // Because we've eliminated entries that don't match the clustering columns, it's possible we added nothing
                    if (clusterings.isEmpty())
                        continue;
                    // Query the gathered index hits. We still need to filter stale hits from the resulting query.
                    ClusteringIndexNamesFilter filter = new ClusteringIndexNamesFilter(clusterings.build(), false);
                    dataCmd = SinglePartitionReadCommand.create(index.baseCfs.metadata(), command.nowInSec(), command.columnFilter(), command.rowFilter(), DataLimits.NONE, partitionKey, filter, null);
                }
                // We close right away if empty; if it's assigned to next, it will be closed either
                // by the next caller of next(), or when this iterator itself is closed if that comes first.
                @SuppressWarnings("resource")
                UnfilteredRowIterator dataIter = filterStaleEntries(dataCmd.queryMemtableAndDisk(index.baseCfs, executionController), indexKey.getKey(), entries, executionController.getWriteContext(), command.nowInSec());
                if (dataIter.isEmpty()) {
                    dataIter.close();
                    continue;
                }
                next = dataIter;
                return true;
            }
        }

        public void remove() {
            throw new UnsupportedOperationException();
        }

        public void close() {
            indexHits.close();
            if (next != null)
                next.close();
        }
    };
}
Also used: ArrayList (java.util.ArrayList), IndexEntry (org.apache.cassandra.index.internal.IndexEntry), UnfilteredPartitionIterator (org.apache.cassandra.db.partitions.UnfilteredPartitionIterator), ClusteringIndexNamesFilter (org.apache.cassandra.db.filter.ClusteringIndexNamesFilter), BTreeSet (org.apache.cassandra.utils.btree.BTreeSet)
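
Reduced to a minimal sketch, the core pattern is: gather the clusterings that survived the per-hit checks, wrap them in a ClusteringIndexNamesFilter, and issue one single-partition read for exactly those rows. The helper name readExactRows and its parameter list are illustrative, not part of the Cassandra API:

// Illustrative helper: the caller is assumed to have already gathered the matching clusterings
private SinglePartitionReadCommand readExactRows(ColumnFamilyStore cfs,
                                                 DecoratedKey partitionKey,
                                                 NavigableSet<Clustering<?>> matching,
                                                 ColumnFilter columnFilter,
                                                 RowFilter rowFilter,
                                                 int nowInSec) {
    // A names filter reads only the listed rows instead of scanning the whole partition.
    ClusteringIndexNamesFilter filter = new ClusteringIndexNamesFilter(matching, false);
    return SinglePartitionReadCommand.create(cfs.metadata(), nowInSec, columnFilter, rowFilter, DataLimits.NONE, partitionKey, filter);
}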

Example 3 with ClusteringIndexNamesFilter

Use of org.apache.cassandra.db.filter.ClusteringIndexNamesFilter in project cassandra by apache.

From the class SSTableAndMemTableDigestMatchTest, the method getDigest:

private String getDigest(ColumnFilter filter, Clustering<?>... clusterings) {
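    // Run a names-filter read over the given clusterings (partition key 1) as a digest query and return the digest as hex.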
    ColumnFamilyStore cfs = getCurrentColumnFamilyStore();
    NavigableSet<Clustering<?>> clusteringSet = Sets.newTreeSet(new ClusteringComparator());
    for (Clustering<?> clustering : clusterings) clusteringSet.add(clustering);
    BufferDecoratedKey key = new BufferDecoratedKey(DatabaseDescriptor.getPartitioner().getToken(Int32Type.instance.decompose(1)), Int32Type.instance.decompose(1));
    SinglePartitionReadCommand cmd = SinglePartitionReadCommand.create(cfs.metadata(), (int) (System.currentTimeMillis() / 1000), key, filter, new ClusteringIndexNamesFilter(clusteringSet, false)).copyAsDigestQuery();
    cmd.setDigestVersion(MessagingService.current_version);
    ReadResponse resp;
    try (ReadExecutionController ctrl = ReadExecutionController.forCommand(cmd, false);
        UnfilteredRowIterator iterator = cmd.queryMemtableAndDisk(cfs, ctrl)) {
        resp = ReadResponse.createDataResponse(new SingletonUnfilteredPartitionIterator(iterator), cmd, ctrl.getRepairedDataInfo());
        logger.info("Response is: {}", resp.toDebugString(cmd, key));
        ByteBuffer digest = resp.digest(cmd);
        return ByteBufferUtil.bytesToHex(digest);
    }
}
Also used: UnfilteredRowIterator (org.apache.cassandra.db.rows.UnfilteredRowIterator), ClusteringIndexNamesFilter (org.apache.cassandra.db.filter.ClusteringIndexNamesFilter), ByteBuffer (java.nio.ByteBuffer), SingletonUnfilteredPartitionIterator (org.apache.cassandra.db.partitions.SingletonUnfilteredPartitionIterator)
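
A plausible way the test uses this helper, suggested by its name but not shown in the excerpt (the flush() call and the assertion are assumptions), is to compare digests of the same rows before and after flushing the memtable to an SSTable:

// Hypothetical test flow: digests should match whether the data sits in the memtable or in an SSTable
ColumnFamilyStore cfs = getCurrentColumnFamilyStore();
Clustering<?> row = Clustering.make(Int32Type.instance.decompose(1));
String memtableDigest = getDigest(ColumnFilter.all(cfs.metadata()), row);
flush(); // assumed test helper that flushes the memtable to disk
String sstableDigest = getDigest(ColumnFilter.all(cfs.metadata()), row);
assertEquals(memtableDigest, sstableDigest);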

Example 4 with ClusteringIndexNamesFilter

Use of org.apache.cassandra.db.filter.ClusteringIndexNamesFilter in project cassandra by apache.

From the class CounterCacheKey, the method readCounterValue:

/**
 * Reads the value of the counter represented by this key.
 *
 * @param cfs the store for the table this is a key of.
 * @return the value for the counter represented by this key, or {@code null} if there
 * is no such counter.
 */
public ByteBuffer readCounterValue(ColumnFamilyStore cfs) {
    TableMetadata metadata = cfs.metadata();
    assert metadata.id.equals(tableId) && Objects.equals(metadata.indexName().orElse(null), indexName);
    DecoratedKey key = cfs.decorateKey(partitionKey());
    int clusteringSize = metadata.comparator.size();
    List<ByteBuffer> buffers = CompositeType.splitName(ByteBuffer.wrap(cellName), ByteBufferAccessor.instance);
    // See makeCellName above
    assert buffers.size() >= clusteringSize + 1;
    Clustering<?> clustering = Clustering.make(buffers.subList(0, clusteringSize).toArray(new ByteBuffer[clusteringSize]));
    ColumnMetadata column = metadata.getColumn(buffers.get(clusteringSize));
    // The column may have been dropped since this cache key was created; no point in failing in that case, just skip the value.
    if (column == null)
        return null;
    CellPath path = column.isComplex() ? CellPath.create(buffers.get(buffers.size() - 1)) : null;
    int nowInSec = FBUtilities.nowInSeconds();
    ColumnFilter.Builder builder = ColumnFilter.selectionBuilder();
    if (path == null)
        builder.add(column);
    else
        builder.select(column, path);
    ClusteringIndexFilter filter = new ClusteringIndexNamesFilter(FBUtilities.singleton(clustering, metadata.comparator), false);
    SinglePartitionReadCommand cmd = SinglePartitionReadCommand.create(metadata, nowInSec, key, builder.build(), filter);
    try (ReadExecutionController controller = cmd.executionController();
        RowIterator iter = UnfilteredRowIterators.filter(cmd.queryMemtableAndDisk(cfs, controller), nowInSec)) {
        ByteBuffer value = null;
        if (column.isStatic())
            value = iter.staticRow().getCell(column).buffer();
        else if (iter.hasNext())
            value = iter.next().getCell(column).buffer();
        return value;
    }
}
Also used: TableMetadata (org.apache.cassandra.schema.TableMetadata), CellPath (org.apache.cassandra.db.rows.CellPath), ColumnMetadata (org.apache.cassandra.schema.ColumnMetadata), ClusteringIndexNamesFilter (org.apache.cassandra.db.filter.ClusteringIndexNamesFilter), ColumnFilter (org.apache.cassandra.db.filter.ColumnFilter), ByteBuffer (java.nio.ByteBuffer), RowIterator (org.apache.cassandra.db.rows.RowIterator), ClusteringIndexFilter (org.apache.cassandra.db.filter.ClusteringIndexFilter)
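
A minimal sketch of a caller, with variable and method names that are purely illustrative, only has to honour the null contract documented above:

// Illustrative only: look up the live value behind a cached counter key
ByteBuffer value = cacheKey.readCounterValue(cfs); // cacheKey and cfs assumed in scope
if (value != null)
    consume(value); // hypothetical consumer of the raw counter cell value; null means the counter no longer exists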

Aggregations

ClusteringIndexNamesFilter (org.apache.cassandra.db.filter.ClusteringIndexNamesFilter): 4 usages
ByteBuffer (java.nio.ByteBuffer): 2 usages
BTreeSet (org.apache.cassandra.utils.btree.BTreeSet): 2 usages
ArrayList (java.util.ArrayList): 1 usage
ClusteringIndexFilter (org.apache.cassandra.db.filter.ClusteringIndexFilter): 1 usage
ClusteringIndexSliceFilter (org.apache.cassandra.db.filter.ClusteringIndexSliceFilter): 1 usage
ColumnFilter (org.apache.cassandra.db.filter.ColumnFilter): 1 usage
SingletonUnfilteredPartitionIterator (org.apache.cassandra.db.partitions.SingletonUnfilteredPartitionIterator): 1 usage
UnfilteredPartitionIterator (org.apache.cassandra.db.partitions.UnfilteredPartitionIterator): 1 usage
CellPath (org.apache.cassandra.db.rows.CellPath): 1 usage
RowIterator (org.apache.cassandra.db.rows.RowIterator): 1 usage
UnfilteredRowIterator (org.apache.cassandra.db.rows.UnfilteredRowIterator): 1 usage
IndexEntry (org.apache.cassandra.index.internal.IndexEntry): 1 usage
ColumnMetadata (org.apache.cassandra.schema.ColumnMetadata): 1 usage
TableMetadata (org.apache.cassandra.schema.TableMetadata): 1 usage