Example 6 with UnfilteredPartitionIterator

Use of org.apache.cassandra.db.partitions.UnfilteredPartitionIterator in project cassandra by apache.

From the class SASIIndexTest, the method getPaged pages through an index query one page at a time, collecting the distinct partition keys matched by the given expressions.

private static Set<DecoratedKey> getPaged(ColumnFamilyStore store, int pageSize, Expression... expressions) {
    Set<DecoratedKey> uniqueKeys = new TreeSet<>();
    DecoratedKey lastKey = null;
    int count;
    do {
        count = 0;
        // try-with-resources guarantees each page is closed even if iteration throws
        try (UnfilteredPartitionIterator currentPage = getIndexed(store, ColumnFilter.all(store.metadata()), lastKey, pageSize, expressions)) {
            if (currentPage == null)
                break;
            while (currentPage.hasNext()) {
                try (UnfilteredRowIterator row = currentPage.next()) {
                    uniqueKeys.add(row.partitionKey());
                    lastKey = row.partitionKey();
                    count++;
                }
            }
        }
    } while (count == pageSize);
    return uniqueKeys;
}
Also used : UnfilteredPartitionIterator(org.apache.cassandra.db.partitions.UnfilteredPartitionIterator)
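
The shape above recurs throughout these examples: the partition-level iterator hands out row-level iterators, and both levels must be closed. A minimal drain loop, sketched under the assumption that cmd is an existing ReadCommand (as in the later examples) and that org.apache.cassandra.db.rows.Unfiltered, the common supertype of rows and range tombstone markers, is imported:

try (ReadExecutionController controller = cmd.executionController();
     UnfilteredPartitionIterator partitions = cmd.executeLocally(controller)) {
    while (partitions.hasNext()) {
        // Each partition is itself a closeable iterator of rows and range tombstone markers
        try (UnfilteredRowIterator partition = partitions.next()) {
            while (partition.hasNext()) {
                Unfiltered unfiltered = partition.next();
                // process unfiltered here
            }
        }
    }
}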

Example 7 with UnfilteredPartitionIterator

Use of org.apache.cassandra.db.partitions.UnfilteredPartitionIterator in project cassandra by apache.

From the class ReadCommandTest, the method testSinglePartitionGroupMerge writes several groups of rows, serializes the result of a grouped single-partition read for each group, then deserializes, merges and filters the iterators, checking that the merged rows come back in the expected order.

@Test
public void testSinglePartitionGroupMerge() throws Exception {
    ColumnFamilyStore cfs = Keyspace.open(KEYSPACE).getColumnFamilyStore(CF3);
    String[][][] groups = new String[][][] {
        // "1" indicates to create the data, "-1" to delete the row
        new String[][] { new String[] { "1", "key1", "aa", "a" },
                         new String[] { "1", "key2", "bb", "b" },
                         new String[] { "1", "key3", "cc", "c" } },
        new String[][] { new String[] { "1", "key3", "dd", "d" },
                         new String[] { "1", "key2", "ee", "e" },
                         new String[] { "1", "key1", "ff", "f" } },
        new String[][] { new String[] { "1", "key6", "aa", "a" },
                         new String[] { "1", "key5", "bb", "b" },
                         new String[] { "1", "key4", "cc", "c" } },
        new String[][] { new String[] { "-1", "key6", "aa", "a" },
                         new String[] { "-1", "key2", "bb", "b" } } };
    // Given the data above, when the keys are sorted and the deletions removed, we should
    // get these clustering rows in this order
    String[] expectedRows = new String[] { "aa", "ff", "ee", "cc", "dd", "cc", "bb" };
    List<ByteBuffer> buffers = new ArrayList<>(groups.length);
    int nowInSeconds = FBUtilities.nowInSeconds();
    ColumnFilter columnFilter = ColumnFilter.allRegularColumnsBuilder(cfs.metadata()).build();
    RowFilter rowFilter = RowFilter.create();
    Slice slice = Slice.make(ClusteringBound.BOTTOM, ClusteringBound.TOP);
    ClusteringIndexSliceFilter sliceFilter = new ClusteringIndexSliceFilter(Slices.with(cfs.metadata().comparator, slice), false);
    for (String[][] group : groups) {
        cfs.truncateBlocking();
        List<SinglePartitionReadCommand> commands = new ArrayList<>(group.length);
        for (String[] data : group) {
            if (data[0].equals("1")) {
                new RowUpdateBuilder(cfs.metadata(), 0, ByteBufferUtil.bytes(data[1])).clustering(data[2]).add(data[3], ByteBufferUtil.bytes("blah")).build().apply();
            } else {
                RowUpdateBuilder.deleteRow(cfs.metadata(), FBUtilities.timestampMicros(), ByteBufferUtil.bytes(data[1]), data[2]).apply();
            }
            commands.add(SinglePartitionReadCommand.create(cfs.metadata(), nowInSeconds, columnFilter, rowFilter, DataLimits.NONE, Util.dk(data[1]), sliceFilter));
        }
        cfs.forceBlockingFlush();
        ReadQuery query = new SinglePartitionReadCommand.Group(commands, DataLimits.NONE);
        try (ReadExecutionController executionController = query.executionController();
            UnfilteredPartitionIterator iter = query.executeLocally(executionController);
            DataOutputBuffer buffer = new DataOutputBuffer()) {
            UnfilteredPartitionIterators.serializerForIntraNode().serialize(iter, columnFilter, buffer, MessagingService.current_version);
            buffers.add(buffer.buffer());
        }
    }
    // deserialize, merge and check the results are all there
    List<UnfilteredPartitionIterator> iterators = new ArrayList<>();
    for (ByteBuffer buffer : buffers) {
        try (DataInputBuffer in = new DataInputBuffer(buffer, true)) {
            iterators.add(UnfilteredPartitionIterators.serializerForIntraNode().deserialize(in, MessagingService.current_version, cfs.metadata(), columnFilter, SerializationHelper.Flag.LOCAL));
        }
    }
    try (PartitionIterator partitionIterator = UnfilteredPartitionIterators.mergeAndFilter(iterators, nowInSeconds, new UnfilteredPartitionIterators.MergeListener() {

        public UnfilteredRowIterators.MergeListener getRowMergeListener(DecoratedKey partitionKey, List<UnfilteredRowIterator> versions) {
            return null;
        }

        public void close() {
        }
    })) {
        int i = 0;
        int numPartitions = 0;
        while (partitionIterator.hasNext()) {
            numPartitions++;
            try (RowIterator rowIterator = partitionIterator.next()) {
                while (rowIterator.hasNext()) {
                    Row row = rowIterator.next();
                    assertEquals("col=" + expectedRows[i++], row.clustering().toString(cfs.metadata()));
                //System.out.print(row.toString(cfs.metadata, true));
                }
            }
        }
        assertEquals(5, numPartitions);
        assertEquals(expectedRows.length, i);
    }
}
Also used : UnfilteredRowIterator(org.apache.cassandra.db.rows.UnfilteredRowIterator), ArrayList(java.util.ArrayList), UnfilteredPartitionIterator(org.apache.cassandra.db.partitions.UnfilteredPartitionIterator), ClusteringIndexSliceFilter(org.apache.cassandra.db.filter.ClusteringIndexSliceFilter), RowFilter(org.apache.cassandra.db.filter.RowFilter), DataOutputBuffer(org.apache.cassandra.io.util.DataOutputBuffer), ColumnFilter(org.apache.cassandra.db.filter.ColumnFilter), ByteBuffer(java.nio.ByteBuffer), UnfilteredPartitionIterators(org.apache.cassandra.db.partitions.UnfilteredPartitionIterators), DataInputBuffer(org.apache.cassandra.io.util.DataInputBuffer), PartitionIterator(org.apache.cassandra.db.partitions.PartitionIterator), RowIterator(org.apache.cassandra.db.rows.RowIterator), Row(org.apache.cassandra.db.rows.Row), Test(org.junit.Test)
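
Isolated from the rest of the test, the intra-node round trip looks like this. A sketch, assuming query, cfs and columnFilter are set up exactly as above and the enclosing method declares throws IOException:

ByteBuffer serialized;
try (ReadExecutionController controller = query.executionController();
     UnfilteredPartitionIterator iter = query.executeLocally(controller);
     DataOutputBuffer out = new DataOutputBuffer()) {
    // Write the unfiltered partitions in the format used between nodes on the same version
    UnfilteredPartitionIterators.serializerForIntraNode().serialize(iter, columnFilter, out, MessagingService.current_version);
    serialized = out.buffer();
}
try (DataInputBuffer in = new DataInputBuffer(serialized, true)) {
    // The deserialized iterator must be closed by whoever consumes it, for example
    // by handing it to UnfilteredPartitionIterators.mergeAndFilter as the test does
    UnfilteredPartitionIterator back = UnfilteredPartitionIterators.serializerForIntraNode().deserialize(in, MessagingService.current_version, cfs.metadata(), columnFilter, SerializationHelper.Flag.LOCAL);
    back.close();
}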

Example 8 with UnfilteredPartitionIterator

Use of org.apache.cassandra.db.partitions.UnfilteredPartitionIterator in project cassandra by apache.

From the class CustomIndexTest, the method notifyIndexersOfPartitionAndRowRemovalDuringCleanup verifies that deleting a partition through the index manager notifies a custom index of both the partition deletion and each individual row deletion.

@Test
public void notifyIndexersOfPartitionAndRowRemovalDuringCleanup() throws Throwable {
    createTable("CREATE TABLE %s (k int, c int, v int, PRIMARY KEY (k,c))");
    createIndex(String.format("CREATE CUSTOM INDEX cleanup_index ON %%s() USING '%s'", StubIndex.class.getName()));
    ColumnFamilyStore cfs = getCurrentColumnFamilyStore();
    StubIndex index = (StubIndex) cfs.indexManager.getIndexByName("cleanup_index");
    execute("INSERT INTO %s (k, c, v) VALUES (?, ?, ?)", 0, 0, 0);
    execute("INSERT INTO %s (k, c, v) VALUES (?, ?, ?)", 0, 1, 1);
    execute("INSERT INTO %s (k, c, v) VALUES (?, ?, ?)", 0, 2, 2);
    execute("INSERT INTO %s (k, c, v) VALUES (?, ?, ?)", 3, 3, 3);
    assertEquals(4, index.rowsInserted.size());
    assertEquals(0, index.partitionDeletions.size());
    ReadCommand cmd = Util.cmd(cfs, 0).build();
    try (ReadExecutionController executionController = cmd.executionController();
        UnfilteredPartitionIterator iterator = cmd.executeLocally(executionController)) {
        assertTrue(iterator.hasNext());
        cfs.indexManager.deletePartition(iterator.next(), FBUtilities.nowInSeconds());
    }
    assertEquals(1, index.partitionDeletions.size());
    assertEquals(3, index.rowsDeleted.size());
    for (int i = 0; i < 3; i++)
        assertEquals(index.rowsDeleted.get(i).clustering(), index.rowsInserted.get(i).clustering());
}
Also used : UnfilteredPartitionIterator(org.apache.cassandra.db.partitions.UnfilteredPartitionIterator) Test(org.junit.Test)

Example 9 with UnfilteredPartitionIterator

Use of org.apache.cassandra.db.partitions.UnfilteredPartitionIterator in project cassandra by apache.

From the class ReadCommandVerbHandler, the method doVerb executes an incoming ReadCommand locally and replies to the sender. The response is created inside the try block because the iterator is only valid while the ReadExecutionController is open.

public void doVerb(MessageIn<ReadCommand> message, int id) {
    if (StorageService.instance.isBootstrapMode()) {
        throw new RuntimeException("Cannot service reads while bootstrapping!");
    }
    ReadCommand command = message.payload;
    command.setMonitoringTime(message.constructionTime, message.isCrossNode(), message.getTimeout(), message.getSlowQueryTimeout());
    ReadResponse response;
    try (ReadExecutionController executionController = command.executionController();
        UnfilteredPartitionIterator iterator = command.executeLocally(executionController)) {
        response = command.createResponse(iterator);
    }
    if (!command.complete()) {
        Tracing.trace("Discarding partial response to {} (timed out)", message.from);
        MessagingService.instance().incrementDroppedMessages(message, message.getLifetimeInMS());
        return;
    }
    Tracing.trace("Enqueuing response to {}", message.from);
    MessageOut<ReadResponse> reply = new MessageOut<>(MessagingService.Verb.REQUEST_RESPONSE, response, serializer());
    MessagingService.instance().sendReply(reply, id, message.from);
}
Also used : UnfilteredPartitionIterator(org.apache.cassandra.db.partitions.UnfilteredPartitionIterator) MessageOut(org.apache.cassandra.net.MessageOut)

Example 10 with UnfilteredPartitionIterator

Use of org.apache.cassandra.db.partitions.UnfilteredPartitionIterator in project cassandra by apache.

From the class CompositesSearcher, the method queryDataFromIndex lazily converts index hits into an UnfilteredPartitionIterator over the matching base-table data, implemented as an anonymous class that prepares the next partition on demand.

protected UnfilteredPartitionIterator queryDataFromIndex(final DecoratedKey indexKey, final RowIterator indexHits, final ReadCommand command, final ReadExecutionController executionController) {
    assert indexHits.staticRow() == Rows.EMPTY_STATIC_ROW;
    return new UnfilteredPartitionIterator() {

        private IndexEntry nextEntry;

        private UnfilteredRowIterator next;

        public TableMetadata metadata() {
            return command.metadata();
        }

        public boolean hasNext() {
            return prepareNext();
        }

        public UnfilteredRowIterator next() {
            if (next == null)
                prepareNext();
            UnfilteredRowIterator toReturn = next;
            next = null;
            return toReturn;
        }

        private boolean prepareNext() {
            while (true) {
                if (next != null)
                    return true;
                if (nextEntry == null) {
                    if (!indexHits.hasNext())
                        return false;
                    nextEntry = index.decodeEntry(indexKey, indexHits.next());
                }
                SinglePartitionReadCommand dataCmd;
                DecoratedKey partitionKey = index.baseCfs.decorateKey(nextEntry.indexedKey);
                List<IndexEntry> entries = new ArrayList<>();
                if (isStaticColumn()) {
                    // If the index is on a static column, we just need to do a full read on the partition.
                    // Note that we want to re-use the command.columnFilter() in case of future change.
                    dataCmd = SinglePartitionReadCommand.create(index.baseCfs.metadata(), command.nowInSec(), command.columnFilter(), RowFilter.NONE, DataLimits.NONE, partitionKey, new ClusteringIndexSliceFilter(Slices.ALL, false));
                    entries.add(nextEntry);
                    nextEntry = indexHits.hasNext() ? index.decodeEntry(indexKey, indexHits.next()) : null;
                } else {
                    // Gather all index hits belonging to the same partition and query the data for those hits.
                    // TODO: it's much more efficient to do 1 read for all hits to the same partition than doing
                    // 1 read per index hit. However, this basically means materializing all hits for a partition
                    // in memory, so we should consider adding some paging mechanism. That said, index hits should
                    // be relatively small, so this is still much better than the previous code, which materialized
                    // all *data* for a given partition.
                    BTreeSet.Builder<Clustering> clusterings = BTreeSet.builder(index.baseCfs.getComparator());
                    while (nextEntry != null && partitionKey.getKey().equals(nextEntry.indexedKey)) {
                        // We've queried a slice of the index, but some hits may not match all of the clustering column constraints
                        if (isMatchingEntry(partitionKey, nextEntry, command)) {
                            clusterings.add(nextEntry.indexedEntryClustering);
                            entries.add(nextEntry);
                        }
                        nextEntry = indexHits.hasNext() ? index.decodeEntry(indexKey, indexHits.next()) : null;
                    }
                    // Because we've eliminated entries that don't match the clustering columns, it's possible we added nothing
                    if (clusterings.isEmpty())
                        continue;
                    // Query the gathered index hits. We still need to filter stale hits from the resulting query.
                    ClusteringIndexNamesFilter filter = new ClusteringIndexNamesFilter(clusterings.build(), false);
                    dataCmd = SinglePartitionReadCommand.create(index.baseCfs.metadata(), command.nowInSec(), command.columnFilter(), command.rowFilter(), DataLimits.NONE, partitionKey, filter);
                }
                // We close dataIter right away if it's empty; if it's assigned to next, close() will be
                // called either by the next caller of next() or when this iterator itself is closed.
                @SuppressWarnings("resource")
                UnfilteredRowIterator dataIter = filterStaleEntries(dataCmd.queryMemtableAndDisk(index.baseCfs, executionController), indexKey.getKey(), entries, executionController.writeOpOrderGroup(), command.nowInSec());
                if (dataIter.isEmpty()) {
                    dataIter.close();
                    continue;
                }
                next = dataIter;
                return true;
            }
        }

        public void remove() {
            throw new UnsupportedOperationException();
        }

        public void close() {
            indexHits.close();
            if (next != null)
                next.close();
        }
    };
}
Also used : ArrayList(java.util.ArrayList) IndexEntry(org.apache.cassandra.index.internal.IndexEntry) UnfilteredPartitionIterator(org.apache.cassandra.db.partitions.UnfilteredPartitionIterator) ClusteringIndexNamesFilter(org.apache.cassandra.db.filter.ClusteringIndexNamesFilter) ClusteringIndexSliceFilter(org.apache.cassandra.db.filter.ClusteringIndexSliceFilter) BTreeSet(org.apache.cassandra.utils.btree.BTreeSet)
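
The anonymous class above implements the whole contract: metadata() plus the inherited hasNext(), next() and close() (remove() already defaults to throwing UnsupportedOperationException, so overriding it as above is optional). A pass-through wrapper therefore only needs four methods. A minimal sketch mirroring the override set of the example, where counter is a java.util.concurrent.atomic.AtomicInteger supplied by the caller:

static UnfilteredPartitionIterator countingWrapper(UnfilteredPartitionIterator wrapped, AtomicInteger counter) {
    return new UnfilteredPartitionIterator() {

        public TableMetadata metadata() {
            return wrapped.metadata();
        }

        public boolean hasNext() {
            return wrapped.hasNext();
        }

        public UnfilteredRowIterator next() {
            // Count each partition as it is handed to the consumer
            counter.incrementAndGet();
            return wrapped.next();
        }

        public void close() {
            wrapped.close();
        }
    };
}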

Aggregations

UnfilteredPartitionIterator (org.apache.cassandra.db.partitions.UnfilteredPartitionIterator): 10 uses
UnfilteredRowIterator (org.apache.cassandra.db.rows.UnfilteredRowIterator): 4 uses
Test (org.junit.Test): 4 uses
ClusteringIndexSliceFilter (org.apache.cassandra.db.filter.ClusteringIndexSliceFilter): 3 uses
ColumnFilter (org.apache.cassandra.db.filter.ColumnFilter): 3 uses
ArrayList (java.util.ArrayList): 2 uses
AbstractRow (org.apache.cassandra.db.rows.AbstractRow): 2 uses
DataInputBuffer (org.apache.cassandra.io.util.DataInputBuffer): 2 uses
DataOutputBuffer (org.apache.cassandra.io.util.DataOutputBuffer): 2 uses
ByteBuffer (java.nio.ByteBuffer): 1 use
ClusteringIndexNamesFilter (org.apache.cassandra.db.filter.ClusteringIndexNamesFilter): 1 use
RowFilter (org.apache.cassandra.db.filter.RowFilter): 1 use
PartitionIterator (org.apache.cassandra.db.partitions.PartitionIterator): 1 use
UnfilteredPartitionIterators (org.apache.cassandra.db.partitions.UnfilteredPartitionIterators): 1 use
Row (org.apache.cassandra.db.rows.Row): 1 use
RowIterator (org.apache.cassandra.db.rows.RowIterator): 1 use
IndexEntry (org.apache.cassandra.index.internal.IndexEntry): 1 use
SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader): 1 use
DataInputPlus (org.apache.cassandra.io.util.DataInputPlus): 1 use
MessageOut (org.apache.cassandra.net.MessageOut): 1 use