
Example 6 with RowFilter

Use of org.apache.cassandra.db.filter.RowFilter in project cassandra by apache.

From class ReadCommandTest, method testCountDeletedRows.

@Test
public void testCountDeletedRows() throws Exception {
    ColumnFamilyStore cfs = Keyspace.open(KEYSPACE).getColumnFamilyStore(CF4);
    String[][][] groups = new String[][][] {
        // "1" indicates to create the data, "-1" to delete the row
        new String[][] { new String[] { "1", "key1", "aa", "a" },
                         new String[] { "1", "key2", "bb", "b" },
                         new String[] { "1", "key3", "cc", "c" } },
        new String[][] { new String[] { "1", "key3", "dd", "d" },
                         new String[] { "1", "key2", "ee", "e" },
                         new String[] { "1", "key1", "ff", "f" } },
        new String[][] { new String[] { "1", "key6", "aa", "a" },
                         new String[] { "1", "key5", "bb", "b" },
                         new String[] { "1", "key4", "cc", "c" } },
        new String[][] { new String[] { "1", "key2", "aa", "a" },
                         new String[] { "1", "key2", "cc", "c" },
                         new String[] { "1", "key2", "dd", "d" } },
        new String[][] { new String[] { "-1", "key6", "aa", "a" },
                         new String[] { "-1", "key2", "bb", "b" },
                         new String[] { "-1", "key2", "ee", "e" },
                         new String[] { "-1", "key2", "aa", "a" },
                         new String[] { "-1", "key2", "cc", "c" },
                         new String[] { "-1", "key2", "dd", "d" } } };
    List<ByteBuffer> buffers = new ArrayList<>(groups.length);
    int nowInSeconds = FBUtilities.nowInSeconds();
    ColumnFilter columnFilter = ColumnFilter.allRegularColumnsBuilder(cfs.metadata(), false).build();
    RowFilter rowFilter = RowFilter.create();
    Slice slice = Slice.make(BufferClusteringBound.BOTTOM, BufferClusteringBound.TOP);
    ClusteringIndexSliceFilter sliceFilter = new ClusteringIndexSliceFilter(Slices.with(cfs.metadata().comparator, slice), false);
    for (String[][] group : groups) {
        cfs.truncateBlocking();
        List<SinglePartitionReadCommand> commands = new ArrayList<>(group.length);
        for (String[] data : group) {
            if (data[0].equals("1")) {
                new RowUpdateBuilder(cfs.metadata(), 0, ByteBufferUtil.bytes(data[1])).clustering(data[2]).add(data[3], ByteBufferUtil.bytes("blah")).build().apply();
            } else {
                RowUpdateBuilder.deleteRow(cfs.metadata(), FBUtilities.timestampMicros(), ByteBufferUtil.bytes(data[1]), data[2]).apply();
            }
            commands.add(SinglePartitionReadCommand.create(cfs.metadata(), nowInSeconds, columnFilter, rowFilter, DataLimits.NONE, Util.dk(data[1]), sliceFilter));
        }
        cfs.forceBlockingFlush();
        ReadQuery query = SinglePartitionReadCommand.Group.create(commands, DataLimits.NONE);
        try (ReadExecutionController executionController = query.executionController();
            UnfilteredPartitionIterator iter = query.executeLocally(executionController);
            DataOutputBuffer buffer = new DataOutputBuffer()) {
            UnfilteredPartitionIterators.serializerForIntraNode().serialize(iter, columnFilter, buffer, MessagingService.current_version);
            buffers.add(buffer.buffer());
        }
    }
    assertEquals(5, cfs.metric.tombstoneScannedHistogram.cf.getSnapshot().getMax());
}
Also used: ColumnFilter (org.apache.cassandra.db.filter.ColumnFilter), ByteBuffer (java.nio.ByteBuffer), ClusteringIndexSliceFilter (org.apache.cassandra.db.filter.ClusteringIndexSliceFilter), RowFilter (org.apache.cassandra.db.filter.RowFilter), DataOutputBuffer (org.apache.cassandra.io.util.DataOutputBuffer), Test (org.junit.Test)
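
The serialized buffers above are never deserialized in this test; the assertion only inspects the tombstoneScannedHistogram metric. For completeness, a minimal sketch of the read-back path, reusing the cfs, buffers, and columnFilter variables from the test and the same intra-node serializer (Example 7 below exercises the full round trip):

for (ByteBuffer b : buffers) {
    try (DataInputBuffer in = new DataInputBuffer(b, true);
         UnfilteredPartitionIterator partitions = UnfilteredPartitionIterators.serializerForIntraNode()
                                                                              .deserialize(in,
                                                                                           MessagingService.current_version,
                                                                                           cfs.metadata(),
                                                                                           columnFilter,
                                                                                           DeserializationHelper.Flag.LOCAL)) {
        // Drain each partition; deleted rows surface here as tombstones rather than live rows.
        while (partitions.hasNext())
            partitions.next().close();
    }
}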

Example 7 with RowFilter

Use of org.apache.cassandra.db.filter.RowFilter in project cassandra by apache.

From class ReadCommandTest, method testSinglePartitionGroupMerge.

@Test
public void testSinglePartitionGroupMerge() throws Exception {
    ColumnFamilyStore cfs = Keyspace.open(KEYSPACE).getColumnFamilyStore(CF3);
    String[][][] groups = new String[][][] {
        // "1" indicates to create the data, "-1" to delete the row
        new String[][] { new String[] { "1", "key1", "aa", "a" },
                         new String[] { "1", "key2", "bb", "b" },
                         new String[] { "1", "key3", "cc", "c" } },
        new String[][] { new String[] { "1", "key3", "dd", "d" },
                         new String[] { "1", "key2", "ee", "e" },
                         new String[] { "1", "key1", "ff", "f" } },
        new String[][] { new String[] { "1", "key6", "aa", "a" },
                         new String[] { "1", "key5", "bb", "b" },
                         new String[] { "1", "key4", "cc", "c" } },
        new String[][] { new String[] { "-1", "key6", "aa", "a" },
                         new String[] { "-1", "key2", "bb", "b" } } };
    // Given the data above, when the keys are sorted and the deletions removed, we should
    // get these clustering rows in this order
    String[] expectedRows = new String[] { "aa", "ff", "ee", "cc", "dd", "cc", "bb" };
    List<ByteBuffer> buffers = new ArrayList<>(groups.length);
    int nowInSeconds = FBUtilities.nowInSeconds();
    ColumnFilter columnFilter = ColumnFilter.allRegularColumnsBuilder(cfs.metadata(), false).build();
    RowFilter rowFilter = RowFilter.create();
    Slice slice = Slice.make(BufferClusteringBound.BOTTOM, BufferClusteringBound.TOP);
    ClusteringIndexSliceFilter sliceFilter = new ClusteringIndexSliceFilter(Slices.with(cfs.metadata().comparator, slice), false);
    for (String[][] group : groups) {
        cfs.truncateBlocking();
        List<SinglePartitionReadCommand> commands = new ArrayList<>(group.length);
        for (String[] data : group) {
            if (data[0].equals("1")) {
                new RowUpdateBuilder(cfs.metadata(), 0, ByteBufferUtil.bytes(data[1])).clustering(data[2]).add(data[3], ByteBufferUtil.bytes("blah")).build().apply();
            } else {
                RowUpdateBuilder.deleteRow(cfs.metadata(), FBUtilities.timestampMicros(), ByteBufferUtil.bytes(data[1]), data[2]).apply();
            }
            commands.add(SinglePartitionReadCommand.create(cfs.metadata(), nowInSeconds, columnFilter, rowFilter, DataLimits.NONE, Util.dk(data[1]), sliceFilter));
        }
        cfs.forceBlockingFlush();
        ReadQuery query = SinglePartitionReadCommand.Group.create(commands, DataLimits.NONE);
        try (ReadExecutionController executionController = query.executionController();
            UnfilteredPartitionIterator iter = query.executeLocally(executionController);
            DataOutputBuffer buffer = new DataOutputBuffer()) {
            UnfilteredPartitionIterators.serializerForIntraNode().serialize(iter, columnFilter, buffer, MessagingService.current_version);
            buffers.add(buffer.buffer());
        }
    }
    // deserialize, merge and check the results are all there
    List<UnfilteredPartitionIterator> iterators = new ArrayList<>();
    for (ByteBuffer buffer : buffers) {
        try (DataInputBuffer in = new DataInputBuffer(buffer, true)) {
            iterators.add(UnfilteredPartitionIterators.serializerForIntraNode().deserialize(in, MessagingService.current_version, cfs.metadata(), columnFilter, DeserializationHelper.Flag.LOCAL));
        }
    }
    UnfilteredPartitionIterators.MergeListener listener = new UnfilteredPartitionIterators.MergeListener() {

        public UnfilteredRowIterators.MergeListener getRowMergeListener(DecoratedKey partitionKey, List<UnfilteredRowIterator> versions) {
            return null;
        }

        public void close() {
        }
    };
    try (PartitionIterator partitionIterator = UnfilteredPartitionIterators.filter(UnfilteredPartitionIterators.merge(iterators, listener), nowInSeconds)) {
        int i = 0;
        int numPartitions = 0;
        while (partitionIterator.hasNext()) {
            numPartitions++;
            try (RowIterator rowIterator = partitionIterator.next()) {
                while (rowIterator.hasNext()) {
                    Row row = rowIterator.next();
                    assertEquals("col=" + expectedRows[i++], row.clustering().toString(cfs.metadata()));
                // System.out.print(row.toString(cfs.metadata, true));
                }
            }
        }
        assertEquals(5, numPartitions);
        assertEquals(expectedRows.length, i);
    }
}
Also used: ClusteringIndexSliceFilter (org.apache.cassandra.db.filter.ClusteringIndexSliceFilter), RowFilter (org.apache.cassandra.db.filter.RowFilter), DataOutputBuffer (org.apache.cassandra.io.util.DataOutputBuffer), UnfilteredRowIterators (org.apache.cassandra.db.rows.UnfilteredRowIterators), ColumnFilter (org.apache.cassandra.db.filter.ColumnFilter), ByteBuffer (java.nio.ByteBuffer), DataInputBuffer (org.apache.cassandra.io.util.DataInputBuffer), RowIterator (org.apache.cassandra.db.rows.RowIterator), UnfilteredRowIterator (org.apache.cassandra.db.rows.UnfilteredRowIterator), Row (org.apache.cassandra.db.rows.Row), Test (org.junit.Test)
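
The drain-and-compare loop at the end of the test is a recurring pattern. A sketch of it extracted into a small helper, assuming the TableMetadata type returned by cfs.metadata() in this codebase; the helper name is ours, not the test's:

// Hypothetical helper mirroring the assertion loop above: drains a filtered
// PartitionIterator and returns each live row's clustering string in order.
private static List<String> clusteringStrings(PartitionIterator partitions, TableMetadata metadata) {
    List<String> result = new ArrayList<>();
    while (partitions.hasNext()) {
        try (RowIterator rows = partitions.next()) {
            while (rows.hasNext())
                result.add(rows.next().clustering().toString(metadata));
        }
    }
    return result;
}

With it, the body of the try block reduces to one list comparison against the expected clustering strings, losing only the separate numPartitions check.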

Example 8 with RowFilter

Use of org.apache.cassandra.db.filter.RowFilter in project cassandra by apache.

From class CleanupTest, method testCleanupWithIndexes.

@Test
public void testCleanupWithIndexes() throws IOException, ExecutionException, InterruptedException {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_INDEXED1);
    // insert data and verify we get it back w/ range query
    fillCF(cfs, "birthdate", LOOPS);
    assertEquals(LOOPS, Util.getAll(Util.cmd(cfs).build()).size());
    ColumnMetadata cdef = cfs.metadata().getColumn(COLUMN);
    String indexName = "birthdate_key_index";
    long start = nanoTime();
    while (!cfs.getBuiltIndexes().contains(indexName) && nanoTime() - start < TimeUnit.SECONDS.toNanos(10)) Thread.sleep(10);
    RowFilter cf = RowFilter.create();
    cf.add(cdef, Operator.EQ, VALUE);
    assertEquals(LOOPS, Util.getAll(Util.cmd(cfs).filterOn("birthdate", Operator.EQ, VALUE).build()).size());
    // We don't allow cleanup when the local host has no range, to avoid wiping out all data when a node has not joined the ring.
    // So to make sure cleanup erases everything here, we give the local host the tiniest possible range.
    TokenMetadata tmd = StorageService.instance.getTokenMetadata();
    byte[] tk1 = new byte[1], tk2 = new byte[1];
    tk1[0] = 2;
    tk2[0] = 1;
    tmd.updateNormalToken(new BytesToken(tk1), InetAddressAndPort.getByName("127.0.0.1"));
    tmd.updateNormalToken(new BytesToken(tk2), InetAddressAndPort.getByName("127.0.0.2"));
    CompactionManager.instance.performCleanup(cfs, 2);
    // row data should be gone
    assertEquals(0, Util.getAll(Util.cmd(cfs).build()).size());
    // not only should it be gone but there should be no data on disk, not even tombstones
    assert cfs.getLiveSSTables().isEmpty();
    // 2ary indexes should result in no results, too (although tombstones won't be gone until compacted)
    assertEquals(0, Util.getAll(Util.cmd(cfs).filterOn("birthdate", Operator.EQ, VALUE).build()).size());
}
Also used: ColumnMetadata (org.apache.cassandra.schema.ColumnMetadata), RowFilter (org.apache.cassandra.db.filter.RowFilter), BytesToken (org.apache.cassandra.dht.ByteOrderedPartitioner.BytesToken), TokenMetadata (org.apache.cassandra.locator.TokenMetadata), Test (org.junit.Test)
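
Note that the RowFilter built in this test (cf) is never handed to a read command; the assertions go through Util.cmd(...).filterOn(...) instead. A sketch of how the same filter would drive an index-backed range read directly, following the PartitionRangeReadCommand pattern of Example 9 below; ColumnFilter.all and cfs.getPartitioner() are our assumptions, not taken from this test:

RowFilter filter = RowFilter.create();
filter.add(cdef, Operator.EQ, VALUE); // birthdate = VALUE, answerable via the 2ary index
ReadCommand command = PartitionRangeReadCommand.create(cfs.metadata(),
                                                       FBUtilities.nowInSeconds(),
                                                       ColumnFilter.all(cfs.metadata()),
                                                       filter,
                                                       DataLimits.NONE,
                                                       DataRange.allData(cfs.getPartitioner()));
try (ReadExecutionController controller = command.executionController();
     UnfilteredPartitionIterator partitions = command.executeLocally(controller)) {
    // Every partition returned here matched the index expression above.
}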

Example 9 with RowFilter

Use of org.apache.cassandra.db.filter.RowFilter in project cassandra by apache.

From class SASIIndexTest, method getIndexed.

private static UnfilteredPartitionIterator getIndexed(ColumnFamilyStore store, ColumnFilter columnFilter, DecoratedKey startKey, int maxResults, Expression... expressions) {
    DataRange range = (startKey == null) ? DataRange.allData(PARTITIONER) : DataRange.forKeyRange(new Range<>(startKey, PARTITIONER.getMinimumToken().maxKeyBound()));
    RowFilter filter = RowFilter.create();
    for (Expression e : expressions) filter.add(store.metadata().getColumn(e.name), e.op, e.value);
    ReadCommand command = PartitionRangeReadCommand.create(store.metadata(), FBUtilities.nowInSeconds(), columnFilter, filter, DataLimits.cqlLimits(maxResults), range);
    return command.executeLocally(command.executionController());
}
Also used: RowFilter (org.apache.cassandra.db.filter.RowFilter), Range (org.apache.cassandra.dht.Range)
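
A hypothetical call site for this helper; the Expression constructor and the first_name column below are illustrative only, since the test's Expression type merely needs the name/op/value fields that getIndexed reads:

// Query first_name = 'a' across all partitions, capped at 10 rows.
Expression eq = new Expression(UTF8Type.instance.decompose("first_name"),
                               Operator.EQ,
                               UTF8Type.instance.decompose("a"));
try (UnfilteredPartitionIterator partitions =
         getIndexed(store, ColumnFilter.all(store.metadata()), null, 10, eq)) {
    while (partitions.hasNext())
        partitions.next().close(); // each returned partition contains SASI-matched rows
}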

Aggregations

RowFilter (org.apache.cassandra.db.filter.RowFilter): 9 uses
ByteBuffer (java.nio.ByteBuffer): 4 uses
Test (org.junit.Test): 4 uses
ClusteringIndexSliceFilter (org.apache.cassandra.db.filter.ClusteringIndexSliceFilter): 3 uses
ColumnFilter (org.apache.cassandra.db.filter.ColumnFilter): 3 uses
CassandraIndex (org.apache.cassandra.index.internal.CassandraIndex): 3 uses
DataOutputBuffer (org.apache.cassandra.io.util.DataOutputBuffer): 3 uses
InvalidRequestException (org.apache.cassandra.exceptions.InvalidRequestException): 2 uses
Joiner (com.google.common.base.Joiner): 1 use
Strings (com.google.common.base.Strings): 1 use
ImmutableSet (com.google.common.collect.ImmutableSet): 1 use
Iterables (com.google.common.collect.Iterables): 1 use
Maps (com.google.common.collect.Maps): 1 use
Longs (com.google.common.primitives.Longs): 1 use
Futures (com.google.common.util.concurrent.Futures): 1 use
MoreExecutors (com.google.common.util.concurrent.MoreExecutors): 1 use
IOException (java.io.IOException): 1 use
Constructor (java.lang.reflect.Constructor): 1 use
java.util (java.util): 1 use
java.util.concurrent (java.util.concurrent): 1 use