Use of org.apache.cassandra.db.filter.ClusteringIndexSliceFilter in project cassandra by apache.
The class SinglePartitionSliceCommandTest, method staticColumnsAreReturned.
@Test
public void staticColumnsAreReturned() throws IOException {
    DecoratedKey key = metadata.partitioner.decorateKey(ByteBufferUtil.bytes("k1"));
    QueryProcessor.executeInternal("INSERT INTO ks.tbl (k, s) VALUES ('k1', 's')");
    Assert.assertFalse(QueryProcessor.executeInternal("SELECT s FROM ks.tbl WHERE k='k1'").isEmpty());
    ColumnFilter columnFilter = ColumnFilter.selection(RegularAndStaticColumns.of(s));
    ClusteringIndexSliceFilter sliceFilter = new ClusteringIndexSliceFilter(Slices.NONE, false);
    ReadCommand cmd = new SinglePartitionReadCommand(false, MessagingService.VERSION_30, metadata, FBUtilities.nowInSeconds(), columnFilter, RowFilter.NONE, DataLimits.NONE, key, sliceFilter);
    // check raw iterator for static cell
    try (ReadExecutionController executionController = cmd.executionController();
         UnfilteredPartitionIterator pi = cmd.executeLocally(executionController)) {
        checkForS(pi);
    }
    ReadResponse response;
    DataOutputBuffer out;
    DataInputPlus in;
    ReadResponse dst;
    // check (de)serialized iterator for memtable static cell
    try (ReadExecutionController executionController = cmd.executionController();
         UnfilteredPartitionIterator pi = cmd.executeLocally(executionController)) {
        response = ReadResponse.createDataResponse(pi, cmd);
    }
    out = new DataOutputBuffer((int) ReadResponse.serializer.serializedSize(response, MessagingService.VERSION_30));
    ReadResponse.serializer.serialize(response, out, MessagingService.VERSION_30);
    in = new DataInputBuffer(out.buffer(), true);
    dst = ReadResponse.serializer.deserialize(in, MessagingService.VERSION_30);
    try (UnfilteredPartitionIterator pi = dst.makeIterator(cmd)) {
        checkForS(pi);
    }
    // check (de)serialized iterator for sstable static cell
    Schema.instance.getColumnFamilyStoreInstance(metadata.id).forceBlockingFlush();
    try (ReadExecutionController executionController = cmd.executionController();
         UnfilteredPartitionIterator pi = cmd.executeLocally(executionController)) {
        response = ReadResponse.createDataResponse(pi, cmd);
    }
    out = new DataOutputBuffer((int) ReadResponse.serializer.serializedSize(response, MessagingService.VERSION_30));
    ReadResponse.serializer.serialize(response, out, MessagingService.VERSION_30);
    in = new DataInputBuffer(out.buffer(), true);
    dst = ReadResponse.serializer.deserialize(in, MessagingService.VERSION_30);
    try (UnfilteredPartitionIterator pi = dst.makeIterator(cmd)) {
        checkForS(pi);
    }
}
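The checkForS helper the test calls three times is not shown on this page. A minimal sketch of what such a check could look like, assuming the static column metadata s and an Assert import are in scope as in the test above; this is illustrative only, not the actual helper, and Cell accessor names vary somewhat across Cassandra versions:

// Hypothetical sketch of a checkForS-style helper: the real one lives in
// SinglePartitionSliceCommandTest and may differ in detail.
private void checkForS(UnfilteredPartitionIterator pi) {
    Assert.assertTrue(pi.hasNext());
    try (UnfilteredRowIterator partition = pi.next()) {
        // The static cell must survive the read path: the partition's static
        // row should carry a cell for column 's' holding the inserted value.
        Row staticRow = partition.staticRow();
        Cell cell = staticRow.getCell(s);
        Assert.assertNotNull(cell);
        Assert.assertEquals(ByteBufferUtil.bytes("s"), cell.value());
    }
}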
Use of org.apache.cassandra.db.filter.ClusteringIndexSliceFilter in project cassandra by apache.
The class CassandraIndexTest, method assertIndexRowTtl.
// This is slightly annoying, but we cannot read rows via the methods in Util, as
// ReadCommand#executeInternal uses metadata retrieved via the tableId, which the index
// CFS inherits from the base CFS. That metadata has the 'wrong' partitioner (the index
// table uses LocalPartitioner, the base table a real one), so we cannot read from the
// index table with executeInternal.
private void assertIndexRowTtl(ColumnFamilyStore indexCfs, int indexedValue, int ttl) throws Throwable {
    DecoratedKey indexKey = indexCfs.decorateKey(ByteBufferUtil.bytes(indexedValue));
    ClusteringIndexFilter filter = new ClusteringIndexSliceFilter(Slices.with(indexCfs.metadata().comparator, Slice.ALL), false);
    SinglePartitionReadCommand command = SinglePartitionReadCommand.create(indexCfs.metadata(), FBUtilities.nowInSeconds(), indexKey, ColumnFilter.all(indexCfs.metadata()), filter);
    try (ReadExecutionController executionController = command.executionController();
         UnfilteredRowIterator iter = command.queryMemtableAndDisk(indexCfs, executionController)) {
        while (iter.hasNext()) {
            Unfiltered unfiltered = iter.next();
            assert (unfiltered.isRow());
            Row indexRow = (Row) unfiltered;
            assertEquals(ttl, indexRow.primaryKeyLivenessInfo().ttl());
        }
    }
}
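A hedged usage sketch for the helper above, assuming a CQLTester-style test (CassandraIndexTest extends CQLTester); the schema, the TTL value, and the index-CFS lookup chain are all assumptions for illustration and may not match how the real test wires this up:

// Sketch: write a TTL'd value into an indexed column, then assert that the
// corresponding index row carries the same TTL. Names/lookups are assumptions.
int ttl = 60;
createTable("CREATE TABLE %s (k int PRIMARY KEY, v int)");
createIndex("CREATE INDEX ON %s(v)");
execute("INSERT INTO %s (k, v) VALUES (0, 7) USING TTL " + ttl);
flush();
ColumnFamilyStore indexCfs = getCurrentColumnFamilyStore().indexManager
                                                          .getAllIndexColumnFamilyStores()
                                                          .iterator()
                                                          .next();
assertIndexRowTtl(indexCfs, 7, ttl);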
Use of org.apache.cassandra.db.filter.ClusteringIndexSliceFilter in project cassandra by apache.
The class ReadCommandTest, method testSinglePartitionGroupMerge.
@Test
public void testSinglePartitionGroupMerge() throws Exception {
    ColumnFamilyStore cfs = Keyspace.open(KEYSPACE).getColumnFamilyStore(CF3);
    // "1" indicates to create the data, "-1" to delete the row
    String[][][] groups = new String[][][] {
        new String[][] { new String[] { "1", "key1", "aa", "a" },
                         new String[] { "1", "key2", "bb", "b" },
                         new String[] { "1", "key3", "cc", "c" } },
        new String[][] { new String[] { "1", "key3", "dd", "d" },
                         new String[] { "1", "key2", "ee", "e" },
                         new String[] { "1", "key1", "ff", "f" } },
        new String[][] { new String[] { "1", "key6", "aa", "a" },
                         new String[] { "1", "key5", "bb", "b" },
                         new String[] { "1", "key4", "cc", "c" } },
        new String[][] { new String[] { "-1", "key6", "aa", "a" },
                         new String[] { "-1", "key2", "bb", "b" } } };
    // Given the data above, when the keys are sorted and the deletions removed, we should
    // get these clustering rows in this order. The two "-1" deletions remove key2's "bb"
    // and key6's only row, so five non-empty partitions (key1..key5) survive the merge.
    String[] expectedRows = new String[] { "aa", "ff", "ee", "cc", "dd", "cc", "bb" };
    List<ByteBuffer> buffers = new ArrayList<>(groups.length);
    int nowInSeconds = FBUtilities.nowInSeconds();
    ColumnFilter columnFilter = ColumnFilter.allRegularColumnsBuilder(cfs.metadata()).build();
    RowFilter rowFilter = RowFilter.create();
    Slice slice = Slice.make(ClusteringBound.BOTTOM, ClusteringBound.TOP);
    ClusteringIndexSliceFilter sliceFilter = new ClusteringIndexSliceFilter(Slices.with(cfs.metadata().comparator, slice), false);
    for (String[][] group : groups) {
        cfs.truncateBlocking();
        List<SinglePartitionReadCommand> commands = new ArrayList<>(group.length);
        for (String[] data : group) {
            if (data[0].equals("1")) {
                new RowUpdateBuilder(cfs.metadata(), 0, ByteBufferUtil.bytes(data[1])).clustering(data[2]).add(data[3], ByteBufferUtil.bytes("blah")).build().apply();
            } else {
                RowUpdateBuilder.deleteRow(cfs.metadata(), FBUtilities.timestampMicros(), ByteBufferUtil.bytes(data[1]), data[2]).apply();
            }
            commands.add(SinglePartitionReadCommand.create(cfs.metadata(), nowInSeconds, columnFilter, rowFilter, DataLimits.NONE, Util.dk(data[1]), sliceFilter));
        }
        cfs.forceBlockingFlush();
        ReadQuery query = new SinglePartitionReadCommand.Group(commands, DataLimits.NONE);
        try (ReadExecutionController executionController = query.executionController();
             UnfilteredPartitionIterator iter = query.executeLocally(executionController);
             DataOutputBuffer buffer = new DataOutputBuffer()) {
            UnfilteredPartitionIterators.serializerForIntraNode().serialize(iter, columnFilter, buffer, MessagingService.current_version);
            buffers.add(buffer.buffer());
        }
    }
    // deserialize, merge and check the results are all there
    List<UnfilteredPartitionIterator> iterators = new ArrayList<>();
    for (ByteBuffer buffer : buffers) {
        try (DataInputBuffer in = new DataInputBuffer(buffer, true)) {
            iterators.add(UnfilteredPartitionIterators.serializerForIntraNode().deserialize(in, MessagingService.current_version, cfs.metadata(), columnFilter, SerializationHelper.Flag.LOCAL));
        }
    }
    try (PartitionIterator partitionIterator = UnfilteredPartitionIterators.mergeAndFilter(iterators, nowInSeconds, new UnfilteredPartitionIterators.MergeListener() {
        public UnfilteredRowIterators.MergeListener getRowMergeListener(DecoratedKey partitionKey, List<UnfilteredRowIterator> versions) {
            return null;
        }

        public void close() {
        }
    })) {
        int i = 0;
        int numPartitions = 0;
        while (partitionIterator.hasNext()) {
            numPartitions++;
            try (RowIterator rowIterator = partitionIterator.next()) {
                while (rowIterator.hasNext()) {
                    Row row = rowIterator.next();
                    assertEquals("col=" + expectedRows[i++], row.clustering().toString(cfs.metadata()));
                    // System.out.print(row.toString(cfs.metadata, true));
                }
            }
        }
        assertEquals(5, numPartitions);
        assertEquals(expectedRows.length, i);
    }
}
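A side note on the filter construction in this test: a slice from ClusteringBound.BOTTOM to ClusteringBound.TOP selects every clustering row, which can also be expressed with the predefined Slices.ALL constant. A minimal sketch, assuming a table's metadata is in scope as in the test above:

// Sketch: two equivalent ways to build a ClusteringIndexSliceFilter that
// selects the whole partition in forward order (reversed = false).
ClusteringIndexSliceFilter viaAll =
    new ClusteringIndexSliceFilter(Slices.ALL, false);

Slice full = Slice.make(ClusteringBound.BOTTOM, ClusteringBound.TOP);
ClusteringIndexSliceFilter viaExplicitSlice =
    new ClusteringIndexSliceFilter(Slices.with(cfs.metadata().comparator, full), false);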
Use of org.apache.cassandra.db.filter.ClusteringIndexSliceFilter in project cassandra by apache.
The class SinglePartitionSliceCommandTest, method toCQLStringIsSafeToCall.
@Test
public void toCQLStringIsSafeToCall() throws IOException {
    DecoratedKey key = metadata.partitioner.decorateKey(ByteBufferUtil.bytes("k1"));
    ColumnFilter columnFilter = ColumnFilter.selection(RegularAndStaticColumns.of(s));
    Slice slice = Slice.make(ClusteringBound.BOTTOM, ClusteringBound.inclusiveEndOf(ByteBufferUtil.bytes("i1")));
    ClusteringIndexSliceFilter sliceFilter = new ClusteringIndexSliceFilter(Slices.with(metadata.comparator, slice), false);
    ReadCommand cmd = new SinglePartitionReadCommand(false, MessagingService.VERSION_30, metadata, FBUtilities.nowInSeconds(), columnFilter, RowFilter.NONE, DataLimits.NONE, key, sliceFilter);
    String ret = cmd.toCQLString();
    Assert.assertNotNull(ret);
    Assert.assertFalse(ret.isEmpty());
}
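Since the command here is built internally rather than parsed from CQL, a toCQLString that never throws is mostly valuable for diagnostics. A hedged sketch of that use; the logRead helper and logger wiring are assumptions, and the rendered string is version-dependent log output, not re-executable CQL:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Sketch: toCQLString() as a diagnostic aid for internally built commands.
private static final Logger logger = LoggerFactory.getLogger(SinglePartitionSliceCommandTest.class);

private static void logRead(ReadCommand cmd) {
    logger.debug("Executing read: {}", cmd.toCQLString());
}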
Use of org.apache.cassandra.db.filter.ClusteringIndexSliceFilter in project cassandra by apache.
The class SSTableScannerTest, method dataRange.
private static DataRange dataRange(TableMetadata metadata, PartitionPosition start, boolean startInclusive, PartitionPosition end, boolean endInclusive) {
    Slices.Builder sb = new Slices.Builder(metadata.comparator);
    ClusteringIndexSliceFilter filter = new ClusteringIndexSliceFilter(sb.build(), false);
    return new DataRange(AbstractBounds.bounds(start, startInclusive, end, endInclusive), filter);
}
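A hedged usage sketch for the helper above; the cfs handle and key values are assumptions. Note that an empty Slices.Builder appears to build an empty Slices, so the clustering filter selects no slices, which is fine when a test only cares about which partitions the resulting DataRange covers:

// Sketch: a DataRange spanning the positions of two decorated keys, inclusive
// at both ends. DecoratedKey implements PartitionPosition, so the keys can be
// passed directly as bounds.
TableMetadata metadata = cfs.metadata();
PartitionPosition start = Util.dk("key1");
PartitionPosition end = Util.dk("key9");
DataRange range = dataRange(metadata, start, true, end, true);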