Use of org.apache.cassandra.db.rows.RowIterator in project cassandra by apache.
The class BatchStatement, method executeWithConditions.
private ResultMessage executeWithConditions(BatchQueryOptions options, QueryState state, long queryStartNanoTime)
throws RequestExecutionException, RequestValidationException {
    Pair<CQL3CasRequest, Set<ColumnMetadata>> p = makeCasRequest(options, state);
    CQL3CasRequest casRequest = p.left;
    Set<ColumnMetadata> columnsWithConditions = p.right;
    String ksName = casRequest.metadata.keyspace;
    String tableName = casRequest.metadata.name;
    try (RowIterator result = StorageProxy.cas(ksName, tableName, casRequest.key, casRequest,
                                               options.getSerialConsistency(), options.getConsistency(),
                                               state.getClientState(), queryStartNanoTime)) {
        return new ResultMessage.Rows(ModificationStatement.buildCasResultSet(ksName, tableName, result,
                                                                              columnsWithConditions, true,
                                                                              options.forStatement(0)));
    }
}
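The essential pattern here is that RowIterator is AutoCloseable: StorageProxy.cas hands back the rows read for the CAS result, and the try-with-resources block guarantees they are released once buildCasResultSet has consumed them. As a minimal sketch (not from Cassandra's source), draining such an iterator by hand looks like this, using only the RowIterator and Row calls that appear elsewhere on this page:

static void drain(RowIterator result) {
    // close the iterator even if consumption fails part-way through
    try (RowIterator rows = result) {
        while (rows.hasNext()) {
            Row row = rows.next();
            // render the clustering using the table's comparator
            System.out.println(row.clustering().toString(rows.metadata()));
        }
    }
}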
Use of org.apache.cassandra.db.rows.RowIterator in project cassandra by apache.
The class ReadCommandTest, method testSinglePartitionGroupMerge.
@Test
public void testSinglePartitionGroupMerge() throws Exception {
    ColumnFamilyStore cfs = Keyspace.open(KEYSPACE).getColumnFamilyStore(CF3);
    // "1" indicates to create the data, "-1" to delete the row
    String[][][] groups = new String[][][] {
        new String[][] {
            new String[] { "1", "key1", "aa", "a" },
            new String[] { "1", "key2", "bb", "b" },
            new String[] { "1", "key3", "cc", "c" }
        },
        new String[][] {
            new String[] { "1", "key3", "dd", "d" },
            new String[] { "1", "key2", "ee", "e" },
            new String[] { "1", "key1", "ff", "f" }
        },
        new String[][] {
            new String[] { "1", "key6", "aa", "a" },
            new String[] { "1", "key5", "bb", "b" },
            new String[] { "1", "key4", "cc", "c" }
        },
        new String[][] {
            new String[] { "-1", "key6", "aa", "a" },
            new String[] { "-1", "key2", "bb", "b" }
        }
    };
    // Given the data above, when the keys are sorted and the deletions removed, we should
    // get these clustering rows in this order
    String[] expectedRows = new String[] { "aa", "ff", "ee", "cc", "dd", "cc", "bb" };
    List<ByteBuffer> buffers = new ArrayList<>(groups.length);
    int nowInSeconds = FBUtilities.nowInSeconds();
    ColumnFilter columnFilter = ColumnFilter.allRegularColumnsBuilder(cfs.metadata()).build();
    RowFilter rowFilter = RowFilter.create();
    Slice slice = Slice.make(ClusteringBound.BOTTOM, ClusteringBound.TOP);
    ClusteringIndexSliceFilter sliceFilter = new ClusteringIndexSliceFilter(Slices.with(cfs.metadata().comparator, slice), false);
    for (String[][] group : groups) {
        cfs.truncateBlocking();
        List<SinglePartitionReadCommand> commands = new ArrayList<>(group.length);
        for (String[] data : group) {
            if (data[0].equals("1")) {
                new RowUpdateBuilder(cfs.metadata(), 0, ByteBufferUtil.bytes(data[1]))
                    .clustering(data[2])
                    .add(data[3], ByteBufferUtil.bytes("blah"))
                    .build()
                    .apply();
            } else {
                RowUpdateBuilder.deleteRow(cfs.metadata(), FBUtilities.timestampMicros(), ByteBufferUtil.bytes(data[1]), data[2]).apply();
            }
            commands.add(SinglePartitionReadCommand.create(cfs.metadata(), nowInSeconds, columnFilter, rowFilter, DataLimits.NONE, Util.dk(data[1]), sliceFilter));
        }
        cfs.forceBlockingFlush();
        // read each group back locally and serialize the unfiltered result for later merging
        ReadQuery query = new SinglePartitionReadCommand.Group(commands, DataLimits.NONE);
        try (ReadExecutionController executionController = query.executionController();
             UnfilteredPartitionIterator iter = query.executeLocally(executionController);
             DataOutputBuffer buffer = new DataOutputBuffer()) {
            UnfilteredPartitionIterators.serializerForIntraNode().serialize(iter, columnFilter, buffer, MessagingService.current_version);
            buffers.add(buffer.buffer());
        }
    }
    // deserialize, merge and check the results are all there
    List<UnfilteredPartitionIterator> iterators = new ArrayList<>();
    for (ByteBuffer buffer : buffers) {
        try (DataInputBuffer in = new DataInputBuffer(buffer, true)) {
            iterators.add(UnfilteredPartitionIterators.serializerForIntraNode().deserialize(in, MessagingService.current_version, cfs.metadata(), columnFilter, SerializationHelper.Flag.LOCAL));
        }
    }
    try (PartitionIterator partitionIterator = UnfilteredPartitionIterators.mergeAndFilter(iterators, nowInSeconds, new UnfilteredPartitionIterators.MergeListener() {
        public UnfilteredRowIterators.MergeListener getRowMergeListener(DecoratedKey partitionKey, List<UnfilteredRowIterator> versions) {
            return null;
        }

        public void close() {
        }
    })) {
        int i = 0;
        int numPartitions = 0;
        while (partitionIterator.hasNext()) {
            numPartitions++;
            try (RowIterator rowIterator = partitionIterator.next()) {
                while (rowIterator.hasNext()) {
                    Row row = rowIterator.next();
                    assertEquals("col=" + expectedRows[i++], row.clustering().toString(cfs.metadata()));
                }
            }
        }
        assertEquals(5, numPartitions);
        assertEquals(expectedRows.length, i);
    }
}
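The loop at the end of the test is the canonical way to consume a merged PartitionIterator: one RowIterator per partition, closed per partition, with rows arriving in clustering order. A hypothetical helper (not in the test, using the same types imported by the snippets above) that captures that consumption as a reusable method might look like:

static List<String> collectClusterings(PartitionIterator partitions, TableMetadata metadata) {
    List<String> clusterings = new ArrayList<>();
    while (partitions.hasNext()) {
        // each next() yields one partition's rows; close it before advancing
        try (RowIterator rows = partitions.next()) {
            while (rows.hasNext())
                clusterings.add(rows.next().clustering().toString(metadata));
        }
    }
    return clusterings;
}

With such a helper, the assertions above would reduce to comparing collectClusterings(partitionIterator, cfs.metadata()) against Arrays.asList(expectedRows).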
Use of org.apache.cassandra.db.rows.RowIterator in project cassandra by apache.
The class QueryPagerTest, method query.
private static List<FilteredPartition> query(QueryPager pager, int toQuery, int expectedSize) {
    StringBuilder sb = new StringBuilder();
    List<FilteredPartition> partitionList = new ArrayList<>();
    int rows = 0;
    try (ReadExecutionController executionController = pager.executionController();
         PartitionIterator iterator = pager.fetchPageInternal(toQuery, executionController)) {
        while (iterator.hasNext()) {
            try (RowIterator rowIter = iterator.next()) {
                FilteredPartition partition = FilteredPartition.create(rowIter);
                sb.append(partition);
                partitionList.add(partition);
                rows += partition.rowCount();
            }
        }
    }
    assertEquals(sb.toString(), expectedSize, rows);
    return partitionList;
}
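Each call to this helper drives a single page; the pager keeps its position between calls. A minimal sketch (not from the test, assuming the same QueryPager API used above plus QueryPager.isExhausted()) of looping a pager to exhaustion and counting every row it ever returns:

static int countAllRows(QueryPager pager, int pageSize) {
    int total = 0;
    while (!pager.isExhausted()) {
        // one execution controller and one page per iteration, as in the helper above
        try (ReadExecutionController controller = pager.executionController();
             PartitionIterator page = pager.fetchPageInternal(pageSize, controller)) {
            while (page.hasNext()) {
                try (RowIterator rows = page.next()) {
                    while (rows.hasNext()) {
                        rows.next();
                        total++;
                    }
                }
            }
        }
    }
    return total;
}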
Use of org.apache.cassandra.db.rows.RowIterator in project cassandra by apache.
The class QueryPagerTest, method queryAndVerifyCells.
private void queryAndVerifyCells(TableMetadata table, boolean reversed, String key) throws Exception {
    ClusteringIndexFilter rowfilter = new ClusteringIndexSliceFilter(Slices.ALL, reversed);
    ReadCommand command = SinglePartitionReadCommand.create(table, nowInSec, Util.dk(key), ColumnFilter.all(table), rowfilter);
    QueryPager pager = command.getPager(null, ProtocolVersion.CURRENT);

    ColumnMetadata staticColumn = table.staticColumns().getSimple(0);
    assertEquals(staticColumn.name.toCQLString(), "st");

    for (int i = 0; i < 5; i++) {
        try (ReadExecutionController controller = pager.executionController();
             PartitionIterator partitions = pager.fetchPageInternal(1, controller)) {
            try (RowIterator partition = partitions.next()) {
                assertCell(partition.staticRow(), staticColumn, 4);

                Row row = partition.next();
                int cellIndex = !reversed ? i : 4 - i;

                assertEquals(row.clustering().get(0), ByteBufferUtil.bytes(cellIndex));
                assertCell(row, table.getColumn(new ColumnIdentifier("v1", false)), cellIndex);
                assertCell(row, table.getColumn(new ColumnIdentifier("v2", false)), cellIndex);

                // the partition/page should contain just a single regular row
                assertFalse(partition.hasNext());
            }
        }
    }

    // After processing the 5 rows there should be no more rows to return
    try (ReadExecutionController controller = pager.executionController();
         PartitionIterator partitions = pager.fetchPageInternal(1, controller)) {
        assertFalse(partitions.hasNext());
    }
}
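The assertCell helper is not shown in this excerpt; a plausible implementation (hypothetical, inferred from its call sites above and the Row.getCell / Cell.value calls used in the KeyspaceTest example below) would compare the cell's value for the given column against the int-encoded expectation:

private static void assertCell(Row row, ColumnMetadata column, int expected) {
    Cell cell = row.getCell(column);
    // fail loudly if the column is absent rather than NPE on value()
    assertNotNull("no cell found for " + column.name, cell);
    assertEquals(ByteBufferUtil.bytes(expected), cell.value());
}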
Use of org.apache.cassandra.db.rows.RowIterator in project cassandra by apache.
The class KeyspaceTest, method assertRowsInSlice.
private static void assertRowsInSlice(ColumnFamilyStore cfs, String key, int sliceStart, int sliceEnd, int limit, boolean reversed, String columnValuePrefix) {
    Clustering startClustering = Clustering.make(ByteBufferUtil.bytes(sliceStart));
    Clustering endClustering = Clustering.make(ByteBufferUtil.bytes(sliceEnd));
    Slices slices = Slices.with(cfs.getComparator(), Slice.make(startClustering, endClustering));
    ClusteringIndexSliceFilter filter = new ClusteringIndexSliceFilter(slices, reversed);
    SinglePartitionReadCommand command = singlePartitionSlice(cfs, key, filter, limit);
    try (ReadExecutionController executionController = command.executionController();
         PartitionIterator iterator = command.executeInternal(executionController)) {
        try (RowIterator rowIterator = iterator.next()) {
            if (reversed) {
                for (int i = sliceEnd; i >= sliceStart; i--) {
                    Row row = rowIterator.next();
                    Cell cell = row.getCell(cfs.metadata().getColumn(new ColumnIdentifier("c", false)));
                    assertEquals(ByteBufferUtil.bytes(columnValuePrefix + i), cell.value());
                }
            } else {
                for (int i = sliceStart; i <= sliceEnd; i++) {
                    Row row = rowIterator.next();
                    Cell cell = row.getCell(cfs.metadata().getColumn(new ColumnIdentifier("c", false)));
                    assertEquals(ByteBufferUtil.bytes(columnValuePrefix + i), cell.value());
                }
            }
            assertFalse(rowIterator.hasNext());
        }
    }
}
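The singlePartitionSlice helper is likewise not shown in this excerpt; a plausible version (hypothetical, modeled on the SinglePartitionReadCommand.create call in the ReadCommandTest example above) would build a limited single-partition read over the given slice filter:

private static SinglePartitionReadCommand singlePartitionSlice(ColumnFamilyStore cfs, String key, ClusteringIndexSliceFilter filter, int limit) {
    // DataLimits.cqlLimits caps the number of rows returned, matching the limit argument
    return SinglePartitionReadCommand.create(cfs.metadata(),
                                             FBUtilities.nowInSeconds(),
                                             ColumnFilter.all(cfs.metadata()),
                                             RowFilter.create(),
                                             DataLimits.cqlLimits(limit),
                                             Util.dk(key),
                                             filter);
}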