Use of org.apache.cassandra.db.rows.Unfiltered in project cassandra by apache.
The class JsonTransformer, method serializePartition.
private void serializePartition(UnfilteredRowIterator partition) {
    try {
        json.writeStartObject();

        json.writeFieldName("partition");
        json.writeStartObject();
        json.writeFieldName("key");
        serializePartitionKey(partition.partitionKey());
        json.writeNumberField("position", this.currentScanner.getCurrentPosition());

        if (!partition.partitionLevelDeletion().isLive())
            serializeDeletion(partition.partitionLevelDeletion());

        json.writeEndObject();

        if (partition.hasNext() || partition.staticRow() != null) {
            json.writeFieldName("rows");
            json.writeStartArray();
            updatePosition();

            if (!partition.staticRow().isEmpty())
                serializeRow(partition.staticRow());

            Unfiltered unfiltered;
            updatePosition();
            while (partition.hasNext()) {
                unfiltered = partition.next();
                if (unfiltered instanceof Row) {
                    serializeRow((Row) unfiltered);
                } else if (unfiltered instanceof RangeTombstoneMarker) {
                    serializeTombstone((RangeTombstoneMarker) unfiltered);
                }
                updatePosition();
            }

            json.writeEndArray();
            json.writeEndObject();
        }
    } catch (IOException e) {
        String key = metadata.partitionKeyType.getString(partition.partitionKey().getKey());
        logger.error("Fatal error parsing partition: {}", key, e);
    }
}
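The loop above is the standard way to drain an UnfilteredRowIterator: the static row is fetched explicitly via staticRow(), and every item returned by next() is dispatched on its concrete type, Row or RangeTombstoneMarker. Below is a minimal, self-contained sketch of that same pattern outside JsonTransformer; the UnfilteredWalker class name and the println output are purely illustrative.

import org.apache.cassandra.db.rows.RangeTombstoneMarker;
import org.apache.cassandra.db.rows.Row;
import org.apache.cassandra.db.rows.Unfiltered;
import org.apache.cassandra.db.rows.UnfilteredRowIterator;

// Illustrative helper (not part of Cassandra): walks one partition and dispatches
// on the concrete Unfiltered subtype, mirroring the serialization loop above.
final class UnfilteredWalker {
    static void walk(UnfilteredRowIterator partition) {
        // The static row is not emitted by the iterator; it must be requested explicitly.
        if (!partition.staticRow().isEmpty())
            System.out.println("static row: " + partition.staticRow());

        while (partition.hasNext()) {
            Unfiltered unfiltered = partition.next();
            if (unfiltered instanceof Row)
                System.out.println("row at clustering " + unfiltered.clustering());
            else if (unfiltered instanceof RangeTombstoneMarker)
                System.out.println("range tombstone marker at " + unfiltered.clustering());
        }
    }
}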
Use of org.apache.cassandra.db.rows.Unfiltered in project cassandra by apache.
The class CassandraIndexTest, method assertIndexRowTtl.
// this is slightly annoying, but we cannot read rows from the methods in Util as
// ReadCommand#executeInternal uses metadata retrieved via the tableId, which the index
// CFS inherits from the base CFS. This has the 'wrong' partitioner (the index table
// uses LocalPartitioner, the base table a real one), so we cannot read from the index
// table with executeInternal.
private void assertIndexRowTtl(ColumnFamilyStore indexCfs, int indexedValue, int ttl) throws Throwable {
    DecoratedKey indexKey = indexCfs.decorateKey(ByteBufferUtil.bytes(indexedValue));
    ClusteringIndexFilter filter = new ClusteringIndexSliceFilter(Slices.with(indexCfs.metadata().comparator, Slice.ALL), false);
    SinglePartitionReadCommand command = SinglePartitionReadCommand.create(indexCfs.metadata(),
                                                                           FBUtilities.nowInSeconds(),
                                                                           indexKey,
                                                                           ColumnFilter.all(indexCfs.metadata()),
                                                                           filter);
    try (ReadExecutionController executionController = command.executionController();
         UnfilteredRowIterator iter = command.queryMemtableAndDisk(indexCfs, executionController)) {
        while (iter.hasNext()) {
            Unfiltered unfiltered = iter.next();
            assert (unfiltered.isRow());
            Row indexRow = (Row) unfiltered;
            assertEquals(ttl, indexRow.primaryKeyLivenessInfo().ttl());
        }
    }
}
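The same SinglePartitionReadCommand / queryMemtableAndDisk read path can be used to check TTLs at the cell level rather than on the row's primary key liveness info. A hedged sketch under the same setup as assertIndexRowTtl; the method name assertIndexCellTtl is hypothetical and not part of the test class.

// Hypothetical variant of the check above: asserts the TTL on every expiring cell
// of the index rows instead of the primary key liveness info. Reuses the same
// command/executionController setup as assertIndexRowTtl.
private void assertIndexCellTtl(SinglePartitionReadCommand command, ColumnFamilyStore indexCfs, int ttl) {
    try (ReadExecutionController executionController = command.executionController();
         UnfilteredRowIterator iter = command.queryMemtableAndDisk(indexCfs, executionController)) {
        while (iter.hasNext()) {
            Unfiltered unfiltered = iter.next();
            assert (unfiltered.isRow());
            for (Cell c : ((Row) unfiltered).cells())
                if (c.isExpiring()) // only expiring cells carry a TTL
                    assertEquals(ttl, c.ttl());
        }
    }
}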
Use of org.apache.cassandra.db.rows.Unfiltered in project cassandra by apache.
The class NeverPurgeTest, method verifyContainsTombstones.
private void verifyContainsTombstones(Collection<SSTableReader> sstables, int expectedTombstoneCount) throws Exception {
    // always run a major compaction before calling this
    assertTrue(sstables.size() == 1);
    SSTableReader sstable = sstables.iterator().next();
    int tombstoneCount = 0;
    try (ISSTableScanner scanner = sstable.getScanner()) {
        while (scanner.hasNext()) {
            try (UnfilteredRowIterator iter = scanner.next()) {
                if (!iter.partitionLevelDeletion().isLive())
                    tombstoneCount++;
                while (iter.hasNext()) {
                    Unfiltered atom = iter.next();
                    if (atom.isRow()) {
                        Row r = (Row) atom;
                        if (!r.deletion().isLive())
                            tombstoneCount++;
                        for (Cell c : r.cells())
                            if (c.isTombstone())
                                tombstoneCount++;
                    }
                }
            }
        }
    }
    assertEquals(tombstoneCount, expectedTombstoneCount);
}
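Note that this count covers partition-level deletions, row deletions and cell tombstones, but not range tombstone markers. If markers also needed to be counted, the inner loop could gain one more branch, as in the hedged sketch below; keep in mind that a range deletion is bounded by an open and a close marker, so this naive check would count it twice.

// Illustrative extension of the inner loop above: also counts range tombstone markers.
while (iter.hasNext()) {
    Unfiltered atom = iter.next();
    if (atom.isRow()) {
        Row r = (Row) atom;
        if (!r.deletion().isLive())
            tombstoneCount++;
        for (Cell c : r.cells())
            if (c.isTombstone())
                tombstoneCount++;
    } else if (atom.isRangeTombstoneMarker()) {
        // an open/close pair is two Unfiltereds, so each range deletion is counted twice
        tombstoneCount++;
    }
}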