Search in sources:

Example 1 with Unfiltered

use of org.apache.cassandra.db.rows.Unfiltered in project cassandra by apache.

From the class JsonTransformer, the method serializePartition.

/**
 * Serializes one partition from an SSTable as a JSON object: its key, file
 * position, optional partition-level deletion, and a "rows" array containing
 * the static row plus every row / range-tombstone marker.
 *
 * @param partition the unfiltered row iterator for a single partition
 */
private void serializePartition(UnfilteredRowIterator partition) {
    try {
        json.writeStartObject();
        json.writeFieldName("partition");
        json.writeStartObject();
        json.writeFieldName("key");
        serializePartitionKey(partition.partitionKey());
        json.writeNumberField("position", this.currentScanner.getCurrentPosition());
        if (!partition.partitionLevelDeletion().isLive())
            serializeDeletion(partition.partitionLevelDeletion());
        // closes the "partition" object
        json.writeEndObject();
        // staticRow() never returns null (an empty static row is a non-null
        // "empty" Row), so check isEmpty() instead of the original != null,
        // which was always true and could emit an empty "rows" array.
        if (partition.hasNext() || !partition.staticRow().isEmpty()) {
            json.writeFieldName("rows");
            json.writeStartArray();
            updatePosition();
            if (!partition.staticRow().isEmpty())
                serializeRow(partition.staticRow());
            updatePosition();
            while (partition.hasNext()) {
                Unfiltered unfiltered = partition.next();
                if (unfiltered instanceof Row) {
                    serializeRow((Row) unfiltered);
                } else if (unfiltered instanceof RangeTombstoneMarker) {
                    serializeTombstone((RangeTombstoneMarker) unfiltered);
                }
                updatePosition();
            }
            json.writeEndArray();
        }
        // Always balance the outer writeStartObject(); the original only
        // closed it inside the if-block above, producing malformed JSON for a
        // partition with no rows and an empty static row.
        json.writeEndObject();
    } catch (IOException e) {
        String key = metadata.partitionKeyType.getString(partition.partitionKey().getKey());
        // SLF4J: trailing Throwable after the placeholder args logs the stack trace
        logger.error("Fatal error parsing partition: {}", key, e);
    }
}
Also used : RangeTombstoneMarker(org.apache.cassandra.db.rows.RangeTombstoneMarker) Row(org.apache.cassandra.db.rows.Row) IOException(java.io.IOException) Unfiltered(org.apache.cassandra.db.rows.Unfiltered)

Example 2 with Unfiltered

use of org.apache.cassandra.db.rows.Unfiltered in project cassandra by apache.

From the class CassandraIndexTest, the method assertIndexRowTtl.

// this is slightly annoying, but we cannot read rows from the methods in Util as
// ReadCommand#executeInternal uses metadata retrieved via the tableId, which the index
// CFS inherits from the base CFS. This has the 'wrong' partitioner (the index table
// uses LocalPartition, the base table a real one, so we cannot read from the index
// table with executeInternal
/**
 * Reads the index partition for {@code indexedValue} directly from the index
 * CFS (bypassing Util helpers — see the comment above about the index table's
 * LocalPartitioner) and asserts that every index row carries the expected TTL.
 *
 * @param indexCfs     the ColumnFamilyStore backing the index table
 * @param indexedValue the indexed value whose index partition is read
 * @param ttl          the TTL expected on each index row's liveness info
 */
private void assertIndexRowTtl(ColumnFamilyStore indexCfs, int indexedValue, int ttl) throws Throwable {
    DecoratedKey indexKey = indexCfs.decorateKey(ByteBufferUtil.bytes(indexedValue));
    ClusteringIndexFilter filter = new ClusteringIndexSliceFilter(Slices.with(indexCfs.metadata().comparator, Slice.ALL), false);
    SinglePartitionReadCommand command = SinglePartitionReadCommand.create(indexCfs.metadata(), FBUtilities.nowInSeconds(), indexKey, ColumnFilter.all(indexCfs.metadata()), filter);
    try (ReadExecutionController executionController = command.executionController();
        UnfilteredRowIterator iter = command.queryMemtableAndDisk(indexCfs, executionController)) {
        int rowsSeen = 0;
        while (iter.hasNext()) {
            Unfiltered unfiltered = iter.next();
            assert unfiltered.isRow() : "expected only rows in index partition, got " + unfiltered;
            Row indexRow = (Row) unfiltered;
            assertEquals(ttl, indexRow.primaryKeyLivenessInfo().ttl());
            rowsSeen++;
        }
        // Guard against a vacuous pass: the original loop silently succeeded
        // when the index partition contained no rows at all.
        assert rowsSeen > 0 : "no index rows found for value " + indexedValue;
    }
}
Also used : ClusteringIndexSliceFilter(org.apache.cassandra.db.filter.ClusteringIndexSliceFilter) UnfilteredRowIterator(org.apache.cassandra.db.rows.UnfilteredRowIterator) Row(org.apache.cassandra.db.rows.Row) ClusteringIndexFilter(org.apache.cassandra.db.filter.ClusteringIndexFilter) Unfiltered(org.apache.cassandra.db.rows.Unfiltered)

Example 3 with Unfiltered

use of org.apache.cassandra.db.rows.Unfiltered in project cassandra by apache.

From the class NeverPurgeTest, the method verifyContainsTombstones.

/**
 * Scans the single sstable in {@code sstables} and asserts it contains exactly
 * {@code expectedTombstoneCount} tombstones, counting partition-level
 * deletions, row deletions, and individual tombstone cells.
 *
 * @param sstables               must contain exactly one sstable (run a major
 *                               compaction before calling this)
 * @param expectedTombstoneCount expected total number of tombstones
 */
private void verifyContainsTombstones(Collection<SSTableReader> sstables, int expectedTombstoneCount) throws Exception {
    // always run a major compaction before calling this; assertEquals reports
    // the actual size on failure, unlike the original assertTrue(size() == 1)
    assertEquals(1, sstables.size());
    SSTableReader sstable = sstables.iterator().next();
    int tombstoneCount = 0;
    try (ISSTableScanner scanner = sstable.getScanner()) {
        while (scanner.hasNext()) {
            try (UnfilteredRowIterator iter = scanner.next()) {
                if (!iter.partitionLevelDeletion().isLive())
                    tombstoneCount++;
                while (iter.hasNext()) {
                    Unfiltered atom = iter.next();
                    if (atom.isRow()) {
                        Row r = (Row) atom;
                        if (!r.deletion().isLive())
                            tombstoneCount++;
                        for (Cell c : r.cells()) if (c.isTombstone())
                            tombstoneCount++;
                    }
                }
            }
        }
    }
    // JUnit convention is assertEquals(expected, actual); the original had the
    // arguments swapped, which produces misleading failure messages.
    assertEquals(expectedTombstoneCount, tombstoneCount);
}
Also used : ISSTableScanner(org.apache.cassandra.io.sstable.ISSTableScanner) UnfilteredRowIterator(org.apache.cassandra.db.rows.UnfilteredRowIterator) SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) Row(org.apache.cassandra.db.rows.Row) Cell(org.apache.cassandra.db.rows.Cell) Unfiltered(org.apache.cassandra.db.rows.Unfiltered)

Aggregations

Row (org.apache.cassandra.db.rows.Row)3 Unfiltered (org.apache.cassandra.db.rows.Unfiltered)3 UnfilteredRowIterator (org.apache.cassandra.db.rows.UnfilteredRowIterator)2 IOException (java.io.IOException)1 ClusteringIndexFilter (org.apache.cassandra.db.filter.ClusteringIndexFilter)1 ClusteringIndexSliceFilter (org.apache.cassandra.db.filter.ClusteringIndexSliceFilter)1 Cell (org.apache.cassandra.db.rows.Cell)1 RangeTombstoneMarker (org.apache.cassandra.db.rows.RangeTombstoneMarker)1 ISSTableScanner (org.apache.cassandra.io.sstable.ISSTableScanner)1 SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader)1