Example 1 with Row

Use of org.apache.cassandra.db.rows.Row in project cassandra by apache.

From the class Operation, method localSatisfiedBy:

/**
     * Check every expression in the analyzed list to figure out whether the
     * columns in the given row match all of them, based on the operation
     * set on the current operation node.
     *
     * The algorithm is as follows: for every given expression from analyzed
     * list get corresponding column from the Row:
     *   - apply {@link Expression#isSatisfiedBy(ByteBuffer)}
     *     method to figure out if it's satisfied;
     *   - apply logical operation between boolean accumulator and current boolean result;
     *   - if result == false and node's operation is AND return right away;
     *
     * After all of the expressions have been evaluated return resulting accumulator variable.
     *
     * Example:
     *
     * Operation = (op: AND, columns: [first_name = p, 5 < age < 7, last_name = y])
     * Row = (first_name: pavel, last_name: y, age: 6, timestamp: 15)
     *
     * #1 get "first_name" = p (expressions)
     *      - row-get "first_name"                      => "pavel"
     *      - compare "pavel" against "p"               => true (current)
     *      - set accumulator current                   => true (because this is expression #1)
     *
     * #2 get "last_name" = y (expressions)
     *      - row-get "last_name"                       => "y"
     *      - compare "y" against "y"                   => true (current)
     *      - set accumulator to accumulator & current  => true
     *
     * #3 get 5 < "age" < 7 (expressions)
     *      - row-get "age"                             => "6"
     *      - compare 5 < 6 < 7                         => true (current)
     *      - set accumulator to accumulator & current  => true
     *
     * #4 return accumulator => true (row satisfied all of the conditions)
     *
     * @param currentCluster The row cluster to check.
     * @param staticRow The static row associated with the current cluster.
     * @param allowMissingColumns Whether column values are allowed to be null.
     * @return true if the given Row satisfies all of the analyzed expressions,
     *         false otherwise.
     */
private boolean localSatisfiedBy(Unfiltered currentCluster, Row staticRow, boolean allowMissingColumns) {
    if (currentCluster == null || !currentCluster.isRow())
        return false;
    final int now = FBUtilities.nowInSeconds();
    boolean result = false;
    int idx = 0;
    for (ColumnMetadata column : expressions.keySet()) {
        if (column.kind == Kind.PARTITION_KEY)
            continue;
        ByteBuffer value = ColumnIndex.getValueOf(column, column.kind == Kind.STATIC ? staticRow : (Row) currentCluster, now);
        boolean isMissingColumn = value == null;
        if (!allowMissingColumns && isMissingColumn)
            throw new IllegalStateException("All indexed columns should be included into the column slice, missing: " + column);
        boolean isMatch = false;
        // A column with multiple expressions effectively means an OR,
        // e.g. comment = 'x y z' could be split by the analyzer into
        // 'comment' EQ 'x', 'comment' EQ 'y', 'comment' EQ 'z'. In that
        // situation we only need to check that at least one of the
        // expressions matches and that there is no hit on a NOT_EQ (if any),
        // which always sit at the end of the filter list. The loop starts
        // from the end of the list, which makes it possible to break on the
        // first satisfied EQ/RANGE condition once the NOT_EQ conditions have
        // been checked, instead of evaluating every single expression in the
        // column filter list.
        List<Expression> filters = expressions.get(column);
        for (int i = filters.size() - 1; i >= 0; i--) {
            Expression expression = filters.get(i);
            isMatch = !isMissingColumn && expression.isSatisfiedBy(value);
            if (expression.getOp() == Op.NOT_EQ) {
                // since this is a NOT_EQ operation we have to invert the
                // match flag (to check against other expressions), and
                // break if the inverted flag is false, because that means
                // a positive hit on the not-eq clause.
                isMatch = !isMatch;
                if (!isMatch)
                    break;
            } else if (isMatch || isMissingColumn) {
                // it was a match on EQ/RANGE, or the column is missing
                break;
            }
        }
        if (idx++ == 0) {
            result = isMatch;
            continue;
        }
        result = op.apply(result, isMatch);
        // exit early because we already got a single false
        if (op == OperationType.AND && !result)
            return false;
    }
    return idx == 0 || result;
}
Also used: ColumnMetadata (org.apache.cassandra.schema.ColumnMetadata), Row (org.apache.cassandra.db.rows.Row), ByteBuffer (java.nio.ByteBuffer)
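
The reverse loop over a column's expression list is the subtle part of this method. Below is a minimal, self-contained sketch of just that control flow, assuming one column can carry several OR-ed expressions with any NOT_EQ clauses kept at the end of the list; Expr, Op and matchesColumn are hypothetical stand-ins, not Cassandra API.

import java.util.List;
import java.util.function.Predicate;

public class PerColumnMatchSketch {
    enum Op { EQ, RANGE, NOT_EQ }

    record Expr(Op op, Predicate<String> predicate) {
        boolean isSatisfiedBy(String value) { return predicate.test(value); }
    }

    // Expressions on a single column act as an OR of EQ/RANGE terms,
    // while NOT_EQ terms (kept at the end of the list) can veto the column.
    static boolean matchesColumn(List<Expr> filters, String value) {
        boolean isMatch = false;
        // Start from the end so every NOT_EQ is checked before the first
        // satisfied EQ/RANGE allows an early break.
        for (int i = filters.size() - 1; i >= 0; i--) {
            Expr e = filters.get(i);
            isMatch = value != null && e.isSatisfiedBy(value);
            if (e.op() == Op.NOT_EQ) {
                isMatch = !isMatch;   // invert: a NOT_EQ "hit" means reject
                if (!isMatch)
                    break;            // positive hit on the not-eq clause
            } else if (isMatch || value == null) {
                break;                // match on EQ/RANGE, or missing value
            }
        }
        return isMatch;
    }

    public static void main(String[] args) {
        // comment = 'x y z' split by an analyzer into three EQ terms,
        // plus a trailing NOT_EQ 'w'
        List<Expr> filters = List.of(
                new Expr(Op.EQ, "x"::equals),
                new Expr(Op.EQ, "y"::equals),
                new Expr(Op.EQ, "z"::equals),
                new Expr(Op.NOT_EQ, "w"::equals));
        System.out.println(matchesColumn(filters, "y")); // true
        System.out.println(matchesColumn(filters, "w")); // false
    }
}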

Example 2 with Row

Use of org.apache.cassandra.db.rows.Row in project cassandra by apache.

From the class JsonTransformer, method serializePartition:

private void serializePartition(UnfilteredRowIterator partition) {
    try {
        json.writeStartObject();
        json.writeFieldName("partition");
        json.writeStartObject();
        json.writeFieldName("key");
        serializePartitionKey(partition.partitionKey());
        json.writeNumberField("position", this.currentScanner.getCurrentPosition());
        if (!partition.partitionLevelDeletion().isLive())
            serializeDeletion(partition.partitionLevelDeletion());
        json.writeEndObject();
        if (partition.hasNext() || partition.staticRow() != null) {
            json.writeFieldName("rows");
            json.writeStartArray();
            updatePosition();
            if (!partition.staticRow().isEmpty())
                serializeRow(partition.staticRow());
            Unfiltered unfiltered;
            updatePosition();
            while (partition.hasNext()) {
                unfiltered = partition.next();
                if (unfiltered instanceof Row) {
                    serializeRow((Row) unfiltered);
                } else if (unfiltered instanceof RangeTombstoneMarker) {
                    serializeTombstone((RangeTombstoneMarker) unfiltered);
                }
                updatePosition();
            }
            json.writeEndArray();
            json.writeEndObject();
        }
    } catch (IOException e) {
        String key = metadata.partitionKeyType.getString(partition.partitionKey().getKey());
        logger.error("Fatal error parsing partition: {}", key, e);
    }
}
Also used: RangeTombstoneMarker (org.apache.cassandra.db.rows.RangeTombstoneMarker), Row (org.apache.cassandra.db.rows.Row), IOException (java.io.IOException), Unfiltered (org.apache.cassandra.db.rows.Unfiltered)
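
serializePartition is essentially a hand-rolled streaming serializer on top of Jackson's JsonGenerator. The following minimal sketch reproduces the same start-object/field/array nesting outside Cassandra; the field names mirror the method above, while the key and position values are made up for illustration.

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;
import java.io.IOException;
import java.io.StringWriter;

public class PartitionJsonSketch {
    public static void main(String[] args) throws IOException {
        StringWriter out = new StringWriter();
        try (JsonGenerator json = new JsonFactory().createGenerator(out)) {
            json.writeStartObject();                  // outer document
            json.writeFieldName("partition");
            json.writeStartObject();
            json.writeStringField("key", "pk1");      // stands in for serializePartitionKey(...)
            json.writeNumberField("position", 0L);    // stands in for the scanner position
            json.writeEndObject();
            json.writeFieldName("rows");
            json.writeStartArray();                   // one object per row or tombstone marker
            json.writeStartObject();
            json.writeStringField("type", "row");
            json.writeEndObject();
            json.writeEndArray();
            json.writeEndObject();
        }
        // {"partition":{"key":"pk1","position":0},"rows":[{"type":"row"}]}
        System.out.println(out);
    }
}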

Example 3 with Row

Use of org.apache.cassandra.db.rows.Row in project cassandra by apache.

From the class BatchlogManagerTest, method testReplay:

@Test
@SuppressWarnings("deprecation")
public void testReplay() throws Exception {
    long initialAllBatches = BatchlogManager.instance.countAllBatches();
    long initialReplayedBatches = BatchlogManager.instance.getTotalBatchesReplayed();
    TableMetadata cfm = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD1).metadata();
    // Half of the batches (50) are ready to be replayed, the other half are not.
    for (int i = 0; i < 100; i++) {
        List<Mutation> mutations = new ArrayList<>(10);
        for (int j = 0; j < 10; j++) {
            mutations.add(new RowUpdateBuilder(cfm, FBUtilities.timestampMicros(), ByteBufferUtil.bytes(i)).clustering("name" + j).add("val", "val" + j).build());
        }
        long timestamp = i < 50 ? (System.currentTimeMillis() - BatchlogManager.getBatchlogTimeout()) : (System.currentTimeMillis() + BatchlogManager.getBatchlogTimeout());
        BatchlogManager.store(Batch.createLocal(UUIDGen.getTimeUUID(timestamp, i), timestamp * 1000, mutations));
    }
    // Flush the batchlog to disk (see CASSANDRA-6822).
    Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.BATCHES).forceBlockingFlush();
    assertEquals(100, BatchlogManager.instance.countAllBatches() - initialAllBatches);
    assertEquals(0, BatchlogManager.instance.getTotalBatchesReplayed() - initialReplayedBatches);
    // Force batchlog replay and wait for it to complete.
    BatchlogManager.instance.startBatchlogReplay().get();
    // Ensure that the first half, and only the first half, got replayed.
    assertEquals(50, BatchlogManager.instance.countAllBatches() - initialAllBatches);
    assertEquals(50, BatchlogManager.instance.getTotalBatchesReplayed() - initialReplayedBatches);
    for (int i = 0; i < 100; i++) {
        String query = String.format("SELECT * FROM \"%s\".\"%s\" WHERE key = intAsBlob(%d)", KEYSPACE1, CF_STANDARD1, i);
        UntypedResultSet result = executeInternal(query);
        assertNotNull(result);
        if (i < 50) {
            Iterator<UntypedResultSet.Row> it = result.iterator();
            assertNotNull(it);
            for (int j = 0; j < 10; j++) {
                assertTrue(it.hasNext());
                UntypedResultSet.Row row = it.next();
                assertEquals(ByteBufferUtil.bytes(i), row.getBytes("key"));
                assertEquals("name" + j, row.getString("name"));
                assertEquals("val" + j, row.getString("val"));
            }
            assertFalse(it.hasNext());
        } else {
            assertTrue(result.isEmpty());
        }
    }
    // Ensure that no stray mutations got somehow applied.
    UntypedResultSet result = executeInternal(String.format("SELECT count(*) FROM \"%s\".\"%s\"", KEYSPACE1, CF_STANDARD1));
    assertNotNull(result);
    assertEquals(500, result.one().getLong("count"));
}
Also used: TableMetadata (org.apache.cassandra.schema.TableMetadata), UntypedResultSet (org.apache.cassandra.cql3.UntypedResultSet), RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder), Mutation (org.apache.cassandra.db.Mutation), Row (org.apache.cassandra.db.rows.Row)
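
The split between replayable and non-replayable batches comes down to a single comparison of the batch's write time against now minus the batchlog timeout. A toy illustration of that cutoff follows; the timeout value is made up and isReplayable is a hypothetical helper, not BatchlogManager API.

public class BatchlogCutoffSketch {
    // A batch is eligible for replay once it is older than the timeout window.
    static boolean isReplayable(long writtenAtMillis, long nowMillis, long timeoutMillis) {
        return writtenAtMillis <= nowMillis - timeoutMillis;
    }

    public static void main(String[] args) {
        long timeoutMillis = 60_000;            // hypothetical batchlog timeout
        long now = System.currentTimeMillis();
        long pastBatch = now - timeoutMillis;   // like the i < 50 batches above
        long futureBatch = now + timeoutMillis; // like the i >= 50 batches above
        System.out.println(isReplayable(pastBatch, now, timeoutMillis));   // true
        System.out.println(isReplayable(futureBatch, now, timeoutMillis)); // false
    }
}

The final count(*) assertion follows the same arithmetic: 50 replayed batches times 10 mutations each yields 500 rows.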

Example 4 with Row

Use of org.apache.cassandra.db.rows.Row in project cassandra by apache.

From the class BatchlogManagerTest, method testDelete:

@Test
public void testDelete() {
    ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD1);
    TableMetadata cfm = cfs.metadata();
    new RowUpdateBuilder(cfm, FBUtilities.timestampMicros(), ByteBufferUtil.bytes("1234")).clustering("c").add("val", "val" + 1234).build().applyUnsafe();
    DecoratedKey dk = cfs.decorateKey(ByteBufferUtil.bytes("1234"));
    ImmutableBTreePartition results = Util.getOnlyPartitionUnfiltered(Util.cmd(cfs, dk).build());
    Iterator<Row> iter = results.iterator();
    assert iter.hasNext();
    Mutation mutation = new Mutation(PartitionUpdate.fullPartitionDelete(cfm, dk, FBUtilities.timestampMicros(), FBUtilities.nowInSeconds()));
    mutation.applyUnsafe();
    Util.assertEmpty(Util.cmd(cfs, dk).build());
}
Also used: TableMetadata (org.apache.cassandra.schema.TableMetadata), RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder), DecoratedKey (org.apache.cassandra.db.DecoratedKey), ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore), Row (org.apache.cassandra.db.rows.Row), Mutation (org.apache.cassandra.db.Mutation), ImmutableBTreePartition (org.apache.cassandra.db.partitions.ImmutableBTreePartition)
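
The read after the mutation comes back empty because a partition-level deletion is a tombstone that shadows every row written at or before its timestamp. The toy model below captures just that rule; the names are illustrative, not Cassandra API.

import java.util.HashMap;
import java.util.Map;

public class PartitionDeleteSketch {
    record Row(String clustering, String value, long timestampMicros) {}

    static final Map<String, Row> partition = new HashMap<>();
    static long deletionTimestampMicros = Long.MIN_VALUE;

    // A row survives only if it was written strictly after the partition tombstone.
    static boolean isLive(Row row) {
        return row.timestampMicros() > deletionTimestampMicros;
    }

    public static void main(String[] args) {
        partition.put("c", new Row("c", "val1234", 1_000L));
        // Full-partition delete at a later timestamp, like
        // PartitionUpdate.fullPartitionDelete(...) in the test above.
        deletionTimestampMicros = 2_000L;
        boolean anyLive = partition.values().stream().anyMatch(PartitionDeleteSketch::isLive);
        System.out.println(anyLive); // false: the row is shadowed
    }
}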

Example 5 with Row

Use of org.apache.cassandra.db.rows.Row in project cassandra by apache.

From the class KeyspaceTest, method testReversedWithFlushing:

@Test
public void testReversedWithFlushing() throws Throwable {
    String tableName = createTable("CREATE TABLE %s (a text, b int, c int, PRIMARY KEY (a, b)) WITH CLUSTERING ORDER BY (b DESC)");
    final ColumnFamilyStore cfs = Keyspace.open(KEYSPACE).getColumnFamilyStore(tableName);
    for (int i = 0; i < 10; i++) execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", "0", i, i);
    cfs.forceBlockingFlush();
    for (int i = 10; i < 20; i++) {
        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", "0", i, i);
        RegularAndStaticColumns columns = RegularAndStaticColumns.of(cfs.metadata().getColumn(new ColumnIdentifier("c", false)));
        ClusteringIndexSliceFilter filter = new ClusteringIndexSliceFilter(Slices.ALL, false);
        SinglePartitionReadCommand command = singlePartitionSlice(cfs, "0", filter, null);
        try (ReadExecutionController executionController = command.executionController();
            PartitionIterator iterator = command.executeInternal(executionController)) {
            try (RowIterator rowIterator = iterator.next()) {
                Row row = rowIterator.next();
                Cell cell = row.getCell(cfs.metadata().getColumn(new ColumnIdentifier("c", false)));
                assertEquals(ByteBufferUtil.bytes(i), cell.value());
            }
        }
    }
}
Also used: PartitionIterator (org.apache.cassandra.db.partitions.PartitionIterator), RowIterator (org.apache.cassandra.db.rows.RowIterator), ColumnIdentifier (org.apache.cassandra.cql3.ColumnIdentifier), Row (org.apache.cassandra.db.rows.Row), Cell (org.apache.cassandra.db.rows.Cell), Test (org.junit.Test)
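
The assertion that rowIterator.next() sees the row just written relies on the table's CLUSTERING ORDER BY (b DESC): within a partition, rows are kept sorted descending on b, so an unreversed read returns the largest clustering value, i.e. the latest i, first. A reverse-ordered TreeMap gives a quick model of that behavior.

import java.util.Comparator;
import java.util.TreeMap;

public class DescClusteringSketch {
    public static void main(String[] args) {
        // clustering column b -> value c, sorted descending like the table above
        TreeMap<Integer, Integer> partition = new TreeMap<>(Comparator.reverseOrder());
        for (int i = 0; i < 20; i++)
            partition.put(i, i);
        // The "first" row of an unreversed read is the largest b.
        System.out.println(partition.firstEntry()); // 19=19
    }
}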

Aggregations

Row (org.apache.cassandra.db.rows.Row): 37
ColumnMetadata (org.apache.cassandra.schema.ColumnMetadata): 17
Test (org.junit.Test): 16
ByteBuffer (java.nio.ByteBuffer): 11
Cell (org.apache.cassandra.db.rows.Cell): 8
ColumnIdentifier (org.apache.cassandra.cql3.ColumnIdentifier): 7
ImmutableBTreePartition (org.apache.cassandra.db.partitions.ImmutableBTreePartition): 5
PartitionIterator (org.apache.cassandra.db.partitions.PartitionIterator): 5
RowIterator (org.apache.cassandra.db.rows.RowIterator): 5
UnfilteredRowIterator (org.apache.cassandra.db.rows.UnfilteredRowIterator): 4
ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore): 3
Mutation (org.apache.cassandra.db.Mutation): 3
Unfiltered (org.apache.cassandra.db.rows.Unfiltered): 3
SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader): 3
TableMetadata (org.apache.cassandra.schema.TableMetadata): 3
File (java.io.File): 2
DecoratedKey (org.apache.cassandra.db.DecoratedKey): 2
RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder): 2
ClusteringIndexSliceFilter (org.apache.cassandra.db.filter.ClusteringIndexSliceFilter): 2
StubIndex (org.apache.cassandra.index.StubIndex): 2