Example 16 with ColumnIdentifier

use of org.apache.cassandra.cql3.ColumnIdentifier in project cassandra by apache.

the class RowsTest method testLegacyCellIterator.

@Test
public void testLegacyCellIterator() {
    // Creates a table with
    //   - 3 Simple columns: a, c and e
    //   - 2 Complex columns: b and d
    TableMetadata metadata =
        TableMetadata.builder("dummy_ks", "dummy_tbl")
                     .addPartitionKeyColumn("k", BytesType.instance)
                     .addRegularColumn("a", BytesType.instance)
                     .addRegularColumn("b", MapType.getInstance(Int32Type.instance, BytesType.instance, true))
                     .addRegularColumn("c", BytesType.instance)
                     .addRegularColumn("d", MapType.getInstance(Int32Type.instance, BytesType.instance, true))
                     .addRegularColumn("e", BytesType.instance)
                     .build();
    ColumnMetadata a = metadata.getColumn(new ColumnIdentifier("a", false));
    ColumnMetadata b = metadata.getColumn(new ColumnIdentifier("b", false));
    ColumnMetadata c = metadata.getColumn(new ColumnIdentifier("c", false));
    ColumnMetadata d = metadata.getColumn(new ColumnIdentifier("d", false));
    ColumnMetadata e = metadata.getColumn(new ColumnIdentifier("e", false));
    Row row;
    // Row with only simple columns
    row = makeDummyRow(liveCell(a), liveCell(c), liveCell(e));
    assertCellOrder(row.cellsInLegacyOrder(metadata, false), liveCell(a), liveCell(c), liveCell(e));
    assertCellOrder(row.cellsInLegacyOrder(metadata, true), liveCell(e), liveCell(c), liveCell(a));
    // Row with only complex columns
    row = makeDummyRow(liveCell(b, 1), liveCell(b, 2), liveCell(d, 3), liveCell(d, 4));
    assertCellOrder(row.cellsInLegacyOrder(metadata, false), liveCell(b, 1), liveCell(b, 2), liveCell(d, 3), liveCell(d, 4));
    assertCellOrder(row.cellsInLegacyOrder(metadata, true), liveCell(d, 4), liveCell(d, 3), liveCell(b, 2), liveCell(b, 1));
    // Row with mixed simple and complex columns
    row = makeDummyRow(liveCell(a), liveCell(c), liveCell(e), liveCell(b, 1), liveCell(b, 2), liveCell(d, 3), liveCell(d, 4));
    assertCellOrder(row.cellsInLegacyOrder(metadata, false), liveCell(a), liveCell(b, 1), liveCell(b, 2), liveCell(c), liveCell(d, 3), liveCell(d, 4), liveCell(e));
    assertCellOrder(row.cellsInLegacyOrder(metadata, true), liveCell(e), liveCell(d, 4), liveCell(d, 3), liveCell(c), liveCell(b, 2), liveCell(b, 1), liveCell(a));
}
Also used : TableMetadata(org.apache.cassandra.schema.TableMetadata) ColumnMetadata(org.apache.cassandra.schema.ColumnMetadata) ColumnIdentifier(org.apache.cassandra.cql3.ColumnIdentifier) Test(org.junit.Test)
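
Note that makeDummyRow, liveCell, and assertCellOrder are other members of RowsTest and are not reproduced here. As a rough sketch of the kind of check assertCellOrder performs (an assumption about its shape, not the test's actual code, and it presumes the pre-4.1 Cell API where value() returns a ByteBuffer):

// Assumed imports: java.util.Iterator, org.apache.cassandra.db.rows.Cell, static org.junit.Assert.*
private static void assertCellOrder(Iterable<Cell> actual, Cell... expected) {
    Iterator<Cell> iter = actual.iterator();
    for (Cell expectedCell : expected) {
        assertTrue("fewer cells than expected", iter.hasNext());
        Cell actualCell = iter.next();
        // compare the pieces that identify a cell: its column, its path (set for complex
        // columns such as the map columns b and d, null for simple ones) and its payload
        assertEquals(expectedCell.column(), actualCell.column());
        assertEquals(expectedCell.path(), actualCell.path());
        assertEquals(expectedCell.value(), actualCell.value());
    }
    assertFalse("more cells than expected", iter.hasNext());
}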

Example 17 with ColumnIdentifier

use of org.apache.cassandra.cql3.ColumnIdentifier in project cassandra by apache.

the class QueryPagerTest method queryAndVerifyCells.

private void queryAndVerifyCells(TableMetadata table, boolean reversed, String key) throws Exception {
    ClusteringIndexFilter rowfilter = new ClusteringIndexSliceFilter(Slices.ALL, reversed);
    ReadCommand command = SinglePartitionReadCommand.create(table, nowInSec, Util.dk(key), ColumnFilter.all(table), rowfilter);
    QueryPager pager = command.getPager(null, ProtocolVersion.CURRENT);
    ColumnMetadata staticColumn = table.staticColumns().getSimple(0);
    assertEquals(staticColumn.name.toCQLString(), "st");
    for (int i = 0; i < 5; i++) {
        try (ReadExecutionController controller = pager.executionController();
            PartitionIterator partitions = pager.fetchPageInternal(1, controller)) {
            try (RowIterator partition = partitions.next()) {
                assertCell(partition.staticRow(), staticColumn, 4);
                Row row = partition.next();
                int cellIndex = !reversed ? i : 4 - i;
                assertEquals(row.clustering().get(0), ByteBufferUtil.bytes(cellIndex));
                assertCell(row, table.getColumn(new ColumnIdentifier("v1", false)), cellIndex);
                assertCell(row, table.getColumn(new ColumnIdentifier("v2", false)), cellIndex);
                // the partition/page should contain just a single regular row
                assertFalse(partition.hasNext());
            }
        }
    }
    // After processing the 5 rows there should be no more rows to return
    try (ReadExecutionController controller = pager.executionController();
        PartitionIterator partitions = pager.fetchPageInternal(1, controller)) {
        assertFalse(partitions.hasNext());
    }
}
Also used : ColumnMetadata(org.apache.cassandra.schema.ColumnMetadata) QueryPager(org.apache.cassandra.service.pager.QueryPager) PartitionIterator(org.apache.cassandra.db.partitions.PartitionIterator) RowIterator(org.apache.cassandra.db.rows.RowIterator) ColumnIdentifier(org.apache.cassandra.cql3.ColumnIdentifier) Row(org.apache.cassandra.db.rows.Row)
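
assertCell is likewise a private helper of QueryPagerTest and is not shown above. A minimal sketch of what such an assertion could look like, assuming the test fixture wrote each cell value as the 4-byte encoding of its row index (the names and the value encoding are assumptions, not the test's actual implementation):

// Assumed imports: org.apache.cassandra.db.rows.Cell, org.apache.cassandra.utils.ByteBufferUtil,
// static org.junit.Assert.*
private static void assertCell(Row row, ColumnMetadata column, int expected) {
    Cell cell = row.getCell(column);                              // simple-column lookup; null if absent
    assertNotNull("no cell for " + column.name, cell);
    assertEquals(ByteBufferUtil.bytes(expected), cell.value());   // value was written as a 4-byte int
}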

Example 18 with ColumnIdentifier

use of org.apache.cassandra.cql3.ColumnIdentifier in project cassandra by apache.

the class NativeSSTableLoaderClient method createDefinitionFromRow.

private static ColumnMetadata createDefinitionFromRow(Row row, String keyspace, String table, Types types) {
    ClusteringOrder order = ClusteringOrder.valueOf(row.getString("clustering_order").toUpperCase());
    AbstractType<?> type = CQLTypeParser.parse(keyspace, row.getString("type"), types);
    if (order == ClusteringOrder.DESC)
        type = ReversedType.getInstance(type);
    ColumnIdentifier name = ColumnIdentifier.getInterned(type, row.getBytes("column_name_bytes"), row.getString("column_name"));
    int position = row.getInt("position");
    org.apache.cassandra.schema.ColumnMetadata.Kind kind = ColumnMetadata.Kind.valueOf(row.getString("kind").toUpperCase());
    return new ColumnMetadata(keyspace, table, name, type, position, kind);
}
Also used : ColumnMetadata(org.apache.cassandra.schema.ColumnMetadata) ColumnIdentifier(org.apache.cassandra.cql3.ColumnIdentifier) ClusteringOrder(org.apache.cassandra.schema.ColumnMetadata.ClusteringOrder)
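
The interesting detail here is ColumnIdentifier.getInterned(type, bytes, text): schema-loading code interns identifiers so that equal names are shared, whereas the test code in the earlier examples builds throwaway instances with new ColumnIdentifier(text, keepCase), where keepCase = false folds an unquoted name to lower case, per CQL rules. A small illustrative comparison, using UTF8Type and a made-up column name:

// Assumed imports: org.apache.cassandra.db.marshal.UTF8Type, org.apache.cassandra.utils.ByteBufferUtil
// Ad-hoc identifier, as used in the RowsTest/QueryPagerTest examples above;
// passing false lower-cases the name, matching an unquoted CQL identifier.
ColumnIdentifier adHoc = new ColumnIdentifier("Column_Name", false);

// Interned identifier, as in createDefinitionFromRow: the raw bytes, their comparator type and the
// display text come straight from the schema tables, and identical identifiers are shared.
ColumnIdentifier interned = ColumnIdentifier.getInterned(UTF8Type.instance,
                                                         ByteBufferUtil.bytes("column_name"),
                                                         "column_name");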

Example 19 with ColumnIdentifier

use of org.apache.cassandra.cql3.ColumnIdentifier in project cassandra by apache.

the class CreateViewStatement method announceMigration.

public Event.SchemaChange announceMigration(QueryState queryState, boolean isLocalOnly) throws RequestValidationException {
    // We need to make sure that:
    //  - primary key includes all columns in base table's primary key
    //  - make sure that the select statement does not have anything other than columns
    //    and their names match the base table's names
    //  - make sure that primary key does not include any collections
    //  - make sure there is no where clause in the select statement
    //  - make sure there is not currently a table or view
    //  - make sure baseTable gcGraceSeconds > 0
    properties.validate();
    if (properties.useCompactStorage)
        throw new InvalidRequestException("Cannot use 'COMPACT STORAGE' when defining a materialized view");
    // the view must live in the base table's keyspace: if the replication factor differed,
    // the logic to wait for a specific replica would break
    if (!baseName.getKeyspace().equals(keyspace()))
        throw new InvalidRequestException("Cannot create a materialized view on a table in a separate keyspace");
    TableMetadata metadata = Schema.instance.validateTable(baseName.getKeyspace(), baseName.getColumnFamily());
    if (metadata.isCounter())
        throw new InvalidRequestException("Materialized views are not supported on counter tables");
    if (metadata.isView())
        throw new InvalidRequestException("Materialized views cannot be created against other materialized views");
    if (metadata.params.gcGraceSeconds == 0) {
        throw new InvalidRequestException(String.format("Cannot create materialized view '%s' for base table " + "'%s' with gc_grace_seconds of 0, since this value is " + "used to TTL undelivered updates. Setting gc_grace_seconds" + " too low might cause undelivered updates to expire " + "before being replayed.", cfName.getColumnFamily(), baseName.getColumnFamily()));
    }
    Set<ColumnIdentifier> included = Sets.newHashSetWithExpectedSize(selectClause.size());
    for (RawSelector selector : selectClause) {
        Selectable.Raw selectable = selector.selectable;
        if (selectable instanceof Selectable.WithFieldSelection.Raw)
            throw new InvalidRequestException("Cannot select out a part of type when defining a materialized view");
        if (selectable instanceof Selectable.WithFunction.Raw)
            throw new InvalidRequestException("Cannot use function when defining a materialized view");
        if (selectable instanceof Selectable.WritetimeOrTTL.Raw)
            throw new InvalidRequestException("Cannot use function when defining a materialized view");
        if (selector.alias != null)
            throw new InvalidRequestException("Cannot use alias when defining a materialized view");
        Selectable s = selectable.prepare(metadata);
        if (s instanceof Term.Raw)
            throw new InvalidRequestException("Cannot use terms in selection when defining a materialized view");
        ColumnMetadata cdef = (ColumnMetadata) s;
        included.add(cdef.name);
    }
    Set<ColumnMetadata.Raw> targetPrimaryKeys = new HashSet<>();
    for (ColumnMetadata.Raw identifier : Iterables.concat(partitionKeys, clusteringKeys)) {
        if (!targetPrimaryKeys.add(identifier))
            throw new InvalidRequestException("Duplicate entry found in PRIMARY KEY: " + identifier);
        ColumnMetadata cdef = identifier.prepare(metadata);
        if (cdef.type.isMultiCell())
            throw new InvalidRequestException(String.format("Cannot use MultiCell column '%s' in PRIMARY KEY of materialized view", identifier));
        if (cdef.isStatic())
            throw new InvalidRequestException(String.format("Cannot use Static column '%s' in PRIMARY KEY of materialized view", identifier));
        if (cdef.type instanceof DurationType)
            throw new InvalidRequestException(String.format("Cannot use Duration column '%s' in PRIMARY KEY of materialized view", identifier));
    }
    // build the select statement
    Map<ColumnMetadata.Raw, Boolean> orderings = Collections.emptyMap();
    List<ColumnMetadata.Raw> groups = Collections.emptyList();
    SelectStatement.Parameters parameters = new SelectStatement.Parameters(orderings, groups, false, true, false);
    SelectStatement.RawStatement rawSelect = new SelectStatement.RawStatement(baseName, parameters, selectClause, whereClause, null, null);
    ClientState state = ClientState.forInternalCalls();
    state.setKeyspace(keyspace());
    rawSelect.prepareKeyspace(state);
    rawSelect.setBoundVariables(getBoundVariables());
    ParsedStatement.Prepared prepared = rawSelect.prepare(true);
    SelectStatement select = (SelectStatement) prepared.statement;
    StatementRestrictions restrictions = select.getRestrictions();
    if (!prepared.boundNames.isEmpty())
        throw new InvalidRequestException("Cannot use query parameters in CREATE MATERIALIZED VIEW statements");
    String whereClauseText = View.relationsToWhereClause(whereClause.relations);
    Set<ColumnIdentifier> basePrimaryKeyCols = new HashSet<>();
    for (ColumnMetadata definition : Iterables.concat(metadata.partitionKeyColumns(), metadata.clusteringColumns()))
        basePrimaryKeyCols.add(definition.name);
    List<ColumnIdentifier> targetClusteringColumns = new ArrayList<>();
    List<ColumnIdentifier> targetPartitionKeys = new ArrayList<>();
    // This is only used as an intermediate state; this is to catch whether multiple non-PK columns are used
    boolean hasNonPKColumn = false;
    for (ColumnMetadata.Raw raw : partitionKeys)
        hasNonPKColumn |= getColumnIdentifier(metadata, basePrimaryKeyCols, hasNonPKColumn, raw, targetPartitionKeys, restrictions);
    for (ColumnMetadata.Raw raw : clusteringKeys)
        hasNonPKColumn |= getColumnIdentifier(metadata, basePrimaryKeyCols, hasNonPKColumn, raw, targetClusteringColumns, restrictions);
    // We need to include all of the primary key columns from the base table in order to make sure that we do not
    // overwrite values in the view. We cannot support "collapsing" the base table into a smaller number of rows in
    // the view because if we need to generate a tombstone, we have no way of knowing which value is currently being
    // used in the view and whether or not to generate a tombstone. In order to not surprise our users, we require
    // that they include all of the columns. We provide them with a list of all of the columns left to include.
    boolean missingClusteringColumns = false;
    StringBuilder columnNames = new StringBuilder();
    List<ColumnIdentifier> includedColumns = new ArrayList<>();
    for (ColumnMetadata def : metadata.columns()) {
        ColumnIdentifier identifier = def.name;
        boolean includeDef = included.isEmpty() || included.contains(identifier);
        if (includeDef && def.isStatic()) {
            throw new InvalidRequestException(String.format("Unable to include static column '%s' which would be included by Materialized View SELECT * statement", identifier));
        }
        boolean defInTargetPrimaryKey = targetClusteringColumns.contains(identifier) || targetPartitionKeys.contains(identifier);
        if (includeDef && !defInTargetPrimaryKey) {
            includedColumns.add(identifier);
        }
        if (!def.isPrimaryKeyColumn())
            continue;
        if (!defInTargetPrimaryKey) {
            if (missingClusteringColumns)
                columnNames.append(',');
            else
                missingClusteringColumns = true;
            columnNames.append(identifier);
        }
    }
    if (missingClusteringColumns)
        throw new InvalidRequestException(String.format("Cannot create Materialized View %s without primary key columns from base %s (%s)", columnFamily(), baseName.getColumnFamily(), columnNames.toString()));
    if (targetPartitionKeys.isEmpty())
        throw new InvalidRequestException("Must select at least a column for a Materialized View");
    if (targetClusteringColumns.isEmpty())
        throw new InvalidRequestException("No columns are defined for Materialized View other than primary key");
    TableParams params = properties.properties.asNewTableParams();
    if (params.defaultTimeToLive > 0) {
        throw new InvalidRequestException("Cannot set default_time_to_live for a materialized view. " + "Data in a materialized view always expire at the same time than " + "the corresponding data in the parent table.");
    }
    TableMetadata.Builder builder = TableMetadata.builder(keyspace(), columnFamily(), properties.properties.getId()).isView(true).params(params);
    add(metadata, targetPartitionKeys, builder::addPartitionKeyColumn);
    add(metadata, targetClusteringColumns, builder::addClusteringColumn);
    add(metadata, includedColumns, builder::addRegularColumn);
    ViewMetadata definition = new ViewMetadata(keyspace(), columnFamily(), metadata.id, metadata.name, included.isEmpty(), rawSelect, whereClauseText, builder.build());
    try {
        MigrationManager.announceNewView(definition, isLocalOnly);
        return new Event.SchemaChange(Event.SchemaChange.Change.CREATED, Event.SchemaChange.Target.TABLE, keyspace(), columnFamily());
    } catch (AlreadyExistsException e) {
        if (ifNotExists)
            return null;
        throw e;
    }
}
Also used : ClientState(org.apache.cassandra.service.ClientState) RawSelector(org.apache.cassandra.cql3.selection.RawSelector) ColumnMetadata(org.apache.cassandra.schema.ColumnMetadata) Selectable(org.apache.cassandra.cql3.selection.Selectable) InvalidRequestException(org.apache.cassandra.exceptions.InvalidRequestException) StatementRestrictions(org.apache.cassandra.cql3.restrictions.StatementRestrictions) ViewMetadata(org.apache.cassandra.schema.ViewMetadata) TableMetadata(org.apache.cassandra.schema.TableMetadata) DurationType(org.apache.cassandra.db.marshal.DurationType) TableParams(org.apache.cassandra.schema.TableParams) AlreadyExistsException(org.apache.cassandra.exceptions.AlreadyExistsException)
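
The three add(metadata, ..., builder::addXxxColumn) calls near the end delegate to a private helper of CreateViewStatement that is not included in the snippet. The following is a hedged sketch of the shape such a helper could take; the AddColumn interface name is an assumption, and the real helper also has to account for clustering-order reversal (ReversedType), which is omitted here:

// Hypothetical sketch only, matching how the helper is invoked above.
@FunctionalInterface
private interface AddColumn {
    void add(ColumnIdentifier name, AbstractType<?> type);
}

private static void add(TableMetadata baseTable, Iterable<ColumnIdentifier> columns, AddColumn adder) {
    // copy each selected column into the view, reusing the type it has in the base table
    for (ColumnIdentifier name : columns)
        adder.add(name, baseTable.getColumn(name).type);
}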

Example 20 with ColumnIdentifier

use of org.apache.cassandra.cql3.ColumnIdentifier in project cassandra by apache.

the class TargetParser method parse.

public static Pair<ColumnMetadata, IndexTarget.Type> parse(TableMetadata metadata, String target) {
    // if the regex matches then the target is in the form "keys(foo)", "entries(bar)" etc.
    // if not, then it must be a simple column name and implicitly its type is VALUES
    Matcher matcher = TARGET_REGEX.matcher(target);
    String columnName;
    IndexTarget.Type targetType;
    if (matcher.matches()) {
        targetType = IndexTarget.Type.fromString(matcher.group(1));
        columnName = matcher.group(2);
    } else {
        columnName = target;
        targetType = IndexTarget.Type.VALUES;
    }
    // a quoted (case-sensitive) column name arrives wrapped in double quotes, with any embedded
    // quotes escaped by doubling; strip the wrapping quotes and un-escape any such quotes to get
    // the actual column name
    if (columnName.startsWith(QUOTE)) {
        columnName = StringUtils.substring(StringUtils.substring(columnName, 1), 0, -1);
        columnName = TWO_QUOTES.matcher(columnName).replaceAll(QUOTE);
    }
    // for CQL tables the column name is known to be UTF-8 and can be looked up directly;
    // otherwise we have to do a linear scan of the table's columns to get the matching one
    if (metadata.isCQLTable())
        return Pair.create(metadata.getColumn(new ColumnIdentifier(columnName, true)), targetType);
    else
        for (ColumnMetadata column : metadata.columns())
            if (column.name.toString().equals(columnName))
                return Pair.create(column, targetType);
    return null;
}
Also used : ColumnMetadata(org.apache.cassandra.schema.ColumnMetadata) Matcher(java.util.regex.Matcher) IndexTarget(org.apache.cassandra.cql3.statements.IndexTarget) ColumnIdentifier(org.apache.cassandra.cql3.ColumnIdentifier)
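
TARGET_REGEX, QUOTE and TWO_QUOTES are constants declared at the top of TargetParser and are not part of the excerpt. Their definitions can be inferred from the way they are used above; treat the following as a plausible reconstruction rather than the class's verbatim source:

// Assumed import: java.util.regex.Pattern
// group(1) captures the index type keyword, group(2) the (possibly quoted) column name
private static final Pattern TARGET_REGEX = Pattern.compile("^(keys|entries|values|full)\\((.+)\\)$");
// a literal double quote, and the doubled form used to escape quotes inside quoted names
private static final String QUOTE = "\"";
private static final Pattern TWO_QUOTES = Pattern.compile("\"\"");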

Aggregations

ColumnIdentifier (org.apache.cassandra.cql3.ColumnIdentifier): 27
ColumnMetadata (org.apache.cassandra.schema.ColumnMetadata): 17
Test (org.junit.Test): 10
Row (org.apache.cassandra.db.rows.Row): 7
TableMetadata (org.apache.cassandra.schema.TableMetadata): 7
IndexTarget (org.apache.cassandra.cql3.statements.IndexTarget): 5
PartitionIterator (org.apache.cassandra.db.partitions.PartitionIterator): 4
Cell (org.apache.cassandra.db.rows.Cell): 4
PartitionUpdate (org.apache.cassandra.db.partitions.PartitionUpdate): 3
RowIterator (org.apache.cassandra.db.rows.RowIterator): 3
Iterables (com.google.common.collect.Iterables): 2
ByteBuffer (java.nio.ByteBuffer): 2
java.util (java.util): 2
Collectors (java.util.stream.Collectors): 2
CellName (org.apache.cassandra.db.composites.CellName): 2
IndexMetadata (org.apache.cassandra.schema.IndexMetadata): 2
Iterators (com.google.common.collect.Iterators): 1
File (java.io.File): 1
IOException (java.io.IOException): 1
HashMap (java.util.HashMap): 1