
Example 96 with TableMetadata

Use of org.apache.cassandra.schema.TableMetadata in project cassandra by apache.

From class ClusteringColumnRestrictionsTest, method testBoundsAsClusteringWithMultiSliceRestrictionsWithOneDescendingAndOneAscendingClusteringColumns.

/**
 * Test multi-column slice restrictions with one descending and one ascending clustering column
 * (e.g. '(clustering_0, clustering_1) > (1, 2)').
 */
@Test
public void testBoundsAsClusteringWithMultiSliceRestrictionsWithOneDescendingAndOneAscendingClusteringColumns() {
    TableMetadata tableMetadata = newTableMetadata(Sort.DESC, Sort.ASC);
    ByteBuffer value1 = ByteBufferUtil.bytes(1);
    ByteBuffer value2 = ByteBufferUtil.bytes(2);
    // (clustering_0, clustering_1) > (1, 2)
    Restriction slice = newMultiSlice(tableMetadata, 0, Bound.START, false, value1, value2);
    ClusteringColumnRestrictions restrictions = new ClusteringColumnRestrictions(tableMetadata);
    restrictions = restrictions.mergeWith(slice);
    SortedSet<ClusteringBound> bounds = restrictions.boundsAsClustering(Bound.START, QueryOptions.DEFAULT);
    assertEquals(2, bounds.size());
    assertEmptyStart(get(bounds, 0));
    assertStartBound(get(bounds, 1), false, value1, value2);
    bounds = restrictions.boundsAsClustering(Bound.END, QueryOptions.DEFAULT);
    assertEquals(2, bounds.size());
    assertEndBound(get(bounds, 0), false, value1);
    assertEndBound(get(bounds, 1), true, value1);
    // (clustering_0, clustering_1) >= (1, 2)
    slice = newMultiSlice(tableMetadata, 0, Bound.START, true, value1, value2);
    restrictions = new ClusteringColumnRestrictions(tableMetadata);
    restrictions = restrictions.mergeWith(slice);
    bounds = restrictions.boundsAsClustering(Bound.START, QueryOptions.DEFAULT);
    assertEquals(2, bounds.size());
    assertEmptyStart(get(bounds, 0));
    assertStartBound(get(bounds, 1), true, value1, value2);
    bounds = restrictions.boundsAsClustering(Bound.END, QueryOptions.DEFAULT);
    assertEquals(2, bounds.size());
    assertEndBound(get(bounds, 0), false, value1);
    assertEndBound(get(bounds, 1), true, value1);
    // (clustering_0, clustering_1) <= (1, 2)
    slice = newMultiSlice(tableMetadata, 0, Bound.END, true, value1, value2);
    restrictions = new ClusteringColumnRestrictions(tableMetadata);
    restrictions = restrictions.mergeWith(slice);
    bounds = restrictions.boundsAsClustering(Bound.START, QueryOptions.DEFAULT);
    assertEquals(2, bounds.size());
    assertStartBound(get(bounds, 0), true, value1);
    assertStartBound(get(bounds, 1), false, value1);
    bounds = restrictions.boundsAsClustering(Bound.END, QueryOptions.DEFAULT);
    assertEquals(2, bounds.size());
    assertEndBound(get(bounds, 0), true, value1, value2);
    assertEmptyEnd(get(bounds, 1));
    // (clustering_0, clustering_1) < (1, 2)
    slice = newMultiSlice(tableMetadata, 0, Bound.END, false, value1, value2);
    restrictions = new ClusteringColumnRestrictions(tableMetadata);
    restrictions = restrictions.mergeWith(slice);
    bounds = restrictions.boundsAsClustering(Bound.START, QueryOptions.DEFAULT);
    assertEquals(2, bounds.size());
    assertStartBound(get(bounds, 0), true, value1);
    assertStartBound(get(bounds, 1), false, value1);
    bounds = restrictions.boundsAsClustering(Bound.END, QueryOptions.DEFAULT);
    assertEquals(2, bounds.size());
    assertEndBound(get(bounds, 0), false, value1, value2);
    assertEmptyEnd(get(bounds, 1));
    // (clustering_0, clustering_1) > (1, 2) AND (clustering_0) < (2)
    slice = newMultiSlice(tableMetadata, 0, Bound.START, false, value1, value2);
    Restriction slice2 = newMultiSlice(tableMetadata, 0, Bound.END, false, value2);
    restrictions = new ClusteringColumnRestrictions(tableMetadata);
    restrictions = restrictions.mergeWith(slice).mergeWith(slice2);
    bounds = restrictions.boundsAsClustering(Bound.START, QueryOptions.DEFAULT);
    assertEquals(2, bounds.size());
    assertStartBound(get(bounds, 0), false, value2);
    assertStartBound(get(bounds, 1), false, value1, value2);
    bounds = restrictions.boundsAsClustering(Bound.END, QueryOptions.DEFAULT);
    assertEquals(2, bounds.size());
    assertEndBound(get(bounds, 0), false, value1);
    assertEndBound(get(bounds, 1), true, value1);
    // (clustering_0) > (1) AND (clustering_0, clustering_1) < (2, 1)
    slice = newMultiSlice(tableMetadata, 0, Bound.START, false, value1);
    slice2 = newMultiSlice(tableMetadata, 0, Bound.END, false, value2, value1);
    restrictions = new ClusteringColumnRestrictions(tableMetadata);
    restrictions = restrictions.mergeWith(slice).mergeWith(slice2);
    bounds = restrictions.boundsAsClustering(Bound.START, QueryOptions.DEFAULT);
    assertEquals(2, bounds.size());
    assertStartBound(get(bounds, 0), true, value2);
    assertStartBound(get(bounds, 1), false, value2);
    bounds = restrictions.boundsAsClustering(Bound.END, QueryOptions.DEFAULT);
    assertEquals(2, bounds.size());
    assertEndBound(get(bounds, 0), false, value2, value1);
    assertEndBound(get(bounds, 1), false, value1);
    // (clustering_0, clustering_1) >= (1, 2) AND (clustering_0, clustering_1) <= (2, 1)
    slice = newMultiSlice(tableMetadata, 0, Bound.START, true, value1, value2);
    slice2 = newMultiSlice(tableMetadata, 0, Bound.END, true, value2, value1);
    restrictions = new ClusteringColumnRestrictions(tableMetadata);
    restrictions = restrictions.mergeWith(slice).mergeWith(slice2);
    bounds = restrictions.boundsAsClustering(Bound.START, QueryOptions.DEFAULT);
    assertEquals(3, bounds.size());
    assertStartBound(get(bounds, 0), true, value2);
    assertStartBound(get(bounds, 1), false, value2);
    assertStartBound(get(bounds, 2), true, value1, value2);
    bounds = restrictions.boundsAsClustering(Bound.END, QueryOptions.DEFAULT);
    assertEquals(3, bounds.size());
    assertEndBound(get(bounds, 0), true, value2, value1);
    assertEndBound(get(bounds, 1), false, value1);
    assertEndBound(get(bounds, 2), true, value1);
}
Also used: TableMetadata (org.apache.cassandra.schema.TableMetadata), ByteBuffer (java.nio.ByteBuffer), Test (org.junit.Test)
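
The newTableMetadata helper is not shown in this snippet. A minimal sketch of what it plausibly looks like, assuming a single int partition key and one int clustering column per Sort argument; the keyspace, table, and column name literals are assumptions, and DESC order is modelled by wrapping the column type in ReversedType (org.apache.cassandra.db.marshal.ReversedType), with Int32Type and AbstractType from org.apache.cassandra.db.marshal:

private static TableMetadata newTableMetadata(Sort... sorts) {
    TableMetadata.Builder builder =
        TableMetadata.builder("ks", "tbl")
                     .addPartitionKeyColumn("partition_key", Int32Type.instance);
    for (int i = 0; i < sorts.length; i++) {
        // A descending clustering column uses the reversed variant of its type.
        AbstractType<?> type = sorts[i] == Sort.ASC
                             ? Int32Type.instance
                             : ReversedType.getInstance(Int32Type.instance);
        builder.addClusteringColumn("clustering_" + i, type);
    }
    return builder.build();
}

This would also account for the clustering_0 and clustering_1 names used in the comments above.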

Example 97 with TableMetadata

Use of org.apache.cassandra.schema.TableMetadata in project cassandra by apache.

From class CreateIndexStatement, method announceMigration.

public Event.SchemaChange announceMigration(QueryState queryState, boolean isLocalOnly) throws RequestValidationException {
    TableMetadata current = Schema.instance.getTableMetadata(keyspace(), columnFamily());
    List<IndexTarget> targets = new ArrayList<>(rawTargets.size());
    for (IndexTarget.Raw rawTarget : rawTargets) targets.add(rawTarget.prepare(current));
    String acceptedName = indexName;
    if (Strings.isNullOrEmpty(acceptedName)) {
        acceptedName = Indexes.getAvailableIndexName(keyspace(), columnFamily(), targets.size() == 1 ? targets.get(0).column.toString() : null);
    }
    if (Schema.instance.getKeyspaceMetadata(keyspace()).existingIndexNames(null).contains(acceptedName)) {
        if (ifNotExists)
            return null;
        else
            throw new InvalidRequestException(String.format("Index %s already exists", acceptedName));
    }
    IndexMetadata.Kind kind;
    Map<String, String> indexOptions;
    if (properties.isCustom) {
        kind = IndexMetadata.Kind.CUSTOM;
        indexOptions = properties.getOptions();
    } else {
        indexOptions = Collections.emptyMap();
        kind = current.isCompound() ? IndexMetadata.Kind.COMPOSITES : IndexMetadata.Kind.KEYS;
    }
    IndexMetadata index = IndexMetadata.fromIndexTargets(targets, acceptedName, kind, indexOptions);
    // check to disallow creation of an index which duplicates an existing one in all but name
    Optional<IndexMetadata> existingIndex = Iterables.tryFind(current.indexes, existing -> existing.equalsWithoutName(index));
    if (existingIndex.isPresent()) {
        if (ifNotExists)
            return null;
        else
            throw new InvalidRequestException(String.format("Index %s is a duplicate of existing index %s", index.name, existingIndex.get().name));
    }
    TableMetadata updated = current.unbuild().indexes(current.indexes.with(index)).build();
    logger.trace("Updating index definition for {}", indexName);
    MigrationManager.announceTableUpdate(updated, isLocalOnly);
    // Creating an index is akin to updating the CF
    return new Event.SchemaChange(Event.SchemaChange.Change.UPDATED, Event.SchemaChange.Target.TABLE, keyspace(), columnFamily());
}
Also used: TableMetadata (org.apache.cassandra.schema.TableMetadata), InvalidRequestException (org.apache.cassandra.exceptions.InvalidRequestException), IndexMetadata (org.apache.cassandra.schema.IndexMetadata)
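
TableMetadata is immutable, so the statement never mutates the current definition in place: it copies it, applies the change, and announces the copy. A minimal sketch of that pattern, using only the calls visible above:

// unbuild() copies the existing definition into a builder, indexes(...) swaps
// in an Indexes collection extended with the new index, and build() produces
// a fresh immutable TableMetadata; the original instance is untouched.
static TableMetadata withIndex(TableMetadata current, IndexMetadata index) {
    return current.unbuild()
                  .indexes(current.indexes.with(index))
                  .build();
}

MigrationManager.announceTableUpdate(updated, isLocalOnly) then distributes the rebuilt definition (or applies it locally only).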

Example 98 with TableMetadata

Use of org.apache.cassandra.schema.TableMetadata in project cassandra by apache.

From class CreateIndexStatement, method validate.

public void validate(ClientState state) throws RequestValidationException {
    TableMetadata table = Schema.instance.validateTable(keyspace(), columnFamily());
    if (table.isCounter())
        throw new InvalidRequestException("Secondary indexes are not supported on counter tables");
    if (table.isView())
        throw new InvalidRequestException("Secondary indexes are not supported on materialized views");
    if (table.isCompactTable() && !table.isStaticCompactTable())
        throw new InvalidRequestException("Secondary indexes are not supported on COMPACT STORAGE tables that have clustering columns");
    List<IndexTarget> targets = new ArrayList<>(rawTargets.size());
    for (IndexTarget.Raw rawTarget : rawTargets) targets.add(rawTarget.prepare(table));
    if (targets.isEmpty() && !properties.isCustom)
        throw new InvalidRequestException("Only CUSTOM indexes can be created without specifying a target column");
    if (targets.size() > 1)
        validateTargetsForMultiColumnIndex(targets);
    for (IndexTarget target : targets) {
        ColumnMetadata cd = table.getColumn(target.column);
        if (cd == null)
            throw new InvalidRequestException("No column definition found for column " + target.column);
        if (cd.type.referencesDuration()) {
            checkFalse(cd.type.isCollection(), "Secondary indexes are not supported on collections containing durations");
            checkFalse(cd.type.isTuple(), "Secondary indexes are not supported on tuples containing durations");
            checkFalse(cd.type.isUDT(), "Secondary indexes are not supported on UDTs containing durations");
            throw invalidRequest("Secondary indexes are not supported on duration columns");
        }
        // TODO: we could lift that limitation
        if (table.isCompactTable() && cd.isPrimaryKeyColumn())
            throw new InvalidRequestException("Secondary indexes are not supported on PRIMARY KEY columns in COMPACT STORAGE tables");
        if (cd.kind == ColumnMetadata.Kind.PARTITION_KEY && table.partitionKeyColumns().size() == 1)
            throw new InvalidRequestException(String.format("Cannot create secondary index on partition key column %s", target.column));
        boolean isMap = cd.type instanceof MapType;
        boolean isFrozenCollection = cd.type.isCollection() && !cd.type.isMultiCell();
        if (isFrozenCollection) {
            validateForFrozenCollection(target);
        } else {
            validateNotFullIndex(target);
            validateIsSimpleIndexIfTargetColumnNotCollection(cd, target);
            validateTargetColumnIsMapIfIndexInvolvesKeys(isMap, target);
        }
    }
    if (!Strings.isNullOrEmpty(indexName)) {
        if (Schema.instance.getKeyspaceMetadata(keyspace()).existingIndexNames(null).contains(indexName)) {
            if (ifNotExists)
                return;
            else
                throw new InvalidRequestException(String.format("Index %s already exists", indexName));
        }
    }
    properties.validate();
}
Also used: TableMetadata (org.apache.cassandra.schema.TableMetadata), ColumnMetadata (org.apache.cassandra.schema.ColumnMetadata), InvalidRequestException (org.apache.cassandra.exceptions.InvalidRequestException), MapType (org.apache.cassandra.db.marshal.MapType)
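
The frozen-collection branch hinges on AbstractType.isMultiCell(): a frozen collection is serialized and indexed as a single value, while a non-frozen (multi-cell) one is validated per index target. A small illustrative sketch; the Int32Type key/value types are chosen arbitrarily:

// The boolean argument of MapType.getInstance is isMultiCell:
// false yields a frozen map, true a regular (non-frozen) one.
MapType<Integer, Integer> frozen = MapType.getInstance(Int32Type.instance, Int32Type.instance, false);
MapType<Integer, Integer> regular = MapType.getInstance(Int32Type.instance, Int32Type.instance, true);
// frozen.isCollection() && !frozen.isMultiCell()   -> true: takes validateForFrozenCollection(target)
// regular.isCollection() && regular.isMultiCell()  -> true: takes the non-frozen validation path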

Example 99 with TableMetadata

Use of org.apache.cassandra.schema.TableMetadata in project cassandra by apache.

From class CreateTriggerStatement, method announceMigration.

public Event.SchemaChange announceMigration(QueryState queryState, boolean isLocalOnly) throws ConfigurationException, InvalidRequestException {
    TableMetadata current = Schema.instance.getTableMetadata(keyspace(), columnFamily());
    Triggers triggers = current.triggers;
    if (triggers.get(triggerName).isPresent()) {
        if (ifNotExists)
            return null;
        else
            throw new InvalidRequestException(String.format("Trigger %s already exists", triggerName));
    }
    TableMetadata updated = current.unbuild().triggers(triggers.with(TriggerMetadata.create(triggerName, triggerClass))).build();
    logger.info("Adding trigger with name {} and class {}", triggerName, triggerClass);
    MigrationManager.announceTableUpdate(updated, isLocalOnly);
    return new Event.SchemaChange(Event.SchemaChange.Change.UPDATED, Event.SchemaChange.Target.TABLE, keyspace(), columnFamily());
}
Also used: TableMetadata (org.apache.cassandra.schema.TableMetadata), Triggers (org.apache.cassandra.schema.Triggers), InvalidRequestException (org.apache.cassandra.exceptions.InvalidRequestException)

Example 100 with TableMetadata

Use of org.apache.cassandra.schema.TableMetadata in project cassandra by apache.

From class CreateViewStatement, method announceMigration.

public Event.SchemaChange announceMigration(QueryState queryState, boolean isLocalOnly) throws RequestValidationException {
    // We need to make sure that:
    //  - primary key includes all columns in base table's primary key
    //  - make sure that the select statement does not have anything other than columns
    //    and their names match the base table's names
    //  - make sure that primary key does not include any collections
    //  - make sure there is no where clause in the select statement
    //  - make sure there is not currently a table or view
    //  - make sure baseTable gcGraceSeconds > 0
    properties.validate();
    if (properties.useCompactStorage)
        throw new InvalidRequestException("Cannot use 'COMPACT STORAGE' when defining a materialized view");
    // We enforce the keyspace because, if the replication factor were different,
    // the logic to wait for a specific replica would break
    if (!baseName.getKeyspace().equals(keyspace()))
        throw new InvalidRequestException("Cannot create a materialized view on a table in a separate keyspace");
    TableMetadata metadata = Schema.instance.validateTable(baseName.getKeyspace(), baseName.getColumnFamily());
    if (metadata.isCounter())
        throw new InvalidRequestException("Materialized views are not supported on counter tables");
    if (metadata.isView())
        throw new InvalidRequestException("Materialized views cannot be created against other materialized views");
    if (metadata.params.gcGraceSeconds == 0) {
        throw new InvalidRequestException(String.format("Cannot create materialized view '%s' for base table " + "'%s' with gc_grace_seconds of 0, since this value is " + "used to TTL undelivered updates. Setting gc_grace_seconds" + " too low might cause undelivered updates to expire " + "before being replayed.", cfName.getColumnFamily(), baseName.getColumnFamily()));
    }
    Set<ColumnIdentifier> included = Sets.newHashSetWithExpectedSize(selectClause.size());
    for (RawSelector selector : selectClause) {
        Selectable.Raw selectable = selector.selectable;
        if (selectable instanceof Selectable.WithFieldSelection.Raw)
            throw new InvalidRequestException("Cannot select out a part of type when defining a materialized view");
        if (selectable instanceof Selectable.WithFunction.Raw)
            throw new InvalidRequestException("Cannot use function when defining a materialized view");
        if (selectable instanceof Selectable.WritetimeOrTTL.Raw)
            throw new InvalidRequestException("Cannot use function when defining a materialized view");
        if (selector.alias != null)
            throw new InvalidRequestException("Cannot use alias when defining a materialized view");
        Selectable s = selectable.prepare(metadata);
        if (s instanceof Term.Raw)
            throw new InvalidRequestException("Cannot use terms in selection when defining a materialized view");
        ColumnMetadata cdef = (ColumnMetadata) s;
        included.add(cdef.name);
    }
    Set<ColumnMetadata.Raw> targetPrimaryKeys = new HashSet<>();
    for (ColumnMetadata.Raw identifier : Iterables.concat(partitionKeys, clusteringKeys)) {
        if (!targetPrimaryKeys.add(identifier))
            throw new InvalidRequestException("Duplicate entry found in PRIMARY KEY: " + identifier);
        ColumnMetadata cdef = identifier.prepare(metadata);
        if (cdef.type.isMultiCell())
            throw new InvalidRequestException(String.format("Cannot use MultiCell column '%s' in PRIMARY KEY of materialized view", identifier));
        if (cdef.isStatic())
            throw new InvalidRequestException(String.format("Cannot use Static column '%s' in PRIMARY KEY of materialized view", identifier));
        if (cdef.type instanceof DurationType)
            throw new InvalidRequestException(String.format("Cannot use Duration column '%s' in PRIMARY KEY of materialized view", identifier));
    }
    // build the select statement
    Map<ColumnMetadata.Raw, Boolean> orderings = Collections.emptyMap();
    List<ColumnMetadata.Raw> groups = Collections.emptyList();
    SelectStatement.Parameters parameters = new SelectStatement.Parameters(orderings, groups, false, true, false);
    SelectStatement.RawStatement rawSelect = new SelectStatement.RawStatement(baseName, parameters, selectClause, whereClause, null, null);
    ClientState state = ClientState.forInternalCalls();
    state.setKeyspace(keyspace());
    rawSelect.prepareKeyspace(state);
    rawSelect.setBoundVariables(getBoundVariables());
    ParsedStatement.Prepared prepared = rawSelect.prepare(true);
    SelectStatement select = (SelectStatement) prepared.statement;
    StatementRestrictions restrictions = select.getRestrictions();
    if (!prepared.boundNames.isEmpty())
        throw new InvalidRequestException("Cannot use query parameters in CREATE MATERIALIZED VIEW statements");
    String whereClauseText = View.relationsToWhereClause(whereClause.relations);
    Set<ColumnIdentifier> basePrimaryKeyCols = new HashSet<>();
    for (ColumnMetadata definition : Iterables.concat(metadata.partitionKeyColumns(), metadata.clusteringColumns())) basePrimaryKeyCols.add(definition.name);
    List<ColumnIdentifier> targetClusteringColumns = new ArrayList<>();
    List<ColumnIdentifier> targetPartitionKeys = new ArrayList<>();
    // This is only used as an intermediate state; this is to catch whether multiple non-PK columns are used
    boolean hasNonPKColumn = false;
    for (ColumnMetadata.Raw raw : partitionKeys) hasNonPKColumn |= getColumnIdentifier(metadata, basePrimaryKeyCols, hasNonPKColumn, raw, targetPartitionKeys, restrictions);
    for (ColumnMetadata.Raw raw : clusteringKeys) hasNonPKColumn |= getColumnIdentifier(metadata, basePrimaryKeyCols, hasNonPKColumn, raw, targetClusteringColumns, restrictions);
    // We need to include all of the primary key columns from the base table in order to make sure that we do not
    // overwrite values in the view. We cannot support "collapsing" the base table into a smaller number of rows in
    // the view because if we need to generate a tombstone, we have no way of knowing which value is currently being
    // used in the view and whether or not to generate a tombstone. In order to not surprise our users, we require
    // that they include all of the columns. We provide them with a list of all of the columns left to include.
    boolean missingClusteringColumns = false;
    StringBuilder columnNames = new StringBuilder();
    List<ColumnIdentifier> includedColumns = new ArrayList<>();
    for (ColumnMetadata def : metadata.columns()) {
        ColumnIdentifier identifier = def.name;
        boolean includeDef = included.isEmpty() || included.contains(identifier);
        if (includeDef && def.isStatic()) {
            throw new InvalidRequestException(String.format("Unable to include static column '%s' which would be included by Materialized View SELECT * statement", identifier));
        }
        boolean defInTargetPrimaryKey = targetClusteringColumns.contains(identifier) || targetPartitionKeys.contains(identifier);
        if (includeDef && !defInTargetPrimaryKey) {
            includedColumns.add(identifier);
        }
        if (!def.isPrimaryKeyColumn())
            continue;
        if (!defInTargetPrimaryKey) {
            if (missingClusteringColumns)
                columnNames.append(',');
            else
                missingClusteringColumns = true;
            columnNames.append(identifier);
        }
    }
    if (missingClusteringColumns)
        throw new InvalidRequestException(String.format("Cannot create Materialized View %s without primary key columns from base %s (%s)", columnFamily(), baseName.getColumnFamily(), columnNames.toString()));
    if (targetPartitionKeys.isEmpty())
        throw new InvalidRequestException("Must select at least a column for a Materialized View");
    if (targetClusteringColumns.isEmpty())
        throw new InvalidRequestException("No columns are defined for Materialized View other than primary key");
    TableParams params = properties.properties.asNewTableParams();
    if (params.defaultTimeToLive > 0) {
        throw new InvalidRequestException("Cannot set default_time_to_live for a materialized view. " + "Data in a materialized view always expire at the same time than " + "the corresponding data in the parent table.");
    }
    TableMetadata.Builder builder = TableMetadata.builder(keyspace(), columnFamily(), properties.properties.getId()).isView(true).params(params);
    add(metadata, targetPartitionKeys, builder::addPartitionKeyColumn);
    add(metadata, targetClusteringColumns, builder::addClusteringColumn);
    add(metadata, includedColumns, builder::addRegularColumn);
    ViewMetadata definition = new ViewMetadata(keyspace(), columnFamily(), metadata.id, metadata.name, included.isEmpty(), rawSelect, whereClauseText, builder.build());
    try {
        MigrationManager.announceNewView(definition, isLocalOnly);
        return new Event.SchemaChange(Event.SchemaChange.Change.CREATED, Event.SchemaChange.Target.TABLE, keyspace(), columnFamily());
    } catch (AlreadyExistsException e) {
        if (ifNotExists)
            return null;
        throw e;
    }
}
Also used: ClientState (org.apache.cassandra.service.ClientState), RawSelector (org.apache.cassandra.cql3.selection.RawSelector), ColumnMetadata (org.apache.cassandra.schema.ColumnMetadata), Selectable (org.apache.cassandra.cql3.selection.Selectable), InvalidRequestException (org.apache.cassandra.exceptions.InvalidRequestException), StatementRestrictions (org.apache.cassandra.cql3.restrictions.StatementRestrictions), ViewMetadata (org.apache.cassandra.schema.ViewMetadata), TableMetadata (org.apache.cassandra.schema.TableMetadata), DurationType (org.apache.cassandra.db.marshal.DurationType), TableParams (org.apache.cassandra.schema.TableParams), AlreadyExistsException (org.apache.cassandra.exceptions.AlreadyExistsException)
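
The add(metadata, targetPartitionKeys, builder::addPartitionKeyColumn) calls near the end use a private helper that is not shown. A hypothetical reconstruction of what it does, copying each named column's type from the base table into the view builder; the BiConsumer (java.util.function.BiConsumer) signature is an assumption, as the real code may declare its own functional interface:

// Hypothetical sketch: look up each column's type in the base table and hand
// name and type to the builder method passed in as 'adder'.
private static void add(TableMetadata base,
                        Iterable<ColumnIdentifier> columns,
                        BiConsumer<ColumnIdentifier, AbstractType<?>> adder) {
    for (ColumnIdentifier name : columns)
        adder.accept(name, base.getColumn(name).type);
}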

Aggregations

TableMetadata (org.apache.cassandra.schema.TableMetadata): 129 uses
Test (org.junit.Test): 63 uses
ByteBuffer (java.nio.ByteBuffer): 29 uses
ColumnMetadata (org.apache.cassandra.schema.ColumnMetadata): 17 uses
RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder): 13 uses
File (java.io.File): 10 uses
PartitionUpdate (org.apache.cassandra.db.partitions.PartitionUpdate): 10 uses
Mutation (org.apache.cassandra.db.Mutation): 8 uses
InvalidRequestException (org.apache.cassandra.exceptions.InvalidRequestException): 8 uses
KeyspaceMetadata (org.apache.cassandra.schema.KeyspaceMetadata): 8 uses
Descriptor (org.apache.cassandra.io.sstable.Descriptor): 7 uses
IndexMetadata (org.apache.cassandra.schema.IndexMetadata): 6 uses
IOException (java.io.IOException): 5 uses
DatabaseDescriptor (org.apache.cassandra.config.DatabaseDescriptor): 5 uses
IndexTarget (org.apache.cassandra.cql3.statements.IndexTarget): 5 uses
ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore): 5 uses
ColumnIdentifier (org.apache.cassandra.cql3.ColumnIdentifier): 4 uses
UntypedResultSet (org.apache.cassandra.cql3.UntypedResultSet): 4 uses
AbstractType (org.apache.cassandra.db.marshal.AbstractType): 4 uses
ConfigurationException (org.apache.cassandra.exceptions.ConfigurationException): 4 uses