Search in sources :

Example 61 with TableMetadata

use of org.apache.cassandra.schema.TableMetadata in project cassandra by apache.

Source: class TruncateStatement, method execute().

/**
 * Executes the TRUNCATE statement: removes all data from the target table on all replicas.
 *
 * @param state              state of the client session (unused here)
 * @param options            options of the query (unused here)
 * @param queryStartNanoTime start of the query, in nanoseconds (unused here)
 * @return {@code null} — TRUNCATE produces no result rows
 * @throws InvalidRequestException if the table is unknown or is a materialized view
 * @throws TruncateException       if replicas are unavailable or the cluster-wide truncation times out
 */
public ResultMessage execute(QueryState state, QueryOptions options, long queryStartNanoTime) throws InvalidRequestException, TruncateException {
    try {
        TableMetadata metaData = Schema.instance.getTableMetadata(keyspace(), columnFamily());
        // getTableMetadata returns null for an unknown keyspace/table; the original code
        // would then NPE on metaData.isView(). Fail with a proper client-facing error instead.
        if (metaData == null)
            throw new InvalidRequestException(String.format("Unknown keyspace/table %s.%s", keyspace(), columnFamily()));
        if (metaData.isView())
            throw new InvalidRequestException("Cannot TRUNCATE materialized view directly; must truncate base table instead");
        // Blocks until all replicas have acknowledged the truncation (or a timeout occurs).
        StorageProxy.truncateBlocking(keyspace(), columnFamily());
    } catch (UnavailableException | TimeoutException e) {
        // Preserve the cause so callers can distinguish unavailability from timeout.
        throw new TruncateException(e);
    }
    return null;
}
Also used : TableMetadata(org.apache.cassandra.schema.TableMetadata) TimeoutException(java.util.concurrent.TimeoutException)

Example 62 with TableMetadata

use of org.apache.cassandra.schema.TableMetadata in project cassandra by apache.

Source: class AbstractCommitLogSegmentManager, method flushDataFrom().

/**
 * Force a flush on every column family that is still dirty in the given segments,
 * so that the segments can eventually be marked clean and recycled.
 *
 * @param segments the commit log segments whose dirty tables should be flushed
 * @param force    if true, flush everything unconditionally; otherwise flush only
 *                 data covered by the last position of {@code segments}
 * @return a Future that will finish when all the flushes are complete.
 */
private Future<?> flushDataFrom(List<CommitLogSegment> segments, boolean force) {
    if (segments.isEmpty())
        return Futures.immediateFuture(null);
    // Flushing up to the last segment's current position covers everything these segments hold.
    final CommitLogPosition maxCommitLogPosition = segments.get(segments.size() - 1).getCurrentCommitLogPosition();
    // a map of TableId -> forceFlush() future, to ensure we only queue one flush per table
    // even when the same table is dirty in several segments (LinkedHashMap keeps flush order stable)
    final Map<TableId, ListenableFuture<?>> flushes = new LinkedHashMap<>();
    for (CommitLogSegment segment : segments) {
        for (TableId dirtyTableId : segment.getDirtyTableIds()) {
            TableMetadata metadata = Schema.instance.getTableMetadata(dirtyTableId);
            if (metadata == null) {
                // even though we remove the schema entry before a final flush when dropping a CF,
                // it's still possible for a writer to race and finish his append after the flush.
                logger.trace("Marking clean CF {} that doesn't exist anymore", dirtyTableId);
                segment.markClean(dirtyTableId, CommitLogPosition.NONE, segment.getCurrentCommitLogPosition());
            } else if (!flushes.containsKey(dirtyTableId)) {
                final ColumnFamilyStore cfs = Keyspace.open(metadata.keyspace).getColumnFamilyStore(dirtyTableId);
                // can safely call forceFlush here as we will only ever block (briefly) for other attempts to flush,
                // no deadlock possibility since switchLock removal
                flushes.put(dirtyTableId, force ? cfs.forceFlush() : cfs.forceFlush(maxCommitLogPosition));
            }
        }
    }
    // Completes once every queued per-table flush has finished.
    return Futures.allAsList(flushes.values());
}
Also used : TableId(org.apache.cassandra.schema.TableId) TableMetadata(org.apache.cassandra.schema.TableMetadata)

Example 63 with TableMetadata

use of org.apache.cassandra.schema.TableMetadata in project cassandra by apache.

Source: class DeleteStatement, method addUpdateForKey().

@Override
public void addUpdateForKey(PartitionUpdate update, Clustering clustering, UpdateParameters params) throws InvalidRequestException {
    TableMetadata metadata = metadata();
    List<Operation> regularDeletions = getRegularOperations();
    List<Operation> staticDeletions = getStaticOperations();

    if (regularDeletions.isEmpty() && staticDeletions.isEmpty()) {
        // No specific columns were named: the deletion targets the whole partition,
        // a single row, or a range of rows, depending on how much of the clustering
        // key was provided.
        int providedClusterings = clustering.size();
        if (providedClusterings == 0) {
            // Nothing of the clustering provided: full partition deletion.
            update.addPartitionDeletion(params.deletionTime());
        } else if (providedClusterings == metadata.clusteringColumns().size()) {
            // Fully-specified clustering: delete exactly one row.
            params.newRow(clustering);
            params.addRowDeletion();
            update.add(params.buildRow());
        } else {
            // Partially-specified clustering: delete the matching range of rows.
            update.add(params.makeRangeTombstone(metadata.comparator, clustering));
        }
        return;
    }

    if (!regularDeletions.isEmpty()) {
        // Deleting specific regular columns requires a fully-identified row; an empty
        // clustering on a table that has clustering columns would be a range deletion,
        // and range deletions do not support specific columns.
        checkFalse(clustering.size() == 0 && metadata.clusteringColumns().size() != 0, "Range deletions are not supported for specific columns");
        params.newRow(clustering);
        for (Operation op : regularDeletions)
            op.execute(update.partitionKey(), params);
        update.add(params.buildRow());
    }

    if (!staticDeletions.isEmpty()) {
        // Static column deletions apply to the synthetic static row of the partition.
        params.newRow(Clustering.STATIC_CLUSTERING);
        for (Operation op : staticDeletions)
            op.execute(update.partitionKey(), params);
        update.add(params.buildRow());
    }
}
Also used : TableMetadata(org.apache.cassandra.schema.TableMetadata)

Example 64 with TableMetadata

use of org.apache.cassandra.schema.TableMetadata in project cassandra by apache.

Source: class DropIndexStatement, method announceMigration().

/**
 * Drops the index by rebuilding the table's metadata without it and announcing the change.
 *
 * @param queryState  state of the client session (unused here)
 * @param isLocalOnly whether the migration should be applied locally only
 * @return the schema-change event for the updated table, or {@code null} when the
 *         index lookup found nothing to drop
 * @throws InvalidRequestException if the index lookup rejects the request
 * @throws ConfigurationException  if the announced migration is rejected
 */
public Event.SchemaChange announceMigration(QueryState queryState, boolean isLocalOnly) throws InvalidRequestException, ConfigurationException {
    TableMetadata table = lookupIndexedTable();
    if (table == null)
        return null;
    TableMetadata withoutIndex = table.unbuild().indexes(table.indexes.without(indexName)).build();
    MigrationManager.announceTableUpdate(withoutIndex, isLocalOnly);
    // Build the event from the metadata captured above: once the update is announced
    // the index is gone, so repeating the lookup in that method would now throw.
    return new Event.SchemaChange(Event.SchemaChange.Change.UPDATED, Event.SchemaChange.Target.TABLE, table.keyspace, table.name);
}
Also used : TableMetadata(org.apache.cassandra.schema.TableMetadata)

Example 65 with TableMetadata

use of org.apache.cassandra.schema.TableMetadata in project cassandra by apache.

Source: class DropTriggerStatement, method announceMigration().

/**
 * Drops the named trigger from the table's metadata and announces the schema change.
 *
 * @param queryState  state of the client session (unused here)
 * @param isLocalOnly whether the migration should be applied locally only
 * @return the schema-change event for the updated table, or {@code null} when the
 *         trigger is absent and IF EXISTS was specified
 * @throws InvalidRequestException if the table is unknown, or the trigger does not
 *                                 exist and IF EXISTS was not specified
 * @throws ConfigurationException  if the announced migration is rejected
 */
public Event.SchemaChange announceMigration(QueryState queryState, boolean isLocalOnly) throws ConfigurationException, InvalidRequestException {
    TableMetadata current = Schema.instance.getTableMetadata(keyspace(), columnFamily());
    // getTableMetadata returns null for an unknown keyspace/table; the original code would
    // NPE dereferencing current.triggers. Fail with a proper client-facing error instead,
    // matching the null handling in DropIndexStatement.announceMigration.
    if (current == null)
        throw new InvalidRequestException(String.format("Unknown keyspace/table %s.%s", keyspace(), columnFamily()));
    Triggers triggers = current.triggers;
    if (!triggers.get(triggerName).isPresent()) {
        if (ifExists)
            return null;
        else
            throw new InvalidRequestException(String.format("Trigger %s was not found", triggerName));
    }
    logger.info("Dropping trigger with name {}", triggerName);
    TableMetadata updated = current.unbuild().triggers(triggers.without(triggerName)).build();
    MigrationManager.announceTableUpdate(updated, isLocalOnly);
    return new Event.SchemaChange(Event.SchemaChange.Change.UPDATED, Event.SchemaChange.Target.TABLE, keyspace(), columnFamily());
}
Also used : TableMetadata(org.apache.cassandra.schema.TableMetadata) Triggers(org.apache.cassandra.schema.Triggers) InvalidRequestException(org.apache.cassandra.exceptions.InvalidRequestException)

Aggregations

TableMetadata (org.apache.cassandra.schema.TableMetadata)129 Test (org.junit.Test)63 ByteBuffer (java.nio.ByteBuffer)29 ColumnMetadata (org.apache.cassandra.schema.ColumnMetadata)17 RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder)13 File (java.io.File)10 PartitionUpdate (org.apache.cassandra.db.partitions.PartitionUpdate)10 Mutation (org.apache.cassandra.db.Mutation)8 InvalidRequestException (org.apache.cassandra.exceptions.InvalidRequestException)8 KeyspaceMetadata (org.apache.cassandra.schema.KeyspaceMetadata)8 Descriptor (org.apache.cassandra.io.sstable.Descriptor)7 IndexMetadata (org.apache.cassandra.schema.IndexMetadata)6 IOException (java.io.IOException)5 DatabaseDescriptor (org.apache.cassandra.config.DatabaseDescriptor)5 IndexTarget (org.apache.cassandra.cql3.statements.IndexTarget)5 ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore)5 ColumnIdentifier (org.apache.cassandra.cql3.ColumnIdentifier)4 UntypedResultSet (org.apache.cassandra.cql3.UntypedResultSet)4 AbstractType (org.apache.cassandra.db.marshal.AbstractType)4 ConfigurationException (org.apache.cassandra.exceptions.ConfigurationException)4