Use of org.apache.cassandra.schema.TableMetadata in project cassandra by apache.
The class TruncateStatement, method execute.
public ResultMessage execute(QueryState state, QueryOptions options, long queryStartNanoTime) throws InvalidRequestException, TruncateException {
    try {
        TableMetadata metaData = Schema.instance.getTableMetadata(keyspace(), columnFamily());
        if (metaData.isView())
            throw new InvalidRequestException("Cannot TRUNCATE materialized view directly; must truncate base table instead");
        StorageProxy.truncateBlocking(keyspace(), columnFamily());
    } catch (UnavailableException | TimeoutException e) {
        throw new TruncateException(e);
    }
    return null;
}
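The lookup-and-guard pattern above (resolve the TableMetadata through Schema.instance, then reject materialized views) recurs across statement implementations. A minimal sketch of that pattern as a standalone helper; the rejectViews name and the error messages are illustrative, not part of Cassandra:

import org.apache.cassandra.exceptions.InvalidRequestException;
import org.apache.cassandra.schema.Schema;
import org.apache.cassandra.schema.TableMetadata;

// Illustrative helper: resolve a table's metadata and refuse to operate on a materialized view.
static TableMetadata rejectViews(String keyspace, String table) throws InvalidRequestException {
    TableMetadata metadata = Schema.instance.getTableMetadata(keyspace, table);
    if (metadata == null)
        throw new InvalidRequestException(String.format("Unknown table %s.%s", keyspace, table));
    if (metadata.isView())
        throw new InvalidRequestException("Cannot operate on a materialized view; use the base table instead");
    return metadata;
}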
Use of org.apache.cassandra.schema.TableMetadata in project cassandra by apache.
The class AbstractCommitLogSegmentManager, method flushDataFrom.
/**
 * Force a flush on all column family stores that are still dirty in the given segments.
 *
 * @param segments the commit log segments whose dirty tables should be flushed
 * @param force    if true, flush each table unconditionally; otherwise flush only through the last segment's commit log position
 * @return a Future that will finish when all the flushes are complete.
 */
private Future<?> flushDataFrom(List<CommitLogSegment> segments, boolean force) {
    if (segments.isEmpty())
        return Futures.immediateFuture(null);

    final CommitLogPosition maxCommitLogPosition = segments.get(segments.size() - 1).getCurrentCommitLogPosition();

    // a map of TableId -> flush future, to ensure we only queue one flush per table
    final Map<TableId, ListenableFuture<?>> flushes = new LinkedHashMap<>();

    for (CommitLogSegment segment : segments) {
        for (TableId dirtyTableId : segment.getDirtyTableIds()) {
            TableMetadata metadata = Schema.instance.getTableMetadata(dirtyTableId);
            if (metadata == null) {
                // even though we remove the schema entry before a final flush when dropping a table,
                // it's still possible for a writer to race and finish its append after the flush.
                logger.trace("Marking clean CF {} that doesn't exist anymore", dirtyTableId);
                segment.markClean(dirtyTableId, CommitLogPosition.NONE, segment.getCurrentCommitLogPosition());
            } else if (!flushes.containsKey(dirtyTableId)) {
                final ColumnFamilyStore cfs = Keyspace.open(metadata.keyspace).getColumnFamilyStore(dirtyTableId);
                // safe to call forceFlush here: we will only ever block (briefly) on other attempts to flush,
                // with no deadlock possibility since the switchLock removal
                flushes.put(dirtyTableId, force ? cfs.forceFlush() : cfs.forceFlush(maxCommitLogPosition));
            }
        }
    }

    return Futures.allAsList(flushes.values());
}
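Because Futures.allAsList collapses the per-table flushes into a single future, a caller can block until every dirty table has been flushed before recycling segments. A hypothetical call site inside the same segment manager, where segmentsToRecycle is an assumed local variable and the checked exceptions come from java.util.concurrent:

// Hypothetical call site: force-flush everything the segments still reference,
// then block so the segments can be recycled safely afterwards.
Future<?> flushes = flushDataFrom(segmentsToRecycle, true);
try {
    flushes.get(); // waits for every per-table flush to complete
} catch (InterruptedException | ExecutionException e) {
    throw new RuntimeException("Commit log flush was interrupted or failed", e);
}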
Use of org.apache.cassandra.schema.TableMetadata in project cassandra by apache.
The class DeleteStatement, method addUpdateForKey.
@Override
public void addUpdateForKey(PartitionUpdate update, Clustering clustering, UpdateParameters params) throws InvalidRequestException {
    TableMetadata metadata = metadata();
    List<Operation> regularDeletions = getRegularOperations();
    List<Operation> staticDeletions = getStaticOperations();

    if (regularDeletions.isEmpty() && staticDeletions.isEmpty()) {
        // We're not deleting any specific columns, so it's either a full partition deletion ...
        if (clustering.size() == 0) {
            update.addPartitionDeletion(params.deletionTime());
        }
        // ... or a row deletion ...
        else if (clustering.size() == metadata.clusteringColumns().size()) {
            params.newRow(clustering);
            params.addRowDeletion();
            update.add(params.buildRow());
        }
        // ... or a range of rows deletion.
        else {
            update.add(params.makeRangeTombstone(metadata.comparator, clustering));
        }
    } else {
        if (!regularDeletions.isEmpty()) {
            // If the clustering size is zero but the table has clustering columns, this is a
            // range deletion (of the full partition), which we must reject because range
            // deletions do not support specific columns.
            checkFalse(clustering.size() == 0 && metadata.clusteringColumns().size() != 0,
                       "Range deletions are not supported for specific columns");
            params.newRow(clustering);
            for (Operation op : regularDeletions)
                op.execute(update.partitionKey(), params);
            update.add(params.buildRow());
        }
        if (!staticDeletions.isEmpty()) {
            params.newRow(Clustering.STATIC_CLUSTERING);
            for (Operation op : staticDeletions)
                op.execute(update.partitionKey(), params);
            update.add(params.buildRow());
        }
    }
}
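The branches above correspond directly to the shapes a CQL DELETE can take. An illustrative mapping, assuming a hypothetical table ks.t with partition key pk, clustering columns c1 and c2, and a regular column v:

// Which branch of addUpdateForKey each DELETE shape reaches:
String partitionDeletion = "DELETE FROM ks.t WHERE pk = 0";                         // partition deletion
String rowDeletion       = "DELETE FROM ks.t WHERE pk = 0 AND c1 = 1 AND c2 = 2";   // row deletion
String rangeDeletion     = "DELETE FROM ks.t WHERE pk = 0 AND c1 = 1";              // range tombstone
String columnDeletion    = "DELETE v FROM ks.t WHERE pk = 0 AND c1 = 1 AND c2 = 2"; // single-cell deletion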
Use of org.apache.cassandra.schema.TableMetadata in project cassandra by apache.
The class DropIndexStatement, method announceMigration.
public Event.SchemaChange announceMigration(QueryState queryState, boolean isLocalOnly) throws InvalidRequestException, ConfigurationException {
    TableMetadata current = lookupIndexedTable();
    if (current == null)
        return null;

    TableMetadata updated = current.unbuild().indexes(current.indexes.without(indexName)).build();
    MigrationManager.announceTableUpdate(updated, isLocalOnly);

    // Dropping an index is akin to updating the table, so signal an UPDATED change on the table.
    // Note that we can't call columnFamily() here: the index has already been dropped, so the
    // call to lookupIndexedTable() in that method would now throw.
    return new Event.SchemaChange(Event.SchemaChange.Change.UPDATED, Event.SchemaChange.Target.TABLE, current.keyspace, current.name);
}
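TableMetadata is immutable, so schema-altering statements follow a copy-on-write pattern: unbuild() returns a builder seeded from the current metadata, the builder is adjusted, and build() produces a fresh instance to announce. A minimal sketch combining the index and trigger cases from this page; the index and trigger names are hypothetical:

// Rebuild the metadata with one index and one trigger removed; 'current' itself is untouched.
TableMetadata stripped = current.unbuild()
                                .indexes(current.indexes.without("idx_by_email")) // hypothetical index name
                                .triggers(current.triggers.without("trg_audit"))  // hypothetical trigger name
                                .build();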
Use of org.apache.cassandra.schema.TableMetadata in project cassandra by apache.
The class DropTriggerStatement, method announceMigration.
public Event.SchemaChange announceMigration(QueryState queryState, boolean isLocalOnly) throws ConfigurationException, InvalidRequestException {
    TableMetadata current = Schema.instance.getTableMetadata(keyspace(), columnFamily());
    Triggers triggers = current.triggers;
    if (!triggers.get(triggerName).isPresent()) {
        if (ifExists)
            return null;
        else
            throw new InvalidRequestException(String.format("Trigger %s was not found", triggerName));
    }

    logger.info("Dropping trigger with name {}", triggerName);
    TableMetadata updated = current.unbuild().triggers(triggers.without(triggerName)).build();
    MigrationManager.announceTableUpdate(updated, isLocalOnly);
    return new Event.SchemaChange(Event.SchemaChange.Change.UPDATED, Event.SchemaChange.Target.TABLE, keyspace(), columnFamily());
}
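The ifExists flag corresponds to the optional IF EXISTS clause in the CQL grammar. Illustrative statements exercising both paths through the existence check above; trg_audit and ks.t are hypothetical names:

// Throws InvalidRequestException if the trigger does not exist.
String strict  = "DROP TRIGGER trg_audit ON ks.t";
// Makes announceMigration return null (a no-op) if the trigger does not exist.
String lenient = "DROP TRIGGER IF EXISTS trg_audit ON ks.t";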