Use of org.apache.cassandra.schema.TableId in project cassandra by apache.
The class DropRecreateAndRestoreTest, method testAlterWithId.
@Test(expected = ConfigurationException.class)
public void testAlterWithId() throws Throwable {
    createTable("CREATE TABLE %s (a int, b int, c int, PRIMARY KEY(a, b))");
    TableId id = currentTableMetadata().id;
    // Changing a table's ID after creation is not allowed and must fail with ConfigurationException.
    execute(String.format("ALTER TABLE %%s WITH ID = %s", id));
}
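The test above only checks the rejected direction: a table's ID cannot be changed once the table exists. The class name (DropRecreateAndRestoreTest) points at the allowed direction, re-creating a dropped table with an explicit ID so that previously taken snapshots or SSTables still match. Below is a minimal sketch of that direction, assuming the same CQLTester helpers used above (createTable, currentTableMetadata, execute); it is an illustration, not the project's own test.

@Test
public void testRecreateWithSameId() throws Throwable {
    createTable("CREATE TABLE %s (a int, b int, c int, PRIMARY KEY(a, b))");
    TableId id = currentTableMetadata().id;

    execute("DROP TABLE %s");

    // Unlike ALTER TABLE ... WITH ID, supplying an explicit ID at creation time is accepted,
    // which is what lets a dropped table be restored under its previous TableId.
    execute(String.format("CREATE TABLE %%s (a int, b int, c int, PRIMARY KEY(a, b)) WITH ID = %s", id));
}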
Use of org.apache.cassandra.schema.TableId in project cassandra by apache.
The class CommitLogSegment, method getDirtyTableIds.
/**
 * @return a collection of dirty table IDs for this segment file.
 */
public synchronized Collection<TableId> getDirtyTableIds() {
    if (tableClean.isEmpty() || tableDirty.isEmpty())
        return tableDirty.keySet();

    List<TableId> r = new ArrayList<>(tableDirty.size());
    for (Map.Entry<TableId, IntegerInterval> dirty : tableDirty.entrySet()) {
        TableId tableId = dirty.getKey();
        IntegerInterval dirtyInterval = dirty.getValue();
        IntegerInterval.Set cleanSet = tableClean.get(tableId);
        // A table is still dirty unless its clean intervals fully cover its dirty interval.
        if (cleanSet == null || !cleanSet.covers(dirtyInterval))
            r.add(tableId);
    }
    return r;
}
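The check above treats a table as clean only when the flushed ("clean") position ranges cover every commit-log position that was written ("dirty") for it in this segment. A hypothetical, self-contained sketch of that covering test, deliberately not using Cassandra's IntegerInterval classes, could look like this:

import java.util.List;

public class CoverageSketch {
    // Hypothetical stand-in for an interval of commit-log positions.
    record Range(int lower, int upper) {}

    // True if the dirty range is fully contained in the union of the (sorted, disjoint) clean ranges.
    static boolean covers(List<Range> clean, Range dirty) {
        int next = dirty.lower();
        for (Range c : clean) {
            if (c.lower() > next)
                break;                       // gap before the next clean range => still dirty
            next = Math.max(next, c.upper());
            if (next >= dirty.upper())
                return true;                 // dirty range fully covered => table is clean
        }
        return false;
    }

    public static void main(String[] args) {
        Range dirty = new Range(10, 50);
        System.out.println(covers(List.of(new Range(0, 30), new Range(30, 60)), dirty)); // true
        System.out.println(covers(List.of(new Range(0, 20), new Range(40, 60)), dirty)); // false: 20..40 uncovered
    }
}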
Use of org.apache.cassandra.schema.TableId in project cassandra by apache.
The class CommitLogSegment, method dirtyString.
// For debugging, not fast
public String dirtyString() {
    StringBuilder sb = new StringBuilder();
    for (TableId tableId : getDirtyTableIds()) {
        TableMetadata m = Schema.instance.getTableMetadata(tableId);
        sb.append(m == null ? "<deleted>" : m.name)
          .append(" (").append(tableId)
          .append(", dirty: ").append(tableDirty.get(tableId))
          .append(", clean: ").append(tableClean.get(tableId))
          .append("), ");
    }
    return sb.toString();
}
Use of org.apache.cassandra.schema.TableId in project cassandra by apache.
The class AbstractCommitLogSegmentManager, method flushDataFrom.
/**
 * Force a flush on all CFs that are still dirty in @param segments.
 *
 * @return a Future that will finish when all the flushes are complete.
 */
private Future<?> flushDataFrom(List<CommitLogSegment> segments, boolean force) {
    if (segments.isEmpty())
        return ImmediateFuture.success(null);

    final CommitLogPosition maxCommitLogPosition = segments.get(segments.size() - 1).getCurrentCommitLogPosition();

    // a map of TableId -> forceFlush() to ensure we only queue one flush per table
    final Map<TableId, Future<?>> flushes = new LinkedHashMap<>();

    for (CommitLogSegment segment : segments) {
        for (TableId dirtyTableId : segment.getDirtyTableIds()) {
            TableMetadata metadata = Schema.instance.getTableMetadata(dirtyTableId);
            if (metadata == null) {
                // even though we remove the schema entry before a final flush when dropping a table,
                // it's still possible for a writer to race and finish its append after the flush.
                logger.trace("Marking clean CF {} that doesn't exist anymore", dirtyTableId);
                segment.markClean(dirtyTableId, CommitLogPosition.NONE, segment.getCurrentCommitLogPosition());
            } else if (!flushes.containsKey(dirtyTableId)) {
                final ColumnFamilyStore cfs = Keyspace.open(metadata.keyspace).getColumnFamilyStore(dirtyTableId);
                // can safely call forceFlush here as we will only ever block (briefly) for other attempts to flush,
                // no deadlock possibility since switchLock removal
                flushes.put(dirtyTableId, force ? cfs.forceFlush() : cfs.forceFlush(maxCommitLogPosition));
            }
        }
    }
    return FutureCombiner.allOf(flushes.values());
}
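flushDataFrom() queues at most one flush per table, even when the same table is dirty in several segments, and then waits on all of them through a single combined future. A hypothetical, self-contained sketch of that dedupe-then-combine pattern, using the JDK's CompletableFuture instead of Cassandra's Future and FutureCombiner, could look like this:

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.function.Function;

class FlushDeduplicationSketch {
    static <K> CompletableFuture<Void> flushOncePerKey(List<List<K>> dirtyKeysPerSegment,
                                                       Function<K, CompletableFuture<Void>> flush) {
        // LinkedHashMap keeps first-seen order, mirroring the flushes map in flushDataFrom().
        Map<K, CompletableFuture<Void>> flushes = new LinkedHashMap<>();
        for (List<K> segment : dirtyKeysPerSegment)
            for (K key : segment)
                flushes.computeIfAbsent(key, flush);   // only one flush is queued per key

        // A single future that completes when every queued flush has completed.
        return CompletableFuture.allOf(flushes.values().toArray(new CompletableFuture[0]));
    }
}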
Use of org.apache.cassandra.schema.TableId in project cassandra by apache.
The class DescribeStatementTest, method testPrimaryKeyPositionWithAndWithoutInternals.
@Test
public void testPrimaryKeyPositionWithAndWithoutInternals() throws Throwable {
    String table = createTable("CREATE TABLE %s (pk text, v1 text, v2 int, v3 int, PRIMARY KEY (pk))");
    TableId id = Schema.instance.getTableMetadata(KEYSPACE, table).id;

    String tableCreateStatement = "CREATE TABLE " + KEYSPACE + "." + table + " (\n" +
                                  "    pk text PRIMARY KEY,\n" +
                                  "    v1 text,\n" +
                                  "    v2 int,\n" +
                                  "    v3 int\n" +
                                  ") WITH ID = " + id + "\n" +
                                  "    AND " + tableParametersCql();

    assertRowsNet(executeDescribeNet("DESCRIBE TABLE " + KEYSPACE + "." + table + " WITH INTERNALS"),
                  row(KEYSPACE, "table", table, tableCreateStatement));

    String dropStatement = "ALTER TABLE " + KEYSPACE + "." + table + " DROP v3 USING TIMESTAMP 1589286942065000;";
    execute(dropStatement);

    // With INTERNALS, the dropped column shows up as the recorded ALTER ... DROP statement appended to the output.
    assertRowsNet(executeDescribeNet("DESCRIBE TABLE " + KEYSPACE + "." + table + " WITH INTERNALS"),
                  row(KEYSPACE, "table", table, tableCreateStatement + "\n" + dropStatement));

    // Without INTERNALS, both the dropped column and the explicit table ID are omitted.
    String tableCreateStatementWithoutDroppedColumn = "CREATE TABLE " + KEYSPACE + "." + table + " (\n" +
                                                      "    pk text PRIMARY KEY,\n" +
                                                      "    v1 text,\n" +
                                                      "    v2 int\n" +
                                                      ") WITH " + tableParametersCql();

    assertRowsNet(executeDescribeNet("DESCRIBE TABLE " + KEYSPACE + "." + table),
                  row(KEYSPACE, "table", table, tableCreateStatementWithoutDroppedColumn));
}
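For orientation, the CQL text those concatenated literals assemble (with the generated keyspace and table name, the table id, and the output of tableParametersCql() shown here as placeholders rather than real values) looks roughly like:

CREATE TABLE <keyspace>.<table> (
    pk text PRIMARY KEY,
    v1 text,
    v2 int,
    v3 int
) WITH ID = <table id>
    AND <table parameters>
ALTER TABLE <keyspace>.<table> DROP v3 USING TIMESTAMP 1589286942065000;

The trailing ALTER line only appears in the WITH INTERNALS output after v3 has been dropped; the plain DESCRIBE output omits it along with the v3 column and the WITH ID clause.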