Use of org.apache.cassandra.schema.TableId in project cassandra by apache.
The class CommitLogTest, method testDontDeleteIfDirty.
@Test
public void testDontDeleteIfDirty() throws Exception {
    Keyspace ks = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs1 = ks.getColumnFamilyStore(STANDARD1);
    ColumnFamilyStore cfs2 = ks.getColumnFamilyStore(STANDARD2);
    // A mutation roughly a quarter of a commit log segment in size
    Mutation m = new RowUpdateBuilder(cfs1.metadata(), 0, "k")
                 .clustering("bytes")
                 .add("val", ByteBuffer.allocate(DatabaseDescriptor.getCommitLogSegmentSize() / 4))
                 .build();
    // Add it 5 times: five quarter-segment mutations overflow the first segment,
    // forcing a second one to be allocated
    CommitLog.instance.add(m);
    CommitLog.instance.add(m);
    CommitLog.instance.add(m);
    CommitLog.instance.add(m);
    CommitLog.instance.add(m);
    // Add a small mutation on another table
    Mutation m2 = new RowUpdateBuilder(cfs2.metadata(), 0, "k")
                  .clustering("bytes")
                  .add("val", ByteBuffer.allocate(4))
                  .build();
    CommitLog.instance.add(m2);
    assertEquals(2, CommitLog.instance.segmentManager.getActiveSegments().size());
    // Discarding against m2's table id must not touch segments still dirty for cfs1
    TableId id2 = m2.getTableIds().iterator().next();
    CommitLog.instance.discardCompletedSegments(id2, CommitLogPosition.NONE, CommitLog.instance.getCurrentPosition());
    // Assert we still have both our segments
    assertEquals(2, CommitLog.instance.segmentManager.getActiveSegments().size());
}
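As a contrast, here is a minimal sketch of the complementary case, assuming the same fixtures and that segment recycling completes synchronously inside discardCompletedSegments (in the real test suite one would flush and sync before discarding): once the first table's writes are also marked clean, the older segment has no dirty tables left and should be recycled.

// Hedged sketch, not part of the test above: mark the first table's writes
// clean as well, so the older segment becomes unused and can be discarded.
TableId id1 = m.getTableIds().iterator().next();
CommitLog.instance.discardCompletedSegments(id1, CommitLogPosition.NONE,
                                            CommitLog.instance.getCurrentPosition());
// Only the currently allocating segment should remain active.
assertEquals(1, CommitLog.instance.segmentManager.getActiveSegments().size());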
Use of org.apache.cassandra.schema.TableId in project cassandra by apache.
The class DropRecreateAndRestoreTest, method testAlterWithId.
@Test(expected = ConfigurationException.class)
public void testAlterWithId() throws Throwable {
    createTable("CREATE TABLE %s (a int, b int, c int, PRIMARY KEY(a, b))");
    TableId id = currentTableMetadata().id;
    execute(String.format("ALTER TABLE %%s WITH ID = %s", id));
}
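By contrast, supplying an id at creation time is the supported use of WITH ID (it is what drop-recreate-and-restore relies on). A sketch under the same CQLTester fixtures, assuming the createTable/execute/currentTableMetadata helpers used above:

// Hedged sketch: re-creating a dropped table with its original id is allowed,
// unlike altering the id of a live table.
TableId id = currentTableMetadata().id;
execute("DROP TABLE %s");
execute(String.format("CREATE TABLE %%s (a int, b int, c int, PRIMARY KEY(a, b)) WITH ID = %s", id));
assertEquals(id, currentTableMetadata().id);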
Use of org.apache.cassandra.schema.TableId in project cassandra by apache.
The class CommitLogReplayer, method construct.
public static CommitLogReplayer construct(CommitLog commitLog) {
    // compute per-CF and global replay intervals
    Map<TableId, IntervalSet<CommitLogPosition>> cfPersisted = new HashMap<>();
    ReplayFilter replayFilter = ReplayFilter.create();
    for (ColumnFamilyStore cfs : ColumnFamilyStore.all()) {
        // but, if we've truncated the cf in question, then we need to start replay after the truncation
        CommitLogPosition truncatedAt = SystemKeyspace.getTruncatedPosition(cfs.metadata.id);
        if (truncatedAt != null) {
            // Point-in-time restore is taken to mean that the tables need to be replayed even if they were
            // deleted at a later point in time. Any truncation record after that point must thus be cleared prior
            // to replay (CASSANDRA-9195).
            long restoreTime = commitLog.archiver.restorePointInTime;
            long truncatedTime = SystemKeyspace.getTruncatedAt(cfs.metadata.id);
            if (truncatedTime > restoreTime) {
                if (replayFilter.includes(cfs.metadata)) {
                    logger.info("Restore point in time is before latest truncation of table {}.{}. Clearing truncation record.",
                                cfs.metadata.keyspace, cfs.metadata.name);
                    SystemKeyspace.removeTruncationRecord(cfs.metadata.id);
                    truncatedAt = null;
                }
            }
        }
        IntervalSet<CommitLogPosition> filter = persistedIntervals(cfs.getLiveSSTables(), truncatedAt);
        cfPersisted.put(cfs.metadata.id, filter);
    }
    CommitLogPosition globalPosition = firstNotCovered(cfPersisted.values());
    logger.debug("Global replay position is {} from columnfamilies {}", globalPosition, FBUtilities.toString(cfPersisted));
    return new CommitLogReplayer(commitLog, globalPosition, cfPersisted, replayFilter);
}
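The key step is firstNotCovered: each table contributes the set of commit log positions already persisted in its SSTables, and replay must begin at the earliest position some table has not yet persisted. A toy illustration of that minimum-over-tables rule, with plain ints standing in for CommitLogPositions (hypothetical values, not the Cassandra API):

import java.util.List;

public class FirstNotCoveredSketch {
    public static void main(String[] args) {
        // Hypothetical "persisted up to" positions for three tables.
        List<Integer> persistedUpTo = List.of(120, 80, 200);
        // Replay must start at the minimum; starting any later would skip
        // the second table's unpersisted writes in [80, 120).
        int globalReplayStart = persistedUpTo.stream().min(Integer::compare).orElse(0);
        System.out.println(globalReplayStart); // prints 80
    }
}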
Use of org.apache.cassandra.schema.TableId in project cassandra by apache.
The class CommitLogSegment, method dirtyString.
// For debugging, not fast
public String dirtyString() {
    StringBuilder sb = new StringBuilder();
    for (TableId tableId : getDirtyTableIds()) {
        TableMetadata m = Schema.instance.getTableMetadata(tableId);
        sb.append(m == null ? "<deleted>" : m.name)
          .append(" (").append(tableId)
          .append(", dirty: ").append(tableDirty.get(tableId))
          .append(", clean: ").append(tableClean.get(tableId))
          .append("), ");
    }
    return sb.toString();
}
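A plausible usage sketch (segment and logger are assumed variables, not part of this snippet): the string is meant for diagnostics when a segment that should have been recycled is still pinned by dirty tables.

// Hedged sketch: report which tables keep a segment from being discarded.
if (!segment.isUnused())
    logger.debug("Not discarding commit log segment {}; dirty tables: {}",
                 segment, segment.dirtyString());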
Use of org.apache.cassandra.schema.TableId in project cassandra by apache.
The class CommitLogSegment, method getDirtyTableIds.
/**
 * @return a collection of the TableIds of tables that are still dirty in this segment file.
 */
public synchronized Collection<TableId> getDirtyTableIds() {
    if (tableClean.isEmpty() || tableDirty.isEmpty())
        return tableDirty.keySet();
    List<TableId> r = new ArrayList<>(tableDirty.size());
    for (Map.Entry<TableId, IntegerInterval> dirty : tableDirty.entrySet()) {
        TableId tableId = dirty.getKey();
        IntegerInterval dirtyInterval = dirty.getValue();
        IntegerInterval.Set cleanSet = tableClean.get(tableId);
        // A table is still dirty unless its clean intervals fully cover its dirty interval
        if (cleanSet == null || !cleanSet.covers(dirtyInterval))
            r.add(tableId);
    }
    return r;
}
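A toy illustration of the cover test, assuming IntegerInterval's (lower, upper) constructor and IntegerInterval.Set.add(start, end) as used elsewhere in CommitLogSegment; the positions are hypothetical:

// Hedged sketch: two adjoining clean ranges jointly cover the dirty interval,
// so this table would NOT be reported by getDirtyTableIds().
IntegerInterval.Set clean = new IntegerInterval.Set();
clean.add(0, 100);
clean.add(100, 200);
IntegerInterval dirty = new IntegerInterval(50, 150);
assert clean.covers(dirty);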