Example 91 with ColumnFamilyStore

Use of org.apache.cassandra.db.ColumnFamilyStore in project cassandra by apache.

From class CommitLogCQLTest, method testTruncateSegmentDiscard.

@Test
public void testTruncateSegmentDiscard() throws Throwable {
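    // Two identical tables: the first (otherTable) receives the unrelated write below;
    // the second one created becomes the test's current table for %s substitution.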
    String otherTable = createTable("CREATE TABLE %s (idx INT, data TEXT, PRIMARY KEY(idx));");
    createTable("CREATE TABLE %s (idx INT, data TEXT, PRIMARY KEY(idx));");
    execute("INSERT INTO %s (idx, data) VALUES (?, ?)", 15, Integer.toString(15));
    flush();
    // We write something in a different table to advance the commit log position; the current table remains clean.
    executeFormattedQuery(String.format("INSERT INTO %s.%s (idx, data) VALUES (?, ?)", keyspace(), otherTable), 16, Integer.toString(16));
    ColumnFamilyStore cfs = getCurrentColumnFamilyStore();
    assert cfs.getTracker().getView().getCurrentMemtable().isClean();
    // Calling switchMemtable directly applies a Flush even though the memtable is empty. This can happen with
    // some races (flush with recycling by the segment manager). It should still tell the commit log that the
    // memtable's region is clean. CASSANDRA-12436
    cfs.switchMemtable();
    execute("INSERT INTO %s (idx, data) VALUES (?, ?)", 15, Integer.toString(17));
    Collection<CommitLogSegment> active = new ArrayList<>(CommitLog.instance.segmentManager.getActiveSegments());
    CommitLog.instance.forceRecycleAllSegments();
    // If one of the previous segments remains, it wasn't clean.
    active.retainAll(CommitLog.instance.segmentManager.getActiveSegments());
    assert active.isEmpty();
}
Also used : ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) ArrayList(java.util.ArrayList) Test(org.junit.Test)
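The closing three lines of the test are a compact idiom: snapshot the active commit log segments, force recycling, then intersect via Collection.retainAll; an empty intersection proves every pre-existing segment was clean and got discarded. A minimal sketch of that idiom factored into a reusable helper (the name assertAllSegmentsRecycled is hypothetical, not part of the Cassandra test suite):

import java.util.ArrayList;
import java.util.Collection;

import org.apache.cassandra.db.commitlog.CommitLog;
import org.apache.cassandra.db.commitlog.CommitLogSegment;

// Hypothetical helper: snapshot the active segments, run an action that
// should recycle them all, then verify none of the originals survived.
static void assertAllSegmentsRecycled(Runnable recycleAction)
{
    Collection<CommitLogSegment> before =
        new ArrayList<>(CommitLog.instance.segmentManager.getActiveSegments());
    recycleAction.run();
    // retainAll keeps only segments present both before and after the action;
    // an empty result means every old segment was recycled.
    before.retainAll(CommitLog.instance.segmentManager.getActiveSegments());
    assert before.isEmpty() : "segments not recycled: " + before;
}

With that helper, the tail of the test would read assertAllSegmentsRecycled(CommitLog.instance::forceRecycleAllSegments);.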

Example 92 with ColumnFamilyStore

Use of org.apache.cassandra.db.ColumnFamilyStore in project cassandra by apache.

From class CompactionControllerTest, method testGetFullyExpiredSSTables.

@Test
public void testGetFullyExpiredSSTables() {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF2);
    cfs.truncateBlocking();
    DecoratedKey key = Util.dk("k1");
    // latest timestamp
    long timestamp1 = FBUtilities.timestampMicros();
    long timestamp2 = timestamp1 - 5;
    // oldest timestamp
    long timestamp3 = timestamp2 - 5;
    // create an sstable containing a tombstone; nothing older exists yet, so it should be fully expirable
    applyDeleteMutation(cfs.metadata(), key, timestamp2);
    cfs.forceBlockingFlush();
    // first sstable with tombstone is compacting
    Set<SSTableReader> compacting = Sets.newHashSet(cfs.getLiveSSTables());
    // create another sstable with more recent timestamp
    applyMutation(cfs.metadata(), key, timestamp1);
    cfs.forceBlockingFlush();
    // second sstable is overlapping
    Set<SSTableReader> overlapping = Sets.difference(Sets.newHashSet(cfs.getLiveSSTables()), compacting);
    // the first sstable should be expired because the overlapping sstable is newer and gcBefore is in the future
    int gcBefore = (int) (System.currentTimeMillis() / 1000) + 5;
    Set<SSTableReader> expired = CompactionController.getFullyExpiredSSTables(cfs, compacting, overlapping, gcBefore);
    assertNotNull(expired);
    assertEquals(1, expired.size());
    assertEquals(compacting.iterator().next(), expired.iterator().next());
    // however if we add an older mutation to the memtable then the sstable should not be expired
    applyMutation(cfs.metadata(), key, timestamp3);
    expired = CompactionController.getFullyExpiredSSTables(cfs, compacting, overlapping, gcBefore);
    assertNotNull(expired);
    assertEquals(0, expired.size());
}
Also used : SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) Keyspace(org.apache.cassandra.db.Keyspace) DecoratedKey(org.apache.cassandra.db.DecoratedKey) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) Test(org.junit.Test)
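applyMutation and applyDeleteMutation are helpers defined elsewhere in CompactionControllerTest, not part of the ColumnFamilyStore API. A plausible sketch of what they do, assuming a table with a clustering column "ck" and a value column "val" (both column names are assumptions here):

import java.nio.ByteBuffer;

import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.db.Mutation;
import org.apache.cassandra.db.RowUpdateBuilder;
import org.apache.cassandra.db.partitions.PartitionUpdate;
import org.apache.cassandra.schema.TableMetadata;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.utils.FBUtilities;

// Sketch: write a single row for the key at the given timestamp.
private static void applyMutation(TableMetadata cfm, DecoratedKey key, long timestamp)
{
    ByteBuffer val = ByteBufferUtil.bytes(1L);
    new RowUpdateBuilder(cfm, timestamp, key.getKey())
        .clustering("ck")
        .add("val", val)
        .build()
        .applyUnsafe();
}

// Sketch: write a partition-level deletion (tombstone) at the given timestamp.
private static void applyDeleteMutation(TableMetadata cfm, DecoratedKey key, long timestamp)
{
    new Mutation(PartitionUpdate.fullPartitionDelete(cfm, key, timestamp, FBUtilities.nowInSeconds()))
        .applyUnsafe();
}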

Example 93 with ColumnFamilyStore

Use of org.apache.cassandra.db.ColumnFamilyStore in project cassandra by apache.

From class CompactionControllerTest, method testMaxPurgeableTimestamp.

@Test
public void testMaxPurgeableTimestamp() {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF1);
    cfs.truncateBlocking();
    DecoratedKey key = Util.dk("k1");
    // latest timestamp
    long timestamp1 = FBUtilities.timestampMicros();
    long timestamp2 = timestamp1 - 5;
    // oldest timestamp
    long timestamp3 = timestamp2 - 5;
    // add to first memtable
    applyMutation(cfs.metadata(), key, timestamp1);
    // check max purgeable timestamp without any sstables
    try (CompactionController controller = new CompactionController(cfs, null, 0)) {
        //memtable only
        assertPurgeBoundary(controller.getPurgeEvaluator(key), timestamp1);
        cfs.forceBlockingFlush();
        //no memtables and no sstables
        assertTrue(controller.getPurgeEvaluator(key).test(Long.MAX_VALUE));
    }
    // first sstable is compacting
    Set<SSTableReader> compacting = Sets.newHashSet(cfs.getLiveSSTables());
    // create another sstable
    applyMutation(cfs.metadata(), key, timestamp2);
    cfs.forceBlockingFlush();
    // check max purgeable timestamp when compacting the first sstable with and without a memtable
    try (CompactionController controller = new CompactionController(cfs, compacting, 0)) {
        assertPurgeBoundary(controller.getPurgeEvaluator(key), timestamp2);
        applyMutation(cfs.metadata(), key, timestamp3);
        //second sstable and second memtable
        assertPurgeBoundary(controller.getPurgeEvaluator(key), timestamp3);
    }
    // check max purgeable timestamp again without any sstables but with different insertion orders on the memtable
    cfs.forceBlockingFlush();
    //newest to oldest
    try (CompactionController controller = new CompactionController(cfs, null, 0)) {
        applyMutation(cfs.metadata(), key, timestamp1);
        applyMutation(cfs.metadata(), key, timestamp2);
        applyMutation(cfs.metadata(), key, timestamp3);
        //memtable only
        assertPurgeBoundary(controller.getPurgeEvaluator(key), timestamp3);
    }
    cfs.forceBlockingFlush();
    //oldest to newest
    try (CompactionController controller = new CompactionController(cfs, null, 0)) {
        applyMutation(cfs.metadata(), key, timestamp3);
        applyMutation(cfs.metadata(), key, timestamp2);
        applyMutation(cfs.metadata(), key, timestamp1);
        assertPurgeBoundary(controller.getPurgeEvaluator(key), timestamp3);
    }
}
Also used : SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) Keyspace(org.apache.cassandra.db.Keyspace) DecoratedKey(org.apache.cassandra.db.DecoratedKey) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) Test(org.junit.Test)
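getPurgeEvaluator returns a LongPredicate over timestamps: it answers whether data written at a given timestamp may be purged, i.e. whether it is older than everything else covering the key outside the compaction. assertPurgeBoundary is another test-local helper; a plausible sketch, assuming the boundary is the oldest timestamp that must survive:

import java.util.function.LongPredicate;

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

// Sketch: timestamps strictly below the boundary are purgeable,
// the boundary itself is not.
private static void assertPurgeBoundary(LongPredicate evaluator, long boundary)
{
    assertTrue(evaluator.test(boundary - 1));
    assertFalse(evaluator.test(boundary));
}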

Example 94 with ColumnFamilyStore

Use of org.apache.cassandra.db.ColumnFamilyStore in project cassandra by apache.

From class DateTieredCompactionStrategyTest, method testFilterOldSSTables.

@Test
public void testFilterOldSSTables() {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_STANDARD1);
    cfs.disableAutoCompaction();
    ByteBuffer value = ByteBuffer.wrap(new byte[100]);
    // create 3 sstables
    int numSSTables = 3;
    for (int r = 0; r < numSSTables; r++) {
        DecoratedKey key = Util.dk(String.valueOf(r));
        new RowUpdateBuilder(cfs.metadata(), r, key.getKey()).clustering("column").add("val", value).build().applyUnsafe();
        cfs.forceBlockingFlush();
    }
    cfs.forceBlockingFlush();
    Iterable<SSTableReader> filtered;
    List<SSTableReader> sstrs = new ArrayList<>(cfs.getLiveSSTables());
    filtered = filterOldSSTables(sstrs, 0, 2);
    assertEquals("when maxSSTableAge is zero, no sstables should be filtered", sstrs.size(), Iterables.size(filtered));
    filtered = filterOldSSTables(sstrs, 1, 2);
    assertEquals("only the newest 2 sstables should remain", 2, Iterables.size(filtered));
    filtered = filterOldSSTables(sstrs, 1, 3);
    assertEquals("only the newest sstable should remain", 1, Iterables.size(filtered));
    filtered = filterOldSSTables(sstrs, 1, 4);
    assertEquals("no sstables should remain when all are too old", 0, Iterables.size(filtered));
    cfs.truncateBlocking();
}
Also used : SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) RowUpdateBuilder(org.apache.cassandra.db.RowUpdateBuilder) Keyspace(org.apache.cassandra.db.Keyspace) DecoratedKey(org.apache.cassandra.db.DecoratedKey) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) ByteBuffer(java.nio.ByteBuffer) Test(org.junit.Test)
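filterOldSSTables is a package-visible static method of DateTieredCompactionStrategy, imported statically by the test. Its contract, as the assertions show, is: a maxSSTableAge of zero disables filtering; otherwise only sstables whose newest cell timestamp is at or after now - maxSSTableAge survive. A behavioral sketch (not the actual implementation):

import java.util.List;
import java.util.stream.Collectors;

import org.apache.cassandra.io.sstable.format.SSTableReader;

// Behavioral sketch: maxSSTableAge == 0 disables filtering; otherwise an
// sstable survives only if its newest cell is at or after the cutoff.
static List<SSTableReader> filterOldSSTables(List<SSTableReader> sstables, long maxSSTableAge, long now)
{
    if (maxSSTableAge == 0)
        return sstables;
    long cutoff = now - maxSSTableAge;
    return sstables.stream()
                   .filter(s -> s.getMaxTimestamp() >= cutoff)
                   .collect(Collectors.toList());
}

In the test the three sstables carry write timestamps 0, 1 and 2 (the loop variable r is used as the timestamp), so with maxSSTableAge = 1 the cutoffs 1, 2 and 3 leave two, one and zero sstables respectively.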

Example 95 with ColumnFamilyStore

Use of org.apache.cassandra.db.ColumnFamilyStore in project cassandra by apache.

From class DateTieredCompactionStrategyTest, method testSTCSBigWindow.

@Test
public void testSTCSBigWindow() {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_STANDARD1);
    cfs.disableAutoCompaction();
    ByteBuffer bigValue = ByteBuffer.wrap(new byte[10000]);
    ByteBuffer value = ByteBuffer.wrap(new byte[100]);
    int numSSTables = 40;
    // create big sstables out of the first half:
    long timestamp = System.currentTimeMillis();
    for (int r = 0; r < numSSTables / 2; r++) {
        for (int i = 0; i < 10; i++) {
            DecoratedKey key = Util.dk(String.valueOf(r));
            new RowUpdateBuilder(cfs.metadata(), timestamp, key.getKey()).clustering("column").add("val", bigValue).build().applyUnsafe();
        }
        cfs.forceBlockingFlush();
    }
    // and small ones:
    for (int r = 0; r < numSSTables / 2; r++) {
        DecoratedKey key = Util.dk(String.valueOf(r));
        new RowUpdateBuilder(cfs.metadata(), timestamp, key.getKey()).clustering("column").add("val", value).build().applyUnsafe();
        cfs.forceBlockingFlush();
    }
    Map<String, String> options = new HashMap<>();
    options.put(SizeTieredCompactionStrategyOptions.MIN_SSTABLE_SIZE_KEY, "1");
    DateTieredCompactionStrategy dtcs = new DateTieredCompactionStrategy(cfs, options);
    for (SSTableReader sstable : cfs.getSSTables(SSTableSet.CANONICAL)) dtcs.addSSTable(sstable);
    AbstractCompactionTask task = dtcs.getNextBackgroundTask(0);
    assertEquals(20, task.transaction.originals().size());
    task.transaction.abort();
    cfs.truncateBlocking();
}
Also used : DecoratedKey(org.apache.cassandra.db.DecoratedKey) ByteBuffer(java.nio.ByteBuffer) SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) RowUpdateBuilder(org.apache.cassandra.db.RowUpdateBuilder) Keyspace(org.apache.cassandra.db.Keyspace) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) Test(org.junit.Test)
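The expected size of 20 follows from how DTCS degrades to size-tiered behavior inside a single time window: all 40 sstables share one write timestamp, so they land in the same window, and with MIN_SSTABLE_SIZE forced down to 1 (so nothing is lumped into the special small-sstable bucket) the size-tiered bucketing splits them into a bucket of 20 big sstables and a bucket of 20 small ones; the next background task picks one whole bucket. A toy illustration of the bucketing idea (greatly simplified from SizeTieredCompactionStrategy, which also applies a bucketLow bound):

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

// Toy bucketing: sort by size, close the current bucket whenever the next
// sstable is more than bucketHigh (default 1.5x) times the bucket average.
static List<List<Long>> bucketBySize(List<Long> sizes, double bucketHigh)
{
    List<List<Long>> buckets = new ArrayList<>();
    sizes.sort(Comparator.naturalOrder());
    List<Long> current = new ArrayList<>();
    double avg = 0;
    for (long size : sizes)
    {
        if (!current.isEmpty() && size > avg * bucketHigh)
        {
            buckets.add(current);
            current = new ArrayList<>();
        }
        current.add(size);
        avg = current.stream().mapToLong(Long::longValue).average().getAsDouble();
    }
    if (!current.isEmpty())
        buckets.add(current);
    return buckets;
}

Twenty small (~100 B of data) and twenty big (~100 KB) sstables therefore form exactly two buckets, matching the 20 originals asserted on the transaction.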

Aggregations

ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore): 175
Test (org.junit.Test): 110
SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader): 90
Keyspace (org.apache.cassandra.db.Keyspace): 64
File (java.io.File): 30
Directories (org.apache.cassandra.db.Directories): 25
LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction): 24
DecoratedKey (org.apache.cassandra.db.DecoratedKey): 22
RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder): 20
AbstractTransactionalTest (org.apache.cassandra.utils.concurrent.AbstractTransactionalTest): 20
RandomAccessFile (java.io.RandomAccessFile): 19
CompactionController (org.apache.cassandra.db.compaction.CompactionController): 14
ArrayList (java.util.ArrayList): 13
ByteBuffer (java.nio.ByteBuffer): 12
CompactionIterator (org.apache.cassandra.db.compaction.CompactionIterator): 12
Range (org.apache.cassandra.dht.Range): 11
Table (org.apache.cassandra.db.Table): 9
Token (org.apache.cassandra.dht.Token): 9
Descriptor (org.apache.cassandra.io.sstable.Descriptor): 9
StatsMetadata (org.apache.cassandra.io.sstable.metadata.StatsMetadata): 9