Search in sources :

Example 96 with ColumnFamilyStore

use of org.apache.cassandra.db.ColumnFamilyStore in project cassandra by apache.

The class NeverPurgeTest, method testHelper.

/**
 * Writes a single row, applies the supplied deletion statement, then flushes and
 * major-compacts the table, verifying that exactly one tombstone survives
 * compaction (i.e. tombstones are never purged despite gc_grace_seconds = 0).
 */
private void testHelper(String deletionStatement) throws Throwable {
    createTable("CREATE TABLE %s (a int, b int, c text, PRIMARY KEY (a, b)) WITH gc_grace_seconds = 0");
    ColumnFamilyStore store = Keyspace.open(keyspace()).getColumnFamilyStore(currentTable());
    execute("INSERT INTO %s (a, b, c) VALUES (1, 2, '3')");
    execute(deletionStatement);
    // Let wall-clock time pass gc_grace_seconds (0) so the tombstone becomes nominally purgeable.
    Thread.sleep(1000);
    store.forceBlockingFlush();
    store.forceMajorCompaction();
    verifyContainsTombstones(store.getLiveSSTables(), 1);
}
Also used : ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore)

Example 97 with ColumnFamilyStore

use of org.apache.cassandra.db.ColumnFamilyStore in project cassandra by apache.

The class NeverPurgeTest, method minorNeverPurgeTombstonesTest.

/**
 * Verifies that tombstones written against a table with gc_grace_seconds = 0
 * survive minor (auto) compaction: after the sstables are compacted down to
 * one, all three tombstones must still be present.
 */
@Test
public void minorNeverPurgeTombstonesTest() throws Throwable {
    createTable("CREATE TABLE %s (a int, b int, c text, PRIMARY KEY (a, b)) WITH gc_grace_seconds = 0");
    ColumnFamilyStore cfs = Keyspace.open(keyspace()).getColumnFamilyStore(currentTable());
    cfs.disableAutoCompaction();
    // Build 4 sstables of 1000 rows each so auto-compaction has work to do.
    for (int i = 0; i < 4; i++) {
        for (int j = 0; j < 1000; j++) {
            execute("INSERT INTO %s (a, b, c) VALUES (" + j + ", 2, '3')");
        }
        cfs.forceBlockingFlush();
    }
    // Three kinds of deletion: cell tombstone, row tombstone, partition tombstone.
    execute("UPDATE %s SET c = null WHERE a=1 AND b=2");
    execute("DELETE FROM %s WHERE a=2 AND b=2");
    execute("DELETE FROM %s WHERE a=3");
    cfs.forceBlockingFlush();
    cfs.enableAutoCompaction();
    // Wait for minor compaction to settle to a single sstable, but bound the
    // wait: the previous unbounded loop could hang the test suite forever if
    // compaction never reduced the live sstable count.
    long deadline = System.currentTimeMillis() + 60_000;
    while (cfs.getLiveSSTables().size() > 1) {
        if (System.currentTimeMillis() > deadline)
            throw new AssertionError("Minor compaction did not reduce live sstables to 1 within 60s");
        Thread.sleep(100);
    }
    verifyContainsTombstones(cfs.getLiveSSTables(), 3);
}
Also used : ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) Test(org.junit.Test)

Example 98 with ColumnFamilyStore

use of org.apache.cassandra.db.ColumnFamilyStore in project cassandra by apache.

The class OneCompactionTest, method testCompaction.

/**
 * Inserts {@code insertsPerTable} rows into the named column family, flushing
 * each write to its own sstable and confirming every inserted key is readable,
 * then runs a maximal compaction and asserts exactly one live sstable remains.
 */
private void testCompaction(String columnFamilyName, int insertsPerTable) {
    CompactionManager.instance.disableAutoCompaction();
    Keyspace ks = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = ks.getColumnFamilyStore(columnFamilyName);
    Set<String> insertedKeys = new HashSet<>();
    for (int i = 0; i < insertsPerTable; i++) {
        String key = String.valueOf(i);
        new RowUpdateBuilder(cfs.metadata(), i, key)
                .clustering("0")
                .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)
                .build()
                .applyUnsafe();
        insertedKeys.add(key);
        // One sstable per insert; verify all keys written so far are visible.
        cfs.forceBlockingFlush();
        assertEquals(insertedKeys.size(), Util.getAll(Util.cmd(cfs).build()).size());
    }
    CompactionManager.instance.performMaximal(cfs, false);
    assertEquals(1, cfs.getLiveSSTables().size());
}
Also used : RowUpdateBuilder(org.apache.cassandra.db.RowUpdateBuilder) Keyspace(org.apache.cassandra.db.Keyspace) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) HashSet(java.util.HashSet)

Example 99 with ColumnFamilyStore

use of org.apache.cassandra.db.ColumnFamilyStore in project cassandra by apache.

The class SnapshotDeletingTest, method testCompactionHook.

/**
 * Windows-only: verifies that a snapshot whose deletion initially fails is
 * rescheduled by the compaction executor's after-hook, draining the pending
 * snapshot-deletion queue back to zero.
 */
@Test
public void testCompactionHook() throws Exception {
    Assume.assumeTrue(FBUtilities.isWindows);
    ColumnFamilyStore store = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD1);
    store.clearUnsafe();
    populate(10000);
    store.snapshot("snapshot1");
    // Give the SnapshotDeletingTask time to run (and fail) before asserting
    // that the deletion is still pending.
    Thread.sleep(500);
    store.clearSnapshot("snapshot1");
    assertEquals(1, SnapshotDeletingTask.pendingDeletionCount());
    // Compact the cf; the executor's after-hook should call rescheduleDeletion.
    populate(20000);
    store.forceBlockingFlush();
    store.forceMajorCompaction();
    long deadline = System.currentTimeMillis() + 1000;
    while (System.currentTimeMillis() < deadline && SnapshotDeletingTask.pendingDeletionCount() > 0) {
        Thread.yield();
    }
    assertEquals(0, SnapshotDeletingTask.pendingDeletionCount());
}
Also used : Keyspace(org.apache.cassandra.db.Keyspace) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) Test(org.junit.Test)

Example 100 with ColumnFamilyStore

use of org.apache.cassandra.db.ColumnFamilyStore in project cassandra by apache.

The class TimeWindowCompactionStrategyTest, method testPrepBucket.

/**
 * Exercises TimeWindowCompactionStrategy bucket selection: a bucket below the
 * min threshold must be rejected, a bucket meeting the min threshold accepted,
 * and an overfull bucket trimmed down to the max threshold (32).
 */
@Test
public void testPrepBucket() {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_STANDARD1);
    cfs.truncateBlocking();
    cfs.disableAutoCompaction();
    ByteBuffer value = ByteBuffer.wrap(new byte[100]);
    // Primitive longs: the boxed Long locals here caused needless autoboxing
    // on every arithmetic/compare below.
    long tstamp = System.currentTimeMillis();
    long tstamp2 = tstamp - (2L * 3600L * 1000L);
    // Create the first 3 of 5 sstables, one row each (write timestamp = r).
    for (int r = 0; r < 3; r++) {
        DecoratedKey key = Util.dk(String.valueOf(r));
        new RowUpdateBuilder(cfs.metadata(), r, key.getKey()).clustering("column").add("val", value).build().applyUnsafe();
        cfs.forceBlockingFlush();
    }
    // Two more sstables, written identically to the first three.
    // NOTE(review): the original comment claimed the timestamp is decremented
    // here, but it is not — tstamp2 is only used further down to place these
    // sstables into the older (2h-ago) bucket.
    for (int r = 3; r < 5; r++) {
        DecoratedKey key = Util.dk(String.valueOf(r));
        new RowUpdateBuilder(cfs.metadata(), r, key.getKey()).clustering("column").add("val", value).build().applyUnsafe();
        cfs.forceBlockingFlush();
    }
    cfs.forceBlockingFlush();
    HashMultimap<Long, SSTableReader> buckets = HashMultimap.create();
    List<SSTableReader> sstrs = new ArrayList<>(cfs.getLiveSSTables());
    // Put 3 sstables into the newest bucket.
    for (int i = 0; i < 3; i++) {
        Pair<Long, Long> bounds = getWindowBoundsInMillis(TimeUnit.HOURS, 1, tstamp);
        buckets.put(bounds.left, sstrs.get(i));
    }
    // min threshold 4 > 3 sstables in the bucket -> nothing selected.
    List<SSTableReader> newBucket = newestBucket(buckets, 4, 32, new SizeTieredCompactionStrategyOptions(), getWindowBoundsInMillis(TimeUnit.HOURS, 1, System.currentTimeMillis()).left);
    assertTrue("incoming bucket should not be accepted when it has below the min threshold SSTables", newBucket.isEmpty());
    // min threshold 2 <= 3 sstables -> bucket accepted.
    newBucket = newestBucket(buckets, 2, 32, new SizeTieredCompactionStrategyOptions(), getWindowBoundsInMillis(TimeUnit.HOURS, 1, System.currentTimeMillis()).left);
    assertTrue("incoming bucket should be accepted when it is larger than the min threshold SSTables", !newBucket.isEmpty());
    // And 2 into the second bucket (2 hours back, via tstamp2).
    for (int i = 3; i < 5; i++) {
        Pair<Long, Long> bounds = getWindowBoundsInMillis(TimeUnit.HOURS, 1, tstamp2);
        buckets.put(bounds.left, sstrs.get(i));
    }
    assertEquals("an sstable with a single value should have equal min/max timestamps", sstrs.get(0).getMinTimestamp(), sstrs.get(0).getMaxTimestamp());
    assertEquals("an sstable with a single value should have equal min/max timestamps", sstrs.get(1).getMinTimestamp(), sstrs.get(1).getMaxTimestamp());
    assertEquals("an sstable with a single value should have equal min/max timestamps", sstrs.get(2).getMinTimestamp(), sstrs.get(2).getMaxTimestamp());
    // Test trim: write 35 more sstables with progressively more cells each.
    int numSSTables = 40;
    for (int r = 5; r < numSSTables; r++) {
        DecoratedKey key = Util.dk(String.valueOf(r));
        for (int i = 0; i < r; i++) {
            new RowUpdateBuilder(cfs.metadata(), tstamp + r, key.getKey()).clustering("column").add("val", value).build().applyUnsafe();
        }
        cfs.forceBlockingFlush();
    }
    // Reset the buckets and overfill one window (numSSTables replaces the
    // magic literal 40 the original loop bound used).
    sstrs = new ArrayList<>(cfs.getLiveSSTables());
    for (int i = 0; i < numSSTables; i++) {
        Pair<Long, Long> bounds = getWindowBoundsInMillis(TimeUnit.HOURS, 1, sstrs.get(i).getMaxTimestamp());
        buckets.put(bounds.left, sstrs.get(i));
    }
    newBucket = newestBucket(buckets, 4, 32, new SizeTieredCompactionStrategyOptions(), getWindowBoundsInMillis(TimeUnit.HOURS, 1, System.currentTimeMillis()).left);
    assertEquals("new bucket should be trimmed to max threshold of 32", newBucket.size(), 32);
}
Also used : DecoratedKey(org.apache.cassandra.db.DecoratedKey) ArrayList(java.util.ArrayList) ByteBuffer(java.nio.ByteBuffer) SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) RowUpdateBuilder(org.apache.cassandra.db.RowUpdateBuilder) Keyspace(org.apache.cassandra.db.Keyspace) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) Test(org.junit.Test)

Aggregations

ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore)175 Test (org.junit.Test)110 SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader)90 Keyspace (org.apache.cassandra.db.Keyspace)64 File (java.io.File)30 Directories (org.apache.cassandra.db.Directories)25 LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction)24 DecoratedKey (org.apache.cassandra.db.DecoratedKey)22 RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder)20 AbstractTransactionalTest (org.apache.cassandra.utils.concurrent.AbstractTransactionalTest)20 RandomAccessFile (java.io.RandomAccessFile)19 CompactionController (org.apache.cassandra.db.compaction.CompactionController)14 ArrayList (java.util.ArrayList)13 ByteBuffer (java.nio.ByteBuffer)12 CompactionIterator (org.apache.cassandra.db.compaction.CompactionIterator)12 Range (org.apache.cassandra.dht.Range)11 Table (org.apache.cassandra.db.Table)9 Token (org.apache.cassandra.dht.Token)9 Descriptor (org.apache.cassandra.io.sstable.Descriptor)9 StatsMetadata (org.apache.cassandra.io.sstable.metadata.StatsMetadata)9