Search in sources:

Example 6 with SSTableReader

use of org.apache.cassandra.io.sstable.format.SSTableReader in project cassandra by apache.

From the class RepairedDataTombstonesTest, the method compactionDropExpiredSSTableTest.

@Test
public void compactionDropExpiredSSTableTest() throws Throwable {
    // Table with gc_grace_seconds=0 and STCS configured to purge tombstones
    // only from repaired sstables.
    createTable("create table %s (id int, id2 int, t text, primary key (id, id2)) with gc_grace_seconds=0 and compaction = {'class':'SizeTieredCompactionStrategy', 'only_purge_repaired_tombstones':true}");
    // First generation: ten row tombstones, flushed into one sstable.
    for (int row = 0; row < 10; row++) {
        execute("delete from %s where id=? and id2=?", 1, row);
    }
    flush();
    // Mark the first sstable as repaired, making its tombstones eligible for purge.
    SSTableReader repaired = getCurrentColumnFamilyStore().getSSTables(SSTableSet.LIVE).iterator().next();
    repair(getCurrentColumnFamilyStore(), repaired);
    // NOTE(review): sleeps presumably let the tombstones age past gc_grace (0s)
    // before the next generation is written — confirm against repair/purge timing.
    Thread.sleep(2000);
    // Second generation: ten more row tombstones, left unrepaired.
    for (int row = 10; row < 20; row++) {
        execute("delete from %s where id=? and id2=?", 1, row);
    }
    flush();
    Thread.sleep(1000);
    getCurrentColumnFamilyStore().forceMajorCompaction();
    verifyIncludingPurgeable();
    verify2IncludingPurgeable(1);
    // After compaction only one sstable remains, and it must be the unrepaired one.
    assertEquals(1, Iterables.size(getCurrentColumnFamilyStore().getSSTables(SSTableSet.LIVE)));
    assertFalse(getCurrentColumnFamilyStore().getSSTables(SSTableSet.LIVE).iterator().next().isRepaired());
}
Also used : SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) Test(org.junit.Test)

Example 7 with SSTableReader

use of org.apache.cassandra.io.sstable.format.SSTableReader in project cassandra by apache.

From the class RangeTombstoneTest, the method testTrackTimesPartitionTombstoneWithData.

@Test
public void testTrackTimesPartitionTombstoneWithData() throws ExecutionException, InterruptedException {
    // Checks that sstable metadata tracks min/max timestamps across a mix of
    // live data (ts 999) and a full-partition tombstone (ts 1000).
    ColumnFamilyStore store = Keyspace.open(KSNAME).getColumnFamilyStore(CFNAME);
    store.truncateBlocking();
    // One live row in partition "rt_times" written at timestamp 999.
    UpdateBuilder.create(store.metadata(), "rt_times").withTimestamp(999).newRow(5).add("val", 5).apply();
    // Full-partition delete of "rt_times2" at timestamp 1000.
    int nowInSec = FBUtilities.nowInSeconds();
    new Mutation(PartitionUpdate.fullPartitionDelete(store.metadata(), Util.dk("rt_times2"), 1000, nowInSec)).apply();
    store.forceBlockingFlush();
    // Expected after flush: min ts 999, max ts 1000, and Integer.MAX_VALUE
    // (presumably the "no local deletion time" sentinel — confirm in StatsMetadata).
    SSTableReader flushed = store.getLiveSSTables().iterator().next();
    assertTimes(flushed.getSSTableMetadata(), 999, 1000, Integer.MAX_VALUE);
    // The same bounds must survive a major compaction.
    store.forceMajorCompaction();
    SSTableReader compacted = store.getLiveSSTables().iterator().next();
    assertTimes(compacted.getSSTableMetadata(), 999, 1000, Integer.MAX_VALUE);
}
Also used : SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) Test(org.junit.Test)

Example 8 with SSTableReader

use of org.apache.cassandra.io.sstable.format.SSTableReader in project cassandra by apache.

From the class RangeTombstoneTest, the method testTrackTimesRangeTombstoneWithData.

@Test
public void testTrackTimesRangeTombstoneWithData() throws ExecutionException, InterruptedException {
    // Checks that sstable metadata tracks min/max timestamps across live data
    // (ts 999) and a partition-level deletion (ts 1000), before and after compaction.
    // NOTE(review): despite the method name, this body is identical to
    // testTrackTimesPartitionTombstoneWithData and writes no range tombstone —
    // confirm whether an addRangeTombstone() call was intended here.
    Keyspace ks = Keyspace.open(KSNAME);
    ColumnFamilyStore cfs = ks.getColumnFamilyStore(CFNAME);
    cfs.truncateBlocking();
    String key = "rt_times";
    // Live row at timestamp 999.
    UpdateBuilder.create(cfs.metadata(), key).withTimestamp(999).newRow(5).add("val", 5).apply();
    key = "rt_times2";
    int nowInSec = FBUtilities.nowInSeconds();
    // Full-partition delete at timestamp 1000.
    new Mutation(PartitionUpdate.fullPartitionDelete(cfs.metadata(), Util.dk(key), 1000, nowInSec)).apply();
    // Fix: a single flush suffices; the original's back-to-back duplicate
    // forceBlockingFlush() call was a copy-paste artifact with nothing left to flush.
    cfs.forceBlockingFlush();
    SSTableReader sstable = cfs.getLiveSSTables().iterator().next();
    assertTimes(sstable.getSSTableMetadata(), 999, 1000, Integer.MAX_VALUE);
    // Metadata bounds must be preserved through a major compaction.
    cfs.forceMajorCompaction();
    sstable = cfs.getLiveSSTables().iterator().next();
    assertTimes(sstable.getSSTableMetadata(), 999, 1000, Integer.MAX_VALUE);
}
Also used : SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) Test(org.junit.Test)

Example 9 with SSTableReader

use of org.apache.cassandra.io.sstable.format.SSTableReader in project cassandra by apache.

From the class RangeTombstoneTest, the method testRangeTombstoneCompaction.

@Test
public void testRangeTombstoneCompaction() throws Exception {
    // Verifies the physical on-disk layout of an sstable produced by compacting
    // even-numbered rows together with an overlapping range tombstone [0, 7]:
    // expected order is open marker, close marker, then the surviving row.
    ColumnFamilyStore cfs = Keyspace.open(KSNAME).getColumnFamilyStore(CFNAME);
    ByteBuffer key = ByteBufferUtil.bytes("k4");
    // Start from a clean table with compaction under manual control.
    cfs.truncateBlocking();
    cfs.disableAutoCompaction();
    // First sstable: rows 0,2,4,6,8 written at timestamp 0.
    UpdateBuilder builder = UpdateBuilder.create(cfs.metadata(), key).withTimestamp(0);
    for (int i = 0; i < 10; i += 2) {
        builder.newRow(i).add("val", i);
    }
    builder.applyUnsafe();
    cfs.forceBlockingFlush();
    // Second sstable: a range tombstone covering clusterings 0 through 7.
    new RowUpdateBuilder(cfs.metadata(), 0, key).addRangeTombstone(0, 7).build().applyUnsafe();
    cfs.forceBlockingFlush();
    assertEquals(2, cfs.getLiveSSTables().size());
    // Compact the two sstables down to one.
    CompactionManager.instance.performMaximal(cfs, false);
    assertEquals(1, cfs.getLiveSSTables().size());
    // Scan the compacted sstable and check the unfiltered sequence directly.
    SSTableReader compacted = cfs.getLiveSSTables().iterator().next();
    try (UnfilteredPartitionIterator partitions = compacted.getScanner();
        UnfilteredRowIterator partition = partitions.next()) {
        Unfiltered first = partition.next();
        assertTrue("Expecting open marker, got " + first.toString(cfs.metadata()), first instanceof RangeTombstoneMarker);
        Unfiltered second = partition.next();
        assertTrue("Expecting close marker, got " + second.toString(cfs.metadata()), second instanceof RangeTombstoneMarker);
        Unfiltered third = partition.next();
        assertTrue("Expecting row, got " + third.toString(cfs.metadata()), third instanceof Row);
    }
}
Also used : SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) UpdateBuilder(org.apache.cassandra.UpdateBuilder) ByteBuffer(java.nio.ByteBuffer) Test(org.junit.Test)

Example 10 with SSTableReader

use of org.apache.cassandra.io.sstable.format.SSTableReader in project cassandra by apache.

From the class CrcCheckChanceTest, the method testChangingCrcCheckChance.

/**
 * Exercises crc_check_chance propagation through table creation, ALTER TABLE
 * and the JMX-style setter, asserting the value is visible on the table, its
 * secondary index, the live sstables, and already-open data readers.
 *
 * Refactor: the three-row insert block (repeated four times) and the
 * four-way crc assertion block (repeated three times) are extracted into
 * private helpers; behavior is unchanged.
 *
 * @param newFormat true to use the standalone "crc_check_chance" table option,
 *                  false to embed it in the compression parameter map
 */
public void testChangingCrcCheckChance(boolean newFormat) throws Throwable {
    //Start with crc_check_chance of 99%
    if (newFormat)
        createTable("CREATE TABLE %s (p text, c text, v text, s text static, PRIMARY KEY (p, c)) WITH compression = {'sstable_compression': 'LZ4Compressor'} AND crc_check_chance = 0.99;");
    else
        createTable("CREATE TABLE %s (p text, c text, v text, s text static, PRIMARY KEY (p, c)) WITH compression = {'sstable_compression': 'LZ4Compressor', 'crc_check_chance' : 0.99}");
    execute("CREATE INDEX foo ON %s(v)");
    insertSampleData();
    ColumnFamilyStore cfs = Keyspace.open(CQLTester.KEYSPACE).getColumnFamilyStore(currentTable());
    ColumnFamilyStore indexCfs = cfs.indexManager.getAllIndexColumnFamilyStores().iterator().next();
    cfs.forceBlockingFlush();
    assertCrcCheckChance(0.99, cfs, indexCfs);
    //Test for stack overflow
    if (newFormat)
        alterTable("ALTER TABLE %s WITH crc_check_chance = 0.99");
    else
        alterTable("ALTER TABLE %s WITH compression = {'sstable_compression': 'LZ4Compressor', 'crc_check_chance': 0.99}");
    assertRows(execute("SELECT * FROM %s WHERE p=?", "p1"), row("p1", "k1", "sv1", "v1"), row("p1", "k2", "sv1", "v2"));
    assertRows(execute("SELECT * FROM %s WHERE v=?", "v1"), row("p1", "k1", "sv1", "v1"));
    //Write a few SSTables then Compact
    insertSampleData();
    cfs.forceBlockingFlush();
    insertSampleData();
    cfs.forceBlockingFlush();
    insertSampleData();
    cfs.forceBlockingFlush();
    cfs.forceMajorCompaction();
    //Now let's change via JMX
    cfs.setCrcCheckChance(0.01);
    assertCrcCheckChance(0.01, cfs, indexCfs);
    assertRows(execute("SELECT * FROM %s WHERE p=?", "p1"), row("p1", "k1", "sv1", "v1"), row("p1", "k2", "sv1", "v2"));
    assertRows(execute("SELECT * FROM %s WHERE v=?", "v1"), row("p1", "k1", "sv1", "v1"));
    //Alter again via schema
    if (newFormat)
        alterTable("ALTER TABLE %s WITH crc_check_chance = 0.5");
    else
        alterTable("ALTER TABLE %s WITH compression = {'sstable_compression': 'LZ4Compressor', 'crc_check_chance': 0.5}");
    //We should be able to get the new value by accessing directly the schema metadata
    Assert.assertEquals(0.5, cfs.metadata().params.crcCheckChance);
    //but previous JMX-set value will persist until next restart
    Assert.assertEquals(0.01, cfs.getLiveSSTables().iterator().next().getCrcCheckChance());
    Assert.assertEquals(0.01, indexCfs.getCrcCheckChance());
    Assert.assertEquals(0.01, indexCfs.getLiveSSTables().iterator().next().getCrcCheckChance());
    //Verify the call used by JMX still works
    cfs.setCrcCheckChance(0.03);
    assertCrcCheckChance(0.03, cfs, indexCfs);
    // Also check that any open readers also use the updated value
    // note: only compressed files currently perform crc checks, so only the dfile reader is relevant here
    SSTableReader baseSSTable = cfs.getLiveSSTables().iterator().next();
    SSTableReader idxSSTable = indexCfs.getLiveSSTables().iterator().next();
    try (RandomAccessReader baseDataReader = baseSSTable.openDataReader();
        RandomAccessReader idxDataReader = idxSSTable.openDataReader()) {
        Assert.assertEquals(0.03, baseDataReader.getCrcCheckChance());
        Assert.assertEquals(0.03, idxDataReader.getCrcCheckChance());
        cfs.setCrcCheckChance(0.31);
        Assert.assertEquals(0.31, baseDataReader.getCrcCheckChance());
        Assert.assertEquals(0.31, idxDataReader.getCrcCheckChance());
    }
}

// Inserts the three sample rows (two clustered rows in p1, one static-only row in p2).
private void insertSampleData() throws Throwable {
    execute("INSERT INTO %s(p, c, v, s) values (?, ?, ?, ?)", "p1", "k1", "v1", "sv1");
    execute("INSERT INTO %s(p, c, v) values (?, ?, ?)", "p1", "k2", "v2");
    execute("INSERT INTO %s(p, s) values (?, ?)", "p2", "sv2");
}

// Asserts the given crc_check_chance on the base table, its index, and one live sstable of each.
private static void assertCrcCheckChance(double expected, ColumnFamilyStore cfs, ColumnFamilyStore indexCfs) {
    Assert.assertEquals(expected, cfs.getCrcCheckChance());
    Assert.assertEquals(expected, cfs.getLiveSSTables().iterator().next().getCrcCheckChance());
    Assert.assertEquals(expected, indexCfs.getCrcCheckChance());
    Assert.assertEquals(expected, indexCfs.getLiveSSTables().iterator().next().getCrcCheckChance());
}
Also used : SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) RandomAccessReader(org.apache.cassandra.io.util.RandomAccessReader) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore)

Aggregations

SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader)289 Test (org.junit.Test)159 ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore)91 LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction)55 Keyspace (org.apache.cassandra.db.Keyspace)49 File (java.io.File)45 UUID (java.util.UUID)28 Range (org.apache.cassandra.dht.Range)28 Directories (org.apache.cassandra.db.Directories)27 Token (org.apache.cassandra.dht.Token)24 RandomAccessFile (java.io.RandomAccessFile)22 AbstractTransactionalTest (org.apache.cassandra.utils.concurrent.AbstractTransactionalTest)20 ArrayList (java.util.ArrayList)18 ByteBuffer (java.nio.ByteBuffer)17 HashSet (java.util.HashSet)16 SchemaLoader.createKeyspace (org.apache.cassandra.SchemaLoader.createKeyspace)16 DecoratedKey (org.apache.cassandra.db.DecoratedKey)16 RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder)16 CompactionController (org.apache.cassandra.db.compaction.CompactionController)14 CompactionIterator (org.apache.cassandra.db.compaction.CompactionIterator)13