Example 41 with LifecycleTransaction

Use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in project cassandra by apache.

From class ScrubTest, method testScrubCorruptedCounterRow.

@Test
public void testScrubCorruptedCounterRow() throws IOException, WriteTimeoutException {
    // When compression is enabled, for testing corrupted chunks we need enough partitions to cover
    // at least 3 chunks of size COMPRESSION_CHUNK_LENGTH
    int numPartitions = 1000;
    CompactionManager.instance.disableAutoCompaction();
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(COUNTER_CF);
    cfs.clearUnsafe();
    fillCounterCF(cfs, numPartitions);
    assertOrderedAll(cfs, numPartitions);
    assertEquals(1, cfs.getLiveSSTables().size());
    SSTableReader sstable = cfs.getLiveSSTables().iterator().next();
    // make sure to override at most 1 chunk when compression is enabled
    overrideWithGarbage(sstable, ByteBufferUtil.bytes("0"), ByteBufferUtil.bytes("1"));
    // with skipCorrupted == false, the scrub is expected to fail
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(Arrays.asList(sstable), OperationType.SCRUB);
        Scrubber scrubber = new Scrubber(cfs, txn, false, true)) {
        scrubber.scrub();
        fail("Expected a CorruptSSTableException to be thrown");
    } catch (IOError err) {
        // expected: the corrupted partition aborts the scrub when skipCorrupted == false
    }
    // with skipCorrupted == true, the corrupt rows will be skipped
    Scrubber.ScrubResult scrubResult;
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(Arrays.asList(sstable), OperationType.SCRUB);
        Scrubber scrubber = new Scrubber(cfs, txn, true, true)) {
        scrubResult = scrubber.scrubWithResult();
    }
    assertNotNull(scrubResult);
    boolean compression = Boolean.parseBoolean(System.getProperty("cassandra.test.compression", "false"));
    if (compression) {
        assertEquals(0, scrubResult.emptyRows);
        assertEquals(numPartitions, scrubResult.badRows + scrubResult.goodRows);
        // because we only corrupted 1 chunk and we chose enough partitions to cover at least 3 chunks
        assertTrue(scrubResult.goodRows >= scrubResult.badRows * 2);
    } else {
        assertEquals(0, scrubResult.emptyRows);
        assertEquals(1, scrubResult.badRows);
        assertEquals(numPartitions - 1, scrubResult.goodRows);
    }
    assertEquals(1, cfs.getLiveSSTables().size());
    assertOrderedAll(cfs, scrubResult.goodRows);
}
Also used: Scrubber(org.apache.cassandra.db.compaction.Scrubber) SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) SchemaLoader.createKeyspace(org.apache.cassandra.SchemaLoader.createKeyspace) LifecycleTransaction(org.apache.cassandra.db.lifecycle.LifecycleTransaction) Test(org.junit.Test)
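
A detail these scrub examples gloss over: Tracker.tryModify returns null when the sstables cannot be marked compacting (for example, when another operation already owns them), so code outside a controlled test usually guards against that before opening the Scrubber. Below is a minimal sketch of that pattern; tryScrub is a hypothetical helper name, and it reuses only the calls shown in the example above (the last two Scrubber constructor arguments are the skipCorrupted and checkData flags, as in the test).

import java.io.IOException;
import java.util.Collections;

import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.compaction.OperationType;
import org.apache.cassandra.db.compaction.Scrubber;
import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
import org.apache.cassandra.io.sstable.format.SSTableReader;

public final class ScrubSketch
{
    /**
     * Sketch only: scrub a single sstable if it can be marked compacting.
     * Returns false when the tracker refuses the transaction (sstable busy).
     */
    static boolean tryScrub(ColumnFamilyStore cfs, SSTableReader sstable, boolean skipCorrupted) throws IOException
    {
        LifecycleTransaction txn = cfs.getTracker().tryModify(Collections.singleton(sstable), OperationType.SCRUB);
        if (txn == null)
            return false; // another operation holds the sstable; the caller may retry later

        // try-with-resources closes both the Scrubber and the transaction, as in the tests above
        try (LifecycleTransaction t = txn;
             Scrubber scrubber = new Scrubber(cfs, t, skipCorrupted, true))
        {
            scrubber.scrub();
        }
        return true;
    }
}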

Example 42 with LifecycleTransaction

Use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in project cassandra by apache.

From class ScrubTest, method testScrubCorruptedRowInSmallFile.

@Test
public void testScrubCorruptedRowInSmallFile() throws IOException, WriteTimeoutException {
    // cannot test this with compression
    assumeTrue(!Boolean.parseBoolean(System.getProperty("cassandra.test.compression", "false")));
    CompactionManager.instance.disableAutoCompaction();
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(COUNTER_CF);
    cfs.clearUnsafe();
    fillCounterCF(cfs, 2);
    assertOrderedAll(cfs, 2);
    SSTableReader sstable = cfs.getLiveSSTables().iterator().next();
    // overwrite one row with garbage
    overrideWithGarbage(sstable, ByteBufferUtil.bytes("0"), ByteBufferUtil.bytes("1"));
    // with skipCorrupted == false, the scrub is expected to fail
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(Arrays.asList(sstable), OperationType.SCRUB);
        Scrubber scrubber = new Scrubber(cfs, txn, false, true)) {
        scrubber.scrub();
        fail("Expected a CorruptSSTableException to be thrown");
    } catch (IOError err) {
        // expected: scrubbing the corrupt row with skipCorrupted == false aborts
    }
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(Arrays.asList(sstable), OperationType.SCRUB);
        Scrubber scrubber = new Scrubber(cfs, txn, true, true)) {
        // with skipCorrupted == true, the corrupt row will be skipped
        scrubber.scrub();
    }
    assertEquals(1, cfs.getLiveSSTables().size());
    // verify that we can read all of the rows, and there is now one less row
    assertOrderedAll(cfs, 1);
}
Also used: Scrubber(org.apache.cassandra.db.compaction.Scrubber) SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) SchemaLoader.createKeyspace(org.apache.cassandra.SchemaLoader.createKeyspace) LifecycleTransaction(org.apache.cassandra.db.lifecycle.LifecycleTransaction) Test(org.junit.Test)

Example 43 with LifecycleTransaction

Use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in project cassandra by apache.

From class CompactionAwareWriterTest, method testMajorLeveledCompactionWriter.

@Test
public void testMajorLeveledCompactionWriter() throws Throwable {
    ColumnFamilyStore cfs = getColumnFamilyStore();
    cfs.disableAutoCompaction();
    int rowCount = 20000;
    int targetSSTableCount = 50;
    populate(rowCount);
    LifecycleTransaction txn = cfs.getTracker().tryModify(cfs.getLiveSSTables(), OperationType.COMPACTION);
    long beforeSize = txn.originals().iterator().next().onDiskLength();
    int sstableSize = (int) (beforeSize / targetSSTableCount);
    CompactionAwareWriter writer = new MajorLeveledCompactionWriter(cfs, cfs.getDirectories(), txn, txn.originals(), sstableSize);
    int rows = compact(cfs, txn, writer);
    assertEquals(targetSSTableCount, cfs.getLiveSSTables().size());
    int[] levelCounts = new int[5];
    assertEquals(rowCount, rows);
    for (SSTableReader sstable : cfs.getLiveSSTables()) {
        levelCounts[sstable.getSSTableLevel()]++;
    }
    assertEquals(0, levelCounts[0]);
    assertEquals(10, levelCounts[1]);
    // note: if the test is changed to produce more levels, update these level assertions
    assertEquals(targetSSTableCount - 10, levelCounts[2]);
    for (int i = 3; i < levelCounts.length; i++) assertEquals(0, levelCounts[i]);
    validateData(cfs, rowCount);
    cfs.truncateBlocking();
}
Also used: CompactionAwareWriter(org.apache.cassandra.db.compaction.writers.CompactionAwareWriter) SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) LifecycleTransaction(org.apache.cassandra.db.lifecycle.LifecycleTransaction) MajorLeveledCompactionWriter(org.apache.cassandra.db.compaction.writers.MajorLeveledCompactionWriter)
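
The levelCounts assertions follow from leveled compaction's sizing: with 50 output sstables of roughly equal size and a fanout of 10, level 1 can hold 10 of them and the remaining 40 spill to level 2. The fanout value below is an assumption based on the default leveled layout, stated only to make the arithmetic behind the assertions explicit; this is plain Java, not Cassandra API.

// Back-of-the-envelope check of the level distribution asserted in testMajorLeveledCompactionWriter.
public final class LevelMathSketch
{
    public static void main(String[] args)
    {
        int targetSSTableCount = 50; // sstables produced by MajorLeveledCompactionWriter in the test
        int fanout = 10;             // assumed: level 1 holds ~10 sstables of the configured size

        int level1 = Math.min(targetSSTableCount, fanout);   // 10, matches levelCounts[1]
        int level2 = targetSSTableCount - level1;            // 40, matches levelCounts[2] == targetSSTableCount - 10

        System.out.println("expected L1=" + level1 + ", L2=" + level2);
    }
}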

Example 44 with LifecycleTransaction

Use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in project cassandra by apache.

From class CompactionAwareWriterTest, method testMaxSSTableSizeWriter.

@Test
public void testMaxSSTableSizeWriter() throws Throwable {
    ColumnFamilyStore cfs = getColumnFamilyStore();
    cfs.disableAutoCompaction();
    int rowCount = 1000;
    populate(rowCount);
    LifecycleTransaction txn = cfs.getTracker().tryModify(cfs.getLiveSSTables(), OperationType.COMPACTION);
    long beforeSize = txn.originals().iterator().next().onDiskLength();
    int sstableSize = (int) (beforeSize / 10);
    CompactionAwareWriter writer = new MaxSSTableSizeWriter(cfs, cfs.getDirectories(), txn, txn.originals(), sstableSize, 0);
    int rows = compact(cfs, txn, writer);
    assertEquals(10, cfs.getLiveSSTables().size());
    assertEquals(rowCount, rows);
    validateData(cfs, rowCount);
    cfs.truncateBlocking();
}
Also used: CompactionAwareWriter(org.apache.cassandra.db.compaction.writers.CompactionAwareWriter) MaxSSTableSizeWriter(org.apache.cassandra.db.compaction.writers.MaxSSTableSizeWriter) LifecycleTransaction(org.apache.cassandra.db.lifecycle.LifecycleTransaction)
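
Both writer tests follow the same sizing recipe: take the single sstable held by the transaction, divide its on-disk length by the number of outputs you want, and pass that as the writer's maximum sstable size. A small sketch of that recipe as a helper; makeWriter is a hypothetical name (not part of Cassandra), and everything it calls appears verbatim in the examples above.

import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.compaction.writers.CompactionAwareWriter;
import org.apache.cassandra.db.compaction.writers.MaxSSTableSizeWriter;
import org.apache.cassandra.db.lifecycle.LifecycleTransaction;

final class WriterSizingSketch
{
    // Hypothetical helper: size each output so the single input sstable in the
    // transaction splits into roughly targetCount sstables.
    static CompactionAwareWriter makeWriter(ColumnFamilyStore cfs, LifecycleTransaction txn, int targetCount)
    {
        long beforeSize = txn.originals().iterator().next().onDiskLength();
        int sstableSize = (int) Math.max(1, beforeSize / targetCount); // avoid a zero target size for tiny inputs
        // level 0 here mirrors testMaxSSTableSizeWriter; MajorLeveledCompactionWriter assigns levels itself
        return new MaxSSTableSizeWriter(cfs, cfs.getDirectories(), txn, txn.originals(), sstableSize, 0);
    }
}

Called as makeWriter(cfs, txn, 10), this reproduces the setup of testMaxSSTableSizeWriter above.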

Example 45 with LifecycleTransaction

Use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in project cassandra by apache.

From class AntiCompactionTest, method antiCompactionSizeTest.

@Ignore
@Test
public void antiCompactionSizeTest() throws InterruptedException, IOException {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    cfs.disableAutoCompaction();
    SSTableReader s = writeFile(cfs, 1000);
    cfs.addSSTable(s);
    Range<Token> range = new Range<Token>(new BytesToken(ByteBufferUtil.bytes(0)), new BytesToken(ByteBufferUtil.bytes(500)));
    Collection<SSTableReader> sstables = cfs.getLiveSSTables();
    UUID parentRepairSession = UUID.randomUUID();
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.ANTICOMPACTION);
        Refs<SSTableReader> refs = Refs.ref(sstables)) {
        CompactionManager.instance.performAnticompaction(cfs, Arrays.asList(range), refs, txn, 12345, NO_PENDING_REPAIR, parentRepairSession);
    }
    long sum = 0;
    long rows = 0;
    for (SSTableReader x : cfs.getLiveSSTables()) {
        sum += x.bytesOnDisk();
        rows += x.getTotalRows();
    }
    assertEquals(sum, cfs.metric.liveDiskSpaceUsed.getCount());
    // see writeFile for how this number is derived
    assertEquals(1000 * (1000 * 5), rows);
}
Also used: SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) BytesToken(org.apache.cassandra.dht.ByteOrderedPartitioner.BytesToken) LifecycleTransaction(org.apache.cassandra.db.lifecycle.LifecycleTransaction) Token(org.apache.cassandra.dht.Token) Range(org.apache.cassandra.dht.Range) UUID(java.util.UUID) Ignore(org.junit.Ignore) Test(org.junit.Test)

Aggregations

LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction): 60
SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader): 53
Test (org.junit.Test): 28
ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore): 24
Keyspace (org.apache.cassandra.db.Keyspace): 23
CompactionController (org.apache.cassandra.db.compaction.CompactionController): 13
CompactionIterator (org.apache.cassandra.db.compaction.CompactionIterator): 12
File (java.io.File): 10
Range (org.apache.cassandra.dht.Range): 7
UUID (java.util.UUID): 5
BytesToken (org.apache.cassandra.dht.ByteOrderedPartitioner.BytesToken): 5
Token (org.apache.cassandra.dht.Token): 5
IOException (java.io.IOException): 4
AbstractCompactionStrategy (org.apache.cassandra.db.compaction.AbstractCompactionStrategy): 4
CompactionAwareWriter (org.apache.cassandra.db.compaction.writers.CompactionAwareWriter): 4
SSTableWriter (org.apache.cassandra.io.sstable.format.SSTableWriter): 4
RestorableMeter (org.apache.cassandra.metrics.RestorableMeter): 4
ByteBuffer (java.nio.ByteBuffer): 3
SchemaLoader.createKeyspace (org.apache.cassandra.SchemaLoader.createKeyspace): 3
DatabaseDescriptor (org.apache.cassandra.config.DatabaseDescriptor): 3