
Example 36 with SSTableReader

Use of org.apache.cassandra.io.sstable.format.SSTableReader in project cassandra by apache.

From the class ViewTest, method testCompaction.

@Test
public void testCompaction() {
    ColumnFamilyStore cfs = MockSchema.newCFS();
    View initialView = fakeView(0, 5, cfs, true);
    View cur = initialView;
    List<SSTableReader> readers = ImmutableList.copyOf(initialView.sstables);
    Assert.assertTrue(View.permitCompacting(readers).apply(cur));
    // check we permit compacting duplicates in the predicate, so we don't spin infinitely if there is a screw up
    Assert.assertTrue(View.permitCompacting(ImmutableList.copyOf(concat(readers, readers))).apply(cur));
    // check we fail in the application in the presence of duplicates
    testFailure(View.updateCompacting(emptySet(), concat(readers.subList(0, 1), readers.subList(0, 1))), cur);
    // do lots of trivial checks that the compacting set and related methods behave properly for a simple update
    cur = View.updateCompacting(emptySet(), readers.subList(0, 2)).apply(cur);
    Assert.assertTrue(View.permitCompacting(readers.subList(2, 5)).apply(cur));
    Assert.assertFalse(View.permitCompacting(readers.subList(0, 2)).apply(cur));
    Assert.assertFalse(View.permitCompacting(readers.subList(0, 1)).apply(cur));
    Assert.assertFalse(View.permitCompacting(readers.subList(1, 2)).apply(cur));
    Assert.assertTrue(readers.subList(2, 5).containsAll(copyOf(cur.getUncompacting(readers))));
    Assert.assertEquals(3, copyOf(cur.getUncompacting(readers)).size());
    Assert.assertTrue(ImmutableSet.copyOf(cur.select(SSTableSet.NONCOMPACTING)).containsAll(readers.subList(2, 5)));
    Assert.assertEquals(3, ImmutableSet.copyOf(cur.select(SSTableSet.NONCOMPACTING)).size());
    // check marking already compacting readers fails with an exception
    testFailure(View.updateCompacting(emptySet(), readers.subList(0, 1)), cur);
    testFailure(View.updateCompacting(emptySet(), readers.subList(1, 2)), cur);
    testFailure(View.updateCompacting(copyOf(readers.subList(0, 1)), readers.subList(1, 2)), cur);
    // make equivalents of readers.subList(0, 3) that are different instances
    SSTableReader r0 = MockSchema.sstable(0, cfs), r1 = MockSchema.sstable(1, cfs), r2 = MockSchema.sstable(2, cfs);
    // attempt to mark compacting a version not in the live set
    testFailure(View.updateCompacting(emptySet(), of(r2)), cur);
    // update one compacting, one non-compacting, of the liveset to another instance of the same readers;
    // confirm liveset changes but compacting does not
    cur = View.updateLiveSet(copyOf(readers.subList(1, 3)), of(r1, r2)).apply(cur);
    Assert.assertSame(readers.get(0), cur.sstablesMap.get(r0));
    Assert.assertSame(r1, cur.sstablesMap.get(r1));
    Assert.assertSame(r2, cur.sstablesMap.get(r2));
    testFailure(View.updateCompacting(emptySet(), readers.subList(2, 3)), cur);
    Assert.assertSame(readers.get(1), Iterables.getFirst(Iterables.filter(cur.compacting, Predicates.equalTo(r1)), null));
    // unmark compacting, and check our methods are all correctly updated
    cur = View.updateCompacting(copyOf(readers.subList(0, 1)), emptySet()).apply(cur);
    Assert.assertTrue(View.permitCompacting(concat(readers.subList(0, 1), of(r2), readers.subList(3, 5))).apply(cur));
    Assert.assertFalse(View.permitCompacting(readers.subList(1, 2)).apply(cur));
    testFailure(View.updateCompacting(emptySet(), readers.subList(1, 2)), cur);
    testFailure(View.updateCompacting(copyOf(readers.subList(0, 2)), emptySet()), cur);
    Assert.assertTrue(copyOf(concat(readers.subList(0, 1), readers.subList(2, 5))).containsAll(copyOf(cur.getUncompacting(readers))));
    Assert.assertEquals(4, copyOf(cur.getUncompacting(readers)).size());
    Set<SSTableReader> nonCompacting = ImmutableSet.copyOf(cur.select(SSTableSet.NONCOMPACTING));
    Assert.assertTrue(nonCompacting.containsAll(readers.subList(2, 5)));
    Assert.assertTrue(nonCompacting.containsAll(readers.subList(0, 1)));
    Assert.assertEquals(4, nonCompacting.size());
    for (SSTableReader sstable : initialView.sstables) sstable.selfRef().release();
}
Also used: SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader), ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore), Test (org.junit.Test)
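
The test above boils down to a guard-then-apply pattern on the immutable View. The helper below is a minimal sketch, not part of the test or of Cassandra itself: markCompacting is a hypothetical name, and the only calls it uses are the two static transforms exercised above (permitCompacting as a predicate over the View, updateCompacting as a View-to-View function).

static View markCompacting(View view, List<SSTableReader> candidates) {
    // permitCompacting yields a predicate over the View; it rejects candidates that are
    // already compacting or missing from the live set
    if (!View.permitCompacting(candidates).apply(view))
        throw new IllegalStateException("candidates cannot be marked compacting");
    // updateCompacting yields a function from the old View to a new View:
    // nothing is unmarked here, and all candidates are marked compacting
    return View.updateCompacting(Collections.emptySet(), candidates).apply(view);
}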

Example 37 with SSTableReader

Use of org.apache.cassandra.io.sstable.format.SSTableReader in project cassandra by apache.

From the class DateTieredCompactionStrategyTest, method testPrepBucket.

@Test
public void testPrepBucket() {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_STANDARD1);
    cfs.disableAutoCompaction();
    ByteBuffer value = ByteBuffer.wrap(new byte[100]);
    // create 3 sstables
    int numSSTables = 3;
    for (int r = 0; r < numSSTables; r++) {
        DecoratedKey key = Util.dk(String.valueOf(r));
        new RowUpdateBuilder(cfs.metadata(), r, key.getKey()).clustering("column").add("val", value).build().applyUnsafe();
        cfs.forceBlockingFlush();
    }
    cfs.forceBlockingFlush();
    List<SSTableReader> sstrs = new ArrayList<>(cfs.getLiveSSTables());
    List<SSTableReader> newBucket = newestBucket(Collections.singletonList(sstrs.subList(0, 2)), 4, 32, 9, 10, Long.MAX_VALUE, new SizeTieredCompactionStrategyOptions());
    assertTrue("incoming bucket should not be accepted when it has below the min threshold SSTables", newBucket.isEmpty());
    newBucket = newestBucket(Collections.singletonList(sstrs.subList(0, 2)), 4, 32, 10, 10, Long.MAX_VALUE, new SizeTieredCompactionStrategyOptions());
    assertFalse("non-incoming bucket should be accepted when it has at least 2 SSTables", newBucket.isEmpty());
    assertEquals("an sstable with a single value should have equal min/max timestamps", sstrs.get(0).getMinTimestamp(), sstrs.get(0).getMaxTimestamp());
    assertEquals("an sstable with a single value should have equal min/max timestamps", sstrs.get(1).getMinTimestamp(), sstrs.get(1).getMaxTimestamp());
    assertEquals("an sstable with a single value should have equal min/max timestamps", sstrs.get(2).getMinTimestamp(), sstrs.get(2).getMaxTimestamp());
    cfs.truncateBlocking();
}
Also used: SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader), RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder), Keyspace (org.apache.cassandra.db.Keyspace), DecoratedKey (org.apache.cassandra.db.DecoratedKey), ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore), ByteBuffer (java.nio.ByteBuffer), Test (org.junit.Test)
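
Stripped of the assertions, the contract the test relies on is simply that an empty result means the bucket is not ready to compact. The wrapper below is a hypothetical sketch: the call is copied verbatim from the test (including its numeric arguments), it assumes the same static import of newestBucket, and hasCompactionCandidates is not a real Cassandra method.

static boolean hasCompactionCandidates(List<SSTableReader> bucket) {
    List<SSTableReader> chosen = newestBucket(Collections.singletonList(bucket),
                                              4, 32, 10, 10, Long.MAX_VALUE,
                                              new SizeTieredCompactionStrategyOptions());
    // an empty list means the bucket did not meet the strategy's acceptance criteria
    return !chosen.isEmpty();
}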

Example 38 with SSTableReader

Use of org.apache.cassandra.io.sstable.format.SSTableReader in project cassandra by apache.

From the class DateTieredCompactionStrategyTest, method testDropExpiredSSTables.

@Test
public void testDropExpiredSSTables() throws InterruptedException {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_STANDARD1);
    cfs.disableAutoCompaction();
    ByteBuffer value = ByteBuffer.wrap(new byte[100]);
    // create 2 sstables
    DecoratedKey key = Util.dk(String.valueOf("expired"));
    new RowUpdateBuilder(cfs.metadata(), System.currentTimeMillis(), 1, key.getKey()).clustering("column").add("val", value).build().applyUnsafe();
    cfs.forceBlockingFlush();
    SSTableReader expiredSSTable = cfs.getLiveSSTables().iterator().next();
    Thread.sleep(10);
    key = Util.dk(String.valueOf("nonexpired"));
    new RowUpdateBuilder(cfs.metadata(), System.currentTimeMillis(), key.getKey()).clustering("column").add("val", value).build().applyUnsafe();
    cfs.forceBlockingFlush();
    assertEquals(cfs.getLiveSSTables().size(), 2);
    // max sstable age of one second (1d / 86400 days) and no delay between expired-sstable checks
    Map<String, String> options = new HashMap<>();
    options.put(DateTieredCompactionStrategyOptions.BASE_TIME_KEY, "30");
    options.put(DateTieredCompactionStrategyOptions.TIMESTAMP_RESOLUTION_KEY, "MILLISECONDS");
    options.put(DateTieredCompactionStrategyOptions.MAX_SSTABLE_AGE_KEY, Double.toString((1d / (24 * 60 * 60))));
    options.put(DateTieredCompactionStrategyOptions.EXPIRED_SSTABLE_CHECK_FREQUENCY_SECONDS_KEY, "0");
    DateTieredCompactionStrategy dtcs = new DateTieredCompactionStrategy(cfs, options);
    for (SSTableReader sstable : cfs.getLiveSSTables()) dtcs.addSSTable(sstable);
    dtcs.startup();
    assertNull(dtcs.getNextBackgroundTask((int) (System.currentTimeMillis() / 1000)));
    Thread.sleep(2000);
    AbstractCompactionTask t = dtcs.getNextBackgroundTask((int) (System.currentTimeMillis() / 1000));
    assertNotNull(t);
    assertEquals(1, Iterables.size(t.transaction.originals()));
    SSTableReader sstable = t.transaction.originals().iterator().next();
    assertEquals(sstable, expiredSSTable);
    t.transaction.abort();
    cfs.truncateBlocking();
}
Also used: SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader), RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder), Keyspace (org.apache.cassandra.db.Keyspace), DecoratedKey (org.apache.cassandra.db.DecoratedKey), ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore), ByteBuffer (java.nio.ByteBuffer), Test (org.junit.Test)
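
Ignoring the assertions, the test reduces to the flow below. This is a minimal sketch rather than a recipe: it assumes cfs is the ColumnFamilyStore from the surrounding test, keeps only the options that force expired-sstable checks on every call, and relies on the strategy's defaults for everything else.

Map<String, String> options = new HashMap<>();
options.put(DateTieredCompactionStrategyOptions.TIMESTAMP_RESOLUTION_KEY, "MILLISECONDS");
options.put(DateTieredCompactionStrategyOptions.EXPIRED_SSTABLE_CHECK_FREQUENCY_SECONDS_KEY, "0");
DateTieredCompactionStrategy dtcs = new DateTieredCompactionStrategy(cfs, options);
for (SSTableReader sstable : cfs.getLiveSSTables())
    dtcs.addSSTable(sstable);
dtcs.startup();
// gcBefore is in seconds; a non-null task means the strategy found work to do,
// and for a fully expired sstable that work is dropping it
AbstractCompactionTask task = dtcs.getNextBackgroundTask((int) (System.currentTimeMillis() / 1000));
if (task != null)
    task.transaction.abort(); // the test only inspects the task; it never executes it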

Example 39 with SSTableReader

Use of org.apache.cassandra.io.sstable.format.SSTableReader in project cassandra by apache.

From the class LeveledCompactionStrategyTest, method testCompactionProgress.

@Test
public void testCompactionProgress() throws Exception {
    // make sure we have SSTables in L1
    byte[] b = new byte[100 * 1024];
    new Random().nextBytes(b);
    ByteBuffer value = ByteBuffer.wrap(b);
    int rows = 2;
    int columns = 10;
    for (int r = 0; r < rows; r++) {
        UpdateBuilder update = UpdateBuilder.create(cfs.metadata(), String.valueOf(r));
        for (int c = 0; c < columns; c++) update.newRow("column" + c).add("val", value);
        update.applyUnsafe();
        cfs.forceBlockingFlush();
    }
    waitForLeveling(cfs);
    LeveledCompactionStrategy strategy = (LeveledCompactionStrategy) cfs.getCompactionStrategyManager().getStrategies().get(1).get(0);
    assert strategy.getLevelSize(1) > 0;
    // get LeveledScanner for level 1 sstables
    Collection<SSTableReader> sstables = strategy.manifest.getLevel(1);
    List<ISSTableScanner> scanners = strategy.getScanners(sstables).scanners;
    // should be one per level
    assertEquals(1, scanners.size());
    ISSTableScanner scanner = scanners.get(0);
    // scan through to the end
    while (scanner.hasNext()) scanner.next();
    // scanner.getCurrentPosition should be equal to total bytes of L1 sstables
    assertEquals(scanner.getCurrentPosition(), SSTableReader.getTotalUncompressedBytes(sstables));
}
Also used: ISSTableScanner (org.apache.cassandra.io.sstable.ISSTableScanner), SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader), Random (java.util.Random), UpdateBuilder (org.apache.cassandra.UpdateBuilder), ByteBuffer (java.nio.ByteBuffer), Test (org.junit.Test)
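
The progress measure the test checks can be stated directly: a scanner's current position, compared with the level's total uncompressed bytes, is how far the scan has progressed. A minimal sketch under the same assumptions as the test (a strategy whose manifest has data in level 1); the printf is illustrative only.

Collection<SSTableReader> level1 = strategy.manifest.getLevel(1);
long totalBytes = SSTableReader.getTotalUncompressedBytes(level1);
// the leveled strategy returns one scanner per level
for (ISSTableScanner scanner : strategy.getScanners(level1).scanners) {
    while (scanner.hasNext())
        scanner.next();
    // after a full scan, the position has advanced through all of the level's data
    System.out.printf("scanned %d of %d uncompressed bytes%n", scanner.getCurrentPosition(), totalBytes);
}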

Example 40 with SSTableReader

Use of org.apache.cassandra.io.sstable.format.SSTableReader in project cassandra by apache.

From the class LeveledCompactionStrategyTest, method testNewRepairedSSTable.

@Test
public void testNewRepairedSSTable() throws Exception {
    byte[] b = new byte[100 * 1024];
    new Random().nextBytes(b);
    // 100 KB value, make it easy to have multiple files
    ByteBuffer value = ByteBuffer.wrap(b);
    // Enough data to have a level 1 and 2
    int rows = 40;
    int columns = 20;
    // Adds enough data to trigger multiple sstable per level
    for (int r = 0; r < rows; r++) {
        UpdateBuilder update = UpdateBuilder.create(cfs.metadata(), String.valueOf(r));
        for (int c = 0; c < columns; c++) update.newRow("column" + c).add("val", value);
        update.applyUnsafe();
        cfs.forceBlockingFlush();
    }
    waitForLeveling(cfs);
    cfs.disableAutoCompaction();
    while (CompactionManager.instance.isCompacting(Arrays.asList(cfs))) Thread.sleep(100);
    CompactionStrategyManager manager = cfs.getCompactionStrategyManager();
    List<List<AbstractCompactionStrategy>> strategies = manager.getStrategies();
    LeveledCompactionStrategy repaired = (LeveledCompactionStrategy) strategies.get(0).get(0);
    LeveledCompactionStrategy unrepaired = (LeveledCompactionStrategy) strategies.get(1).get(0);
    assertEquals(0, repaired.manifest.getLevelCount());
    assertEquals(2, unrepaired.manifest.getLevelCount());
    assertTrue(manager.getSSTableCountPerLevel()[1] > 0);
    assertTrue(manager.getSSTableCountPerLevel()[2] > 0);
    for (SSTableReader sstable : cfs.getLiveSSTables()) assertFalse(sstable.isRepaired());
    int sstableCount = 0;
    for (List<SSTableReader> level : unrepaired.manifest.generations) sstableCount += level.size();
    // we only have unrepaired sstables:
    assertEquals(sstableCount, cfs.getLiveSSTables().size());
    SSTableReader sstable1 = unrepaired.manifest.generations[2].get(0);
    SSTableReader sstable2 = unrepaired.manifest.generations[1].get(0);
    sstable1.descriptor.getMetadataSerializer().mutateRepaired(sstable1.descriptor, System.currentTimeMillis(), null);
    sstable1.reloadSSTableMetadata();
    assertTrue(sstable1.isRepaired());
    manager.handleNotification(new SSTableRepairStatusChanged(Arrays.asList(sstable1)), this);
    int repairedSSTableCount = 0;
    for (List<SSTableReader> level : repaired.manifest.generations) repairedSSTableCount += level.size();
    assertEquals(1, repairedSSTableCount);
    // make sure the repaired sstable ends up in the same level in the repaired manifest:
    assertTrue(repaired.manifest.generations[2].contains(sstable1));
    // and that it is gone from unrepaired
    assertFalse(unrepaired.manifest.generations[2].contains(sstable1));
    unrepaired.removeSSTable(sstable2);
    manager.handleNotification(new SSTableAddedNotification(singleton(sstable2)), this);
    assertTrue(unrepaired.manifest.getLevel(1).contains(sstable2));
    assertFalse(repaired.manifest.getLevel(1).contains(sstable2));
}
Also used: UpdateBuilder (org.apache.cassandra.UpdateBuilder), SSTableAddedNotification (org.apache.cassandra.notifications.SSTableAddedNotification), ByteBuffer (java.nio.ByteBuffer), SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader), SSTableRepairStatusChanged (org.apache.cassandra.notifications.SSTableRepairStatusChanged), Random (java.util.Random), ArrayList (java.util.ArrayList), List (java.util.List), Test (org.junit.Test)
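
The repaired-status transition the test drives by hand fits in a small helper. This is a hypothetical sketch (the method name and the notificationSource parameter are inventions for illustration); it is restricted to the calls visible in the test: mutate the on-disk metadata, reload it into the reader, then notify the CompactionStrategyManager so the sstable moves from the unrepaired to the repaired strategy.

static void markRepaired(SSTableReader sstable, CompactionStrategyManager manager, Object notificationSource) throws Exception {
    // rewrite the repairedAt field in the sstable's stats metadata (no pending repair)
    sstable.descriptor.getMetadataSerializer()
           .mutateRepaired(sstable.descriptor, System.currentTimeMillis(), null);
    // pick up the mutated metadata in the in-memory SSTableReader
    sstable.reloadSSTableMetadata();
    // the manager reacts by moving the sstable between its per-repair-status strategies
    manager.handleNotification(new SSTableRepairStatusChanged(Arrays.asList(sstable)), notificationSource);
}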

Aggregations

SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader): 289
Test (org.junit.Test): 159
ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore): 91
LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction): 55
Keyspace (org.apache.cassandra.db.Keyspace): 49
File (java.io.File): 45
UUID (java.util.UUID): 28
Range (org.apache.cassandra.dht.Range): 28
Directories (org.apache.cassandra.db.Directories): 27
Token (org.apache.cassandra.dht.Token): 24
RandomAccessFile (java.io.RandomAccessFile): 22
AbstractTransactionalTest (org.apache.cassandra.utils.concurrent.AbstractTransactionalTest): 20
ArrayList (java.util.ArrayList): 18
ByteBuffer (java.nio.ByteBuffer): 17
HashSet (java.util.HashSet): 16
SchemaLoader.createKeyspace (org.apache.cassandra.SchemaLoader.createKeyspace): 16
DecoratedKey (org.apache.cassandra.db.DecoratedKey): 16
RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder): 16
CompactionController (org.apache.cassandra.db.compaction.CompactionController): 14
CompactionIterator (org.apache.cassandra.db.compaction.CompactionIterator): 13