
Example 46 with ColumnFamilyStore

Use of org.apache.cassandra.db.ColumnFamilyStore in project cassandra by apache.

From the class SizeTieredCompactionStrategyTest, method testPrepBucket.

@Test
public void testPrepBucket() throws Exception {
    String ksname = KEYSPACE1;
    String cfname = "Standard1";
    Keyspace keyspace = Keyspace.open(ksname);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfname);
    cfs.truncateBlocking();
    cfs.disableAutoCompaction();
    ByteBuffer value = ByteBuffer.wrap(new byte[100]);
    // create 3 sstables
    int numSSTables = 3;
    for (int r = 0; r < numSSTables; r++) {
        String key = String.valueOf(r);
        new RowUpdateBuilder(cfs.metadata(), 0, key).clustering("column").add("val", value).build().applyUnsafe();
        cfs.forceBlockingFlush();
    }
    cfs.forceBlockingFlush();
    List<SSTableReader> sstrs = new ArrayList<>(cfs.getLiveSSTables());
    Pair<List<SSTableReader>, Double> bucket;
    List<SSTableReader> interestingBucket = mostInterestingBucket(Collections.singletonList(sstrs.subList(0, 2)), 4, 32);
    assertTrue("nothing should be returned when all buckets are below the min threshold", interestingBucket.isEmpty());
    sstrs.get(0).overrideReadMeter(new RestorableMeter(100.0, 100.0));
    sstrs.get(1).overrideReadMeter(new RestorableMeter(200.0, 200.0));
    sstrs.get(2).overrideReadMeter(new RestorableMeter(300.0, 300.0));
    long estimatedKeys = sstrs.get(0).estimatedKeys();
    // if we have more than the max threshold, the coldest should be dropped
    bucket = trimToThresholdWithHotness(sstrs, 2);
    assertEquals("one bucket should have been dropped", 2, bucket.left.size());
    double expectedBucketHotness = (200.0 + 300.0) / estimatedKeys;
    assertEquals(String.format("bucket hotness (%f) should be close to %f", bucket.right, expectedBucketHotness), expectedBucketHotness, bucket.right, 1.0);
}
Also used : ArrayList(java.util.ArrayList) ByteBuffer(java.nio.ByteBuffer) RestorableMeter(org.apache.cassandra.metrics.RestorableMeter) SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) RowUpdateBuilder(org.apache.cassandra.db.RowUpdateBuilder) Keyspace(org.apache.cassandra.db.Keyspace) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) ArrayList(java.util.ArrayList) List(java.util.List) Test(org.junit.Test)
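
The assertions above capture the hotness rule in SizeTieredCompactionStrategy: a bucket below the min threshold is ignored, and a bucket above the max threshold keeps only its hottest members (reads per second divided by estimated keys), dropping the coldest. Below is a minimal, self-contained sketch of that trimming idea; the Candidate type and trimToThreshold method are hypothetical stand-ins for illustration, not the Cassandra API.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class HotnessTrimSketch {

    // Hypothetical stand-in for an sstable: reads per second and estimated key count.
    record Candidate(String name, double readsPerSec, long estimatedKeys) {
        double hotness() {
            return readsPerSec / estimatedKeys;
        }
    }

    // Keep only the hottest maxThreshold candidates; a simplified model of the
    // idea behind trimToThresholdWithHotness, not the Cassandra implementation.
    static List<Candidate> trimToThreshold(List<Candidate> bucket, int maxThreshold) {
        List<Candidate> sorted = new ArrayList<>(bucket);
        sorted.sort(Comparator.comparingDouble(Candidate::hotness).reversed());
        return sorted.subList(0, Math.min(maxThreshold, sorted.size()));
    }

    public static void main(String[] args) {
        List<Candidate> bucket = List.of(
            new Candidate("a", 100.0, 1000),
            new Candidate("b", 200.0, 1000),
            new Candidate("c", 300.0, 1000));
        // With a max threshold of 2, the coldest candidate ("a") is dropped,
        // just as the coldest sstable is dropped in the test above.
        System.out.println(trimToThreshold(bucket, 2));
    }
}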

Example 47 with ColumnFamilyStore

Use of org.apache.cassandra.db.ColumnFamilyStore in project cassandra by apache.

From the class KeyCollisionTest, method testGetSliceWithCollision.

@Test
public void testGetSliceWithCollision() throws Exception {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    cfs.clearUnsafe();
    // token = 2, kq ordered after row below lexicographically
    insert("k1", "k2", "kq");
    // token = 4
    insert("key1", "key2", "key3");
    // token = 8
    insert("longKey1", "longKey2");
    List<FilteredPartition> partitions = Util.getAll(Util.cmd(cfs).fromKeyIncl("k2").toKeyIncl("key2").build());
    assert partitions.get(0).partitionKey().getKey().equals(ByteBufferUtil.bytes("k2"));
    assert partitions.get(1).partitionKey().getKey().equals(ByteBufferUtil.bytes("kq"));
    assert partitions.get(2).partitionKey().getKey().equals(ByteBufferUtil.bytes("key1"));
    assert partitions.get(3).partitionKey().getKey().equals(ByteBufferUtil.bytes("key2"));
}
Also used : Keyspace(org.apache.cassandra.db.Keyspace) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) Test(org.junit.Test)
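
The comments explain the result order: the test's partitioner produces colliding tokens (the token values 2, 4, and 8 match the key lengths), and keys with the same token fall back to byte order. The sketch below models that ordering rule; the Key record and its length-based token() are assumptions inferred from the comments, standing in for DecoratedKey rather than reproducing it.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class KeyOrderSketch {

    // Hypothetical model of a decorated key: partitions sort by token first and
    // fall back to the raw key when tokens collide.
    record Key(String raw) {
        // Length-based token, assumed from the "token = 2 / 4 / 8" comments above.
        long token() {
            return raw.length();
        }
    }

    public static void main(String[] args) {
        List<Key> keys = new ArrayList<>(List.of(
            new Key("key2"), new Key("kq"), new Key("longKey1"), new Key("k1"),
            new Key("key1"), new Key("k2"), new Key("key3"), new Key("longKey2")));
        keys.sort(Comparator.comparingLong(Key::token).thenComparing(Key::raw));
        // Ring order: k1, k2, kq, key1, key2, key3, longKey1, longKey2, which is
        // why the slice from "k2" to "key2" returns k2, kq, key1, key2.
        System.out.println(keys);
    }
}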

Example 48 with ColumnFamilyStore

Use of org.apache.cassandra.db.ColumnFamilyStore in project cassandra by apache.

From the class SSTableWriterTestBase, method truncateCF.

@After
public void truncateCF() {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore store = keyspace.getColumnFamilyStore(CF);
    store.truncateBlocking();
    LifecycleTransaction.waitForDeletions();
}
Also used : Keyspace(org.apache.cassandra.db.Keyspace) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) After(org.junit.After)
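
The fixture pairs truncateBlocking() with LifecycleTransaction.waitForDeletions() because the sstable files are removed by background tasks; waiting for them keeps one test's files from leaking into the next. Here is a generic JUnit 4 sketch of the same cleanup pattern using plain temp files and a hypothetical background deleter, not the Cassandra API.

import java.nio.file.Files;
import java.nio.file.Path;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.junit.After;
import org.junit.Test;

public class CleanupFixtureSketch {

    // Hypothetical executor standing in for the background tasks that actually
    // remove files.
    static final ExecutorService deleter = Executors.newSingleThreadExecutor();

    private Path dataFile;

    @Test
    public void writesAFile() throws Exception {
        dataFile = Files.createTempFile("sketch", ".db");
    }

    @After
    public void cleanUp() throws Exception {
        if (dataFile == null)
            return;
        // Schedule the delete asynchronously, then block until it completes,
        // mirroring truncateBlocking() followed by waitForDeletions().
        Future<?> deletion = deleter.submit(() -> {
            try {
                Files.deleteIfExists(dataFile);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
        deletion.get(10, TimeUnit.SECONDS);
    }
}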

Example 49 with ColumnFamilyStore

Use of org.apache.cassandra.db.ColumnFamilyStore in project cassandra by apache.

From the class PerSSTableIndexWriterTest, method testSparse.

@Test
public void testSparse() throws Exception {
    final String columnName = "timestamp";
    ColumnFamilyStore cfs = Keyspace.open(KS_NAME).getColumnFamilyStore(CF_NAME);
    ColumnMetadata column = cfs.metadata().getColumn(UTF8Type.instance.decompose(columnName));
    SASIIndex sasi = (SASIIndex) cfs.indexManager.getIndexByName(cfs.name + "_" + columnName);
    File directory = cfs.getDirectories().getDirectoryForNewSSTables();
    Descriptor descriptor = cfs.newSSTableDescriptor(directory);
    PerSSTableIndexWriter indexWriter = (PerSSTableIndexWriter) sasi.getFlushObserver(descriptor, OperationType.FLUSH);
    final long now = System.currentTimeMillis();
    indexWriter.begin();
    indexWriter.indexes.put(column, indexWriter.newIndex(sasi.getIndex()));
    populateSegment(cfs.metadata(), indexWriter.getIndex(column), new HashMap<Long, Set<Integer>>() {

        {
            put(now, new HashSet<>(Arrays.asList(0, 1)));
            put(now + 1, new HashSet<>(Arrays.asList(2, 3)));
            put(now + 2, new HashSet<>(Arrays.asList(4, 5, 6, 7, 8, 9)));
        }
    });
    Callable<OnDiskIndex> segmentBuilder = indexWriter.getIndex(column).scheduleSegmentFlush(false);
    Assert.assertNull(segmentBuilder.call());
    PerSSTableIndexWriter.Index index = indexWriter.getIndex(column);
    Random random = ThreadLocalRandom.current();
    Set<String> segments = new HashSet<>();
    // now test multiple correct segments that yield an incorrect final segment
    for (int i = 0; i < 3; i++) {
        populateSegment(cfs.metadata(), index, new HashMap<Long, Set<Integer>>() {

            {
                put(now, new HashSet<>(Arrays.asList(random.nextInt(), random.nextInt(), random.nextInt())));
                put(now + 1, new HashSet<>(Arrays.asList(random.nextInt(), random.nextInt(), random.nextInt())));
                put(now + 2, new HashSet<>(Arrays.asList(random.nextInt(), random.nextInt(), random.nextInt())));
            }
        });
        try {
            // flush each of the new segments, they should all succeed
            OnDiskIndex segment = index.scheduleSegmentFlush(false).call();
            index.segments.add(Futures.immediateFuture(segment));
            segments.add(segment.getIndexPath());
        } catch (Exception | FSError e) {
            e.printStackTrace();
            Assert.fail();
        }
    }
    // make sure that all of the segments are present on the filesystem
    for (String segment : segments) Assert.assertTrue(new File(segment).exists());
    indexWriter.complete();
    // make sure that individual segments have been cleaned up
    for (String segment : segments) Assert.assertFalse(new File(segment).exists());
    // and combined index doesn't exist either
    Assert.assertFalse(new File(index.outputFile).exists());
}
Also used : ColumnMetadata(org.apache.cassandra.schema.ColumnMetadata) FSError(org.apache.cassandra.io.FSError) SASIIndex(org.apache.cassandra.index.sasi.SASIIndex) ConfigurationException(org.apache.cassandra.exceptions.ConfigurationException) ThreadLocalRandom(java.util.concurrent.ThreadLocalRandom) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) Descriptor(org.apache.cassandra.io.sstable.Descriptor) File(java.io.File) Test(org.junit.Test)
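
What the assertions exercise is a life cycle: each segment is flushed to its own on-disk file, and complete() either stitches them into a combined index or, as here, abandons it, cleaning up the per-segment files in both cases. The following is a loose, hypothetical model of that bookkeeping with plain files; it is not the SASI OnDiskIndex format or API.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.ArrayList;
import java.util.List;

public class SegmentFlushSketch {

    private final List<Path> segments = new ArrayList<>();

    // Flush one in-memory segment to its own temporary file; a stand-in for
    // scheduleSegmentFlush(false).call() in the test above.
    Path flushSegment(byte[] data) throws IOException {
        Path segment = Files.createTempFile("index-segment", ".db");
        Files.write(segment, data);
        segments.add(segment);
        return segment;
    }

    // Stand-in for complete(): stitch the segments into one output file when the
    // data is usable, and delete the per-segment files either way.
    void complete(Path output, boolean finalSegmentValid) throws IOException {
        try {
            if (finalSegmentValid) {
                for (Path segment : segments)
                    Files.write(output, Files.readAllBytes(segment),
                                StandardOpenOption.CREATE, StandardOpenOption.APPEND);
            }
        } finally {
            for (Path segment : segments)
                Files.deleteIfExists(segment);
        }
    }

    public static void main(String[] args) throws IOException {
        SegmentFlushSketch writer = new SegmentFlushSketch();
        writer.flushSegment("segment-1".getBytes());
        writer.flushSegment("segment-2".getBytes());
        Path output = Path.of("combined-index.db");
        // An unusable final segment means no combined index is written, yet the
        // per-segment files are still cleaned up.
        writer.complete(output, false);
        System.out.println(Files.exists(output)); // false
    }
}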

Example 50 with ColumnFamilyStore

Use of org.apache.cassandra.db.ColumnFamilyStore in project cassandra by apache.

From the class LegacySSTableTest, method loadLegacyTable.

private static void loadLegacyTable(String tablePattern, String legacyVersion, String compactSuffix) throws IOException {
    String table = String.format(tablePattern, legacyVersion, compactSuffix);
    logger.info("Loading legacy table {}", table);
    ColumnFamilyStore cfs = Keyspace.open("legacy_tables").getColumnFamilyStore(table);
    for (File cfDir : cfs.getDirectories().getCFDirectories()) {
        copySstablesToTestData(legacyVersion, table, cfDir);
    }
    cfs.loadNewSSTables();
}
Also used : ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) File(java.io.File)
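
The copy helper itself, copySstablesToTestData, is referenced but not shown above. A hedged sketch of what such a helper typically does follows: copy every sstable component file from a source directory into the live column family directory so that loadNewSSTables() can pick the files up. The class, method name, and directory layout here are assumptions for illustration, not the project's actual helper.

import java.io.File;
import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class CopyLegacySSTablesSketch {

    // Hypothetical sketch of a helper like copySstablesToTestData: copy every
    // sstable component file (Data.db, Index.db, Statistics.db, ...) from a
    // source directory into the column family's data directory.
    static void copyLegacySSTables(Path legacySSTableDir, File cfDir) throws IOException {
        try (DirectoryStream<Path> components = Files.newDirectoryStream(legacySSTableDir)) {
            for (Path component : components) {
                Path target = cfDir.toPath().resolve(component.getFileName());
                Files.copy(component, target, StandardCopyOption.REPLACE_EXISTING);
            }
        }
    }
}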

Aggregations

ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore) 175
Test (org.junit.Test) 110
SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader) 90
Keyspace (org.apache.cassandra.db.Keyspace) 64
File (java.io.File) 30
Directories (org.apache.cassandra.db.Directories) 25
LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction) 24
DecoratedKey (org.apache.cassandra.db.DecoratedKey) 22
RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder) 20
AbstractTransactionalTest (org.apache.cassandra.utils.concurrent.AbstractTransactionalTest) 20
RandomAccessFile (java.io.RandomAccessFile) 19
CompactionController (org.apache.cassandra.db.compaction.CompactionController) 14
ArrayList (java.util.ArrayList) 13
ByteBuffer (java.nio.ByteBuffer) 12
CompactionIterator (org.apache.cassandra.db.compaction.CompactionIterator) 12
Range (org.apache.cassandra.dht.Range) 11
Table (org.apache.cassandra.db.Table) 9
Token (org.apache.cassandra.dht.Token) 9
Descriptor (org.apache.cassandra.io.sstable.Descriptor) 9
StatsMetadata (org.apache.cassandra.io.sstable.metadata.StatsMetadata) 9