
Example 51 with Keyspace

Use of org.apache.cassandra.db.Keyspace in the Apache Cassandra project.

From class SSTableRewriterTest, method testCanonicalView: verifies that while an SSTable is being rewritten, the canonical view still exposes only the original reader and never one that was opened early.

@Test
public void testCanonicalView() throws Exception {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    SSTableReader s = writeFile(cfs, 1000);
    cfs.addSSTable(s);
    Set<SSTableReader> sstables = Sets.newHashSet(s);
    assertEquals(1, sstables.size());
    boolean checked = false;
    // Rewrite the single source SSTable via a compaction-style scan under a lifecycle transaction.
    try (ISSTableScanner scanner = sstables.iterator().next().getScanner();
        CompactionController controller = new CompactionController(cfs, sstables, 0);
        LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN);
        SSTableRewriter writer = new SSTableRewriter(txn, 1000, 10000000, false);
        CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID())) {
        writer.switchWriter(getWriter(cfs, sstables.iterator().next().descriptor.directory, txn));
        while (ci.hasNext()) {
            writer.append(ci.next());
            // Once enough output has been written, inspect the canonical view exactly once.
            if (!checked && writer.currentWriter().getFilePointer() > 15000000) {
                checked = true;
                ColumnFamilyStore.ViewFragment viewFragment = cfs.select(View.selectFunction(SSTableSet.CANONICAL));
                // canonical view should have only one SSTable which is not opened early.
                assertEquals(1, viewFragment.sstables.size());
                SSTableReader sstable = viewFragment.sstables.get(0);
                assertEquals(s.descriptor, sstable.descriptor);
                assertTrue("Found early opened SSTable in canonical view: " + sstable.getFilename(), sstable.openReason != SSTableReader.OpenReason.EARLY);
            }
        }
    }
    truncateCF();
    validateCFS(cfs);
}
Also used: CompactionController (org.apache.cassandra.db.compaction.CompactionController), SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader), CompactionIterator (org.apache.cassandra.db.compaction.CompactionIterator), Keyspace (org.apache.cassandra.db.Keyspace), ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore), LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction), Test (org.junit.Test)
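
The canonical-view check inside the loop above can be lifted into a reusable assertion. The sketch below is not part of SSTableRewriterTest; the class and method names are made up, and it assumes View and SSTableSet live in org.apache.cassandra.db.lifecycle. It only rearranges calls that already appear in the test.

import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.lifecycle.SSTableSet;
import org.apache.cassandra.db.lifecycle.View;
import org.apache.cassandra.io.sstable.format.SSTableReader;

import static org.junit.Assert.assertTrue;

// Hypothetical helper: fails if the canonical view of a table exposes any
// reader that was opened early by an in-flight rewrite or compaction.
public final class CanonicalViewAssertions {
    private CanonicalViewAssertions() {
    }

    public static void assertNoEarlyOpenedSSTables(ColumnFamilyStore cfs) {
        ColumnFamilyStore.ViewFragment view = cfs.select(View.selectFunction(SSTableSet.CANONICAL));
        for (SSTableReader sstable : view.sstables) {
            assertTrue("Found early opened SSTable in canonical view: " + sstable.getFilename(),
                       sstable.openReason != SSTableReader.OpenReason.EARLY);
        }
    }
}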

Example 52 with Keyspace

Use of org.apache.cassandra.db.Keyspace in the Apache Cassandra project.

From class SSTableRewriterTest, method testNumberOfFiles_truncate: rewrites a single large SSTable, switching to a new output writer as data accumulates, and checks that the number of live SSTables matches the expected file count at each switch.

@Test
public void testNumberOfFiles_truncate() throws Exception {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    cfs.disableAutoCompaction();
    SSTableReader s = writeFile(cfs, 1000);
    cfs.addSSTable(s);
    Set<SSTableReader> compacting = Sets.newHashSet(s);
    List<SSTableReader> sstables;
    int files = 1;
    try (ISSTableScanner scanner = s.getScanner();
        CompactionController controller = new CompactionController(cfs, compacting, 0);
        LifecycleTransaction txn = cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
        SSTableRewriter rewriter = new SSTableRewriter(txn, 1000, 10000000, false);
        CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID())) {
        rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
        while (ci.hasNext()) {
            rewriter.append(ci.next());
            // Switch to a new output writer roughly every 25 MB of on-disk data.
            if (rewriter.currentWriter().getOnDiskFilePointer() > 25000000) {
                rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
                files++;
                // we have one original file plus the ones we have switched out.
                assertEquals(cfs.getLiveSSTables().size(), files);
            }
        }
        sstables = rewriter.finish();
    }
    LifecycleTransaction.waitForDeletions();
    assertFileCounts(s.descriptor.directory.list());
    validateCFS(cfs);
}
Also used: CompactionController (org.apache.cassandra.db.compaction.CompactionController), SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader), CompactionIterator (org.apache.cassandra.db.compaction.CompactionIterator), Keyspace (org.apache.cassandra.db.Keyspace), ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore), LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction), Test (org.junit.Test)
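
Helpers such as writeFile, getWriter, truncate, truncateCF, validateCFS and assertFileCounts are defined in the test classes and are not reproduced in this listing. As a rough, hypothetical stand-in for populating a table through the normal write path, assuming the same imports as the tests above, a single text clustering column, and a regular column named "val" (the real writeFile builds a large SSTable directly and is not equivalent):

// Hypothetical illustration only: inserts `partitions` rows, flushes them into
// a new SSTable, and returns one of the resulting live readers.
static SSTableReader writeAndFlush(ColumnFamilyStore cfs, int partitions) {
    for (int i = 0; i < partitions; i++) {
        new RowUpdateBuilder(cfs.metadata(), System.currentTimeMillis(), "key" + i)
            .clustering("col" + i)                        // assumed clustering column
            .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER) // assumed regular column named "val"
            .build()
            .applyUnsafe();
    }
    cfs.forceBlockingFlush();
    return cfs.getLiveSSTables().iterator().next();
}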

Example 53 with Keyspace

Use of org.apache.cassandra.db.Keyspace in the Apache Cassandra project.

From class SSTableMetadataTest, method trackMaxMinColNames: checks that SSTable metadata tracks the minimum and maximum clustering values, both after a flush and after a major compaction folds in rows with larger clustering keys.

@Test
public void trackMaxMinColNames() throws CharacterCodingException, ExecutionException, InterruptedException {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore store = keyspace.getColumnFamilyStore("Standard3");
    // Write 8 partitions ("row0".."row7"), each with clustering keys "<j>col100" through "<j>col149".
    for (int j = 0; j < 8; j++) {
        String key = "row" + j;
        for (int i = 100; i < 150; i++) {
            new RowUpdateBuilder(store.metadata(), System.currentTimeMillis(), key)
                .clustering(j + "col" + i)
                .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)
                .build()
                .applyUnsafe();
        }
    }
    store.forceBlockingFlush();
    assertEquals(1, store.getLiveSSTables().size());
    for (SSTableReader sstable : store.getLiveSSTables()) {
        assertEquals(ByteBufferUtil.string(sstable.getSSTableMetadata().minClusteringValues.get(0)), "0col100");
        assertEquals(ByteBufferUtil.string(sstable.getSSTableMetadata().maxClusteringValues.get(0)), "7col149");
    }
    String key = "row2";
    for (int i = 101; i < 299; i++) {
        new RowUpdateBuilder(store.metadata(), System.currentTimeMillis(), key)
            .clustering(9 + "col" + i)
            .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)
            .build()
            .applyUnsafe();
    }
    store.forceBlockingFlush();
    store.forceMajorCompaction();
    assertEquals(1, store.getLiveSSTables().size());
    for (SSTableReader sstable : store.getLiveSSTables()) {
        assertEquals(ByteBufferUtil.string(sstable.getSSTableMetadata().minClusteringValues.get(0)), "0col100");
        assertEquals(ByteBufferUtil.string(sstable.getSSTableMetadata().maxClusteringValues.get(0)), "9col298");
    }
}
Also used: SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader), RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder), Keyspace (org.apache.cassandra.db.Keyspace), ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore), Test (org.junit.Test)
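
The same metadata fields can be read outside of an assertion. A minimal sketch reusing only the calls from the method above (the method name is made up, imports as in the test):

// Prints the first min/max clustering value recorded in each live SSTable's
// metadata, decoded as a UTF-8 string exactly as the assertions above do.
static void printClusteringBounds(ColumnFamilyStore store) throws CharacterCodingException {
    for (SSTableReader sstable : store.getLiveSSTables()) {
        String min = ByteBufferUtil.string(sstable.getSSTableMetadata().minClusteringValues.get(0));
        String max = ByteBufferUtil.string(sstable.getSSTableMetadata().maxClusteringValues.get(0));
        System.out.println(sstable.getFilename() + ": min=" + min + ", max=" + max);
    }
}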

Example 54 with Keyspace

Use of org.apache.cassandra.db.Keyspace in the Apache Cassandra project.

From class SSTableRewriterTest, method testSmallFiles: rewrites a smaller SSTable with a low switch threshold and checks that the number of live SSTables tracks the number of output files, both while switching writers and after the rewrite finishes.

@Test
public void testSmallFiles() throws Exception {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    cfs.disableAutoCompaction();
    SSTableReader s = writeFile(cfs, 400);
    cfs.addSSTable(s);
    Set<SSTableReader> compacting = Sets.newHashSet(s);
    List<SSTableReader> sstables;
    int files = 1;
    try (ISSTableScanner scanner = s.getScanner();
        CompactionController controller = new CompactionController(cfs, compacting, 0);
        LifecycleTransaction txn = cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
        SSTableRewriter rewriter = new SSTableRewriter(txn, 1000, 1000000, false);
        CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID())) {
        rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
        while (ci.hasNext()) {
            rewriter.append(ci.next());
            if (rewriter.currentWriter().getOnDiskFilePointer() > 2500000) {
                // all files are now opened early
                assertEquals(files, cfs.getLiveSSTables().size());
                rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
                files++;
            }
        }
        sstables = rewriter.finish();
    }
    assertEquals(files, sstables.size());
    assertEquals(files, cfs.getLiveSSTables().size());
    LifecycleTransaction.waitForDeletions();
    assertFileCounts(s.descriptor.directory.list());
    validateCFS(cfs);
}
Also used: CompactionController (org.apache.cassandra.db.compaction.CompactionController), SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader), CompactionIterator (org.apache.cassandra.db.compaction.CompactionIterator), Keyspace (org.apache.cassandra.db.Keyspace), ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore), LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction), Test (org.junit.Test)

Example 55 with Keyspace

Use of org.apache.cassandra.db.Keyspace in the Apache Cassandra project.

From class SSTableRewriterTest, method testSSTableSplit: splits one large SSTable into smaller files with SSTableSplitter inside an offline lifecycle transaction, then cleans up the data directory so later tests start from a clean state.

@Test
public void testSSTableSplit() throws InterruptedException {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    cfs.disableAutoCompaction();
    SSTableReader s = writeFile(cfs, 1000);
    try (LifecycleTransaction txn = LifecycleTransaction.offline(OperationType.UNKNOWN, s)) {
        // Split the source SSTable into smaller files; the work runs against the offline transaction.
        SSTableSplitter splitter = new SSTableSplitter(cfs, txn, 10);
        splitter.split();
        assertFileCounts(s.descriptor.directory.list());
        LifecycleTransaction.waitForDeletions();
        for (File f : s.descriptor.directory.listFiles()) {
            // we need to clear out the data dir, otherwise tests running after this one break
            FileUtils.deleteRecursive(f);
        }
    }
    truncate(cfs);
}
Also used: SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader), SSTableSplitter (org.apache.cassandra.db.compaction.SSTableSplitter), Keyspace (org.apache.cassandra.db.Keyspace), ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore), LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction), File (java.io.File), Test (org.junit.Test)
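
Wrapped as a utility, the offline split reduces to a few lines. A minimal sketch using only calls that appear in the test above (the method name is hypothetical, and the size argument is simply forwarded to SSTableSplitter as the test does with 10):

// Hypothetical utility: splits a single SSTable offline inside its own
// lifecycle transaction, then waits for obsolete files to be deleted.
static void splitOffline(ColumnFamilyStore cfs, SSTableReader sstable, int splitSize) throws InterruptedException {
    try (LifecycleTransaction txn = LifecycleTransaction.offline(OperationType.UNKNOWN, sstable)) {
        new SSTableSplitter(cfs, txn, splitSize).split();
    }
    LifecycleTransaction.waitForDeletions();
}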

Aggregations

Keyspace (org.apache.cassandra.db.Keyspace): 69 usages
ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore): 63
SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader): 48
Test (org.junit.Test): 42
LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction): 23
RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder): 17
CompactionController (org.apache.cassandra.db.compaction.CompactionController): 13
CompactionIterator (org.apache.cassandra.db.compaction.CompactionIterator): 12
DecoratedKey (org.apache.cassandra.db.DecoratedKey): 11
ByteBuffer (java.nio.ByteBuffer): 10
File (java.io.File): 5
Directories (org.apache.cassandra.db.Directories): 5
SystemKeyspace (org.apache.cassandra.db.SystemKeyspace): 5
RestorableMeter (org.apache.cassandra.metrics.RestorableMeter): 5
ArrayList (java.util.ArrayList): 4
AbstractCompactionStrategy (org.apache.cassandra.db.compaction.AbstractCompactionStrategy): 4
Range (org.apache.cassandra.dht.Range): 4
Token (org.apache.cassandra.dht.Token): 4
Descriptor (org.apache.cassandra.io.sstable.Descriptor): 4
PrintStream (java.io.PrintStream): 3