Search in sources :

Example 21 with LifecycleTransaction

Use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in the Apache Cassandra project.

From class SSTableRewriterTest, method testNumberOfFiles_dont_clean_readers.

/**
 * Verifies that switching writers mid-rewrite does not remove the original reader:
 * after each switch the live set must contain the original sstable plus every
 * writer switched out so far, and the final file count must match.
 */
@Test
public void testNumberOfFiles_dont_clean_readers() throws Exception {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    SSTableReader s = writeFile(cfs, 1000);
    cfs.addSSTable(s);
    Set<SSTableReader> compacting = Sets.newHashSet(s);
    List<SSTableReader> sstables;
    // starts at 1: the original file counts toward the live set throughout
    int files = 1;
    try (ISSTableScanner scanner = s.getScanner();
        CompactionController controller = new CompactionController(cfs, compacting, 0);
        LifecycleTransaction txn = cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
        SSTableRewriter rewriter = new SSTableRewriter(txn, 1000, 10000000, false);
        CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID())) {
        rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
        while (ci.hasNext()) {
            rewriter.append(ci.next());
            if (rewriter.currentWriter().getOnDiskFilePointer() > 25000000) {
                rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
                files++;
                // we have one original file plus the ones we have switched out.
                // NOTE: expected value goes first (JUnit convention) so failure
                // messages report the right direction.
                assertEquals(files, cfs.getLiveSSTables().size());
            }
        }
        sstables = rewriter.finish();
    }
    assertEquals(files, sstables.size());
    assertEquals(files, cfs.getLiveSSTables().size());
    LifecycleTransaction.waitForDeletions();
    assertFileCounts(s.descriptor.directory.list());
    validateCFS(cfs);
}
Also used : CompactionController(org.apache.cassandra.db.compaction.CompactionController) SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) CompactionIterator(org.apache.cassandra.db.compaction.CompactionIterator) Keyspace(org.apache.cassandra.db.Keyspace) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) LifecycleTransaction(org.apache.cassandra.db.lifecycle.LifecycleTransaction) Test(org.junit.Test)

Example 22 with LifecycleTransaction

Use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in the Apache Cassandra project.

From class SSTableRewriterTest, method testNumberOfFiles_abort.

/**
 * Shared driver for the abort tests: runs the supplied {@link RewriterTest}
 * scenario against a fresh rewrite of one sstable, then asserts that aborting
 * left the table exactly as it was — same disk usage, one live sstable, and the
 * original first/last keys.
 *
 * @param test the abort scenario to execute inside the open transaction
 * @throws Exception if the scenario or validation fails
 */
private void testNumberOfFiles_abort(RewriterTest test) throws Exception {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    SSTableReader s = writeFile(cfs, 1000);
    cfs.addSSTable(s);
    // capture pre-rewrite state; an aborted rewrite must restore all of it
    DecoratedKey origFirst = s.first;
    DecoratedKey origLast = s.last;
    long startSize = cfs.metric.liveDiskSpaceUsed.getCount();
    Set<SSTableReader> compacting = Sets.newHashSet(s);
    try (ISSTableScanner scanner = s.getScanner();
        CompactionController controller = new CompactionController(cfs, compacting, 0);
        LifecycleTransaction txn = cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
        SSTableRewriter rewriter = new SSTableRewriter(txn, 1000, 10000000, false)) {
        rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
        test.run(scanner, controller, s, cfs, rewriter, txn);
    }
    LifecycleTransaction.waitForDeletions();
    assertEquals(startSize, cfs.metric.liveDiskSpaceUsed.getCount());
    assertEquals(1, cfs.getLiveSSTables().size());
    assertFileCounts(s.descriptor.directory.list());
    // expected value first (JUnit convention): the original keys are what we expect back
    assertEquals(origFirst, cfs.getLiveSSTables().iterator().next().first);
    assertEquals(origLast, cfs.getLiveSSTables().iterator().next().last);
    validateCFS(cfs);
}
Also used : CompactionController(org.apache.cassandra.db.compaction.CompactionController) SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) Keyspace(org.apache.cassandra.db.Keyspace) DecoratedKey(org.apache.cassandra.db.DecoratedKey) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) LifecycleTransaction(org.apache.cassandra.db.lifecycle.LifecycleTransaction)

Example 23 with LifecycleTransaction

Use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in the Apache Cassandra project.

From class SSTableRewriterTest, method testTwoWriters.

/**
     * Emulates anticompaction: streams the rows of a single source sstable into
     * two rewriters sharing one transaction, splitting on how much the first
     * writer has already written, then checks every partition is still readable.
     *
     * @throws IOException
     */
@Test
public void testTwoWriters() throws IOException {
    Keyspace ks = Keyspace.open(KEYSPACE);
    ColumnFamilyStore store = ks.getColumnFamilyStore(CF);
    truncate(store);
    SSTableReader source = writeFile(store, 1000);
    store.addSSTable(source);
    Set<SSTableReader> toRewrite = Sets.newHashSet(source);
    assertEquals(1, toRewrite.size());
    int now = FBUtilities.nowInSeconds();
    try (AbstractCompactionStrategy.ScannerList scanners = store.getCompactionStrategyManager().getScanners(toRewrite);
        LifecycleTransaction txn = store.getTracker().tryModify(toRewrite, OperationType.UNKNOWN);
        SSTableRewriter first = SSTableRewriter.constructWithoutEarlyOpening(txn, false, 1000);
        SSTableRewriter second = SSTableRewriter.constructWithoutEarlyOpening(txn, false, 1000);
        CompactionController controller = new CompactionController(store, toRewrite, store.gcBefore(now));
        CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, scanners.scanners, controller, now, UUIDGen.getTimeUUID())) {
        first.switchWriter(getWriter(store, toRewrite.iterator().next().descriptor.directory, txn));
        second.switchWriter(getWriter(store, toRewrite.iterator().next().descriptor.directory, txn));
        // route each partition: fill the first writer up to ~15MB, then spill
        // everything else into the second one
        while (ci.hasNext()) {
            if (first.currentWriter().getFilePointer() < 15000000)
                first.append(ci.next());
            else
                second.append(ci.next());
        }
        // every original partition must still be readable after the split
        for (int i = 0; i < 5000; i++)
            assertFalse(Util.getOnlyPartition(Util.cmd(store, ByteBufferUtil.bytes(i)).build()).isEmpty());
    }
    truncateCF();
    validateCFS(store);
}
Also used : CompactionController(org.apache.cassandra.db.compaction.CompactionController) SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) AbstractCompactionStrategy(org.apache.cassandra.db.compaction.AbstractCompactionStrategy) CompactionIterator(org.apache.cassandra.db.compaction.CompactionIterator) Keyspace(org.apache.cassandra.db.Keyspace) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) LifecycleTransaction(org.apache.cassandra.db.lifecycle.LifecycleTransaction) Test(org.junit.Test)

Example 24 with LifecycleTransaction

Use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in the Apache Cassandra project.

From class SSTableRewriterTest, method testNumberOfFilesAndSizes.

/**
 * Verifies file counts and disk-space accounting during a rewrite: while the
 * rewrite is in flight the live/total disk metrics must still reflect only the
 * original sstable, and once it finishes (and tmp/tmplink files are deleted)
 * the metrics must equal the sum of the new sstables' sizes.
 */
@Test
public void testNumberOfFilesAndSizes() throws Exception {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    SSTableReader s = writeFile(cfs, 1000);
    cfs.addSSTable(s);
    // baseline metrics before the rewrite; used to validate the load delta at the end
    long startStorageMetricsLoad = StorageMetrics.load.getCount();
    long sBytesOnDisk = s.bytesOnDisk();
    Set<SSTableReader> compacting = Sets.newHashSet(s);
    List<SSTableReader> sstables;
    int files = 1;
    try (ISSTableScanner scanner = s.getScanner();
        CompactionController controller = new CompactionController(cfs, compacting, 0);
        LifecycleTransaction txn = cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
        SSTableRewriter rewriter = new SSTableRewriter(txn, 1000, 10000000, false);
        CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID())) {
        rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
        while (ci.hasNext()) {
            rewriter.append(ci.next());
            if (rewriter.currentWriter().getOnDiskFilePointer() > 25000000) {
                rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
                files++;
                // we have one original file plus the ones we have switched out.
                // NOTE: expected value first (JUnit convention) so failures read correctly
                assertEquals(files, cfs.getLiveSSTables().size());
                // mid-rewrite, metrics should still only account for the original sstable
                assertEquals(s.bytesOnDisk(), cfs.metric.liveDiskSpaceUsed.getCount());
                assertEquals(s.bytesOnDisk(), cfs.metric.totalDiskSpaceUsed.getCount());
            }
        }
        sstables = rewriter.finish();
    }
    LifecycleTransaction.waitForDeletions();
    long sum = 0;
    for (SSTableReader x : cfs.getLiveSSTables()) sum += x.bytesOnDisk();
    assertEquals(sum, cfs.metric.liveDiskSpaceUsed.getCount());
    assertEquals(startStorageMetricsLoad - sBytesOnDisk + sum, StorageMetrics.load.getCount());
    assertEquals(files, sstables.size());
    assertEquals(files, cfs.getLiveSSTables().size());
    LifecycleTransaction.waitForDeletions();
    // tmplink and tmp files should be gone:
    assertEquals(sum, cfs.metric.totalDiskSpaceUsed.getCount());
    assertFileCounts(s.descriptor.directory.list());
    validateCFS(cfs);
}
Also used : CompactionController(org.apache.cassandra.db.compaction.CompactionController) SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) CompactionIterator(org.apache.cassandra.db.compaction.CompactionIterator) Keyspace(org.apache.cassandra.db.Keyspace) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) LifecycleTransaction(org.apache.cassandra.db.lifecycle.LifecycleTransaction) Test(org.junit.Test)

Example 25 with LifecycleTransaction

Use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in the Apache Cassandra project.

From class SSTableRewriterTest, method getPositionsTest.

/**
 * Checks position accounting when a writer is opened early: the early-opened
 * reader's covered byte range must start at 0 and abut (not overlap) the range
 * still covered by the compacting reader, which must extend to its full
 * uncompressed length.
 */
@Test
public void getPositionsTest() throws InterruptedException {
    Keyspace ks = Keyspace.open(KEYSPACE);
    ColumnFamilyStore store = ks.getColumnFamilyStore(CF);
    truncate(store);
    SSTableReader source = writeFile(store, 1000);
    store.addSSTable(source);
    Set<SSTableReader> live = new HashSet<>(store.getLiveSSTables());
    assertEquals(1, live.size());
    int now = FBUtilities.nowInSeconds();
    boolean verifiedEarlyOpen = false;
    try (AbstractCompactionStrategy.ScannerList scanners = store.getCompactionStrategyManager().getScanners(live);
        LifecycleTransaction txn = store.getTracker().tryModify(live, OperationType.UNKNOWN);
        SSTableRewriter rewriter = new SSTableRewriter(txn, 1000, 10000000, false);
        CompactionController controller = new CompactionController(store, live, store.gcBefore(now));
        CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, scanners.scanners, controller, now, UUIDGen.getTimeUUID())) {
        rewriter.switchWriter(getWriter(store, live.iterator().next().descriptor.directory, txn));
        while (ci.hasNext()) {
            UnfilteredRowIterator partition = ci.next();
            rewriter.append(partition);
            // once enough has been written, an early-opened reader should exist;
            // inspect it exactly once
            if (!verifiedEarlyOpen && rewriter.currentWriter().getFilePointer() > 1500000) {
                verifiedEarlyOpen = true;
                for (SSTableReader sstable : store.getLiveSSTables()) {
                    if (sstable.openReason != SSTableReader.OpenReason.EARLY)
                        continue;
                    SSTableReader compactingReader = txn.current(live.iterator().next());
                    // min-token to min-token wraps the whole ring, i.e. the full file
                    Collection<Range<Token>> fullRing = Arrays.asList(new Range<>(store.getPartitioner().getMinimumToken(), store.getPartitioner().getMinimumToken()));
                    List<Pair<Long, Long>> earlyPositions = sstable.getPositionsForRanges(fullRing);
                    List<Pair<Long, Long>> compactingPositions = compactingReader.getPositionsForRanges(fullRing);
                    assertEquals(1, earlyPositions.size());
                    assertEquals(1, compactingPositions.size());
                    assertEquals(0, earlyPositions.get(0).left.longValue());
                    // make sure we have no overlap between the early opened file and the compacting one:
                    assertEquals(earlyPositions.get(0).right.longValue(), compactingPositions.get(0).left.longValue());
                    assertEquals(compactingReader.uncompressedLength(), compactingPositions.get(0).right.longValue());
                }
            }
        }
        assertTrue(verifiedEarlyOpen);
        rewriter.finish();
    }
    LifecycleTransaction.waitForDeletions();
    assertEquals(1, assertFileCounts(live.iterator().next().descriptor.directory.list()));
    validateCFS(store);
    truncate(store);
}
Also used : CompactionController(org.apache.cassandra.db.compaction.CompactionController) UnfilteredRowIterator(org.apache.cassandra.db.rows.UnfilteredRowIterator) AbstractCompactionStrategy(org.apache.cassandra.db.compaction.AbstractCompactionStrategy) LifecycleTransaction(org.apache.cassandra.db.lifecycle.LifecycleTransaction) Range(org.apache.cassandra.dht.Range) SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) CompactionIterator(org.apache.cassandra.db.compaction.CompactionIterator) Keyspace(org.apache.cassandra.db.Keyspace) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) Pair(org.apache.cassandra.utils.Pair) Test(org.junit.Test)

Aggregations

LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction)60 SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader)53 Test (org.junit.Test)28 ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore)24 Keyspace (org.apache.cassandra.db.Keyspace)23 CompactionController (org.apache.cassandra.db.compaction.CompactionController)13 CompactionIterator (org.apache.cassandra.db.compaction.CompactionIterator)12 File (java.io.File)10 Range (org.apache.cassandra.dht.Range)7 UUID (java.util.UUID)5 BytesToken (org.apache.cassandra.dht.ByteOrderedPartitioner.BytesToken)5 Token (org.apache.cassandra.dht.Token)5 IOException (java.io.IOException)4 AbstractCompactionStrategy (org.apache.cassandra.db.compaction.AbstractCompactionStrategy)4 CompactionAwareWriter (org.apache.cassandra.db.compaction.writers.CompactionAwareWriter)4 SSTableWriter (org.apache.cassandra.io.sstable.format.SSTableWriter)4 RestorableMeter (org.apache.cassandra.metrics.RestorableMeter)4 ByteBuffer (java.nio.ByteBuffer)3 SchemaLoader.createKeyspace (org.apache.cassandra.SchemaLoader.createKeyspace)3 DatabaseDescriptor (org.apache.cassandra.config.DatabaseDescriptor)3