Example 6 with CompactionController

Use of org.apache.cassandra.db.compaction.CompactionController in project cassandra by apache.

The class SSTableRewriterTest, method getPositionsTest. This test compacts a single large sstable and checks that, once the rewriter has early-opened the partially written replacement, the replacement and the still-compacting original cover adjacent, non-overlapping file positions.

@Test
public void getPositionsTest() throws InterruptedException {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
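    // write a single large sstable via the test helper and register it with the store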
    SSTableReader s = writeFile(cfs, 1000);
    cfs.addSSTable(s);
    Set<SSTableReader> sstables = new HashSet<>(cfs.getLiveSSTables());
    assertEquals(1, sstables.size());
    int nowInSec = FBUtilities.nowInSeconds();
    boolean checked = false;
    try (AbstractCompactionStrategy.ScannerList scanners = cfs.getCompactionStrategyManager().getScanners(sstables);
        LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN);
        SSTableRewriter writer = new SSTableRewriter(txn, 1000, 10000000, false);
        CompactionController controller = new CompactionController(cfs, sstables, cfs.gcBefore(nowInSec));
        CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, scanners.scanners, controller, nowInSec, UUIDGen.getTimeUUID())) {
        writer.switchWriter(getWriter(cfs, sstables.iterator().next().descriptor.directory, txn));
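        // drain the compaction; once more than ~1.5 MB has been written, inspect the early-opened readers once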
        while (ci.hasNext()) {
            UnfilteredRowIterator row = ci.next();
            writer.append(row);
            if (!checked && writer.currentWriter().getFilePointer() > 1500000) {
                checked = true;
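                // compare the early-opened partial sstable with the rewritten original it will replace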
                for (SSTableReader sstable : cfs.getLiveSSTables()) {
                    if (sstable.openReason == SSTableReader.OpenReason.EARLY) {
                        SSTableReader c = txn.current(sstables.iterator().next());
                        Collection<Range<Token>> r = Arrays.asList(new Range<>(cfs.getPartitioner().getMinimumToken(), cfs.getPartitioner().getMinimumToken()));
                        List<Pair<Long, Long>> tmplinkPositions = sstable.getPositionsForRanges(r);
                        List<Pair<Long, Long>> compactingPositions = c.getPositionsForRanges(r);
                        assertEquals(1, tmplinkPositions.size());
                        assertEquals(1, compactingPositions.size());
                        assertEquals(0, tmplinkPositions.get(0).left.longValue());
                        // make sure we have no overlap between the early opened file and the compacting one:
                        assertEquals(tmplinkPositions.get(0).right.longValue(), compactingPositions.get(0).left.longValue());
                        assertEquals(c.uncompressedLength(), compactingPositions.get(0).right.longValue());
                    }
                }
            }
        }
        assertTrue(checked);
        writer.finish();
    }
    LifecycleTransaction.waitForDeletions();
    assertEquals(1, assertFileCounts(sstables.iterator().next().descriptor.directory.list()));
    validateCFS(cfs);
    truncate(cfs);
}
Also used: CompactionController (org.apache.cassandra.db.compaction.CompactionController), UnfilteredRowIterator (org.apache.cassandra.db.rows.UnfilteredRowIterator), AbstractCompactionStrategy (org.apache.cassandra.db.compaction.AbstractCompactionStrategy), LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction), Range (org.apache.cassandra.dht.Range), SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader), CompactionIterator (org.apache.cassandra.db.compaction.CompactionIterator), Keyspace (org.apache.cassandra.db.Keyspace), ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore), Pair (org.apache.cassandra.utils.Pair), Test (org.junit.Test)
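
The SSTableRewriterTest examples on this page all share the same resource-management skeleton. A minimal sketch distilled from them follows; cfs, sstables, and the getWriter helper are the test class's own names, and the numeric constructor arguments are illustrative values taken from the snippets, not recommendations.

// Shared skeleton: every resource participating in the rewrite is opened in a
// single try-with-resources block, so the iterator, controller, rewriter,
// transaction, and scanners are closed in reverse order even if an assertion
// fails mid-compaction.
int nowInSec = FBUtilities.nowInSeconds();
try (AbstractCompactionStrategy.ScannerList scanners = cfs.getCompactionStrategyManager().getScanners(sstables);
    LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN);
    SSTableRewriter writer = new SSTableRewriter(txn, 1000, 10000000, false);
    CompactionController controller = new CompactionController(cfs, sstables, cfs.gcBefore(nowInSec));
    CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, scanners.scanners, controller, nowInSec, UUIDGen.getTimeUUID())) {
    writer.switchWriter(getWriter(cfs, sstables.iterator().next().descriptor.directory, txn));
    while (ci.hasNext())
        writer.append(ci.next());
    // on the success path the rewriter commits the transaction;
    // failure paths in these tests call writer.abort() instead
    writer.finish();
}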

Example 7 with CompactionController

Use of org.apache.cassandra.db.compaction.CompactionController in project cassandra by apache.

The class SSTableRewriterTest, method testAbortHelper. This helper drives a rewrite partway through, optionally throws during prepareToCommit, then aborts, and verifies that the abort leaves only the original sstable's files on disk, for both online and offline transactions.

private void testAbortHelper(boolean earlyException, boolean offline) throws Exception {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    SSTableReader s = writeFile(cfs, 1000);
    if (!offline)
        cfs.addSSTable(s);
    Set<SSTableReader> compacting = Sets.newHashSet(s);
    try (ISSTableScanner scanner = compacting.iterator().next().getScanner();
        CompactionController controller = new CompactionController(cfs, compacting, 0);
        LifecycleTransaction txn = offline ? LifecycleTransaction.offline(OperationType.UNKNOWN, compacting) : cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
        SSTableRewriter rewriter = new SSTableRewriter(txn, 100, 10000000, false);
        CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID())) {
        rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
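        // rewrite, moving to a fresh writer each time ~25 MB have reached disk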
        while (ci.hasNext()) {
            rewriter.append(ci.next());
            if (rewriter.currentWriter().getOnDiskFilePointer() > 25000000) {
                rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
            }
        }
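        // exercise the failure path: optionally throw during prepare, then abort; abort must release everything cleanly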
        try {
            rewriter.throwDuringPrepare(earlyException);
            rewriter.prepareToCommit();
        } catch (Throwable t) {
            rewriter.abort();
        }
    } finally {
        if (offline)
            s.selfRef().release();
    }
    LifecycleTransaction.waitForDeletions();
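    // after an abort, only the original sstable's files should remain on disk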
    int filecount = assertFileCounts(s.descriptor.directory.list());
    assertEquals(1, filecount);
    if (!offline) {
        assertEquals(1, cfs.getLiveSSTables().size());
        validateCFS(cfs);
        truncate(cfs);
    } else {
        assertEquals(0, cfs.getLiveSSTables().size());
        cfs.truncateBlocking();
    }
    filecount = assertFileCounts(s.descriptor.directory.list());
    if (offline) {
        // the file is not added to the CFS, therefore not truncated away above
        assertEquals(1, filecount);
        for (File f : s.descriptor.directory.listFiles()) {
            FileUtils.deleteRecursive(f);
        }
        filecount = assertFileCounts(s.descriptor.directory.list());
    }
    assertEquals(0, filecount);
    truncate(cfs);
}
Also used: CompactionController (org.apache.cassandra.db.compaction.CompactionController), SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader), CompactionIterator (org.apache.cassandra.db.compaction.CompactionIterator), Keyspace (org.apache.cassandra.db.Keyspace), ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore), LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction), File (java.io.File)

Example 8 with CompactionController

Use of org.apache.cassandra.db.compaction.CompactionController in project cassandra by apache.

The class SSTableRewriterTest, method basicTest. This test flushes 100 rows into a single sstable, rewrites it end to end, and verifies that exactly one data file remains afterwards.

@Test
public void basicTest() throws InterruptedException {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
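    // write 100 small partitions and flush them into a single sstable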
    for (int j = 0; j < 100; j++) {
        new RowUpdateBuilder(cfs.metadata(), j, String.valueOf(j)).clustering("0").add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER).build().apply();
    }
    cfs.forceBlockingFlush();
    Set<SSTableReader> sstables = new HashSet<>(cfs.getLiveSSTables());
    assertEquals(1, sstables.size());
    assertEquals(sstables.iterator().next().bytesOnDisk(), cfs.metric.liveDiskSpaceUsed.getCount());
    int nowInSec = FBUtilities.nowInSeconds();
    try (AbstractCompactionStrategy.ScannerList scanners = cfs.getCompactionStrategyManager().getScanners(sstables);
        LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN);
        SSTableRewriter writer = SSTableRewriter.constructKeepingOriginals(txn, false, 1000);
        CompactionController controller = new CompactionController(cfs, sstables, cfs.gcBefore(nowInSec));
        CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, scanners.scanners, controller, nowInSec, UUIDGen.getTimeUUID())) {
        writer.switchWriter(getWriter(cfs, sstables.iterator().next().descriptor.directory, txn));
        while (ci.hasNext()) {
            writer.append(ci.next());
        }
        writer.finish();
    }
    LifecycleTransaction.waitForDeletions();
    assertEquals(1, assertFileCounts(sstables.iterator().next().descriptor.directory.list()));
    validateCFS(cfs);
    truncate(cfs);
}
Also used: CompactionController (org.apache.cassandra.db.compaction.CompactionController), AbstractCompactionStrategy (org.apache.cassandra.db.compaction.AbstractCompactionStrategy), LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction), SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader), RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder), CompactionIterator (org.apache.cassandra.db.compaction.CompactionIterator), Keyspace (org.apache.cassandra.db.Keyspace), ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore), Test (org.junit.Test)

Example 9 with CompactionController

Use of org.apache.cassandra.db.compaction.CompactionController in project cassandra by apache.

The class SSTableRewriterTest, method testAllKeysReadable. This test rewrites a compacted sstable while switching writers every ten partitions, validating after every append that all keys remain readable.

@Test
public void testAllKeysReadable() throws Exception {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    for (int i = 0; i < 100; i++) {
        String key = Integer.toString(i);
        for (int j = 0; j < 10; j++)
            new RowUpdateBuilder(cfs.metadata(), 100, key)
                .clustering(Integer.toString(j))
                .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)
                .build()
                .apply();
    }
    cfs.forceBlockingFlush();
    cfs.forceMajorCompaction();
    validateKeys(keyspace);
    assertEquals(1, cfs.getLiveSSTables().size());
    SSTableReader s = cfs.getLiveSSTables().iterator().next();
    Set<SSTableReader> compacting = new HashSet<>();
    compacting.add(s);
    int keyCount = 0;
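    // rewrite while switching writers every 10 partitions; all keys must remain readable after every append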
    try (ISSTableScanner scanner = compacting.iterator().next().getScanner();
        CompactionController controller = new CompactionController(cfs, compacting, 0);
        LifecycleTransaction txn = cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
        SSTableRewriter rewriter = new SSTableRewriter(txn, 1000, 1, false);
        CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID())) {
        rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
        while (ci.hasNext()) {
            rewriter.append(ci.next());
            if (keyCount % 10 == 0) {
                rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
            }
            keyCount++;
            validateKeys(keyspace);
        }
        rewriter.finish();
    }
    validateKeys(keyspace);
    LifecycleTransaction.waitForDeletions();
    validateCFS(cfs);
    truncate(cfs);
}
Also used: CompactionController (org.apache.cassandra.db.compaction.CompactionController), LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction), SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader), RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder), CompactionIterator (org.apache.cassandra.db.compaction.CompactionIterator), Keyspace (org.apache.cassandra.db.Keyspace), ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore), Test (org.junit.Test)

Example 10 with CompactionController

Use of org.apache.cassandra.db.compaction.CompactionController in project eiger by wlloyd.

The class IncomingStreamReader, method streamIn. This method reads streamed sstable sections row by row, appends them to a new SSTableWriter, and uses a CompactionController to rebuild the row cache for rows that were already cached.

private SSTableReader streamIn(DataInput input, PendingFile localFile, PendingFile remoteFile) throws IOException {
    ColumnFamilyStore cfs = Table.open(localFile.desc.ksname).getColumnFamilyStore(localFile.desc.cfname);
    DecoratedKey key;
    SSTableWriter writer = new SSTableWriter(localFile.getFilename(), remoteFile.estimatedKeys);
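    // a gcBefore of Integer.MIN_VALUE means no tombstone is old enough to purge during this rebuild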
    CompactionController controller = new CompactionController(cfs, Collections.<SSTableReader>emptyList(), Integer.MIN_VALUE, true);
    try {
        BytesReadTracker in = new BytesReadTracker(input);
        for (Pair<Long, Long> section : localFile.sections) {
            long length = section.right - section.left;
            long bytesRead = 0;
            while (bytesRead < length) {
                in.reset(0);
                key = SSTableReader.decodeKey(StorageService.getPartitioner(), localFile.desc, ByteBufferUtil.readWithShortLength(in));
                long dataSize = SSTableReader.readRowSize(in, localFile.desc);
                ColumnFamily cached = cfs.getRawCachedRow(key);
                if (cached != null && remoteFile.type == OperationType.AES && dataSize <= DatabaseDescriptor.getInMemoryCompactionLimit()) {
                    // need to update row cache
                    // Note: Because we won't just echo the columns, there is no need to use the PRESERVE_SIZE flag, contrarily to what appendFromStream does below
                    SSTableIdentityIterator iter = new SSTableIdentityIterator(cfs.metadata, in, key, 0, dataSize, IColumnSerializer.Flag.FROM_REMOTE);
                    PrecompactedRow row = new PrecompactedRow(controller, Collections.singletonList(iter));
                    // We don't expire anything so the row shouldn't be empty
                    assert !row.isEmpty();
                    writer.append(row);
                    // row append does not update the max timestamp on its own
                    writer.updateMaxTimestamp(row.maxTimestamp());
                    // update cache
                    ColumnFamily cf = row.getFullColumnFamily();
                    cfs.updateRowCache(key, cf);
                } else {
                    writer.appendFromStream(key, cfs.metadata, dataSize, in);
                    cfs.invalidateCachedRow(key);
                }
                bytesRead += in.getBytesRead();
                remoteFile.progress += in.getBytesRead();
            }
        }
        return writer.closeAndOpenReader();
    } catch (Exception e) {
        writer.abort();
        if (e instanceof IOException)
            throw (IOException) e;
        else
            throw FBUtilities.unchecked(e);
    }
}
Also used: CompactionController (org.apache.cassandra.db.compaction.CompactionController), DecoratedKey (org.apache.cassandra.db.DecoratedKey), ColumnFamily (org.apache.cassandra.db.ColumnFamily), BytesReadTracker (org.apache.cassandra.utils.BytesReadTracker), ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore), PrecompactedRow (org.apache.cassandra.db.compaction.PrecompactedRow)

Aggregations

CompactionController (org.apache.cassandra.db.compaction.CompactionController): 15
ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore): 14
SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader): 14
Keyspace (org.apache.cassandra.db.Keyspace): 13
CompactionIterator (org.apache.cassandra.db.compaction.CompactionIterator): 13
LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction): 13
Test (org.junit.Test): 11
AbstractCompactionStrategy (org.apache.cassandra.db.compaction.AbstractCompactionStrategy): 5
File (java.io.File): 2
DecoratedKey (org.apache.cassandra.db.DecoratedKey): 2
RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder): 2
ColumnFamily (org.apache.cassandra.db.ColumnFamily): 1
PrecompactedRow (org.apache.cassandra.db.compaction.PrecompactedRow): 1
UnfilteredRowIterator (org.apache.cassandra.db.rows.UnfilteredRowIterator): 1
Range (org.apache.cassandra.dht.Range): 1
Descriptor (org.apache.cassandra.io.sstable.Descriptor): 1
SSTableRewriter (org.apache.cassandra.io.sstable.SSTableRewriter): 1
TableMetadataRef (org.apache.cassandra.schema.TableMetadataRef): 1
BytesReadTracker (org.apache.cassandra.utils.BytesReadTracker): 1
Pair (org.apache.cassandra.utils.Pair): 1