
Example 26 with LifecycleTransaction

Use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in the Apache Cassandra project.

From the class SSTableRewriterTest, method testAbortHelper: writes a large sstable, rewrites it through an SSTableRewriter inside either an offline or a tracked LifecycleTransaction, forces an exception during prepareToCommit, aborts, and then verifies that only the original files are left on disk.

private void testAbortHelper(boolean earlyException, boolean offline) throws Exception {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    SSTableReader s = writeFile(cfs, 1000);
    if (!offline)
        cfs.addSSTable(s);
    Set<SSTableReader> compacting = Sets.newHashSet(s);
    try (ISSTableScanner scanner = compacting.iterator().next().getScanner();
        CompactionController controller = new CompactionController(cfs, compacting, 0);
        LifecycleTransaction txn = offline ? LifecycleTransaction.offline(OperationType.UNKNOWN, compacting) : cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
        SSTableRewriter rewriter = new SSTableRewriter(txn, 100, 10000000, false);
        CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID())) {
        rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
        while (ci.hasNext()) {
            rewriter.append(ci.next());
            if (rewriter.currentWriter().getOnDiskFilePointer() > 25000000) {
                rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
            }
        }
        try {
            rewriter.throwDuringPrepare(earlyException);
            rewriter.prepareToCommit();
        } catch (Throwable t) {
            rewriter.abort();
        }
    } finally {
        if (offline)
            s.selfRef().release();
    }
    LifecycleTransaction.waitForDeletions();
    int filecount = assertFileCounts(s.descriptor.directory.list());
    assertEquals(filecount, 1);
    if (!offline) {
        assertEquals(1, cfs.getLiveSSTables().size());
        validateCFS(cfs);
        truncate(cfs);
    } else {
        assertEquals(0, cfs.getLiveSSTables().size());
        cfs.truncateBlocking();
    }
    filecount = assertFileCounts(s.descriptor.directory.list());
    if (offline) {
        // the file is not added to the CFS, therefore not truncated away above
        assertEquals(1, filecount);
        for (File f : s.descriptor.directory.listFiles()) {
            FileUtils.deleteRecursive(f);
        }
        filecount = assertFileCounts(s.descriptor.directory.list());
    }
    assertEquals(0, filecount);
    truncate(cfs);
}
Also used: CompactionController (org.apache.cassandra.db.compaction.CompactionController), SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader), CompactionIterator (org.apache.cassandra.db.compaction.CompactionIterator), Keyspace (org.apache.cassandra.db.Keyspace), ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore), LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction), File (java.io.File)
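
The detail worth calling out here is how the transaction is obtained and torn down in the offline case. A condensed sketch of just that branch, reusing the variable names from the method above (cfs, compacting, s, rewriter), not a standalone program:

// Sketch only: the two ways the test above obtains its LifecycleTransaction.
LifecycleTransaction txn = offline
    ? LifecycleTransaction.offline(OperationType.UNKNOWN, compacting)   // standalone, not registered with the Tracker
    : cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);    // registered, so the CFS marks these sstables as compacting
// ... drive the SSTableRewriter as in the method above; rewriter.abort() rolls everything back ...
if (offline)
    s.selfRef().release();   // an offline transaction does not manage the reader's reference count, so release it manually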

Example 27 with LifecycleTransaction

Use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in the Apache Cassandra project.

From the class SSTableRewriterTest, method basicTest: flushes 100 rows into a single sstable, rewrites it to completion inside a tracked LifecycleTransaction, and then validates the on-disk files and the ColumnFamilyStore state.

@Test
public void basicTest() throws InterruptedException {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    for (int j = 0; j < 100; j++) {
        new RowUpdateBuilder(cfs.metadata(), j, String.valueOf(j)).clustering("0").add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER).build().apply();
    }
    cfs.forceBlockingFlush();
    Set<SSTableReader> sstables = new HashSet<>(cfs.getLiveSSTables());
    assertEquals(1, sstables.size());
    assertEquals(sstables.iterator().next().bytesOnDisk(), cfs.metric.liveDiskSpaceUsed.getCount());
    int nowInSec = FBUtilities.nowInSeconds();
    try (AbstractCompactionStrategy.ScannerList scanners = cfs.getCompactionStrategyManager().getScanners(sstables);
        LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN);
        SSTableRewriter writer = SSTableRewriter.constructKeepingOriginals(txn, false, 1000);
        CompactionController controller = new CompactionController(cfs, sstables, cfs.gcBefore(nowInSec));
        CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, scanners.scanners, controller, nowInSec, UUIDGen.getTimeUUID())) {
        writer.switchWriter(getWriter(cfs, sstables.iterator().next().descriptor.directory, txn));
        while (ci.hasNext()) {
            writer.append(ci.next());
        }
        writer.finish();
    }
    LifecycleTransaction.waitForDeletions();
    assertEquals(1, assertFileCounts(sstables.iterator().next().descriptor.directory.list()));
    validateCFS(cfs);
    truncate(cfs);
}
Also used: CompactionController (org.apache.cassandra.db.compaction.CompactionController), AbstractCompactionStrategy (org.apache.cassandra.db.compaction.AbstractCompactionStrategy), LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction), SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader), RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder), CompactionIterator (org.apache.cassandra.db.compaction.CompactionIterator), Keyspace (org.apache.cassandra.db.Keyspace), ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore), Test (org.junit.Test)
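
Where the previous example exercises the abort path, this one exercises the commit path. Stripped down to its shape (same variable names and getWriter helper as in the method above, so this is an outline rather than a runnable program):

// Outline of the commit path from the test above.
try (LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN);
    SSTableRewriter writer = SSTableRewriter.constructKeepingOriginals(txn, false, 1000)) {
    writer.switchWriter(getWriter(cfs, sstables.iterator().next().descriptor.directory, txn));
    // ... append every partition produced by the CompactionIterator ...
    writer.finish();                        // prepares and commits the transaction; the new sstables replace the originals
}
// block until the obsoleted originals have actually been deleted from disk
LifecycleTransaction.waitForDeletions();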

Example 28 with LifecycleTransaction

Use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in the Apache Cassandra project.

From the class SSTableRewriterTest, method testAllKeysReadable: compacts 100 partitions into one sstable, rewrites it while switching output writers every ten partitions, and checks after every append that every key is still readable.

@Test
public void testAllKeysReadable() throws Exception {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    for (int i = 0; i < 100; i++) {
        String key = Integer.toString(i);
        for (int j = 0; j < 10; j++)
            new RowUpdateBuilder(cfs.metadata(), 100, key).clustering(Integer.toString(j)).add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER).build().apply();
    }
    cfs.forceBlockingFlush();
    cfs.forceMajorCompaction();
    validateKeys(keyspace);
    assertEquals(1, cfs.getLiveSSTables().size());
    SSTableReader s = cfs.getLiveSSTables().iterator().next();
    Set<SSTableReader> compacting = new HashSet<>();
    compacting.add(s);
    int keyCount = 0;
    try (ISSTableScanner scanner = compacting.iterator().next().getScanner();
        CompactionController controller = new CompactionController(cfs, compacting, 0);
        LifecycleTransaction txn = cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
        SSTableRewriter rewriter = new SSTableRewriter(txn, 1000, 1, false);
        CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID())) {
        rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
        while (ci.hasNext()) {
            rewriter.append(ci.next());
            if (keyCount % 10 == 0) {
                rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
            }
            keyCount++;
            validateKeys(keyspace);
        }
        rewriter.finish();
    }
    validateKeys(keyspace);
    LifecycleTransaction.waitForDeletions();
    validateCFS(cfs);
    truncate(cfs);
}
Also used: CompactionController (org.apache.cassandra.db.compaction.CompactionController), LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction), SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader), RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder), CompactionIterator (org.apache.cassandra.db.compaction.CompactionIterator), Keyspace (org.apache.cassandra.db.Keyspace), ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore), Test (org.junit.Test)
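
The property under test is that no key ever becomes unreadable while the rewrite is in flight, even though the output writer is switched every ten partitions. The same loop, with the invariant spelled out in comments (all names as in the method above):

while (ci.hasNext()) {
    rewriter.append(ci.next());
    if (keyCount % 10 == 0)
        rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));   // start a new output sstable mid-rewrite
    keyCount++;
    validateKeys(keyspace);   // every key must still be readable even though the rewrite is only partially done
}
rewriter.finish();            // commit; validateKeys and validateCFS afterwards confirm nothing was lost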

Example 29 with LifecycleTransaction

Use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in the Apache Cassandra project.

From the class IndexSummaryManagerTest, method testRebuildAtSamplingLevel: flushes 256 rows into one sstable, then repeatedly clones that sstable at each index-summary sampling level inside a single LifecycleTransaction, checking the resulting summary size at every step.

@Test
public void testRebuildAtSamplingLevel() throws IOException {
    String ksname = KEYSPACE1;
    String cfname = CF_STANDARDLOWiINTERVAL;
    Keyspace keyspace = Keyspace.open(ksname);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfname);
    cfs.truncateBlocking();
    cfs.disableAutoCompaction();
    ByteBuffer value = ByteBuffer.wrap(new byte[100]);
    int numRows = 256;
    for (int row = 0; row < numRows; row++) {
        String key = String.format("%3d", row);
        new RowUpdateBuilder(cfs.metadata(), 0, key).clustering("column").add("val", value).build().applyUnsafe();
    }
    cfs.forceBlockingFlush();
    List<SSTableReader> sstables = new ArrayList<>(cfs.getLiveSSTables());
    assertEquals(1, sstables.size());
    SSTableReader original = sstables.get(0);
    SSTableReader sstable = original;
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(asList(sstable), OperationType.UNKNOWN)) {
        for (int samplingLevel = 1; samplingLevel < BASE_SAMPLING_LEVEL; samplingLevel++) {
            sstable = sstable.cloneWithNewSummarySamplingLevel(cfs, samplingLevel);
            assertEquals(samplingLevel, sstable.getIndexSummarySamplingLevel());
            int expectedSize = (numRows * samplingLevel) / (cfs.metadata().params.minIndexInterval * BASE_SAMPLING_LEVEL);
            assertEquals(expectedSize, sstable.getIndexSummarySize(), 1);
            txn.update(sstable, true);
            txn.checkpoint();
        }
        txn.finish();
    }
}
Also used: SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader), RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder), Keyspace (org.apache.cassandra.db.Keyspace), ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore), LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction), ByteBuffer (java.nio.ByteBuffer), Test (org.junit.Test)
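
Here the transaction is not wrapped around a compaction at all; it is used to swap a reader for a clone with a resampled index summary, one sampling level at a time. The core idiom is update plus checkpoint inside the transaction, followed by a single finish (a sketch using the same names as the method above):

try (LifecycleTransaction txn = cfs.getTracker().tryModify(asList(sstable), OperationType.UNKNOWN)) {
    for (int samplingLevel = 1; samplingLevel < BASE_SAMPLING_LEVEL; samplingLevel++) {
        sstable = sstable.cloneWithNewSummarySamplingLevel(cfs, samplingLevel);   // new reader with a resampled summary
        txn.update(sstable, true);   // true: this reader replaces one of the transaction's original sstables
        txn.checkpoint();            // publish the swap before the next iteration clones again
    }
    txn.finish();                    // commit the whole chain of replacements
}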

Example 30 with LifecycleTransaction

Use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in the Apache Cassandra project.

From the class IndexSummaryManagerTest, method testChangeMinIndexInterval: raises and lowers min_index_interval, redistributes index summaries inside short-lived LifecycleTransactions under different space budgets, and verifies that the effective index interval and summary size respond as expected.

@Test
public void testChangeMinIndexInterval() throws IOException {
    String ksname = KEYSPACE1;
    // index interval of 8, no key caching
    String cfname = CF_STANDARDLOWiINTERVAL;
    Keyspace keyspace = Keyspace.open(ksname);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfname);
    int numSSTables = 1;
    int numRows = 256;
    createSSTables(ksname, cfname, numSSTables, numRows);
    List<SSTableReader> sstables = new ArrayList<>(cfs.getLiveSSTables());
    for (SSTableReader sstable : sstables)
        sstable.overrideReadMeter(new RestorableMeter(100.0, 100.0));
    for (SSTableReader sstable : sstables)
        assertEquals(cfs.metadata().params.minIndexInterval, sstable.getEffectiveIndexInterval(), 0.001);
    // double the min_index_interval
    MigrationManager.announceTableUpdate(cfs.metadata().unbuild().minIndexInterval(originalMinIndexInterval * 2).build(), true);
    IndexSummaryManager.instance.redistributeSummaries();
    for (SSTableReader sstable : cfs.getLiveSSTables()) {
        assertEquals(cfs.metadata().params.minIndexInterval, sstable.getEffectiveIndexInterval(), 0.001);
        assertEquals(numRows / cfs.metadata().params.minIndexInterval, sstable.getIndexSummarySize());
    }
    // return min_index_interval to its original value
    MigrationManager.announceTableUpdate(cfs.metadata().unbuild().minIndexInterval(originalMinIndexInterval).build(), true);
    IndexSummaryManager.instance.redistributeSummaries();
    for (SSTableReader sstable : cfs.getLiveSSTables()) {
        assertEquals(cfs.metadata().params.minIndexInterval, sstable.getEffectiveIndexInterval(), 0.001);
        assertEquals(numRows / cfs.metadata().params.minIndexInterval, sstable.getIndexSummarySize());
    }
    // halve the min_index_interval, but constrain the available space to exactly what we have now; as a result,
    // the summary shouldn't change
    MigrationManager.announceTableUpdate(cfs.metadata().unbuild().minIndexInterval(originalMinIndexInterval / 2).build(), true);
    SSTableReader sstable = cfs.getLiveSSTables().iterator().next();
    long summarySpace = sstable.getIndexSummaryOffHeapSize();
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(asList(sstable), OperationType.UNKNOWN)) {
        redistributeSummaries(Collections.EMPTY_LIST, of(cfs.metadata.id, txn), summarySpace);
    }
    sstable = cfs.getLiveSSTables().iterator().next();
    assertEquals(originalMinIndexInterval, sstable.getEffectiveIndexInterval(), 0.001);
    assertEquals(numRows / originalMinIndexInterval, sstable.getIndexSummarySize());
    // keep the min_index_interval the same, but now give the summary enough space to grow by 50%
    double previousInterval = sstable.getEffectiveIndexInterval();
    int previousSize = sstable.getIndexSummarySize();
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(asList(sstable), OperationType.UNKNOWN)) {
        redistributeSummaries(Collections.EMPTY_LIST, of(cfs.metadata.id, txn), (long) Math.ceil(summarySpace * 1.5));
    }
    sstable = cfs.getLiveSSTables().iterator().next();
    assertEquals(previousSize * 1.5, (double) sstable.getIndexSummarySize(), 1);
    assertEquals(previousInterval * (1.0 / 1.5), sstable.getEffectiveIndexInterval(), 0.001);
    // return min_index_interval to its original value (double it), but only give the summary enough space
    // to have an effective index interval of twice the new min
    MigrationManager.announceTableUpdate(cfs.metadata().unbuild().minIndexInterval(originalMinIndexInterval).build(), true);
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(asList(sstable), OperationType.UNKNOWN)) {
        redistributeSummaries(Collections.EMPTY_LIST, of(cfs.metadata.id, txn), (long) Math.ceil(summarySpace / 2.0));
    }
    sstable = cfs.getLiveSSTables().iterator().next();
    assertEquals(originalMinIndexInterval * 2, sstable.getEffectiveIndexInterval(), 0.001);
    assertEquals(numRows / (originalMinIndexInterval * 2), sstable.getIndexSummarySize());
    // raise the min_index_interval above our current effective interval, but set the max_index_interval lower
    // than what we actually have space for (meaning the index summary would ideally be smaller, but this would
    // result in an effective interval above the new max)
    MigrationManager.announceTableUpdate(cfs.metadata().unbuild().minIndexInterval(originalMinIndexInterval * 4).build(), true);
    MigrationManager.announceTableUpdate(cfs.metadata().unbuild().maxIndexInterval(originalMinIndexInterval * 4).build(), true);
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(asList(sstable), OperationType.UNKNOWN)) {
        redistributeSummaries(Collections.EMPTY_LIST, of(cfs.metadata.id, txn), 10);
    }
    sstable = cfs.getLiveSSTables().iterator().next();
    assertEquals(cfs.metadata().params.minIndexInterval, sstable.getEffectiveIndexInterval(), 0.001);
}
Also used: RestorableMeter (org.apache.cassandra.metrics.RestorableMeter), SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader), Keyspace (org.apache.cassandra.db.Keyspace), ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore), LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction), Test (org.junit.Test)
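
Every redistribution attempt in this test follows the same pattern: wrap the candidate sstable in its own short-lived LifecycleTransaction, keyed by table id, and vary only the space budget. A sketch of that pattern (here 'of' is assumed to be the statically imported ImmutableMap.of used by the test, and summarySpace is the budget being varied):

try (LifecycleTransaction txn = cfs.getTracker().tryModify(asList(sstable), OperationType.UNKNOWN)) {
    // map of table id -> transaction; the last argument is the off-heap space the summaries must fit into
    redistributeSummaries(Collections.EMPTY_LIST, of(cfs.metadata.id, txn), summarySpace);
}
sstable = cfs.getLiveSSTables().iterator().next();   // re-fetch: redistribution may have replaced the reader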

Aggregations

LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction): 60
SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader): 53
Test (org.junit.Test): 28
ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore): 24
Keyspace (org.apache.cassandra.db.Keyspace): 23
CompactionController (org.apache.cassandra.db.compaction.CompactionController): 13
CompactionIterator (org.apache.cassandra.db.compaction.CompactionIterator): 12
File (java.io.File): 10
Range (org.apache.cassandra.dht.Range): 7
UUID (java.util.UUID): 5
BytesToken (org.apache.cassandra.dht.ByteOrderedPartitioner.BytesToken): 5
Token (org.apache.cassandra.dht.Token): 5
IOException (java.io.IOException): 4
AbstractCompactionStrategy (org.apache.cassandra.db.compaction.AbstractCompactionStrategy): 4
CompactionAwareWriter (org.apache.cassandra.db.compaction.writers.CompactionAwareWriter): 4
SSTableWriter (org.apache.cassandra.io.sstable.format.SSTableWriter): 4
RestorableMeter (org.apache.cassandra.metrics.RestorableMeter): 4
ByteBuffer (java.nio.ByteBuffer): 3
SchemaLoader.createKeyspace (org.apache.cassandra.SchemaLoader.createKeyspace): 3
DatabaseDescriptor (org.apache.cassandra.config.DatabaseDescriptor): 3