
Example 51 with LifecycleTransaction

use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in project cassandra by apache.

the class IndexSummaryManagerTest method testCancelIndex.

@Test
public void testCancelIndex() throws Exception {
    String ksname = KEYSPACE1;
    // index interval of 8, no key caching
    String cfname = CF_STANDARDLOWiINTERVAL;
    Keyspace keyspace = Keyspace.open(ksname);
    final ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfname);
    final int numSSTables = 4;
    int numRows = 256;
    createSSTables(ksname, cfname, numSSTables, numRows);
    final List<SSTableReader> sstables = new ArrayList<>(cfs.getLiveSSTables());
    for (SSTableReader sstable : sstables) sstable.overrideReadMeter(new RestorableMeter(100.0, 100.0));
    final long singleSummaryOffHeapSpace = sstables.get(0).getIndexSummaryOffHeapSize();
    // everything should get cut in half
    final AtomicReference<CompactionInterruptedException> exception = new AtomicReference<>();
    // barrier to control when redistribution runs
    final CountDownLatch barrier = new CountDownLatch(1);
    Thread t = NamedThreadFactory.createThread(new Runnable() {

        public void run() {
            try {
                // Don't leave enough space for even the minimal index summaries
                try (LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN)) {
                    IndexSummaryManager.redistributeSummaries(new ObservableRedistribution(Collections.EMPTY_LIST, of(cfs.metadata.id, txn), singleSummaryOffHeapSpace, barrier));
                }
            } catch (CompactionInterruptedException ex) {
                exception.set(ex);
            } catch (IOException ignored) {
            }
        }
    });
    t.start();
    while (CompactionManager.instance.getActiveCompactions() == 0 && t.isAlive()) Thread.sleep(1);
    // to ensure that the stop condition check in IndexSummaryRedistribution::redistributeSummaries
    // is made *after* the halt request is made to the CompactionManager, don't allow the redistribution
    // to proceed until stopCompaction has been called.
    CompactionManager.instance.stopCompaction("INDEX_SUMMARY");
    // allows the redistribution to proceed
    barrier.countDown();
    t.join();
    assertNotNull("Expected compaction interrupted exception", exception.get());
    assertTrue("Expected no active compactions", CompactionMetrics.getCompactions().isEmpty());
    Set<SSTableReader> beforeRedistributionSSTables = new HashSet<>(sstables);
    Set<SSTableReader> afterCancelSSTables = new HashSet<>(cfs.getLiveSSTables());
    Set<SSTableReader> disjoint = Sets.symmetricDifference(beforeRedistributionSSTables, afterCancelSSTables);
    assertTrue(String.format("Mismatched files before and after cancelling redistribution: %s", Joiner.on(",").join(disjoint)), disjoint.isEmpty());
    validateData(cfs, numRows);
}
Also used : CompactionInterruptedException(org.apache.cassandra.db.compaction.CompactionInterruptedException) LifecycleTransaction(org.apache.cassandra.db.lifecycle.LifecycleTransaction) AtomicReference(java.util.concurrent.atomic.AtomicReference) IOException(java.io.IOException) RestorableMeter(org.apache.cassandra.metrics.RestorableMeter) SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) Keyspace(org.apache.cassandra.db.Keyspace) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) Test(org.junit.Test)
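
The core idiom in this test is guarding the sstables with a LifecycleTransaction before their index summaries are touched. Below is a minimal sketch of that guard, assuming tryModify returns null when another operation already holds the readers; the helper name and the null handling are illustrative and not part of the test above.

// Illustrative helper, not from the Cassandra sources: acquire the readers, run the
// work, and rely on try-with-resources to abort the transaction if finish() is never called.
static boolean withCompactingGuard(ColumnFamilyStore cfs,
                                   Collection<SSTableReader> readers,
                                   Runnable work)
{
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(readers, OperationType.UNKNOWN))
    {
        if (txn == null)
            return false;   // assumed behaviour: readers already claimed by another operation
        work.run();         // e.g. a summary redistribution over txn, as in the test above
        return true;
    }                       // closing without finish() is expected to abort the transaction
}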

Example 52 with LifecycleTransaction

use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in project cassandra by apache.

the class SSTableReaderTest method testIndexSummaryReplacement.

@Test
public void testIndexSummaryReplacement() throws IOException, ExecutionException, InterruptedException {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    // index interval of 8, no key caching
    final ColumnFamilyStore store = keyspace.getColumnFamilyStore("StandardLowIndexInterval");
    CompactionManager.instance.disableAutoCompaction();
    final int NUM_PARTITIONS = 512;
    for (int j = 0; j < NUM_PARTITIONS; j++) {
        new RowUpdateBuilder(store.metadata(), j, String.format("%3d", j)).clustering("0").add("val", String.format("%3d", j)).build().applyUnsafe();
    }
    store.forceBlockingFlush();
    CompactionManager.instance.performMaximal(store, false);
    Collection<SSTableReader> sstables = store.getLiveSSTables();
    assert sstables.size() == 1;
    final SSTableReader sstable = sstables.iterator().next();
    ThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(5);
    List<Future> futures = new ArrayList<>(NUM_PARTITIONS * 2);
    for (int i = 0; i < NUM_PARTITIONS; i++) {
        final ByteBuffer key = ByteBufferUtil.bytes(String.format("%3d", i));
        final int index = i;
        futures.add(executor.submit(new Runnable() {

            public void run() {
                Row row = Util.getOnlyRowUnfiltered(Util.cmd(store, key).build());
                assertEquals(0, ByteBufferUtil.compare(String.format("%3d", index).getBytes(), row.cells().iterator().next().value()));
            }
        }));
        futures.add(executor.submit(new Runnable() {

            public void run() {
                Iterable<DecoratedKey> results = store.keySamples(new Range<>(sstable.getPartitioner().getMinimumToken(), sstable.getPartitioner().getToken(key)));
                assertTrue(results.iterator().hasNext());
            }
        }));
    }
    SSTableReader replacement;
    try (LifecycleTransaction txn = store.getTracker().tryModify(Arrays.asList(sstable), OperationType.UNKNOWN)) {
        replacement = sstable.cloneWithNewSummarySamplingLevel(store, 1);
        txn.update(replacement, true);
        txn.finish();
    }
    for (Future future : futures) future.get();
    assertEquals(sstable.estimatedKeys(), replacement.estimatedKeys(), 1);
}
Also used : LifecycleTransaction(org.apache.cassandra.db.lifecycle.LifecycleTransaction) ByteBuffer(java.nio.ByteBuffer) SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) Row(org.apache.cassandra.db.rows.Row) Test(org.junit.Test)
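
Stripped of the concurrent readers, the replacement step above reduces to a small pattern: clone the reader with a different summary sampling level, publish it inside the transaction, and commit. A hedged sketch follows; the helper name is illustrative, and the boolean passed to update simply mirrors the usage in the test, which registers the new reader against one of the transaction's originals.

// Illustrative helper distilling the swap above; assumes the same APIs the test uses.
static SSTableReader replaceSummary(ColumnFamilyStore store, SSTableReader original, int samplingLevel) throws IOException
{
    try (LifecycleTransaction txn = store.getTracker().tryModify(Arrays.asList(original), OperationType.UNKNOWN))
    {
        SSTableReader replacement = original.cloneWithNewSummarySamplingLevel(store, samplingLevel);
        txn.update(replacement, true);   // same call as in the test: publish the new reader in place of the original
        txn.finish();                    // commit; reads issued before and after should see a consistent view
        return replacement;
    }
}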

Example 53 with LifecycleTransaction

use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in project cassandra by apache.

the class SSTableRewriterTest method testCanonicalView.

@Test
public void testCanonicalView() throws Exception {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    SSTableReader s = writeFile(cfs, 1000);
    cfs.addSSTable(s);
    Set<SSTableReader> sstables = Sets.newHashSet(s);
    assertEquals(1, sstables.size());
    boolean checked = false;
    try (ISSTableScanner scanner = sstables.iterator().next().getScanner();
        CompactionController controller = new CompactionController(cfs, sstables, 0);
        LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN);
        SSTableRewriter writer = new SSTableRewriter(txn, 1000, 10000000, false);
        CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID())) {
        writer.switchWriter(getWriter(cfs, sstables.iterator().next().descriptor.directory, txn));
        while (ci.hasNext()) {
            writer.append(ci.next());
            if (!checked && writer.currentWriter().getFilePointer() > 15000000) {
                checked = true;
                ColumnFamilyStore.ViewFragment viewFragment = cfs.select(View.selectFunction(SSTableSet.CANONICAL));
                // canonical view should have only one SSTable which is not opened early.
                assertEquals(1, viewFragment.sstables.size());
                SSTableReader sstable = viewFragment.sstables.get(0);
                assertEquals(s.descriptor, sstable.descriptor);
                assertTrue("Found early opened SSTable in canonical view: " + sstable.getFilename(), sstable.openReason != SSTableReader.OpenReason.EARLY);
            }
        }
    }
    truncateCF();
    validateCFS(cfs);
}
Also used : CompactionController(org.apache.cassandra.db.compaction.CompactionController) SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) CompactionIterator(org.apache.cassandra.db.compaction.CompactionIterator) Keyspace(org.apache.cassandra.db.Keyspace) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) LifecycleTransaction(org.apache.cassandra.db.lifecycle.LifecycleTransaction) Test(org.junit.Test)
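
The assertion inside the loop above can be read as a general invariant: the CANONICAL view should never hand out readers that an in-flight rewrite has only opened early. A small hedged sketch of that check as a standalone helper (the method name is illustrative):

// Illustrative helper, using the same selection API as the test above.
static void assertNoEarlyOpenedReaders(ColumnFamilyStore cfs)
{
    ColumnFamilyStore.ViewFragment canonical = cfs.select(View.selectFunction(SSTableSet.CANONICAL));
    for (SSTableReader reader : canonical.sstables)
        assertTrue("Early-opened reader leaked into canonical view: " + reader.getFilename(),
                   reader.openReason != SSTableReader.OpenReason.EARLY);
}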

Example 54 with LifecycleTransaction

use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in project cassandra by apache.

the class SSTableRewriterTest method testNumberOfFiles_truncate.

@Test
public void testNumberOfFiles_truncate() throws Exception {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    cfs.disableAutoCompaction();
    SSTableReader s = writeFile(cfs, 1000);
    cfs.addSSTable(s);
    Set<SSTableReader> compacting = Sets.newHashSet(s);
    List<SSTableReader> sstables;
    int files = 1;
    try (ISSTableScanner scanner = s.getScanner();
        CompactionController controller = new CompactionController(cfs, compacting, 0);
        LifecycleTransaction txn = cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
        SSTableRewriter rewriter = new SSTableRewriter(txn, 1000, 10000000, false);
        CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID())) {
        rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
        while (ci.hasNext()) {
            rewriter.append(ci.next());
            if (rewriter.currentWriter().getOnDiskFilePointer() > 25000000) {
                rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
                files++;
                // we have one original file plus the ones we have switched out.
                assertEquals(cfs.getLiveSSTables().size(), files);
            }
        }
        sstables = rewriter.finish();
    }
    LifecycleTransaction.waitForDeletions();
    assertFileCounts(s.descriptor.directory.list());
    validateCFS(cfs);
}
Also used : CompactionController(org.apache.cassandra.db.compaction.CompactionController) SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) CompactionIterator(org.apache.cassandra.db.compaction.CompactionIterator) Keyspace(org.apache.cassandra.db.Keyspace) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) LifecycleTransaction(org.apache.cassandra.db.lifecycle.LifecycleTransaction) Test(org.junit.Test)
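
The body of this test is an instance of a reusable loop: append partitions through a CompactionIterator and switch to a fresh writer whenever the current one crosses a size threshold. A hedged sketch of that loop with the threshold pulled out as a parameter; the helper name is illustrative, the constructor constants mirror the test, and getWriter is the test class's own factory method.

// Illustrative distillation of the rewrite loop above.
static List<SSTableReader> rewriteWithRolling(ColumnFamilyStore cfs, SSTableReader source, long rollAtBytes) throws Exception
{
    Set<SSTableReader> compacting = Sets.newHashSet(source);
    try (ISSTableScanner scanner = source.getScanner();
         CompactionController controller = new CompactionController(cfs, compacting, 0);
         LifecycleTransaction txn = cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
         SSTableRewriter rewriter = new SSTableRewriter(txn, 1000, 10000000, false);
         CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner),
                                                        controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID()))
    {
        rewriter.switchWriter(getWriter(cfs, source.descriptor.directory, txn));
        while (ci.hasNext())
        {
            rewriter.append(ci.next());
            // roll to a new writer once the current data file grows past the threshold
            if (rewriter.currentWriter().getOnDiskFilePointer() > rollAtBytes)
                rewriter.switchWriter(getWriter(cfs, source.descriptor.directory, txn));
        }
        return rewriter.finish();   // completes the rewrite and returns the new readers
    }
}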

Example 55 with LifecycleTransaction

use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in project cassandra by apache.

the class SSTableRewriterTest method testSmallFiles.

@Test
public void testSmallFiles() throws Exception {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    cfs.disableAutoCompaction();
    SSTableReader s = writeFile(cfs, 400);
    cfs.addSSTable(s);
    Set<SSTableReader> compacting = Sets.newHashSet(s);
    List<SSTableReader> sstables;
    int files = 1;
    try (ISSTableScanner scanner = s.getScanner();
        CompactionController controller = new CompactionController(cfs, compacting, 0);
        LifecycleTransaction txn = cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
        SSTableRewriter rewriter = new SSTableRewriter(txn, 1000, 1000000, false);
        CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID())) {
        rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
        while (ci.hasNext()) {
            rewriter.append(ci.next());
            if (rewriter.currentWriter().getOnDiskFilePointer() > 2500000) {
                // all files are now opened early
                assertEquals(files, cfs.getLiveSSTables().size());
                rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
                files++;
            }
        }
        sstables = rewriter.finish();
    }
    assertEquals(files, sstables.size());
    assertEquals(files, cfs.getLiveSSTables().size());
    LifecycleTransaction.waitForDeletions();
    assertFileCounts(s.descriptor.directory.list());
    validateCFS(cfs);
}
Also used : CompactionController(org.apache.cassandra.db.compaction.CompactionController) SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) CompactionIterator(org.apache.cassandra.db.compaction.CompactionIterator) Keyspace(org.apache.cassandra.db.Keyspace) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) LifecycleTransaction(org.apache.cassandra.db.lifecycle.LifecycleTransaction) Test(org.junit.Test)
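
None of the rewriter tests shown here exercises the abort path explicitly, but the same try-with-resources shape implies it: if finish() is never called, closing the rewriter and the transaction should discard the partially written sstables and leave the original readers live. The sketch below reuses the locals from testSmallFiles; the outcome asserted at the end is an assumption about abort semantics, not something the tests above verify.

// Sketch of the abort path; same setup as testSmallFiles, but finish() is deliberately skipped.
try (ISSTableScanner scanner = s.getScanner();
     CompactionController controller = new CompactionController(cfs, compacting, 0);
     LifecycleTransaction txn = cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
     SSTableRewriter rewriter = new SSTableRewriter(txn, 1000, 1000000, false);
     CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner),
                                                    controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID()))
{
    rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
    while (ci.hasNext())
        rewriter.append(ci.next());
    // no rewriter.finish(): closing the resources is expected to abort the rewrite
}
LifecycleTransaction.waitForDeletions();          // allow the discarded files to be removed
assertEquals(1, cfs.getLiveSSTables().size());    // assumed outcome: only the original sstable remains live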

Aggregations

LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction): 60 uses
SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader): 53 uses
Test (org.junit.Test): 28 uses
ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore): 24 uses
Keyspace (org.apache.cassandra.db.Keyspace): 23 uses
CompactionController (org.apache.cassandra.db.compaction.CompactionController): 13 uses
CompactionIterator (org.apache.cassandra.db.compaction.CompactionIterator): 12 uses
File (java.io.File): 10 uses
Range (org.apache.cassandra.dht.Range): 7 uses
UUID (java.util.UUID): 5 uses
BytesToken (org.apache.cassandra.dht.ByteOrderedPartitioner.BytesToken): 5 uses
Token (org.apache.cassandra.dht.Token): 5 uses
IOException (java.io.IOException): 4 uses
AbstractCompactionStrategy (org.apache.cassandra.db.compaction.AbstractCompactionStrategy): 4 uses
CompactionAwareWriter (org.apache.cassandra.db.compaction.writers.CompactionAwareWriter): 4 uses
SSTableWriter (org.apache.cassandra.io.sstable.format.SSTableWriter): 4 uses
RestorableMeter (org.apache.cassandra.metrics.RestorableMeter): 4 uses
ByteBuffer (java.nio.ByteBuffer): 3 uses
SchemaLoader.createKeyspace (org.apache.cassandra.SchemaLoader.createKeyspace): 3 uses
DatabaseDescriptor (org.apache.cassandra.config.DatabaseDescriptor): 3 uses