Example 61 with SSTableReader

Use of org.apache.cassandra.io.sstable.format.SSTableReader in the Apache Cassandra project.

From the class TTLExpiryTest, the method testCheckForExpiredSSTableBlockers:

@Test
public void testCheckForExpiredSSTableBlockers() throws InterruptedException {
    ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore("Standard1");
    cfs.truncateBlocking();
    cfs.disableAutoCompaction();
    // gc_grace_seconds = 0 makes the tombstones written below immediately purgeable
    MigrationManager.announceTableUpdate(cfs.metadata().unbuild().gcGraceSeconds(0).build(), true);
    // write one live cell and flush it into its own sstable; this sstable will block the expired ones
    new RowUpdateBuilder(cfs.metadata(), System.currentTimeMillis(), "test").noRowMarker().add("col1", ByteBufferUtil.EMPTY_BYTE_BUFFER).build().applyUnsafe();
    cfs.forceBlockingFlush();
    SSTableReader blockingSSTable = cfs.getSSTables(SSTableSet.LIVE).iterator().next();
    // flush ten tombstone-only sstables that shadow the cell above
    for (int i = 0; i < 10; i++) {
        new RowUpdateBuilder(cfs.metadata(), System.currentTimeMillis(), "test").noRowMarker().delete("col1").build().applyUnsafe();
        cfs.forceBlockingFlush();
    }
    // the sstable holding the older live cell must be reported as the single blocker of all ten expired sstables
    Multimap<SSTableReader, SSTableReader> blockers = SSTableExpiredBlockers.checkForExpiredSSTableBlockers(cfs.getSSTables(SSTableSet.LIVE), (int) (System.currentTimeMillis() / 1000) + 100);
    assertEquals(1, blockers.keySet().size());
    assertTrue(blockers.keySet().contains(blockingSSTable));
    assertEquals(10, blockers.get(blockingSSTable).size());
}
Also used: SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader), Test (org.junit.Test)
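
The multimap returned by checkForExpiredSSTableBlockers maps a blocking sstable to the fully expired sstables it keeps on disk: here the first sstable holds the only live (and older) cell, so the ten tombstone-only sstables cannot be dropped. A minimal, hedged sketch of reporting those blockers, reusing only the calls shown above (cfs is an already-initialized ColumnFamilyStore, imports as listed):

// Sketch: list which sstables prevent fully expired sstables from being dropped.
int gcBefore = (int) (System.currentTimeMillis() / 1000);
Multimap<SSTableReader, SSTableReader> blockers =
        SSTableExpiredBlockers.checkForExpiredSSTableBlockers(cfs.getSSTables(SSTableSet.LIVE), gcBefore);
for (SSTableReader blocker : blockers.keySet())
    System.out.printf("%s blocks %d fully expired sstable(s)%n", blocker, blockers.get(blocker).size());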

Example 62 with SSTableReader

Use of org.apache.cassandra.io.sstable.format.SSTableReader in the Apache Cassandra project.

From the class CustomCassandraIndex, the method buildBlocking:

private void buildBlocking() {
    baseCfs.forceBlockingFlush();
    try (ColumnFamilyStore.RefViewFragment viewFragment = baseCfs.selectAndReference(View.selectFunction(SSTableSet.CANONICAL));
        Refs<SSTableReader> sstables = viewFragment.refs) {
        if (sstables.isEmpty()) {
            logger.info("No SSTable data for {}.{} to build index {} from, marking empty index as built", baseCfs.metadata.keyspace, baseCfs.metadata.name, metadata.name);
            baseCfs.indexManager.markIndexBuilt(metadata.name);
            return;
        }
        logger.info("Submitting index build of {} for data in {}", metadata.name, getSSTableNames(sstables));
        SecondaryIndexBuilder builder = new CollatedViewIndexBuilder(baseCfs, Collections.singleton(this), new ReducingKeyIterator(sstables));
        Future<?> future = CompactionManager.instance.submitIndexBuild(builder);
        FBUtilities.waitOnFuture(future);
        indexCfs.forceBlockingFlush();
        baseCfs.indexManager.markIndexBuilt(metadata.name);
    }
    logger.info("Index build of {} complete", metadata.name);
}
Also used: SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader), SecondaryIndexBuilder (org.apache.cassandra.index.SecondaryIndexBuilder), ReducingKeyIterator (org.apache.cassandra.io.sstable.ReducingKeyIterator)
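
The essential idiom here is the try-with-resources around Refs<SSTableReader>: while the references are held, the selected sstables cannot be released or deleted out from under the index build. A hedged sketch of the same reference pattern in isolation, using only calls that appear in this example (baseCfs and logger as above):

// Sketch: take a reference on the canonical sstables so they stay on disk while we inspect them.
try (ColumnFamilyStore.RefViewFragment viewFragment = baseCfs.selectAndReference(View.selectFunction(SSTableSet.CANONICAL));
     Refs<SSTableReader> sstables = viewFragment.refs) {
    for (SSTableReader sstable : sstables)
        logger.info("Holding a reference on sstable generation {}", sstable.descriptor.generation);
} // references released here; the sstables become eligible for cleanup again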

Example 63 with SSTableReader

Use of org.apache.cassandra.io.sstable.format.SSTableReader in the Apache Cassandra project.

From the class SSTableWriterTest, the method testAbortTxnWithClosedWriterShouldRemoveSSTable:

@Test
public void testAbortTxnWithClosedWriterShouldRemoveSSTable() throws InterruptedException {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    File dir = cfs.getDirectories().getDirectoryForNewSSTables();
    LifecycleTransaction txn = LifecycleTransaction.offline(OperationType.STREAM);
    try (SSTableWriter writer = getWriter(cfs, dir, txn)) {
        for (int i = 0; i < 10000; i++) {
            UpdateBuilder builder = UpdateBuilder.create(cfs.metadata(), random(i, 10)).withTimestamp(1);
            for (int j = 0; j < 100; j++) builder.newRow("" + j).add("val", ByteBuffer.allocate(1000));
            writer.append(builder.build().unfilteredIterator());
        }
        assertFileCounts(dir.list());
        for (int i = 10000; i < 20000; i++) {
            UpdateBuilder builder = UpdateBuilder.create(cfs.metadata(), random(i, 10)).withTimestamp(1);
            for (int j = 0; j < 100; j++) builder.newRow("" + j).add("val", ByteBuffer.allocate(1000));
            writer.append(builder.build().unfilteredIterator());
        }
        SSTableReader sstable = writer.finish(true);
        int datafiles = assertFileCounts(dir.list());
        assertEquals(datafiles, 1);
        sstable.selfRef().release();
        // These checks don't work on Windows: the writer keeps the file channel open until .abort() is called (via the builder)
        if (!FBUtilities.isWindows) {
            LifecycleTransaction.waitForDeletions();
            assertFileCounts(dir.list());
        }
        txn.abort();
        LifecycleTransaction.waitForDeletions();
        datafiles = assertFileCounts(dir.list());
        assertEquals(datafiles, 0);
        validateCFS(cfs);
    }
}
Also used: SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader), SSTableWriter (org.apache.cassandra.io.sstable.format.SSTableWriter), LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction), File (java.io.File), Test (org.junit.Test)
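
For contrast with the abort path above, the commit path would look roughly like the sketch below. It reuses getWriter and random (test-base helpers from this example), writer.finish(true) and txn.update(sstable, false) as shown, and assumes txn.finish() as the transaction's commit entry point; treat it as an illustrative sketch rather than the project's canonical pattern:

// Sketch (assumed commit flow): finishing the transaction keeps the new data file on disk.
LifecycleTransaction txn = LifecycleTransaction.offline(OperationType.STREAM);
try (SSTableWriter writer = getWriter(cfs, dir, txn)) {
    UpdateBuilder builder = UpdateBuilder.create(cfs.metadata(), random(0, 10)).withTimestamp(1);
    builder.newRow("0").add("val", ByteBuffer.allocate(100));
    writer.append(builder.build().unfilteredIterator());
    SSTableReader sstable = writer.finish(true); // close the writer and open the resulting reader
    txn.update(sstable, false);                  // register the new sstable with the transaction
    txn.finish();                                // commit instead of abort: no files are deleted
}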

Example 64 with SSTableReader

Use of org.apache.cassandra.io.sstable.format.SSTableReader in the Apache Cassandra project.

From the class SSTableWriterTest, the method testValueTooBigCorruption:

@Test
public void testValueTooBigCorruption() throws InterruptedException {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_SMALL_MAX_VALUE);
    truncate(cfs);
    File dir = cfs.getDirectories().getDirectoryForNewSSTables();
    LifecycleTransaction txn = LifecycleTransaction.offline(OperationType.STREAM);
    try (SSTableWriter writer1 = getWriter(cfs, dir, txn)) {
        UpdateBuilder largeValue = UpdateBuilder.create(cfs.metadata(), "large_value").withTimestamp(1);
        largeValue.newRow("clustering").add("val", ByteBuffer.allocate(2 * 1024 * 1024));
        writer1.append(largeValue.build().unfilteredIterator());
        SSTableReader sstable = writer1.finish(true);
        txn.update(sstable, false);
        try {
            DecoratedKey dk = Util.dk("large_value");
            UnfilteredRowIterator rowIter = sstable.iterator(dk, Slices.ALL, ColumnFilter.all(cfs.metadata()), false);
            while (rowIter.hasNext()) {
                // no-op read; the values may not come back as expected, we only care that reading fails
                rowIter.next();
            }
            fail("Expected a CorruptSSTableException to be thrown");
        } catch (CorruptSSTableException e) {
            // expected: the oversized value is reported as sstable corruption on read
        }
        txn.abort();
        LifecycleTransaction.waitForDeletions();
    }
}
Also used: SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader), SSTableWriter (org.apache.cassandra.io.sstable.format.SSTableWriter), LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction), File (java.io.File), Test (org.junit.Test)
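
One thing the test does not do is close the UnfilteredRowIterator it drains; since the iterator is closeable, a small helper along these lines keeps the read-back tidy. This is a hypothetical helper (name and structure are mine), built only from the calls used in the example above:

// Hypothetical helper: drain one partition from an sstable, closing the iterator when done.
private static void drainPartition(ColumnFamilyStore cfs, SSTableReader sstable, String key) {
    DecoratedKey dk = Util.dk(key);
    try (UnfilteredRowIterator rowIter = sstable.iterator(dk, Slices.ALL, ColumnFilter.all(cfs.metadata()), false)) {
        while (rowIter.hasNext())
            rowIter.next(); // values are not inspected; we only care whether the read completes
    }
}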

Example 65 with SSTableReader

Use of org.apache.cassandra.io.sstable.format.SSTableReader in the Apache Cassandra project.

From the class SSTableWriterTestBase, the method validateCFS:

/**
     * Validate the column family store by checking that all live
     * sstables are referenced only once and are not marked as
     * compacting. It also checks that the generation of the data
     * files on disk is the same as that of the live sstables,
     * to ensure that the data files on disk belong to the live
     * sstables. Finally, it checks that the metrics contain the
     * correct disk space used, live and total.
     *
     * Note that if there are live sstables, this method submits a maximal
     * compaction task in order to verify that at least one such task exists.
     *
     * This method therefore has side effects and should be called after
     * performing any other checks on previous operations, especially
     * checks involving files on disk.
     *
     * @param cfs - the column family store to validate
     */
public static void validateCFS(ColumnFamilyStore cfs) {
    Set<Integer> liveDescriptors = new HashSet<>();
    long spaceUsed = 0;
    for (SSTableReader sstable : cfs.getLiveSSTables()) {
        assertFalse(sstable.isMarkedCompacted());
        assertEquals(1, sstable.selfRef().globalCount());
        liveDescriptors.add(sstable.descriptor.generation);
        spaceUsed += sstable.bytesOnDisk();
    }
    for (File dir : cfs.getDirectories().getCFDirectories()) {
        for (File f : dir.listFiles()) {
            if (f.getName().contains("Data")) {
                Descriptor d = Descriptor.fromFilename(f.getAbsolutePath());
                assertTrue(d.toString(), liveDescriptors.contains(d.generation));
            }
        }
    }
    assertEquals(spaceUsed, cfs.metric.liveDiskSpaceUsed.getCount());
    assertEquals(spaceUsed, cfs.metric.totalDiskSpaceUsed.getCount());
    assertTrue(cfs.getTracker().getCompacting().isEmpty());
    if (cfs.getLiveSSTables().size() > 0)
        assertFalse(CompactionManager.instance.submitMaximal(cfs, cfs.gcBefore((int) (System.currentTimeMillis() / 1000)), false).isEmpty());
}
Also used: SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader), DatabaseDescriptor (org.apache.cassandra.config.DatabaseDescriptor), File (java.io.File), HashSet (java.util.HashSet)
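
Because validateCFS may submit a maximal compaction and asserts on disk-space metrics, it is meant to run as the last step of a test, after pending deletions have settled. A hedged sketch of that call order, using only helpers already shown in these examples:

// Sketch: typical ordering at the end of a writer test.
ColumnFamilyStore cfs = Keyspace.open(KEYSPACE).getColumnFamilyStore(CF);
truncate(cfs);
// ... writes, flushes, transaction work and file-count assertions go here ...
LifecycleTransaction.waitForDeletions(); // let background deletions finish first
validateCFS(cfs);                        // then check refs, generations and metrics last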

Aggregations

SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader): 289
Test (org.junit.Test): 159
ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore): 91
LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction): 55
Keyspace (org.apache.cassandra.db.Keyspace): 49
File (java.io.File): 45
UUID (java.util.UUID): 28
Range (org.apache.cassandra.dht.Range): 28
Directories (org.apache.cassandra.db.Directories): 27
Token (org.apache.cassandra.dht.Token): 24
RandomAccessFile (java.io.RandomAccessFile): 22
AbstractTransactionalTest (org.apache.cassandra.utils.concurrent.AbstractTransactionalTest): 20
ArrayList (java.util.ArrayList): 18
ByteBuffer (java.nio.ByteBuffer): 17
HashSet (java.util.HashSet): 16
SchemaLoader.createKeyspace (org.apache.cassandra.SchemaLoader.createKeyspace): 16
DecoratedKey (org.apache.cassandra.db.DecoratedKey): 16
RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder): 16
CompactionController (org.apache.cassandra.db.compaction.CompactionController): 14
CompactionIterator (org.apache.cassandra.db.compaction.CompactionIterator): 13