Example 1 with SSTable

use of org.apache.cassandra.io.sstable.SSTable in project eiger by wlloyd.

the class ColumnFamilyStoreTest method testDeleteStandardRowSticksAfterFlush.

@Test
public void testDeleteStandardRowSticksAfterFlush() throws Throwable {
    // test to make sure flushing after a delete doesn't resurrect deleted cols.
    String tableName = "Keyspace1";
    String cfName = "Standard1";
    Table table = Table.open(tableName);
    ColumnFamilyStore cfs = table.getColumnFamilyStore(cfName);
    DecoratedKey key = Util.dk("f-flush-resurrection");
    SlicePredicate sp = new SlicePredicate();
    sp.setSlice_range(new SliceRange());
    sp.getSlice_range().setCount(100);
    sp.getSlice_range().setStart(ArrayUtils.EMPTY_BYTE_ARRAY);
    sp.getSlice_range().setFinish(ArrayUtils.EMPTY_BYTE_ARRAY);
    // insert
    putColsStandard(cfs, key, column("col1", "val1", 1), column("col2", "val2", 1));
    assertRowAndColCount(1, 2, null, false, cfs.getRangeSlice(null, Util.range("f", "g"), 100, QueryFilter.getFilter(sp, cfs.getComparator()), null));
    // flush.
    cfs.forceBlockingFlush();
    // insert, don't flush
    putColsStandard(cfs, key, column("col3", "val3", 1), column("col4", "val4", 1));
    assertRowAndColCount(1, 4, null, false, cfs.getRangeSlice(null, Util.range("f", "g"), 100, QueryFilter.getFilter(sp, cfs.getComparator()), null));
    // delete (from sstable and memtable)
    RowMutation rm = new RowMutation(table.name, key.key);
    rm.delete(new QueryPath(cfs.columnFamily, null, null), 2);
    rm.apply();
    // verify delete
    assertRowAndColCount(1, 0, null, true, cfs.getRangeSlice(null, Util.range("f", "g"), 100, QueryFilter.getFilter(sp, cfs.getComparator()), null));
    // flush
    cfs.forceBlockingFlush();
    // re-verify delete. // first breakage is right here because of CASSANDRA-1837.
    assertRowAndColCount(1, 0, null, true, cfs.getRangeSlice(null, Util.range("f", "g"), 100, QueryFilter.getFilter(sp, cfs.getComparator()), null));
    // simulate a 'late' insertion that gets put in after the deletion. should get inserted, but fail on read.
    putColsStandard(cfs, key, column("col5", "val5", 1), column("col2", "val2", 1));
    // should still be nothing there because we deleted this row. 2nd breakage, but was undetected because of 1837.
    assertRowAndColCount(1, 0, null, true, cfs.getRangeSlice(null, Util.range("f", "g"), 100, QueryFilter.getFilter(sp, cfs.getComparator()), null));
    // make sure that new writes are recognized.
    putColsStandard(cfs, key, column("col6", "val6", 3), column("col7", "val7", 3));
    assertRowAndColCount(1, 2, null, true, cfs.getRangeSlice(null, Util.range("f", "g"), 100, QueryFilter.getFilter(sp, cfs.getComparator()), null));
    // and it remains so after flush. (this wasn't failing before, but it's good to check.)
    cfs.forceBlockingFlush();
    assertRowAndColCount(1, 2, null, true, cfs.getRangeSlice(null, Util.range("f", "g"), 100, QueryFilter.getFilter(sp, cfs.getComparator()), null));
}
Also used : SSTable(org.apache.cassandra.io.sstable.SSTable) Test(org.junit.Test)
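
The test above calls two small helpers that this excerpt does not show. The sketch below is an assumption about their shape, reconstructed from how they are called (the names match the calls, but the bodies are not copied from the eiger sources): column builds a Column from a string name/value pair plus a timestamp, and putColsStandard applies the given columns to the row through a RowMutation.

static Column column(String name, String value, long timestamp) {
    // assumed helper; uses org.apache.cassandra.utils.ByteBufferUtil
    return new Column(ByteBufferUtil.bytes(name), ByteBufferUtil.bytes(value), timestamp);
}

static void putColsStandard(ColumnFamilyStore cfs, DecoratedKey key, Column... cols) throws Throwable {
    // assumed helper; builds a standard ColumnFamily for the store and applies it as a mutation
    ColumnFamily cf = ColumnFamily.create(cfs.table.name, cfs.columnFamily);
    for (Column c : cols)
        cf.addColumn(c);
    RowMutation rm = new RowMutation(cfs.table.name, key.key);
    rm.add(cf);
    rm.apply();
}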

Example 2 with SSTable

use of org.apache.cassandra.io.sstable.SSTable in project eiger by wlloyd.

the class ColumnFamilyStoreTest method testDeleteSuperRowSticksAfterFlush.

@Test
public void testDeleteSuperRowSticksAfterFlush() throws Throwable {
    String tableName = "Keyspace1";
    String cfName = "Super1";
    ByteBuffer scfName = ByteBufferUtil.bytes("SuperDuper");
    Table table = Table.open(tableName);
    ColumnFamilyStore cfs = table.getColumnFamilyStore(cfName);
    DecoratedKey key = Util.dk("flush-resurrection");
    // create an isolated sstable.
    putColsSuper(cfs, key, scfName, new Column(getBytes(1L), ByteBufferUtil.bytes("val1"), 1), new Column(getBytes(2L), ByteBufferUtil.bytes("val2"), 1), new Column(getBytes(3L), ByteBufferUtil.bytes("val3"), 1));
    cfs.forceBlockingFlush();
    // insert, don't flush.
    putColsSuper(cfs, key, scfName, new Column(getBytes(4L), ByteBufferUtil.bytes("val4"), 1), new Column(getBytes(5L), ByteBufferUtil.bytes("val5"), 1), new Column(getBytes(6L), ByteBufferUtil.bytes("val6"), 1));
    // verify insert.
    final SlicePredicate sp = new SlicePredicate();
    sp.setSlice_range(new SliceRange());
    sp.getSlice_range().setCount(100);
    sp.getSlice_range().setStart(ArrayUtils.EMPTY_BYTE_ARRAY);
    sp.getSlice_range().setFinish(ArrayUtils.EMPTY_BYTE_ARRAY);
    assertRowAndColCount(1, 6, scfName, false, cfs.getRangeSlice(scfName, Util.range("f", "g"), 100, QueryFilter.getFilter(sp, cfs.getComparator()), null));
    // delete.
    RowMutation rm = new RowMutation(table.name, key.key);
    rm.delete(new QueryPath(cfName, scfName), 2);
    rm.apply();
    // verify delete.
    assertRowAndColCount(1, 0, scfName, false, cfs.getRangeSlice(scfName, Util.range("f", "g"), 100, QueryFilter.getFilter(sp, cfs.getComparator()), null));
    // flush
    cfs.forceBlockingFlush();
    // re-verify delete.
    assertRowAndColCount(1, 0, scfName, false, cfs.getRangeSlice(scfName, Util.range("f", "g"), 100, QueryFilter.getFilter(sp, cfs.getComparator()), null));
    // late insert.
    putColsSuper(cfs, key, scfName, new Column(getBytes(4L), ByteBufferUtil.bytes("val4"), 1L), new Column(getBytes(7L), ByteBufferUtil.bytes("val7"), 1L));
    // re-verify delete.
    assertRowAndColCount(1, 0, scfName, false, cfs.getRangeSlice(scfName, Util.range("f", "g"), 100, QueryFilter.getFilter(sp, cfs.getComparator()), null));
    // make sure new writes are recognized.
    putColsSuper(cfs, key, scfName, new Column(getBytes(3L), ByteBufferUtil.bytes("val3"), 3), new Column(getBytes(8L), ByteBufferUtil.bytes("val8"), 3), new Column(getBytes(9L), ByteBufferUtil.bytes("val9"), 3));
    assertRowAndColCount(1, 3, scfName, false, cfs.getRangeSlice(scfName, Util.range("f", "g"), 100, QueryFilter.getFilter(sp, cfs.getComparator()), null));
}
Also used : SSTable(org.apache.cassandra.io.sstable.SSTable) ByteBuffer(java.nio.ByteBuffer) Test(org.junit.Test)
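
As in Example 1, putColsSuper is a helper from the eiger test sources that is not shown here. A hedged sketch of what it likely does, inferred from the calls above (the subcolumn comparator and exact body are assumptions): wrap the columns in a SuperColumn named scfName and apply them through a RowMutation.

static void putColsSuper(ColumnFamilyStore cfs, DecoratedKey key, ByteBuffer scfName, Column... cols) throws Throwable {
    // assumed helper; LongType matches the getBytes(1L)-style subcolumn names used above
    ColumnFamily cf = ColumnFamily.create(cfs.table.name, cfs.columnFamily);
    SuperColumn sc = new SuperColumn(scfName, LongType.instance);
    for (Column c : cols)
        sc.addColumn(c);
    cf.addColumn(sc);
    RowMutation rm = new RowMutation(cfs.table.name, key.key);
    rm.add(cf);
    rm.apply();
}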

Example 3 with SSTable

use of org.apache.cassandra.io.sstable.SSTable in project cassandra by apache.

the class CompactionStrategyManager method findUpgradeSSTableTask.

/**
 * Finds the oldest (by modification date) non-latest-version sstable on disk and creates an upgrade task for it.
 * @return the upgrade task, or null if automatic upgrades are disabled or no eligible sstable could be marked compacting
 */
@VisibleForTesting
// transaction is closed by AbstractCompactionTask::execute
@SuppressWarnings("resource")
AbstractCompactionTask findUpgradeSSTableTask() {
    if (!isEnabled() || !DatabaseDescriptor.automaticSSTableUpgrade())
        return null;
    Set<SSTableReader> compacting = cfs.getTracker().getCompacting();
    List<SSTableReader> potentialUpgrade = cfs.getLiveSSTables().stream().filter(s -> !compacting.contains(s) && !s.descriptor.version.isLatestVersion()).sorted((o1, o2) -> {
        File f1 = new File(o1.descriptor.filenameFor(Component.DATA));
        File f2 = new File(o2.descriptor.filenameFor(Component.DATA));
        return Longs.compare(f1.lastModified(), f2.lastModified());
    }).collect(Collectors.toList());
    for (SSTableReader sstable : potentialUpgrade) {
        LifecycleTransaction txn = cfs.getTracker().tryModify(sstable, OperationType.UPGRADE_SSTABLES);
        if (txn != null) {
            logger.debug("Running automatic sstable upgrade for {}", sstable);
            return getCompactionStrategyFor(sstable).getCompactionTask(txn, Integer.MIN_VALUE, Long.MAX_VALUE);
        }
    }
    return null;
}
Also used : Arrays(java.util.Arrays) SSTableSet(org.apache.cassandra.db.lifecycle.SSTableSet) File(org.apache.cassandra.io.util.File) LoggerFactory(org.slf4j.LoggerFactory) INotification(org.apache.cassandra.notifications.INotification) SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) INotificationConsumer(org.apache.cassandra.notifications.INotificationConsumer) Index(org.apache.cassandra.index.Index) DatabaseDescriptor(org.apache.cassandra.config.DatabaseDescriptor) Longs(com.google.common.primitives.Longs) Collection(java.util.Collection) CompactionParams(org.apache.cassandra.schema.CompactionParams) Set(java.util.Set) SSTableAddedNotification(org.apache.cassandra.notifications.SSTableAddedNotification) UUID(java.util.UUID) Collectors(java.util.stream.Collectors) Objects(java.util.Objects) List(java.util.List) LifecycleTransaction(org.apache.cassandra.db.lifecycle.LifecycleTransaction) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) LifecycleNewTracker(org.apache.cassandra.db.lifecycle.LifecycleNewTracker) TableMetadata(org.apache.cassandra.schema.TableMetadata) StatsMetadata(org.apache.cassandra.io.sstable.metadata.StatsMetadata) ConcurrentModificationException(java.util.ConcurrentModificationException) Directories(org.apache.cassandra.db.Directories) TaskSupplier(org.apache.cassandra.db.compaction.AbstractStrategyHolder.TaskSupplier) Iterables(com.google.common.collect.Iterables) Range(org.apache.cassandra.dht.Range) SSTable(org.apache.cassandra.io.sstable.SSTable) ReentrantReadWriteLock(java.util.concurrent.locks.ReentrantReadWriteLock) Supplier(java.util.function.Supplier) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) Token(org.apache.cassandra.dht.Token) SSTableDeletingNotification(org.apache.cassandra.notifications.SSTableDeletingNotification) ActiveRepairService(org.apache.cassandra.service.ActiveRepairService) Lists(com.google.common.collect.Lists) SSTableMultiWriter(org.apache.cassandra.io.sstable.SSTableMultiWriter) ImmutableList(com.google.common.collect.ImmutableList) SSTableMetadataChanged(org.apache.cassandra.notifications.SSTableMetadataChanged) CleanupTask(org.apache.cassandra.db.compaction.PendingRepairManager.CleanupTask) Component(org.apache.cassandra.io.sstable.Component) Descriptor(org.apache.cassandra.io.sstable.Descriptor) MetadataCollector(org.apache.cassandra.io.sstable.metadata.MetadataCollector) SerializationHeader(org.apache.cassandra.db.SerializationHeader) CleanupSummary(org.apache.cassandra.repair.consistent.admin.CleanupSummary) SSTableListChangedNotification(org.apache.cassandra.notifications.SSTableListChangedNotification) Logger(org.slf4j.Logger) GroupedSSTableContainer(org.apache.cassandra.db.compaction.AbstractStrategyHolder.GroupedSSTableContainer) ISSTableScanner(org.apache.cassandra.io.sstable.ISSTableScanner) IOException(java.io.IOException) DiskBoundaries(org.apache.cassandra.db.DiskBoundaries) VisibleForTesting(com.google.common.annotations.VisibleForTesting) SSTableRepairStatusChanged(org.apache.cassandra.notifications.SSTableRepairStatusChanged) Collections(java.util.Collections) SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) LifecycleTransaction(org.apache.cassandra.db.lifecycle.LifecycleTransaction) File(org.apache.cassandra.io.util.File) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
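
The @SuppressWarnings("resource") comment notes that the LifecycleTransaction acquired here is released by AbstractCompactionTask::execute. A minimal usage sketch of the returned task (the real driver is CompactionManager's background upgrade loop; the csm variable and the ActiveCompactionsTracker.NOOP tracker are assumptions about this codebase version):

// csm is a CompactionStrategyManager for some ColumnFamilyStore, assumed to be in scope.
AbstractCompactionTask upgradeTask = csm.findUpgradeSSTableTask();
if (upgradeTask != null) {
    // Executing the task releases the LifecycleTransaction taken inside findUpgradeSSTableTask.
    upgradeTask.execute(ActiveCompactionsTracker.NOOP);
}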

Example 4 with SSTable

use of org.apache.cassandra.io.sstable.SSTable in project cassandra by apache.

the class LogRecord method make.

public static Map<SSTable, LogRecord> make(Type type, Iterable<SSTableReader> tables) {
    // contains a mapping from sstable absolute path (everything up until the 'Data'/'Index'/etc part of the filename) to the sstable
    Map<String, SSTable> absolutePaths = new HashMap<>();
    for (SSTableReader table : tables) absolutePaths.put(absolutePath(table.descriptor.baseFilename()), table);
    // maps sstable base file name to the actual files on disk
    Map<String, List<File>> existingFiles = getExistingFiles(absolutePaths.keySet());
    Map<SSTable, LogRecord> records = new HashMap<>(existingFiles.size());
    for (Map.Entry<String, List<File>> entry : existingFiles.entrySet()) {
        List<File> filesOnDisk = entry.getValue();
        String baseFileName = entry.getKey();
        SSTable sstable = absolutePaths.get(baseFileName);
        records.put(sstable, make(type, filesOnDisk, sstable.getAllFilePaths().size(), baseFileName));
    }
    return records;
}
Also used : SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) SSTable(org.apache.cassandra.io.sstable.SSTable) File(org.apache.cassandra.io.util.File)
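
The absolutePath helper called above is not part of this excerpt. Based on the inline comment ("everything up until the 'Data'/'Index'/etc part of the filename"), a plausible sketch is the following; only the intent is taken from the comment, the exact body and the use of FileUtils/Component are assumptions:

private static String absolutePath(String baseFilename) {
    // Canonicalise the base filename and keep the component separator so that all
    // components of one sstable (Data, Index, ...) map to the same key.
    return FileUtils.getCanonicalPath(baseFilename + Component.separator);
}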

Example 5 with SSTable

use of org.apache.cassandra.io.sstable.SSTable in project cassandra by apache.

the class SASIIndexTest method testTableRebuild.

@Test
public void testTableRebuild() throws Exception {
    ColumnFamilyStore store = Keyspace.open(KS_NAME).getColumnFamilyStore(CLUSTERING_CF_NAME_1);
    executeCQL(CLUSTERING_CF_NAME_1, "INSERT INTO %s.%s (name, nickname, location, age, height, score) VALUES (?, ?, ?, ?, ?, ?)", "Pavel", "xedin", "US", 27, 183, 1.0);
    executeCQL(CLUSTERING_CF_NAME_1, "INSERT INTO %s.%s (name, location, age, height, score) VALUES (?, ?, ?, ?, ?)", "Pavel", "BY", 28, 182, 2.0);
    executeCQL(CLUSTERING_CF_NAME_1, "INSERT INTO %s.%s (name, nickname, location, age, height, score) VALUES (?, ?, ?, ?, ?, ?)", "Jordan", "jrwest", "US", 27, 182, 1.0);
    store.forceBlockingFlush();
    SSTable ssTable = store.getSSTables(SSTableSet.LIVE).iterator().next();
    Path path = FileSystems.getDefault().getPath(ssTable.getFilename().replace("-Data", "-SI_" + CLUSTERING_CF_NAME_1 + "_age"));
    // Overwrite index file with garbage
    try (FileChannel fc = FileChannel.open(path, StandardOpenOption.WRITE)) {
        fc.truncate(8).write(ByteBuffer.wrap("garbage".getBytes(StandardCharsets.UTF_8)));
    }
    long size1 = Files.readAttributes(path, BasicFileAttributes.class).size();
    // Trying to query the corrupted index file yields no results
    Assert.assertTrue(executeCQL(CLUSTERING_CF_NAME_1, "SELECT * FROM %s.%s WHERE age = 27 AND name = 'Pavel'").isEmpty());
    // Rebuild the index
    store.rebuildSecondaryIndex(CLUSTERING_CF_NAME_1 + "_age");
    long size2 = Files.readAttributes(path, BasicFileAttributes.class).size();
    // Make sure that the garbage was overwritten
    Assert.assertTrue(size2 > size1);
    // Make sure that indexes work for rebuilt tables
    CQLTester.assertRows(executeCQL(CLUSTERING_CF_NAME_1, "SELECT * FROM %s.%s WHERE age = 27 AND name = 'Pavel'"), CQLTester.row("Pavel", "US", 27, "xedin", 183, 1.0));
    CQLTester.assertRows(executeCQL(CLUSTERING_CF_NAME_1, "SELECT * FROM %s.%s WHERE age = 28"), CQLTester.row("Pavel", "BY", 28, "xedin", 182, 2.0));
    CQLTester.assertRows(executeCQL(CLUSTERING_CF_NAME_1, "SELECT * FROM %s.%s WHERE score < 2.0 AND nickname = 'jrwest' ALLOW FILTERING"), CQLTester.row("Jordan", "US", 27, "jrwest", 182, 1.0));
}
Also used : Path(java.nio.file.Path) FileChannel(java.nio.channels.FileChannel) SSTable(org.apache.cassandra.io.sstable.SSTable) BasicFileAttributes(java.nio.file.attribute.BasicFileAttributes)
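
The path manipulation in the test is how it locates the on-disk SASI component: SASI writes one index file per sstable whose name replaces the "-Data" part of the Data file name with "-SI_<indexName>". A tiny illustration with hypothetical file names (plain String handling, no Cassandra classes involved):

// Hypothetical paths, for illustration only.
String dataFile  = "/var/lib/cassandra/data/ks/table_1-uuid/mc-3-big-Data.db";
String indexFile = dataFile.replace("-Data", "-SI_table_1_age");
// indexFile -> ".../mc-3-big-SI_table_1_age.db"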

Aggregations

SSTable (org.apache.cassandra.io.sstable.SSTable)6 ByteBuffer (java.nio.ByteBuffer)2 SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader)2 File (org.apache.cassandra.io.util.File)2 Test (org.junit.Test)2 VisibleForTesting (com.google.common.annotations.VisibleForTesting)1 ImmutableList (com.google.common.collect.ImmutableList)1 Iterables (com.google.common.collect.Iterables)1 Lists (com.google.common.collect.Lists)1 Longs (com.google.common.primitives.Longs)1 File (java.io.File)1 IOException (java.io.IOException)1 FileChannel (java.nio.channels.FileChannel)1 Path (java.nio.file.Path)1 BasicFileAttributes (java.nio.file.attribute.BasicFileAttributes)1 ArrayList (java.util.ArrayList)1 Arrays (java.util.Arrays)1 Collection (java.util.Collection)1 Collections (java.util.Collections)1 ConcurrentModificationException (java.util.ConcurrentModificationException)1