Search in sources:

Example 11 with LifecycleTransaction

use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in project cassandra by apache.

Source: the class StandaloneSplitter, method main.

/**
 * Entry point for the standalone sstable splitter tool.
 *
 * Validates the sstable files named on the command line (all must belong to a single
 * keyspace and table), optionally snapshots them via hard links, then splits every
 * sstable larger than the configured size using {@link SSTableSplitter}, each split
 * running inside its own offline {@link LifecycleTransaction}.
 *
 * Exits with status 0 on success and 1 on invalid arguments or a fatal error.
 *
 * @param args command-line arguments, parsed by {@code Options.parseArgs}
 */
public static void main(String[] args) {
    Options options = Options.parseArgs(args);
    Util.initDatabaseDescriptor();
    try {
        // load keyspace descriptions.
        Schema.instance.loadFromDisk(false);
        String ksName = null;
        String cfName = null;
        Map<Descriptor, Set<Component>> parsedFilenames = new HashMap<Descriptor, Set<Component>>();
        for (String filename : options.filenames) {
            File file = new File(filename);
            if (!file.exists()) {
                System.out.println("Skipping nonexistent file " + file);
                continue;
            }
            Descriptor desc = SSTable.tryDescriptorFromFilename(file);
            if (desc == null) {
                System.out.println("Skipping non sstable file " + file);
                continue;
            }
            // All sstables must come from one keyspace/table; remember the first seen
            // and reject any mismatch.
            if (ksName == null)
                ksName = desc.ksname;
            else if (!ksName.equals(desc.ksname))
                throw new IllegalArgumentException("All sstables must be part of the same keyspace");
            if (cfName == null)
                cfName = desc.cfname;
            else if (!cfName.equals(desc.cfname))
                throw new IllegalArgumentException("All sstables must be part of the same table");
            // Start from the set of components we care about and drop the ones whose
            // files are missing on disk (e.g. uncompressed sstables have no
            // COMPRESSION_INFO component).
            Set<Component> components = new HashSet<Component>(Arrays.asList(new Component[] { Component.DATA, Component.PRIMARY_INDEX, Component.FILTER, Component.COMPRESSION_INFO, Component.STATS }));
            Iterator<Component> iter = components.iterator();
            while (iter.hasNext()) {
                Component component = iter.next();
                if (!(new File(desc.filenameFor(component)).exists()))
                    iter.remove();
            }
            parsedFilenames.put(desc, components);
        }
        if (ksName == null || cfName == null) {
            System.err.println("No valid sstables to split");
            System.exit(1);
        }
        // Do not load sstables since they might be broken
        Keyspace keyspace = Keyspace.openWithoutSSTables(ksName);
        ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfName);
        String snapshotName = "pre-split-" + System.currentTimeMillis();
        List<SSTableReader> sstables = new ArrayList<>();
        for (Map.Entry<Descriptor, Set<Component>> fn : parsedFilenames.entrySet()) {
            try {
                SSTableReader sstable = SSTableReader.openNoValidation(fn.getKey(), fn.getValue(), cfs);
                // Sstables at or below the split size are left alone.
                if (!isSSTableLargerEnough(sstable, options.sizeInMB)) {
                    System.out.println(String.format("Skipping %s: its size (%.3f MB) is less than the split size (%d MB)", sstable.getFilename(), ((sstable.onDiskLength() * 1.0d) / 1024L) / 1024L, options.sizeInMB));
                    continue;
                }
                sstables.add(sstable);
                if (options.snapshot) {
                    // Snapshot by hard-linking the component files, so the originals can
                    // be restored if the split goes wrong.
                    File snapshotDirectory = Directories.getSnapshotDirectory(sstable.descriptor, snapshotName);
                    sstable.createLinks(snapshotDirectory.getPath());
                }
            } catch (Exception e) {
                JVMStabilityInspector.inspectThrowable(e);
                System.err.println(String.format("Error Loading %s: %s", fn.getKey(), e.getMessage()));
                if (options.debug)
                    e.printStackTrace(System.err);
            }
        }
        if (sstables.isEmpty()) {
            System.out.println("No sstables needed splitting.");
            System.exit(0);
        }
        if (options.snapshot)
            System.out.println(String.format("Pre-split sstables snapshotted into snapshot %s", snapshotName));
        for (SSTableReader sstable : sstables) {
            // Each sstable is split in its own offline transaction, so one failure
            // doesn't abort the remaining splits.
            try (LifecycleTransaction transaction = LifecycleTransaction.offline(OperationType.UNKNOWN, sstable)) {
                new SSTableSplitter(cfs, transaction, options.sizeInMB).split();
            } catch (Exception e) {
                System.err.println(String.format("Error splitting %s: %s", sstable, e.getMessage()));
                if (options.debug)
                    e.printStackTrace(System.err);
                // On failure the transaction did not consume the reader's reference;
                // release it here so the reader can be cleaned up.
                sstable.selfRef().release();
            }
        }
        CompactionManager.instance.finishCompactionsAndShutdown(5, TimeUnit.MINUTES);
        LifecycleTransaction.waitForDeletions();
        // We need that to stop non daemonized threads
        System.exit(0);
    } catch (Exception e) {
        System.err.println(e.getMessage());
        if (options.debug)
            e.printStackTrace(System.err);
        System.exit(1);
    }
}
Also used : CmdLineOptions(org.apache.cassandra.tools.BulkLoader.CmdLineOptions) LifecycleTransaction(org.apache.cassandra.db.lifecycle.LifecycleTransaction) SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) SSTableSplitter(org.apache.cassandra.db.compaction.SSTableSplitter) Keyspace(org.apache.cassandra.db.Keyspace) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) File(java.io.File)

Example 12 with LifecycleTransaction

use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in project cassandra by apache.

Source: the class LongCompactionsTest, method testCompaction.

/**
 * Writes {@code sstableCount} sstables, each holding {@code partitionsPerSSTable}
 * partitions of {@code rowsPerPartition} rows, then runs a single compaction over
 * all of them and prints how long it took.
 */
protected void testCompaction(int sstableCount, int partitionsPerSSTable, int rowsPerPartition) throws Exception {
    CompactionManager.instance.disableAutoCompaction();
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore store = keyspace.getColumnFamilyStore("Standard1");
    ArrayList<SSTableReader> sstables = new ArrayList<>();
    for (int tableIdx = 0; tableIdx < sstableCount; tableIdx++) {
        SortedMap<String, PartitionUpdate> updates = new TreeMap<>();
        for (int partition = 0; partition < partitionsPerSSTable; partition++) {
            String key = String.valueOf(partition);
            // the sstable written last carries the highest timestamps
            UpdateBuilder builder = UpdateBuilder.create(store.metadata(), key).withTimestamp(tableIdx);
            for (int row = 0; row < rowsPerPartition; row++) {
                builder.newRow(String.valueOf(row)).add("val", String.valueOf(row));
            }
            updates.put(key, builder.build());
        }
        Collection<SSTableReader> written = SSTableUtils.prepare().write(updates);
        sstables.addAll(written);
        store.addSSTables(written);
    }
    // give garbage collection a bit of time to catch up
    Thread.sleep(1000);
    long start = System.nanoTime();
    final int gcBefore = (int) (System.currentTimeMillis() / 1000) - Schema.instance.getTableMetadata(KEYSPACE1, "Standard1").params.gcGraceSeconds;
    try (LifecycleTransaction txn = store.getTracker().tryModify(sstables, OperationType.COMPACTION)) {
        assert txn != null : "Cannot markCompacting all sstables";
        new CompactionTask(store, txn, gcBefore).execute(null);
    }
    System.out.println(String.format("%s: sstables=%d rowsper=%d colsper=%d: %d ms", this.getClass().getName(), sstableCount, partitionsPerSSTable, rowsPerPartition, TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start)));
}
Also used : LifecycleTransaction(org.apache.cassandra.db.lifecycle.LifecycleTransaction) UpdateBuilder(org.apache.cassandra.UpdateBuilder) SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader)

Example 13 with LifecycleTransaction

use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in project cassandra by apache.

Source: the class AntiCompactionTest, method antiCompactOne.

/**
 * Runs anticompaction over the store's unrepaired sstables and verifies the result:
 * two live sstables remain, keys inside the anticompacted token range end up in the
 * repaired/pending-repair sstable while keys outside it stay unrepaired, and all
 * compacting markers and extra references are released afterwards.
 *
 * @param repairedAt    repaired-at timestamp to apply; {@code UNREPAIRED_SSTABLE}
 *                      means no repaired time is being recorded
 * @param pendingRepair pending repair session id, or {@code null}
 */
private void antiCompactOne(long repairedAt, UUID pendingRepair) throws Exception {
    // at least one of the two repair markers must be supplied
    assert repairedAt != UNREPAIRED_SSTABLE || pendingRepair != null;
    ColumnFamilyStore store = prepareColumnFamilyStore();
    Collection<SSTableReader> sstables = getUnrepairedSSTables(store);
    assertEquals(store.getLiveSSTables().size(), sstables.size());
    // token range ["0", "4") — presumably covers 4 of the prepared keys, matching the
    // 4/6 split asserted at the end; confirm against prepareColumnFamilyStore()
    Range<Token> range = new Range<Token>(new BytesToken("0".getBytes()), new BytesToken("4".getBytes()));
    List<Range<Token>> ranges = Arrays.asList(range);
    int repairedKeys = 0;
    int pendingKeys = 0;
    int nonRepairedKeys = 0;
    try (LifecycleTransaction txn = store.getTracker().tryModify(sstables, OperationType.ANTICOMPACTION);
        Refs<SSTableReader> refs = Refs.ref(sstables)) {
        // tryModify returns null when the sstables cannot all be marked compacting
        if (txn == null)
            throw new IllegalStateException();
        UUID parentRepairSession = UUID.randomUUID();
        CompactionManager.instance.performAnticompaction(store, ranges, refs, txn, repairedAt, pendingRepair, parentRepairSession);
    }
    // anticompaction splits the data into exactly two sstables:
    // one repaired/pending (keys inside the range) and one unrepaired (keys outside)
    assertEquals(2, store.getLiveSSTables().size());
    for (SSTableReader sstable : store.getLiveSSTables()) {
        try (ISSTableScanner scanner = sstable.getScanner()) {
            while (scanner.hasNext()) {
                UnfilteredRowIterator row = scanner.next();
                if (sstable.isRepaired() || sstable.isPendingRepair()) {
                    // every key in the repaired/pending sstable must lie inside the range
                    assertTrue(range.contains(row.partitionKey().getToken()));
                    repairedKeys += sstable.isRepaired() ? 1 : 0;
                    pendingKeys += sstable.isPendingRepair() ? 1 : 0;
                } else {
                    // every key in the unrepaired sstable must lie outside the range
                    assertFalse(range.contains(row.partitionKey().getToken()));
                    nonRepairedKeys++;
                }
            }
        }
    }
    // anticompaction must release its compacting markers and hold no extra references
    for (SSTableReader sstable : store.getLiveSSTables()) {
        assertFalse(sstable.isMarkedCompacted());
        assertEquals(1, sstable.selfRef().globalCount());
    }
    assertEquals(0, store.getTracker().getCompacting().size());
    // 4 keys fall inside the range, 6 outside; which counter they land in depends on
    // whether a repaired time and/or a pending session was requested
    assertEquals(repairedKeys, repairedAt != UNREPAIRED_SSTABLE ? 4 : 0);
    assertEquals(pendingKeys, pendingRepair != NO_PENDING_REPAIR ? 4 : 0);
    assertEquals(nonRepairedKeys, 6);
}
Also used : UnfilteredRowIterator(org.apache.cassandra.db.rows.UnfilteredRowIterator) LifecycleTransaction(org.apache.cassandra.db.lifecycle.LifecycleTransaction) BytesToken(org.apache.cassandra.dht.ByteOrderedPartitioner.BytesToken) Token(org.apache.cassandra.dht.Token) Range(org.apache.cassandra.dht.Range) SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) BytesToken(org.apache.cassandra.dht.ByteOrderedPartitioner.BytesToken) UUID(java.util.UUID)

Example 14 with LifecycleTransaction

use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in project cassandra by apache.

Source: the class AntiCompactionTest, method shouldSkipAntiCompactionForNonIntersectingRange.

/**
 * Verifies that anticompaction is a no-op when the requested repair range does not
 * intersect any of the sstables: all 10 generated sstables must remain live and
 * still unrepaired.
 */
@Test
public void shouldSkipAntiCompactionForNonIntersectingRange() throws InterruptedException, IOException {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore store = keyspace.getColumnFamilyStore(CF);
    store.disableAutoCompaction();
    for (int table = 0; table < 10; table++) {
        generateSStable(store, Integer.toString(table));
    }
    Collection<SSTableReader> sstables = getUnrepairedSSTables(store);
    assertEquals(store.getLiveSSTables().size(), sstables.size());
    // tokens "-1".."-10" — presumably outside the range of any generated key, so no
    // sstable intersects; confirm against generateSStable()
    Range<Token> range = new Range<Token>(new BytesToken("-1".getBytes()), new BytesToken("-10".getBytes()));
    List<Range<Token>> ranges = Arrays.asList(range);
    UUID parentRepairSession = UUID.randomUUID();
    try (LifecycleTransaction txn = store.getTracker().tryModify(sstables, OperationType.ANTICOMPACTION);
        Refs<SSTableReader> refs = Refs.ref(sstables)) {
        // tryModify returns null when the sstables cannot all be marked compacting;
        // fail with a clear message instead of an NPE inside performAnticompaction
        // (consistent with antiCompactOne)
        if (txn == null)
            throw new IllegalStateException("Cannot markCompacting all sstables");
        CompactionManager.instance.performAnticompaction(store, ranges, refs, txn, 1, NO_PENDING_REPAIR, parentRepairSession);
    }
    assertThat(store.getLiveSSTables().size(), is(10));
    assertThat(Iterables.get(store.getLiveSSTables(), 0).isRepaired(), is(false));
}
Also used : LifecycleTransaction(org.apache.cassandra.db.lifecycle.LifecycleTransaction) BytesToken(org.apache.cassandra.dht.ByteOrderedPartitioner.BytesToken) Token(org.apache.cassandra.dht.Token) Range(org.apache.cassandra.dht.Range) SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) BytesToken(org.apache.cassandra.dht.ByteOrderedPartitioner.BytesToken) UUID(java.util.UUID) Test(org.junit.Test)

Example 15 with LifecycleTransaction

use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in project cassandra by apache.

Source: the class CompactionAwareWriterTest, method testSplittingSizeTieredCompactionWriter.

/**
 * Compacts all live sstables with {@link SplittingSizeTieredCompactionWriter} and checks
 * that, from largest to smallest, each resulting sstable is roughly half the size of the
 * previous one (down to the writer's minimum sstable size), and that no rows were lost.
 */
@Test
public void testSplittingSizeTieredCompactionWriter() throws Throwable {
    ColumnFamilyStore store = getColumnFamilyStore();
    store.disableAutoCompaction();
    int numRows = 10000;
    populate(numRows);
    LifecycleTransaction txn = store.getTracker().tryModify(store.getLiveSSTables(), OperationType.COMPACTION);
    long sizeBefore = txn.originals().iterator().next().onDiskLength();
    CompactionAwareWriter writer = new SplittingSizeTieredCompactionWriter(store, store.getDirectories(), txn, txn.originals(), 0);
    int compactedRows = compact(store, txn, writer);
    // sort the resulting sstables largest-first by on-disk size
    List<SSTableReader> bySizeDescending = new ArrayList<>(store.getLiveSSTables());
    bySizeDescending.sort((a, b) -> Longs.compare(b.onDiskLength(), a.onDiskLength()));
    long expectedSize = sizeBefore / 2;
    for (SSTableReader sstable : bySizeDescending) {
        // files smaller than DEFAULT_SMALLEST_SSTABLE_BYTES are never produced;
        // everything below that threshold ends up in the last file
        if (expectedSize > SplittingSizeTieredCompactionWriter.DEFAULT_SMALLEST_SSTABLE_BYTES) {
            // allow 1% diff in estimated vs actual size
            assertEquals(expectedSize, sstable.onDiskLength(), expectedSize / 100);
        }
        expectedSize /= 2;
    }
    assertEquals(numRows, compactedRows);
    validateData(store, numRows);
    store.truncateBlocking();
}
Also used : CompactionAwareWriter(org.apache.cassandra.db.compaction.writers.CompactionAwareWriter) SplittingSizeTieredCompactionWriter(org.apache.cassandra.db.compaction.writers.SplittingSizeTieredCompactionWriter) SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) LifecycleTransaction(org.apache.cassandra.db.lifecycle.LifecycleTransaction)

Aggregations

LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction)60 SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader)53 Test (org.junit.Test)28 ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore)24 Keyspace (org.apache.cassandra.db.Keyspace)23 CompactionController (org.apache.cassandra.db.compaction.CompactionController)13 CompactionIterator (org.apache.cassandra.db.compaction.CompactionIterator)12 File (java.io.File)10 Range (org.apache.cassandra.dht.Range)7 UUID (java.util.UUID)5 BytesToken (org.apache.cassandra.dht.ByteOrderedPartitioner.BytesToken)5 Token (org.apache.cassandra.dht.Token)5 IOException (java.io.IOException)4 AbstractCompactionStrategy (org.apache.cassandra.db.compaction.AbstractCompactionStrategy)4 CompactionAwareWriter (org.apache.cassandra.db.compaction.writers.CompactionAwareWriter)4 SSTableWriter (org.apache.cassandra.io.sstable.format.SSTableWriter)4 RestorableMeter (org.apache.cassandra.metrics.RestorableMeter)4 ByteBuffer (java.nio.ByteBuffer)3 SchemaLoader.createKeyspace (org.apache.cassandra.SchemaLoader.createKeyspace)3 DatabaseDescriptor (org.apache.cassandra.config.DatabaseDescriptor)3