Example 11 with StatsMetadata

Use of org.apache.cassandra.io.sstable.metadata.StatsMetadata in project cassandra by apache.

From class BigTableWriter, method openFinal.

@SuppressWarnings("resource")
private SSTableReader openFinal(SSTableReader.OpenReason openReason) {
    if (maxDataAge < 0)
        maxDataAge = System.currentTimeMillis();
    StatsMetadata stats = statsMetadata();
    // finalize in-memory state for the reader
    IndexSummary indexSummary = iwriter.summary.build(metadata().partitioner);
    long indexFileLength = new File(descriptor.filenameFor(Component.PRIMARY_INDEX)).length();
    int dataBufferSize = optimizationStrategy.bufferSize(stats.estimatedPartitionSize.percentile(DatabaseDescriptor.getDiskOptimizationEstimatePercentile()));
    int indexBufferSize = optimizationStrategy.bufferSize(indexFileLength / indexSummary.size());
    FileHandle ifile = iwriter.builder.bufferSize(indexBufferSize).complete();
    if (compression)
        dbuilder.withCompressionMetadata(((CompressedSequentialWriter) dataFile).open(0));
    FileHandle dfile = dbuilder.bufferSize(dataBufferSize).complete();
    invalidateCacheAtBoundary(dfile);
    SSTableReader sstable = SSTableReader.internalOpen(descriptor, components, metadata, ifile, dfile, indexSummary, iwriter.bf.sharedCopy(), maxDataAge, stats, openReason, header);
    sstable.first = getMinimalKey(first);
    sstable.last = getMinimalKey(last);
    return sstable;
}
Also used: StatsMetadata (org.apache.cassandra.io.sstable.metadata.StatsMetadata), CompressedSequentialWriter (org.apache.cassandra.io.compress.CompressedSequentialWriter), SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader)
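
The data-file buffer size above is derived from StatsMetadata: openFinal asks the disk optimization strategy for a buffer sized to a configured percentile of the estimated partition size histogram. As a rough illustration of that style of sizing, here is a minimal sketch with invented constants and power-of-two rounding; it is not Cassandra's actual DiskOptimizationStrategy.

// Hypothetical sketch: pick a read-buffer size from an estimated
// record-size percentile, rounded up to a power of two and clamped.
final class BufferSizing {
    static final int MIN_BUFFER = 4096;   // 4 KiB floor (invented)
    static final int MAX_BUFFER = 65536;  // 64 KiB ceiling (invented)

    static int bufferSize(long estimatedRecordSize) {
        long size = Math.max(MIN_BUFFER, estimatedRecordSize);
        long rounded = Long.highestOneBit(size);
        if (rounded < size)
            rounded <<= 1; // round up to the next power of two
        return (int) Math.min(rounded, MAX_BUFFER);
    }

    public static void main(String[] args) {
        System.out.println(bufferSize(10_000)); // prints 16384
    }
}

The same idea drives the index buffer size computed alongside it, except there the estimate is simply the average index entry size (index file length divided by summary entry count).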

Example 12 with StatsMetadata

Use of org.apache.cassandra.io.sstable.metadata.StatsMetadata in project cassandra by apache.

From class SSTableLevelResetter, method main.

/**
 * @param args the tool arguments: --really-reset <keyspace> <table>
 */
public static void main(String[] args) {
    PrintStream out = System.out;
    if (args.length == 0) {
        out.println("This command should be run with Cassandra stopped!");
        out.println("Usage: sstablelevelreset <keyspace> <table>");
        System.exit(1);
    }
    if (!args[0].equals("--really-reset") || args.length != 3) {
        out.println("This command should be run with Cassandra stopped, otherwise you will get very strange behavior");
        out.println("Verify that Cassandra is not running and then execute the command like this:");
        out.println("Usage: sstablelevelreset --really-reset <keyspace> <table>");
        System.exit(1);
    }
    Util.initDatabaseDescriptor();
    // Several daemon threads will run from here, so we have to explicitly call System.exit.
    try {
        // load keyspace descriptions.
        Schema.instance.loadFromDisk(false);
        String keyspaceName = args[1];
        String columnfamily = args[2];
        // validate columnfamily
        if (Schema.instance.getTableMetadataRef(keyspaceName, columnfamily) == null) {
            System.err.println("ColumnFamily not found: " + keyspaceName + "/" + columnfamily);
            System.exit(1);
        }
        Keyspace keyspace = Keyspace.openWithoutSSTables(keyspaceName);
        ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(columnfamily);
        boolean foundSSTable = false;
        for (Map.Entry<Descriptor, Set<Component>> sstable : cfs.getDirectories().sstableLister(Directories.OnTxnErr.THROW).list().entrySet()) {
            if (sstable.getValue().contains(Component.STATS)) {
                foundSSTable = true;
                Descriptor descriptor = sstable.getKey();
                StatsMetadata metadata = (StatsMetadata) descriptor.getMetadataSerializer().deserialize(descriptor, MetadataType.STATS);
                if (metadata.sstableLevel > 0) {
                    out.println("Changing level from " + metadata.sstableLevel + " to 0 on " + descriptor.filenameFor(Component.DATA));
                    descriptor.getMetadataSerializer().mutateLevel(descriptor, 0);
                } else {
                    out.println("Skipped " + descriptor.filenameFor(Component.DATA) + " since it is already on level 0");
                }
            }
        }
        if (!foundSSTable) {
            out.println("Found no sstables, did you give the correct keyspace/table?");
        }
    } catch (Throwable t) {
        JVMStabilityInspector.inspectThrowable(t);
        t.printStackTrace();
        System.exit(1);
    }
    System.exit(0);
}
Also used: StatsMetadata (org.apache.cassandra.io.sstable.metadata.StatsMetadata), PrintStream (java.io.PrintStream), Set (java.util.Set), Keyspace (org.apache.cassandra.db.Keyspace), ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore), Descriptor (org.apache.cassandra.io.sstable.Descriptor), Map (java.util.Map)
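
Stripped of argument handling and schema loading, the heart of the tool is a deserialize-inspect-mutate round trip on the STATS component. A minimal sketch of that sequence, using only the serializer calls already shown above; it assumes a valid Descriptor for an existing sstable and must be compiled against the Cassandra tree.

import java.io.IOException;

import org.apache.cassandra.io.sstable.Descriptor;
import org.apache.cassandra.io.sstable.metadata.MetadataType;
import org.apache.cassandra.io.sstable.metadata.StatsMetadata;

// Sketch: reset the compaction level of a single sstable, mirroring the
// loop body in main above. Precondition (as the tool itself warns):
// Cassandra must be stopped, or the on-disk mutation races with the server.
final class LevelReset {
    static void resetLevel(Descriptor descriptor) throws IOException {
        StatsMetadata stats = (StatsMetadata)
            descriptor.getMetadataSerializer().deserialize(descriptor, MetadataType.STATS);
        if (stats.sstableLevel > 0)
            descriptor.getMetadataSerializer().mutateLevel(descriptor, 0);
    }
}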

Example 13 with StatsMetadata

Use of org.apache.cassandra.io.sstable.metadata.StatsMetadata in project cassandra by apache.

From class CompactionManager, method doCleanupOne.

/**
 * Goes over an sstable and removes the keys that this node is not
 * responsible for, keeping only keys in the given ranges.
 *
 * @throws IOException
 */
private void doCleanupOne(final ColumnFamilyStore cfs, LifecycleTransaction txn, CleanupStrategy cleanupStrategy, Collection<Range<Token>> ranges, boolean hasIndexes) throws IOException {
    assert !cfs.isIndex();
    SSTableReader sstable = txn.onlyOne();
    if (!hasIndexes && !new Bounds<>(sstable.first.getToken(), sstable.last.getToken()).intersects(ranges)) {
        txn.obsoleteOriginals();
        txn.finish();
        return;
    }
    if (!needsCleanup(sstable, ranges)) {
        logger.trace("Skipping {} for cleanup; all rows should be kept", sstable);
        return;
    }
    long start = System.nanoTime();
    long totalkeysWritten = 0;
    long expectedBloomFilterSize = Math.max(cfs.metadata().params.minIndexInterval, SSTableReader.getApproximateKeyCount(txn.originals()));
    if (logger.isTraceEnabled())
        logger.trace("Expected bloom filter size : {}", expectedBloomFilterSize);
    logger.info("Cleaning up {}", sstable);
    File compactionFileLocation = sstable.descriptor.directory;
    RateLimiter limiter = getRateLimiter();
    double compressionRatio = sstable.getCompressionRatio();
    if (compressionRatio == MetadataCollector.NO_COMPRESSION_RATIO)
        compressionRatio = 1.0;
    List<SSTableReader> finished;
    int nowInSec = FBUtilities.nowInSeconds();
    try (SSTableRewriter writer = SSTableRewriter.construct(cfs, txn, false, sstable.maxDataAge);
        ISSTableScanner scanner = cleanupStrategy.getScanner(sstable);
        CompactionController controller = new CompactionController(cfs, txn.originals(), getDefaultGcBefore(cfs, nowInSec));
        CompactionIterator ci = new CompactionIterator(OperationType.CLEANUP, Collections.singletonList(scanner), controller, nowInSec, UUIDGen.getTimeUUID(), metrics)) {
        StatsMetadata metadata = sstable.getSSTableMetadata();
        writer.switchWriter(createWriter(cfs, compactionFileLocation, expectedBloomFilterSize, metadata.repairedAt, metadata.pendingRepair, sstable, txn));
        long lastBytesScanned = 0;
        while (ci.hasNext()) {
            if (ci.isStopRequested())
                throw new CompactionInterruptedException(ci.getCompactionInfo());
            try (UnfilteredRowIterator partition = ci.next();
                UnfilteredRowIterator notCleaned = cleanupStrategy.cleanup(partition)) {
                if (notCleaned == null)
                    continue;
                if (writer.append(notCleaned) != null)
                    totalkeysWritten++;
                long bytesScanned = scanner.getBytesScanned();
                compactionRateLimiterAcquire(limiter, bytesScanned, lastBytesScanned, compressionRatio);
                lastBytesScanned = bytesScanned;
            }
        }
        // flush to ensure we don't lose the tombstones on a restart, since they are not commitlog'd
        cfs.indexManager.flushAllIndexesBlocking();
        finished = writer.finish();
    }
    if (!finished.isEmpty()) {
        String format = "Cleaned up to %s.  %s to %s (~%d%% of original) for %,d keys.  Time: %,dms.";
        long dTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
        long startsize = sstable.onDiskLength();
        long endsize = 0;
        for (SSTableReader newSstable : finished) endsize += newSstable.onDiskLength();
        double ratio = (double) endsize / (double) startsize;
        logger.info(String.format(format, finished.get(0).getFilename(), FBUtilities.prettyPrintMemory(startsize), FBUtilities.prettyPrintMemory(endsize), (int) (ratio * 100), totalkeysWritten, dTime));
    }
}
Also used: ISSTableScanner (org.apache.cassandra.io.sstable.ISSTableScanner), StatsMetadata (org.apache.cassandra.io.sstable.metadata.StatsMetadata), UnfilteredRowIterator (org.apache.cassandra.db.rows.UnfilteredRowIterator), SSTableRewriter (org.apache.cassandra.io.sstable.SSTableRewriter), SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader), File (java.io.File)
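
The cleanup loop throttles on bytes scanned, scaled by the sstable's compression ratio so the limiter charges for logical bytes processed rather than compressed bytes read from disk. A plausible sketch of what a helper like compactionRateLimiterAcquire might do; the real implementation may differ in detail.

import com.google.common.util.concurrent.RateLimiter;

// Sketch: charge the limiter for the delta scanned since the last call,
// scaled up by the compression ratio. Acquire in Integer.MAX_VALUE-sized
// chunks because RateLimiter.acquire takes an int permit count.
final class ThrottleSketch {
    static void acquire(RateLimiter limiter, long bytesScanned,
                        long lastBytesScanned, double compressionRatio) {
        long delta = (long) ((bytesScanned - lastBytesScanned) * compressionRatio);
        while (delta > 0) {
            int permits = (int) Math.min(delta, Integer.MAX_VALUE);
            limiter.acquire(permits);
            delta -= permits;
        }
    }
}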

Example 14 with StatsMetadata

Use of org.apache.cassandra.io.sstable.metadata.StatsMetadata in project cassandra by apache.

From class BigTableWriter, method openEarly.

@SuppressWarnings("resource")
public SSTableReader openEarly() {
    // find the max (exclusive) readable key
    IndexSummaryBuilder.ReadableBoundary boundary = iwriter.getMaxReadable();
    if (boundary == null)
        return null;
    StatsMetadata stats = statsMetadata();
    assert boundary.indexLength > 0 && boundary.dataLength > 0;
    // open the reader early
    IndexSummary indexSummary = iwriter.summary.build(metadata().partitioner, boundary);
    long indexFileLength = new File(descriptor.filenameFor(Component.PRIMARY_INDEX)).length();
    int indexBufferSize = optimizationStrategy.bufferSize(indexFileLength / indexSummary.size());
    FileHandle ifile = iwriter.builder.bufferSize(indexBufferSize).complete(boundary.indexLength);
    if (compression)
        dbuilder.withCompressionMetadata(((CompressedSequentialWriter) dataFile).open(boundary.dataLength));
    int dataBufferSize = optimizationStrategy.bufferSize(stats.estimatedPartitionSize.percentile(DatabaseDescriptor.getDiskOptimizationEstimatePercentile()));
    FileHandle dfile = dbuilder.bufferSize(dataBufferSize).complete(boundary.dataLength);
    invalidateCacheAtBoundary(dfile);
    SSTableReader sstable = SSTableReader.internalOpen(descriptor, components, metadata, ifile, dfile, indexSummary, iwriter.bf.sharedCopy(), maxDataAge, stats, SSTableReader.OpenReason.EARLY, header);
    // now it's open, find the ACTUAL last readable key (i.e. for which the data file has also been flushed)
    sstable.first = getMinimalKey(first);
    sstable.last = getMinimalKey(boundary.lastKey);
    return sstable;
}
Also used: StatsMetadata (org.apache.cassandra.io.sstable.metadata.StatsMetadata), CompressedSequentialWriter (org.apache.cassandra.io.compress.CompressedSequentialWriter), SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader)
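
openEarly differs from openFinal mainly in completing the file handles at the last safely readable boundary instead of at end-of-file, so a still-being-written sstable can serve reads early. Both paths size the index buffer from the average bytes per index-summary entry; a standalone sketch of that arithmetic (hypothetical helper, with a zero guard the writer paths above never need):

// Sketch: average index entry size, as used for the index buffer size in
// both openEarly and openFinal (indexFileLength / indexSummary.size()).
final class IndexBufferSizing {
    static long averageIndexEntrySize(long indexFileLength, int summaryEntries) {
        return summaryEntries == 0 ? indexFileLength : indexFileLength / summaryEntries;
    }

    public static void main(String[] args) {
        System.out.println(averageIndexEntrySize(1_048_576, 256)); // prints 4096
    }
}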

Example 15 with StatsMetadata

Use of org.apache.cassandra.io.sstable.metadata.StatsMetadata in project cassandra by apache.

From class SSTableMetadataTrackingTest, method testTrackMetadata_rowMarkerDelete.

@Test
public void testTrackMetadata_rowMarkerDelete() throws Throwable {
    createTable("CREATE TABLE %s (a int, PRIMARY KEY (a))");
    ColumnFamilyStore cfs = Keyspace.open(keyspace()).getColumnFamilyStore(currentTable());
    execute("DELETE FROM %s USING TIMESTAMP 9999 WHERE a=1");
    cfs.forceBlockingFlush();
    assertEquals(1, cfs.getLiveSSTables().size());
    StatsMetadata metadata = cfs.getLiveSSTables().iterator().next().getSSTableMetadata();
    assertEquals(9999, metadata.minTimestamp);
    assertEquals(9999, metadata.maxTimestamp);
    assertEquals(System.currentTimeMillis() / 1000, metadata.maxLocalDeletionTime, 5);
    cfs.forceMajorCompaction();
    StatsMetadata metadata2 = cfs.getLiveSSTables().iterator().next().getSSTableMetadata();
    assertEquals(metadata.maxLocalDeletionTime, metadata2.maxLocalDeletionTime);
    assertEquals(metadata.minTimestamp, metadata2.minTimestamp);
    assertEquals(metadata.maxTimestamp, metadata2.maxTimestamp);
}
Also used: StatsMetadata (org.apache.cassandra.io.sstable.metadata.StatsMetadata), ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore), Test (org.junit.Test)
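
Note the third argument in the maxLocalDeletionTime assertion above: that field is derived from the wall clock when the tombstone is written, so the test compares it to "now" with a 5-second tolerance via JUnit's delta overload of assertEquals. A self-contained sketch of the same pattern, with a stand-in value instead of real sstable metadata:

import static org.junit.Assert.assertEquals;

import org.junit.Test;

// Sketch: asserting a wall-clock-derived value within a tolerance. The long
// arguments widen to assertEquals(double expected, double actual, double delta).
public class ClockDeltaAssertSketch {
    @Test
    public void wallClockFieldWithinTolerance() {
        long nowSeconds = System.currentTimeMillis() / 1000;
        long recorded = nowSeconds; // stand-in for metadata.maxLocalDeletionTime
        assertEquals(nowSeconds, recorded, 5);
    }
}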

Aggregations

StatsMetadata (org.apache.cassandra.io.sstable.metadata.StatsMetadata): 16 usages
ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore): 8 usages
Test (org.junit.Test): 7 usages
SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader): 6 usages
File (java.io.File): 3 usages
RandomAccessFile (java.io.RandomAccessFile): 2 usages
ByteBuffer (java.nio.ByteBuffer): 2 usages
CompressedSequentialWriter (org.apache.cassandra.io.compress.CompressedSequentialWriter): 2 usages
Descriptor (org.apache.cassandra.io.sstable.Descriptor): 2 usages
MetadataCollector (org.apache.cassandra.io.sstable.metadata.MetadataCollector): 2 usages
AlwaysPresentFilter (org.apache.cassandra.utils.AlwaysPresentFilter): 2 usages
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 1 usage
Throwables (com.google.common.base.Throwables): 1 usage
java.io (java.io): 1 usage
IOException (java.io.IOException): 1 usage
PrintStream (java.io.PrintStream): 1 usage
java.util (java.util): 1 usage
Map (java.util.Map): 1 usage
Set (java.util.Set): 1 usage
DatabaseDescriptor (org.apache.cassandra.config.DatabaseDescriptor): 1 usage