Search in sources :

Example 6 with Descriptor

use of org.apache.cassandra.io.sstable.Descriptor in project cassandra by apache.

The following example shows the main method of the SSTableMetadataViewer class.

/**
 * Prints the metadata components (validation, stats, compaction and
 * serialization header) of each sstable named on the command line.
 *
 * @param args a list of sstables whose metadata we're interested in
 * @throws IOException if a metadata component cannot be read from disk
 */
public static void main(String[] args) throws IOException {
    PrintStream out = System.out;
    Option optGcgs = new Option(null, GCGS_KEY, true, "The " + GCGS_KEY + " to use when calculating droppable tombstones");
    Options options = new Options();
    options.addOption(optGcgs);
    CommandLine cmd = null;
    CommandLineParser parser = new PosixParser();
    try {
        cmd = parser.parse(options, args);
    } catch (ParseException e) {
        printHelp(options, out);
        // BUG FIX: cmd is still null here; falling through used to NPE on
        // cmd.getArgs() below. Bail out after showing usage.
        return;
    }
    if (cmd.getArgs().length == 0) {
        printHelp(options, out);
        // No sstables were named; nothing to do.
        return;
    }
    // gc_grace_seconds used for the droppable-tombstone estimate; defaults to 0.
    int gcgs = Integer.parseInt(cmd.getOptionValue(GCGS_KEY, "0"));
    Util.initDatabaseDescriptor();
    for (String fname : cmd.getArgs()) {
        if (new File(fname).exists()) {
            Descriptor descriptor = Descriptor.fromFilename(fname);
            // Deserialize every metadata component at once; absent components
            // come back as null entries in the map.
            Map<MetadataType, MetadataComponent> metadata = descriptor.getMetadataSerializer().deserialize(descriptor, EnumSet.allOf(MetadataType.class));
            ValidationMetadata validation = (ValidationMetadata) metadata.get(MetadataType.VALIDATION);
            StatsMetadata stats = (StatsMetadata) metadata.get(MetadataType.STATS);
            CompactionMetadata compaction = (CompactionMetadata) metadata.get(MetadataType.COMPACTION);
            CompressionMetadata compression = null;
            File compressionFile = new File(descriptor.filenameFor(Component.COMPRESSION_INFO));
            // Compression info only exists for compressed sstables.
            if (compressionFile.exists())
                compression = CompressionMetadata.create(fname);
            SerializationHeader.Component header = (SerializationHeader.Component) metadata.get(MetadataType.HEADER);
            out.printf("SSTable: %s%n", descriptor);
            if (validation != null) {
                out.printf("Partitioner: %s%n", validation.partitioner);
                out.printf("Bloom Filter FP chance: %f%n", validation.bloomFilterFPChance);
            }
            if (stats != null) {
                out.printf("Minimum timestamp: %s%n", stats.minTimestamp);
                out.printf("Maximum timestamp: %s%n", stats.maxTimestamp);
                out.printf("SSTable min local deletion time: %s%n", stats.minLocalDeletionTime);
                out.printf("SSTable max local deletion time: %s%n", stats.maxLocalDeletionTime);
                out.printf("Compressor: %s%n", compression != null ? compression.compressor().getClass().getName() : "-");
                if (compression != null)
                    out.printf("Compression ratio: %s%n", stats.compressionRatio);
                out.printf("TTL min: %s%n", stats.minTTL);
                out.printf("TTL max: %s%n", stats.maxTTL);
                // Need both the partitioner (validation) and key type (header)
                // to decode and print the min/max tokens.
                if (validation != null && header != null)
                    printMinMaxToken(descriptor, FBUtilities.newPartitioner(descriptor), header.getKeyType(), out);
                // Only print clustering bounds when the header's clustering
                // column count matches the recorded values (guards against
                // schema drift between components).
                if (header != null && header.getClusteringTypes().size() == stats.minClusteringValues.size()) {
                    List<AbstractType<?>> clusteringTypes = header.getClusteringTypes();
                    List<ByteBuffer> minClusteringValues = stats.minClusteringValues;
                    List<ByteBuffer> maxClusteringValues = stats.maxClusteringValues;
                    String[] minValues = new String[clusteringTypes.size()];
                    String[] maxValues = new String[clusteringTypes.size()];
                    for (int i = 0; i < clusteringTypes.size(); i++) {
                        minValues[i] = clusteringTypes.get(i).getString(minClusteringValues.get(i));
                        maxValues[i] = clusteringTypes.get(i).getString(maxClusteringValues.get(i));
                    }
                    // BUG FIX: labels previously read "minClustringValues" /
                    // "maxClustringValues" (typo, also fixed upstream).
                    out.printf("minClusteringValues: %s%n", Arrays.toString(minValues));
                    out.printf("maxClusteringValues: %s%n", Arrays.toString(maxValues));
                }
                out.printf("Estimated droppable tombstones: %s%n", stats.getEstimatedDroppableTombstoneRatio((int) (System.currentTimeMillis() / 1000) - gcgs));
                out.printf("SSTable Level: %d%n", stats.sstableLevel);
                out.printf("Repaired at: %d%n", stats.repairedAt);
                out.printf("Pending repair: %s%n", stats.pendingRepair);
                out.printf("Replay positions covered: %s%n", stats.commitLogIntervals);
                out.printf("totalColumnsSet: %s%n", stats.totalColumnsSet);
                out.printf("totalRows: %s%n", stats.totalRows);
                out.println("Estimated tombstone drop times:");
                for (Map.Entry<Number, long[]> entry : stats.estimatedTombstoneDropTime.getAsMap().entrySet()) {
                    out.printf("%-10s:%10s%n", entry.getKey().intValue(), entry.getValue()[0]);
                }
                printHistograms(stats, out);
            }
            if (compaction != null) {
                out.printf("Estimated cardinality: %s%n", compaction.cardinalityEstimator.cardinality());
            }
            if (header != null) {
                EncodingStats encodingStats = header.getEncodingStats();
                AbstractType<?> keyType = header.getKeyType();
                List<AbstractType<?>> clusteringTypes = header.getClusteringTypes();
                Map<ByteBuffer, AbstractType<?>> staticColumns = header.getStaticColumns();
                // Render column-name ByteBuffers as UTF-8 strings for display.
                Map<String, String> statics = staticColumns.entrySet().stream().collect(Collectors.toMap(e -> UTF8Type.instance.getString(e.getKey()), e -> e.getValue().toString()));
                Map<ByteBuffer, AbstractType<?>> regularColumns = header.getRegularColumns();
                Map<String, String> regulars = regularColumns.entrySet().stream().collect(Collectors.toMap(e -> UTF8Type.instance.getString(e.getKey()), e -> e.getValue().toString()));
                out.printf("EncodingStats minTTL: %s%n", encodingStats.minTTL);
                out.printf("EncodingStats minLocalDeletionTime: %s%n", encodingStats.minLocalDeletionTime);
                out.printf("EncodingStats minTimestamp: %s%n", encodingStats.minTimestamp);
                out.printf("KeyType: %s%n", keyType.toString());
                out.printf("ClusteringTypes: %s%n", clusteringTypes.toString());
                out.printf("StaticColumns: {%s}%n", FBUtilities.toString(statics));
                out.printf("RegularColumns: {%s}%n", FBUtilities.toString(regulars));
            }
        } else {
            out.println("No such file: " + fname);
        }
    }
}
Also used : Arrays(java.util.Arrays) CompressionMetadata(org.apache.cassandra.io.compress.CompressionMetadata) Options(org.apache.commons.cli.Options) AbstractType(org.apache.cassandra.db.marshal.AbstractType) HelpFormatter(org.apache.commons.cli.HelpFormatter) ByteBuffer(java.nio.ByteBuffer) UTF8Type(org.apache.cassandra.db.marshal.UTF8Type) DecoratedKey(org.apache.cassandra.db.DecoratedKey) Pair(org.apache.cassandra.utils.Pair) org.apache.cassandra.io.sstable.metadata(org.apache.cassandra.io.sstable.metadata) Map(java.util.Map) Component(org.apache.cassandra.io.sstable.Component) Descriptor(org.apache.cassandra.io.sstable.Descriptor) CommandLine(org.apache.commons.cli.CommandLine) PosixParser(org.apache.commons.cli.PosixParser) SerializationHeader(org.apache.cassandra.db.SerializationHeader) EnumSet(java.util.EnumSet) Option(org.apache.commons.cli.Option) FBUtilities(org.apache.cassandra.utils.FBUtilities) CommandLineParser(org.apache.commons.cli.CommandLineParser) Collectors(java.util.stream.Collectors) List(java.util.List) java.io(java.io) IPartitioner(org.apache.cassandra.dht.IPartitioner) ParseException(org.apache.commons.cli.ParseException) EncodingStats(org.apache.cassandra.db.rows.EncodingStats) IndexSummary(org.apache.cassandra.io.sstable.IndexSummary) Options(org.apache.commons.cli.Options) PosixParser(org.apache.commons.cli.PosixParser) EncodingStats(org.apache.cassandra.db.rows.EncodingStats) CommandLineParser(org.apache.commons.cli.CommandLineParser) Component(org.apache.cassandra.io.sstable.Component) CompressionMetadata(org.apache.cassandra.io.compress.CompressionMetadata) ByteBuffer(java.nio.ByteBuffer) CommandLine(org.apache.commons.cli.CommandLine) SerializationHeader(org.apache.cassandra.db.SerializationHeader) AbstractType(org.apache.cassandra.db.marshal.AbstractType) Descriptor(org.apache.cassandra.io.sstable.Descriptor) Option(org.apache.commons.cli.Option) ParseException(org.apache.commons.cli.ParseException) Map(java.util.Map)

Example 7 with Descriptor

use of org.apache.cassandra.io.sstable.Descriptor in project cassandra by apache.

The following example shows the createFakeSSTable method of the DirectoriesTest class.

/**
 * Creates empty placeholder files for the DATA, PRIMARY_INDEX and FILTER
 * components of a fake sstable in {@code dir}, collecting them into
 * {@code addTo}.
 *
 * @param dir   directory in which the fake component files are created
 * @param cf    column family (table) name used in the descriptor
 * @param gen   sstable generation number
 * @param addTo list that receives every file created
 * @throws IOException if a component file cannot be created
 */
private static void createFakeSSTable(File dir, String cf, int gen, List<File> addTo) throws IOException {
    Descriptor descriptor = new Descriptor(dir, KS, cf, gen, SSTableFormat.Type.BIG);
    Component[] components = { Component.DATA, Component.PRIMARY_INDEX, Component.FILTER };
    for (Component component : components) {
        File componentFile = new File(descriptor.filenameFor(component));
        componentFile.createNewFile();
        addTo.add(componentFile);
    }
}
Also used : Descriptor(org.apache.cassandra.io.sstable.Descriptor) DatabaseDescriptor(org.apache.cassandra.config.DatabaseDescriptor) Component(org.apache.cassandra.io.sstable.Component) File(java.io.File)

Example 8 with Descriptor

use of org.apache.cassandra.io.sstable.Descriptor in project cassandra by apache.

The following example shows the testSecondaryIndexDirectories method of the DirectoriesTest class.

@Test
public void testSecondaryIndexDirectories() {
    // Build a parent table with a KEYS index on "col"; the index gets its own
    // CFS metadata derived from the parent via CassandraIndex.
    TableMetadata.Builder builder = TableMetadata.builder(KS, "cf").addPartitionKeyColumn("thekey", UTF8Type.instance).addClusteringColumn("col", UTF8Type.instance);
    ColumnIdentifier col = ColumnIdentifier.getInterned("col", true);
    IndexMetadata indexDef = IndexMetadata.fromIndexTargets(Collections.singletonList(new IndexTarget(col, IndexTarget.Type.VALUES)), "idx", IndexMetadata.Kind.KEYS, Collections.emptyMap());
    builder.indexes(Indexes.of(indexDef));
    TableMetadata PARENT_CFM = builder.build();
    TableMetadata INDEX_CFM = CassandraIndex.indexCfsMetadata(PARENT_CFM, indexDef);
    Directories parentDirectories = new Directories(PARENT_CFM);
    Directories indexDirectories = new Directories(INDEX_CFM);
    // secondary index has its own directory
    for (File dir : indexDirectories.getCFDirectories()) {
        assertEquals(cfDir(INDEX_CFM), dir);
    }
    Descriptor parentDesc = new Descriptor(parentDirectories.getDirectoryForNewSSTables(), KS, PARENT_CFM.name, 0, SSTableFormat.Type.BIG);
    Descriptor indexDesc = new Descriptor(indexDirectories.getDirectoryForNewSSTables(), KS, INDEX_CFM.name, 0, SSTableFormat.Type.BIG);
    // snapshot dir should be created under its parent's
    File parentSnapshotDirectory = Directories.getSnapshotDirectory(parentDesc, "test");
    File indexSnapshotDirectory = Directories.getSnapshotDirectory(indexDesc, "test");
    assertEquals(parentSnapshotDirectory, indexSnapshotDirectory.getParentFile());
    // check if snapshot directory exists
    parentSnapshotDirectory.mkdirs();
    assertTrue(parentDirectories.snapshotExists("test"));
    assertTrue(indexDirectories.snapshotExists("test"));
    // check their creation time
    assertEquals(parentDirectories.snapshotCreationTime("test"), indexDirectories.snapshotCreationTime("test"));
    // check true snapshot size
    // Write 30 bytes into the parent's snapshot and 40 into the index's, then
    // confirm each Directories instance reports only its own bytes.
    Descriptor parentSnapshot = new Descriptor(parentSnapshotDirectory, KS, PARENT_CFM.name, 0, SSTableFormat.Type.BIG);
    createFile(parentSnapshot.filenameFor(Component.DATA), 30);
    Descriptor indexSnapshot = new Descriptor(indexSnapshotDirectory, KS, INDEX_CFM.name, 0, SSTableFormat.Type.BIG);
    createFile(indexSnapshot.filenameFor(Component.DATA), 40);
    assertEquals(30, parentDirectories.trueSnapshotsSize());
    assertEquals(40, indexDirectories.trueSnapshotsSize());
    // check snapshot details
    // Each snapshot detail pair is (size on disk, true size); only the true
    // size (right element) is asserted here.
    Map<String, Pair<Long, Long>> parentSnapshotDetail = parentDirectories.getSnapshotDetails();
    assertTrue(parentSnapshotDetail.containsKey("test"));
    assertEquals(30L, parentSnapshotDetail.get("test").right.longValue());
    Map<String, Pair<Long, Long>> indexSnapshotDetail = indexDirectories.getSnapshotDetails();
    assertTrue(indexSnapshotDetail.containsKey("test"));
    assertEquals(40L, indexSnapshotDetail.get("test").right.longValue());
    // check backup directory
    // Like snapshots, the index backup directory nests under the parent's.
    File parentBackupDirectory = Directories.getBackupsDirectory(parentDesc);
    File indexBackupDirectory = Directories.getBackupsDirectory(indexDesc);
    assertEquals(parentBackupDirectory, indexBackupDirectory.getParentFile());
}
Also used : TableMetadata(org.apache.cassandra.schema.TableMetadata) IndexTarget(org.apache.cassandra.cql3.statements.IndexTarget) Descriptor(org.apache.cassandra.io.sstable.Descriptor) DatabaseDescriptor(org.apache.cassandra.config.DatabaseDescriptor) ColumnIdentifier(org.apache.cassandra.cql3.ColumnIdentifier) IndexMetadata(org.apache.cassandra.schema.IndexMetadata) File(java.io.File) Pair(org.apache.cassandra.utils.Pair) Test(org.junit.Test)

Example 9 with Descriptor

use of org.apache.cassandra.io.sstable.Descriptor in project cassandra by apache.

The following example shows the replaceSSTable method of the RealTransactionsTest class.

/**
 * Compacts the sstables in {@code txn} into a single replacement sstable.
 *
 * @param cfs  the column family store the sstables belong to
 * @param txn  lifecycle transaction holding the originals to rewrite
 * @param fail when true, the rewrite is aborted instead of finished,
 *             simulating a failed compaction
 * @return the single newly written sstable, or null when {@code fail} is true
 */
private SSTableReader replaceSSTable(ColumnFamilyStore cfs, LifecycleTransaction txn, boolean fail) {
    List<SSTableReader> newsstables = null;
    int nowInSec = FBUtilities.nowInSeconds();
    // The controller, rewriter, scanners and compaction iterator are all
    // closed in reverse order by the nested try-with-resources blocks.
    try (CompactionController controller = new CompactionController(cfs, txn.originals(), cfs.gcBefore(FBUtilities.nowInSeconds()))) {
        try (SSTableRewriter rewriter = SSTableRewriter.constructKeepingOriginals(txn, false, 1000);
            AbstractCompactionStrategy.ScannerList scanners = cfs.getCompactionStrategyManager().getScanners(txn.originals());
            CompactionIterator ci = new CompactionIterator(txn.opType(), scanners.scanners, controller, nowInSec, txn.opId())) {
            long lastCheckObsoletion = System.nanoTime();
            // Write the new sstable into the same directory as the first original.
            File directory = txn.originals().iterator().next().descriptor.directory;
            Descriptor desc = cfs.newSSTableDescriptor(directory);
            TableMetadataRef metadata = Schema.instance.getTableMetadataRef(desc);
            rewriter.switchWriter(SSTableWriter.create(metadata, desc, 0, 0, null, 0, SerializationHeader.make(cfs.metadata(), txn.originals()), cfs.indexManager.listIndexes(), txn));
            while (ci.hasNext()) {
                rewriter.append(ci.next());
                // Refresh overlap information at most once per minute during
                // long-running rewrites.
                if (System.nanoTime() - lastCheckObsoletion > TimeUnit.MINUTES.toNanos(1L)) {
                    controller.maybeRefreshOverlaps();
                    lastCheckObsoletion = System.nanoTime();
                }
            }
            if (!fail)
                newsstables = rewriter.finish();
            else
                rewriter.abort();
        }
    }
    // A successful (non-fail) run must have produced results.
    assertTrue(fail || newsstables != null);
    if (newsstables != null) {
        Assert.assertEquals(1, newsstables.size());
        return newsstables.iterator().next();
    }
    return null;
}
Also used : CompactionController(org.apache.cassandra.db.compaction.CompactionController) SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) AbstractCompactionStrategy(org.apache.cassandra.db.compaction.AbstractCompactionStrategy) CompactionIterator(org.apache.cassandra.db.compaction.CompactionIterator) TableMetadataRef(org.apache.cassandra.schema.TableMetadataRef) Descriptor(org.apache.cassandra.io.sstable.Descriptor) SSTableRewriter(org.apache.cassandra.io.sstable.SSTableRewriter) File(java.io.File)

Example 10 with Descriptor

use of org.apache.cassandra.io.sstable.Descriptor in project cassandra by apache.

The following example shows the testFlushObserver method of the SSTableFlushObserverTest class.

/**
 * Verifies that a flush observer attached to a BigTableWriter sees every row
 * written, is marked complete on finish, and that each observed index
 * position resolves back to the matching partition key in the index file.
 */
@Test
public void testFlushObserver() {
    TableMetadata cfm = TableMetadata.builder(KS_NAME, CF_NAME).addPartitionKeyColumn("id", UTF8Type.instance).addRegularColumn("first_name", UTF8Type.instance).addRegularColumn("age", Int32Type.instance).addRegularColumn("height", LongType.instance).build();
    LifecycleTransaction transaction = LifecycleTransaction.offline(OperationType.COMPACTION);
    FlushObserver observer = new FlushObserver();
    String sstableDirectory = DatabaseDescriptor.getAllDataFileLocations()[0];
    // BUG FIX: this previously used File.pathSeparator (':' or ';'), which
    // separates entries in a PATH-style list, producing a bogus directory
    // name. File.separator is the correct directory separator.
    File directory = new File(sstableDirectory + File.separator + KS_NAME + File.separator + CF_NAME);
    directory.deleteOnExit();
    if (!directory.exists() && !directory.mkdirs())
        throw new FSWriteError(new IOException("failed to create tmp directory"), directory.getAbsolutePath());
    SSTableFormat.Type sstableFormat = SSTableFormat.Type.current();
    BigTableWriter writer = new BigTableWriter(new Descriptor(sstableFormat.info.getLatestVersion(), directory, KS_NAME, CF_NAME, 0, sstableFormat), 10L, 0L, null, TableMetadataRef.forOfflineTools(cfm), new MetadataCollector(cfm.comparator).sstableLevel(0), new SerializationHeader(true, cfm, cfm.regularAndStaticColumns(), EncodingStats.NO_STATS), Collections.singletonList(observer), transaction);
    SSTableReader reader = null;
    // Record every cell we append, keyed by partition key, so observed rows
    // can be compared afterwards.
    Multimap<ByteBuffer, Cell> expected = ArrayListMultimap.create();
    try {
        final long now = System.currentTimeMillis();
        ByteBuffer key = UTF8Type.instance.fromString("key1");
        expected.putAll(key, Arrays.asList(BufferCell.live(getColumn(cfm, "age"), now, Int32Type.instance.decompose(27)), BufferCell.live(getColumn(cfm, "first_name"), now, UTF8Type.instance.fromString("jack")), BufferCell.live(getColumn(cfm, "height"), now, LongType.instance.decompose(183L))));
        writer.append(new RowIterator(cfm, key.duplicate(), Collections.singletonList(buildRow(expected.get(key)))));
        key = UTF8Type.instance.fromString("key2");
        expected.putAll(key, Arrays.asList(BufferCell.live(getColumn(cfm, "age"), now, Int32Type.instance.decompose(30)), BufferCell.live(getColumn(cfm, "first_name"), now, UTF8Type.instance.fromString("jim")), BufferCell.live(getColumn(cfm, "height"), now, LongType.instance.decompose(180L))));
        writer.append(new RowIterator(cfm, key, Collections.singletonList(buildRow(expected.get(key)))));
        key = UTF8Type.instance.fromString("key3");
        expected.putAll(key, Arrays.asList(BufferCell.live(getColumn(cfm, "age"), now, Int32Type.instance.decompose(30)), BufferCell.live(getColumn(cfm, "first_name"), now, UTF8Type.instance.fromString("ken")), BufferCell.live(getColumn(cfm, "height"), now, LongType.instance.decompose(178L))));
        writer.append(new RowIterator(cfm, key, Collections.singletonList(buildRow(expected.get(key)))));
        reader = writer.finish(true);
    } finally {
        FileUtils.closeQuietly(writer);
    }
    Assert.assertTrue(observer.isComplete);
    Assert.assertEquals(expected.size(), observer.rows.size());
    // Each observed (key, index position) pair must point at the same key in
    // the on-disk index file.
    for (Pair<ByteBuffer, Long> e : observer.rows.keySet()) {
        ByteBuffer key = e.left;
        Long indexPosition = e.right;
        try (FileDataInput index = reader.ifile.createReader(indexPosition)) {
            ByteBuffer indexKey = ByteBufferUtil.readWithShortLength(index);
            Assert.assertEquals(0, UTF8Type.instance.compare(key, indexKey));
        } catch (IOException ex) {
            throw new FSReadError(ex, reader.getIndexFilename());
        }
        Assert.assertEquals(expected.get(key), observer.rows.get(e));
    }
}
Also used : TableMetadata(org.apache.cassandra.schema.TableMetadata) FSWriteError(org.apache.cassandra.io.FSWriteError) LifecycleTransaction(org.apache.cassandra.db.lifecycle.LifecycleTransaction) IOException(java.io.IOException) ByteBuffer(java.nio.ByteBuffer) FileDataInput(org.apache.cassandra.io.util.FileDataInput) SerializationHeader(org.apache.cassandra.db.SerializationHeader) FSReadError(org.apache.cassandra.io.FSReadError) BigTableWriter(org.apache.cassandra.io.sstable.format.big.BigTableWriter) Descriptor(org.apache.cassandra.io.sstable.Descriptor) DatabaseDescriptor(org.apache.cassandra.config.DatabaseDescriptor) MetadataCollector(org.apache.cassandra.io.sstable.metadata.MetadataCollector) File(java.io.File) Test(org.junit.Test)

Aggregations

Descriptor (org.apache.cassandra.io.sstable.Descriptor)38 File (java.io.File)24 DatabaseDescriptor (org.apache.cassandra.config.DatabaseDescriptor)15 Test (org.junit.Test)12 ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore)9 Component (org.apache.cassandra.io.sstable.Component)8 SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader)8 TableMetadata (org.apache.cassandra.schema.TableMetadata)6 Pair (org.apache.cassandra.utils.Pair)6 IOException (java.io.IOException)5 Map (java.util.Map)4 Directories (org.apache.cassandra.db.Directories)4 Keyspace (org.apache.cassandra.db.Keyspace)4 PrintStream (java.io.PrintStream)3 ByteBuffer (java.nio.ByteBuffer)3 Set (java.util.Set)3 DecoratedKey (org.apache.cassandra.db.DecoratedKey)3 SerializationHeader (org.apache.cassandra.db.SerializationHeader)3 NonBlockingHashMap (org.cliffc.high_scale_lib.NonBlockingHashMap)3 AtomicInteger (java.util.concurrent.atomic.AtomicInteger)2